forked from toolshed/abra
chore: make deps, go mod vendor
14 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go (generated, vendored)

@@ -18,20 +18,6 @@ const (
WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
)

// Server HTTP metrics.
const (
serverRequestSize = "http.server.request.size" // Incoming request bytes total
serverResponseSize = "http.server.response.size" // Incoming response bytes total
serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
)

// Client HTTP metrics.
const (
clientRequestSize = "http.client.request.size" // Outgoing request bytes total
clientResponseSize = "http.client.response.size" // Outgoing response bytes total
clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds
)

// Filter is a predicate used to determine whether a given http.request should
// be traced. A Filter must return true if the request should be traced.
type Filter func(*http.Request) bool
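The Filter type shown above is the public hook for opting requests out of tracing; it is normally wired in through otelhttp.WithFilter. A minimal sketch of that wiring (the health-check path is an illustrative assumption, not part of this diff):

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func newInstrumentedHandler(mux *http.ServeMux) http.Handler {
	// Trace every request except the (hypothetical) health-check endpoint.
	skipHealth := func(r *http.Request) bool {
		return r.URL.Path != "/healthz"
	}
	return otelhttp.NewHandler(mux, "http.server", otelhttp.WithFilter(skipHealth))
}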
15 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go (generated, vendored)

@@ -8,6 +8,8 @@ import (
"net/http"
"net/http/httptrace"

"go.opentelemetry.io/otel/attribute"

"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
@@ -33,8 +35,9 @@ type config struct {
SpanNameFormatter func(string, *http.Request) string
ClientTrace func(context.Context) *httptrace.ClientTrace

TracerProvider trace.TracerProvider
MeterProvider metric.MeterProvider
TracerProvider trace.TracerProvider
MeterProvider metric.MeterProvider
MetricAttributesFn func(*http.Request) []attribute.KeyValue
}

// Option interface used for setting optional config properties.
@@ -194,3 +197,11 @@ func WithServerName(server string) Option {
c.ServerName = server
})
}

// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue.
// These attributes will be included in metrics for every request.
func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option {
	return optionFunc(func(c *config) {
		c.MetricAttributesFn = metricAttributesFn
	})
}
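The new WithMetricAttributesFn option attaches per-request attributes to the metrics recorded by the instrumentation. A minimal sketch of how a caller might use it on an instrumented client (the X-Tenant header and "tenant" label are hypothetical, chosen only for illustration):

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func newInstrumentedClient() *http.Client {
	transport := otelhttp.NewTransport(
		http.DefaultTransport,
		otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
			// Tag each request's metrics with a tenant label taken from a header.
			return []attribute.KeyValue{attribute.String("tenant", r.Header.Get("X-Tenant"))}
		}),
	)
	return &http.Client{Transport: transport}
}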
108 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go (generated, vendored)

@@ -9,11 +9,9 @@ import (

"github.com/felixge/httpsnoop"

"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
)
@@ -24,7 +22,6 @@ type middleware struct {
server string

tracer trace.Tracer
meter metric.Meter
propagators propagation.TextMapPropagator
spanStartOptions []trace.SpanStartOption
readEvent bool
@@ -34,10 +31,7 @@ type middleware struct {
publicEndpoint bool
publicEndpointFn func(*http.Request) bool

traceSemconv semconv.HTTPServer
requestBytesCounter metric.Int64Counter
responseBytesCounter metric.Int64Counter
serverLatencyMeasure metric.Float64Histogram
semconv semconv.HTTPServer
}

func defaultHandlerFormatter(operation string, _ *http.Request) string {
@@ -56,8 +50,6 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han
func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler {
h := middleware{
operation: operation,

traceSemconv: semconv.NewHTTPServer(),
}

defaultOpts := []Option{
@@ -67,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han

c := newConfig(append(defaultOpts, opts...)...)
h.configure(c)
h.createMeasures()

return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -78,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han

func (h *middleware) configure(c *config) {
h.tracer = c.Tracer
h.meter = c.Meter
h.propagators = c.Propagators
h.spanStartOptions = c.SpanStartOptions
h.readEvent = c.ReadEvent
@@ -88,36 +78,7 @@ func (h *middleware) configure(c *config) {
h.publicEndpoint = c.PublicEndpoint
h.publicEndpointFn = c.PublicEndpointFn
h.server = c.ServerName
}

func handleErr(err error) {
if err != nil {
otel.Handle(err)
}
}

func (h *middleware) createMeasures() {
var err error
h.requestBytesCounter, err = h.meter.Int64Counter(
serverRequestSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP request messages."),
)
handleErr(err)

h.responseBytesCounter, err = h.meter.Int64Counter(
serverResponseSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP response messages."),
)
handleErr(err)

h.serverLatencyMeasure, err = h.meter.Float64Histogram(
serverDuration,
metric.WithUnit("ms"),
metric.WithDescription("Measures the duration of inbound HTTP requests."),
)
handleErr(err)
h.semconv = semconv.NewHTTPServer(c.Meter)
}

// serveHTTP sets up tracing and calls the given next http.Handler with the span
@@ -134,7 +95,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http

ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
opts := []trace.SpanStartOption{
trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...),
trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...),
}

opts = append(opts, h.spanStartOptions...)
@@ -156,6 +117,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
}
}

if startTime := StartTimeFromContext(ctx); !startTime.IsZero() {
opts = append(opts, trace.WithTimestamp(startTime))
requestStartTime = startTime
}

ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
defer span.End()

@@ -166,14 +132,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
}
}

var bw bodyWrapper
// if request body is nil or NoBody, we don't want to mutate the body as it
// will affect the identity of it in an unforeseeable way because we assert
// ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
bw := request.NewBodyWrapper(r.Body, readRecordFunc)
if r.Body != nil && r.Body != http.NoBody {
bw.ReadCloser = r.Body
bw.record = readRecordFunc
r.Body = &bw
r.Body = bw
}

writeRecordFunc := func(int64) {}
@@ -183,13 +147,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
}
}

rww := &respWriterWrapper{
ResponseWriter: w,
record: writeRecordFunc,
ctx: ctx,
props: h.propagators,
statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
}
rww := request.NewRespWriterWrapper(w, writeRecordFunc)

// Wrap w to use our ResponseWriter methods while also exposing
// other interfaces that w may implement (http.CloseNotifier,
@@ -217,35 +175,39 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http

next.ServeHTTP(w, r.WithContext(ctx))

span.SetStatus(semconv.ServerStatus(rww.statusCode))
span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
StatusCode: rww.statusCode,
ReadBytes: bw.read.Load(),
ReadError: bw.err,
WriteBytes: rww.written,
WriteError: rww.err,
statusCode := rww.StatusCode()
bytesWritten := rww.BytesWritten()
span.SetStatus(h.semconv.Status(statusCode))
span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
StatusCode: statusCode,
ReadBytes: bw.BytesRead(),
ReadError: bw.Error(),
WriteBytes: bytesWritten,
WriteError: rww.Error(),
})...)

// Add metrics
attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...)
if rww.statusCode > 0 {
attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
}
o := metric.WithAttributeSet(attribute.NewSet(attributes...))
addOpts := []metric.AddOption{o} // Allocate vararg slice once.
h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...)
h.responseBytesCounter.Add(ctx, rww.written, addOpts...)

// Use floating point division here for higher precision (instead of Millisecond method).
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)

h.serverLatencyMeasure.Record(ctx, elapsedTime, o)
h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{
ServerName: h.server,
ResponseSize: bytesWritten,
MetricAttributes: semconv.MetricAttributes{
Req: r,
StatusCode: statusCode,
AdditionalAttributes: labeler.Get(),
},
MetricData: semconv.MetricData{
RequestSize: bw.BytesRead(),
ElapsedTime: elapsedTime,
},
})
}

// WithRouteTag annotates spans and metrics with the provided route name
// with HTTP route attribute.
func WithRouteTag(route string, h http.Handler) http.Handler {
attr := semconv.NewHTTPServer().Route(route)
attr := semconv.NewHTTPServer(nil).Route(route)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
span := trace.SpanFromContext(r.Context())
span.SetAttributes(attr)
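For context, the rewritten handler above is reached through the public NewHandler/NewMiddleware constructors, and WithRouteTag is what adds the low-cardinality route attribute to spans and metrics. A minimal usage sketch (route and response body are illustrative assumptions):

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func newServer() http.Handler {
	mux := http.NewServeMux()
	// Annotate spans and metrics for this route with http.route="/users/".
	mux.Handle("/users/", otelhttp.WithRouteTag("/users/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})))
	// Wrap the whole mux so every request gets a server span and metrics.
	return otelhttp.NewMiddleware("http.server")(mux)
}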
75 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go (generated, vendored, new file)

@@ -0,0 +1,75 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"

import (
"io"
"sync"
)

var _ io.ReadCloser = &BodyWrapper{}

// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
// of bytes read and the last error.
type BodyWrapper struct {
io.ReadCloser
OnRead func(n int64) // must not be nil

mu sync.Mutex
read int64
err error
}

// NewBodyWrapper creates a new BodyWrapper.
//
// The onRead attribute is a callback that will be called every time the data
// is read, with the number of bytes being read.
func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper {
	return &BodyWrapper{
		ReadCloser: body,
		OnRead: onRead,
	}
}

// Read reads the data from the io.ReadCloser, and stores the number of bytes
// read and the error.
func (w *BodyWrapper) Read(b []byte) (int, error) {
	n, err := w.ReadCloser.Read(b)
	n1 := int64(n)

	w.updateReadData(n1, err)
	w.OnRead(n1)
	return n, err
}

func (w *BodyWrapper) updateReadData(n int64, err error) {
	w.mu.Lock()
	defer w.mu.Unlock()

	w.read += n
	if err != nil {
		w.err = err
	}
}

// Closes closes the io.ReadCloser.
func (w *BodyWrapper) Close() error {
	return w.ReadCloser.Close()
}

// BytesRead returns the number of bytes read up to this point.
func (w *BodyWrapper) BytesRead() int64 {
	w.mu.Lock()
	defer w.mu.Unlock()

	return w.read
}

// Error returns the last error.
func (w *BodyWrapper) Error() error {
	w.mu.Lock()
	defer w.mu.Unlock()

	return w.err
}
119 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go (generated, vendored, new file)

@@ -0,0 +1,119 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"

import (
"net/http"
"sync"
)

var _ http.ResponseWriter = &RespWriterWrapper{}

// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of
// bytes written, the last error, and to catch the first written statusCode.
// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc)
// that may be useful when using it in real life situations.
type RespWriterWrapper struct {
http.ResponseWriter
OnWrite func(n int64) // must not be nil

mu sync.RWMutex
written int64
statusCode int
err error
wroteHeader bool
}

// NewRespWriterWrapper creates a new RespWriterWrapper.
//
// The onWrite attribute is a callback that will be called every time the data
// is written, with the number of bytes that were written.
func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper {
	return &RespWriterWrapper{
		ResponseWriter: w,
		OnWrite: onWrite,
		statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
	}
}

// Write writes the bytes array into the [ResponseWriter], and tracks the
// number of bytes written and last error.
func (w *RespWriterWrapper) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()

	if !w.wroteHeader {
		w.writeHeader(http.StatusOK)
	}

	n, err := w.ResponseWriter.Write(p)
	n1 := int64(n)
	w.OnWrite(n1)
	w.written += n1
	w.err = err
	return n, err
}

// WriteHeader persists initial statusCode for span attribution.
// All calls to WriteHeader will be propagated to the underlying ResponseWriter
// and will persist the statusCode from the first call.
// Blocking consecutive calls to WriteHeader alters expected behavior and will
// remove warning logs from net/http where developers will notice incorrect handler implementations.
func (w *RespWriterWrapper) WriteHeader(statusCode int) {
	w.mu.Lock()
	defer w.mu.Unlock()

	w.writeHeader(statusCode)
}

// writeHeader persists the status code for span attribution, and propagates
// the call to the underlying ResponseWriter.
// It does not acquire a lock, and therefore assumes that is being handled by a
// parent method.
func (w *RespWriterWrapper) writeHeader(statusCode int) {
	if !w.wroteHeader {
		w.wroteHeader = true
		w.statusCode = statusCode
	}
	w.ResponseWriter.WriteHeader(statusCode)
}

// Flush implements [http.Flusher].
func (w *RespWriterWrapper) Flush() {
	w.mu.Lock()
	defer w.mu.Unlock()

	if !w.wroteHeader {
		w.writeHeader(http.StatusOK)
	}

	if f, ok := w.ResponseWriter.(http.Flusher); ok {
		f.Flush()
	}
}

// BytesWritten returns the number of bytes written.
func (w *RespWriterWrapper) BytesWritten() int64 {
	w.mu.RLock()
	defer w.mu.RUnlock()

	return w.written
}

// StatusCode returns the HTTP status code that was sent.
func (w *RespWriterWrapper) StatusCode() int {
	w.mu.RLock()
	defer w.mu.RUnlock()

	return w.statusCode
}

// Error returns the last error.
func (w *RespWriterWrapper) Error() error {
	w.mu.RLock()
	defer w.mu.RUnlock()

	return w.err
}
159 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go (generated, vendored)

@@ -4,6 +4,7 @@
package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"

import (
"context"
"fmt"
"net/http"
"os"
@@ -11,6 +12,7 @@ import (

"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
)

type ResponseTelemetry struct {
@@ -23,6 +25,11 @@ type ResponseTelemetry struct {

type HTTPServer struct {
duplicate bool

// Old metrics
requestBytesCounter metric.Int64Counter
responseBytesCounter metric.Int64Counter
serverLatencyMeasure metric.Float64Histogram
}

// RequestTraceAttrs returns trace attributes for an HTTP request received by a
@@ -63,15 +70,10 @@ func (s HTTPServer) Route(route string) attribute.KeyValue {
return oldHTTPServer{}.Route(route)
}

func NewHTTPServer() HTTPServer {
env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE"))
return HTTPServer{duplicate: env == "http/dup"}
}

// ServerStatus returns a span status code and message for an HTTP status code
// Status returns a span status code and message for an HTTP status code
// value returned by a server. Status codes in the 400-499 range are not
// returned as errors.
func ServerStatus(code int) (codes.Code, string) {
func (s HTTPServer) Status(code int) (codes.Code, string) {
if code < 100 || code >= 600 {
return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
}
@@ -80,3 +82,146 @@ func ServerStatus(code int) (codes.Code, string) {
}
return codes.Unset, ""
}

type ServerMetricData struct {
ServerName string
ResponseSize int64

MetricData
MetricAttributes
}

type MetricAttributes struct {
Req *http.Request
StatusCode int
AdditionalAttributes []attribute.KeyValue
}

type MetricData struct {
RequestSize int64
ElapsedTime float64
}

func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) {
	if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
		// This will happen if an HTTPServer{} is used instead of NewHTTPServer.
		return
	}

	attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
	o := metric.WithAttributeSet(attribute.NewSet(attributes...))
	addOpts := []metric.AddOption{o}
	s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...)
	s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
	s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)

	// TODO: Duplicate Metrics
}

func NewHTTPServer(meter metric.Meter) HTTPServer {
	env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
	duplicate := env == "http/dup"
	server := HTTPServer{
		duplicate: duplicate,
	}
	server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter)
	return server
}

type HTTPClient struct {
duplicate bool

// old metrics
requestBytesCounter metric.Int64Counter
responseBytesCounter metric.Int64Counter
latencyMeasure metric.Float64Histogram
}

func NewHTTPClient(meter metric.Meter) HTTPClient {
	env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
	client := HTTPClient{
		duplicate: env == "http/dup",
	}
	client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = oldHTTPClient{}.createMeasures(meter)
	return client
}

// RequestTraceAttrs returns attributes for an HTTP request made by a client.
func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
	if c.duplicate {
		return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...)
	}
	return oldHTTPClient{}.RequestTraceAttrs(req)
}

// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client.
func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
	if c.duplicate {
		return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...)
	}

	return oldHTTPClient{}.ResponseTraceAttrs(resp)
}

func (c HTTPClient) Status(code int) (codes.Code, string) {
	if code < 100 || code >= 600 {
		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
	}
	if code >= 400 {
		return codes.Error, ""
	}
	return codes.Unset, ""
}

func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
	if c.duplicate {
		return newHTTPClient{}.ErrorType(err)
	}

	return attribute.KeyValue{}
}

type MetricOpts struct {
measurement metric.MeasurementOption
addOptions metric.AddOption
}

func (o MetricOpts) MeasurementOption() metric.MeasurementOption {
	return o.measurement
}

func (o MetricOpts) AddOptions() metric.AddOption {
	return o.addOptions
}

func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts {
	attributes := oldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
	// TODO: Duplicate Metrics
	set := metric.WithAttributeSet(attribute.NewSet(attributes...))
	return MetricOpts{
		measurement: set,
		addOptions: set,
	}
}

func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) {
	if s.requestBytesCounter == nil || s.latencyMeasure == nil {
		// This will happen if an HTTPClient{} is used instead of NewHTTPClient().
		return
	}

	s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions())
	s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption())

	// TODO: Duplicate Metrics
}

func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) {
	if s.responseBytesCounter == nil {
		// This will happen if an HTTPClient{} is used instead of NewHTTPClient().
		return
	}

	s.responseBytesCounter.Add(ctx, responseData, opts)
	// TODO: Duplicate Metrics
}
@@ -4,11 +4,14 @@
package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"

import (
"fmt"
"net/http"
"reflect"
"strconv"
"strings"

"go.opentelemetry.io/otel/attribute"
semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
)

type newHTTPServer struct{}
@@ -195,3 +198,151 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke
func (n newHTTPServer) Route(route string) attribute.KeyValue {
	return semconvNew.HTTPRoute(route)
}

type newHTTPClient struct{}

// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
	/*
	below attributes are returned:
	- http.request.method
	- http.request.method.original
	- url.full
	- server.address
	- server.port
	- network.protocol.name
	- network.protocol.version
	*/
	numOfAttributes := 3 // URL, server address, proto, and method.

	var urlHost string
	if req.URL != nil {
		urlHost = req.URL.Host
	}
	var requestHost string
	var requestPort int
	for _, hostport := range []string{urlHost, req.Header.Get("Host")} {
		requestHost, requestPort = splitHostPort(hostport)
		if requestHost != "" || requestPort > 0 {
			break
		}
	}

	eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
	if eligiblePort > 0 {
		numOfAttributes++
	}
	useragent := req.UserAgent()
	if useragent != "" {
		numOfAttributes++
	}

	protoName, protoVersion := netProtocol(req.Proto)
	if protoName != "" && protoName != "http" {
		numOfAttributes++
	}
	if protoVersion != "" {
		numOfAttributes++
	}

	method, originalMethod := n.method(req.Method)
	if originalMethod != (attribute.KeyValue{}) {
		numOfAttributes++
	}

	attrs := make([]attribute.KeyValue, 0, numOfAttributes)

	attrs = append(attrs, method)
	if originalMethod != (attribute.KeyValue{}) {
		attrs = append(attrs, originalMethod)
	}

	var u string
	if req.URL != nil {
		// Remove any username/password info that may be in the URL.
		userinfo := req.URL.User
		req.URL.User = nil
		u = req.URL.String()
		// Restore any username/password info that was removed.
		req.URL.User = userinfo
	}
	attrs = append(attrs, semconvNew.URLFull(u))

	attrs = append(attrs, semconvNew.ServerAddress(requestHost))
	if eligiblePort > 0 {
		attrs = append(attrs, semconvNew.ServerPort(eligiblePort))
	}

	if protoName != "" && protoName != "http" {
		attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
	}
	if protoVersion != "" {
		attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
	}

	return attrs
}

// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client.
func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
	/*
	below attributes are returned:
	- http.response.status_code
	- error.type
	*/
	var count int
	if resp.StatusCode > 0 {
		count++
	}

	if isErrorStatusCode(resp.StatusCode) {
		count++
	}

	attrs := make([]attribute.KeyValue, 0, count)
	if resp.StatusCode > 0 {
		attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode))
	}

	if isErrorStatusCode(resp.StatusCode) {
		errorType := strconv.Itoa(resp.StatusCode)
		attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType))
	}
	return attrs
}

func (n newHTTPClient) ErrorType(err error) attribute.KeyValue {
	t := reflect.TypeOf(err)
	var value string
	if t.PkgPath() == "" && t.Name() == "" {
		// Likely a builtin type.
		value = t.String()
	} else {
		value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
	}

	if value == "" {
		return semconvNew.ErrorTypeOther
	}

	return semconvNew.ErrorTypeKey.String(value)
}

func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
	if method == "" {
		return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
	}
	if attr, ok := methodLookup[method]; ok {
		return attr, attribute.KeyValue{}
	}

	orig := semconvNew.HTTPRequestMethodOriginal(method)
	if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
		return attr, orig
	}
	return semconvNew.HTTPRequestMethodGet, orig
}

func isErrorStatusCode(code int) bool {
	return code >= 400 || code < 100
}
@@ -9,8 +9,9 @@ import (
"strconv"
"strings"

"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// splitHostPort splits a network address hostport of the form "host",
@@ -49,7 +50,7 @@ func splitHostPort(hostport string) (host string, port int) {
if err != nil {
return
}
return host, int(p)
return host, int(p) // nolint: gosec // Byte size checked 16 above.
}

func requiredHTTPPort(https bool, port int) int { // nolint:revive
@@ -89,3 +90,9 @@ var methodLookup = map[string]attribute.KeyValue{
http.MethodPut: semconvNew.HTTPRequestMethodPut,
http.MethodTrace: semconvNew.HTTPRequestMethodTrace,
}

func handleErr(err error) {
	if err != nil {
		otel.Handle(err)
	}
}
@@ -7,9 +7,13 @@ import (
"errors"
"io"
"net/http"
"slices"
"strings"

"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/noop"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

@@ -72,3 +76,199 @@ func (o oldHTTPServer) Route(route string) attribute.KeyValue {
func HTTPStatusCode(status int) attribute.KeyValue {
	return semconv.HTTPStatusCode(status)
}

// Server HTTP metrics.
const (
serverRequestSize = "http.server.request.size" // Incoming request bytes total
serverResponseSize = "http.server.response.size" // Incoming response bytes total
serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
)

func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
	if meter == nil {
		return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
	}
	var err error
	requestBytesCounter, err := meter.Int64Counter(
		serverRequestSize,
		metric.WithUnit("By"),
		metric.WithDescription("Measures the size of HTTP request messages."),
	)
	handleErr(err)

	responseBytesCounter, err := meter.Int64Counter(
		serverResponseSize,
		metric.WithUnit("By"),
		metric.WithDescription("Measures the size of HTTP response messages."),
	)
	handleErr(err)

	serverLatencyMeasure, err := meter.Float64Histogram(
		serverDuration,
		metric.WithUnit("ms"),
		metric.WithDescription("Measures the duration of inbound HTTP requests."),
	)
	handleErr(err)

	return requestBytesCounter, responseBytesCounter, serverLatencyMeasure
}

func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
	n := len(additionalAttributes) + 3
	var host string
	var p int
	if server == "" {
		host, p = splitHostPort(req.Host)
	} else {
		// Prioritize the primary server name.
		host, p = splitHostPort(server)
		if p < 0 {
			_, p = splitHostPort(req.Host)
		}
	}
	hostPort := requiredHTTPPort(req.TLS != nil, p)
	if hostPort > 0 {
		n++
	}
	protoName, protoVersion := netProtocol(req.Proto)
	if protoName != "" {
		n++
	}
	if protoVersion != "" {
		n++
	}

	if statusCode > 0 {
		n++
	}

	attributes := slices.Grow(additionalAttributes, n)
	attributes = append(attributes,
		standardizeHTTPMethodMetric(req.Method),
		o.scheme(req.TLS != nil),
		semconv.NetHostName(host))

	if hostPort > 0 {
		attributes = append(attributes, semconv.NetHostPort(hostPort))
	}
	if protoName != "" {
		attributes = append(attributes, semconv.NetProtocolName(protoName))
	}
	if protoVersion != "" {
		attributes = append(attributes, semconv.NetProtocolVersion(protoVersion))
	}

	if statusCode > 0 {
		attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
	}
	return attributes
}

func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
	if https {
		return semconv.HTTPSchemeHTTPS
	}
	return semconv.HTTPSchemeHTTP
}

type oldHTTPClient struct{}

func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
	return semconvutil.HTTPClientRequest(req)
}

func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
	return semconvutil.HTTPClientResponse(resp)
}

func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
	/* The following semantic conventions are returned if present:
	http.method string
	http.status_code int
	net.peer.name string
	net.peer.port int
	*/

	n := 2 // method, peer name.
	var h string
	if req.URL != nil {
		h = req.URL.Host
	}
	var requestHost string
	var requestPort int
	for _, hostport := range []string{h, req.Header.Get("Host")} {
		requestHost, requestPort = splitHostPort(hostport)
		if requestHost != "" || requestPort > 0 {
			break
		}
	}

	port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
	if port > 0 {
		n++
	}

	if statusCode > 0 {
		n++
	}

	attributes := slices.Grow(additionalAttributes, n)
	attributes = append(attributes,
		standardizeHTTPMethodMetric(req.Method),
		semconv.NetPeerName(requestHost),
	)

	if port > 0 {
		attributes = append(attributes, semconv.NetPeerPort(port))
	}

	if statusCode > 0 {
		attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
	}
	return attributes
}

// Client HTTP metrics.
const (
clientRequestSize = "http.client.request.size" // Incoming request bytes total
clientResponseSize = "http.client.response.size" // Incoming response bytes total
clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds
)

func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
	if meter == nil {
		return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
	}
	requestBytesCounter, err := meter.Int64Counter(
		clientRequestSize,
		metric.WithUnit("By"),
		metric.WithDescription("Measures the size of HTTP request messages."),
	)
	handleErr(err)

	responseBytesCounter, err := meter.Int64Counter(
		clientResponseSize,
		metric.WithUnit("By"),
		metric.WithDescription("Measures the size of HTTP response messages."),
	)
	handleErr(err)

	latencyMeasure, err := meter.Float64Histogram(
		clientDuration,
		metric.WithUnit("ms"),
		metric.WithDescription("Measures the duration of outbound HTTP requests."),
	)
	handleErr(err)

	return requestBytesCounter, responseBytesCounter, latencyMeasure
}

func standardizeHTTPMethodMetric(method string) attribute.KeyValue {
	method = strings.ToUpper(method)
	switch method {
	case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
	default:
		method = "_OTHER"
	}
	return semconv.HTTPMethod(method)
}
@@ -195,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) {
if err != nil {
return
}
return host, int(p)
return host, int(p) // nolint: gosec // Bitsize checked to be 16 above.
}

func netProtocol(proto string) (name string, version string) {
29 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go (generated, vendored, new file)

@@ -0,0 +1,29 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"

import (
"context"
"time"
)

type startTimeContextKeyType int

const startTimeContextKey startTimeContextKeyType = 0

// ContextWithStartTime returns a new context with the provided start time. The
// start time will be used for metrics and traces emitted by the
// instrumentation. Only one labeller can be injected into the context.
// Injecting it multiple times will override the previous calls.
func ContextWithStartTime(parent context.Context, start time.Time) context.Context {
	return context.WithValue(parent, startTimeContextKey, start)
}

// StartTimeFromContext retrieves a time.Time from the provided context if one
// is available. If no start time was found in the provided context, a new,
// zero start time is returned and the second return value is false.
func StartTimeFromContext(ctx context.Context) time.Time {
	t, _ := ctx.Value(startTimeContextKey).(time.Time)
	return t
}
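This new API lets a caller back-date a request's span start and latency measurement, for example to account for time the request spent queued before the instrumented handler ran; handler.go above consumes it via StartTimeFromContext. A minimal server-side sketch, assuming a hypothetical X-Accepted-At header stamped by an upstream proxy:

package main

import (
	"net/http"
	"time"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// withAcceptedTime runs before the otelhttp handler and injects the earlier
// start time so spans and latency metrics are measured from that point.
func withAcceptedTime(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if t, err := time.Parse(time.RFC3339Nano, r.Header.Get("X-Accepted-At")); err == nil {
			r = r.WithContext(otelhttp.ContextWithStartTime(r.Context(), t))
		}
		next.ServeHTTP(w, r)
	})
}

func newHandler(mux *http.ServeMux) http.Handler {
	return withAcceptedTime(otelhttp.NewHandler(mux, "http.server"))
}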
106 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go (generated, vendored)

@@ -11,13 +11,13 @@ import (
"sync/atomic"
"time"

"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"

"go.opentelemetry.io/otel/trace"
)

@@ -26,17 +26,15 @@
type Transport struct {
rt http.RoundTripper

tracer trace.Tracer
meter metric.Meter
propagators propagation.TextMapPropagator
spanStartOptions []trace.SpanStartOption
filters []Filter
spanNameFormatter func(string, *http.Request) string
clientTrace func(context.Context) *httptrace.ClientTrace
tracer trace.Tracer
propagators propagation.TextMapPropagator
spanStartOptions []trace.SpanStartOption
filters []Filter
spanNameFormatter func(string, *http.Request) string
clientTrace func(context.Context) *httptrace.ClientTrace
metricAttributesFn func(*http.Request) []attribute.KeyValue

requestBytesCounter metric.Int64Counter
responseBytesCounter metric.Int64Counter
latencyMeasure metric.Float64Histogram
semconv semconv.HTTPClient
}

var _ http.RoundTripper = &Transport{}
@@ -63,43 +61,19 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport {

c := newConfig(append(defaultOpts, opts...)...)
t.applyConfig(c)
t.createMeasures()

return &t
}

func (t *Transport) applyConfig(c *config) {
t.tracer = c.Tracer
t.meter = c.Meter
t.propagators = c.Propagators
t.spanStartOptions = c.SpanStartOptions
t.filters = c.Filters
t.spanNameFormatter = c.SpanNameFormatter
t.clientTrace = c.ClientTrace
}

func (t *Transport) createMeasures() {
var err error
t.requestBytesCounter, err = t.meter.Int64Counter(
clientRequestSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP request messages."),
)
handleErr(err)

t.responseBytesCounter, err = t.meter.Int64Counter(
clientResponseSize,
metric.WithUnit("By"),
metric.WithDescription("Measures the size of HTTP response messages."),
)
handleErr(err)

t.latencyMeasure, err = t.meter.Float64Histogram(
clientDuration,
metric.WithUnit("ms"),
metric.WithDescription("Measures the duration of outbound HTTP requests."),
)
handleErr(err)
t.semconv = semconv.NewHTTPClient(c.Meter)
t.metricAttributesFn = c.MetricAttributesFn
}

func defaultTransportFormatter(_ string, r *http.Request) string {
@@ -143,54 +117,68 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {

r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request.

// use a body wrapper to determine the request size
var bw bodyWrapper
// if request body is nil or NoBody, we don't want to mutate the body as it
// will affect the identity of it in an unforeseeable way because we assert
// ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
bw := request.NewBodyWrapper(r.Body, func(int64) {})
if r.Body != nil && r.Body != http.NoBody {
bw.ReadCloser = r.Body
// noop to prevent nil panic. not using this record fun yet.
bw.record = func(int64) {}
r.Body = &bw
r.Body = bw
}

span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
span.SetAttributes(t.semconv.RequestTraceAttrs(r)...)
t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))

res, err := t.rt.RoundTrip(r)
if err != nil {
span.RecordError(err)
// set error type attribute if the error is part of the predefined
// error types.
// otherwise, record it as an exception
if errType := t.semconv.ErrorType(err); errType.Valid() {
span.SetAttributes(errType)
} else {
span.RecordError(err)
}

span.SetStatus(codes.Error, err.Error())
span.End()
return res, err
}

// metrics
metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...)
if res.StatusCode > 0 {
metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode))
}
o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...))
addOpts := []metric.AddOption{o} // Allocate vararg slice once.
t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...)
metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{
Req: r,
StatusCode: res.StatusCode,
AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...),
})

// For handling response bytes we leverage a callback when the client reads the http response
readRecordFunc := func(n int64) {
t.responseBytesCounter.Add(ctx, n, addOpts...)
t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions())
}

// traces
span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...)
span.SetStatus(t.semconv.Status(res.StatusCode))

res.Body = newWrappedBody(span, readRecordFunc, res.Body)

// Use floating point division here for higher precision (instead of Millisecond method).
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)

t.latencyMeasure.Record(ctx, elapsedTime, o)
t.semconv.RecordMetrics(ctx, semconv.MetricData{
RequestSize: bw.BytesRead(),
ElapsedTime: elapsedTime,
}, metricOpts)

return res, err
return res, nil
}

func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
	var attributeForRequest []attribute.KeyValue
	if t.metricAttributesFn != nil {
		attributeForRequest = t.metricAttributesFn(r)
	}
	return attributeForRequest
}

// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
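The rewritten RoundTrip still folds labeler.Get() into the per-request metric attributes, so callers can attach extra metric labels from the calling context through the public Labeler API. A minimal sketch, assuming the client was built with otelhttp.NewTransport and that "peer.service" is just an illustrative label:

package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func callBackend(client *http.Client, url string) (*http.Response, error) {
	// Attributes added to the Labeler end up on this request's metrics.
	labeler := &otelhttp.Labeler{}
	labeler.Add(attribute.String("peer.service", "backend"))

	ctx := otelhttp.ContextWithLabeler(context.Background(), labeler)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	return client.Do(req)
}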
2 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go (generated, vendored)

@@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http

// Version is the current release version of the otelhttp instrumentation.
func Version() string {
return "0.53.0"
return "0.57.0"
// This string is updated by the pre_release.sh script during release
}
99 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go (generated, vendored)

@@ -1,99 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"

import (
"context"
"io"
"net/http"
"sync/atomic"

"go.opentelemetry.io/otel/propagation"
)

var _ io.ReadCloser = &bodyWrapper{}

// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
// of bytes read and the last error.
type bodyWrapper struct {
io.ReadCloser
record func(n int64) // must not be nil

read atomic.Int64
err error
}

func (w *bodyWrapper) Read(b []byte) (int, error) {
	n, err := w.ReadCloser.Read(b)
	n1 := int64(n)
	w.read.Add(n1)
	w.err = err
	w.record(n1)
	return n, err
}

func (w *bodyWrapper) Close() error {
	return w.ReadCloser.Close()
}

var _ http.ResponseWriter = &respWriterWrapper{}

// respWriterWrapper wraps a http.ResponseWriter in order to track the number of
// bytes written, the last error, and to catch the first written statusCode.
// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
// that may be useful when using it in real life situations.
type respWriterWrapper struct {
http.ResponseWriter
record func(n int64) // must not be nil

// used to inject the header
ctx context.Context

props propagation.TextMapPropagator

written int64
statusCode int
err error
wroteHeader bool
}

func (w *respWriterWrapper) Header() http.Header {
	return w.ResponseWriter.Header()
}

func (w *respWriterWrapper) Write(p []byte) (int, error) {
	if !w.wroteHeader {
		w.WriteHeader(http.StatusOK)
	}
	n, err := w.ResponseWriter.Write(p)
	n1 := int64(n)
	w.record(n1)
	w.written += n1
	w.err = err
	return n, err
}

// WriteHeader persists initial statusCode for span attribution.
// All calls to WriteHeader will be propagated to the underlying ResponseWriter
// and will persist the statusCode from the first call.
// Blocking consecutive calls to WriteHeader alters expected behavior and will
// remove warning logs from net/http where developers will notice incorrect handler implementations.
func (w *respWriterWrapper) WriteHeader(statusCode int) {
	if !w.wroteHeader {
		w.wroteHeader = true
		w.statusCode = statusCode
	}
	w.ResponseWriter.WriteHeader(statusCode)
}

func (w *respWriterWrapper) Flush() {
	if !w.wroteHeader {
		w.WriteHeader(http.StatusOK)
	}

	if f, ok := w.ResponseWriter.(http.Flusher); ok {
		f.Flush()
	}
}
8 vendor/go.opentelemetry.io/otel/.gitignore (generated, vendored)

@@ -12,11 +12,3 @@ go.work
go.work.sum

gen/

/example/dice/dice
/example/namedtracer/namedtracer
/example/otel-collector/otel-collector
/example/opencensus/opencensus
/example/passthrough/passthrough
/example/prometheus/prometheus
/example/zipkin/zipkin
15 vendor/go.opentelemetry.io/otel/.golangci.yml (generated, vendored)

@@ -9,6 +9,8 @@ linters:
disable-all: true
# Specifically enable linters we want to use.
enable:
- asasalint
- bodyclose
- depguard
- errcheck
- errorlint
@@ -23,6 +25,7 @@ linters:
- revive
- staticcheck
- tenv
- testifylint
- typecheck
- unconvert
- unused
@@ -62,12 +65,12 @@ issues:
- path: _test\.go
linters:
- gosec
# Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
# Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
# as we commonly use it in tests and examples.
- text: "G404:"
linters:
- gosec
# Igonoring gosec G402: TLS MinVersion too low
# Ignoring gosec G402: TLS MinVersion too low
# as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
- text: "G402: TLS MinVersion too low."
linters:
@@ -124,8 +127,6 @@ linters-settings:
- "**/metric/**/*.go"
- "**/bridge/*.go"
- "**/bridge/**/*.go"
- "**/example/*.go"
- "**/example/**/*.go"
- "**/trace/*.go"
- "**/trace/**/*.go"
- "**/log/*.go"
@@ -300,3 +301,9 @@ linters-settings:
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
- name: waitgroup-by-value
disabled: false
testifylint:
enable-all: true
disable:
- float-compare
- go-require
- require-error
168
vendor/go.opentelemetry.io/otel/CHANGELOG.md
generated
vendored
168
vendor/go.opentelemetry.io/otel/CHANGELOG.md
generated
vendored
@ -8,6 +8,158 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
<!-- Released section -->
|
||||
<!-- Don't change this section unless doing release -->
|
||||
|
||||
## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08
|
||||
|
||||
### Added
|
||||
|
||||
- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850)
|
||||
- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850)
|
||||
- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861)
|
||||
- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861)
|
||||
- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861)
|
||||
- The `go.opentelemetry.io/otel/semconv/v1.27.0` package.
|
||||
The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894)
|
||||
- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903)
|
||||
- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927)
|
||||
- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934)
|
||||
- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934)
|
||||
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935)
|
||||
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935)
|
||||
- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933)
|
||||
- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933)
|
||||
- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932)
|
||||
|
||||
### Changed
|
||||
|
||||
- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924)
|
||||
- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926)
|
||||
- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925)
|
||||
- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931)
|
||||
- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881)
|
||||
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892)
|
||||
- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911)
|
||||
- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915)
|
||||
- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912)
|
||||
- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944)
|
||||
- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944)
|
||||
- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944)
|
||||
- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944)
|
||||
- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900)
|
||||
|
||||
### Removed
|
||||
|
||||
- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930)
|
||||
|
||||
## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11
|
||||
|
||||
### Added
|
||||
|
||||
- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862)
|
||||
- Add `WithExportBufferSize` option to log batch processor. (#5877)
|
||||
|
||||
### Changed
|
||||
|
||||
- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778)
|
||||
- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791)
|
||||
- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791)
|
||||
- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847)
|
||||
- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864)
|
||||
- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858)
|
||||
- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874)
|
||||
|
||||
### Deprecated
|
||||
|
||||
- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854)
|
||||
|
||||
### Fixed
|
||||
|
||||
- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819)
|
||||
- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to its resource and scope. (#5803)
|
||||
- Fix timer channel drain to avoid hanging on Go 1.23. (#5868)
|
||||
- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827)
|
||||
- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827)
|
||||
|
||||
## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09
|
||||
|
||||
### Added
|
||||
|
||||
- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739)
|
||||
- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773)
|
||||
- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773)
|
||||
- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754)
|
||||
- Fix panic on instruments creation when setting meter provider. (#5758)
|
||||
- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780)
|
||||
|
||||
### Removed
|
||||
|
||||
- Drop support for [Go 1.21]. (#5736, #5740, #5800)
|
||||
|
||||
## [1.29.0/0.51.0/0.5.0] 2024-08-23
|
||||
|
||||
This release is the last to support [Go 1.21].
|
||||
The next release will require at least [Go 1.22].
|
||||
|
||||
### Added
|
||||
|
||||
- Add MacOS ARM64 platform to the compatibility testing suite. (#5577)
|
||||
- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627)
|
||||
- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`.
|
||||
This new module contains an OTLP exporter that transmits log telemetry using gRPC.
|
||||
This module is unstable and breaking changes may be introduced.
|
||||
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629)
|
||||
- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651)
|
||||
- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651)
|
||||
- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665)
|
||||
- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`.
|
||||
This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not.
|
||||
It replaces the existing `Enabled` method that is removed from the `Processor` interface itself.
|
||||
It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692)
|
||||
- Support [Go 1.23]. (#5720)
|
||||
|
||||
### Changed
|
||||
|
||||
- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132)
|
||||
- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636)
|
||||
- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665)
|
||||
- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666)
|
||||
- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666)
|
||||
- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method.
|
||||
See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692)
|
||||
- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
|
||||
- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584)
|
||||
- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541)
|
||||
- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612)
|
||||
- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612)
|
||||
- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612)
|
||||
- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612)
|
||||
- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612)
|
||||
- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612)
|
||||
- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612)
|
||||
- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641)
|
||||
- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650)
|
||||
- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
|
||||
- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
|
||||
|
||||
### Removed
|
||||
|
||||
- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
|
||||
- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
|
||||
|
||||
## [1.28.0/0.50.0/0.4.0] 2024-07-02
|
||||
|
||||
### Added
|
||||
@ -49,6 +201,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
|
||||
- Fix stale timestamps reported by the last-value aggregation. (#5517)
|
||||
- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
|
||||
- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549)
|
||||
- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528)
|
||||
|
||||
## [1.27.0/0.49.0/0.3.0] 2024-05-21
|
||||
|
||||
@ -175,7 +328,7 @@ The next release will require at least [Go 1.21].
|
||||
This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
|
||||
This module is in an alpha state, it is subject to breaking changes.
|
||||
See our [versioning policy](./VERSIONING.md) for more info. (#4961)
|
||||
- ARM64 platform to the compatibility testing suite. (#4994)
|
||||
- Add ARM64 platform to the compatibility testing suite. (#4994)
|
||||
|
||||
### Fixed
|
||||
|
||||
@ -1836,7 +1989,7 @@ with major version 0.
|
||||
- Setting error status while recording error with Span from oteltest package. (#1729)
|
||||
- The concept of a remote and local Span stored in a context is unified to just the current Span.
|
||||
Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
|
||||
Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span.
|
||||
Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
|
||||
If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
|
||||
- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
|
||||
This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
|
||||
@ -2410,7 +2563,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco
|
||||
- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
|
||||
- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
|
||||
- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
|
||||
- Update otel-colector example to use the v0.5.0 collector. (#915)
|
||||
- Update otel-collector example to use the v0.5.0 collector. (#915)
|
||||
- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
|
||||
- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
|
||||
- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
|
||||
@ -3003,7 +3156,11 @@ It contains api and sdk for trace and meter.
|
||||
- CircleCI build CI manifest files.
|
||||
- CODEOWNERS file to track owners of this project.
|
||||
|
||||
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD
|
||||
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...HEAD
|
||||
[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0
|
||||
[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
|
||||
[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0
|
||||
[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
|
||||
[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
|
||||
[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
|
||||
[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
|
||||
@ -3086,6 +3243,9 @@ It contains api and sdk for trace and meter.
|
||||
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
|
||||
[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
|
||||
|
||||
<!-- Released section ended -->
|
||||
|
||||
[Go 1.23]: https://go.dev/doc/go1.23
|
||||
[Go 1.22]: https://go.dev/doc/go1.22
|
||||
[Go 1.21]: https://go.dev/doc/go1.21
|
||||
[Go 1.20]: https://go.dev/doc/go1.20
|
||||
|
6
vendor/go.opentelemetry.io/otel/CODEOWNERS
generated
vendored
@ -5,13 +5,13 @@
|
||||
#####################################################
|
||||
#
|
||||
# Learn about membership in OpenTelemetry community:
|
||||
# https://github.com/open-telemetry/community/blob/main/community-membership.md
|
||||
# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md
|
||||
#
|
||||
#
|
||||
# Learn about CODEOWNERS file format:
|
||||
# https://help.github.com/en/articles/about-code-owners
|
||||
#
|
||||
|
||||
* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
|
||||
* @MrAlias @XSAM @dashpole @pellared @dmathieu
|
||||
|
||||
CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu
|
||||
CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu
|
||||
|
26
vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
generated
vendored
@ -578,7 +578,10 @@ See also:
|
||||
The tests should never leak goroutines.

Use the term `ConcurrentSafe` in the test name when it aims to verify the
absence of race conditions.
absence of race conditions. The top-level tests with this term will be run
many times in the `test-concurrent-safe` CI job to increase the chance of
catching concurrency issues. This does not apply to subtests when this term
is not in their root name.
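
As a hedged illustration of this convention (not taken from the repository), a top-level test might look like the sketch below; the counter being exercised is a placeholder for real code under test:

```go
package example

import (
	"sync"
	"testing"
)

// TestCounterConcurrentSafe has "ConcurrentSafe" in its top-level name, so the
// test-concurrent-safe job (go test -run=ConcurrentSafe -count=100 -race) runs
// it repeatedly under the race detector.
func TestCounterConcurrentSafe(t *testing.T) {
	var (
		mu sync.Mutex
		n  int
	)

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Exercise the (placeholder) code under test from several goroutines.
			mu.Lock()
			n++
			mu.Unlock()
		}()
	}
	wg.Wait() // Wait for every goroutine so the test leaks none.

	if n != 8 {
		t.Fatalf("want 8 increments, got %d", n)
	}
}
```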
|
||||
|
||||
### Internal packages
|
||||
|
||||
@ -626,13 +629,14 @@ should be canceled.
|
||||
|
||||
## Approvers and Maintainers
|
||||
|
||||
### Approvers
|
||||
### Triagers
|
||||
|
||||
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
|
||||
- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
|
||||
|
||||
### Approvers
|
||||
|
||||
### Maintainers
|
||||
|
||||
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
|
||||
- [Damien Mathieu](https://github.com/dmathieu), Elastic
|
||||
- [David Ashpole](https://github.com/dashpole), Google
|
||||
- [Robert Pająk](https://github.com/pellared), Splunk
|
||||
@ -641,16 +645,18 @@ should be canceled.
|
||||
|
||||
### Emeritus
|
||||
|
||||
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
|
||||
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
|
||||
- [Josh MacDonald](https://github.com/jmacd), LightStep
|
||||
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
|
||||
- [Evan Torrie](https://github.com/evantorrie), Yahoo
|
||||
- [Aaron Clawson](https://github.com/MadVikingGod)
|
||||
- [Anthony Mirabella](https://github.com/Aneurysm9)
|
||||
- [Chester Cheung](https://github.com/hanyuancheung)
|
||||
- [Evan Torrie](https://github.com/evantorrie)
|
||||
- [Gustavo Silva Paiva](https://github.com/paivagustavo)
|
||||
- [Josh MacDonald](https://github.com/jmacd)
|
||||
- [Liz Fong-Jones](https://github.com/lizthegrey)
|
||||
|
||||
### Become an Approver or a Maintainer
|
||||
|
||||
See the [community membership document in OpenTelemetry community
|
||||
repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).
|
||||
repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md).
|
||||
|
||||
[Approver]: #approvers
|
||||
[Maintainer]: #maintainers
|
||||
|
24
vendor/go.opentelemetry.io/otel/Makefile
generated
vendored
@ -54,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
|
||||
PORTO = $(TOOLS)/porto
|
||||
$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
|
||||
|
||||
GOJQ = $(TOOLS)/gojq
|
||||
$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
|
||||
|
||||
GOTMPL = $(TOOLS)/gotmpl
|
||||
$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
|
||||
|
||||
@ -67,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck
|
||||
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
|
||||
|
||||
.PHONY: tools
|
||||
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
|
||||
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
|
||||
|
||||
# Virtualized python tools via docker
|
||||
|
||||
@ -145,12 +142,14 @@ build-tests/%:
|
||||
|
||||
# Tests
|
||||
|
||||
TEST_TARGETS := test-default test-bench test-short test-verbose test-race
|
||||
TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
|
||||
.PHONY: $(TEST_TARGETS) test
|
||||
test-default test-race: ARGS=-race
|
||||
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
|
||||
test-short: ARGS=-short
|
||||
test-verbose: ARGS=-v -race
|
||||
test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
|
||||
test-concurrent-safe: TIMEOUT=120
|
||||
$(TEST_TARGETS): test
|
||||
test: $(OTEL_GO_MOD_DIRS:%=test/%)
|
||||
test/%: DIR=$*
|
||||
@ -178,17 +177,14 @@ test-coverage: $(GOCOVMERGE)
|
||||
done; \
|
||||
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
|
||||
|
||||
# Adding a directory will include all benchmarks in that directory if a filter is not specified.
|
||||
BENCHMARK_TARGETS := sdk/trace
|
||||
.PHONY: benchmark
|
||||
benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
|
||||
BENCHMARK_FILTER = .
|
||||
# You can override the filter for a particular directory by adding a rule here.
|
||||
benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
|
||||
benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%)
|
||||
benchmark/%:
|
||||
@echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \
|
||||
@echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \
|
||||
&& cd $* \
|
||||
$(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter))
|
||||
&& $(GO) list ./... \
|
||||
| grep -v third_party \
|
||||
| xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=.
|
||||
|
||||
.PHONY: golangci-lint golangci-lint-fix
|
||||
golangci-lint-fix: ARGS=--fix
|
||||
@ -264,7 +260,7 @@ SEMCONVPKG ?= "semconv/"
|
||||
semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT)
|
||||
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
|
||||
[ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
|
||||
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
|
||||
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
|
||||
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)"
|
||||
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
|
||||
|
||||
|
34
vendor/go.opentelemetry.io/otel/README.md
generated
vendored
@ -47,20 +47,22 @@ stop ensuring compatibility with these versions in the following manner:
|
||||
|
||||
Currently, this project supports the following environments.
|
||||
|
||||
| OS | Go Version | Architecture |
|---------|------------|--------------|
| Ubuntu | 1.22 | amd64 |
| Ubuntu | 1.21 | amd64 |
| Ubuntu | 1.22 | 386 |
| Ubuntu | 1.21 | 386 |
| Linux | 1.22 | arm64 |
| Linux | 1.21 | arm64 |
| MacOS | 1.22 | amd64 |
| MacOS | 1.21 | amd64 |
| Windows | 1.22 | amd64 |
| Windows | 1.21 | amd64 |
| Windows | 1.22 | 386 |
| Windows | 1.21 | 386 |
| OS | Go Version | Architecture |
|----------|------------|--------------|
| Ubuntu | 1.23 | amd64 |
| Ubuntu | 1.22 | amd64 |
| Ubuntu | 1.23 | 386 |
| Ubuntu | 1.22 | 386 |
| Linux | 1.23 | arm64 |
| Linux | 1.22 | arm64 |
| macOS 13 | 1.23 | amd64 |
| macOS 13 | 1.22 | amd64 |
| macOS | 1.23 | arm64 |
| macOS | 1.22 | arm64 |
| Windows | 1.23 | amd64 |
| Windows | 1.22 | amd64 |
| Windows | 1.23 | 386 |
| Windows | 1.22 | 386 |

While this project should work for other systems, no compatibility guarantees
|
||||
are made for those systems currently.
|
||||
@ -87,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want
|
||||
to build your own instrumentation for your application directly you will need
|
||||
to use the
|
||||
[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
|
||||
package. The included [examples](./example/) are a good way to see some
|
||||
practical uses of this process.
|
||||
package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples)
|
||||
are a good way to see some practical uses of this process.
|
||||
|
||||
### Export
|
||||
|
||||
|
12
vendor/go.opentelemetry.io/otel/RELEASING.md
generated
vendored
@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
|
||||
```
|
||||
|
||||
- Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
|
||||
- Make sure the new section is under the comment for released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future.
|
||||
- Update all the appropriate links at the bottom.
|
||||
|
||||
4. Push the changes to upstream and create a Pull Request on GitHub.
|
||||
@ -110,17 +111,6 @@ It is critical you make sure the version you push upstream is correct.
|
||||
Finally create a Release for the new `<new tag>` on GitHub.
|
||||
The release body should include all the release notes from the Changelog for this release.
|
||||
|
||||
## Verify Examples
|
||||
|
||||
After releasing verify that examples build outside of the repository.
|
||||
|
||||
```
|
||||
./verify_examples.sh
|
||||
```
|
||||
|
||||
The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them.
|
||||
This ensures they build with the published release, not the local copy.
|
||||
|
||||
## Post-Release
|
||||
|
||||
### Contrib Repository
|
||||
|
40
vendor/go.opentelemetry.io/otel/attribute/set.go
generated
vendored
@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct {
|
||||
func computeDistinctFixed(kvs []KeyValue) interface{} {
|
||||
switch len(kvs) {
|
||||
case 1:
|
||||
ptr := new([1]KeyValue)
|
||||
copy((*ptr)[:], kvs)
|
||||
return *ptr
|
||||
return [1]KeyValue(kvs)
|
||||
case 2:
|
||||
ptr := new([2]KeyValue)
|
||||
copy((*ptr)[:], kvs)
|
||||
return *ptr
|
||||
return [2]KeyValue(kvs)
|
||||
case 3:
|
||||
ptr := new([3]KeyValue)
|
||||
copy((*ptr)[:], kvs)
|
||||
return *ptr
|
||||
return [3]KeyValue(kvs)
|
||||
case 4:
|
||||
ptr := new([4]KeyValue)
|
||||
copy((*ptr)[:], kvs)
|
||||
return *ptr
|
||||
return [4]KeyValue(kvs)
|
||||
case 5:
|
||||
ptr := new([5]KeyValue)
|
||||
copy((*ptr)[:], kvs)
|
||||
return *ptr
|
||||
return [5]KeyValue(kvs)
|
||||
case 6:
|
||||
ptr := new([6]KeyValue)
|
||||
copy((*ptr)[:], kvs)
|
||||
return *ptr
|
||||
return [6]KeyValue(kvs)
|
||||
case 7:
|
||||
ptr := new([7]KeyValue)
|
||||
copy((*ptr)[:], kvs)
|
||||
return *ptr
|
||||
return [7]KeyValue(kvs)
|
||||
case 8:
|
||||
ptr := new([8]KeyValue)
|
||||
copy((*ptr)[:], kvs)
|
||||
return *ptr
|
||||
return [8]KeyValue(kvs)
|
||||
case 9:
|
||||
ptr := new([9]KeyValue)
|
||||
copy((*ptr)[:], kvs)
|
||||
return *ptr
|
||||
return [9]KeyValue(kvs)
|
||||
case 10:
|
||||
ptr := new([10]KeyValue)
|
||||
copy((*ptr)[:], kvs)
|
||||
return *ptr
|
||||
return [10]KeyValue(kvs)
|
||||
default:
|
||||
return nil
|
||||
}
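
The hunk above swaps the pointer-plus-copy pattern for Go 1.20+ slice-to-array conversions. A minimal standalone sketch of the two forms, using strings instead of `attribute.KeyValue` for brevity:

```go
package main

import "fmt"

func main() {
	kvs := []string{"a", "b", "c"}

	// Go 1.20+ slice-to-array conversion: copies the first three elements into
	// a new array value and panics if len(kvs) < 3.
	arr := [3]string(kvs)

	// The equivalent pre-1.20 form removed by this hunk.
	ptr := new([3]string)
	copy((*ptr)[:], kvs)

	fmt.Println(arr == *ptr) // true: both arrays hold a, b, c
}
```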
|
||||
|
150
vendor/go.opentelemetry.io/otel/baggage/baggage.go
generated
vendored
@ -44,9 +44,15 @@ type Property struct {
|
||||
|
||||
// NewKeyProperty returns a new Property for key.
|
||||
//
|
||||
// The passed key must be valid, non-empty UTF-8 string.
|
||||
// If key is invalid, an error will be returned.
|
||||
// However, the specific Propagators that are used to transmit baggage entries across
|
||||
// component boundaries may impose their own restrictions on Property key.
|
||||
// For example, the W3C Baggage specification restricts the Property keys to strings that
|
||||
// satisfy the token definition from RFC7230, Section 3.2.6.
|
||||
// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys.
|
||||
func NewKeyProperty(key string) (Property, error) {
|
||||
if !validateKey(key) {
|
||||
if !validateBaggageName(key) {
|
||||
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
|
||||
}
|
||||
|
||||
@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) {
|
||||
// Notice: Consider using [NewKeyValuePropertyRaw] instead
|
||||
// that does not require percent-encoding of the value.
|
||||
func NewKeyValueProperty(key, value string) (Property, error) {
|
||||
if !validateKey(key) {
|
||||
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
|
||||
}
|
||||
|
||||
if !validateValue(value) {
|
||||
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
|
||||
}
|
||||
@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) {
|
||||
|
||||
// NewKeyValuePropertyRaw returns a new Property for key with value.
|
||||
//
|
||||
// The passed key must be compliant with W3C Baggage specification.
|
||||
// The passed key must be valid, non-empty UTF-8 string.
|
||||
// The passed value must be valid UTF-8 string.
|
||||
// However, the specific Propagators that are used to transmit baggage entries across
|
||||
// component boundaries may impose their own restrictions on Property key.
|
||||
// For example, the W3C Baggage specification restricts the Property keys to strings that
|
||||
// satisfy the token definition from RFC7230, Section 3.2.6.
|
||||
// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys.
|
||||
func NewKeyValuePropertyRaw(key, value string) (Property, error) {
|
||||
if !validateKey(key) {
|
||||
if !validateBaggageName(key) {
|
||||
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
|
||||
}
|
||||
if !validateBaggageValue(value) {
|
||||
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
|
||||
}
|
||||
|
||||
p := Property{
|
||||
key: key,
|
||||
@ -115,12 +134,15 @@ func (p Property) validate() error {
|
||||
return fmt.Errorf("invalid property: %w", err)
|
||||
}
|
||||
|
||||
if !validateKey(p.key) {
|
||||
if !validateBaggageName(p.key) {
|
||||
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
|
||||
}
|
||||
if !p.hasValue && p.value != "" {
|
||||
return errFunc(errors.New("inconsistent value"))
|
||||
}
|
||||
if p.hasValue && !validateBaggageValue(p.value) {
|
||||
return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) {
|
||||
|
||||
// String encodes Property into a header string compliant with the W3C Baggage
|
||||
// specification.
|
||||
// It would return empty string if the key is invalid with the W3C Baggage
|
||||
// specification. This could happen for a UTF-8 key, as it may contain
|
||||
// invalid characters.
|
||||
func (p Property) String() string {
|
||||
// W3C Baggage specification does not allow percent-encoded keys.
|
||||
if !validateKey(p.key) {
|
||||
return ""
|
||||
}
|
||||
|
||||
if p.hasValue {
|
||||
return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
|
||||
}
|
||||
@ -203,9 +233,14 @@ func (p properties) validate() error {
|
||||
// String encodes properties into a header string compliant with the W3C Baggage
|
||||
// specification.
|
||||
func (p properties) String() string {
|
||||
props := make([]string, len(p))
|
||||
for i, prop := range p {
|
||||
props[i] = prop.String()
|
||||
props := make([]string, 0, len(p))
|
||||
for _, prop := range p {
|
||||
s := prop.String()
|
||||
|
||||
// Ignored empty properties.
|
||||
if s != "" {
|
||||
props = append(props, s)
|
||||
}
|
||||
}
|
||||
return strings.Join(props, propertyDelimiter)
|
||||
}
|
||||
@ -230,6 +265,10 @@ type Member struct {
|
||||
// Notice: Consider using [NewMemberRaw] instead
|
||||
// that does not require percent-encoding of the value.
|
||||
func NewMember(key, value string, props ...Property) (Member, error) {
|
||||
if !validateKey(key) {
|
||||
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
|
||||
}
|
||||
|
||||
if !validateValue(value) {
|
||||
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
|
||||
}
|
||||
@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) {
|
||||
|
||||
// NewMemberRaw returns a new Member from the passed arguments.
|
||||
//
|
||||
// The passed key must be compliant with W3C Baggage specification.
|
||||
// The passed key must be valid, non-empty UTF-8 string.
|
||||
// The passed value must be valid UTF-8 string.
|
||||
// However, the specific Propagators that are used to transmit baggage entries across
|
||||
// component boundaries may impose their own restrictions on baggage key.
|
||||
// For example, the W3C Baggage specification restricts the baggage keys to strings that
|
||||
// satisfy the token definition from RFC7230, Section 3.2.6.
|
||||
// For maximum compatibility, alphanumeric values are strongly recommended to be used as baggage keys.
|
||||
func NewMemberRaw(key, value string, props ...Property) (Member, error) {
|
||||
m := Member{
|
||||
key: key,
|
||||
@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) {
|
||||
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
|
||||
}
|
||||
|
||||
val := strings.TrimSpace(v)
|
||||
if !validateValue(val) {
|
||||
rawVal := strings.TrimSpace(v)
|
||||
if !validateValue(rawVal) {
|
||||
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
|
||||
}
|
||||
|
||||
// Decode a percent-encoded value.
|
||||
value, err := url.PathUnescape(val)
|
||||
unescapeVal, err := url.PathUnescape(rawVal)
|
||||
if err != nil {
|
||||
return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
|
||||
}
|
||||
|
||||
value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
|
||||
return Member{key: key, value: value, properties: props, hasData: true}, nil
|
||||
}
|
||||
|
||||
// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
|
||||
func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string {
|
||||
if utf8.ValidString(unescapeVal) {
|
||||
return unescapeVal
|
||||
}
|
||||
// W3C baggage spec:
|
||||
// https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
|
||||
|
||||
var b strings.Builder
|
||||
b.Grow(cap)
|
||||
for i := 0; i < len(unescapeVal); {
|
||||
r, size := utf8.DecodeRuneInString(unescapeVal[i:])
|
||||
if r == utf8.RuneError && size == 1 {
|
||||
// Invalid UTF-8 sequence found, replace it with '�'
_, _ = b.WriteString("�")
|
||||
} else {
|
||||
_, _ = b.WriteRune(r)
|
||||
}
|
||||
i += size
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
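
For readers unfamiliar with this decode loop, the following standalone sketch (not the vendored function itself) shows the same byte-wise replacement of invalid UTF-8 with U+FFFD:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// sanitize mirrors the loop above: any byte that is not part of a valid UTF-8
// sequence is replaced with U+FFFD.
func sanitize(s string) string {
	if utf8.ValidString(s) {
		return s
	}
	out := make([]rune, 0, len(s))
	for i := 0; i < len(s); {
		r, size := utf8.DecodeRuneInString(s[i:])
		if r == utf8.RuneError && size == 1 {
			out = append(out, '\uFFFD')
		} else {
			out = append(out, r)
		}
		i += size
	}
	return string(out)
}

func main() {
	fmt.Println(sanitize("ok\xffvalue")) // prints "ok�value"
}
```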
|
||||
|
||||
// validate ensures m conforms to the W3C Baggage specification.
|
||||
// A key must be an ASCII string, returning an error otherwise.
|
||||
func (m Member) validate() error {
|
||||
@ -314,9 +385,12 @@ func (m Member) validate() error {
|
||||
return fmt.Errorf("%w: %q", errInvalidMember, m)
|
||||
}
|
||||
|
||||
if !validateKey(m.key) {
|
||||
if !validateBaggageName(m.key) {
|
||||
return fmt.Errorf("%w: %q", errInvalidKey, m.key)
|
||||
}
|
||||
if !validateBaggageValue(m.value) {
|
||||
return fmt.Errorf("%w: %q", errInvalidValue, m.value)
|
||||
}
|
||||
return m.properties.validate()
|
||||
}
|
||||
|
||||
@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() }
|
||||
|
||||
// String encodes Member into a header string compliant with the W3C Baggage
|
||||
// specification.
|
||||
// It would return empty string if the key is invalid with the W3C Baggage
|
||||
// specification. This could happen for a UTF-8 key, as it may contain
|
||||
// invalid characters.
|
||||
func (m Member) String() string {
|
||||
// A key is just an ASCII string. A value is restricted to be
|
||||
// US-ASCII characters excluding CTLs, whitespace,
|
||||
// DQUOTE, comma, semicolon, and backslash.
|
||||
// W3C Baggage specification does not allow percent-encoded keys.
|
||||
if !validateKey(m.key) {
|
||||
return ""
|
||||
}
|
||||
|
||||
s := m.key + keyValueDelimiter + valueEscape(m.value)
|
||||
if len(m.properties) > 0 {
|
||||
s += propertyDelimiter + m.properties.String()
|
||||
@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member {
|
||||
}
|
||||
|
||||
// Members returns all the baggage list-members.
|
||||
// The order of the returned list-members does not have significance.
|
||||
// The order of the returned list-members is not significant.
|
||||
//
|
||||
// The returned members are not validated, as we assume the validation happened
|
||||
// when they were added to the Baggage.
|
||||
@ -469,8 +548,8 @@ func (b Baggage) Members() []Member {
|
||||
return members
|
||||
}
|
||||
|
||||
// SetMember returns a copy the Baggage with the member included. If the
|
||||
// baggage contains a Member with the same key the existing Member is
|
||||
// SetMember returns a copy of the Baggage with the member included. If the
|
||||
// baggage contains a Member with the same key, the existing Member is
|
||||
// replaced.
|
||||
//
|
||||
// If member is invalid according to the W3C Baggage specification, an error
|
||||
@ -528,14 +607,22 @@ func (b Baggage) Len() int {
|
||||
|
||||
// String encodes Baggage into a header string compliant with the W3C Baggage
|
||||
// specification.
|
||||
// It would ignore members where the member key is invalid with the W3C Baggage
|
||||
// specification. This could happen for a UTF-8 key, as it may contain
|
||||
// invalid characters.
|
||||
func (b Baggage) String() string {
|
||||
members := make([]string, 0, len(b.list))
|
||||
for k, v := range b.list {
|
||||
members = append(members, Member{
|
||||
s := Member{
|
||||
key: k,
|
||||
value: v.Value,
|
||||
properties: fromInternalProperties(v.Properties),
|
||||
}.String())
|
||||
}.String()
|
||||
|
||||
// Ignored empty members.
|
||||
if s != "" {
|
||||
members = append(members, s)
|
||||
}
|
||||
}
|
||||
return strings.Join(members, listDelimiter)
|
||||
}
|
||||
@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
|
||||
}
|
||||
|
||||
// Decode a percent-encoded value.
|
||||
value, err := url.PathUnescape(s[valueStart:valueEnd])
|
||||
rawVal := s[valueStart:valueEnd]
|
||||
unescapeVal, err := url.PathUnescape(rawVal)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
|
||||
|
||||
ok = true
|
||||
p.key = s[keyStart:keyEnd]
|
||||
@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{
|
||||
'~': true,
|
||||
}
|
||||
|
||||
// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
|
||||
// Baggage name is a valid, non-empty UTF-8 string.
|
||||
func validateBaggageName(s string) bool {
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
return utf8.ValidString(s)
|
||||
}
|
||||
|
||||
// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value.
|
||||
// Baggage value is a valid UTF-8 string.
|
||||
// Empty string is also a valid UTF-8 string.
|
||||
func validateBaggageValue(s string) bool {
|
||||
return utf8.ValidString(s)
|
||||
}
|
||||
|
||||
// validateKey checks if the string is a valid W3C Baggage key.
|
||||
func validateKey(s string) bool {
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool {
|
||||
return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c]
|
||||
}
|
||||
|
||||
// validateValue checks if the string is a valid W3C Baggage value.
|
||||
func validateValue(s string) bool {
|
||||
for _, c := range s {
|
||||
if !validateValueChar(c) {
|
||||
|
2
vendor/go.opentelemetry.io/otel/codes/codes.go
generated
vendored
@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
|
||||
return fmt.Errorf("invalid code: %q", ci)
|
||||
}
|
||||
|
||||
*c = Code(ci)
|
||||
*c = Code(ci) // nolint: gosec // Bit size of 32 check above.
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("invalid code: %q", string(b))
|
||||
|
2
vendor/go.opentelemetry.io/otel/doc.go
generated
vendored
@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace.
|
||||
|
||||
To read more about metrics, see go.opentelemetry.io/otel/metric.
|
||||
|
||||
To read more about logs, see go.opentelemetry.io/otel/log.
|
||||
|
||||
To read more about propagation, see go.opentelemetry.io/otel/propagation and
|
||||
go.opentelemetry.io/otel/baggage.
|
||||
*/
|
||||
|
7
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
generated
vendored
@ -155,7 +155,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
|
||||
}
|
||||
|
||||
if c.metadata.Len() > 0 {
|
||||
ctx = metadata.NewOutgoingContext(ctx, c.metadata)
|
||||
md := c.metadata
|
||||
if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
|
||||
md = metadata.Join(md, outMD)
|
||||
}
|
||||
|
||||
ctx = metadata.NewOutgoingContext(ctx, md)
|
||||
}
|
||||
|
||||
return ctx, cancel
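
This hunk merges exporter headers with any gRPC metadata a caller already attached to the outgoing context instead of overwriting it. A standalone sketch of the `metadata.Join` semantics it relies on (header names and values are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Metadata a caller already attached to the outgoing context.
	ctx := metadata.AppendToOutgoingContext(context.Background(), "tenant", "a")

	// Headers configured on the exporter (for example via WithHeaders).
	md := metadata.New(map[string]string{"authorization": "Bearer <token>"})

	// Join keeps both sets instead of overwriting the caller's metadata.
	if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
		md = metadata.Join(md, outMD)
	}
	ctx = metadata.NewOutgoingContext(ctx, md)

	got, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(got.Get("tenant"), got.Get("authorization")) // [a] [Bearer <token>]
}
```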
|
||||
|
10
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
generated
vendored
@ -66,8 +66,9 @@ func WithInsecure() Option {
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// will take precedence.
|
||||
// value will be used. If both environment variables are set,
|
||||
// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment
|
||||
// variable is set, and this option is passed, this option will take precedence.
|
||||
//
|
||||
// If both this option and WithEndpointURL are used, the last used option will
|
||||
// take precedence.
|
||||
@ -84,8 +85,9 @@ func WithEndpoint(endpoint string) Option {
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// will take precedence.
|
||||
// value will be used. If both environment variables are set,
|
||||
// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment
|
||||
// variable is set, and this option is passed, this option will take precedence.
|
||||
//
|
||||
// If both this option and WithEndpoint are used, the last used option will
|
||||
// take precedence.
|
||||
|
5
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
generated
vendored
@ -12,9 +12,8 @@ The environment variables described below can be used for configuration.
|
||||
OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") -
|
||||
target to which the exporter sends telemetry.
|
||||
The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
|
||||
The value must contain a host.
|
||||
The value may additionally a port, a scheme, and a path.
|
||||
The value accepts "http" and "https" scheme.
|
||||
The value must contain a scheme ("http" or "https") and host.
|
||||
The value may additionally contain a port, and a path.
|
||||
The value should not contain a query string or fragment.
|
||||
OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
|
||||
The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
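
A hedged sketch of how these settings interact in practice; the endpoint URLs are placeholders, and the explicit option simply takes precedence over the environment variables as documented above:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
)

func main() {
	ctx := context.Background()

	// With OTEL_EXPORTER_OTLP_METRICS_ENDPOINT=https://collector.example:4317 set,
	// New picks the endpoint up from the environment.
	fromEnv, err := otlpmetricgrpc.New(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = fromEnv.Shutdown(ctx) }()

	// An explicit option takes precedence over the environment variables.
	explicit, err := otlpmetricgrpc.New(ctx,
		otlpmetricgrpc.WithEndpointURL("https://collector.internal:4317"))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = explicit.Shutdown(ctx) }()
}
```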
|
||||
|
@ -15,6 +15,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
)
|
||||
@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string {
|
||||
global.Error(errors.New("missing '="), "parse headers", "input", header)
|
||||
continue
|
||||
}
|
||||
name, err := url.PathUnescape(n)
|
||||
if err != nil {
|
||||
global.Error(err, "escape header key", "key", n)
|
||||
|
||||
trimmedName := strings.TrimSpace(n)
|
||||
|
||||
// Validate the key.
|
||||
if !isValidHeaderKey(trimmedName) {
|
||||
global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName)
|
||||
continue
|
||||
}
|
||||
trimmedName := strings.TrimSpace(name)
|
||||
|
||||
// Only decode the value.
|
||||
value, err := url.PathUnescape(v)
|
||||
if err != nil {
|
||||
global.Error(err, "escape header value", "value", v)
|
||||
@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) {
|
||||
}
|
||||
return cp, nil
|
||||
}
|
||||
|
||||
func isValidHeaderKey(key string) bool {
|
||||
if key == "" {
|
||||
return false
|
||||
}
|
||||
for _, c := range key {
|
||||
if !isTokenChar(c) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func isTokenChar(c rune) bool {
|
||||
return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
|
||||
unicode.IsDigit(c) ||
|
||||
c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
|
||||
c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
|
||||
}
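
The following standalone sketch mirrors this validation (it is not the vendored helper) to show which header keys survive parsing; the example keys are illustrative:

```go
package main

import (
	"fmt"
	"unicode"
)

// isToken mirrors the header-key check above: every rune must be an ASCII
// RFC 7230 token character.
func isToken(key string) bool {
	if key == "" {
		return false
	}
	for _, c := range key {
		ok := c <= unicode.MaxASCII && (unicode.IsLetter(c) || unicode.IsDigit(c) ||
			c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
			c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
		if !ok {
			return false
		}
	}
	return true
}

func main() {
	// "api-key" passes; "api key" (space) and "clé" (non-ASCII) would be dropped
	// when parsing OTEL_EXPORTER_OTLP_HEADERS.
	for _, k := range []string{"api-key", "api key", "clé"} {
		fmt.Printf("%q valid=%v\n", k, isToken(k))
	}
}
```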
|
||||
|
@ -139,7 +139,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
|
||||
if cfg.ServiceConfig != "" {
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
||||
}
|
||||
// Priroritize GRPCCredentials over Insecure (passing both is an error).
|
||||
// Prioritize GRPCCredentials over Insecure (passing both is an error).
|
||||
if cfg.Metrics.GRPCCredentials != nil {
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
|
||||
} else if cfg.Metrics.Insecure {
|
||||
@ -287,9 +287,7 @@ func WithEndpointURL(v string) GenericOption {
|
||||
|
||||
cfg.Metrics.Endpoint = u.Host
|
||||
cfg.Metrics.URLPath = u.Path
|
||||
if u.Scheme != "https" {
|
||||
cfg.Metrics.Insecure = true
|
||||
}
|
||||
cfg.Metrics.Insecure = u.Scheme != "https"
|
||||
|
||||
return cfg
|
||||
})
|
||||
|
@ -14,7 +14,7 @@ import (
|
||||
)
|
||||
|
||||
// ReadTLSConfigFromFile reads a PEM certificate file and creates
|
||||
// a tls.Config that will use this certifate to verify a server certificate.
|
||||
// a tls.Config that will use this certificate to verify a server certificate.
|
||||
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
|
||||
b, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
|
@ -46,8 +46,9 @@ func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
|
||||
|
||||
out = append(out, &mpb.ScopeMetrics{
|
||||
Scope: &cpb.InstrumentationScope{
|
||||
Name: sm.Scope.Name,
|
||||
Version: sm.Scope.Version,
|
||||
Name: sm.Scope.Name,
|
||||
Version: sm.Scope.Version,
|
||||
Attributes: AttrIter(sm.Scope.Attributes.Iter()),
|
||||
},
|
||||
Metrics: ms,
|
||||
SchemaUrl: sm.Scope.SchemaURL,
|
||||
@ -83,13 +84,13 @@ func metric(m metricdata.Metrics) (*mpb.Metric, error) {
|
||||
}
|
||||
switch a := m.Data.(type) {
|
||||
case metricdata.Gauge[int64]:
|
||||
out.Data = Gauge[int64](a)
|
||||
out.Data = Gauge(a)
|
||||
case metricdata.Gauge[float64]:
|
||||
out.Data = Gauge[float64](a)
|
||||
out.Data = Gauge(a)
|
||||
case metricdata.Sum[int64]:
|
||||
out.Data, err = Sum[int64](a)
|
||||
out.Data, err = Sum(a)
|
||||
case metricdata.Sum[float64]:
|
||||
out.Data, err = Sum[float64](a)
|
||||
out.Data, err = Sum(a)
|
||||
case metricdata.Histogram[int64]:
|
||||
out.Data, err = Histogram(a)
|
||||
case metricdata.Histogram[float64]:
|
||||
@ -279,10 +280,7 @@ func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
|
||||
// timeUnixNano on the zero Time returns 0.
|
||||
// The result does not depend on the location associated with t.
|
||||
func timeUnixNano(t time.Time) uint64 {
|
||||
if t.IsZero() {
|
||||
return 0
|
||||
}
|
||||
return uint64(t.UnixNano())
|
||||
return uint64(max(0, t.UnixNano())) // nolint:gosec // Overflow checked.
|
||||
}
|
||||
|
||||
// Exemplars returns a slice of OTLP Exemplars generated from exemplars.
|
||||
|
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
generated
vendored
@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
|
||||
|
||||
// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
|
||||
func Version() string {
|
||||
return "1.28.0"
|
||||
return "1.32.0"
|
||||
}
|
||||
|
@ -13,7 +13,8 @@ func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationSco
|
||||
return nil
|
||||
}
|
||||
return &commonpb.InstrumentationScope{
|
||||
Name: il.Name,
|
||||
Version: il.Version,
|
||||
Name: il.Name,
|
||||
Version: il.Version,
|
||||
Attributes: Iterator(il.Attributes.Iter()),
|
||||
}
|
||||
}
|
||||
|
30
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
generated
vendored
@ -4,6 +4,8 @@
|
||||
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
@ -95,16 +97,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
|
||||
SpanId: sid[:],
|
||||
TraceState: sd.SpanContext().TraceState().String(),
|
||||
Status: status(sd.Status().Code, sd.Status().Description),
|
||||
StartTimeUnixNano: uint64(sd.StartTime().UnixNano()),
|
||||
EndTimeUnixNano: uint64(sd.EndTime().UnixNano()),
|
||||
StartTimeUnixNano: uint64(max(0, sd.StartTime().UnixNano())), // nolint:gosec // Overflow checked.
|
||||
EndTimeUnixNano: uint64(max(0, sd.EndTime().UnixNano())), // nolint:gosec // Overflow checked.
|
||||
Links: links(sd.Links()),
|
||||
Kind: spanKind(sd.SpanKind()),
|
||||
Name: sd.Name(),
|
||||
Attributes: KeyValues(sd.Attributes()),
|
||||
Events: spanEvents(sd.Events()),
|
||||
DroppedAttributesCount: uint32(sd.DroppedAttributes()),
|
||||
DroppedEventsCount: uint32(sd.DroppedEvents()),
|
||||
DroppedLinksCount: uint32(sd.DroppedLinks()),
|
||||
DroppedAttributesCount: clampUint32(sd.DroppedAttributes()),
|
||||
DroppedEventsCount: clampUint32(sd.DroppedEvents()),
|
||||
DroppedLinksCount: clampUint32(sd.DroppedLinks()),
|
||||
}
|
||||
|
||||
if psid := sd.Parent().SpanID(); psid.IsValid() {
|
||||
@ -115,6 +117,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
|
||||
return s
|
||||
}
|
||||
|
||||
func clampUint32(v int) uint32 {
|
||||
if v < 0 {
|
||||
return 0
|
||||
}
|
||||
if int64(v) > math.MaxUint32 {
|
||||
return math.MaxUint32
|
||||
}
|
||||
return uint32(v) // nolint: gosec // Overflow/Underflow checked.
|
||||
}
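
A quick standalone demonstration of the clamping behavior; the helper is copied here only so the snippet compiles on its own:

```go
package main

import (
	"fmt"
	"math"
)

// clampUint32 is reproduced from the hunk above for a self-contained example.
func clampUint32(v int) uint32 {
	if v < 0 {
		return 0
	}
	if int64(v) > math.MaxUint32 {
		return math.MaxUint32
	}
	return uint32(v)
}

func main() {
	fmt.Println(clampUint32(-3)) // 0: negative dropped-attribute counts clamp to zero
	fmt.Println(clampUint32(42)) // 42: in-range values pass through unchanged
	// Values above math.MaxUint32 (possible on 64-bit platforms) clamp to 4294967295.
}
```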
|
||||
|
||||
// status transform a span code and message into an OTLP span status.
|
||||
func status(status codes.Code, message string) *tracepb.Status {
|
||||
var c tracepb.Status_StatusCode
|
||||
@ -153,7 +165,7 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link {
|
||||
TraceId: tid[:],
|
||||
SpanId: sid[:],
|
||||
Attributes: KeyValues(otLink.Attributes),
|
||||
DroppedAttributesCount: uint32(otLink.DroppedAttributeCount),
|
||||
DroppedAttributesCount: clampUint32(otLink.DroppedAttributeCount),
|
||||
Flags: flags,
|
||||
})
|
||||
}
|
||||
@ -166,7 +178,7 @@ func buildSpanFlags(sc trace.SpanContext) uint32 {
|
||||
flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
|
||||
}
|
||||
|
||||
return uint32(flags)
|
||||
return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative
|
||||
}
|
||||
|
||||
// spanEvents transforms span Events to an OTLP span events.
|
||||
@ -180,9 +192,9 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
|
||||
for i := 0; i < len(es); i++ {
|
||||
events[i] = &tracepb.Span_Event{
|
||||
Name: es[i].Name,
|
||||
TimeUnixNano: uint64(es[i].Time.UnixNano()),
|
||||
TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked.
|
||||
Attributes: KeyValues(es[i].Attributes),
|
||||
DroppedAttributesCount: uint32(es[i].DroppedAttributeCount),
|
||||
DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount),
|
||||
}
|
||||
}
|
||||
return events
|
||||
|
7
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
generated
vendored
@ -229,7 +229,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
|
||||
}
|
||||
|
||||
if c.metadata.Len() > 0 {
|
||||
ctx = metadata.NewOutgoingContext(ctx, c.metadata)
|
||||
md := c.metadata
|
||||
if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
|
||||
md = metadata.Join(md, outMD)
|
||||
}
|
||||
|
||||
ctx = metadata.NewOutgoingContext(ctx, md)
|
||||
}
|
||||
|
||||
// Unify the client stopCtx with the parent.
|
||||
|
5
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
generated
vendored
@ -12,9 +12,8 @@ The environment variables described below can be used for configuration.
|
||||
OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
|
||||
target to which the exporter sends telemetry.
|
||||
The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
|
||||
The value must contain a host.
|
||||
The value may additionally a port, a scheme, and a path.
|
||||
The value accepts "http" and "https" scheme.
|
||||
The value must contain a scheme ("http" or "https") and host.
|
||||
The value may additionally contain a port, and a path.
|
||||
The value should not contain a query string or fragment.
|
||||
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
|
||||
The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
|
||||
|
@ -15,6 +15,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
)
|
||||
@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string {
|
||||
global.Error(errors.New("missing '="), "parse headers", "input", header)
|
||||
continue
|
||||
}
|
||||
name, err := url.PathUnescape(n)
|
||||
if err != nil {
|
||||
global.Error(err, "escape header key", "key", n)
|
||||
|
||||
trimmedName := strings.TrimSpace(n)
|
||||
|
||||
// Validate the key.
|
||||
if !isValidHeaderKey(trimmedName) {
|
||||
global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName)
|
||||
continue
|
||||
}
|
||||
trimmedName := strings.TrimSpace(name)
|
||||
|
||||
// Only decode the value.
|
||||
value, err := url.PathUnescape(v)
|
||||
if err != nil {
|
||||
global.Error(err, "escape header value", "value", v)
|
||||
@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) {
|
||||
}
|
||||
return cp, nil
|
||||
}
|
||||
|
||||
func isValidHeaderKey(key string) bool {
|
||||
if key == "" {
|
||||
return false
|
||||
}
|
||||
for _, c := range key {
|
||||
if !isTokenChar(c) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func isTokenChar(c rune) bool {
|
||||
return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
|
||||
unicode.IsDigit(c) ||
|
||||
c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
|
||||
c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
|
||||
}
|
||||
|
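The header-parsing change above validates each OTEL_EXPORTER_OTLP_HEADERS key against the RFC 7230 token characters listed in isTokenChar and only URL-decodes the value. A small standalone sketch of the same token check (self-contained; it re-states the helpers rather than importing the internal package):

package main

import (
	"fmt"
	"unicode"
)

// isTokenChar mirrors the RFC 7230 "token" character set used above.
func isTokenChar(c rune) bool {
	return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
		unicode.IsDigit(c) ||
		c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
		c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
}

func isValidHeaderKey(key string) bool {
	if key == "" {
		return false
	}
	for _, c := range key {
		if !isTokenChar(c) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isValidHeaderKey("x-honeycomb-team")) // true
	fmt.Println(isValidHeaderKey("bad key"))          // false: space is not a token char
}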
@ -125,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
|
||||
if cfg.ServiceConfig != "" {
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
||||
}
|
||||
// Priroritize GRPCCredentials over Insecure (passing both is an error).
|
||||
// Prioritize GRPCCredentials over Insecure (passing both is an error).
|
||||
if cfg.Traces.GRPCCredentials != nil {
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
|
||||
} else if cfg.Traces.Insecure {
|
||||
@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption {
|
||||
|
||||
cfg.Traces.Endpoint = u.Host
|
||||
cfg.Traces.URLPath = u.Path
|
||||
if u.Scheme != "https" {
|
||||
cfg.Traces.Insecure = true
|
||||
}
|
||||
cfg.Traces.Insecure = u.Scheme != "https"
|
||||
|
||||
return cfg
|
||||
})
|
||||
|
2
vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
generated
vendored
@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||
|
||||
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
|
||||
func Version() string {
|
||||
return "1.28.0"
|
||||
return "1.32.0"
|
||||
}
|
||||
|
14
vendor/go.opentelemetry.io/otel/internal/global/instruments.go
generated
vendored
@ -13,7 +13,7 @@ import (
|
||||
|
||||
// unwrapper unwraps to return the underlying instrument implementation.
|
||||
type unwrapper interface {
|
||||
Unwrap() metric.Observable
|
||||
unwrap() metric.Observable
|
||||
}
|
||||
|
||||
type afCounter struct {
|
||||
@ -40,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) {
|
||||
i.delegate.Store(ctr)
|
||||
}
|
||||
|
||||
func (i *afCounter) Unwrap() metric.Observable {
|
||||
func (i *afCounter) unwrap() metric.Observable {
|
||||
if ctr := i.delegate.Load(); ctr != nil {
|
||||
return ctr.(metric.Float64ObservableCounter)
|
||||
}
|
||||
@ -71,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) {
|
||||
i.delegate.Store(ctr)
|
||||
}
|
||||
|
||||
func (i *afUpDownCounter) Unwrap() metric.Observable {
|
||||
func (i *afUpDownCounter) unwrap() metric.Observable {
|
||||
if ctr := i.delegate.Load(); ctr != nil {
|
||||
return ctr.(metric.Float64ObservableUpDownCounter)
|
||||
}
|
||||
@ -102,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) {
|
||||
i.delegate.Store(ctr)
|
||||
}
|
||||
|
||||
func (i *afGauge) Unwrap() metric.Observable {
|
||||
func (i *afGauge) unwrap() metric.Observable {
|
||||
if ctr := i.delegate.Load(); ctr != nil {
|
||||
return ctr.(metric.Float64ObservableGauge)
|
||||
}
|
||||
@ -133,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) {
|
||||
i.delegate.Store(ctr)
|
||||
}
|
||||
|
||||
func (i *aiCounter) Unwrap() metric.Observable {
|
||||
func (i *aiCounter) unwrap() metric.Observable {
|
||||
if ctr := i.delegate.Load(); ctr != nil {
|
||||
return ctr.(metric.Int64ObservableCounter)
|
||||
}
|
||||
@ -164,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
|
||||
i.delegate.Store(ctr)
|
||||
}
|
||||
|
||||
func (i *aiUpDownCounter) Unwrap() metric.Observable {
|
||||
func (i *aiUpDownCounter) unwrap() metric.Observable {
|
||||
if ctr := i.delegate.Load(); ctr != nil {
|
||||
return ctr.(metric.Int64ObservableUpDownCounter)
|
||||
}
|
||||
@ -195,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) {
|
||||
i.delegate.Store(ctr)
|
||||
}
|
||||
|
||||
func (i *aiGauge) Unwrap() metric.Observable {
|
||||
func (i *aiGauge) unwrap() metric.Observable {
|
||||
if ctr := i.delegate.Load(); ctr != nil {
|
||||
return ctr.(metric.Int64ObservableGauge)
|
||||
}
|
||||
|
382
vendor/go.opentelemetry.io/otel/internal/global/meter.go
generated
vendored
@ -5,8 +5,9 @@ package global // import "go.opentelemetry.io/otel/internal/global"
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"context"
|
||||
"reflect"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/embedded"
|
||||
@ -66,6 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me
|
||||
name: name,
|
||||
version: c.InstrumentationVersion(),
|
||||
schema: c.SchemaURL(),
|
||||
attrs: c.InstrumentationAttributes(),
|
||||
}
|
||||
|
||||
if p.meters == nil {
|
||||
@ -76,7 +78,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me
|
||||
return val
|
||||
}
|
||||
|
||||
t := &meter{name: name, opts: opts}
|
||||
t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)}
|
||||
p.meters[key] = t
|
||||
return t
|
||||
}
|
||||
@ -92,17 +94,29 @@ type meter struct {
|
||||
opts []metric.MeterOption
|
||||
|
||||
mtx sync.Mutex
|
||||
instruments []delegatedInstrument
|
||||
instruments map[instID]delegatedInstrument
|
||||
|
||||
registry list.List
|
||||
|
||||
delegate atomic.Value // metric.Meter
|
||||
delegate metric.Meter
|
||||
}
|
||||
|
||||
type delegatedInstrument interface {
|
||||
setDelegate(metric.Meter)
|
||||
}
|
||||
|
||||
// instID are the identifying properties of an instrument.
|
||||
type instID struct {
|
||||
// name is the name of the stream.
|
||||
name string
|
||||
// description is the description of the stream.
|
||||
description string
|
||||
// kind defines the functional group of the instrument.
|
||||
kind reflect.Type
|
||||
// unit is the unit of the stream.
|
||||
unit string
|
||||
}
|
||||
|
||||
// setDelegate configures m to delegate all Meter functionality to Meters
|
||||
// created by provider.
|
||||
//
|
||||
@ -110,12 +124,12 @@ type delegatedInstrument interface {
|
||||
//
|
||||
// It is guaranteed by the caller that this happens only once.
|
||||
func (m *meter) setDelegate(provider metric.MeterProvider) {
|
||||
meter := provider.Meter(m.name, m.opts...)
|
||||
m.delegate.Store(meter)
|
||||
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
meter := provider.Meter(m.name, m.opts...)
|
||||
m.delegate = meter
|
||||
|
||||
for _, inst := range m.instruments {
|
||||
inst.setDelegate(meter)
|
||||
}
|
||||
@ -133,169 +147,336 @@ func (m *meter) setDelegate(provider metric.MeterProvider) {
|
||||
}
|
||||
|
||||
func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Int64Counter(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Int64Counter(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewInt64CounterConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*siCounter)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Int64Counter), nil
|
||||
}
|
||||
i := &siCounter{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
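The rewritten Int64Counter above (and the sibling methods that follow) now keys placeholder instruments by instID, so asking the not-yet-delegated global meter for the same name, description, and unit returns the cached instrument instead of appending a new one. A hedged sketch of the observable effect; the meter and instrument names are illustrative:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel"
)

func main() {
	// Before any SDK MeterProvider is installed, otel.Meter returns the
	// global delegating meter patched above.
	m := otel.Meter("example")

	a, _ := m.Int64Counter("requests.total")
	b, _ := m.Int64Counter("requests.total") // same name, description, and unit

	// With the instID map, both calls yield the same cached placeholder.
	fmt.Println(a == b) // expected: true
}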
|
||||
func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Int64UpDownCounter(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Int64UpDownCounter(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewInt64UpDownCounterConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*siUpDownCounter)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Int64UpDownCounter), nil
|
||||
}
|
||||
i := &siUpDownCounter{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Int64Histogram(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Int64Histogram(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewInt64HistogramConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*siHistogram)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Int64Histogram), nil
|
||||
}
|
||||
i := &siHistogram{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Int64Gauge(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Int64Gauge(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewInt64GaugeConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*siGauge)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Int64Gauge), nil
|
||||
}
|
||||
i := &siGauge{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Int64ObservableCounter(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Int64ObservableCounter(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewInt64ObservableCounterConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*aiCounter)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Int64ObservableCounter), nil
|
||||
}
|
||||
i := &aiCounter{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Int64ObservableUpDownCounter(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Int64ObservableUpDownCounter(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*aiUpDownCounter)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Int64ObservableUpDownCounter), nil
|
||||
}
|
||||
i := &aiUpDownCounter{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Int64ObservableGauge(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Int64ObservableGauge(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewInt64ObservableGaugeConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*aiGauge)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Int64ObservableGauge), nil
|
||||
}
|
||||
i := &aiGauge{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Float64Counter(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Float64Counter(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewFloat64CounterConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*sfCounter)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Float64Counter), nil
|
||||
}
|
||||
i := &sfCounter{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Float64UpDownCounter(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Float64UpDownCounter(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewFloat64UpDownCounterConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*sfUpDownCounter)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Float64UpDownCounter), nil
|
||||
}
|
||||
i := &sfUpDownCounter{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Float64Histogram(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Float64Histogram(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewFloat64HistogramConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*sfHistogram)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Float64Histogram), nil
|
||||
}
|
||||
i := &sfHistogram{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Float64Gauge(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Float64Gauge(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewFloat64GaugeConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*sfGauge)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Float64Gauge), nil
|
||||
}
|
||||
i := &sfGauge{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Float64ObservableCounter(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Float64ObservableCounter(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewFloat64ObservableCounterConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*afCounter)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Float64ObservableCounter), nil
|
||||
}
|
||||
i := &afCounter{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Float64ObservableUpDownCounter(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Float64ObservableUpDownCounter(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*afUpDownCounter)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Float64ObservableUpDownCounter), nil
|
||||
}
|
||||
i := &afUpDownCounter{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
return del.Float64ObservableGauge(name, options...)
|
||||
}
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.Float64ObservableGauge(name, options...)
|
||||
}
|
||||
|
||||
cfg := metric.NewFloat64ObservableGaugeConfig(options...)
|
||||
id := instID{
|
||||
name: name,
|
||||
kind: reflect.TypeOf((*afGauge)(nil)),
|
||||
description: cfg.Description(),
|
||||
unit: cfg.Unit(),
|
||||
}
|
||||
if f, ok := m.instruments[id]; ok {
|
||||
return f.(metric.Float64ObservableGauge), nil
|
||||
}
|
||||
i := &afGauge{name: name, opts: options}
|
||||
m.instruments = append(m.instruments, i)
|
||||
m.instruments[id] = i
|
||||
return i, nil
|
||||
}
|
||||
|
||||
// RegisterCallback captures the function that will be called during Collect.
|
||||
func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
|
||||
if del, ok := m.delegate.Load().(metric.Meter); ok {
|
||||
insts = unwrapInstruments(insts)
|
||||
return del.RegisterCallback(f, insts...)
|
||||
}
|
||||
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
if m.delegate != nil {
|
||||
return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...)
|
||||
}
|
||||
|
||||
reg := &registration{instruments: insts, function: f}
|
||||
e := m.registry.PushBack(reg)
|
||||
reg.unreg = func() error {
|
||||
@ -307,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable)
|
||||
return reg, nil
|
||||
}
|
||||
|
||||
type wrapped interface {
|
||||
unwrap() metric.Observable
|
||||
}
|
||||
|
||||
func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
|
||||
out := make([]metric.Observable, 0, len(instruments))
|
||||
|
||||
for _, inst := range instruments {
|
||||
if in, ok := inst.(wrapped); ok {
|
||||
if in, ok := inst.(unwrapper); ok {
|
||||
out = append(out, in.unwrap())
|
||||
} else {
|
||||
out = append(out, inst)
|
||||
@ -335,9 +512,61 @@ type registration struct {
|
||||
unregMu sync.Mutex
|
||||
}
|
||||
|
||||
func (c *registration) setDelegate(m metric.Meter) {
|
||||
insts := unwrapInstruments(c.instruments)
|
||||
type unwrapObs struct {
|
||||
embedded.Observer
|
||||
obs metric.Observer
|
||||
}
|
||||
|
||||
// unwrapFloat64Observable returns an expected metric.Float64Observable after
|
||||
// unwrapping the global object.
|
||||
func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable {
|
||||
if unwrapped, ok := inst.(unwrapper); ok {
|
||||
if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok {
|
||||
// Note: if the unwrapped object does not
|
||||
// unwrap as an observable for either of the
|
||||
// predicates here, it means an internal bug in
|
||||
// this package. We avoid logging an error in
|
||||
// this case, because the SDK has to try its
|
||||
// own type conversion on the object. The SDK
|
||||
// will see this and be forced to respond with
|
||||
// its own error.
|
||||
//
|
||||
// This code uses a double-nested if statement
|
||||
// to avoid creating a branch that is
|
||||
// impossible to cover.
|
||||
inst = floatObs
|
||||
}
|
||||
}
|
||||
return inst
|
||||
}
|
||||
|
||||
// unwrapInt64Observable returns an expected metric.Int64Observable after
|
||||
// unwrapping the global object.
|
||||
func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable {
|
||||
if unwrapped, ok := inst.(unwrapper); ok {
|
||||
if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok {
|
||||
// See the comment in unwrapFloat64Observable().
|
||||
inst = unint
|
||||
}
|
||||
}
|
||||
return inst
|
||||
}
|
||||
|
||||
func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) {
|
||||
uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...)
|
||||
}
|
||||
|
||||
func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) {
|
||||
uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...)
|
||||
}
|
||||
|
||||
func unwrapCallback(f metric.Callback) metric.Callback {
|
||||
return func(ctx context.Context, obs metric.Observer) error {
|
||||
return f(ctx, &unwrapObs{obs: obs})
|
||||
}
|
||||
}
|
||||
|
||||
func (c *registration) setDelegate(m metric.Meter) {
|
||||
c.unregMu.Lock()
|
||||
defer c.unregMu.Unlock()
|
||||
|
||||
@ -346,9 +575,10 @@ func (c *registration) setDelegate(m metric.Meter) {
|
||||
return
|
||||
}
|
||||
|
||||
reg, err := m.RegisterCallback(c.function, insts...)
|
||||
reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...)
|
||||
if err != nil {
|
||||
GetErrorHandler().Handle(err)
|
||||
return
|
||||
}
|
||||
|
||||
c.unreg = reg.Unregister
|
||||
|
8
vendor/go.opentelemetry.io/otel/internal/global/trace.go
generated
vendored
@ -87,6 +87,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
|
||||
name: name,
|
||||
version: c.InstrumentationVersion(),
|
||||
schema: c.SchemaURL(),
|
||||
attrs: c.InstrumentationAttributes(),
|
||||
}
|
||||
|
||||
if p.tracers == nil {
|
||||
@ -102,7 +103,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
|
||||
return t
|
||||
}
|
||||
|
||||
type il struct{ name, version, schema string }
|
||||
type il struct {
|
||||
name string
|
||||
version string
|
||||
schema string
|
||||
attrs attribute.Set
|
||||
}
|
||||
|
||||
// tracer is a placeholder for a trace.Tracer.
|
||||
//
|
||||
|
12
vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
generated
vendored
@ -20,11 +20,13 @@ func RawToBool(r uint64) bool {
|
||||
}
|
||||
|
||||
func Int64ToRaw(i int64) uint64 {
|
||||
return uint64(i)
|
||||
// Assumes original was a valid int64 (overflow not checked).
|
||||
return uint64(i) // nolint: gosec
|
||||
}
|
||||
|
||||
func RawToInt64(r uint64) int64 {
|
||||
return int64(r)
|
||||
// Assumes original was a valid int64 (overflow not checked).
|
||||
return int64(r) // nolint: gosec
|
||||
}
|
||||
|
||||
func Float64ToRaw(f float64) uint64 {
|
||||
@ -36,9 +38,11 @@ func RawToFloat64(r uint64) float64 {
|
||||
}
|
||||
|
||||
func RawPtrToFloat64Ptr(r *uint64) *float64 {
|
||||
return (*float64)(unsafe.Pointer(r))
|
||||
// Assumes original was a valid *float64 (overflow not checked).
|
||||
return (*float64)(unsafe.Pointer(r)) // nolint: gosec
|
||||
}
|
||||
|
||||
func RawPtrToInt64Ptr(r *uint64) *int64 {
|
||||
return (*int64)(unsafe.Pointer(r))
|
||||
// Assumes original was a valid *int64 (overflow not checked).
|
||||
return (*int64)(unsafe.Pointer(r)) // nolint: gosec
|
||||
}
|
||||
|
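The raw helpers above store measurement values as their bit patterns in a uint64, with the new comments noting that overflow checking is the caller's responsibility. A tiny standalone round-trip of the same idea using the standard library (independent of the internal package):

package main

import (
	"fmt"
	"math"
)

func main() {
	// A float64 round-trips through math.Float64bits / math.Float64frombits.
	f := 3.14
	raw := math.Float64bits(f)
	fmt.Println(math.Float64frombits(raw) == f) // true

	// An int64 round-trips through a plain conversion of its bit pattern.
	i := int64(-42)
	raw = uint64(i)
	fmt.Println(int64(raw) == i) // true
}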
2
vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
generated
vendored
@ -213,7 +213,7 @@ type Float64Observer interface {
|
||||
}
|
||||
|
||||
// Float64Callback is a function registered with a Meter that makes
|
||||
// observations for a Float64Observerable instrument it is registered with.
|
||||
// observations for a Float64Observable instrument it is registered with.
|
||||
// Calls to the Float64Observer record measurement values for the
|
||||
// Float64Observable.
|
||||
//
|
||||
|
2
vendor/go.opentelemetry.io/otel/metric/asyncint64.go
generated
vendored
@ -212,7 +212,7 @@ type Int64Observer interface {
|
||||
}
|
||||
|
||||
// Int64Callback is a function registered with a Meter that makes observations
|
||||
// for an Int64Observerable instrument it is registered with. Calls to the
|
||||
// for an Int64Observable instrument it is registered with. Calls to the
|
||||
// Int64Observer record measurement values for the Int64Observable.
|
||||
//
|
||||
// The function needs to complete in a finite amount of time and the deadline
|
||||
|
2
vendor/go.opentelemetry.io/otel/metric/instrument.go
generated
vendored
@ -351,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption {
|
||||
//
|
||||
// cp := make([]attribute.KeyValue, len(attributes))
|
||||
// copy(cp, attributes)
|
||||
// WithAttributes(attribute.NewSet(cp...))
|
||||
// WithAttributeSet(attribute.NewSet(cp...))
|
||||
//
|
||||
// [attribute.NewSet] may modify the passed attributes so this will make a copy
|
||||
// of attributes before creating a set in order to ensure this function is
|
||||
|
13
vendor/go.opentelemetry.io/otel/metric/meter.go
generated
vendored
@ -52,6 +52,7 @@ type Meter interface {
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
|
||||
|
||||
// Int64UpDownCounter returns a new Int64UpDownCounter instrument
|
||||
// identified by name and configured with options. The instrument is used
|
||||
// to synchronously record int64 measurements during a computational
|
||||
@ -61,6 +62,7 @@ type Meter interface {
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
|
||||
|
||||
// Int64Histogram returns a new Int64Histogram instrument identified by
|
||||
// name and configured with options. The instrument is used to
|
||||
// synchronously record the distribution of int64 measurements during a
|
||||
@ -70,6 +72,7 @@ type Meter interface {
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
|
||||
|
||||
// Int64Gauge returns a new Int64Gauge instrument identified by name and
|
||||
// configured with options. The instrument is used to synchronously record
|
||||
// instantaneous int64 measurements during a computational operation.
|
||||
@ -78,6 +81,7 @@ type Meter interface {
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
|
||||
|
||||
// Int64ObservableCounter returns a new Int64ObservableCounter identified
|
||||
// by name and configured with options. The instrument is used to
|
||||
// asynchronously record increasing int64 measurements once per a
|
||||
@ -92,6 +96,7 @@ type Meter interface {
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
|
||||
|
||||
// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
|
||||
// instrument identified by name and configured with options. The
|
||||
// instrument is used to asynchronously record int64 measurements once per
|
||||
@ -106,6 +111,7 @@ type Meter interface {
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
|
||||
|
||||
// Int64ObservableGauge returns a new Int64ObservableGauge instrument
|
||||
// identified by name and configured with options. The instrument is used
|
||||
// to asynchronously record instantaneous int64 measurements once per a
|
||||
@ -130,6 +136,7 @@ type Meter interface {
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
|
||||
|
||||
// Float64UpDownCounter returns a new Float64UpDownCounter instrument
|
||||
// identified by name and configured with options. The instrument is used
|
||||
// to synchronously record float64 measurements during a computational
|
||||
@ -139,6 +146,7 @@ type Meter interface {
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
|
||||
|
||||
// Float64Histogram returns a new Float64Histogram instrument identified by
|
||||
// name and configured with options. The instrument is used to
|
||||
// synchronously record the distribution of float64 measurements during a
|
||||
@ -148,6 +156,7 @@ type Meter interface {
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
|
||||
|
||||
// Float64Gauge returns a new Float64Gauge instrument identified by name and
|
||||
// configured with options. The instrument is used to synchronously record
|
||||
// instantaneous float64 measurements during a computational operation.
|
||||
@ -156,6 +165,7 @@ type Meter interface {
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
|
||||
|
||||
// Float64ObservableCounter returns a new Float64ObservableCounter
|
||||
// instrument identified by name and configured with options. The
|
||||
// instrument is used to asynchronously record increasing float64
|
||||
@ -170,6 +180,7 @@ type Meter interface {
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
|
||||
|
||||
// Float64ObservableUpDownCounter returns a new
|
||||
// Float64ObservableUpDownCounter instrument identified by name and
|
||||
// configured with options. The instrument is used to asynchronously record
|
||||
@ -184,6 +195,7 @@ type Meter interface {
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
|
||||
|
||||
// Float64ObservableGauge returns a new Float64ObservableGauge instrument
|
||||
// identified by name and configured with options. The instrument is used
|
||||
// to asynchronously record instantaneous float64 measurements once per a
|
||||
@ -242,6 +254,7 @@ type Observer interface {
|
||||
|
||||
// ObserveFloat64 records the float64 value for obsrv.
|
||||
ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
|
||||
|
||||
// ObserveInt64 records the int64 value for obsrv.
|
||||
ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
|
||||
}
|
||||
|
8
vendor/go.opentelemetry.io/otel/renovate.json
generated
vendored
@ -19,6 +19,14 @@
|
||||
"matchManagers": ["gomod"],
|
||||
"matchDepTypes": ["indirect"],
|
||||
"enabled": false
|
||||
},
|
||||
{
|
||||
"matchPackageNames": ["google.golang.org/genproto/googleapis/**"],
|
||||
"groupName": "googleapis"
|
||||
},
|
||||
{
|
||||
"matchPackageNames": ["golang.org/x/**"],
|
||||
"groupName": "golang.org/x"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
3
vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
generated
vendored
@ -4,5 +4,6 @@
|
||||
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
|
||||
// Library represents the instrumentation library.
|
||||
// Deprecated: please use Scope instead.
|
||||
//
|
||||
// Deprecated: use [Scope] instead.
|
||||
type Library = Scope
|
||||
|
4
vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
generated
vendored
@ -3,6 +3,8 @@
|
||||
|
||||
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
|
||||
import "go.opentelemetry.io/otel/attribute"
|
||||
|
||||
// Scope represents the instrumentation scope.
|
||||
type Scope struct {
|
||||
// Name is the name of the instrumentation scope. This should be the
|
||||
@ -12,4 +14,6 @@ type Scope struct {
|
||||
Version string
|
||||
// SchemaURL of the telemetry emitted by the scope.
|
||||
SchemaURL string
|
||||
// Attributes of the telemetry emitted by the scope.
|
||||
Attributes attribute.Set
|
||||
}
|
||||
|
79
vendor/go.opentelemetry.io/otel/sdk/metric/config.go
generated
vendored
@ -5,17 +5,22 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"errors"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/sdk/metric/exemplar"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
// config contains configuration options for a MeterProvider.
|
||||
type config struct {
|
||||
res *resource.Resource
|
||||
readers []Reader
|
||||
views []View
|
||||
res *resource.Resource
|
||||
readers []Reader
|
||||
views []View
|
||||
exemplarFilter exemplar.Filter
|
||||
}
|
||||
|
||||
// readerSignals returns a force-flush and shutdown function for a
|
||||
@ -39,25 +44,13 @@ func (c config) readerSignals() (forceFlush, shutdown func(context.Context) erro
|
||||
// value.
|
||||
func unify(funcs []func(context.Context) error) func(context.Context) error {
|
||||
return func(ctx context.Context) error {
|
||||
var errs []error
|
||||
var err error
|
||||
for _, f := range funcs {
|
||||
if err := f(ctx); err != nil {
|
||||
errs = append(errs, err)
|
||||
if e := f(ctx); e != nil {
|
||||
err = errors.Join(err, e)
|
||||
}
|
||||
}
|
||||
return unifyErrors(errs)
|
||||
}
|
||||
}
|
||||
|
||||
// unifyErrors combines multiple errors into a single error.
|
||||
func unifyErrors(errs []error) error {
|
||||
switch len(errs) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return errs[0]
|
||||
default:
|
||||
return fmt.Errorf("%v", errs)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
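The unify rewrite above drops the local unifyErrors helper in favour of errors.Join, which returns nil when every joined error is nil and keeps each joined error reachable via errors.Is and errors.As. A tiny standalone illustration of that behaviour; the error texts are made up:

package main

import (
	"errors"
	"fmt"
)

func main() {
	var err error
	err = errors.Join(err, nil) // joining only nil values stays nil
	fmt.Println(err == nil)     // true

	err = errors.Join(err, errors.New("flush reader A"))
	err = errors.Join(err, errors.New("flush reader B"))
	fmt.Println(err) // both messages, newline separated

	// errors.Is still sees the individual errors through the join.
	target := errors.New("flush reader C")
	fmt.Println(errors.Is(errors.Join(err, target), target)) // true
}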
@ -75,7 +68,13 @@ func unifyShutdown(funcs []func(context.Context) error) func(context.Context) er
|
||||
|
||||
// newConfig returns a config configured with options.
|
||||
func newConfig(options []Option) config {
|
||||
conf := config{res: resource.Default()}
|
||||
conf := config{
|
||||
res: resource.Default(),
|
||||
exemplarFilter: exemplar.TraceBasedFilter,
|
||||
}
|
||||
for _, o := range meterProviderOptionsFromEnv() {
|
||||
conf = o.apply(conf)
|
||||
}
|
||||
for _, o := range options {
|
||||
conf = o.apply(conf)
|
||||
}
|
||||
@ -103,7 +102,11 @@ func (o optionFunc) apply(conf config) config {
|
||||
// go.opentelemetry.io/otel/sdk/resource package will be used.
|
||||
func WithResource(res *resource.Resource) Option {
|
||||
return optionFunc(func(conf config) config {
|
||||
conf.res = res
|
||||
var err error
|
||||
conf.res, err = resource.Merge(resource.Environment(), res)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
return conf
|
||||
})
|
||||
}
|
||||
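WithResource above now merges the supplied resource over resource.Environment() and hands any merge error to otel.Handle, so attributes from OTEL_RESOURCE_ATTRIBUTES are no longer silently discarded. A hedged sketch of the same merge call; the service name is illustrative:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// With OTEL_RESOURCE_ATTRIBUTES=deployment.environment=prod set, the
	// environment resource contributes attributes of its own.
	custom := resource.NewSchemaless(attribute.String("service.name", "checkout"))

	merged, err := resource.Merge(resource.Environment(), custom)
	if err != nil {
		otel.Handle(err) // same error path the patched option uses
	}
	fmt.Println(merged.Attributes())
}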
@ -135,3 +138,35 @@ func WithView(views ...View) Option {
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
// WithExemplarFilter configures the exemplar filter.
|
||||
//
|
||||
// The exemplar filter determines which measurements are offered to the
|
||||
// exemplar reservoir, but the exemplar reservoir makes the final decision of
|
||||
// whether to store an exemplar.
|
||||
//
|
||||
// By default, the [exemplar.SampledFilter]
|
||||
// is used. Exemplars can be entirely disabled by providing the
|
||||
// [exemplar.AlwaysOffFilter].
|
||||
func WithExemplarFilter(filter exemplar.Filter) Option {
|
||||
return optionFunc(func(cfg config) config {
|
||||
cfg.exemplarFilter = filter
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func meterProviderOptionsFromEnv() []Option {
|
||||
var opts []Option
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
|
||||
const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER"
|
||||
|
||||
switch strings.ToLower(strings.TrimSpace(os.Getenv(filterEnvKey))) {
|
||||
case "always_on":
|
||||
opts = append(opts, WithExemplarFilter(exemplar.AlwaysOnFilter))
|
||||
case "always_off":
|
||||
opts = append(opts, WithExemplarFilter(exemplar.AlwaysOffFilter))
|
||||
case "trace_based":
|
||||
opts = append(opts, WithExemplarFilter(exemplar.TraceBasedFilter))
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
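The new WithExemplarFilter option and meterProviderOptionsFromEnv above mean the exemplar filter can come either from code or from OTEL_METRICS_EXEMPLAR_FILTER (always_on, always_off, trace_based), with explicit options applied after the environment-derived ones and therefore winning. A hedged usage sketch:

package main

import (
	"context"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

func main() {
	// Options passed in code are applied after the environment-derived ones,
	// so this overrides any OTEL_METRICS_EXEMPLAR_FILTER setting.
	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithExemplarFilter(exemplar.AlwaysOffFilter),
	)
	defer func() { _ = mp.Shutdown(context.Background()) }()
}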
8
vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
generated
vendored
@ -31,6 +31,14 @@
|
||||
// is being run on. That way when multiple instances of the code are collected
|
||||
// at a single endpoint their origin is decipherable.
|
||||
//
|
||||
// To avoid leaking memory, the SDK returns the same instrument for calls to
|
||||
// create new instruments with the same Name, Unit, and Description.
|
||||
// Importantly, callbacks provided using metric.WithFloat64Callback or
|
||||
// metric.WithInt64Callback will only apply for the first instrument created
|
||||
// with a given Name, Unit, and Description. Instead, use
|
||||
// Meter.RegisterCallback and Registration.Unregister to add and remove
|
||||
// callbacks without leaking memory.
|
||||
//
|
||||
// See [go.opentelemetry.io/otel/metric] for more information about
|
||||
// the metric API.
|
||||
//
|
||||
|
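The doc.go paragraph above warns that instruments are deduplicated by Name, Unit, and Description, so a callback attached with metric.WithInt64Callback only applies to the first creation; Meter.RegisterCallback plus Registration.Unregister is the leak-free route it recommends. A hedged sketch of that pattern; the meter name, instrument name, and value are illustrative:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example")

	gauge, err := meter.Int64ObservableGauge("queue.depth")
	if err != nil {
		log.Fatal(err)
	}

	reg, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(gauge, 42) // report the current value
		return nil
	}, gauge)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = reg.Unregister() }() // removable, unlike WithInt64Callback
}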
70
vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
generated
vendored
@ -4,51 +4,49 @@
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"slices"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal/x"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/exemplar"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
|
||||
)
|
||||
|
||||
// ExemplarReservoirProviderSelector selects the
|
||||
// [exemplar.ReservoirProvider] to use
|
||||
// based on the [Aggregation] of the metric.
|
||||
type ExemplarReservoirProviderSelector func(Aggregation) exemplar.ReservoirProvider
|
||||
|
||||
// reservoirFunc returns the appropriately configured exemplar reservoir
|
||||
// creation func based on the passed InstrumentKind and user defined
|
||||
// environment variables.
|
||||
// creation func based on the passed InstrumentKind and filter configuration.
|
||||
func reservoirFunc[N int64 | float64](provider exemplar.ReservoirProvider, filter exemplar.Filter) func(attribute.Set) aggregate.FilteredExemplarReservoir[N] {
|
||||
return func(attrs attribute.Set) aggregate.FilteredExemplarReservoir[N] {
|
||||
return aggregate.NewFilteredExemplarReservoir[N](filter, provider(attrs))
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultExemplarReservoirProviderSelector returns the default
|
||||
// [exemplar.ReservoirProvider] for the
|
||||
// provided [Aggregation].
|
||||
//
|
||||
// Note: This will only return non-nil values when the experimental exemplar
|
||||
// feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable
|
||||
// is not set to always_off.
|
||||
func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredReservoir[N] {
|
||||
if !x.Exemplars.Enabled() {
|
||||
return nil
|
||||
}
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
|
||||
const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER"
|
||||
|
||||
var filter exemplar.Filter
|
||||
|
||||
switch os.Getenv(filterEnvKey) {
|
||||
case "always_on":
|
||||
filter = exemplar.AlwaysOnFilter
|
||||
case "always_off":
|
||||
return exemplar.Drop
|
||||
case "trace_based":
|
||||
fallthrough
|
||||
default:
|
||||
filter = exemplar.SampledFilter
|
||||
}
|
||||
|
||||
// For explicit bucket histograms with more than 1 bucket, it uses the
|
||||
// [exemplar.HistogramReservoirProvider].
|
||||
// For exponential histograms, it uses the
|
||||
// [exemplar.FixedSizeReservoirProvider]
|
||||
// with a size of min(20, max_buckets).
|
||||
// For all other aggregations, it uses the
|
||||
// [exemplar.FixedSizeReservoirProvider]
|
||||
// with a size equal to the number of CPUs.
|
||||
//
|
||||
// Exemplar default reservoirs MAY change in a minor version bump. No
|
||||
// guarantees are made on the shape or statistical properties of returned
|
||||
// exemplars.
|
||||
func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.ReservoirProvider {
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
|
||||
// Explicit bucket histogram aggregation with more than 1 bucket will
|
||||
// use AlignedHistogramBucketExemplarReservoir.
|
||||
a, ok := agg.(AggregationExplicitBucketHistogram)
|
||||
if ok && len(a.Boundaries) > 0 {
|
||||
cp := slices.Clone(a.Boundaries)
|
||||
return func() exemplar.FilteredReservoir[N] {
|
||||
bounds := cp
|
||||
return exemplar.NewFilteredReservoir[N](filter, exemplar.Histogram(bounds))
|
||||
}
|
||||
return exemplar.HistogramReservoirProvider(a.Boundaries)
|
||||
}
|
||||
|
||||
var n int
|
||||
@ -75,7 +73,5 @@ func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredR
|
||||
}
|
||||
}
|
||||
|
||||
return func() exemplar.FilteredReservoir[N] {
|
||||
return exemplar.NewFilteredReservoir[N](filter, exemplar.FixedSize(n))
|
||||
}
|
||||
return exemplar.FixedSizeReservoirProvider(n)
|
||||
}
|
||||
|
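DefaultExemplarReservoirProviderSelector above picks a HistogramReservoirProvider for explicit-bucket histograms and a fixed-size reservoir otherwise. A hedged sketch of a custom selector with the same signature, forcing a small fixed-size reservoir for every aggregation; how such a selector is wired into a pipeline is not shown here:

package main

import (
	"go.opentelemetry.io/otel/attribute"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

// smallReservoirSelector satisfies sdkmetric.ExemplarReservoirProviderSelector:
// every aggregation gets a fixed-size reservoir holding at most four exemplars.
func smallReservoirSelector(sdkmetric.Aggregation) exemplar.ReservoirProvider {
	return exemplar.FixedSizeReservoirProvider(4)
}

func main() {
	provider := smallReservoirSelector(sdkmetric.AggregationSum{})
	_ = provider(*attribute.EmptySet()) // build one reservoir for an empty attribute set
}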
3
vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# Metric SDK Exemplars
|
||||
|
||||
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric/exemplar)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric/exemplar)
|
@ -3,4 +3,4 @@
|
||||
|
||||
// Package exemplar provides an implementation of the OpenTelemetry exemplar
|
||||
// reservoir to be used in metric collection pipelines.
|
||||
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
|
||||
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
|
@ -1,7 +1,7 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
|
||||
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
|
||||
|
||||
import (
|
||||
"time"
|
@ -1,7 +1,7 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
|
||||
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
|
||||
|
||||
import (
|
||||
"context"
|
||||
@ -16,10 +16,10 @@ import (
|
||||
// Reservoir in making a sampling decision.
|
||||
type Filter func(context.Context) bool
|
||||
|
||||
// SampledFilter is a [Filter] that will only offer measurements
|
||||
// TraceBasedFilter is a [Filter] that will only offer measurements
|
||||
// if the passed context associated with the measurement contains a sampled
|
||||
// [go.opentelemetry.io/otel/trace.SpanContext].
|
||||
func SampledFilter(ctx context.Context) bool {
|
||||
func TraceBasedFilter(ctx context.Context) bool {
|
||||
return trace.SpanContextFromContext(ctx).IsSampled()
|
||||
}
|
||||
|
||||
@ -27,3 +27,8 @@ func SampledFilter(ctx context.Context) bool {
|
||||
func AlwaysOnFilter(ctx context.Context) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// AlwaysOffFilter is a [Filter] that never offers measurements.
|
||||
func AlwaysOffFilter(ctx context.Context) bool {
|
||||
return false
|
||||
}
|
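The filter change above renames SampledFilter to TraceBasedFilter and adds AlwaysOffFilter; a Filter is just a func(context.Context) bool, so custom policies are one-liners. A hedged sketch of a filter keyed off a hypothetical "exemplars" baggage member (the member name is an assumption, not part of the SDK):

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

// debugBaggageFilter offers a measurement only when the caller set the
// hypothetical "exemplars=on" baggage member on the context.
func debugBaggageFilter(ctx context.Context) bool {
	return baggage.FromContext(ctx).Member("exemplars").Value() == "on"
}

func main() {
	var f exemplar.Filter = debugBaggageFilter
	fmt.Println(f(context.Background())) // false: no baggage on a bare context
}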
@ -1,31 +1,69 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
|
||||
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
)
|
||||
|
||||
var (
|
||||
// FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir].
|
||||
func FixedSizeReservoirProvider(k int) ReservoirProvider {
|
||||
return func(_ attribute.Set) Reservoir {
|
||||
return NewFixedSizeReservoir(k)
|
||||
}
|
||||
}
|
||||
|
||||
// NewFixedSizeReservoir returns a [FixedSizeReservoir] that samples at most
|
||||
// k exemplars. If there are k or less measurements made, the Reservoir will
|
||||
// sample each one. If there are more than k, the Reservoir will then randomly
|
||||
// sample all additional measurement with a decreasing probability.
|
||||
func NewFixedSizeReservoir(k int) *FixedSizeReservoir {
|
||||
return newFixedSizeReservoir(newStorage(k))
|
||||
}
|
||||
|
||||
var _ Reservoir = &FixedSizeReservoir{}
|
||||
|
||||
// FixedSizeReservoir is a [Reservoir] that samples at most k exemplars. If
|
||||
// there are k or less measurements made, the Reservoir will sample each one.
|
||||
// If there are more than k, the Reservoir will then randomly sample all
|
||||
// additional measurement with a decreasing probability.
|
||||
type FixedSizeReservoir struct {
|
||||
*storage
|
||||
|
||||
// count is the number of measurement seen.
|
||||
count int64
|
||||
// next is the next count that will store a measurement at a random index
|
||||
// once the reservoir has been filled.
|
||||
next int64
|
||||
// w is the largest random number in a distribution that is used to compute
|
||||
// the next next.
|
||||
w float64
|
||||
|
||||
// rng is used to make sampling decisions.
|
||||
//
|
||||
// Do not use crypto/rand. There is no reason for the decrease in performance
|
||||
// given this is not a security sensitive decision.
|
||||
rng = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
// Ensure concurrent safe access to rng and its underlying source.
|
||||
rngMu sync.Mutex
|
||||
)
|
||||
rng *rand.Rand
|
||||
}
|
||||
|
||||
// random returns, as a float64, a uniform pseudo-random number in the open
|
||||
// interval (0.0,1.0).
|
||||
func random() float64 {
|
||||
func newFixedSizeReservoir(s *storage) *FixedSizeReservoir {
|
||||
r := &FixedSizeReservoir{
|
||||
storage: s,
|
||||
rng: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
}
|
||||
r.reset()
|
||||
return r
|
||||
}
|
||||
|
||||
// randomFloat64 returns, as a float64, a uniform pseudo-random number in the
|
||||
// open interval (0.0,1.0).
|
||||
func (r *FixedSizeReservoir) randomFloat64() float64 {
|
||||
// TODO: This does not return a uniform number. rng.Float64 returns a
|
||||
// uniformly random int in [0,2^53) that is divided by 2^53. Meaning it
|
||||
// returns multiples of 2^-53, and not all floating point numbers between 0
|
||||
@ -43,40 +81,25 @@ func random() float64 {
|
||||
//
|
||||
// There are likely many other methods to explore here as well.
|
||||
|
||||
rngMu.Lock()
|
||||
defer rngMu.Unlock()
|
||||
|
||||
f := rng.Float64()
|
||||
f := r.rng.Float64()
|
||||
for f == 0 {
|
||||
f = rng.Float64()
|
||||
f = r.rng.Float64()
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// FixedSize returns a [Reservoir] that samples at most k exemplars. If there
|
||||
// are k or less measurements made, the Reservoir will sample each one. If
|
||||
// there are more than k, the Reservoir will then randomly sample all
|
||||
// additional measurement with a decreasing probability.
|
||||
func FixedSize(k int) Reservoir {
|
||||
r := &randRes{storage: newStorage(k)}
|
||||
r.reset()
|
||||
return r
|
||||
}
|
||||
|
||||
type randRes struct {
|
||||
*storage
|
||||
|
||||
// count is the number of measurement seen.
|
||||
count int64
|
||||
// next is the next count that will store a measurement at a random index
|
||||
// once the reservoir has been filled.
|
||||
next int64
|
||||
// w is the largest random number in a distribution that is used to compute
|
||||
// the next next.
|
||||
w float64
|
||||
}
|
||||
|
||||
func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) {
|
||||
// Offer accepts the parameters associated with a measurement. The
|
||||
// parameters will be stored as an exemplar if the Reservoir decides to
|
||||
// sample the measurement.
|
||||
//
|
||||
// The passed ctx needs to contain any baggage or span that were active
|
||||
// when the measurement was made. This information may be used by the
|
||||
// Reservoir in making a sampling decision.
|
||||
//
|
||||
// The time t is the time when the measurement was made. The v and a
|
||||
// parameters are the value and dropped (filtered) attributes of the
|
||||
// measurement respectively.
|
||||
func (r *FixedSizeReservoir) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) {
|
||||
// The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December
|
||||
// 1994). "Reservoir-Sampling Algorithms of Time Complexity
|
||||
// O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4):
|
||||
@ -123,7 +146,7 @@ func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute
|
||||
} else {
|
||||
if r.count == r.next {
|
||||
// Overwrite a random existing measurement with the one offered.
|
||||
idx := int(rng.Int63n(int64(cap(r.store))))
|
||||
idx := int(r.rng.Int63n(int64(cap(r.store))))
|
||||
r.store[idx] = newMeasurement(ctx, t, n, a)
|
||||
r.advance()
|
||||
}
|
||||
@ -132,7 +155,7 @@ func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute
|
||||
}
|
||||
|
||||
// reset resets r to the initial state.
|
||||
func (r *randRes) reset() {
|
||||
func (r *FixedSizeReservoir) reset() {
|
||||
// This resets the number of exemplars known.
|
||||
r.count = 0
|
||||
// Random index inserts should only happen after the storage is full.
|
||||
@ -147,14 +170,14 @@ func (r *randRes) reset() {
|
||||
// This maps the uniform random number in (0,1) to a geometric distribution
|
||||
// over the same interval. The mean of the distribution is inversely
|
||||
// proportional to the storage capacity.
|
||||
r.w = math.Exp(math.Log(random()) / float64(cap(r.store)))
|
||||
r.w = math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store)))
|
||||
|
||||
r.advance()
|
||||
}
|
||||
|
||||
// advance updates the count at which the offered measurement will overwrite an
|
||||
// existing exemplar.
|
||||
func (r *randRes) advance() {
|
||||
func (r *FixedSizeReservoir) advance() {
|
||||
// Calculate the next value in the random number series.
|
||||
//
|
||||
// The current value of r.w is based on the max of a distribution of random
|
||||
@ -167,7 +190,7 @@ func (r *randRes) advance() {
|
||||
// therefore the next r.w will be based on the same distribution (i.e.
|
||||
// `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by
|
||||
// computing the next random number `u` and take r.w as `w * u^(1/k)`.
|
||||
r.w *= math.Exp(math.Log(random()) / float64(cap(r.store)))
|
||||
r.w *= math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store)))
|
||||
// Use the new random number in the series to calculate the count of the
|
||||
// next measurement that will be stored.
|
||||
//
|
||||
@ -178,10 +201,13 @@ func (r *randRes) advance() {
|
||||
//
|
||||
// Important to note, the new r.next will always be at least 1 more than
|
||||
// the last r.next.
|
||||
r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1
|
||||
r.next += int64(math.Log(r.randomFloat64())/math.Log(1-r.w)) + 1
|
||||
}
|
||||
|
||||
func (r *randRes) Collect(dest *[]Exemplar) {
|
||||
// Collect returns all the held exemplars.
|
||||
//
|
||||
// The Reservoir state is preserved after this call.
|
||||
func (r *FixedSizeReservoir) Collect(dest *[]Exemplar) {
|
||||
r.storage.Collect(dest)
|
||||
// Call reset here even though it will reset r.count and restart the random
|
||||
// number series. This will persist any old exemplars as long as no new
|
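FixedSizeReservoir above implements Li's "Algorithm L": after the first k measurements fill the store, each later measurement overwrites a random slot with decreasing probability, using skip-ahead jumps rather than a coin flip per item. A self-contained sketch of the same reservoir-sampling idea on plain ints, independent of the exemplar types; the stream size and k are arbitrary:

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// randOpen returns a uniform float64 in the open interval (0, 1),
// mirroring randomFloat64 above.
func randOpen(rng *rand.Rand) float64 {
	f := rng.Float64()
	for f == 0 {
		f = rng.Float64()
	}
	return f
}

// sampleK returns k items chosen uniformly at random from stream using the
// same skip-ahead scheme ("Algorithm L") that FixedSizeReservoir uses.
func sampleK(stream []int, k int, rng *rand.Rand) []int {
	res := make([]int, 0, k)
	for i := 0; i < len(stream) && i < k; i++ {
		res = append(res, stream[i]) // the first k items always enter
	}
	if len(stream) <= k {
		return res
	}

	w := math.Exp(math.Log(randOpen(rng)) / float64(k))
	i := k
	for {
		// Skip ahead: the next index to store follows a geometric-like jump.
		i += int(math.Log(randOpen(rng))/math.Log(1-w)) + 1
		if i >= len(stream) {
			return res
		}
		res[rng.Intn(k)] = stream[i] // overwrite a random existing slot
		w *= math.Exp(math.Log(randOpen(rng)) / float64(k))
	}
}

func main() {
	rng := rand.New(rand.NewSource(1))
	stream := make([]int, 1000)
	for i := range stream {
		stream[i] = i
	}
	fmt.Println(sampleK(stream, 5, rng)) // 5 values spread across the stream
}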
70
vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"

import (
"context"
"slices"
"sort"
"time"

"go.opentelemetry.io/otel/attribute"
)

// HistogramReservoirProvider is a provider of [HistogramReservoir].
func HistogramReservoirProvider(bounds []float64) ReservoirProvider {
cp := slices.Clone(bounds)
slices.Sort(cp)
return func(_ attribute.Set) Reservoir {
return NewHistogramReservoir(cp)
}
}

// NewHistogramReservoir returns a [HistogramReservoir] that samples the last
// measurement that falls within a histogram bucket. The histogram bucket
// upper-boundaries are define by bounds.
//
// The passed bounds must be sorted before calling this function.
func NewHistogramReservoir(bounds []float64) *HistogramReservoir {
return &HistogramReservoir{
bounds: bounds,
storage: newStorage(len(bounds) + 1),
}
}

var _ Reservoir = &HistogramReservoir{}

// HistogramReservoir is a [Reservoir] that samples the last measurement that
// falls within a histogram bucket. The histogram bucket upper-boundaries are
// define by bounds.
type HistogramReservoir struct {
*storage

// bounds are bucket bounds in ascending order.
bounds []float64
}

// Offer accepts the parameters associated with a measurement. The
// parameters will be stored as an exemplar if the Reservoir decides to
// sample the measurement.
//
// The passed ctx needs to contain any baggage or span that were active
// when the measurement was made. This information may be used by the
// Reservoir in making a sampling decision.
//
// The time t is the time when the measurement was made. The v and a
// parameters are the value and dropped (filtered) attributes of the
// measurement respectively.
func (r *HistogramReservoir) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) {
var x float64
switch v.Type() {
case Int64ValueType:
x = float64(v.Int64())
case Float64ValueType:
x = v.Float64()
default:
panic("unknown value type")
}
r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a)
}
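A short usage sketch against the exported API added above; only calls visible in this diff (NewHistogramReservoir, Offer, Collect, NewValue) are used, and the bucket bounds and attributes are illustrative:

package main

import (
	"context"
	"fmt"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

func main() {
	// Bucket upper boundaries must already be sorted for NewHistogramReservoir.
	res := exemplar.NewHistogramReservoir([]float64{5, 10, 25})

	ctx := context.Background()
	for _, v := range []float64{3, 7, 12, 90} {
		res.Offer(ctx, time.Now(), exemplar.NewValue(v),
			[]attribute.KeyValue{attribute.Float64("value", v)})
	}

	var out []exemplar.Exemplar
	res.Collect(&out)
	fmt.Println(len(out)) // at most one exemplar per bucket: here 4
}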
@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"

import (
"context"
@ -30,3 +30,11 @@ type Reservoir interface {
// The Reservoir state is preserved after this call.
Collect(dest *[]Exemplar)
}

// ReservoirProvider creates new [Reservoir]s.
//
// The attributes provided are attributes which are kept by the aggregation, and
// are exclusive with attributes passed to Offer. The combination of these
// attributes and the attributes passed to Offer is the complete set of
// attributes a measurement was made with.
type ReservoirProvider func(attr attribute.Set) Reservoir
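Because a ReservoirProvider receives the aggregation-kept attribute set, the reservoir choice can vary per attribute set. A sketch under the assumption that exemplar.NewFixedSizeReservoir is exported alongside NewHistogramReservoir shown above; the "high_cardinality" key is purely illustrative:

package main

import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

// perAttrProvider picks a reservoir per aggregation attribute set.
func perAttrProvider(bounds []float64) exemplar.ReservoirProvider {
	return func(attrs attribute.Set) exemplar.Reservoir {
		if attrs.HasValue("high_cardinality") {
			// Keep exemplar storage small for noisy attribute sets.
			return exemplar.NewFixedSizeReservoir(1)
		}
		return exemplar.NewHistogramReservoir(bounds)
	}
}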
@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"

import (
"context"
@ -35,7 +35,7 @@ func (r *storage) Collect(dest *[]Exemplar) {
continue
}

m.Exemplar(&(*dest)[n])
m.exemplar(&(*dest)[n])
n++
}
*dest = (*dest)[:n]
@ -66,8 +66,8 @@ func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []at
}
}

// Exemplar returns m as an [Exemplar].
func (m measurement) Exemplar(dest *Exemplar) {
// exemplar returns m as an [Exemplar].
func (m measurement) exemplar(dest *Exemplar) {
dest.FilteredAttributes = m.FilteredAttributes
dest.Time = m.Time
dest.Value = m.Value
@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"

import "math"

@ -28,7 +28,8 @@ type Value struct {
func NewValue[N int64 | float64](value N) Value {
switch v := any(value).(type) {
case int64:
return Value{t: Int64ValueType, val: uint64(v)}
// This can be later converted back to int64 (overflow not checked).
return Value{t: Int64ValueType, val: uint64(v)} // nolint:gosec
case float64:
return Value{t: Float64ValueType, val: math.Float64bits(v)}
}
@ -42,7 +43,8 @@ func (v Value) Type() ValueType { return v.t }
// Int64ValueType, 0 is returned.
func (v Value) Int64() int64 {
if v.t == Int64ValueType {
return int64(v.val)
// Assumes the correct int64 was stored in v.val based on type.
return int64(v.val) // nolint: gosec
}
return 0
}
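The nolint:gosec annotations above cover uint64/int64 conversions that round-trip losslessly through two's complement. A tiny check using only the exported Value API shown in this diff:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

func main() {
	i := exemplar.NewValue(int64(-42))
	f := exemplar.NewValue(3.14)
	fmt.Println(i.Type() == exemplar.Int64ValueType, i.Int64())     // true -42
	fmt.Println(f.Type() == exemplar.Float64ValueType, f.Float64()) // true 3.14
}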
14 vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go generated vendored
@ -144,6 +144,12 @@ type Stream struct {
// Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to
// provide an allow-list of attribute keys here.
AttributeFilter attribute.Filter
// ExemplarReservoirProvider selects the
// [go.opentelemetry.io/otel/sdk/metric/exemplar.ReservoirProvider] based
// on the [Aggregation].
//
// If unspecified, [DefaultExemplarReservoirProviderSelector] is used.
ExemplarReservoirProviderSelector ExemplarReservoirProviderSelector
}

// instID are the identifying properties of a instrument.
@ -234,8 +240,8 @@ func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Se
}
}

// observablID is a comparable unique identifier of an observable.
type observablID[N int64 | float64] struct {
// observableID is a comparable unique identifier of an observable.
type observableID[N int64 | float64] struct {
name string
description string
kind InstrumentKind
@ -287,7 +293,7 @@ func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int

type observable[N int64 | float64] struct {
metric.Observable
observablID[N]
observableID[N]

meter *meter
measures measures[N]
@ -296,7 +302,7 @@ type observable[N int64 | float64] struct {

func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] {
return &observable[N]{
observablID: observablID[N]{
observableID: observableID[N]{
name: name,
description: desc,
kind: kind,
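The new Stream field is set through a View. A sketch, assuming the selector takes the stream's Aggregation and returns an exemplar.ReservoirProvider, and that exemplar.FixedSizeReservoirProvider is exported alongside HistogramReservoirProvider above; the instrument name is illustrative:

package main

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

func newProviderWithCustomReservoirs() *sdkmetric.MeterProvider {
	view := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "request.duration"},
		sdkmetric.Stream{
			// Always use a fixed-size reservoir, regardless of aggregation.
			ExemplarReservoirProviderSelector: func(sdkmetric.Aggregation) exemplar.ReservoirProvider {
				return exemplar.FixedSizeReservoirProvider(8)
			},
		},
	)
	return sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
}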
9 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go generated vendored
@ -8,7 +8,6 @@ import (
"time"

"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

@ -38,8 +37,8 @@ type Builder[N int64 | float64] struct {
// create new exemplar reservoirs for a new seen attribute set.
//
// If this is not provided a default factory function that returns an
// exemplar.Drop reservoir will be used.
ReservoirFunc func() exemplar.FilteredReservoir[N]
// dropReservoir reservoir will be used.
ReservoirFunc func(attribute.Set) FilteredExemplarReservoir[N]
// AggregationLimit is the cardinality limit of measurement attributes. Any
// measurement for new attributes once the limit has been reached will be
// aggregated into a single aggregate for the "otel.metric.overflow"
@ -50,12 +49,12 @@ type Builder[N int64 | float64] struct {
AggregationLimit int
}

func (b Builder[N]) resFunc() func() exemplar.FilteredReservoir[N] {
func (b Builder[N]) resFunc() func(attribute.Set) FilteredExemplarReservoir[N] {
if b.ReservoirFunc != nil {
return b.ReservoirFunc
}

return exemplar.Drop
return dropReservoir
}

type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)
27 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go generated vendored Normal file
@ -0,0 +1,27 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"

import (
"context"

"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

// dropReservoir returns a [FilteredReservoir] that drops all measurements it is offered.
func dropReservoir[N int64 | float64](attribute.Set) FilteredExemplarReservoir[N] {
return &dropRes[N]{}
}

type dropRes[N int64 | float64] struct{}

// Offer does nothing, all measurements offered will be dropped.
func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {}

// Collect resets dest. No exemplars will ever be returned.
func (r *dropRes[N]) Collect(dest *[]exemplar.Exemplar) {
clear(*dest) // Erase elements to let GC collect objects
*dest = (*dest)[:0]
}
3 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go generated vendored
@ -6,7 +6,7 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg
import (
"sync"

"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

@ -17,6 +17,7 @@ var exemplarPool = sync.Pool{
func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) {
dest := exemplarPool.Get().(*[]exemplar.Exemplar)
defer func() {
clear(*dest) // Erase elements to let GC collect objects.
*dest = (*dest)[:0]
exemplarPool.Put(dest)
}()
87 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go generated vendored
@ -12,7 +12,6 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
@ -31,7 +30,7 @@ const (
|
||||
// expoHistogramDataPoint is a single data point in an exponential histogram.
|
||||
type expoHistogramDataPoint[N int64 | float64] struct {
|
||||
attrs attribute.Set
|
||||
res exemplar.FilteredReservoir[N]
|
||||
res FilteredExemplarReservoir[N]
|
||||
|
||||
count uint64
|
||||
min N
|
||||
@ -42,14 +41,14 @@ type expoHistogramDataPoint[N int64 | float64] struct {
|
||||
noMinMax bool
|
||||
noSum bool
|
||||
|
||||
scale int
|
||||
scale int32
|
||||
|
||||
posBuckets expoBuckets
|
||||
negBuckets expoBuckets
|
||||
zeroCount uint64
|
||||
}
|
||||
|
||||
func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize, maxScale int, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
|
||||
func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
|
||||
f := math.MaxFloat64
|
||||
max := N(f) // if N is int64, max will overflow to -9223372036854775808
|
||||
min := N(-f)
|
||||
@ -119,11 +118,13 @@ func (p *expoHistogramDataPoint[N]) record(v N) {
|
||||
}
|
||||
|
||||
// getBin returns the bin v should be recorded into.
|
||||
func (p *expoHistogramDataPoint[N]) getBin(v float64) int {
|
||||
frac, exp := math.Frexp(v)
|
||||
func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 {
|
||||
frac, expInt := math.Frexp(v)
|
||||
// 11-bit exponential.
|
||||
exp := int32(expInt) // nolint: gosec
|
||||
if p.scale <= 0 {
|
||||
// Because of the choice of fraction is always 1 power of two higher than we want.
|
||||
correction := 1
|
||||
var correction int32 = 1
|
||||
if frac == .5 {
|
||||
// If v is an exact power of two the frac will be .5 and the exp
|
||||
// will be one higher than we want.
|
||||
@ -131,7 +132,7 @@ func (p *expoHistogramDataPoint[N]) getBin(v float64) int {
|
||||
}
|
||||
return (exp - correction) >> (-p.scale)
|
||||
}
|
||||
return exp<<p.scale + int(math.Log(frac)*scaleFactors[p.scale]) - 1
|
||||
return exp<<p.scale + int32(math.Log(frac)*scaleFactors[p.scale]) - 1
|
||||
}
|
||||
|
||||
// scaleFactors are constants used in calculating the logarithm index. They are
|
||||
@ -162,20 +163,20 @@ var scaleFactors = [21]float64{
|
||||
|
||||
// scaleChange returns the magnitude of the scale change needed to fit bin in
|
||||
// the bucket. If no scale change is needed 0 is returned.
|
||||
func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int {
|
||||
func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int) int32 {
|
||||
if length == 0 {
|
||||
// No need to rescale if there are no buckets.
|
||||
return 0
|
||||
}
|
||||
|
||||
low := startBin
|
||||
high := bin
|
||||
low := int(startBin)
|
||||
high := int(bin)
|
||||
if startBin >= bin {
|
||||
low = bin
|
||||
high = startBin + length - 1
|
||||
low = int(bin)
|
||||
high = int(startBin) + length - 1
|
||||
}
|
||||
|
||||
count := 0
|
||||
var count int32
|
||||
for high-low >= p.maxSize {
|
||||
low = low >> 1
|
||||
high = high >> 1
|
||||
@ -189,39 +190,39 @@ func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int {
|
||||
|
||||
// expoBuckets is a set of buckets in an exponential histogram.
|
||||
type expoBuckets struct {
|
||||
startBin int
|
||||
startBin int32
|
||||
counts []uint64
|
||||
}
|
||||
|
||||
// record increments the count for the given bin, and expands the buckets if needed.
|
||||
// Size changes must be done before calling this function.
|
||||
func (b *expoBuckets) record(bin int) {
|
||||
func (b *expoBuckets) record(bin int32) {
|
||||
if len(b.counts) == 0 {
|
||||
b.counts = []uint64{1}
|
||||
b.startBin = bin
|
||||
return
|
||||
}
|
||||
|
||||
endBin := b.startBin + len(b.counts) - 1
|
||||
endBin := int(b.startBin) + len(b.counts) - 1
|
||||
|
||||
// if the new bin is inside the current range
|
||||
if bin >= b.startBin && bin <= endBin {
|
||||
if bin >= b.startBin && int(bin) <= endBin {
|
||||
b.counts[bin-b.startBin]++
|
||||
return
|
||||
}
|
||||
// if the new bin is before the current start add spaces to the counts
|
||||
if bin < b.startBin {
|
||||
origLen := len(b.counts)
|
||||
newLength := endBin - bin + 1
|
||||
newLength := endBin - int(bin) + 1
|
||||
shift := b.startBin - bin
|
||||
|
||||
if newLength > cap(b.counts) {
|
||||
b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
|
||||
}
|
||||
|
||||
copy(b.counts[shift:origLen+shift], b.counts[:])
|
||||
copy(b.counts[shift:origLen+int(shift)], b.counts[:])
|
||||
b.counts = b.counts[:newLength]
|
||||
for i := 1; i < shift; i++ {
|
||||
for i := 1; i < int(shift); i++ {
|
||||
b.counts[i] = 0
|
||||
}
|
||||
b.startBin = bin
|
||||
@ -229,17 +230,17 @@ func (b *expoBuckets) record(bin int) {
|
||||
return
|
||||
}
|
||||
// if the new is after the end add spaces to the end
|
||||
if bin > endBin {
|
||||
if bin-b.startBin < cap(b.counts) {
|
||||
if int(bin) > endBin {
|
||||
if int(bin-b.startBin) < cap(b.counts) {
|
||||
b.counts = b.counts[:bin-b.startBin+1]
|
||||
for i := endBin + 1 - b.startBin; i < len(b.counts); i++ {
|
||||
for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ {
|
||||
b.counts[i] = 0
|
||||
}
|
||||
b.counts[bin-b.startBin] = 1
|
||||
return
|
||||
}
|
||||
|
||||
end := make([]uint64, bin-b.startBin-len(b.counts)+1)
|
||||
end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1)
|
||||
b.counts = append(b.counts, end...)
|
||||
b.counts[bin-b.startBin] = 1
|
||||
}
|
||||
@ -247,7 +248,7 @@ func (b *expoBuckets) record(bin int) {
|
||||
|
||||
// downscale shrinks a bucket by a factor of 2*s. It will sum counts into the
|
||||
// correct lower resolution bucket.
|
||||
func (b *expoBuckets) downscale(delta int) {
|
||||
func (b *expoBuckets) downscale(delta int32) {
|
||||
// Example
|
||||
// delta = 2
|
||||
// Original offset: -6
|
||||
@ -262,19 +263,19 @@ func (b *expoBuckets) downscale(delta int) {
|
||||
return
|
||||
}
|
||||
|
||||
steps := 1 << delta
|
||||
steps := int32(1) << delta
|
||||
offset := b.startBin % steps
|
||||
offset = (offset + steps) % steps // to make offset positive
|
||||
for i := 1; i < len(b.counts); i++ {
|
||||
idx := i + offset
|
||||
if idx%steps == 0 {
|
||||
b.counts[idx/steps] = b.counts[i]
|
||||
idx := i + int(offset)
|
||||
if idx%int(steps) == 0 {
|
||||
b.counts[idx/int(steps)] = b.counts[i]
|
||||
continue
|
||||
}
|
||||
b.counts[idx/steps] += b.counts[i]
|
||||
b.counts[idx/int(steps)] += b.counts[i]
|
||||
}
|
||||
|
||||
lastIdx := (len(b.counts) - 1 + offset) / steps
|
||||
lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps)
|
||||
b.counts = b.counts[:lastIdx+1]
|
||||
b.startBin = b.startBin >> delta
|
||||
}
|
||||
@ -282,12 +283,12 @@ func (b *expoBuckets) downscale(delta int) {
|
||||
// newExponentialHistogram returns an Aggregator that summarizes a set of
|
||||
// measurements as an exponential histogram. Each histogram is scoped by attributes
|
||||
// and the aggregation cycle the measurements were made in.
|
||||
func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *expoHistogram[N] {
|
||||
func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *expoHistogram[N] {
|
||||
return &expoHistogram[N]{
|
||||
noSum: noSum,
|
||||
noMinMax: noMinMax,
|
||||
maxSize: int(maxSize),
|
||||
maxScale: int(maxScale),
|
||||
maxScale: maxScale,
|
||||
|
||||
newRes: r,
|
||||
limit: newLimiter[*expoHistogramDataPoint[N]](limit),
|
||||
@ -303,9 +304,9 @@ type expoHistogram[N int64 | float64] struct {
|
||||
noSum bool
|
||||
noMinMax bool
|
||||
maxSize int
|
||||
maxScale int
|
||||
maxScale int32
|
||||
|
||||
newRes func() exemplar.FilteredReservoir[N]
|
||||
newRes func(attribute.Set) FilteredExemplarReservoir[N]
|
||||
limit limiter[*expoHistogramDataPoint[N]]
|
||||
values map[attribute.Distinct]*expoHistogramDataPoint[N]
|
||||
valuesMu sync.Mutex
|
||||
@ -326,7 +327,7 @@ func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attrib
|
||||
v, ok := e.values[attr.Equivalent()]
|
||||
if !ok {
|
||||
v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum)
|
||||
v.res = e.newRes()
|
||||
v.res = e.newRes(attr)
|
||||
|
||||
e.values[attr.Equivalent()] = v
|
||||
}
|
||||
@ -354,15 +355,15 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
|
||||
hDPts[i].StartTime = e.start
|
||||
hDPts[i].Time = t
|
||||
hDPts[i].Count = val.count
|
||||
hDPts[i].Scale = int32(val.scale)
|
||||
hDPts[i].Scale = val.scale
|
||||
hDPts[i].ZeroCount = val.zeroCount
|
||||
hDPts[i].ZeroThreshold = 0.0
|
||||
|
||||
hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin)
|
||||
hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
|
||||
hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
|
||||
copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
|
||||
|
||||
hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin)
|
||||
hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
|
||||
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
|
||||
copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
|
||||
|
||||
@ -407,15 +408,15 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
hDPts[i].StartTime = e.start
|
||||
hDPts[i].Time = t
|
||||
hDPts[i].Count = val.count
|
||||
hDPts[i].Scale = int32(val.scale)
|
||||
hDPts[i].Scale = val.scale
|
||||
hDPts[i].ZeroCount = val.zeroCount
|
||||
hDPts[i].ZeroThreshold = 0.0
|
||||
|
||||
hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin)
|
||||
hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
|
||||
hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
|
||||
copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
|
||||
|
||||
hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin)
|
||||
hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
|
||||
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
|
||||
copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
|
||||
|
||||
|
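The getBin arithmetic above maps a value to an exponential-histogram bucket index via math.Frexp. A standalone sketch of the scale-0 case (the scale <= 0 branch with a shift of zero), using only the formula visible in this hunk plus the standard bucket-boundary convention; this is an illustration, not SDK code:

package main

import (
	"fmt"
	"math"
)

// scaleZeroBin returns the exponential-histogram bucket index of v at scale 0,
// i.e. the i with 2^i < v <= 2^(i+1).
func scaleZeroBin(v float64) int32 {
	frac, exp := math.Frexp(v) // v = frac * 2^exp, frac in [0.5, 1)
	correction := int32(1)
	if frac == 0.5 {
		// Exact powers of two report an exponent one higher than wanted.
		correction = 2
	}
	return int32(exp) - correction
}

func main() {
	for _, v := range []float64{3, 4, 5, 8, 9} {
		fmt.Printf("%v -> bucket %d\n", v, scaleZeroBin(v))
	}
	// 3 -> 1, 4 -> 1, 5 -> 2, 8 -> 2, 9 -> 3
}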
50 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go generated vendored Normal file
@ -0,0 +1,50 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"

import (
"context"
"time"

"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

// FilteredExemplarReservoir wraps a [exemplar.Reservoir] with a filter.
type FilteredExemplarReservoir[N int64 | float64] interface {
// Offer accepts the parameters associated with a measurement. The
// parameters will be stored as an exemplar if the filter decides to
// sample the measurement.
//
// The passed ctx needs to contain any baggage or span that were active
// when the measurement was made. This information may be used by the
// Reservoir in making a sampling decision.
Offer(ctx context.Context, val N, attr []attribute.KeyValue)
// Collect returns all the held exemplars in the reservoir.
Collect(dest *[]exemplar.Exemplar)
}

// filteredExemplarReservoir handles the pre-sampled exemplar of measurements made.
type filteredExemplarReservoir[N int64 | float64] struct {
filter exemplar.Filter
reservoir exemplar.Reservoir
}

// NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values
// that are allowed by the filter.
func NewFilteredExemplarReservoir[N int64 | float64](f exemplar.Filter, r exemplar.Reservoir) FilteredExemplarReservoir[N] {
return &filteredExemplarReservoir[N]{
filter: f,
reservoir: r,
}
}

func (f *filteredExemplarReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) {
if f.filter(ctx) {
// only record the current time if we are sampling this measurement.
f.reservoir.Offer(ctx, time.Now(), exemplar.NewValue(val), attr)
}
}

func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { f.reservoir.Collect(dest) }
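The wrapper above is internal, but the same composition can be written against the exported exemplar package. A sketch assuming exemplar.NewFixedSizeReservoir is exported; the trace-based filter is hand-rolled here rather than taken from any named SDK filter:

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
	"go.opentelemetry.io/otel/trace"
)

// sampledOnly is an exemplar.Filter that only passes contexts carrying a
// sampled span, which is what a trace-based exemplar filter boils down to.
func sampledOnly(ctx context.Context) bool {
	return trace.SpanContextFromContext(ctx).IsSampled()
}

// offerFiltered mirrors the Offer logic of filteredExemplarReservoir above
// using only exported types: consult the filter, then stamp the time and wrap
// the value before handing it to the reservoir.
func offerFiltered(ctx context.Context, f exemplar.Filter, r exemplar.Reservoir, val float64, attrs []attribute.KeyValue) {
	if f(ctx) {
		r.Offer(ctx, time.Now(), exemplar.NewValue(val), attrs)
	}
}

func main() {
	res := exemplar.NewFixedSizeReservoir(4)
	offerFiltered(context.Background(), sampledOnly, res, 1.5, nil)

	var out []exemplar.Exemplar
	res.Collect(&out) // empty: the background context has no sampled span
	_ = out
}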
11 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go generated vendored
@ -11,13 +11,12 @@ import (
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
type buckets[N int64 | float64] struct {
|
||||
attrs attribute.Set
|
||||
res exemplar.FilteredReservoir[N]
|
||||
res FilteredExemplarReservoir[N]
|
||||
|
||||
counts []uint64
|
||||
count uint64
|
||||
@ -48,13 +47,13 @@ type histValues[N int64 | float64] struct {
|
||||
noSum bool
|
||||
bounds []float64
|
||||
|
||||
newRes func() exemplar.FilteredReservoir[N]
|
||||
newRes func(attribute.Set) FilteredExemplarReservoir[N]
|
||||
limit limiter[*buckets[N]]
|
||||
values map[attribute.Distinct]*buckets[N]
|
||||
valuesMu sync.Mutex
|
||||
}
|
||||
|
||||
func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histValues[N] {
|
||||
func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histValues[N] {
|
||||
// The responsibility of keeping all buckets correctly associated with the
|
||||
// passed boundaries is ultimately this type's responsibility. Make a copy
|
||||
// here so we can always guarantee this. Or, in the case of failure, have
|
||||
@ -94,7 +93,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute
|
||||
//
|
||||
// buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
|
||||
b = newBuckets[N](attr, len(s.bounds)+1)
|
||||
b.res = s.newRes()
|
||||
b.res = s.newRes(attr)
|
||||
|
||||
// Ensure min and max are recorded values (not zero), for new buckets.
|
||||
b.min, b.max = value, value
|
||||
@ -109,7 +108,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute
|
||||
|
||||
// newHistogram returns an Aggregator that summarizes a set of measurements as
|
||||
// an histogram.
|
||||
func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] {
|
||||
func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histogram[N] {
|
||||
return &histogram[N]{
|
||||
histValues: newHistValues[N](boundaries, noSum, limit, r),
|
||||
noMinMax: noMinMax,
|
||||
|
11 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go generated vendored
@ -9,7 +9,6 @@ import (
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
@ -17,10 +16,10 @@ import (
|
||||
type datapoint[N int64 | float64] struct {
|
||||
attrs attribute.Set
|
||||
value N
|
||||
res exemplar.FilteredReservoir[N]
|
||||
res FilteredExemplarReservoir[N]
|
||||
}
|
||||
|
||||
func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] {
|
||||
func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *lastValue[N] {
|
||||
return &lastValue[N]{
|
||||
newRes: r,
|
||||
limit: newLimiter[datapoint[N]](limit),
|
||||
@ -33,7 +32,7 @@ func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReserv
|
||||
type lastValue[N int64 | float64] struct {
|
||||
sync.Mutex
|
||||
|
||||
newRes func() exemplar.FilteredReservoir[N]
|
||||
newRes func(attribute.Set) FilteredExemplarReservoir[N]
|
||||
limit limiter[datapoint[N]]
|
||||
values map[attribute.Distinct]datapoint[N]
|
||||
start time.Time
|
||||
@ -46,7 +45,7 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.
|
||||
attr := s.limit.Attributes(fltrAttr, s.values)
|
||||
d, ok := s.values[attr.Equivalent()]
|
||||
if !ok {
|
||||
d.res = s.newRes()
|
||||
d.res = s.newRes(attr)
|
||||
}
|
||||
|
||||
d.attrs = attr
|
||||
@ -115,7 +114,7 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) in
|
||||
|
||||
// newPrecomputedLastValue returns an aggregator that summarizes a set of
|
||||
// observations as the last one made.
|
||||
func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *precomputedLastValue[N] {
|
||||
func newPrecomputedLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedLastValue[N] {
|
||||
return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)}
|
||||
}
|
||||
|
||||
|
17 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go generated vendored
@ -9,25 +9,24 @@ import (
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
type sumValue[N int64 | float64] struct {
|
||||
n N
|
||||
res exemplar.FilteredReservoir[N]
|
||||
res FilteredExemplarReservoir[N]
|
||||
attrs attribute.Set
|
||||
}
|
||||
|
||||
// valueMap is the storage for sums.
|
||||
type valueMap[N int64 | float64] struct {
|
||||
sync.Mutex
|
||||
newRes func() exemplar.FilteredReservoir[N]
|
||||
newRes func(attribute.Set) FilteredExemplarReservoir[N]
|
||||
limit limiter[sumValue[N]]
|
||||
values map[attribute.Distinct]sumValue[N]
|
||||
}
|
||||
|
||||
func newValueMap[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *valueMap[N] {
|
||||
func newValueMap[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *valueMap[N] {
|
||||
return &valueMap[N]{
|
||||
newRes: r,
|
||||
limit: newLimiter[sumValue[N]](limit),
|
||||
@ -42,7 +41,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S
|
||||
attr := s.limit.Attributes(fltrAttr, s.values)
|
||||
v, ok := s.values[attr.Equivalent()]
|
||||
if !ok {
|
||||
v.res = s.newRes()
|
||||
v.res = s.newRes(attr)
|
||||
}
|
||||
|
||||
v.attrs = attr
|
||||
@ -55,7 +54,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S
|
||||
// newSum returns an aggregator that summarizes a set of measurements as their
|
||||
// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
|
||||
// the measurements were made in.
|
||||
func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *sum[N] {
|
||||
func newSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *sum[N] {
|
||||
return &sum[N]{
|
||||
valueMap: newValueMap[N](limit, r),
|
||||
monotonic: monotonic,
|
||||
@ -142,9 +141,9 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
}
|
||||
|
||||
// newPrecomputedSum returns an aggregator that summarizes a set of
|
||||
// observatrions as their arithmetic sum. Each sum is scoped by attributes and
|
||||
// observations as their arithmetic sum. Each sum is scoped by attributes and
|
||||
// the aggregation cycle the measurements were made in.
|
||||
func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] {
|
||||
func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedSum[N] {
|
||||
return &precomputedSum[N]{
|
||||
valueMap: newValueMap[N](limit, r),
|
||||
monotonic: monotonic,
|
||||
@ -152,7 +151,7 @@ func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() ex
|
||||
}
|
||||
}
|
||||
|
||||
// precomputedSum summarizes a set of observatrions as their arithmetic sum.
|
||||
// precomputedSum summarizes a set of observations as their arithmetic sum.
|
||||
type precomputedSum[N int64 | float64] struct {
|
||||
*valueMap[N]
|
||||
|
||||
|
23 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go generated vendored
@ -1,23 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
)
|
||||
|
||||
// Drop returns a [FilteredReservoir] that drops all measurements it is offered.
|
||||
func Drop[N int64 | float64]() FilteredReservoir[N] { return &dropRes[N]{} }
|
||||
|
||||
type dropRes[N int64 | float64] struct{}
|
||||
|
||||
// Offer does nothing, all measurements offered will be dropped.
|
||||
func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {}
|
||||
|
||||
// Collect resets dest. No exemplars will ever be returned.
|
||||
func (r *dropRes[N]) Collect(dest *[]Exemplar) {
|
||||
*dest = (*dest)[:0]
|
||||
}
|
49 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go generated vendored
@ -1,49 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
)
|
||||
|
||||
// FilteredReservoir wraps a [Reservoir] with a filter.
|
||||
type FilteredReservoir[N int64 | float64] interface {
|
||||
// Offer accepts the parameters associated with a measurement. The
|
||||
// parameters will be stored as an exemplar if the filter decides to
|
||||
// sample the measurement.
|
||||
//
|
||||
// The passed ctx needs to contain any baggage or span that were active
|
||||
// when the measurement was made. This information may be used by the
|
||||
// Reservoir in making a sampling decision.
|
||||
Offer(ctx context.Context, val N, attr []attribute.KeyValue)
|
||||
// Collect returns all the held exemplars in the reservoir.
|
||||
Collect(dest *[]Exemplar)
|
||||
}
|
||||
|
||||
// filteredReservoir handles the pre-sampled exemplar of measurements made.
|
||||
type filteredReservoir[N int64 | float64] struct {
|
||||
filter Filter
|
||||
reservoir Reservoir
|
||||
}
|
||||
|
||||
// NewFilteredReservoir creates a [FilteredReservoir] which only offers values
|
||||
// that are allowed by the filter.
|
||||
func NewFilteredReservoir[N int64 | float64](f Filter, r Reservoir) FilteredReservoir[N] {
|
||||
return &filteredReservoir[N]{
|
||||
filter: f,
|
||||
reservoir: r,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *filteredReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) {
|
||||
if f.filter(ctx) {
|
||||
// only record the current time if we are sampling this measurment.
|
||||
f.reservoir.Offer(ctx, time.Now(), NewValue(val), attr)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *filteredReservoir[N]) Collect(dest *[]Exemplar) { f.reservoir.Collect(dest) }
|
46 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go generated vendored
@ -1,46 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
)
|
||||
|
||||
// Histogram returns a [Reservoir] that samples the last measurement that falls
|
||||
// within a histogram bucket. The histogram bucket upper-boundaries are define
|
||||
// by bounds.
|
||||
//
|
||||
// The passed bounds will be sorted by this function.
|
||||
func Histogram(bounds []float64) Reservoir {
|
||||
slices.Sort(bounds)
|
||||
return &histRes{
|
||||
bounds: bounds,
|
||||
storage: newStorage(len(bounds) + 1),
|
||||
}
|
||||
}
|
||||
|
||||
type histRes struct {
|
||||
*storage
|
||||
|
||||
// bounds are bucket bounds in ascending order.
|
||||
bounds []float64
|
||||
}
|
||||
|
||||
func (r *histRes) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) {
|
||||
var x float64
|
||||
switch v.Type() {
|
||||
case Int64ValueType:
|
||||
x = float64(v.Int64())
|
||||
case Float64ValueType:
|
||||
x = v.Float64()
|
||||
default:
|
||||
panic("unknown value type")
|
||||
}
|
||||
r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a)
|
||||
}
|
46 vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go generated vendored
@ -10,39 +10,23 @@ package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// Exemplars is an experimental feature flag that defines if exemplars
|
||||
// should be recorded for metric data-points.
|
||||
//
|
||||
// To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable
|
||||
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
|
||||
// will also enable this).
|
||||
Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) {
|
||||
if strings.ToLower(v) == "true" {
|
||||
return v, true
|
||||
}
|
||||
return "", false
|
||||
})
|
||||
|
||||
// CardinalityLimit is an experimental feature flag that defines if
|
||||
// cardinality limits should be applied to the recorded metric data-points.
|
||||
//
|
||||
// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment
|
||||
// variable to the integer limit value you want to use.
|
||||
//
|
||||
// Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0
|
||||
// will disable the cardinality limits.
|
||||
CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) {
|
||||
n, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return n, true
|
||||
})
|
||||
)
|
||||
// CardinalityLimit is an experimental feature flag that defines if
|
||||
// cardinality limits should be applied to the recorded metric data-points.
|
||||
//
|
||||
// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment
|
||||
// variable to the integer limit value you want to use.
|
||||
//
|
||||
// Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0
|
||||
// will disable the cardinality limits.
|
||||
var CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) {
|
||||
n, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return n, true
|
||||
})
|
||||
|
||||
// Feature is an experimental feature control flag. It provides a uniform way
|
||||
// to interact with these feature flags and parse their values.
|
||||
|
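After the x.go change above only the CARDINALITY_LIMIT flag remains (the EXEMPLAR flag block is removed), and its parse function reduces to strconv.Atoi over the environment value. A standalone sketch of that parse, using the OTEL_GO_X_CARDINALITY_LIMIT variable documented in the hunk above:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// cardinalityLimit mirrors the CARDINALITY_LIMIT parse function above: the
// value must parse as an integer; per the docs, a value <= 0 disables the
// cardinality limits.
func cardinalityLimit() (int, bool) {
	n, err := strconv.Atoi(os.Getenv("OTEL_GO_X_CARDINALITY_LIMIT"))
	if err != nil {
		return 0, false
	}
	return n, true
}

func main() {
	os.Setenv("OTEL_GO_X_CARDINALITY_LIMIT", "2000")
	if n, ok := cardinalityLimit(); ok {
		fmt.Println("cardinality limit:", n) // cardinality limit: 2000
	}
}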
9 vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go generated vendored
@ -113,18 +113,17 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr
if err != nil {
return err
}
var errs []error
for _, producer := range mr.externalProducers.Load().([]Producer) {
externalMetrics, err := producer.Produce(ctx)
if err != nil {
errs = append(errs, err)
externalMetrics, e := producer.Produce(ctx)
if e != nil {
err = errors.Join(err, e)
}
rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
}

global.Debug("ManualReader collection", "Data", rm)

return unifyErrors(errs)
return err
}

// MarshalLog returns logging data about the ManualReader.
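The change above (and the matching one in periodic_reader.go below) replaces the errs slice plus unifyErrors helper with stdlib error joining. The accumulation pattern in isolation, with hypothetical producers standing in for the reader's external producers:

package main

import (
	"errors"
	"fmt"
)

// produceAll shows the pattern adopted above: errors.Join treats a nil err as
// a no-op, so failures from each producer fold into one error while
// successful results are still collected.
func produceAll(producers []func() (string, error)) ([]string, error) {
	var err error
	var out []string
	for _, produce := range producers {
		m, e := produce()
		if e != nil {
			err = errors.Join(err, e)
			continue
		}
		out = append(out, m)
	}
	return out, err
}

func main() {
	metrics, err := produceAll([]func() (string, error){
		func() (string, error) { return "scope-a", nil },
		func() (string, error) { return "", errors.New("producer b failed") },
	})
	fmt.Println(metrics, err) // [scope-a] producer b failed
}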
141 vendor/go.opentelemetry.io/otel/sdk/metric/meter.go generated vendored
@ -150,6 +150,11 @@ func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int6
|
||||
continue
|
||||
}
|
||||
inst.appendMeasures(in)
|
||||
|
||||
// Add the measures to the pipeline. It is required to maintain
|
||||
// measures per pipeline to avoid calling the measure that
|
||||
// is not part of the pipeline.
|
||||
insert.pipeline.addInt64Measure(inst.observableID, in)
|
||||
for _, cback := range callbacks {
|
||||
inst := int64Observer{measures: in}
|
||||
fn := cback
|
||||
@ -185,6 +190,11 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser
|
||||
// configured with options. The instrument is used to asynchronously record
|
||||
// int64 measurements once per a measurement collection cycle. Only the
|
||||
// measurements recorded during the collection cycle are exported.
|
||||
//
|
||||
// If Int64ObservableUpDownCounter is invoked repeatedly with the same Name,
|
||||
// Description, and Unit, only the first set of callbacks provided are used.
|
||||
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
|
||||
// if instrumentation can be created multiple times with different callbacks.
|
||||
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
|
||||
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
|
||||
id := Instrument{
|
||||
@ -201,6 +211,11 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6
|
||||
// configured with options. The instrument is used to asynchronously record
|
||||
// instantaneous int64 measurements once per a measurement collection cycle.
|
||||
// Only the measurements recorded during the collection cycle are exported.
|
||||
//
|
||||
// If Int64ObservableGauge is invoked repeatedly with the same Name,
|
||||
// Description, and Unit, only the first set of callbacks provided are used.
|
||||
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
|
||||
// if instrumentation can be created multiple times with different callbacks.
|
||||
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
|
||||
cfg := metric.NewInt64ObservableGaugeConfig(options...)
|
||||
id := Instrument{
|
||||
@ -299,6 +314,11 @@ func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Fl
|
||||
continue
|
||||
}
|
||||
inst.appendMeasures(in)
|
||||
|
||||
// Add the measures to the pipeline. It is required to maintain
|
||||
// measures per pipeline to avoid calling the measure that
|
||||
// is not part of the pipeline.
|
||||
insert.pipeline.addFloat64Measure(inst.observableID, in)
|
||||
for _, cback := range callbacks {
|
||||
inst := float64Observer{measures: in}
|
||||
fn := cback
|
||||
@ -334,6 +354,11 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O
|
||||
// and configured with options. The instrument is used to asynchronously record
|
||||
// float64 measurements once per a measurement collection cycle. Only the
|
||||
// measurements recorded during the collection cycle are exported.
|
||||
//
|
||||
// If Float64ObservableUpDownCounter is invoked repeatedly with the same Name,
|
||||
// Description, and Unit, only the first set of callbacks provided are used.
|
||||
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
|
||||
// if instrumentation can be created multiple times with different callbacks.
|
||||
func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
|
||||
cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
|
||||
id := Instrument{
|
||||
@ -350,6 +375,11 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl
|
||||
// configured with options. The instrument is used to asynchronously record
|
||||
// instantaneous float64 measurements once per a measurement collection cycle.
|
||||
// Only the measurements recorded during the collection cycle are exported.
|
||||
//
|
||||
// If Float64ObservableGauge is invoked repeatedly with the same Name,
|
||||
// Description, and Unit, only the first set of callbacks provided are used.
|
||||
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
|
||||
// if instrumentation can be created multiple times with different callbacks.
|
||||
func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
|
||||
cfg := metric.NewFloat64ObservableGaugeConfig(options...)
|
||||
id := Instrument{
|
||||
@ -421,73 +451,80 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable)
|
||||
return noopRegister{}, nil
|
||||
}
|
||||
|
||||
reg := newObserver()
|
||||
var errs multierror
|
||||
var err error
|
||||
validInstruments := make([]metric.Observable, 0, len(insts))
|
||||
for _, inst := range insts {
|
||||
// Unwrap any global.
|
||||
if u, ok := inst.(interface {
|
||||
Unwrap() metric.Observable
|
||||
}); ok {
|
||||
inst = u.Unwrap()
|
||||
}
|
||||
|
||||
switch o := inst.(type) {
|
||||
case int64Observable:
|
||||
if err := o.registerable(m); err != nil {
|
||||
if !errors.Is(err, errEmptyAgg) {
|
||||
errs.append(err)
|
||||
if e := o.registerable(m); e != nil {
|
||||
if !errors.Is(e, errEmptyAgg) {
|
||||
err = errors.Join(err, e)
|
||||
}
|
||||
continue
|
||||
}
|
||||
reg.registerInt64(o.observablID)
|
||||
|
||||
validInstruments = append(validInstruments, inst)
|
||||
case float64Observable:
|
||||
if err := o.registerable(m); err != nil {
|
||||
if !errors.Is(err, errEmptyAgg) {
|
||||
errs.append(err)
|
||||
if e := o.registerable(m); e != nil {
|
||||
if !errors.Is(e, errEmptyAgg) {
|
||||
err = errors.Join(err, e)
|
||||
}
|
||||
continue
|
||||
}
|
||||
reg.registerFloat64(o.observablID)
|
||||
|
||||
validInstruments = append(validInstruments, inst)
|
||||
default:
|
||||
// Instrument external to the SDK.
|
||||
return nil, fmt.Errorf("invalid observable: from different implementation")
|
||||
}
|
||||
}
|
||||
|
||||
err := errs.errorOrNil()
|
||||
if reg.len() == 0 {
|
||||
if len(validInstruments) == 0 {
|
||||
// All insts use drop aggregation or are invalid.
|
||||
return noopRegister{}, err
|
||||
}
|
||||
|
||||
// Some or all instruments were valid.
|
||||
cback := func(ctx context.Context) error { return f(ctx, reg) }
|
||||
return m.pipes.registerMultiCallback(cback), err
|
||||
unregs := make([]func(), len(m.pipes))
|
||||
for ix, pipe := range m.pipes {
|
||||
reg := newObserver(pipe)
|
||||
for _, inst := range validInstruments {
|
||||
switch o := inst.(type) {
|
||||
case int64Observable:
|
||||
reg.registerInt64(o.observableID)
|
||||
case float64Observable:
|
||||
reg.registerFloat64(o.observableID)
|
||||
}
|
||||
}
|
||||
|
||||
// Some or all instruments were valid.
|
||||
cBack := func(ctx context.Context) error { return f(ctx, reg) }
|
||||
unregs[ix] = pipe.addMultiCallback(cBack)
|
||||
}
|
||||
|
||||
return unregisterFuncs{f: unregs}, err
|
||||
}
|
||||
|
||||
type observer struct {
|
||||
embedded.Observer
|
||||
|
||||
float64 map[observablID[float64]]struct{}
|
||||
int64 map[observablID[int64]]struct{}
|
||||
pipe *pipeline
|
||||
float64 map[observableID[float64]]struct{}
|
||||
int64 map[observableID[int64]]struct{}
|
||||
}
|
||||
|
||||
func newObserver() observer {
|
||||
func newObserver(p *pipeline) observer {
|
||||
return observer{
|
||||
float64: make(map[observablID[float64]]struct{}),
|
||||
int64: make(map[observablID[int64]]struct{}),
|
||||
pipe: p,
|
||||
float64: make(map[observableID[float64]]struct{}),
|
||||
int64: make(map[observableID[int64]]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (r observer) len() int {
|
||||
return len(r.float64) + len(r.int64)
|
||||
}
|
||||
|
||||
func (r observer) registerFloat64(id observablID[float64]) {
|
||||
func (r observer) registerFloat64(id observableID[float64]) {
|
||||
r.float64[id] = struct{}{}
|
||||
}
|
||||
|
||||
func (r observer) registerInt64(id observablID[int64]) {
|
||||
func (r observer) registerInt64(id observableID[int64]) {
|
||||
r.int64[id] = struct{}{}
|
||||
}
|
||||
|
||||
@ -501,22 +538,12 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...
|
||||
switch conv := o.(type) {
|
||||
case float64Observable:
|
||||
oImpl = conv
|
||||
case interface {
|
||||
Unwrap() metric.Observable
|
||||
}:
|
||||
// Unwrap any global.
|
||||
async := conv.Unwrap()
|
||||
var ok bool
|
||||
if oImpl, ok = async.(float64Observable); !ok {
|
||||
global.Error(errUnknownObserver, "failed to record asynchronous")
|
||||
return
|
||||
}
|
||||
default:
|
||||
global.Error(errUnknownObserver, "failed to record")
|
||||
return
|
||||
}
|
||||
|
||||
if _, registered := r.float64[oImpl.observablID]; !registered {
|
||||
if _, registered := r.float64[oImpl.observableID]; !registered {
|
||||
if !oImpl.dropAggregation {
|
||||
global.Error(errUnregObserver, "failed to record",
|
||||
"name", oImpl.name,
|
||||
@ -528,7 +555,12 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...
|
||||
return
|
||||
}
|
||||
c := metric.NewObserveConfig(opts)
|
||||
oImpl.observe(v, c.Attributes())
|
||||
// Access to r.pipe.float64Measure is already guarded by a lock in pipeline.produce.
|
||||
// TODO (#5946): Refactor pipeline and observable measures.
|
||||
measures := r.pipe.float64Measures[oImpl.observableID]
|
||||
for _, m := range measures {
|
||||
m(context.Background(), v, c.Attributes())
|
||||
}
|
||||
}
|
||||
|
||||
func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) {
|
||||
@ -536,22 +568,12 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric
|
||||
switch conv := o.(type) {
|
||||
case int64Observable:
|
||||
oImpl = conv
|
||||
case interface {
|
||||
Unwrap() metric.Observable
|
||||
}:
|
||||
// Unwrap any global.
|
||||
async := conv.Unwrap()
|
||||
var ok bool
|
||||
if oImpl, ok = async.(int64Observable); !ok {
|
||||
global.Error(errUnknownObserver, "failed to record asynchronous")
|
||||
return
|
||||
}
|
||||
default:
|
||||
global.Error(errUnknownObserver, "failed to record")
|
||||
return
|
||||
}
|
||||
|
||||
if _, registered := r.int64[oImpl.observablID]; !registered {
|
||||
if _, registered := r.int64[oImpl.observableID]; !registered {
|
||||
if !oImpl.dropAggregation {
|
||||
global.Error(errUnregObserver, "failed to record",
|
||||
"name", oImpl.name,
|
||||
@ -563,7 +585,12 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric
|
||||
return
|
||||
}
|
||||
c := metric.NewObserveConfig(opts)
|
||||
oImpl.observe(v, c.Attributes())
|
||||
// Access to r.pipe.int64Measures is already guarded b a lock in pipeline.produce.
|
||||
// TODO (#5946): Refactor pipeline and observable measures.
|
||||
measures := r.pipe.int64Measures[oImpl.observableID]
|
||||
for _, m := range measures {
|
||||
m(context.Background(), v, c.Attributes())
|
||||
}
|
||||
}
|
||||
|
||||
type noopRegister struct{ embedded.Registration }
|
||||
|
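The RegisterCallback rework above registers one observer per pipeline, but from the caller's side the public API is unchanged. A usage sketch against the metric API; the meter and instrument names are illustrative:

package main

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func registerMemGauge(mp *sdkmetric.MeterProvider) (metric.Registration, error) {
	meter := mp.Meter("example")

	heap, err := meter.Int64ObservableGauge("process.runtime.heap_alloc")
	if err != nil {
		return nil, err
	}

	// The callback runs once per collection cycle; only instruments passed to
	// RegisterCallback may be observed inside it. The returned Registration's
	// Unregister removes the callback again.
	return meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
		var ms runtime.MemStats
		runtime.ReadMemStats(&ms)
		o.ObserveInt64(heap, int64(ms.HeapAlloc))
		return nil
	}, heap)
}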
9 vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go generated vendored
@ -251,18 +251,17 @@ func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricd
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var errs []error
|
||||
for _, producer := range r.externalProducers.Load().([]Producer) {
|
||||
externalMetrics, err := producer.Produce(ctx)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
externalMetrics, e := producer.Produce(ctx)
|
||||
if e != nil {
|
||||
err = errors.Join(err, e)
|
||||
}
|
||||
rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
|
||||
}
|
||||
|
||||
global.Debug("PeriodicReader collection", "Data", rm)
|
||||
|
||||
return unifyErrors(errs)
|
||||
return err
|
||||
}
|
||||
|
||||
// export exports metric data m using r's exporter.
|
||||
|
132 vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go generated vendored
@@ -8,14 +8,13 @@ import (
"context"
"errors"
"fmt"
"strings"
"sync"
"sync/atomic"

"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/embedded"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/metric/exemplar"
"go.opentelemetry.io/otel/sdk/metric/internal"
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
"go.opentelemetry.io/otel/sdk/metric/internal/x"
@@ -38,14 +37,17 @@ type instrumentSync struct {
compAgg aggregate.ComputeAggregation
}

func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline {
func newPipeline(res *resource.Resource, reader Reader, views []View, exemplarFilter exemplar.Filter) *pipeline {
if res == nil {
res = resource.Empty()
}
return &pipeline{
resource: res,
reader: reader,
views: views,
resource: res,
reader: reader,
views: views,
int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{},
float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{},
exemplarFilter: exemplarFilter,
// aggregations is lazy allocated when needed.
}
}
@@ -63,9 +65,26 @@ type pipeline struct {
views []View

sync.Mutex
aggregations map[instrumentation.Scope][]instrumentSync
callbacks []func(context.Context) error
multiCallbacks list.List
int64Measures map[observableID[int64]][]aggregate.Measure[int64]
float64Measures map[observableID[float64]][]aggregate.Measure[float64]
aggregations map[instrumentation.Scope][]instrumentSync
callbacks []func(context.Context) error
multiCallbacks list.List
exemplarFilter exemplar.Filter
}

// addInt64Measure adds a new int64 measure to the pipeline for each observer.
func (p *pipeline) addInt64Measure(id observableID[int64], m []aggregate.Measure[int64]) {
p.Lock()
defer p.Unlock()
p.int64Measures[id] = m
}

// addFloat64Measure adds a new float64 measure to the pipeline for each observer.
func (p *pipeline) addFloat64Measure(id observableID[float64], m []aggregate.Measure[float64]) {
p.Lock()
defer p.Unlock()
p.float64Measures[id] = m
}

// addSync adds the instrumentSync to pipeline p with scope. This method is not
@@ -105,14 +124,15 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics)
p.Lock()
defer p.Unlock()

var errs multierror
var err error
for _, c := range p.callbacks {
// TODO make the callbacks parallel. ( #3034 )
if err := c(ctx); err != nil {
errs.append(err)
if e := c(ctx); e != nil {
err = errors.Join(err, e)
}
if err := ctx.Err(); err != nil {
rm.Resource = nil
clear(rm.ScopeMetrics) // Erase elements to let GC collect objects.
rm.ScopeMetrics = rm.ScopeMetrics[:0]
return err
}
@@ -120,12 +140,13 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics)
for e := p.multiCallbacks.Front(); e != nil; e = e.Next() {
// TODO make the callbacks parallel. ( #3034 )
f := e.Value.(multiCallback)
if err := f(ctx); err != nil {
errs.append(err)
if e := f(ctx); e != nil {
err = errors.Join(err, e)
}
if err := ctx.Err(); err != nil {
// This means the context expired before we finished running callbacks.
rm.Resource = nil
clear(rm.ScopeMetrics) // Erase elements to let GC collect objects.
rm.ScopeMetrics = rm.ScopeMetrics[:0]
return err
}
@@ -157,7 +178,7 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics)

rm.ScopeMetrics = rm.ScopeMetrics[:i]

return errs.errorOrNil()
return err
}

// inserter facilitates inserting of new instruments from a single scope into a
@@ -219,7 +240,7 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation)
measures []aggregate.Measure[N]
)

errs := &multierror{wrapped: errCreatingAggregators}
var err error
seen := make(map[uint64]struct{})
for _, v := range i.pipeline.views {
stream, match := v(inst)
@@ -227,9 +248,9 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation)
continue
}
matched = true
in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
if err != nil {
errs.append(err)
in, id, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
if e != nil {
err = errors.Join(err, e)
}
if in == nil { // Drop aggregation.
continue
@@ -242,8 +263,12 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation)
measures = append(measures, in)
}

if err != nil {
err = errors.Join(errCreatingAggregators, err)
}

if matched {
return measures, errs.errorOrNil()
return measures, err
}

// Apply implicit default view if no explicit matched.
@@ -252,15 +277,18 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation)
Description: inst.Description,
Unit: inst.Unit,
}
in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
if err != nil {
errs.append(err)
in, _, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
if e != nil {
if err == nil {
err = errCreatingAggregators
}
err = errors.Join(err, e)
}
if in != nil {
// Ensured to have not seen given matched was false.
measures = append(measures, in)
}
return measures, errs.errorOrNil()
return measures, err
}

// addCallback registers a single instrument callback to be run when
@@ -329,6 +357,9 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum
// The view explicitly requested the default aggregation.
stream.Aggregation = DefaultAggregationSelector(kind)
}
if stream.ExemplarReservoirProviderSelector == nil {
stream.ExemplarReservoirProviderSelector = DefaultExemplarReservoirProviderSelector
}

if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil {
return nil, 0, fmt.Errorf(
@@ -349,7 +380,7 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum
cv := i.aggregators.Lookup(normID, func() aggVal[N] {
b := aggregate.Builder[N]{
Temporality: i.pipeline.reader.temporality(kind),
ReservoirFunc: reservoirFunc[N](stream.Aggregation),
ReservoirFunc: reservoirFunc[N](stream.ExemplarReservoirProviderSelector(stream.Aggregation), i.pipeline.exemplarFilter),
}
b.Filter = stream.AttributeFilter
// A value less than or equal to zero will disable the aggregation
@@ -552,24 +583,16 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error {
// measurement.
type pipelines []*pipeline

func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines {
func newPipelines(res *resource.Resource, readers []Reader, views []View, exemplarFilter exemplar.Filter) pipelines {
pipes := make([]*pipeline, 0, len(readers))
for _, r := range readers {
p := newPipeline(res, r, views)
p := newPipeline(res, r, views, exemplarFilter)
r.register(p)
pipes = append(pipes, p)
}
return pipes
}

func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration {
unregs := make([]func(), len(p))
for i, pipe := range p {
unregs[i] = pipe.addMultiCallback(c)
}
return unregisterFuncs{f: unregs}
}

type unregisterFuncs struct {
embedded.Registration
f []func()
@@ -602,15 +625,15 @@ func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) reso
func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) {
var measures []aggregate.Measure[N]

errs := &multierror{}
var err error
for _, i := range r.inserters {
in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind))
if err != nil {
errs.append(err)
in, e := i.Instrument(id, i.readerDefaultAggregation(id.Kind))
if e != nil {
err = errors.Join(err, e)
}
measures = append(measures, in...)
}
return measures, errs.errorOrNil()
return measures, err
}

// HistogramAggregators returns the histogram Aggregators that must be updated by the instrument
@@ -619,37 +642,18 @@ func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error)
func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) {
var measures []aggregate.Measure[N]

errs := &multierror{}
var err error
for _, i := range r.inserters {
agg := i.readerDefaultAggregation(id.Kind)
if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 {
histAgg.Boundaries = boundaries
agg = histAgg
}
in, err := i.Instrument(id, agg)
if err != nil {
errs.append(err)
in, e := i.Instrument(id, agg)
if e != nil {
err = errors.Join(err, e)
}
measures = append(measures, in...)
}
return measures, errs.errorOrNil()
}

type multierror struct {
wrapped error
errors []string
}

func (m *multierror) errorOrNil() error {
if len(m.errors) == 0 {
return nil
}
if m.wrapped == nil {
return errors.New(strings.Join(m.errors, "; "))
}
return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; "))
}

func (m *multierror) append(err error) {
m.errors = append(m.errors, err.Error())
return measures, err
}
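The hunks above replace the SDK's internal `multierror` type with the standard library's `errors.Join`. A minimal standalone sketch of that accumulation pattern, with illustrative names that are not taken from the vendored code:

```go
package main

import (
	"errors"
	"fmt"
)

// runAll mirrors the pattern above: accumulate every callback error with
// errors.Join instead of a hand-rolled multierror type.
func runAll(callbacks []func() error) error {
	var err error
	for _, cb := range callbacks {
		if e := cb(); e != nil {
			err = errors.Join(err, e) // Join ignores nil operands
		}
	}
	return err // nil when no callback failed
}

func main() {
	err := runAll([]func() error{
		func() error { return nil },
		func() error { return errors.New("first failure") },
		func() error { return errors.New("second failure") },
	})
	fmt.Println(err) // prints both messages, one per line
}
```

Joined errors still work with `errors.Is`/`errors.As`, which is what lets callers keep matching sentinel errors such as `errCreatingAggregators`.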
10 vendor/go.opentelemetry.io/otel/sdk/metric/provider.go generated vendored
@@ -42,7 +42,7 @@ func NewMeterProvider(options ...Option) *MeterProvider {
flush, sdown := conf.readerSignals()

mp := &MeterProvider{
pipes: newPipelines(conf.res, conf.readers, conf.views),
pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter),
forceFlush: flush,
shutdown: sdown,
}
@@ -76,15 +76,17 @@ func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metri

c := metric.NewMeterConfig(options...)
s := instrumentation.Scope{
Name: name,
Version: c.InstrumentationVersion(),
SchemaURL: c.SchemaURL(),
Name: name,
Version: c.InstrumentationVersion(),
SchemaURL: c.SchemaURL(),
Attributes: c.InstrumentationAttributes(),
}

global.Info("Meter created",
"Name", s.Name,
"Version", s.Version,
"SchemaURL", s.SchemaURL,
"Attributes", s.Attributes,
)

return mp.meters.Lookup(s, func() *meter {
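The `Meter` hunk above starts recording instrumentation-scope `Attributes` taken from `c.InstrumentationAttributes()`. A hedged sketch of how a caller could attach such scope attributes when obtaining a meter; the scope name and attribute are invented for illustration:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	// Scope attributes travel with the meter's name, version and schema URL;
	// the SDK change above stores them on instrumentation.Scope.
	meter := otel.Meter(
		"example/app", // illustrative scope name
		metric.WithInstrumentationVersion("0.1.0"),
		metric.WithInstrumentationAttributes(attribute.String("tenant", "demo")),
	)

	counter, err := meter.Int64Counter("requests.total")
	if err != nil {
		panic(err)
	}
	counter.Add(context.Background(), 1)
}
```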
2 vendor/go.opentelemetry.io/otel/sdk/metric/reader.go generated vendored
@@ -34,7 +34,7 @@ var errNonPositiveDuration = fmt.Errorf("non-positive duration")
// start of bi-directional control flow.
//
// Typically, push-based exporters that are periodic will
// implement PeroidicExporter themselves and construct a
// implement PeriodicExporter themselves and construct a
// PeriodicReader to satisfy this interface.
//
// Pull-based exporters will typically implement Register
2 vendor/go.opentelemetry.io/otel/sdk/metric/version.go generated vendored
@@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"

// version is the current release version of the metric SDK in use.
func version() string {
return "1.28.0"
return "1.32.0"
}
11 vendor/go.opentelemetry.io/otel/sdk/metric/view.go generated vendored
@@ -96,11 +96,12 @@ func NewView(criteria Instrument, mask Stream) View {
return func(i Instrument) (Stream, bool) {
if matchFunc(i) {
return Stream{
Name: nonZero(mask.Name, i.Name),
Description: nonZero(mask.Description, i.Description),
Unit: nonZero(mask.Unit, i.Unit),
Aggregation: agg,
AttributeFilter: mask.AttributeFilter,
Name: nonZero(mask.Name, i.Name),
Description: nonZero(mask.Description, i.Description),
Unit: nonZero(mask.Unit, i.Unit),
Aggregation: agg,
AttributeFilter: mask.AttributeFilter,
ExemplarReservoirProviderSelector: mask.ExemplarReservoirProviderSelector,
}, true
}
return Stream{}, false
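The `NewView` hunk above copies the mask's `ExemplarReservoirProviderSelector` into the resulting `Stream`. A hedged sketch of the general view API this code serves; the instrument name, stream name, and attribute key are invented for illustration:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// A view that matches one instrument by name and rewrites parts of its
	// output stream; unset mask fields fall back to the instrument's own.
	view := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "http.server.request.duration"}, // criteria (illustrative)
		sdkmetric.Stream{
			Name:            "http.request.latency",
			AttributeFilter: attribute.NewAllowKeysFilter(attribute.Key("http.route")),
		},
	)

	// Registering the view on a provider applies it to every matching instrument.
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
	defer func() { _ = provider.Shutdown(context.Background()) }()
}
```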
64 vendor/go.opentelemetry.io/otel/sdk/resource/auto.go generated vendored
@@ -7,7 +7,6 @@ import (
"context"
"errors"
"fmt"
"strings"
)

// ErrPartialResource is returned by a detector when complete source
@@ -57,62 +56,37 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
// these errors will be returned. Otherwise, nil is returned.
func detect(ctx context.Context, res *Resource, detectors []Detector) error {
var (
r *Resource
errs detectErrs
err error
r *Resource
err error
e error
)

for _, detector := range detectors {
if detector == nil {
continue
}
r, err = detector.Detect(ctx)
if err != nil {
errs = append(errs, err)
if !errors.Is(err, ErrPartialResource) {
r, e = detector.Detect(ctx)
if e != nil {
err = errors.Join(err, e)
if !errors.Is(e, ErrPartialResource) {
continue
}
}
r, err = Merge(res, r)
if err != nil {
errs = append(errs, err)
r, e = Merge(res, r)
if e != nil {
err = errors.Join(err, e)
}
*res = *r
}

if len(errs) == 0 {
return nil
if err != nil {
if errors.Is(err, ErrSchemaURLConflict) {
// If there has been a merge conflict, ensure the resource has no
// schema URL.
res.schemaURL = ""
}

err = fmt.Errorf("error detecting resource: %w", err)
}
if errors.Is(errs, ErrSchemaURLConflict) {
// If there has been a merge conflict, ensure the resource has no
// schema URL.
res.schemaURL = ""
}
return errs
}

type detectErrs []error

func (e detectErrs) Error() string {
errStr := make([]string, len(e))
for i, err := range e {
errStr[i] = fmt.Sprintf("* %s", err)
}

format := "%d errors occurred detecting resource:\n\t%s"
return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t"))
}

func (e detectErrs) Unwrap() error {
switch len(e) {
case 0:
return nil
case 1:
return e[0]
}
return e[1:]
}

func (e detectErrs) Is(target error) bool {
return len(e) != 0 && errors.Is(e[0], target)
return err
}
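The `detect` hunk above drops the custom `detectErrs` slice in favour of `errors.Join`, while still letting `ErrPartialResource` pass partial results through to the merge. A hedged sketch of a caller-side detector that reports a partial resource; the detector type and attribute are invented for illustration:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

// partialDetector returns what it could find plus ErrPartialResource,
// signalling that the merge should still proceed (illustrative only).
type partialDetector struct{}

func (partialDetector) Detect(context.Context) (*resource.Resource, error) {
	r := resource.NewSchemaless(attribute.String("deployment.region", "unknown"))
	return r, fmt.Errorf("region lookup incomplete: %w", resource.ErrPartialResource)
}

func main() {
	res, err := resource.Detect(context.Background(), partialDetector{})
	if errors.Is(err, resource.ErrPartialResource) {
		// Partial data was still merged into res.
		fmt.Println("partial resource:", res.Attributes())
	} else if err != nil {
		panic(err)
	}
}
```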
6 vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go generated vendored
@@ -20,15 +20,13 @@ type (
// telemetrySDK is a Detector that provides information about
// the OpenTelemetry SDK used. This Detector is included as a
// builtin. If these resource attributes are not wanted, use
// the WithTelemetrySDK(nil) or WithoutBuiltin() options to
// explicitly disable them.
// resource.New() to explicitly disable them.
telemetrySDK struct{}

// host is a Detector that provides information about the host
// being run on. This Detector is included as a builtin. If
// these resource attributes are not wanted, use the
// WithHost(nil) or WithoutBuiltin() options to explicitly
// disable them.
// resource.New() to explicitly disable them.
host struct{}

stringDetector struct {
7 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go generated vendored
@@ -10,17 +10,16 @@ import (
"golang.org/x/sys/windows/registry"
)

// implements hostIDReader
// implements hostIDReader.
type hostIDReaderWindows struct{}

// read reads MachineGuid from the windows registry key:
// SOFTWARE\Microsoft\Cryptography
// read reads MachineGuid from the Windows registry key:
// SOFTWARE\Microsoft\Cryptography.
func (*hostIDReaderWindows) read() (string, error) {
k, err := registry.OpenKey(
registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`,
registry.QUERY_VALUE|registry.WOW64_64KEY,
)

if err != nil {
return "", err
}
1 vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go generated vendored
@@ -17,7 +17,6 @@ import (
func platformOSDescription() (string, error) {
k, err := registry.OpenKey(
registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)

if err != nil {
return "", err
}
7 vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go generated vendored
@@ -280,6 +280,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
//
// It is up to the exporter to implement any type of retry logic if a batch is failing
// to be exported, since it is specific to the protocol and backend being sent to.
clear(bsp.batch) // Erase elements to let GC collect objects
bsp.batch = bsp.batch[:0]

if err != nil {
@@ -316,7 +317,11 @@ func (bsp *batchSpanProcessor) processQueue() {
bsp.batchMutex.Unlock()
if shouldExport {
if !bsp.timer.Stop() {
<-bsp.timer.C
// Handle both GODEBUG=asynctimerchan=[0|1] properly.
select {
case <-bsp.timer.C:
default:
}
}
if err := bsp.exportSpans(ctx); err != nil {
otel.Handle(err)
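The `processQueue` hunk above replaces a bare `<-bsp.timer.C` with a non-blocking drain so the code behaves the same under both `GODEBUG=asynctimerchan` settings. A standalone sketch of that stop-and-drain idiom, using only the standard library and no OpenTelemetry types:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	timer := time.NewTimer(50 * time.Millisecond)
	defer timer.Stop()

	// ... work happens, and we decide to act before the timer fires ...

	if !timer.Stop() {
		// The timer may or may not have already delivered on timer.C,
		// so drain it without blocking instead of doing a bare receive.
		select {
		case <-timer.C:
		default:
		}
	}
	timer.Reset(50 * time.Millisecond)
	fmt.Println("timer safely stopped, drained, and reset")
}
```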
21 vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go generated vendored
@@ -12,25 +12,26 @@ import (

// evictedQueue is a FIFO queue with a configurable capacity.
type evictedQueue[T any] struct {
queue []T
capacity int
droppedCount int
logDropped func()
queue []T
capacity int
droppedCount int
logDroppedMsg string
logDroppedOnce sync.Once
}

func newEvictedQueueEvent(capacity int) evictedQueue[Event] {
// Do not pre-allocate queue, do this lazily.
return evictedQueue[Event]{
capacity: capacity,
logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }),
capacity: capacity,
logDroppedMsg: "limit reached: dropping trace trace.Event",
}
}

func newEvictedQueueLink(capacity int) evictedQueue[Link] {
// Do not pre-allocate queue, do this lazily.
return evictedQueue[Link]{
capacity: capacity,
logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }),
capacity: capacity,
logDroppedMsg: "limit reached: dropping trace trace.Link",
}
}

@@ -53,6 +54,10 @@ func (eq *evictedQueue[T]) add(value T) {
eq.queue = append(eq.queue, value)
}

func (eq *evictedQueue[T]) logDropped() {
eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) })
}

// copy returns a copy of the evictedQueue.
func (eq *evictedQueue[T]) copy() []T {
return slices.Clone(eq.queue)
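The evictedQueue hunk above swaps a `sync.OnceFunc` closure for a `sync.Once` plus a message field, so the struct's zero value stays usable without constructing a closure up front. A small sketch of that warn-once pattern; the type and message are illustrative:

```go
package main

import (
	"fmt"
	"sync"
)

// dropCounter logs its warning at most once, no matter how many items drop.
type dropCounter struct {
	dropped  int
	warnMsg  string
	warnOnce sync.Once
}

func (d *dropCounter) drop() {
	d.dropped++
	d.warnOnce.Do(func() { fmt.Println("warning:", d.warnMsg) })
}

func main() {
	d := &dropCounter{warnMsg: "limit reached: dropping items"}
	for i := 0; i < 3; i++ {
		d.drop() // warning printed only on the first call
	}
	fmt.Println("dropped:", d.dropped)
}
```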
9 vendor/go.opentelemetry.io/otel/sdk/trace/provider.go generated vendored
@@ -139,9 +139,10 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
name = defaultTracerName
}
is := instrumentation.Scope{
Name: name,
Version: c.InstrumentationVersion(),
SchemaURL: c.SchemaURL(),
Name: name,
Version: c.InstrumentationVersion(),
SchemaURL: c.SchemaURL(),
Attributes: c.InstrumentationAttributes(),
}

t, ok := func() (trace.Tracer, bool) {
@@ -168,7 +169,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
// slowing down all tracing consumers.
// - Logging code may be instrumented with tracing and deadlock because it could try
// acquiring the same non-reentrant mutex.
global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL)
global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes)
}
return t
}
2 vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go generated vendored
@@ -99,7 +99,7 @@ func (s snapshot) InstrumentationScope() instrumentation.Scope {

// InstrumentationLibrary returns information about the instrumentation
// library that created the span.
func (s snapshot) InstrumentationLibrary() instrumentation.Library {
func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility
return s.instrumentationScope
}
112 vendor/go.opentelemetry.io/otel/sdk/trace/span.go generated vendored
@@ -62,7 +62,7 @@ type ReadOnlySpan interface {
// InstrumentationLibrary returns information about the instrumentation
// library that created the span.
// Deprecated: please use InstrumentationScope instead.
InstrumentationLibrary() instrumentation.Library
InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility
// Resource returns information about the entity that produced the span.
Resource() *resource.Resource
// DroppedAttributes returns the number of attributes dropped by the span
@@ -174,6 +174,17 @@ func (s *recordingSpan) IsRecording() bool {
s.mu.Lock()
defer s.mu.Unlock()

return s.isRecording()
}

// isRecording returns if this span is being recorded. If this span has ended
// this will return false.
//
// This method assumes s.mu.Lock is held by the caller.
func (s *recordingSpan) isRecording() bool {
if s == nil {
return false
}
return s.endTime.IsZero()
}

@@ -182,11 +193,15 @@ func (s *recordingSpan) IsRecording() bool {
// included in the set status when the code is for an error. If this span is
// not being recorded than this method does nothing.
func (s *recordingSpan) SetStatus(code codes.Code, description string) {
if !s.IsRecording() {
if s == nil {
return
}

s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}
if s.status.Code > code {
return
}
@@ -210,12 +225,15 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) {
// attributes the span is configured to have, the last added attributes will
// be dropped.
func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
if !s.IsRecording() {
if s == nil || len(attributes) == 0 {
return
}

s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}

limit := s.tracer.provider.spanLimits.AttributeCountLimit
if limit == 0 {
@@ -233,7 +251,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {

// Otherwise, add without deduplication. When attributes are read they
// will be deduplicated, optimizing the operation.
s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes))
s.attributes = slices.Grow(s.attributes, len(attributes))
for _, a := range attributes {
if !a.Valid() {
// Drop all invalid attributes.
@@ -280,13 +298,17 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {

// Do not set a capacity when creating this map. Benchmark testing has
// showed this to only add unused memory allocations in general use.
exists := make(map[attribute.Key]int)
s.dedupeAttrsFromRecord(&exists)
exists := make(map[attribute.Key]int, len(s.attributes))
s.dedupeAttrsFromRecord(exists)

// Now that s.attributes is deduplicated, adding unique attributes up to
// the capacity of s will not over allocate s.attributes.
sum := len(attrs) + len(s.attributes)
s.attributes = slices.Grow(s.attributes, min(sum, limit))

// max size = limit
maxCap := min(len(attrs)+len(s.attributes), limit)
if cap(s.attributes) < maxCap {
s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes))
}
for _, a := range attrs {
if !a.Valid() {
// Drop all invalid attributes.
@@ -296,6 +318,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {

if idx, ok := exists[a.Key]; ok {
// Perform all updates before dropping, even when at capacity.
a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
s.attributes[idx] = a
continue
}
@@ -386,9 +409,10 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
// the span's duration in case some operation below takes a while.
et := monotonicEndTime(s.startTime)

// Do relative expensive check now that we have an end time and see if we
// need to do any more processing.
if !s.IsRecording() {
// Lock the span now that we have an end time and see if we need to do any more processing.
s.mu.Lock()
if !s.isRecording() {
s.mu.Unlock()
return
}

@@ -413,10 +437,11 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
}

if s.executionTracerTaskEnd != nil {
s.mu.Unlock()
s.executionTracerTaskEnd()
s.mu.Lock()
}

s.mu.Lock()
// Setting endTime to non-zero marks the span as ended and not recording.
if config.Timestamp().IsZero() {
s.endTime = et
@@ -450,7 +475,13 @@ func monotonicEndTime(start time.Time) time.Time {
// does not change the Span status. If this span is not being recorded or err is nil
// than this method does nothing.
func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
if s == nil || err == nil || !s.IsRecording() {
if s == nil || err == nil {
return
}

s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}

@@ -486,14 +517,23 @@ func recordStackTrace() string {
}

// AddEvent adds an event with the provided name and options. If this span is
// not being recorded than this method does nothing.
// not being recorded then this method does nothing.
func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
if !s.IsRecording() {
if s == nil {
return
}

s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}
s.addEvent(name, o...)
}

// addEvent adds an event with the provided name and options.
//
// This method assumes s.mu.Lock is held by the caller.
func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
c := trace.NewEventConfig(o...)
e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
@@ -510,20 +550,21 @@ func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
e.Attributes = e.Attributes[:limit]
}

s.mu.Lock()
s.events.add(e)
s.mu.Unlock()
}

// SetName sets the name of this span. If this span is not being recorded than
// this method does nothing.
func (s *recordingSpan) SetName(name string) {
if !s.IsRecording() {
if s == nil {
return
}

s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}
s.name = name
}

@@ -579,29 +620,26 @@ func (s *recordingSpan) Attributes() []attribute.KeyValue {
func (s *recordingSpan) dedupeAttrs() {
// Do not set a capacity when creating this map. Benchmark testing has
// showed this to only add unused memory allocations in general use.
exists := make(map[attribute.Key]int)
s.dedupeAttrsFromRecord(&exists)
exists := make(map[attribute.Key]int, len(s.attributes))
s.dedupeAttrsFromRecord(exists)
}

// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
// using record as the record of unique attribute keys to their index.
//
// This method assumes s.mu.Lock is held by the caller.
func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) {
func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) {
// Use the fact that slices share the same backing array.
unique := s.attributes[:0]
for _, a := range s.attributes {
if idx, ok := (*record)[a.Key]; ok {
if idx, ok := record[a.Key]; ok {
unique[idx] = a
} else {
unique = append(unique, a)
(*record)[a.Key] = len(unique) - 1
record[a.Key] = len(unique) - 1
}
}
// s.attributes have element types of attribute.KeyValue. These types are
// not pointers and they themselves do not contain pointer fields,
// therefore the duplicate values do not need to be zeroed for them to be
// garbage collected.
clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects.
s.attributes = unique
}

@@ -642,7 +680,7 @@ func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {

// InstrumentationLibrary returns the instrumentation.Library associated with
// the Tracer that created this span.
func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library {
func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility
s.mu.Lock()
defer s.mu.Unlock()
return s.tracer.instrumentationScope
@@ -657,7 +695,7 @@ func (s *recordingSpan) Resource() *resource.Resource {
}

func (s *recordingSpan) AddLink(link trace.Link) {
if !s.IsRecording() {
if s == nil {
return
}
if !link.SpanContext.IsValid() && len(link.Attributes) == 0 &&
@@ -665,6 +703,12 @@ func (s *recordingSpan) AddLink(link trace.Link) {
return
}

s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}

l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes}

// Discard attributes over limit.
@@ -678,9 +722,7 @@ func (s *recordingSpan) AddLink(link trace.Link) {
l.Attributes = l.Attributes[:limit]
}

s.mu.Lock()
s.links.add(l)
s.mu.Unlock()
}

// DroppedAttributes returns the number of attributes dropped by the span
@@ -755,12 +797,16 @@ func (s *recordingSpan) snapshot() ReadOnlySpan {
}

func (s *recordingSpan) addChild() {
if !s.IsRecording() {
if s == nil {
return
}

s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}
s.childSpanCount++
s.mu.Unlock()
}

func (*recordingSpan) private() {}
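The span.go hunks above pass the dedup index map by value (maps are reference types in Go, so the pointer indirection was unnecessary) and reuse the slice's backing array to deduplicate in place. A standalone sketch of that in-place deduplication technique with invented key/value names:

```go
package main

import "fmt"

type kv struct {
	key string
	val int
}

// dedupeInPlace keeps the last value seen for each key while preserving
// first-seen order, reusing the slice's backing array much like the SDK does.
func dedupeInPlace(attrs []kv) []kv {
	index := make(map[string]int, len(attrs))
	unique := attrs[:0]
	for _, a := range attrs {
		if i, ok := index[a.key]; ok {
			unique[i] = a // overwrite the earlier occurrence
		} else {
			index[a.key] = len(unique)
			unique = append(unique, a)
		}
	}
	return unique
}

func main() {
	attrs := []kv{{"a", 1}, {"b", 2}, {"a", 3}}
	fmt.Println(dedupeInPlace(attrs)) // [{a 3} {b 2}]
}
```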
2 vendor/go.opentelemetry.io/otel/sdk/version.go generated vendored
@@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"

// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
return "1.28.0"
return "1.32.0"
}
3 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md generated vendored
@@ -1,3 +0,0 @@
# Semconv v1.24.0

[](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0)
4387 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go generated vendored
File diff suppressed because it is too large
9 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go generated vendored
@@ -1,9 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
// patterns for OpenTelemetry things. This package represents the v1.24.0
// version of the OpenTelemetry semantic conventions.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
200 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go generated vendored
@@ -1,200 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Code generated from semantic convention specification. DO NOT EDIT.

package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"

import "go.opentelemetry.io/otel/attribute"

// This event represents an occurrence of a lifecycle transition on the iOS
// platform.
const (
// IosStateKey is the attribute Key conforming to the "ios.state" semantic
// conventions. It represents the this attribute represents the state the
// application has transitioned into at the occurrence of the event.
//
// Type: Enum
// RequirementLevel: Required
// Stability: experimental
// Note: The iOS lifecycle states are defined in the [UIApplicationDelegate
// documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902),
// and from which the `OS terminology` column values are derived.
IosStateKey = attribute.Key("ios.state")
)

var (
// The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive`
IosStateActive = IosStateKey.String("active")
// The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive`
IosStateInactive = IosStateKey.String("inactive")
// The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground`
IosStateBackground = IosStateKey.String("background")
// The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground`
IosStateForeground = IosStateKey.String("foreground")
// The app is about to terminate. Associated with UIKit notification `applicationWillTerminate`
IosStateTerminate = IosStateKey.String("terminate")
)

// This event represents an occurrence of a lifecycle transition on the Android
// platform.
const (
// AndroidStateKey is the attribute Key conforming to the "android.state"
// semantic conventions. It represents the this attribute represents the
// state the application has transitioned into at the occurrence of the
// event.
//
// Type: Enum
// RequirementLevel: Required
// Stability: experimental
// Note: The Android lifecycle states are defined in [Activity lifecycle
// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
// and from which the `OS identifiers` are derived.
AndroidStateKey = attribute.Key("android.state")
)

var (
// Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
AndroidStateCreated = AndroidStateKey.String("created")
// Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
AndroidStateBackground = AndroidStateKey.String("background")
// Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
AndroidStateForeground = AndroidStateKey.String("foreground")
)

// This semantic convention defines the attributes used to represent a feature
// flag evaluation as an event.
const (
// FeatureFlagKeyKey is the attribute Key conforming to the
// "feature_flag.key" semantic conventions. It represents the unique
// identifier of the feature flag.
//
// Type: string
// RequirementLevel: Required
// Stability: experimental
// Examples: 'logo-color'
FeatureFlagKeyKey = attribute.Key("feature_flag.key")

// FeatureFlagProviderNameKey is the attribute Key conforming to the
// "feature_flag.provider_name" semantic conventions. It represents the
// name of the service provider that performs the flag evaluation.
//
// Type: string
// RequirementLevel: Recommended
// Stability: experimental
// Examples: 'Flag Manager'
FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")

// FeatureFlagVariantKey is the attribute Key conforming to the
// "feature_flag.variant" semantic conventions. It represents the sHOULD be
// a semantic identifier for a value. If one is unavailable, a stringified
// version of the value can be used.
//
// Type: string
// RequirementLevel: Recommended
// Stability: experimental
// Examples: 'red', 'true', 'on'
// Note: A semantic identifier, commonly referred to as a variant, provides
// a means
// for referring to a value without including the value itself. This can
// provide additional context for understanding the meaning behind a value.
// For example, the variant `red` maybe be used for the value `#c05543`.
//
// A stringified version of the value can be used in situations where a
// semantic identifier is unavailable. String representation of the value
// should be determined by the implementer.
FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
)

// FeatureFlagKey returns an attribute KeyValue conforming to the
// "feature_flag.key" semantic conventions. It represents the unique identifier
// of the feature flag.
func FeatureFlagKey(val string) attribute.KeyValue {
return FeatureFlagKeyKey.String(val)
}

// FeatureFlagProviderName returns an attribute KeyValue conforming to the
// "feature_flag.provider_name" semantic conventions. It represents the name of
// the service provider that performs the flag evaluation.
func FeatureFlagProviderName(val string) attribute.KeyValue {
return FeatureFlagProviderNameKey.String(val)
}

// FeatureFlagVariant returns an attribute KeyValue conforming to the
// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
// semantic identifier for a value. If one is unavailable, a stringified
// version of the value can be used.
func FeatureFlagVariant(val string) attribute.KeyValue {
return FeatureFlagVariantKey.String(val)
}

// RPC received/sent message.
const (
// MessageCompressedSizeKey is the attribute Key conforming to the
// "message.compressed_size" semantic conventions. It represents the
// compressed size of the message in bytes.
//
// Type: int
// RequirementLevel: Optional
// Stability: experimental
MessageCompressedSizeKey = attribute.Key("message.compressed_size")

// MessageIDKey is the attribute Key conforming to the "message.id"
// semantic conventions. It represents the mUST be calculated as two
// different counters starting from `1` one for sent messages and one for
// received message.
//
// Type: int
// RequirementLevel: Optional
// Stability: experimental
// Note: This way we guarantee that the values will be consistent between
// different implementations.
MessageIDKey = attribute.Key("message.id")

// MessageTypeKey is the attribute Key conforming to the "message.type"
// semantic conventions. It represents the whether this is a received or
// sent message.
//
// Type: Enum
// RequirementLevel: Optional
// Stability: experimental
MessageTypeKey = attribute.Key("message.type")

// MessageUncompressedSizeKey is the attribute Key conforming to the
// "message.uncompressed_size" semantic conventions. It represents the
// uncompressed size of the message in bytes.
//
// Type: int
// RequirementLevel: Optional
// Stability: experimental
MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
)

var (
// sent
MessageTypeSent = MessageTypeKey.String("SENT")
// received
MessageTypeReceived = MessageTypeKey.String("RECEIVED")
)

// MessageCompressedSize returns an attribute KeyValue conforming to the
// "message.compressed_size" semantic conventions. It represents the compressed
// size of the message in bytes.
func MessageCompressedSize(val int) attribute.KeyValue {
return MessageCompressedSizeKey.Int(val)
}

// MessageID returns an attribute KeyValue conforming to the "message.id"
// semantic conventions. It represents the mUST be calculated as two different
// counters starting from `1` one for sent messages and one for received
// message.
func MessageID(val int) attribute.KeyValue {
return MessageIDKey.Int(val)
}

// MessageUncompressedSize returns an attribute KeyValue conforming to the
// "message.uncompressed_size" semantic conventions. It represents the
// uncompressed size of the message in bytes.
func MessageUncompressedSize(val int) attribute.KeyValue {
return MessageUncompressedSizeKey.Int(val)
}
9 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go generated vendored
@@ -1,9 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"

const (
// ExceptionEventName is the name of the Span event representing an exception.
ExceptionEventName = "exception"
)
Some files were not shown because too many files have changed in this diff.