vendor: otel v1.35.0, otel/contrib v0.60.0, grpc v1.72.2

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Author: Sebastiaan van Stijn
Date: 2025-06-12 14:16:23 +02:00
Parent: a76643bca3
Commit: 59e34093bc
GPG Key ID: 76698F39D527CE8C
167 changed files with 8168 additions and 1457 deletions


@@ -46,13 +46,13 @@ require (
github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a
github.com/tonistiigi/go-rosetta v0.0.0-20220804170347-3f4430f2d346
github.com/xeipuuv/gojsonschema v1.2.0
go.opentelemetry.io/otel v1.31.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0
go.opentelemetry.io/otel/metric v1.31.0
go.opentelemetry.io/otel/sdk v1.31.0
go.opentelemetry.io/otel/sdk/metric v1.31.0
go.opentelemetry.io/otel/trace v1.31.0
go.opentelemetry.io/otel v1.35.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0
go.opentelemetry.io/otel/metric v1.35.0
go.opentelemetry.io/otel/sdk v1.35.0
go.opentelemetry.io/otel/sdk/metric v1.35.0
go.opentelemetry.io/otel/trace v1.35.0
golang.org/x/sync v0.14.0
golang.org/x/sys v0.33.0
golang.org/x/term v0.31.0
@@ -78,7 +78,7 @@ require (
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
@@ -96,15 +96,16 @@ require (
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
go.etcd.io/etcd/raft/v3 v3.5.16 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
golang.org/x/crypto v0.37.0 // indirect
golang.org/x/net v0.39.0 // indirect
golang.org/x/time v0.11.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect
google.golang.org/grpc v1.69.4 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect
google.golang.org/grpc v1.72.2 // indirect
google.golang.org/protobuf v1.36.6 // indirect
)


@@ -122,8 +122,8 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -296,28 +296,30 @@ github.com/zmap/zlint/v3 v3.1.0 h1:WjVytZo79m/L1+/Mlphl09WBob6YTGljN5IGWZFpAv0=
github.com/zmap/zlint/v3 v3.1.0/go.mod h1:L7t8s3sEKkb0A2BxGy1IWrxt1ZATa1R4QfJZaQOD3zU=
go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 h1:FZ6ei8GFW7kyPYdxJaV2rgI6M+4tvZzhYsQ2wgyVC08=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0/go.mod h1:MdEu/mC6j3D+tTEfvI15b5Ci2Fn7NneJ71YMoiS3tpI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0/go.mod h1:CXIWhUomyWBG/oY2/r/kLp6K/cmx9e/7DLpBuuGdLCA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -379,13 +381,13 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 h1:2oV8dfuIkM1Ti7DwXc0BJfnwr9csz4TDXI9EmiI+Rbw=
google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38/go.mod h1:vuAjtvlwkDKF6L1GQ0SokiRLCGFfeBUXWr/aFFkHACc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0=
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ=
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=


@@ -94,7 +94,7 @@ func Int64(val string) (int64, error) {
}
// Int64Slice converts 'val' where individual integers are separated by
// 'sep' into a int64 slice.
// 'sep' into an int64 slice.
func Int64Slice(val, sep string) ([]int64, error) {
s := strings.Split(val, sep)
values := make([]int64, len(s))
@@ -118,7 +118,7 @@ func Int32(val string) (int32, error) {
}
// Int32Slice converts 'val' where individual integers are separated by
// 'sep' into a int32 slice.
// 'sep' into an int32 slice.
func Int32Slice(val, sep string) ([]int32, error) {
s := strings.Split(val, sep)
values := make([]int32, len(s))
@@ -190,7 +190,7 @@ func Bytes(val string) ([]byte, error) {
}
// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe
// base64 without padding, are separated by 'sep' into a slice of bytes slices slice.
// base64 without padding, are separated by 'sep' into a slice of byte slices.
func BytesSlice(val, sep string) ([][]byte, error) {
s := strings.Split(val, sep)
values := make([][]byte, len(s))


@@ -81,6 +81,21 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R
mux.errorHandler(ctx, mux, marshaler, w, r, err)
}
// HTTPStreamError uses the mux-configured stream error handler to notify error to the client without closing the connection.
func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
st := mux.streamErrorHandler(ctx, err)
msg := errorChunk(st)
buf, err := marshaler.Marshal(msg)
if err != nil {
grpclog.Errorf("Failed to marshal an error: %v", err)
return
}
if _, err := w.Write(buf); err != nil {
grpclog.Errorf("Failed to notify error to client: %v", err)
return
}
}
// DefaultHTTPErrorHandler is the default error handler.
// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is
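The new runtime.HTTPStreamError added above lets handler code report an error to the client mid-stream without tearing down the connection. A minimal usage sketch, not part of this change; the handler name, package name, and status code are illustrative:

```go
package gatewayexample

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// streamHandler is a hypothetical handler that writes chunks to the client
// and uses HTTPStreamError to report a mid-stream failure in-band.
func streamHandler(mux *runtime.ServeMux, marshaler runtime.Marshaler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// ... write some streamed messages to w ...

		// On a recoverable upstream failure, emit an error chunk without
		// closing the connection; the mux-configured stream error handler
		// decides how the status is rendered.
		err := status.Error(codes.Unavailable, "upstream temporarily unavailable")
		runtime.HTTPStreamError(r.Context(), mux, marshaler, w, r, err)
	})
}
```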


@@ -155,7 +155,7 @@ func buildPathsBlindly(name string, in interface{}) []string {
return paths
}
// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask
// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
type fieldMaskPathItem struct {
// the list of prior fields leading up to node connected by dots
path string


@@ -64,7 +64,13 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
}
if !wroteHeader {
w.Header().Set("Content-Type", marshaler.ContentType(respRw))
var contentType string
if sct, ok := marshaler.(StreamContentType); ok {
contentType = sct.StreamContentType(respRw)
} else {
contentType = marshaler.ContentType(respRw)
}
w.Header().Set("Content-Type", contentType)
}
var buf []byte
@@ -190,11 +196,11 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
return
}
if !doForwardTrailers {
if !doForwardTrailers && mux.writeContentLength {
w.Header().Set("Content-Length", strconv.Itoa(len(buf)))
}
if _, err = w.Write(buf); err != nil {
if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) {
grpclog.Errorf("Failed to write response: %v", err)
}


@@ -48,3 +48,11 @@ type Delimited interface {
// Delimiter returns the record separator for the stream.
Delimiter() []byte
}
// StreamContentType defines the streaming content type.
type StreamContentType interface {
// StreamContentType returns the content type for a stream. This shares the
// same behaviour as for `Marshaler.ContentType`, but is called, if present,
// in the case of a streamed response.
StreamContentType(v interface{}) string
}
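A marshaler can opt in to this new interface to advertise a different media type for streamed responses, which ForwardResponseStream (see the handler.go hunk above) now prefers over ContentType. A sketch under assumptions: the type name, package name, and NDJSON MIME string are illustrative, not part of this change:

```go
package gatewayexample

import "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"

// ndjsonMarshaler keeps the embedded JSONPb behaviour for unary responses but
// reports a streaming-specific content type via the new interface.
type ndjsonMarshaler struct {
	runtime.JSONPb
}

// StreamContentType is picked up for streamed responses when present.
func (ndjsonMarshaler) StreamContentType(v interface{}) string {
	return "application/x-ndjson"
}

func newMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &ndjsonMarshaler{}),
	)
}
```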


@@ -86,8 +86,8 @@ func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
//
// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
// with a "application/json" Content-Type.
// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
// with an "application/json" Content-Type.
// "*" can be used to match any Content-Type.
// This can be attached to a ServerMux with the marshaler option.
func makeMarshalerMIMERegistry() marshalerRegistry {


@@ -71,6 +71,7 @@ type ServeMux struct {
routingErrorHandler RoutingErrorHandlerFunc
disablePathLengthFallback bool
unescapingMode UnescapingMode
writeContentLength bool
}
// ServeMuxOption is an option that can be given to a ServeMux on construction.
@@ -258,6 +259,13 @@ func WithDisablePathLengthFallback() ServeMuxOption {
}
}
// WithWriteContentLength returns a ServeMuxOption to enable writing content length on non-streaming responses
func WithWriteContentLength() ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.writeContentLength = true
}
}
// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
// When called the handler will forward the request to the upstream grpc service health check (defined in the
// gRPC Health Checking Protocol).
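Writing Content-Length on non-streaming responses is now opt-in via the WithWriteContentLength option added above (see the ForwardResponseMessage hunk, which also checks mux.writeContentLength). A minimal sketch of enabling it; the server wiring and address are illustrative:

```go
package main

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// Content-Length is only written for non-streaming responses when this
	// option is set on the mux.
	mux := runtime.NewServeMux(runtime.WithWriteContentLength())

	// ... register generated gateway handlers on mux ...

	_ = http.ListenAndServe(":8080", mux)
}
```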


@@ -40,7 +40,7 @@ func Float32P(val string) (*float32, error) {
}
// Int64P parses the given string representation of an integer
// and returns a pointer to a int64 whose value is same as the parsed integer.
// and returns a pointer to an int64 whose value is same as the parsed integer.
func Int64P(val string) (*int64, error) {
i, err := Int64(val)
if err != nil {
@@ -50,7 +50,7 @@ func Int64P(val string) (*int64, error) {
}
// Int32P parses the given string representation of an integer
// and returns a pointer to a int32 whose value is same as the parsed integer.
// and returns a pointer to an int32 whose value is same as the parsed integer.
func Int32P(val string) (*int32, error) {
i, err := Int32(val)
if err != nil {


@@ -126,6 +126,15 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin
}
}
// Check if oneof already set
if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() {
if f := msgValue.WhichOneof(of); f != nil {
if fieldDescriptor.Message() == nil || fieldDescriptor.FullName() != f.FullName() {
return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
}
}
}
// If this is the last element, we're done
if i == len(fieldPath)-1 {
break
@@ -140,13 +149,6 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin
msgValue = msgValue.Mutable(fieldDescriptor).Message()
}
// Check if oneof already set
if of := fieldDescriptor.ContainingOneof(); of != nil {
if f := msgValue.WhichOneof(of); f != nil {
return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
}
}
switch {
case fieldDescriptor.IsList():
return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values)
@@ -291,7 +293,11 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p
if err != nil {
return protoreflect.Value{}, err
}
msg = timestamppb.New(t)
timestamp := timestamppb.New(t)
if ok := timestamp.IsValid(); !ok {
return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value)
}
msg = timestamp
case "google.protobuf.Duration":
d, err := time.ParseDuration(value)
if err != nil {


@@ -1,6 +1,6 @@
package utilities
// An OpCode is a opcode of compiled path patterns.
// OpCode is an opcode of compiled path patterns.
type OpCode int
// These constants are the valid values of OpCode.


@@ -5,7 +5,7 @@ import (
"strings"
)
// flagInterface is an cut down interface to `flag`
// flagInterface is a cut down interface to `flag`
type flagInterface interface {
Var(value flag.Value, name string, usage string)
}

vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md generated vendored Normal file

@@ -0,0 +1,27 @@
# Contributing to go.opentelemetry.io/auto/sdk
The `go.opentelemetry.io/auto/sdk` module is a purpose built OpenTelemetry SDK.
It is designed to be:
0. An OpenTelemetry compliant SDK
1. Instrumented by auto-instrumentation (serializable into OTLP JSON)
2. Lightweight
3. User-friendly
These design choices are listed in the order of their importance.
The primary design goal of this module is to be an OpenTelemetry SDK.
This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`.
Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument.
The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP.
This ensures then that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent.
Outside of these first two goals, the intended use becomes relevant.
This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running.
Because of this, this package needs to not add unnecessary dependencies to that API.
Ideally, it adds none.
It also needs to operate efficiently.
Finally, this module is designed to be user-friendly to Go development.
It hides complexity in order to provide simpler APIs when the previous goals can all still be met.

vendor/go.opentelemetry.io/auto/sdk/LICENSE generated vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md generated vendored Normal file

@@ -0,0 +1,15 @@
# Versioning
This document describes the versioning policy for this module.
This policy is designed so the following goals can be achieved.
**Users are provided a codebase of value that is stable and secure.**
## Policy
* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules).
* [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used.
* Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
* Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path.
* GitHub releases will be made for all releases.

vendor/go.opentelemetry.io/auto/sdk/doc.go generated vendored Normal file

@@ -0,0 +1,14 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
/*
Package sdk provides an auto-instrumentable OpenTelemetry SDK.
An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the
process running this SDK. In that case, all telemetry the SDK produces will be
processed and handled by that [go.opentelemetry.io/auto.Instrumentation].
By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to
auto-instrument the SDK, the SDK will not generate any telemetry.
*/
package sdk
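For context, application code does not import this package directly: it keeps using the global API in go.opentelemetry.io/otel, and the auto SDK is only swapped in when the process is targeted by auto-instrumentation. A minimal sketch; the function and tracer names are illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

func doWork(ctx context.Context) {
	// Regular use of the global API: when the process is targeted by
	// go.opentelemetry.io/auto, this tracer can be backed by the auto SDK and
	// the span exported; without it (and with no other SDK installed) the
	// call produces no telemetry.
	ctx, span := otel.Tracer("example").Start(ctx, "doWork")
	defer span.End()

	_ = ctx // would be passed to downstream calls
}

func main() {
	doWork(context.Background())
}
```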


@@ -0,0 +1,58 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
// Attr is a key-value pair.
type Attr struct {
Key string `json:"key,omitempty"`
Value Value `json:"value,omitempty"`
}
// String returns an Attr for a string value.
func String(key, value string) Attr {
return Attr{key, StringValue(value)}
}
// Int64 returns an Attr for an int64 value.
func Int64(key string, value int64) Attr {
return Attr{key, Int64Value(value)}
}
// Int returns an Attr for an int value.
func Int(key string, value int) Attr {
return Int64(key, int64(value))
}
// Float64 returns an Attr for a float64 value.
func Float64(key string, value float64) Attr {
return Attr{key, Float64Value(value)}
}
// Bool returns an Attr for a bool value.
func Bool(key string, value bool) Attr {
return Attr{key, BoolValue(value)}
}
// Bytes returns an Attr for a []byte value.
// The passed slice must not be changed after it is passed.
func Bytes(key string, value []byte) Attr {
return Attr{key, BytesValue(value)}
}
// Slice returns an Attr for a []Value value.
// The passed slice must not be changed after it is passed.
func Slice(key string, value ...Value) Attr {
return Attr{key, SliceValue(value...)}
}
// Map returns an Attr for a map value.
// The passed slice must not be changed after it is passed.
func Map(key string, value ...Attr) Attr {
return Attr{key, MapValue(value...)}
}
// Equal returns if a is equal to b.
func (a Attr) Equal(b Attr) bool {
return a.Key == b.Key && a.Value.Equal(b.Value)
}


@@ -0,0 +1,8 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
/*
Package telemetry provides lightweight representations of OpenTelemetry
telemetry that are compatible with the OTLP JSON protobuf encoding.
*/
package telemetry


@@ -0,0 +1,103 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
import (
"encoding/hex"
"errors"
"fmt"
)
const (
traceIDSize = 16
spanIDSize = 8
)
// TraceID is a custom data type that is used for all trace IDs.
type TraceID [traceIDSize]byte
// String returns the hex string representation form of a TraceID.
func (tid TraceID) String() string {
return hex.EncodeToString(tid[:])
}
// IsEmpty returns false if id contains at least one non-zero byte.
func (tid TraceID) IsEmpty() bool {
return tid == [traceIDSize]byte{}
}
// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
func (tid TraceID) MarshalJSON() ([]byte, error) {
if tid.IsEmpty() {
return []byte(`""`), nil
}
return marshalJSON(tid[:])
}
// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
// quotes.
func (tid *TraceID) UnmarshalJSON(data []byte) error {
*tid = [traceIDSize]byte{}
return unmarshalJSON(tid[:], data)
}
// SpanID is a custom data type that is used for all span IDs.
type SpanID [spanIDSize]byte
// String returns the hex string representation form of a SpanID.
func (sid SpanID) String() string {
return hex.EncodeToString(sid[:])
}
// IsEmpty returns false if the span ID contains at least one non-zero byte.
func (sid SpanID) IsEmpty() bool {
return sid == [spanIDSize]byte{}
}
// MarshalJSON converts span ID into a hex string enclosed in quotes.
func (sid SpanID) MarshalJSON() ([]byte, error) {
if sid.IsEmpty() {
return []byte(`""`), nil
}
return marshalJSON(sid[:])
}
// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
func (sid *SpanID) UnmarshalJSON(data []byte) error {
*sid = [spanIDSize]byte{}
return unmarshalJSON(sid[:], data)
}
// marshalJSON converts id into a hex string enclosed in quotes.
func marshalJSON(id []byte) ([]byte, error) {
// Plus 2 quote chars at the start and end.
hexLen := hex.EncodedLen(len(id)) + 2
b := make([]byte, hexLen)
hex.Encode(b[1:hexLen-1], id)
b[0], b[hexLen-1] = '"', '"'
return b, nil
}
// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
func unmarshalJSON(dst []byte, src []byte) error {
if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
src = src[1 : l-1]
}
nLen := len(src)
if nLen == 0 {
return nil
}
if len(dst) != hex.DecodedLen(nLen) {
return errors.New("invalid length for ID")
}
_, err := hex.Decode(dst, src)
if err != nil {
return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
}
return nil
}
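The helpers above implement the OTLP/JSON convention of rendering trace and span IDs as quoted lowercase hex. A standalone sketch of the same encoding, assuming an arbitrary example ID:

```go
package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// A 16-byte trace ID rendered the way marshalJSON does: hex digits
	// wrapped in double quotes.
	id := [16]byte{0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6,
		0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36}

	buf := make([]byte, hex.EncodedLen(len(id))+2)
	hex.Encode(buf[1:len(buf)-1], id[:])
	buf[0], buf[len(buf)-1] = '"', '"'

	fmt.Println(string(buf)) // "4bf92f3577b34da6a3ce929d0e0e4736"
}
```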


@@ -0,0 +1,67 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
import (
"encoding/json"
"strconv"
)
// protoInt64 represents the protobuf encoding of integers which can be either
// strings or integers.
type protoInt64 int64
// Int64 returns the protoInt64 as an int64.
func (i *protoInt64) Int64() int64 { return int64(*i) }
// UnmarshalJSON decodes both strings and integers.
func (i *protoInt64) UnmarshalJSON(data []byte) error {
if data[0] == '"' {
var str string
if err := json.Unmarshal(data, &str); err != nil {
return err
}
parsedInt, err := strconv.ParseInt(str, 10, 64)
if err != nil {
return err
}
*i = protoInt64(parsedInt)
} else {
var parsedInt int64
if err := json.Unmarshal(data, &parsedInt); err != nil {
return err
}
*i = protoInt64(parsedInt)
}
return nil
}
// protoUint64 represents the protobuf encoding of integers which can be either
// strings or integers.
type protoUint64 uint64
// Uint64 returns the protoUint64 as a uint64.
func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
// UnmarshalJSON decodes both strings and integers.
func (i *protoUint64) UnmarshalJSON(data []byte) error {
if data[0] == '"' {
var str string
if err := json.Unmarshal(data, &str); err != nil {
return err
}
parsedUint, err := strconv.ParseUint(str, 10, 64)
if err != nil {
return err
}
*i = protoUint64(parsedUint)
} else {
var parsedUint uint64
if err := json.Unmarshal(data, &parsedUint); err != nil {
return err
}
*i = protoUint64(parsedUint)
}
return nil
}
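These types exist because OTLP/JSON may encode 64-bit integers either as JSON numbers or as decimal strings, so decoding has to accept both forms. A standalone sketch of the same idea; flexUint64 is a hypothetical stand-in for the unexported protoUint64:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// flexUint64 accepts both `"42"` and `42` in JSON input.
type flexUint64 uint64

func (i *flexUint64) UnmarshalJSON(data []byte) error {
	if len(data) > 0 && data[0] == '"' {
		var s string
		if err := json.Unmarshal(data, &s); err != nil {
			return err
		}
		v, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return err
		}
		*i = flexUint64(v)
		return nil
	}
	var v uint64
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	*i = flexUint64(v)
	return nil
}

func main() {
	var a, b struct {
		N flexUint64 `json:"timeUnixNano"`
	}
	_ = json.Unmarshal([]byte(`{"timeUnixNano":"1700000000000000000"}`), &a)
	_ = json.Unmarshal([]byte(`{"timeUnixNano":1700000000000000000}`), &b)
	fmt.Println(a.N == b.N) // true
}
```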


@@ -0,0 +1,66 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
)
// Resource information.
type Resource struct {
// Attrs are the set of attributes that describe the resource. Attribute
// keys MUST be unique (it is not allowed to have more than one attribute
// with the same key).
Attrs []Attr `json:"attributes,omitempty"`
// DroppedAttrs is the number of dropped attributes. If the value
// is 0, then no attributes were dropped.
DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
}
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
func (r *Resource) UnmarshalJSON(data []byte) error {
decoder := json.NewDecoder(bytes.NewReader(data))
t, err := decoder.Token()
if err != nil {
return err
}
if t != json.Delim('{') {
return errors.New("invalid Resource type")
}
for decoder.More() {
keyIface, err := decoder.Token()
if err != nil {
if errors.Is(err, io.EOF) {
// Empty.
return nil
}
return err
}
key, ok := keyIface.(string)
if !ok {
return fmt.Errorf("invalid Resource field: %#v", keyIface)
}
switch key {
case "attributes":
err = decoder.Decode(&r.Attrs)
case "droppedAttributesCount", "dropped_attributes_count":
err = decoder.Decode(&r.DroppedAttrs)
default:
// Skip unknown.
}
if err != nil {
return err
}
}
return nil
}


@@ -0,0 +1,67 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
)
// Scope is the identifying values of the instrumentation scope.
type Scope struct {
Name string `json:"name,omitempty"`
Version string `json:"version,omitempty"`
Attrs []Attr `json:"attributes,omitempty"`
DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
}
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
func (s *Scope) UnmarshalJSON(data []byte) error {
decoder := json.NewDecoder(bytes.NewReader(data))
t, err := decoder.Token()
if err != nil {
return err
}
if t != json.Delim('{') {
return errors.New("invalid Scope type")
}
for decoder.More() {
keyIface, err := decoder.Token()
if err != nil {
if errors.Is(err, io.EOF) {
// Empty.
return nil
}
return err
}
key, ok := keyIface.(string)
if !ok {
return fmt.Errorf("invalid Scope field: %#v", keyIface)
}
switch key {
case "name":
err = decoder.Decode(&s.Name)
case "version":
err = decoder.Decode(&s.Version)
case "attributes":
err = decoder.Decode(&s.Attrs)
case "droppedAttributesCount", "dropped_attributes_count":
err = decoder.Decode(&s.DroppedAttrs)
default:
// Skip unknown.
}
if err != nil {
return err
}
}
return nil
}


@@ -0,0 +1,456 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
import (
"bytes"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"time"
)
// A Span represents a single operation performed by a single component of the
// system.
type Span struct {
// A unique identifier for a trace. All spans from the same trace share
// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
// is zero-length and thus is also invalid).
//
// This field is required.
TraceID TraceID `json:"traceId,omitempty"`
// A unique identifier for a span within a trace, assigned when the span
// is created. The ID is an 8-byte array. An ID with all zeroes OR of length
// other than 8 bytes is considered invalid (empty string in OTLP/JSON
// is zero-length and thus is also invalid).
//
// This field is required.
SpanID SpanID `json:"spanId,omitempty"`
// trace_state conveys information about request position in multiple distributed tracing graphs.
// It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
// See also https://github.com/w3c/distributed-tracing for more details about this field.
TraceState string `json:"traceState,omitempty"`
// The `span_id` of this span's parent span. If this is a root span, then this
// field must be empty. The ID is an 8-byte array.
ParentSpanID SpanID `json:"parentSpanId,omitempty"`
// Flags, a bit field.
//
// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
// Context specification. To read the 8-bit W3C trace flag, use
// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
//
// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
//
// Bits 8 and 9 represent the 3 states of whether a span's parent
// is remote. The states are (unknown, is not remote, is remote).
// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
// To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
//
// When creating span messages, if the message is logically forwarded from another source
// with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
// be copied as-is. If creating from a source that does not have an equivalent flags field
// (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
// be set to zero.
// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
//
// [Optional].
Flags uint32 `json:"flags,omitempty"`
// A description of the span's operation.
//
// For example, the name can be a qualified method name or a file name
// and a line number where the operation is called. A best practice is to use
// the same display name at the same call point in an application.
// This makes it easier to correlate spans in different traces.
//
// This field is semantically required to be set to non-empty string.
// Empty value is equivalent to an unknown span name.
//
// This field is required.
Name string `json:"name"`
// Distinguishes between spans generated in a particular context. For example,
// two spans with the same name may be distinguished using `CLIENT` (caller)
// and `SERVER` (callee) to identify queueing latency associated with the span.
Kind SpanKind `json:"kind,omitempty"`
// start_time_unix_nano is the start time of the span. On the client side, this is the time
// kept by the local machine where the span execution starts. On the server side, this
// is the time when the server's application handler starts running.
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
//
// This field is semantically required and it is expected that end_time >= start_time.
StartTime time.Time `json:"startTimeUnixNano,omitempty"`
// end_time_unix_nano is the end time of the span. On the client side, this is the time
// kept by the local machine where the span execution ends. On the server side, this
// is the time when the server application handler stops running.
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
//
// This field is semantically required and it is expected that end_time >= start_time.
EndTime time.Time `json:"endTimeUnixNano,omitempty"`
// attributes is a collection of key/value pairs. Note, global attributes
// like server name can be set using the resource API. Examples of attributes:
//
// "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
// "/http/server_latency": 300
// "example.com/myattribute": true
// "example.com/score": 10.239
//
// The OpenTelemetry API specification further restricts the allowed value types:
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
Attrs []Attr `json:"attributes,omitempty"`
// dropped_attributes_count is the number of attributes that were discarded. Attributes
// can be discarded because their keys are too long or because there are too many
// attributes. If this value is 0, then no attributes were dropped.
DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
// events is a collection of Event items.
Events []*SpanEvent `json:"events,omitempty"`
// dropped_events_count is the number of dropped events. If the value is 0, then no
// events were dropped.
DroppedEvents uint32 `json:"droppedEventsCount,omitempty"`
// links is a collection of Links, which are references from this span to a span
// in the same or different trace.
Links []*SpanLink `json:"links,omitempty"`
// dropped_links_count is the number of dropped links after the maximum size was
// enforced. If this value is 0, then no links were dropped.
DroppedLinks uint32 `json:"droppedLinksCount,omitempty"`
// An optional final status for this span. Semantically when Status isn't set, it means
// span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
Status *Status `json:"status,omitempty"`
}
// MarshalJSON encodes s into OTLP formatted JSON.
func (s Span) MarshalJSON() ([]byte, error) {
startT := s.StartTime.UnixNano()
if s.StartTime.IsZero() || startT < 0 {
startT = 0
}
endT := s.EndTime.UnixNano()
if s.EndTime.IsZero() || endT < 0 {
endT = 0
}
// Override non-empty default SpanID marshal and omitempty.
var parentSpanId string
if !s.ParentSpanID.IsEmpty() {
b := make([]byte, hex.EncodedLen(spanIDSize))
hex.Encode(b, s.ParentSpanID[:])
parentSpanId = string(b)
}
type Alias Span
return json.Marshal(struct {
Alias
ParentSpanID string `json:"parentSpanId,omitempty"`
StartTime uint64 `json:"startTimeUnixNano,omitempty"`
EndTime uint64 `json:"endTimeUnixNano,omitempty"`
}{
Alias: Alias(s),
ParentSpanID: parentSpanId,
StartTime: uint64(startT),
EndTime: uint64(endT),
})
}
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
func (s *Span) UnmarshalJSON(data []byte) error {
decoder := json.NewDecoder(bytes.NewReader(data))
t, err := decoder.Token()
if err != nil {
return err
}
if t != json.Delim('{') {
return errors.New("invalid Span type")
}
for decoder.More() {
keyIface, err := decoder.Token()
if err != nil {
if errors.Is(err, io.EOF) {
// Empty.
return nil
}
return err
}
key, ok := keyIface.(string)
if !ok {
return fmt.Errorf("invalid Span field: %#v", keyIface)
}
switch key {
case "traceId", "trace_id":
err = decoder.Decode(&s.TraceID)
case "spanId", "span_id":
err = decoder.Decode(&s.SpanID)
case "traceState", "trace_state":
err = decoder.Decode(&s.TraceState)
case "parentSpanId", "parent_span_id":
err = decoder.Decode(&s.ParentSpanID)
case "flags":
err = decoder.Decode(&s.Flags)
case "name":
err = decoder.Decode(&s.Name)
case "kind":
err = decoder.Decode(&s.Kind)
case "startTimeUnixNano", "start_time_unix_nano":
var val protoUint64
err = decoder.Decode(&val)
s.StartTime = time.Unix(0, int64(val.Uint64()))
case "endTimeUnixNano", "end_time_unix_nano":
var val protoUint64
err = decoder.Decode(&val)
s.EndTime = time.Unix(0, int64(val.Uint64()))
case "attributes":
err = decoder.Decode(&s.Attrs)
case "droppedAttributesCount", "dropped_attributes_count":
err = decoder.Decode(&s.DroppedAttrs)
case "events":
err = decoder.Decode(&s.Events)
case "droppedEventsCount", "dropped_events_count":
err = decoder.Decode(&s.DroppedEvents)
case "links":
err = decoder.Decode(&s.Links)
case "droppedLinksCount", "dropped_links_count":
err = decoder.Decode(&s.DroppedLinks)
case "status":
err = decoder.Decode(&s.Status)
default:
// Skip unknown.
}
if err != nil {
return err
}
}
return nil
}
// SpanFlags represents constants used to interpret the
// Span.flags field, which is protobuf 'fixed32' type and is to
// be used as bit-fields. Each non-zero value defined in this enum is
// a bit-mask. To extract the bit-field, for example, use an
// expression like:
//
// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
//
// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
//
// Note that Span flags were introduced in version 1.1 of the
// OpenTelemetry protocol. Older Span producers do not set this
// field, consequently consumers should not rely on the absence of a
// particular flag bit to indicate the presence of a particular feature.
type SpanFlags int32
const (
// Bits 0-7 are used for trace flags.
SpanFlagsTraceFlagsMask SpanFlags = 255
// Bits 8 and 9 are used to indicate that the parent span or link span is remote.
// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
SpanFlagsContextHasIsRemoteMask SpanFlags = 256
// SpanFlagsContextIsRemoteMask indicates the Span is remote.
SpanFlagsContextIsRemoteMask SpanFlags = 512
)
// SpanKind is the type of span. Can be used to specify additional relationships between spans
// in addition to a parent/child relationship.
type SpanKind int32
const (
// Indicates that the span represents an internal operation within an application,
// as opposed to an operation happening at the boundaries. Default value.
SpanKindInternal SpanKind = 1
// Indicates that the span covers server-side handling of an RPC or other
// remote network request.
SpanKindServer SpanKind = 2
// Indicates that the span describes a request to some remote service.
SpanKindClient SpanKind = 3
// Indicates that the span describes a producer sending a message to a broker.
// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
// between producer and consumer spans. A PRODUCER span ends when the message was accepted
// by the broker while the logical processing of the message might span a much longer time.
SpanKindProducer SpanKind = 4
// Indicates that the span describes a consumer receiving a message from a broker.
// Like the PRODUCER kind, there is often no direct critical path latency relationship
// between producer and consumer spans.
SpanKindConsumer SpanKind = 5
)
// Event is a time-stamped annotation of the span, consisting of user-supplied
// text description and key-value pairs.
type SpanEvent struct {
// time_unix_nano is the time the event occurred.
Time time.Time `json:"timeUnixNano,omitempty"`
// name of the event.
// This field is semantically required to be set to non-empty string.
Name string `json:"name,omitempty"`
// attributes is a collection of attribute key/value pairs on the event.
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
Attrs []Attr `json:"attributes,omitempty"`
// dropped_attributes_count is the number of dropped attributes. If the value is 0,
// then no attributes were dropped.
DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
}
// MarshalJSON encodes e into OTLP formatted JSON.
func (e SpanEvent) MarshalJSON() ([]byte, error) {
t := e.Time.UnixNano()
if e.Time.IsZero() || t < 0 {
t = 0
}
type Alias SpanEvent
return json.Marshal(struct {
Alias
Time uint64 `json:"timeUnixNano,omitempty"`
}{
Alias: Alias(e),
Time: uint64(t),
})
}
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se.
func (se *SpanEvent) UnmarshalJSON(data []byte) error {
decoder := json.NewDecoder(bytes.NewReader(data))
t, err := decoder.Token()
if err != nil {
return err
}
if t != json.Delim('{') {
return errors.New("invalid SpanEvent type")
}
for decoder.More() {
keyIface, err := decoder.Token()
if err != nil {
if errors.Is(err, io.EOF) {
// Empty.
return nil
}
return err
}
key, ok := keyIface.(string)
if !ok {
return fmt.Errorf("invalid SpanEvent field: %#v", keyIface)
}
switch key {
case "timeUnixNano", "time_unix_nano":
var val protoUint64
err = decoder.Decode(&val)
se.Time = time.Unix(0, int64(val.Uint64()))
case "name":
err = decoder.Decode(&se.Name)
case "attributes":
err = decoder.Decode(&se.Attrs)
case "droppedAttributesCount", "dropped_attributes_count":
err = decoder.Decode(&se.DroppedAttrs)
default:
// Skip unknown.
}
if err != nil {
return err
}
}
return nil
}
// A pointer from the current span to another span in the same trace or in a
// different trace. For example, this can be used in batching operations,
// where a single batch handler processes multiple requests from different
// traces or when the handler receives a request from a different project.
type SpanLink struct {
// A unique identifier of a trace that this linked span is part of. The ID is a
// 16-byte array.
TraceID TraceID `json:"traceId,omitempty"`
// A unique identifier for the linked span. The ID is an 8-byte array.
SpanID SpanID `json:"spanId,omitempty"`
// The trace_state associated with the link.
TraceState string `json:"traceState,omitempty"`
// attributes is a collection of attribute key/value pairs on the link.
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
Attrs []Attr `json:"attributes,omitempty"`
// dropped_attributes_count is the number of dropped attributes. If the value is 0,
// then no attributes were dropped.
DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
// Flags, a bit field.
//
// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
// Context specification. To read the 8-bit W3C trace flag, use
// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
//
// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
//
// Bits 8 and 9 represent the 3 states of whether the link is remote.
// The states are (unknown, is not remote, is remote).
// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
// To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
//
// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
// When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero.
//
// [Optional].
Flags uint32 `json:"flags,omitempty"`
}
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl.
func (sl *SpanLink) UnmarshalJSON(data []byte) error {
decoder := json.NewDecoder(bytes.NewReader(data))
t, err := decoder.Token()
if err != nil {
return err
}
if t != json.Delim('{') {
return errors.New("invalid SpanLink type")
}
for decoder.More() {
keyIface, err := decoder.Token()
if err != nil {
if errors.Is(err, io.EOF) {
// Empty.
return nil
}
return err
}
key, ok := keyIface.(string)
if !ok {
return fmt.Errorf("invalid SpanLink field: %#v", keyIface)
}
switch key {
case "traceId", "trace_id":
err = decoder.Decode(&sl.TraceID)
case "spanId", "span_id":
err = decoder.Decode(&sl.SpanID)
case "traceState", "trace_state":
err = decoder.Decode(&sl.TraceState)
case "attributes":
err = decoder.Decode(&sl.Attrs)
case "droppedAttributesCount", "dropped_attributes_count":
err = decoder.Decode(&sl.DroppedAttrs)
case "flags":
err = decoder.Decode(&sl.Flags)
default:
// Skip unknown.
}
if err != nil {
return err
}
}
return nil
}

View File

@ -0,0 +1,40 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
// For the semantics of status codes see
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
type StatusCode int32
const (
// The default status.
StatusCodeUnset StatusCode = 0
// The Span has been validated by an Application developer or Operator to
// have completed successfully.
StatusCodeOK StatusCode = 1
// The Span contains an error.
StatusCodeError StatusCode = 2
)
var statusCodeStrings = []string{
"Unset",
"OK",
"Error",
}
func (s StatusCode) String() string {
if s >= 0 && int(s) < len(statusCodeStrings) {
return statusCodeStrings[s]
}
return "<unknown telemetry.StatusCode>"
}
// The Status type defines a logical error model that is suitable for different
// programming environments, including REST APIs and RPC APIs.
type Status struct {
// A developer-facing human readable error message.
Message string `json:"message,omitempty"`
// The status code.
Code StatusCode `json:"code,omitempty"`
}
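A minimal sketch of the resulting OTLP JSON, written as a Go example that could sit in a test next to this file (illustrative only; assumes encoding/json and fmt are imported):
func ExampleStatus() {
	s := Status{Message: "deadline exceeded", Code: StatusCodeError}
	b, _ := json.Marshal(s) // plain struct tags, no custom marshaler
	fmt.Println(string(b))
	// Output: {"message":"deadline exceeded","code":2}
}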

View File

@ -0,0 +1,189 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
)
// Traces represents the traces data that can be stored in a persistent storage,
// OR can be embedded by other protocols that transfer OTLP traces data but do
// not implement the OTLP protocol.
//
// The main difference between this message and collector protocol is that
// in this message there will not be any "control" or "metadata" specific to
// OTLP protocol.
//
// When new fields are added into this message, the OTLP request MUST be updated
// as well.
type Traces struct {
// An array of ResourceSpans.
// For data coming from a single resource this array will typically contain
// one element. Intermediary nodes that receive data from multiple origins
// typically batch the data before forwarding further and in that case this
// array will contain multiple elements.
ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"`
}
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td.
func (td *Traces) UnmarshalJSON(data []byte) error {
decoder := json.NewDecoder(bytes.NewReader(data))
t, err := decoder.Token()
if err != nil {
return err
}
if t != json.Delim('{') {
return errors.New("invalid TracesData type")
}
for decoder.More() {
keyIface, err := decoder.Token()
if err != nil {
if errors.Is(err, io.EOF) {
// Empty.
return nil
}
return err
}
key, ok := keyIface.(string)
if !ok {
return fmt.Errorf("invalid TracesData field: %#v", keyIface)
}
switch key {
case "resourceSpans", "resource_spans":
err = decoder.Decode(&td.ResourceSpans)
default:
// Skip unknown.
}
if err != nil {
return err
}
}
return nil
}
// A collection of ScopeSpans from a Resource.
type ResourceSpans struct {
// The resource for the spans in this message.
// If this field is not set then no resource info is known.
Resource Resource `json:"resource"`
// A list of ScopeSpans that originate from a resource.
ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"`
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "scope_spans" field which have their own schema_url field.
SchemaURL string `json:"schemaUrl,omitempty"`
}
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs.
func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
decoder := json.NewDecoder(bytes.NewReader(data))
t, err := decoder.Token()
if err != nil {
return err
}
if t != json.Delim('{') {
return errors.New("invalid ResourceSpans type")
}
for decoder.More() {
keyIface, err := decoder.Token()
if err != nil {
if errors.Is(err, io.EOF) {
// Empty.
return nil
}
return err
}
key, ok := keyIface.(string)
if !ok {
return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface)
}
switch key {
case "resource":
err = decoder.Decode(&rs.Resource)
case "scopeSpans", "scope_spans":
err = decoder.Decode(&rs.ScopeSpans)
case "schemaUrl", "schema_url":
err = decoder.Decode(&rs.SchemaURL)
default:
// Skip unknown.
}
if err != nil {
return err
}
}
return nil
}
// A collection of Spans produced by an InstrumentationScope.
type ScopeSpans struct {
// The instrumentation scope information for the spans in this message.
// Semantically, when InstrumentationScope isn't set, it is equivalent to
// an empty instrumentation scope name (unknown).
Scope *Scope `json:"scope"`
// A list of Spans that originate from an instrumentation scope.
Spans []*Span `json:"spans,omitempty"`
// The Schema URL, if known. This is the identifier of the Schema that the span data
// is recorded in. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to all spans and span events in the "spans" field.
SchemaURL string `json:"schemaUrl,omitempty"`
}
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss.
func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
decoder := json.NewDecoder(bytes.NewReader(data))
t, err := decoder.Token()
if err != nil {
return err
}
if t != json.Delim('{') {
return errors.New("invalid ScopeSpans type")
}
for decoder.More() {
keyIface, err := decoder.Token()
if err != nil {
if errors.Is(err, io.EOF) {
// Empty.
return nil
}
return err
}
key, ok := keyIface.(string)
if !ok {
return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
}
switch key {
case "scope":
err = decoder.Decode(&ss.Scope)
case "spans":
err = decoder.Decode(&ss.Spans)
case "schemaUrl", "schema_url":
err = decoder.Decode(&ss.SchemaURL)
default:
// Skip unknown.
}
if err != nil {
return err
}
}
return nil
}
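For orientation, a sketch of feeding a minimal OTLP JSON payload through these custom unmarshalers, written as an in-package example (illustrative; both camelCase and snake_case keys are accepted, as the switch statements above show):
func ExampleTraces_UnmarshalJSON() {
	data := []byte(`{"resourceSpans":[{"scopeSpans":[{"spans":[{"name":"GET /"}]}]}]}`)
	var td Traces
	if err := td.UnmarshalJSON(data); err != nil {
		panic(err)
	}
	fmt.Println(td.ResourceSpans[0].ScopeSpans[0].Spans[0].Name)
	// Output: GET /
}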

View File

@ -0,0 +1,452 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
//go:generate stringer -type=ValueKind -trimprefix=ValueKind
package telemetry
import (
"bytes"
"cmp"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"slices"
"strconv"
"unsafe"
)
// A Value represents a structured value.
// A zero value is valid and represents an empty value.
type Value struct {
// Ensure forward compatibility by explicitly making this not comparable.
noCmp [0]func() //nolint: unused // This is indeed used.
// num holds the value for Int64, Float64, and Bool. It holds the length
// for String, Bytes, Slice, Map.
num uint64
// any holds either the KindBool, KindInt64, KindFloat64, stringptr,
// bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
// then the value of Value is in num as described above. Otherwise, it
// contains the value wrapped in the appropriate type.
any any
}
type (
// stringptr represents a value in Value.any for KindString Values.
stringptr *byte
// bytesptr represents a value in Value.any for KindBytes Values.
bytesptr *byte
// sliceptr represents a value in Value.any for KindSlice Values.
sliceptr *Value
// mapptr represents a value in Value.any for KindMap Values.
mapptr *Attr
)
// ValueKind is the kind of a [Value].
type ValueKind int
// ValueKind values.
const (
ValueKindEmpty ValueKind = iota
ValueKindBool
ValueKindFloat64
ValueKindInt64
ValueKindString
ValueKindBytes
ValueKindSlice
ValueKindMap
)
var valueKindStrings = []string{
"Empty",
"Bool",
"Float64",
"Int64",
"String",
"Bytes",
"Slice",
"Map",
}
func (k ValueKind) String() string {
if k >= 0 && int(k) < len(valueKindStrings) {
return valueKindStrings[k]
}
return "<unknown telemetry.ValueKind>"
}
// StringValue returns a new [Value] for a string.
func StringValue(v string) Value {
return Value{
num: uint64(len(v)),
any: stringptr(unsafe.StringData(v)),
}
}
// IntValue returns a [Value] for an int.
func IntValue(v int) Value { return Int64Value(int64(v)) }
// Int64Value returns a [Value] for an int64.
func Int64Value(v int64) Value {
return Value{num: uint64(v), any: ValueKindInt64}
}
// Float64Value returns a [Value] for a float64.
func Float64Value(v float64) Value {
return Value{num: math.Float64bits(v), any: ValueKindFloat64}
}
// BoolValue returns a [Value] for a bool.
func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
var n uint64
if v {
n = 1
}
return Value{num: n, any: ValueKindBool}
}
// BytesValue returns a [Value] for a byte slice. The passed slice must not be
// changed after it is passed.
func BytesValue(v []byte) Value {
return Value{
num: uint64(len(v)),
any: bytesptr(unsafe.SliceData(v)),
}
}
// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
// not be changed after it is passed.
func SliceValue(vs ...Value) Value {
return Value{
num: uint64(len(vs)),
any: sliceptr(unsafe.SliceData(vs)),
}
}
// MapValue returns a new [Value] for a slice of key-value pairs. The passed
// slice must not be changed after it is passed.
func MapValue(kvs ...Attr) Value {
return Value{
num: uint64(len(kvs)),
any: mapptr(unsafe.SliceData(kvs)),
}
}
// AsString returns the value held by v as a string.
func (v Value) AsString() string {
if sp, ok := v.any.(stringptr); ok {
return unsafe.String(sp, v.num)
}
// TODO: error handle
return ""
}
// asString returns the value held by v as a string. It will panic if the Value
// is not KindString.
func (v Value) asString() string {
return unsafe.String(v.any.(stringptr), v.num)
}
// AsInt64 returns the value held by v as an int64.
func (v Value) AsInt64() int64 {
if v.Kind() != ValueKindInt64 {
// TODO: error handle
return 0
}
return v.asInt64()
}
// asInt64 returns the value held by v as an int64. If v is not of KindInt64,
// this will return garbage.
func (v Value) asInt64() int64 {
// Assumes v.num was a valid int64 (overflow not checked).
return int64(v.num) // nolint: gosec
}
// AsBool returns the value held by v as a bool.
func (v Value) AsBool() bool {
if v.Kind() != ValueKindBool {
// TODO: error handle
return false
}
return v.asBool()
}
// asBool returns the value held by v as a bool. If v is not of KindBool, this
// will return garbage.
func (v Value) asBool() bool { return v.num == 1 }
// AsFloat64 returns the value held by v as a float64.
func (v Value) AsFloat64() float64 {
if v.Kind() != ValueKindFloat64 {
// TODO: error handle
return 0
}
return v.asFloat64()
}
// asFloat64 returns the value held by v as a float64. If v is not of
// KindFloat64, this will return garbage.
func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }
// AsBytes returns the value held by v as a []byte.
func (v Value) AsBytes() []byte {
if sp, ok := v.any.(bytesptr); ok {
return unsafe.Slice((*byte)(sp), v.num)
}
// TODO: error handle
return nil
}
// asBytes returns the value held by v as a []byte. It will panic if the Value
// is not KindBytes.
func (v Value) asBytes() []byte {
return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
}
// AsSlice returns the value held by v as a []Value.
func (v Value) AsSlice() []Value {
if sp, ok := v.any.(sliceptr); ok {
return unsafe.Slice((*Value)(sp), v.num)
}
// TODO: error handle
return nil
}
// asSlice returns the value held by v as a []Value. It will panic if the Value
// is not KindSlice.
func (v Value) asSlice() []Value {
return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
}
// AsMap returns the value held by v as a []Attr.
func (v Value) AsMap() []Attr {
if sp, ok := v.any.(mapptr); ok {
return unsafe.Slice((*Attr)(sp), v.num)
}
// TODO: error handle
return nil
}
// asMap returns the value held by v as a []Attr. It will panic if the
// Value is not KindMap.
func (v Value) asMap() []Attr {
return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num)
}
// Kind returns the Kind of v.
func (v Value) Kind() ValueKind {
switch x := v.any.(type) {
case ValueKind:
return x
case stringptr:
return ValueKindString
case bytesptr:
return ValueKindBytes
case sliceptr:
return ValueKindSlice
case mapptr:
return ValueKindMap
default:
return ValueKindEmpty
}
}
// Empty returns if v does not hold any value.
func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
// Equal returns if v is equal to w.
func (v Value) Equal(w Value) bool {
k1 := v.Kind()
k2 := w.Kind()
if k1 != k2 {
return false
}
switch k1 {
case ValueKindInt64, ValueKindBool:
return v.num == w.num
case ValueKindString:
return v.asString() == w.asString()
case ValueKindFloat64:
return v.asFloat64() == w.asFloat64()
case ValueKindSlice:
return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
case ValueKindMap:
sv := sortMap(v.asMap())
sw := sortMap(w.asMap())
return slices.EqualFunc(sv, sw, Attr.Equal)
case ValueKindBytes:
return bytes.Equal(v.asBytes(), w.asBytes())
case ValueKindEmpty:
return true
default:
// TODO: error handle
return false
}
}
func sortMap(m []Attr) []Attr {
sm := make([]Attr, len(m))
copy(sm, m)
slices.SortFunc(sm, func(a, b Attr) int {
return cmp.Compare(a.Key, b.Key)
})
return sm
}
// String returns Value's value as a string, formatted like [fmt.Sprint].
//
// The returned string is meant for debugging;
// the string representation is not stable.
func (v Value) String() string {
switch v.Kind() {
case ValueKindString:
return v.asString()
case ValueKindInt64:
// Assumes v.num was a valid int64 (overflow not checked).
return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
case ValueKindFloat64:
return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
case ValueKindBool:
return strconv.FormatBool(v.asBool())
case ValueKindBytes:
return fmt.Sprint(v.asBytes())
case ValueKindMap:
return fmt.Sprint(v.asMap())
case ValueKindSlice:
return fmt.Sprint(v.asSlice())
case ValueKindEmpty:
return "<nil>"
default:
// Try to handle this as gracefully as possible.
//
// Don't panic here. The goal is to have developers find this
// first if a ValueKind is not handled. It is preferable to have
// users open an issue asking why their attributes have an
// "unhandled" prefix than to have their code panic.
return fmt.Sprintf("<unhandled telemetry.ValueKind: %s>", v.Kind())
}
}
// MarshalJSON encodes v into OTLP formatted JSON.
func (v *Value) MarshalJSON() ([]byte, error) {
switch v.Kind() {
case ValueKindString:
return json.Marshal(struct {
Value string `json:"stringValue"`
}{v.asString()})
case ValueKindInt64:
return json.Marshal(struct {
Value string `json:"intValue"`
}{strconv.FormatInt(int64(v.num), 10)})
case ValueKindFloat64:
return json.Marshal(struct {
Value float64 `json:"doubleValue"`
}{v.asFloat64()})
case ValueKindBool:
return json.Marshal(struct {
Value bool `json:"boolValue"`
}{v.asBool()})
case ValueKindBytes:
return json.Marshal(struct {
Value []byte `json:"bytesValue"`
}{v.asBytes()})
case ValueKindMap:
return json.Marshal(struct {
Value struct {
Values []Attr `json:"values"`
} `json:"kvlistValue"`
}{struct {
Values []Attr `json:"values"`
}{v.asMap()}})
case ValueKindSlice:
return json.Marshal(struct {
Value struct {
Values []Value `json:"values"`
} `json:"arrayValue"`
}{struct {
Values []Value `json:"values"`
}{v.asSlice()}})
case ValueKindEmpty:
return nil, nil
default:
return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
}
}
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
func (v *Value) UnmarshalJSON(data []byte) error {
decoder := json.NewDecoder(bytes.NewReader(data))
t, err := decoder.Token()
if err != nil {
return err
}
if t != json.Delim('{') {
return errors.New("invalid Value type")
}
for decoder.More() {
keyIface, err := decoder.Token()
if err != nil {
if errors.Is(err, io.EOF) {
// Empty.
return nil
}
return err
}
key, ok := keyIface.(string)
if !ok {
return fmt.Errorf("invalid Value key: %#v", keyIface)
}
switch key {
case "stringValue", "string_value":
var val string
err = decoder.Decode(&val)
*v = StringValue(val)
case "boolValue", "bool_value":
var val bool
err = decoder.Decode(&val)
*v = BoolValue(val)
case "intValue", "int_value":
var val protoInt64
err = decoder.Decode(&val)
*v = Int64Value(val.Int64())
case "doubleValue", "double_value":
var val float64
err = decoder.Decode(&val)
*v = Float64Value(val)
case "bytesValue", "bytes_value":
var val64 string
if err := decoder.Decode(&val64); err != nil {
return err
}
var val []byte
val, err = base64.StdEncoding.DecodeString(val64)
*v = BytesValue(val)
case "arrayValue", "array_value":
var val struct{ Values []Value }
err = decoder.Decode(&val)
*v = SliceValue(val.Values...)
case "kvlistValue", "kvlist_value":
var val struct{ Values []Attr }
err = decoder.Decode(&val)
*v = MapValue(val.Values...)
default:
// Skip unknown.
continue
}
// Use first valid. Ignore the rest.
return err
}
// Only unknown fields. Return nil without unmarshaling any value.
return nil
}
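A short in-package sketch of building and reading a composite Value (illustrative; Attr is the key/value pair type defined elsewhere in this package):
func ExampleValue() {
	v := MapValue(
		Attr{Key: "enabled", Value: BoolValue(true)},
		Attr{Key: "retries", Value: Int64Value(3)},
	)
	fmt.Println(v.Kind(), len(v.AsMap()), v.AsMap()[1].Value.AsInt64())
	// Output: Map 2 3
}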

94
vendor/go.opentelemetry.io/auto/sdk/limit.go generated vendored Normal file
View File

@ -0,0 +1,94 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sdk
import (
"log/slog"
"os"
"strconv"
)
// maxSpan holds the span limits resolved during startup.
var maxSpan = newSpanLimits()
type spanLimits struct {
// Attrs is the number of allowed attributes for a span.
//
// This is resolved from the environment variable value for the
// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
// that is not set, is used.
Attrs int
// AttrValueLen is the maximum attribute value length allowed for a span.
//
// This is resolved from the environment variable value for the
// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
// if that is not set, is used.
AttrValueLen int
// Events is the number of allowed events for a span.
//
// This is resolved from the environment variable value for the
// OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
Events int
// EventAttrs is the number of allowed attributes for a span event.
//
// This is resolved from the environment variable value for the
// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
EventAttrs int
// Links is the number of allowed Links for a span.
//
// This is resolved from the environment variable value for the
// OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
Links int
// LinkAttrs is the number of allowed attributes for a span link.
//
// This is resolved from the environment variable value for the
// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
LinkAttrs int
}
func newSpanLimits() spanLimits {
return spanLimits{
Attrs: firstEnv(
128,
"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
"OTEL_ATTRIBUTE_COUNT_LIMIT",
),
AttrValueLen: firstEnv(
-1, // Unlimited.
"OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
"OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
),
Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
}
}
// firstEnv returns the parsed integer value of the first matching environment
// variable from keys. The defaultVal is returned if the value is not an
// integer or no match is found.
func firstEnv(defaultVal int, keys ...string) int {
for _, key := range keys {
strV := os.Getenv(key)
if strV == "" {
continue
}
v, err := strconv.Atoi(strV)
if err == nil {
return v
}
slog.Warn(
"invalid limit environment variable",
"error", err,
"key", key,
"value", strV,
)
}
return defaultVal
}
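Because maxSpan is resolved once at package initialization, the limits must be present in the environment before the instrumented process starts. A sketch of an in-package test exercising the fallback order of firstEnv (illustrative only):
func TestFirstEnvFallback(t *testing.T) {
	// The span-specific key is unset, so the generic key wins over the default.
	t.Setenv("OTEL_ATTRIBUTE_COUNT_LIMIT", "64")
	got := firstEnv(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT")
	if got != 64 {
		t.Fatalf("got %d, want 64", got)
	}
}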

432
vendor/go.opentelemetry.io/auto/sdk/span.go generated vendored Normal file
View File

@ -0,0 +1,432 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sdk
import (
"encoding/json"
"fmt"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"unicode/utf8"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/noop"
"go.opentelemetry.io/auto/sdk/internal/telemetry"
)
type span struct {
noop.Span
spanContext trace.SpanContext
sampled atomic.Bool
mu sync.Mutex
traces *telemetry.Traces
span *telemetry.Span
}
func (s *span) SpanContext() trace.SpanContext {
if s == nil {
return trace.SpanContext{}
}
// s.spanContext is immutable, do not acquire lock s.mu.
return s.spanContext
}
func (s *span) IsRecording() bool {
if s == nil {
return false
}
return s.sampled.Load()
}
func (s *span) SetStatus(c codes.Code, msg string) {
if s == nil || !s.sampled.Load() {
return
}
s.mu.Lock()
defer s.mu.Unlock()
if s.span.Status == nil {
s.span.Status = new(telemetry.Status)
}
s.span.Status.Message = msg
switch c {
case codes.Unset:
s.span.Status.Code = telemetry.StatusCodeUnset
case codes.Error:
s.span.Status.Code = telemetry.StatusCodeError
case codes.Ok:
s.span.Status.Code = telemetry.StatusCodeOK
}
}
func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
if s == nil || !s.sampled.Load() {
return
}
s.mu.Lock()
defer s.mu.Unlock()
limit := maxSpan.Attrs
if limit == 0 {
// No attributes allowed.
s.span.DroppedAttrs += uint32(len(attrs))
return
}
m := make(map[string]int)
for i, a := range s.span.Attrs {
m[a.Key] = i
}
for _, a := range attrs {
val := convAttrValue(a.Value)
if val.Empty() {
s.span.DroppedAttrs++
continue
}
if idx, ok := m[string(a.Key)]; ok {
s.span.Attrs[idx] = telemetry.Attr{
Key: string(a.Key),
Value: val,
}
} else if limit < 0 || len(s.span.Attrs) < limit {
s.span.Attrs = append(s.span.Attrs, telemetry.Attr{
Key: string(a.Key),
Value: val,
})
m[string(a.Key)] = len(s.span.Attrs) - 1
} else {
s.span.DroppedAttrs++
}
}
}
// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
// number of dropped attributes is also returned.
func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
if limit == 0 {
return nil, uint32(len(attrs))
}
if limit < 0 {
// Unlimited.
return convAttrs(attrs), 0
}
limit = min(len(attrs), limit)
return convAttrs(attrs[:limit]), uint32(len(attrs) - limit)
}
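// Illustration (not part of the vendored file): with limit == 2 and five input
// attributes, convCappedAttrs returns the first two converted attrs and a
// dropped count of 3; limit == 0 drops everything; a negative limit keeps all.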
func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
if len(attrs) == 0 {
// Avoid allocations if not necessary.
return nil
}
out := make([]telemetry.Attr, 0, len(attrs))
for _, attr := range attrs {
key := string(attr.Key)
val := convAttrValue(attr.Value)
if val.Empty() {
continue
}
out = append(out, telemetry.Attr{Key: key, Value: val})
}
return out
}
func convAttrValue(value attribute.Value) telemetry.Value {
switch value.Type() {
case attribute.BOOL:
return telemetry.BoolValue(value.AsBool())
case attribute.INT64:
return telemetry.Int64Value(value.AsInt64())
case attribute.FLOAT64:
return telemetry.Float64Value(value.AsFloat64())
case attribute.STRING:
v := truncate(maxSpan.AttrValueLen, value.AsString())
return telemetry.StringValue(v)
case attribute.BOOLSLICE:
slice := value.AsBoolSlice()
out := make([]telemetry.Value, 0, len(slice))
for _, v := range slice {
out = append(out, telemetry.BoolValue(v))
}
return telemetry.SliceValue(out...)
case attribute.INT64SLICE:
slice := value.AsInt64Slice()
out := make([]telemetry.Value, 0, len(slice))
for _, v := range slice {
out = append(out, telemetry.Int64Value(v))
}
return telemetry.SliceValue(out...)
case attribute.FLOAT64SLICE:
slice := value.AsFloat64Slice()
out := make([]telemetry.Value, 0, len(slice))
for _, v := range slice {
out = append(out, telemetry.Float64Value(v))
}
return telemetry.SliceValue(out...)
case attribute.STRINGSLICE:
slice := value.AsStringSlice()
out := make([]telemetry.Value, 0, len(slice))
for _, v := range slice {
v = truncate(maxSpan.AttrValueLen, v)
out = append(out, telemetry.StringValue(v))
}
return telemetry.SliceValue(out...)
}
return telemetry.Value{}
}
// truncate returns a truncated version of s such that it contains no more
// than limit characters. Truncation is applied by returning the first limit
// valid characters contained in s.
//
// If limit is negative, it returns the original string.
//
// UTF-8 is supported. When truncating, all invalid characters are dropped
// before applying truncation.
//
// If s already contains no more than limit bytes, it is returned unchanged.
// No invalid characters are removed.
func truncate(limit int, s string) string {
// This prioritizes performance in the following order based on the most
// common expected use-cases.
//
// - Short values less than the default limit (128).
// - Strings with valid encodings that exceed the limit.
// - No limit.
// - Strings with invalid encodings that exceed the limit.
if limit < 0 || len(s) <= limit {
return s
}
// Optimistically, assume all valid UTF-8.
var b strings.Builder
count := 0
for i, c := range s {
if c != utf8.RuneError {
count++
if count > limit {
return s[:i]
}
continue
}
_, size := utf8.DecodeRuneInString(s[i:])
if size == 1 {
// Invalid encoding.
b.Grow(len(s) - 1)
_, _ = b.WriteString(s[:i])
s = s[i:]
break
}
}
// Fast-path, no invalid input.
if b.Cap() == 0 {
return s
}
// Truncate while validating UTF-8.
for i := 0; i < len(s) && count < limit; {
c := s[i]
if c < utf8.RuneSelf {
// Optimization for single byte runes (common case).
_ = b.WriteByte(c)
i++
count++
continue
}
_, size := utf8.DecodeRuneInString(s[i:])
if size == 1 {
// We checked for all 1-byte runes above, this is a RuneError.
i++
continue
}
_, _ = b.WriteString(s[i : i+size])
i += size
count++
}
return b.String()
}
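// Illustration (not part of the vendored file):
//
//	truncate(5, "abcdefgh")     == "abcde"  // limit counts characters, not bytes
//	truncate(5, "héllo, wörld") == "héllo"  // multi-byte runes count as one character
//	truncate(-1, s)             == s        // negative limit disables truncation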
func (s *span) End(opts ...trace.SpanEndOption) {
if s == nil || !s.sampled.Swap(false) {
return
}
// s.end exists so the lock (s.mu) is not held while s.ended is called.
s.ended(s.end(opts))
}
func (s *span) end(opts []trace.SpanEndOption) []byte {
s.mu.Lock()
defer s.mu.Unlock()
cfg := trace.NewSpanEndConfig(opts...)
if t := cfg.Timestamp(); !t.IsZero() {
s.span.EndTime = cfg.Timestamp()
} else {
s.span.EndTime = time.Now()
}
b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
return b
}
// Expected to be implemented in eBPF.
//
//go:noinline
func (*span) ended(buf []byte) { ended(buf) }
// ended is used for testing.
var ended = func([]byte) {}
func (s *span) RecordError(err error, opts ...trace.EventOption) {
if s == nil || err == nil || !s.sampled.Load() {
return
}
cfg := trace.NewEventConfig(opts...)
attrs := cfg.Attributes()
attrs = append(attrs,
semconv.ExceptionType(typeStr(err)),
semconv.ExceptionMessage(err.Error()),
)
if cfg.StackTrace() {
buf := make([]byte, 2048)
n := runtime.Stack(buf, false)
attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
}
s.mu.Lock()
defer s.mu.Unlock()
s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
}
func typeStr(i any) string {
t := reflect.TypeOf(i)
if t.PkgPath() == "" && t.Name() == "" {
// Likely a builtin type.
return t.String()
}
return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
}
func (s *span) AddEvent(name string, opts ...trace.EventOption) {
if s == nil || !s.sampled.Load() {
return
}
cfg := trace.NewEventConfig(opts...)
s.mu.Lock()
defer s.mu.Unlock()
s.addEvent(name, cfg.Timestamp(), cfg.Attributes())
}
// addEvent adds an event with name and attrs at tStamp to the span. The span
// lock (s.mu) needs to be held by the caller.
func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) {
limit := maxSpan.Events
if limit == 0 {
s.span.DroppedEvents++
return
}
if limit > 0 && len(s.span.Events) == limit {
// Drop head while avoiding allocation of more capacity.
copy(s.span.Events[:limit-1], s.span.Events[1:])
s.span.Events = s.span.Events[:limit-1]
s.span.DroppedEvents++
}
e := &telemetry.SpanEvent{Time: tStamp, Name: name}
e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs)
s.span.Events = append(s.span.Events, e)
}
func (s *span) AddLink(link trace.Link) {
if s == nil || !s.sampled.Load() {
return
}
l := maxSpan.Links
s.mu.Lock()
defer s.mu.Unlock()
if l == 0 {
s.span.DroppedLinks++
return
}
if l > 0 && len(s.span.Links) == l {
// Drop head while avoiding allocation of more capacity.
copy(s.span.Links[:l-1], s.span.Links[1:])
s.span.Links = s.span.Links[:l-1]
s.span.DroppedLinks++
}
s.span.Links = append(s.span.Links, convLink(link))
}
func convLinks(links []trace.Link) []*telemetry.SpanLink {
out := make([]*telemetry.SpanLink, 0, len(links))
for _, link := range links {
out = append(out, convLink(link))
}
return out
}
func convLink(link trace.Link) *telemetry.SpanLink {
l := &telemetry.SpanLink{
TraceID: telemetry.TraceID(link.SpanContext.TraceID()),
SpanID: telemetry.SpanID(link.SpanContext.SpanID()),
TraceState: link.SpanContext.TraceState().String(),
Flags: uint32(link.SpanContext.TraceFlags()),
}
l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes)
return l
}
func (s *span) SetName(name string) {
if s == nil || !s.sampled.Load() {
return
}
s.mu.Lock()
defer s.mu.Unlock()
s.span.Name = name
}
func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() }

124
vendor/go.opentelemetry.io/auto/sdk/tracer.go generated vendored Normal file
View File

@ -0,0 +1,124 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sdk
import (
"context"
"time"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/noop"
"go.opentelemetry.io/auto/sdk/internal/telemetry"
)
type tracer struct {
noop.Tracer
name, schemaURL, version string
}
var _ trace.Tracer = tracer{}
func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
var psc trace.SpanContext
sampled := true
span := new(span)
// Ask eBPF for sampling decision and span context info.
t.start(ctx, span, &psc, &sampled, &span.spanContext)
span.sampled.Store(sampled)
ctx = trace.ContextWithSpan(ctx, span)
if sampled {
// Only build traces if sampled.
cfg := trace.NewSpanStartConfig(opts...)
span.traces, span.span = t.traces(name, cfg, span.spanContext, psc)
}
return ctx, span
}
// Expected to be implemented in eBPF.
//
//go:noinline
func (t *tracer) start(
ctx context.Context,
spanPtr *span,
psc *trace.SpanContext,
sampled *bool,
sc *trace.SpanContext,
) {
start(ctx, spanPtr, psc, sampled, sc)
}
// start is used for testing.
var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {}
func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) {
span := &telemetry.Span{
TraceID: telemetry.TraceID(sc.TraceID()),
SpanID: telemetry.SpanID(sc.SpanID()),
Flags: uint32(sc.TraceFlags()),
TraceState: sc.TraceState().String(),
ParentSpanID: telemetry.SpanID(psc.SpanID()),
Name: name,
Kind: spanKind(cfg.SpanKind()),
}
span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes())
links := cfg.Links()
if limit := maxSpan.Links; limit == 0 {
span.DroppedLinks = uint32(len(links))
} else {
if limit > 0 {
n := max(len(links)-limit, 0)
span.DroppedLinks = uint32(n)
links = links[n:]
}
span.Links = convLinks(links)
}
if t := cfg.Timestamp(); !t.IsZero() {
span.StartTime = cfg.Timestamp()
} else {
span.StartTime = time.Now()
}
return &telemetry.Traces{
ResourceSpans: []*telemetry.ResourceSpans{
{
ScopeSpans: []*telemetry.ScopeSpans{
{
Scope: &telemetry.Scope{
Name: t.name,
Version: t.version,
},
Spans: []*telemetry.Span{span},
SchemaURL: t.schemaURL,
},
},
},
},
}, span
}
func spanKind(kind trace.SpanKind) telemetry.SpanKind {
switch kind {
case trace.SpanKindInternal:
return telemetry.SpanKindInternal
case trace.SpanKindServer:
return telemetry.SpanKindServer
case trace.SpanKindClient:
return telemetry.SpanKindClient
case trace.SpanKindProducer:
return telemetry.SpanKindProducer
case trace.SpanKindConsumer:
return telemetry.SpanKindConsumer
}
return telemetry.SpanKind(0) // undefined.
}

33
vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sdk
import (
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/noop"
)
// TracerProvider returns an auto-instrumentable [trace.TracerProvider].
//
// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument
// the process using the returned TracerProvider, all of the telemetry it
// produces will be processed and handled by that Instrumentation. By default,
// if no Instrumentation instruments the TracerProvider it will not generate
// any trace telemetry.
func TracerProvider() trace.TracerProvider { return tracerProviderInstance }
var tracerProviderInstance = new(tracerProvider)
type tracerProvider struct{ noop.TracerProvider }
var _ trace.TracerProvider = tracerProvider{}
func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
cfg := trace.NewTracerConfig(opts...)
return tracer{
name: name,
version: cfg.InstrumentationVersion(),
schemaURL: cfg.SchemaURL(),
}
}
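For orientation, a sketch of how application code opts into this provider (go.opentelemetry.io/auto/sdk is the public import path of this vendored package; without an auto-instrumentation agent attached, the spans remain no-ops):
package main

import (
	"context"

	"go.opentelemetry.io/auto/sdk"
)

func main() {
	tracer := sdk.TracerProvider().Tracer("example/app")
	_, span := tracer.Start(context.Background(), "do-work")
	// ... do work ...
	span.End()
}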

View File

@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
// Get is a convenient replacement for http.Get that adds a span around the request.
func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil)
if err != nil {
return nil, err
}
@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error)
// Head is a convenient replacement for http.Head that adds a span around the request.
func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil)
if err != nil {
return nil, err
}
@ -36,7 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error
// Post is a convenient replacement for http.Post that adds a span around the request.
func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) {
req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body)
if err != nil {
return nil, err
}
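A short sketch of calling one of these helpers (illustrative; error handling trimmed to the essentials):
package main

import (
	"context"
	"fmt"
	"io"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Get wraps http.Get and records a client span around the request.
	resp, err := otelhttp.Get(context.Background(), "https://example.com/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body))
}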

View File

@ -12,6 +12,7 @@ import (
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
)
@ -21,15 +22,16 @@ type middleware struct {
operation string
server string
tracer trace.Tracer
propagators propagation.TextMapPropagator
spanStartOptions []trace.SpanStartOption
readEvent bool
writeEvent bool
filters []Filter
spanNameFormatter func(string, *http.Request) string
publicEndpoint bool
publicEndpointFn func(*http.Request) bool
tracer trace.Tracer
propagators propagation.TextMapPropagator
spanStartOptions []trace.SpanStartOption
readEvent bool
writeEvent bool
filters []Filter
spanNameFormatter func(string, *http.Request) string
publicEndpoint bool
publicEndpointFn func(*http.Request) bool
metricAttributesFn func(*http.Request) []attribute.KeyValue
semconv semconv.HTTPServer
}
@ -79,6 +81,7 @@ func (h *middleware) configure(c *config) {
h.publicEndpointFn = c.PublicEndpointFn
h.server = c.ServerName
h.semconv = semconv.NewHTTPServer(c.Meter)
h.metricAttributesFn = c.MetricAttributesFn
}
// serveHTTP sets up tracing and calls the given next http.Handler with the span
@ -117,6 +120,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
}
}
if startTime := StartTimeFromContext(ctx); !startTime.IsZero() {
opts = append(opts, trace.WithTimestamp(startTime))
requestStartTime = startTime
}
ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
defer span.End()
@ -184,14 +192,16 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
// Use floating point division here for higher precision (instead of Millisecond method).
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
metricAttributes := semconv.MetricAttributes{
Req: r,
StatusCode: statusCode,
AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...),
}
h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{
ServerName: h.server,
ResponseSize: bytesWritten,
MetricAttributes: semconv.MetricAttributes{
Req: r,
StatusCode: statusCode,
AdditionalAttributes: labeler.Get(),
},
ServerName: h.server,
ResponseSize: bytesWritten,
MetricAttributes: metricAttributes,
MetricData: semconv.MetricData{
RequestSize: bw.BytesRead(),
ElapsedTime: elapsedTime,
@ -199,6 +209,14 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
})
}
func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
var attributeForRequest []attribute.KeyValue
if h.metricAttributesFn != nil {
attributeForRequest = h.metricAttributesFn(r)
}
return attributeForRequest
}
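// Illustration (not part of the vendored file): the field above is populated
// through the package option that takes a per-request callback, assumed here
// to be WithMetricAttributesFn, e.g.
//
//	otelhttp.NewHandler(mux, "server",
//		otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
//			return []attribute.KeyValue{attribute.String("tenant", r.Header.Get("X-Tenant"))}
//		}),
//	)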
// WithRouteTag annotates spans and metrics with the provided route name
// with HTTP route attribute.
func WithRouteTag(route string, h http.Handler) http.Handler {

View File

@ -1,3 +1,6 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/request/body_wrapper.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

View File

@ -0,0 +1,10 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
// Generate request package:
//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper.go.tmpl "--data={}" --out=body_wrapper.go
//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper_test.go.tmpl "--data={}" --out=body_wrapper_test.go
//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper.go.tmpl "--data={}" --out=resp_writer_wrapper.go
//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper_test.go.tmpl "--data={}" --out=resp_writer_wrapper_test.go

View File

@ -1,3 +1,6 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/request/resp_writer_wrapper.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

View File

@ -1,3 +1,6 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/semconv/env.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
@ -9,12 +12,17 @@ import (
"net/http"
"os"
"strings"
"sync"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
)
// OTelSemConvStabilityOptIn is the name of the environment variable that can be
// set to "old" or "http/dup" to opt into the new HTTP semantic conventions.
const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN"
type ResponseTelemetry struct {
StatusCode int
ReadBytes int64
@ -30,6 +38,11 @@ type HTTPServer struct {
requestBytesCounter metric.Int64Counter
responseBytesCounter metric.Int64Counter
serverLatencyMeasure metric.Float64Histogram
// New metrics
requestBodySizeHistogram metric.Int64Histogram
responseBodySizeHistogram metric.Int64Histogram
requestDurationHistogram metric.Float64Histogram
}
// RequestTraceAttrs returns trace attributes for an HTTP request received by a
@ -50,9 +63,18 @@ type HTTPServer struct {
// The req Host will be used to determine the server instead.
func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
if s.duplicate {
return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...)
return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...)
}
return OldHTTPServer{}.RequestTraceAttrs(server, req)
}
func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue {
if s.duplicate {
return append([]attribute.KeyValue{OldHTTPServer{}.NetworkTransportAttr(network)}, CurrentHTTPServer{}.NetworkTransportAttr(network))
}
return []attribute.KeyValue{
OldHTTPServer{}.NetworkTransportAttr(network),
}
return oldHTTPServer{}.RequestTraceAttrs(server, req)
}
// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
@ -60,14 +82,14 @@ func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attrib
// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
if s.duplicate {
return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...)
return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...)
}
return oldHTTPServer{}.ResponseTraceAttrs(resp)
return OldHTTPServer{}.ResponseTraceAttrs(resp)
}
// Route returns the attribute for the route.
func (s HTTPServer) Route(route string) attribute.KeyValue {
return oldHTTPServer{}.Route(route)
return OldHTTPServer{}.Route(route)
}
// Status returns a span status code and message for an HTTP status code
@ -102,29 +124,56 @@ type MetricData struct {
ElapsedTime float64
}
func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) {
if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
// This will happen if an HTTPServer{} is used insted of NewHTTPServer.
return
var (
metricAddOptionPool = &sync.Pool{
New: func() interface{} {
return &[]metric.AddOption{}
},
}
attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
o := metric.WithAttributeSet(attribute.NewSet(attributes...))
addOpts := []metric.AddOption{o}
s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...)
s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
metricRecordOptionPool = &sync.Pool{
New: func() interface{} {
return &[]metric.RecordOption{}
},
}
)
// TODO: Duplicate Metrics
func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) {
if s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil {
attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
o := metric.WithAttributeSet(attribute.NewSet(attributes...))
addOpts := metricAddOptionPool.Get().(*[]metric.AddOption)
*addOpts = append(*addOpts, o)
s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...)
s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...)
s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
*addOpts = (*addOpts)[:0]
metricAddOptionPool.Put(addOpts)
}
if s.duplicate && s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil {
attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
o := metric.WithAttributeSet(attribute.NewSet(attributes...))
recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption)
*recordOpts = append(*recordOpts, o)
s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...)
s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...)
s.requestDurationHistogram.Record(ctx, md.ElapsedTime, o)
*recordOpts = (*recordOpts)[:0]
metricRecordOptionPool.Put(recordOpts)
}
}
func NewHTTPServer(meter metric.Meter) HTTPServer {
env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
duplicate := env == "http/dup"
server := HTTPServer{
duplicate: duplicate,
}
server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter)
server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter)
if duplicate {
server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter)
}
return server
}
@ -135,32 +184,41 @@ type HTTPClient struct {
requestBytesCounter metric.Int64Counter
responseBytesCounter metric.Int64Counter
latencyMeasure metric.Float64Histogram
// new metrics
requestBodySize metric.Int64Histogram
requestDuration metric.Float64Histogram
}
func NewHTTPClient(meter metric.Meter) HTTPClient {
env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
duplicate := env == "http/dup"
client := HTTPClient{
duplicate: env == "http/dup",
duplicate: duplicate,
}
client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = oldHTTPClient{}.createMeasures(meter)
client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter)
if duplicate {
client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter)
}
return client
}
// RequestTraceAttrs returns attributes for an HTTP request made by a client.
func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
if c.duplicate {
return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...)
return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...)
}
return oldHTTPClient{}.RequestTraceAttrs(req)
return OldHTTPClient{}.RequestTraceAttrs(req)
}
// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client.
func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
if c.duplicate {
return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...)
return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...)
}
return oldHTTPClient{}.ResponseTraceAttrs(resp)
return OldHTTPClient{}.ResponseTraceAttrs(resp)
}
func (c HTTPClient) Status(code int) (codes.Code, string) {
@ -175,7 +233,7 @@ func (c HTTPClient) Status(code int) (codes.Code, string) {
func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
if c.duplicate {
return newHTTPClient{}.ErrorType(err)
return CurrentHTTPClient{}.ErrorType(err)
}
return attribute.KeyValue{}
@ -194,34 +252,56 @@ func (o MetricOpts) AddOptions() metric.AddOption {
return o.addOptions
}
func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts {
attributes := oldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
// TODO: Duplicate Metrics
func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts {
opts := map[string]MetricOpts{}
attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
set := metric.WithAttributeSet(attribute.NewSet(attributes...))
return MetricOpts{
opts["old"] = MetricOpts{
measurement: set,
addOptions: set,
}
if c.duplicate {
attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
set := metric.WithAttributeSet(attribute.NewSet(attributes...))
opts["new"] = MetricOpts{
measurement: set,
addOptions: set,
}
}
return opts
}
func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) {
func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) {
if s.requestBytesCounter == nil || s.latencyMeasure == nil {
// This will happen if an HTTPClient{} is used insted of NewHTTPClient().
// This will happen if an HTTPClient{} is used instead of NewHTTPClient().
return
}
s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions())
s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption())
s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions())
s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption())
// TODO: Duplicate Metrics
if s.duplicate {
s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption())
s.requestDuration.Record(ctx, md.ElapsedTime, opts["new"].MeasurementOption())
}
}
func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) {
func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) {
if s.responseBytesCounter == nil {
// This will happen if an HTTPClient{} is used insted of NewHTTPClient().
// This will happen if an HTTPClient{} is used instead of NewHTTPClient().
return
}
s.responseBytesCounter.Add(ctx, responseData, opts)
// TODO: Duplicate Metrics
s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions())
}
func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue {
if s.duplicate {
return append(OldHTTPClient{}.TraceAttributes(host), CurrentHTTPClient{}.TraceAttributes(host)...)
}
return OldHTTPClient{}.TraceAttributes(host)
}
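The duplicate ("http/dup") paths above only take effect when the opt-in is set before NewHTTPServer/NewHTTPClient run; a sketch of an in-package test (illustrative; noop is go.opentelemetry.io/otel/metric/noop):
func TestDuplicateOptIn(t *testing.T) {
	t.Setenv(OTelSemConvStabilityOptIn, "http/dup")
	s := NewHTTPServer(noop.Meter{})
	if !s.duplicate {
		t.Fatal("expected both old and new HTTP semantic conventions to be recorded")
	}
}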

View File

@ -0,0 +1,14 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
// Generate semconv package:
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=bench_test.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env_test.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv_test.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go
//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=v1.20.0.go

View File

@ -1,3 +1,6 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/semconv/httpconv.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
@ -7,14 +10,17 @@ import (
"fmt"
"net/http"
"reflect"
"slices"
"strconv"
"strings"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/noop"
semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
)
type newHTTPServer struct{}
type CurrentHTTPServer struct{}
// TraceRequest returns trace attributes for an HTTP request received by a
// server.
@ -32,18 +38,18 @@ type newHTTPServer struct{}
//
// If the primary server name is not known, server should be an empty string.
// The req Host will be used to determine the server instead.
func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
count := 3 // ServerAddress, Method, Scheme
var host string
var p int
if server == "" {
host, p = splitHostPort(req.Host)
host, p = SplitHostPort(req.Host)
} else {
// Prioritize the primary server name.
host, p = splitHostPort(server)
host, p = SplitHostPort(server)
if p < 0 {
_, p = splitHostPort(req.Host)
_, p = SplitHostPort(req.Host)
}
}
@ -59,7 +65,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att
scheme := n.scheme(req.TLS != nil)
if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" {
if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" {
// The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
// file-path that would be interpreted with a sock family.
count++
@ -104,7 +110,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att
attrs = append(attrs, methodOriginal)
}
if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" {
if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" {
// The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
// file-path that would be interpreted with a sock family.
attrs = append(attrs, semconvNew.NetworkPeerAddress(peer))
@ -135,7 +141,20 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att
return attrs
}
func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) {
func (o CurrentHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue {
switch network {
case "tcp", "tcp4", "tcp6":
return semconvNew.NetworkTransportTCP
case "udp", "udp4", "udp6":
return semconvNew.NetworkTransportUDP
case "unix", "unixgram", "unixpacket":
return semconvNew.NetworkTransportUnix
default:
return semconvNew.NetworkTransportPipe
}
}
func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) {
if method == "" {
return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
}
@ -150,7 +169,7 @@ func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyV
return semconvNew.HTTPRequestMethodGet, orig
}
func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
if https {
return semconvNew.URLScheme("https")
}
@ -160,7 +179,7 @@ func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
// TraceResponse returns trace attributes for telemetry from an HTTP response.
//
// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
var count int
if resp.ReadBytes > 0 {
@ -195,14 +214,94 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke
}
// Route returns the attribute for the route.
func (n newHTTPServer) Route(route string) attribute.KeyValue {
func (n CurrentHTTPServer) Route(route string) attribute.KeyValue {
return semconvNew.HTTPRoute(route)
}
type newHTTPClient struct{}
func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) {
if meter == nil {
return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{}
}
var err error
requestBodySizeHistogram, err := meter.Int64Histogram(
semconvNew.HTTPServerRequestBodySizeName,
metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit),
metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription),
)
handleErr(err)
responseBodySizeHistogram, err := meter.Int64Histogram(
semconvNew.HTTPServerResponseBodySizeName,
metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit),
metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription),
)
handleErr(err)
requestDurationHistogram, err := meter.Float64Histogram(
semconvNew.HTTPServerRequestDurationName,
metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit),
metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription),
)
handleErr(err)
return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram
}
func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
num := len(additionalAttributes) + 3
var host string
var p int
if server == "" {
host, p = SplitHostPort(req.Host)
} else {
// Prioritize the primary server name.
host, p = SplitHostPort(server)
if p < 0 {
_, p = SplitHostPort(req.Host)
}
}
hostPort := requiredHTTPPort(req.TLS != nil, p)
if hostPort > 0 {
num++
}
protoName, protoVersion := netProtocol(req.Proto)
if protoName != "" {
num++
}
if protoVersion != "" {
num++
}
if statusCode > 0 {
num++
}
attributes := slices.Grow(additionalAttributes, num)
attributes = append(attributes,
semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)),
n.scheme(req.TLS != nil),
semconvNew.ServerAddress(host))
if hostPort > 0 {
attributes = append(attributes, semconvNew.ServerPort(hostPort))
}
if protoName != "" {
attributes = append(attributes, semconvNew.NetworkProtocolName(protoName))
}
if protoVersion != "" {
attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion))
}
if statusCode > 0 {
attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode))
}
return attributes
}
type CurrentHTTPClient struct{}
// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
/*
The following attributes are returned:
- http.request.method
@ -222,7 +321,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue
var requestHost string
var requestPort int
for _, hostport := range []string{urlHost, req.Header.Get("Host")} {
requestHost, requestPort = splitHostPort(hostport)
requestHost, requestPort = SplitHostPort(hostport)
if requestHost != "" || requestPort > 0 {
break
}
@ -284,7 +383,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue
}
// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client.
func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
/*
The following attributes are returned:
- http.response.status_code
@ -311,7 +410,7 @@ func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyVa
return attrs
}
func (n newHTTPClient) ErrorType(err error) attribute.KeyValue {
func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue {
t := reflect.TypeOf(err)
var value string
if t.PkgPath() == "" && t.Name() == "" {
@ -328,7 +427,7 @@ func (n newHTTPClient) ErrorType(err error) attribute.KeyValue {
return semconvNew.ErrorTypeKey.String(value)
}
func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
if method == "" {
return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
}
@ -343,6 +442,98 @@ func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyV
return semconvNew.HTTPRequestMethodGet, orig
}
func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) {
if meter == nil {
return noop.Int64Histogram{}, noop.Float64Histogram{}
}
var err error
requestBodySize, err := meter.Int64Histogram(
semconvNew.HTTPClientRequestBodySizeName,
metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit),
metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription),
)
handleErr(err)
requestDuration, err := meter.Float64Histogram(
semconvNew.HTTPClientRequestDurationName,
metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit),
metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription),
)
handleErr(err)
return requestBodySize, requestDuration
}
func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
num := len(additionalAttributes) + 2
var h string
if req.URL != nil {
h = req.URL.Host
}
var requestHost string
var requestPort int
for _, hostport := range []string{h, req.Header.Get("Host")} {
requestHost, requestPort = SplitHostPort(hostport)
if requestHost != "" || requestPort > 0 {
break
}
}
port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
if port > 0 {
num++
}
protoName, protoVersion := netProtocol(req.Proto)
if protoName != "" {
num++
}
if protoVersion != "" {
num++
}
if statusCode > 0 {
num++
}
attributes := slices.Grow(additionalAttributes, num)
attributes = append(attributes,
semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)),
semconvNew.ServerAddress(requestHost),
n.scheme(req.TLS != nil),
)
if port > 0 {
attributes = append(attributes, semconvNew.ServerPort(port))
}
if protoName != "" {
attributes = append(attributes, semconvNew.NetworkProtocolName(protoName))
}
if protoVersion != "" {
attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion))
}
if statusCode > 0 {
attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode))
}
return attributes
}
// Attributes for httptrace.
func (n CurrentHTTPClient) TraceAttributes(host string) []attribute.KeyValue {
return []attribute.KeyValue{
semconvNew.ServerAddress(host),
}
}
func (n CurrentHTTPClient) scheme(https bool) attribute.KeyValue { // nolint:revive
if https {
return semconvNew.URLScheme("https")
}
return semconvNew.URLScheme("http")
}
func isErrorStatusCode(code int) bool {
return code >= 400 || code < 100
}

View File

@ -1,3 +1,6 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/semconv/util.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
@ -14,14 +17,14 @@ import (
semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
)
// splitHostPort splits a network address hostport of the form "host",
// SplitHostPort splits a network address hostport of the form "host",
// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
// port.
//
// An empty host is returned if it is not provided or unparsable. A negative
// port is returned if it is not provided or unparsable.
func splitHostPort(hostport string) (host string, port int) {
func SplitHostPort(hostport string) (host string, port int) {
port = -1
if strings.HasPrefix(hostport, "[") {
@ -75,7 +78,16 @@ func serverClientIP(xForwardedFor string) string {
func netProtocol(proto string) (name string, version string) {
name, version, _ = strings.Cut(proto, "/")
name = strings.ToLower(name)
switch name {
case "HTTP":
name = "http"
case "QUIC":
name = "quic"
case "SPDY":
name = "spdy"
default:
name = strings.ToLower(name)
}
return name, version
}
@ -96,3 +108,13 @@ func handleErr(err error) {
otel.Handle(err)
}
}
func standardizeHTTPMethod(method string) string {
method = strings.ToUpper(method)
switch method {
case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
default:
method = "_OTHER"
}
return method
}

View File

@ -1,3 +1,6 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/semconv/v120.0.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
@ -8,7 +11,6 @@ import (
"io"
"net/http"
"slices"
"strings"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
"go.opentelemetry.io/otel/attribute"
@ -17,7 +19,7 @@ import (
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)
type oldHTTPServer struct{}
type OldHTTPServer struct{}
// RequestTraceAttrs returns trace attributes for an HTTP request received by a
// server.
@ -35,14 +37,18 @@ type oldHTTPServer struct{}
//
// If the primary server name is not known, server should be an empty string.
// The req Host will be used to determine the server instead.
func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
return semconvutil.HTTPServerRequest(server, req)
}
func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue {
return semconvutil.NetTransport(network)
}
// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
//
// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
attributes := []attribute.KeyValue{}
if resp.ReadBytes > 0 {
@ -67,7 +73,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke
}
// Route returns the attribute for the route.
func (o oldHTTPServer) Route(route string) attribute.KeyValue {
func (o OldHTTPServer) Route(route string) attribute.KeyValue {
return semconv.HTTPRoute(route)
}
@ -84,7 +90,7 @@ const (
serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
)
func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
if meter == nil {
return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
}
@ -113,17 +119,17 @@ func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter,
return requestBytesCounter, responseBytesCounter, serverLatencyMeasure
}
func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
n := len(additionalAttributes) + 3
var host string
var p int
if server == "" {
host, p = splitHostPort(req.Host)
host, p = SplitHostPort(req.Host)
} else {
// Prioritize the primary server name.
host, p = splitHostPort(server)
host, p = SplitHostPort(server)
if p < 0 {
_, p = splitHostPort(req.Host)
_, p = SplitHostPort(req.Host)
}
}
hostPort := requiredHTTPPort(req.TLS != nil, p)
@ -144,7 +150,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status
attributes := slices.Grow(additionalAttributes, n)
attributes = append(attributes,
standardizeHTTPMethodMetric(req.Method),
semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
o.scheme(req.TLS != nil),
semconv.NetHostName(host))
@ -164,24 +170,24 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status
return attributes
}
func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
if https {
return semconv.HTTPSchemeHTTPS
}
return semconv.HTTPSchemeHTTP
}
type oldHTTPClient struct{}
type OldHTTPClient struct{}
func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
return semconvutil.HTTPClientRequest(req)
}
func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
return semconvutil.HTTPClientResponse(resp)
}
func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
/* The following semantic conventions are returned if present:
http.method string
http.status_code int
@ -197,7 +203,7 @@ func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, addit
var requestHost string
var requestPort int
for _, hostport := range []string{h, req.Header.Get("Host")} {
requestHost, requestPort = splitHostPort(hostport)
requestHost, requestPort = SplitHostPort(hostport)
if requestHost != "" || requestPort > 0 {
break
}
@ -214,7 +220,7 @@ func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, addit
attributes := slices.Grow(additionalAttributes, n)
attributes = append(attributes,
standardizeHTTPMethodMetric(req.Method),
semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
semconv.NetPeerName(requestHost),
)
@ -235,7 +241,7 @@ const (
clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds
)
func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
if meter == nil {
return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
}
@ -263,12 +269,9 @@ func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter,
return requestBytesCounter, responseBytesCounter, latencyMeasure
}
func standardizeHTTPMethodMetric(method string) attribute.KeyValue {
method = strings.ToUpper(method)
switch method {
case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
default:
method = "_OTHER"
// Attributes for httptrace.
func (c OldHTTPClient) TraceAttributes(host string) []attribute.KeyValue {
return []attribute.KeyValue{
semconv.NetHostName(host),
}
return semconv.HTTPMethod(method)
}

View File

@ -200,6 +200,15 @@ func splitHostPort(hostport string) (host string, port int) {
func netProtocol(proto string) (name string, version string) {
name, version, _ = strings.Cut(proto, "/")
name = strings.ToLower(name)
switch name {
case "HTTP":
name = "http"
case "QUIC":
name = "quic"
case "SPDY":
name = "spdy"
default:
name = strings.ToLower(name)
}
return name, version
}

View File

@ -0,0 +1,29 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
import (
"context"
"time"
)
type startTimeContextKeyType int
const startTimeContextKey startTimeContextKeyType = 0
// ContextWithStartTime returns a new context with the provided start time. The
// start time will be used for metrics and traces emitted by the
// instrumentation. Only one start time can be injected into the context.
// Injecting it multiple times will override the previous calls.
func ContextWithStartTime(parent context.Context, start time.Time) context.Context {
return context.WithValue(parent, startTimeContextKey, start)
}
// StartTimeFromContext retrieves a time.Time from the provided context if one
// is available. If no start time was found in the provided context, the zero
// time.Time value is returned.
func StartTimeFromContext(ctx context.Context) time.Time {
t, _ := ctx.Value(startTimeContextKey).(time.Time)
return t
}
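A minimal usage sketch of these two helpers, assuming an application that wraps its handler with `otelhttp.NewHandler`; the handler wiring and operation name are illustrative:
```go
package main

import (
	"net/http"
	"time"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// StartTimeFromContext returns the zero time.Time when nothing was injected.
		if start := otelhttp.StartTimeFromContext(r.Context()); !start.IsZero() {
			_ = time.Since(start) // e.g. expose queueing delay before the handler ran
		}
		w.WriteHeader(http.StatusOK)
	})

	traced := otelhttp.NewHandler(inner, "example-operation")

	// Outermost middleware: record the earliest known start time so the
	// instrumentation measures duration from this point onward.
	root := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := otelhttp.ContextWithStartTime(r.Context(), time.Now())
		traced.ServeHTTP(w, r.WithContext(ctx))
	})

	_ = http.ListenAndServe(":8080", root)
}
```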

View File

@ -153,7 +153,7 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
// For handling response bytes we leverage a callback when the client reads the http response
readRecordFunc := func(n int64) {
t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions())
t.semconv.RecordResponseSize(ctx, n, metricOpts)
}
// traces

View File

@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http
// Version is the current release version of the otelhttp instrumentation.
func Version() string {
return "0.56.0"
return "0.60.0"
// This string is updated by the pre_release.sh script during release
}

View File

@ -1,6 +1,7 @@
.DS_Store
Thumbs.db
.cache/
.tools/
venv/
.idea/
@ -12,11 +13,3 @@ go.work
go.work.sum
gen/
/example/dice/dice
/example/namedtracer/namedtracer
/example/otel-collector/otel-collector
/example/opencensus/opencensus
/example/passthrough/passthrough
/example/prometheus/prometheus
/example/zipkin/zipkin

View File

@ -22,14 +22,16 @@ linters:
- govet
- ineffassign
- misspell
- perfsprint
- revive
- staticcheck
- tenv
- testifylint
- typecheck
- unconvert
- unused
- unparam
- usestdlibvars
- usetesting
issues:
# Maximum issues count per one linter.
@ -61,10 +63,11 @@ issues:
text: "calls to (.+) only in main[(][)] or init[(][)] functions"
linters:
- revive
# It's okay to not run gosec in a test.
# It's okay to not run gosec and perfsprint in a test.
- path: _test\.go
linters:
- gosec
- perfsprint
# Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
# as we commonly use it in tests and examples.
- text: "G404:"
@ -95,6 +98,13 @@ linters-settings:
- pkg: "crypto/md5"
- pkg: "crypto/sha1"
- pkg: "crypto/**/pkix"
auto/sdk:
files:
- "!internal/global/trace.go"
- "~internal/global/trace_test.go"
deny:
- pkg: "go.opentelemetry.io/auto/sdk"
desc: Do not use SDK from automatic instrumentation.
otlp-internal:
files:
- "!**/exporters/otlp/internal/**/*.go"
@ -127,8 +137,6 @@ linters-settings:
- "**/metric/**/*.go"
- "**/bridge/*.go"
- "**/bridge/**/*.go"
- "**/example/*.go"
- "**/example/**/*.go"
- "**/trace/*.go"
- "**/trace/**/*.go"
- "**/log/*.go"
@ -156,137 +164,71 @@ linters-settings:
locale: US
ignore-words:
- cancelled
perfsprint:
err-error: true
errorf: true
int-conversion: true
sprintf1: true
strconcat: true
revive:
# Sets the default failure confidence.
# This means that linting errors with less than 0.8 confidence will be ignored.
# Default: 0.8
confidence: 0.01
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
rules:
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports
- name: blank-imports
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr
- name: bool-literal-in-expr
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr
- name: constant-logical-expr
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
# TODO (#3372) re-enable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280
- name: context-as-argument
disabled: true
arguments:
allowTypesBefore: "*testing.T"
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type
- allowTypesBefore: "*testing.T"
- name: context-keys-type
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit
- name: deep-exit
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer
- name: defer
disabled: false
arguments:
- ["call-chain", "loop"]
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports
- name: dot-imports
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports
- name: duplicated-imports
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return
- name: early-return
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block
arguments:
- "preserveScope"
- name: empty-block
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines
- name: empty-lines
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming
- name: error-naming
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return
- name: error-return
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings
- name: error-strings
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf
- name: errorf
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported
- name: exported
disabled: false
arguments:
- "sayRepetitiveInsteadOfStutters"
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter
- name: flag-parameter
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches
- name: identical-branches
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
- name: if-return
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement
- name: increment-decrement
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow
- name: indent-error-flow
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing
- name: import-shadowing
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments
- name: increment-decrement
- name: indent-error-flow
arguments:
- "preserveScope"
- name: package-comments
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range
- name: range
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure
- name: range-val-in-closure
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address
- name: range-val-address
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id
- name: redefines-builtin-id
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format
- name: string-format
disabled: false
arguments:
- - panic
- '/^[^\n]*$/'
- must not contain line breaks
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag
- name: struct-tag
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else
- name: superfluous-else
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal
- name: time-equal
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming
- name: var-naming
disabled: false
arguments:
- ["ID"] # AllowList
- ["Otel", "Aws", "Gcp"] # DenyList
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration
- name: var-declaration
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion
- "preserveScope"
- name: time-equal
- name: unconditional-recursion
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return
- name: unexported-return
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error
- name: unhandled-error
disabled: false
arguments:
- "fmt.Fprint"
- "fmt.Fprintf"
@ -294,15 +236,14 @@ linters-settings:
- "fmt.Print"
- "fmt.Printf"
- "fmt.Println"
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt
- name: unnecessary-stmt
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break
- name: useless-break
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
- name: var-declaration
- name: var-naming
arguments:
- ["ID"] # AllowList
- ["Otel", "Aws", "Gcp"] # DenyList
- name: waitgroup-by-value
disabled: false
testifylint:
enable-all: true
disable:

View File

@ -11,6 +11,133 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
<!-- Released section -->
<!-- Don't change this section unless doing release -->
## [1.35.0/0.57.0/0.11.0] 2025-03-05
This release is the last to support [Go 1.22].
The next release will require at least [Go 1.23].
### Added
- Add `ValueFromAttribute` and `KeyValueFromAttribute` in `go.opentelemetry.io/otel/log`. (#6180)
- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/log`. (#6187)
- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/log/logtest`. (#6187)
- `AssertRecordEqual` in `go.opentelemetry.io/otel/log/logtest` checks `Record.EventName`. (#6187)
- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/sdk/log`. (#6193)
- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest`. (#6193)
- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6211)
- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6211)
- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` (#6210)
- The `go.opentelemetry.io/otel/semconv/v1.28.0` package.
The package contains semantic conventions from the `v1.28.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.28.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.27.0`. (#6236)
- The `go.opentelemetry.io/otel/semconv/v1.30.0` package.
The package contains semantic conventions from the `v1.30.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.30.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.28.0`. (#6240)
- Document the pitfalls of using `Resource` as a comparable type.
`Resource.Equal` and `Resource.Equivalent` should be used instead. (#6272)
- Support [Go 1.24]. (#6304)
- Add `FilterProcessor` and `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`.
It replaces `go.opentelemetry.io/otel/sdk/log/internal/x.FilterProcessor`.
Compared to the previous version, it additionally allows filtering by resource and instrumentation scope. (#6317)
### Changed
- Update `github.com/prometheus/common` to `v0.62.0`, which changes the `NameValidationScheme` to `NoEscaping`.
This allows metric names to keep their original delimiters (e.g. `.`) rather than replacing them with underscores.
This is controlled by the `Content-Type` header, or can be reverted by setting `NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`, as sketched below. (#6198)
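A hedged sketch of the revert described above, assuming `LegacyValidation` is exposed as a value on `github.com/prometheus/common/model` as the entry states; set it before the exporter starts serving metrics:
```go
package main

import "github.com/prometheus/common/model"

func init() {
	// Revert to the pre-v0.62.0 behaviour described above, in which metric
	// name delimiters such as "." are replaced with underscores.
	model.NameValidationScheme = model.LegacyValidation
}

func main() {}
```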
### Fixed
- Eliminate goroutine leak for the processor returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `Shutdown` is called and the passed `ctx` is canceled and `SpanExporter.Shutdown` has not returned. (#6368)
- Eliminate goroutine leak for the processor returned by `NewBatchSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `ForceFlush` is called and the passed `ctx` is canceled and `SpanExporter.Export` has not returned. (#6369)
## [1.34.0/0.56.0/0.10.0] 2025-01-17
### Changed
- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167)
### Fixed
- Relax minimum Go version to 1.22.0 in various modules. (#6073)
- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143)
- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143)
## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12
### Added
- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994)
- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`.
This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`.
Users can use it to avoid performing computationally expensive operations when recording measurements.
It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases (a usage sketch follows below). (#6016)
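A hedged sketch of how a caller might use this (assumptions: the experimental interface exposes an `Enabled(context.Context) bool` method, synchronous instruments returned by the SDK implement it, and it is probed through a locally declared interface because `internal/x` cannot be imported directly):
```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// enabledInstrument mirrors the experimental interface described above;
// declaring it locally lets us probe the SDK instrument without importing
// an internal package. The method signature is an assumption.
type enabledInstrument interface {
	Enabled(context.Context) bool
}

func record(ctx context.Context, counter metric.Int64Counter, expensive func() int64) {
	if e, ok := counter.(enabledInstrument); ok && !e.Enabled(ctx) {
		return // skip the expensive computation when nothing will record it
	}
	counter.Add(ctx, expensive())
}

func main() {
	meter := otel.Meter("example")
	counter, _ := meter.Int64Counter("expensive.operations")
	record(context.Background(), counter, func() int64 { return 42 })
}
```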
### Changed
- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package.
See that package for more information. (#5920)
- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929)
- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929)
- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929)
- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011)
- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009)
### Fixed
- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954)
- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954)
- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954)
- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995)
- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997)
- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032)
## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08
### Added
- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850)
- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850)
- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861)
- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861)
- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861)
- The `go.opentelemetry.io/otel/semconv/v1.27.0` package.
The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894)
- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903)
- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927)
- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934)
- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935)
- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933)
- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933)
- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932)
### Changed
- Support scope attributes and make them identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924)
- Support scope attributes and make them identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926)
- Support scope attributes and make them identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925)
- Make the schema URL and scope attributes identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931)
- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804)
### Fixed
- Global MeterProvider registration unwraps global instrument Observers; the undocumented `Unwrap()` methods are now private. (#5881)
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892)
- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911)
- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915)
- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912)
- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944)
- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944)
- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944)
- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944)
- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900)
### Removed
- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930)
## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11
### Added
@ -3110,7 +3237,11 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...HEAD
[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0
[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0
[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0
[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0
[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0
[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
@ -3198,6 +3329,7 @@ It contains api and sdk for trace and meter.
<!-- Released section ended -->
[Go 1.24]: https://go.dev/doc/go1.24
[Go 1.23]: https://go.dev/doc/go1.23
[Go 1.22]: https://go.dev/doc/go1.22
[Go 1.21]: https://go.dev/doc/go1.21

View File

@ -181,6 +181,18 @@ patterns in the spec.
For a deeper discussion, see
[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
## Tests
All functionality should be covered by tests.
Performance-critical functionality should also be covered by benchmarks.
- Pull requests adding performance-critical functionality
should include `go test -bench` output in their description.
- Pull requests changing performance-critical functionality
should include [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat)
output in their description; a minimal example benchmark is sketched below.
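As referenced above, a minimal sketch of the kind of benchmark expected for performance-critical changes. The helper is copied from the vendored diff earlier in this change so the file compiles on its own; the package name is illustrative. Save it as a `_test.go` file and run `go test -bench=.`:
```go
package semconv // illustrative package name; place alongside the code under test

import (
	"net/http"
	"strings"
	"testing"
)

// standardizeMethod mirrors the standardizeHTTPMethod helper shown earlier,
// copied locally so the benchmark compiles on its own.
func standardizeMethod(method string) string {
	method = strings.ToUpper(method)
	switch method {
	case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead,
		http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
	default:
		method = "_OTHER"
	}
	return method
}

func BenchmarkStandardizeMethod(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		_ = standardizeMethod("get")
	}
}
```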
## Documentation
Each (non-internal, non-test) package must be documented using
@ -629,6 +641,10 @@ should be canceled.
## Approvers and Maintainers
### Triagers
- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
### Approvers
### Maintainers
@ -641,13 +657,13 @@ should be canceled.
### Emeritus
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
- [Evan Torrie](https://github.com/evantorrie), Yahoo
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Josh MacDonald](https://github.com/jmacd), LightStep
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
- [Aaron Clawson](https://github.com/MadVikingGod)
- [Anthony Mirabella](https://github.com/Aneurysm9)
- [Chester Cheung](https://github.com/hanyuancheung)
- [Evan Torrie](https://github.com/evantorrie)
- [Gustavo Silva Paiva](https://github.com/paivagustavo)
- [Josh MacDonald](https://github.com/jmacd)
- [Liz Fong-Jones](https://github.com/lizthegrey)
### Become an Approver or a Maintainer

View File

@ -11,11 +11,15 @@ ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {}
GO = go
TIMEOUT = 60
# User to run as in docker images.
DOCKER_USER=$(shell id -u):$(shell id -g)
DEPENDENCIES_DOCKERFILE=./dependencies.Dockerfile
.DEFAULT_GOAL := precommit
.PHONY: precommit ci
precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default
ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage
precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default
ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage
# Tools
@ -81,20 +85,20 @@ PIP := $(PYTOOLS)/pip
WORKDIR := /workdir
# The python image to use for the virtual environment.
PYTHONIMAGE := python:3.11.3-slim-bullseye
PYTHONIMAGE := $(shell awk '$$4=="python" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
# Run the python image with the current directory mounted.
DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
DOCKERPY := docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
# Create a virtual environment for Python tools.
$(PYTOOLS):
# The `--upgrade` flag is needed to ensure that the virtual environment is
# created with the latest pip version.
@$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip"
@$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade --cache-dir=$(WORKDIR)/.cache/pip pip"
# Install python packages into the virtual environment.
$(PYTOOLS)/%: $(PYTOOLS)
@$(DOCKERPY) $(PIP) install -r requirements.txt
@$(DOCKERPY) $(PIP) install --cache-dir=$(WORKDIR)/.cache/pip -r requirements.txt
CODESPELL = $(PYTOOLS)/codespell
$(CODESPELL): PACKAGE=codespell
@ -119,7 +123,7 @@ vanity-import-fix: $(PORTO)
# Generate go.work file for local development.
.PHONY: go-work
go-work: $(CROSSLINK)
$(CROSSLINK) work --root=$(shell pwd)
$(CROSSLINK) work --root=$(shell pwd) --go=1.22.7
# Build
@ -235,6 +239,16 @@ govulncheck/%: $(GOVULNCHECK)
codespell: $(CODESPELL)
@$(DOCKERPY) $(CODESPELL)
.PHONY: toolchain-check
toolchain-check:
@toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \
awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \
done); \
if [ -n "$${toolchainRes}" ]; then \
echo "toolchain checking failed:"; echo "$${toolchainRes}"; \
exit 1; \
fi
.PHONY: license-check
license-check:
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
@ -255,13 +269,30 @@ check-clean-work-tree:
exit 1; \
fi
# The weaver docker image to use for semconv-generate.
WEAVER_IMAGE := $(shell awk '$$4=="weaver" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate
semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT)
semconv-generate: $(SEMCONVKIT)
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
[ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)"
# Ensure the target directory for source code is available.
mkdir -p $(PWD)/$(SEMCONVPKG)/${TAG}
# Note: We mount a home directory for downloading/storing the semconv repository.
# Weaver will automatically clean the cache when finished, but the directories will remain.
mkdir -p ~/.weaver
docker run --rm \
-u $(DOCKER_USER) \
--env HOME=/tmp/weaver \
--mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \
--mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \
--mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \
$(WEAVER_IMAGE) registry generate \
--registry=https://github.com/open-telemetry/semantic-conventions/archive/refs/tags/$(TAG).zip[model] \
--templates=/home/weaver/templates \
--param tag=$(TAG) \
go \
/home/weaver/target
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: gorelease

View File

@ -1,9 +1,11 @@
# OpenTelemetry-Go
[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain)
[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml)
[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel)
[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go)
[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
@ -49,18 +51,25 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture |
|----------|------------|--------------|
| Ubuntu | 1.24 | amd64 |
| Ubuntu | 1.23 | amd64 |
| Ubuntu | 1.22 | amd64 |
| Ubuntu | 1.24 | 386 |
| Ubuntu | 1.23 | 386 |
| Ubuntu | 1.22 | 386 |
| Linux | 1.23 | arm64 |
| Linux | 1.22 | arm64 |
| Ubuntu | 1.24 | arm64 |
| Ubuntu | 1.23 | arm64 |
| Ubuntu | 1.22 | arm64 |
| macOS 13 | 1.24 | amd64 |
| macOS 13 | 1.23 | amd64 |
| macOS 13 | 1.22 | amd64 |
| macOS | 1.24 | arm64 |
| macOS | 1.23 | arm64 |
| macOS | 1.22 | arm64 |
| Windows | 1.24 | amd64 |
| Windows | 1.23 | amd64 |
| Windows | 1.22 | amd64 |
| Windows | 1.24 | 386 |
| Windows | 1.23 | 386 |
| Windows | 1.22 | 386 |

View File

@ -5,17 +5,14 @@
New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
The `semconv-generate` make target is used for this.
1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
3. Run the `make semconv-generate ...` target from this repository.
1. Set the `TAG` environment variable to the semantic convention tag you want to generate.
2. Run the `make semconv-generate ...` target from this repository.
For example,
```sh
export TAG="v1.21.0" # Change to the release version you are generating.
export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
docker pull otel/semconvgen:latest
make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
export TAG="v1.30.0" # Change to the release version you are generating.
make semconv-generate # Uses the exported TAG.
```
This should create a new sub-package of [`semconv`](./semconv).
@ -130,6 +127,6 @@ Importantly, bump any package versions referenced to be the latest one you just
Bump the dependencies in the following Go services:
- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)
- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)

View File

@ -26,7 +26,7 @@ is designed so the following goals can be achieved.
go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
(e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
paths used in `go get` commands (e.g., `go get
go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a
go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a
`@v2.0.1` in that example. One way to think about it is that the module
name now includes the `/v2`, so include `/v2` whenever you are using the
module name).

View File

@ -355,7 +355,7 @@ func parseMember(member string) (Member, error) {
}
// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '<27>'.
func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string {
func replaceInvalidUTF8Sequences(c int, unescapeVal string) string {
if utf8.ValidString(unescapeVal) {
return unescapeVal
}
@ -363,7 +363,7 @@ func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string {
// https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
var b strings.Builder
b.Grow(cap)
b.Grow(c)
for i := 0; i < len(unescapeVal); {
r, size := utf8.DecodeRuneInString(unescapeVal[i:])
if r == utf8.RuneError && size == 1 {

View File

@ -5,6 +5,7 @@ package codes // import "go.opentelemetry.io/otel/codes"
import (
"encoding/json"
"errors"
"fmt"
"strconv"
)
@ -63,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
return nil
}
if c == nil {
return fmt.Errorf("nil receiver passed to UnmarshalJSON")
return errors.New("nil receiver passed to UnmarshalJSON")
}
var x interface{}

View File

@ -0,0 +1,3 @@
# This is a renovate-friendly source of Docker images.
FROM python:3.13.2-slim-bullseye@sha256:31b581c8218e1f3c58672481b3b7dba8e898852866b408c6a984c22832523935 AS python
FROM otel/weaver:v0.13.2@sha256:ae7346b992e477f629ea327e0979e8a416a97f7956ab1f7e95ac1f44edf1a893 AS weaver

View File

@ -155,7 +155,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
}
if c.metadata.Len() > 0 {
ctx = metadata.NewOutgoingContext(ctx, c.metadata)
md := c.metadata
if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
md = metadata.Join(md, outMD)
}
ctx = metadata.NewOutgoingContext(ctx, md)
}
return ctx, cancel

View File

@ -5,6 +5,7 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
import (
"context"
"errors"
"fmt"
"sync"
@ -114,7 +115,7 @@ func (e *Exporter) Shutdown(ctx context.Context) error {
return err
}
var errShutdown = fmt.Errorf("gRPC exporter is shutdown")
var errShutdown = errors.New("gRPC exporter is shutdown")
type shutdownClient struct{}

View File

@ -111,7 +111,7 @@ func cleanPath(urlPath string, defaultPath string) string {
return defaultPath
}
if !path.IsAbs(tmp) {
tmp = fmt.Sprintf("/%s", tmp)
tmp = "/" + tmp
}
return tmp
}
@ -287,9 +287,7 @@ func WithEndpointURL(v string) GenericOption {
cfg.Metrics.Endpoint = u.Host
cfg.Metrics.URLPath = u.Path
if u.Scheme != "https" {
cfg.Metrics.Insecure = true
}
cfg.Metrics.Insecure = u.Scheme != "https"
return cfg
})

View File

@ -46,8 +46,9 @@ func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
out = append(out, &mpb.ScopeMetrics{
Scope: &cpb.InstrumentationScope{
Name: sm.Scope.Name,
Version: sm.Scope.Version,
Name: sm.Scope.Name,
Version: sm.Scope.Version,
Attributes: AttrIter(sm.Scope.Attributes.Iter()),
},
Metrics: ms,
SchemaUrl: sm.Scope.SchemaURL,
@ -83,13 +84,13 @@ func metric(m metricdata.Metrics) (*mpb.Metric, error) {
}
switch a := m.Data.(type) {
case metricdata.Gauge[int64]:
out.Data = Gauge[int64](a)
out.Data = Gauge(a)
case metricdata.Gauge[float64]:
out.Data = Gauge[float64](a)
out.Data = Gauge(a)
case metricdata.Sum[int64]:
out.Data, err = Sum[int64](a)
out.Data, err = Sum(a)
case metricdata.Sum[float64]:
out.Data, err = Sum[float64](a)
out.Data, err = Sum(a)
case metricdata.Histogram[int64]:
out.Data, err = Histogram(a)
case metricdata.Histogram[float64]:

View File

@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
func Version() string {
return "1.31.0"
return "1.35.0"
}

View File

@ -13,7 +13,8 @@ func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationSco
return nil
}
return &commonpb.InstrumentationScope{
Name: il.Name,
Version: il.Version,
Name: il.Name,
Version: il.Version,
Attributes: Iterator(il.Attributes.Iter()),
}
}

View File

@ -229,7 +229,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
}
if c.metadata.Len() > 0 {
ctx = metadata.NewOutgoingContext(ctx, c.metadata)
md := c.metadata
if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
md = metadata.Join(md, outMD)
}
ctx = metadata.NewOutgoingContext(ctx, md)
}
// Unify the client stopCtx with the parent.
@ -289,7 +294,7 @@ func (c *client) MarshalLog() interface{} {
Type string
Endpoint string
}{
Type: "otlphttpgrpc",
Type: "otlptracegrpc",
Endpoint: c.endpoint,
}
}

View File

@ -98,7 +98,7 @@ func cleanPath(urlPath string, defaultPath string) string {
return defaultPath
}
if !path.IsAbs(tmp) {
tmp = fmt.Sprintf("/%s", tmp)
tmp = "/" + tmp
}
return tmp
}
@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption {
cfg.Traces.Endpoint = u.Host
cfg.Traces.URLPath = u.Path
if u.Scheme != "https" {
cfg.Traces.Insecure = true
}
cfg.Traces.Insecure = u.Scheme != "https"
return cfg
})

View File

@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
func Version() string {
return "1.31.0"
return "1.35.0"
}

View File

@ -49,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool {
if rv.Type().Kind() != reflect.Array {
return nil
}
var zero bool
correctLen := rv.Len()
correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
cpy := reflect.New(correctType)
_ = reflect.Copy(cpy.Elem(), rv)
return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
cpy := make([]bool, rv.Len())
if len(cpy) > 0 {
_ = reflect.Copy(reflect.ValueOf(cpy), rv)
}
return cpy
}
// AsInt64Slice converts an int64 array into a slice with the same elements as the array.
@ -63,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 {
if rv.Type().Kind() != reflect.Array {
return nil
}
var zero int64
correctLen := rv.Len()
correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
cpy := reflect.New(correctType)
_ = reflect.Copy(cpy.Elem(), rv)
return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
cpy := make([]int64, rv.Len())
if len(cpy) > 0 {
_ = reflect.Copy(reflect.ValueOf(cpy), rv)
}
return cpy
}
// AsFloat64Slice converts a float64 array into a slice with the same elements as the array.
@ -77,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 {
if rv.Type().Kind() != reflect.Array {
return nil
}
var zero float64
correctLen := rv.Len()
correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
cpy := reflect.New(correctType)
_ = reflect.Copy(cpy.Elem(), rv)
return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
cpy := make([]float64, rv.Len())
if len(cpy) > 0 {
_ = reflect.Copy(reflect.ValueOf(cpy), rv)
}
return cpy
}
// AsStringSlice converts a string array into a slice with the same elements as the array.
@ -91,10 +88,9 @@ func AsStringSlice(v interface{}) []string {
if rv.Type().Kind() != reflect.Array {
return nil
}
var zero string
correctLen := rv.Len()
correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
cpy := reflect.New(correctType)
_ = reflect.Copy(cpy.Elem(), rv)
return cpy.Elem().Slice(0, correctLen).Interface().([]string)
cpy := make([]string, rv.Len())
if len(cpy) > 0 {
_ = reflect.Copy(reflect.ValueOf(cpy), rv)
}
return cpy
}

View File

@ -13,7 +13,7 @@ import (
// unwrapper unwraps to return the underlying instrument implementation.
type unwrapper interface {
Unwrap() metric.Observable
unwrap() metric.Observable
}
type afCounter struct {
@ -40,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
func (i *afCounter) Unwrap() metric.Observable {
func (i *afCounter) unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64ObservableCounter)
}
@ -71,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
func (i *afUpDownCounter) Unwrap() metric.Observable {
func (i *afUpDownCounter) unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64ObservableUpDownCounter)
}
@ -102,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
func (i *afGauge) Unwrap() metric.Observable {
func (i *afGauge) unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64ObservableGauge)
}
@ -133,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
func (i *aiCounter) Unwrap() metric.Observable {
func (i *aiCounter) unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64ObservableCounter)
}
@ -164,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
func (i *aiUpDownCounter) Unwrap() metric.Observable {
func (i *aiUpDownCounter) unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64ObservableUpDownCounter)
}
@ -195,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) {
i.delegate.Store(ctr)
}
func (i *aiGauge) Unwrap() metric.Observable {
func (i *aiGauge) unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64ObservableGauge)
}

View File

@ -5,6 +5,7 @@ package global // import "go.opentelemetry.io/otel/internal/global"
import (
"container/list"
"context"
"reflect"
"sync"
@ -66,6 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me
name: name,
version: c.InstrumentationVersion(),
schema: c.SchemaURL(),
attrs: c.InstrumentationAttributes(),
}
if p.meters == nil {
@ -472,8 +474,7 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable)
defer m.mtx.Unlock()
if m.delegate != nil {
insts = unwrapInstruments(insts)
return m.delegate.RegisterCallback(f, insts...)
return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...)
}
reg := &registration{instruments: insts, function: f}
@ -487,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable)
return reg, nil
}
type wrapped interface {
unwrap() metric.Observable
}
func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
out := make([]metric.Observable, 0, len(instruments))
for _, inst := range instruments {
if in, ok := inst.(wrapped); ok {
if in, ok := inst.(unwrapper); ok {
out = append(out, in.unwrap())
} else {
out = append(out, inst)
@ -515,9 +512,61 @@ type registration struct {
unregMu sync.Mutex
}
func (c *registration) setDelegate(m metric.Meter) {
insts := unwrapInstruments(c.instruments)
type unwrapObs struct {
embedded.Observer
obs metric.Observer
}
// unwrapFloat64Observable returns an expected metric.Float64Observable after
// unwrapping the global object.
func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable {
if unwrapped, ok := inst.(unwrapper); ok {
if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok {
// Note: if the unwrapped object does not
// unwrap as an observable for either of the
// predicates here, it means an internal bug in
// this package. We avoid logging an error in
// this case, because the SDK has to try its
// own type conversion on the object. The SDK
// will see this and be forced to respond with
// its own error.
//
// This code uses a double-nested if statement
// to avoid creating a branch that is
// impossible to cover.
inst = floatObs
}
}
return inst
}
// unwrapInt64Observable returns an expected metric.Int64Observable after
// unwrapping the global object.
func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable {
if unwrapped, ok := inst.(unwrapper); ok {
if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok {
// See the comment in unwrapFloat64Observable().
inst = unint
}
}
return inst
}
func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) {
uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...)
}
func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) {
uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...)
}
func unwrapCallback(f metric.Callback) metric.Callback {
return func(ctx context.Context, obs metric.Observer) error {
return f(ctx, &unwrapObs{obs: obs})
}
}
func (c *registration) setDelegate(m metric.Meter) {
c.unregMu.Lock()
defer c.unregMu.Unlock()
@ -526,7 +575,7 @@ func (c *registration) setDelegate(m metric.Meter) {
return
}
reg, err := m.RegisterCallback(c.function, insts...)
reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...)
if err != nil {
GetErrorHandler().Handle(err)
return
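Editorial note (not part of the vendored change): the `unwrapCallback`/`unwrapInstruments` plumbing above is what makes code like the following work when it runs before an SDK `MeterProvider` is installed. A hedged usage sketch; the module path, instrument name, and `queueDepth` are illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// queueDepth is a hypothetical application measurement.
func queueDepth() int64 { return 42 }

func main() {
	// Instruments from the global meter are placeholders until an SDK is set;
	// the registered callback is re-registered against the real Meter, with
	// the placeholder instruments unwrapped, when otel.SetMeterProvider runs.
	meter := otel.Meter("github.com/example/app")
	depth, err := meter.Int64ObservableGauge("queue.depth")
	if err != nil {
		otel.Handle(err)
		return
	}
	if _, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(depth, queueDepth())
		return nil
	}, depth); err != nil {
		otel.Handle(err)
	}
}
```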

View File

@ -25,6 +25,7 @@ import (
"sync"
"sync/atomic"
"go.opentelemetry.io/auto/sdk"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
@ -87,6 +88,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
name: name,
version: c.InstrumentationVersion(),
schema: c.SchemaURL(),
attrs: c.InstrumentationAttributes(),
}
if p.tracers == nil {
@ -102,7 +104,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
return t
}
type il struct{ name, version, schema string }
type il struct {
name string
version string
schema string
attrs attribute.Set
}
// tracer is a placeholder for a trace.Tracer.
//
@ -139,6 +146,30 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart
return delegate.(trace.Tracer).Start(ctx, name, opts...)
}
return t.newSpan(ctx, autoInstEnabled, name, opts)
}
// autoInstEnabled determines if the auto-instrumentation SDK span is returned
// from the tracer when not backed by a delegate and auto-instrumentation has
// attached to this process.
//
// The auto-instrumentation is expected to overwrite this value to true when it
// attaches. Otherwise it points to false, and a tracer will return a
// nonRecordingSpan by default.
var autoInstEnabled = new(bool)
func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) {
// autoInstEnabled is passed to newSpan via the autoSpan parameter. This is
// so the auto-instrumentation can define a uprobe for (*t).newSpan and be
// provided with the address of the bool autoInstEnabled points to. It
// needs to be a parameter so that the pointer can be reliably determined; it
// should not be read from the global.
if *autoSpan {
tracer := sdk.TracerProvider().Tracer(t.name, t.opts...)
return tracer.Start(ctx, name, opts...)
}
s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t}
ctx = trace.ContextWithSpan(ctx, s)
return ctx, s
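Editorial note (not part of the vendored change): until a real `TracerProvider` is installed (or the auto-instrumentation flips `autoInstEnabled`), the global tracer returns the non-recording span created above. A small hedged sketch; the scope name is illustrative:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
)

func main() {
	tr := otel.Tracer("github.com/example/app")
	_, span := tr.Start(context.Background(), "startup")
	defer span.End()

	// Before an SDK is installed the span does not record, and with no
	// incoming span context it is also not valid for propagation.
	fmt.Println("recording:", span.IsRecording())        // false
	fmt.Println("valid:", span.SpanContext().IsValid())  // false
}
```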

View File

@ -1,7 +1,7 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended"
"config:best-practices"
],
"ignorePaths": [],
"labels": ["Skip Changelog", "dependencies"],
@ -15,10 +15,8 @@
"enabled": true
},
{
"matchFileNames": ["internal/tools/**"],
"matchManagers": ["gomod"],
"matchDepTypes": ["indirect"],
"enabled": false
"matchPackageNames": ["go.opentelemetry.io/build-tools/**"],
"groupName": "build-tools"
},
{
"matchPackageNames": ["google.golang.org/genproto/googleapis/**"],

View File

@ -1 +1 @@
codespell==2.3.0
codespell==2.4.1

View File

@ -3,6 +3,8 @@
package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
import "go.opentelemetry.io/otel/attribute"
// Scope represents the instrumentation scope.
type Scope struct {
// Name is the name of the instrumentation scope. This should be the
@ -12,4 +14,6 @@ type Scope struct {
Version string
// SchemaURL of the telemetry emitted by the scope.
SchemaURL string
// Attributes of the telemetry emitted by the scope.
Attributes attribute.Set
}
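Editorial note (not part of the vendored change): the new `Attributes` field is populated from the instrumentation attributes passed when a Meter or Tracer is created. A hedged sketch using public options; the scope name and attribute are illustrative:

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// These attributes end up in instrumentation.Scope.Attributes for the
	// telemetry produced by the returned Meter and Tracer.
	m := otel.Meter("github.com/example/app",
		metric.WithInstrumentationAttributes(attribute.String("tenant", "demo")),
	)
	t := otel.Tracer("github.com/example/app",
		trace.WithInstrumentationAttributes(attribute.String("tenant", "demo")),
	)
	_, _ = m, t
}
```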

View File

@ -5,18 +5,22 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
import (
"context"
"fmt"
"errors"
"os"
"strings"
"sync"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/sdk/metric/exemplar"
"go.opentelemetry.io/otel/sdk/resource"
)
// config contains configuration options for a MeterProvider.
type config struct {
res *resource.Resource
readers []Reader
views []View
res *resource.Resource
readers []Reader
views []View
exemplarFilter exemplar.Filter
}
// readerSignals returns a force-flush and shutdown function for a
@ -40,25 +44,13 @@ func (c config) readerSignals() (forceFlush, shutdown func(context.Context) erro
// value.
func unify(funcs []func(context.Context) error) func(context.Context) error {
return func(ctx context.Context) error {
var errs []error
var err error
for _, f := range funcs {
if err := f(ctx); err != nil {
errs = append(errs, err)
if e := f(ctx); e != nil {
err = errors.Join(err, e)
}
}
return unifyErrors(errs)
}
}
// unifyErrors combines multiple errors into a single error.
func unifyErrors(errs []error) error {
switch len(errs) {
case 0:
return nil
case 1:
return errs[0]
default:
return fmt.Errorf("%v", errs)
return err
}
}
@ -76,7 +68,13 @@ func unifyShutdown(funcs []func(context.Context) error) func(context.Context) er
// newConfig returns a config configured with options.
func newConfig(options []Option) config {
conf := config{res: resource.Default()}
conf := config{
res: resource.Default(),
exemplarFilter: exemplar.TraceBasedFilter,
}
for _, o := range meterProviderOptionsFromEnv() {
conf = o.apply(conf)
}
for _, o := range options {
conf = o.apply(conf)
}
@ -140,3 +138,35 @@ func WithView(views ...View) Option {
return cfg
})
}
// WithExemplarFilter configures the exemplar filter.
//
// The exemplar filter determines which measurements are offered to the
// exemplar reservoir, but the exemplar reservoir makes the final decision of
// whether to store an exemplar.
//
// By default, the [exemplar.TraceBasedFilter]
// is used. Exemplars can be entirely disabled by providing the
// [exemplar.AlwaysOffFilter].
func WithExemplarFilter(filter exemplar.Filter) Option {
return optionFunc(func(cfg config) config {
cfg.exemplarFilter = filter
return cfg
})
}
func meterProviderOptionsFromEnv() []Option {
var opts []Option
// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER"
switch strings.ToLower(strings.TrimSpace(os.Getenv(filterEnvKey))) {
case "always_on":
opts = append(opts, WithExemplarFilter(exemplar.AlwaysOnFilter))
case "always_off":
opts = append(opts, WithExemplarFilter(exemplar.AlwaysOffFilter))
case "trace_based":
opts = append(opts, WithExemplarFilter(exemplar.TraceBasedFilter))
}
return opts
}
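Editorial note (not part of the vendored change): `meterProviderOptionsFromEnv` is applied before user options, so an explicit `WithExemplarFilter` overrides `OTEL_METRICS_EXEMPLAR_FILTER`. A hedged configuration sketch:

```go
package main

import (
	"context"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

func main() {
	// Disable exemplars regardless of the environment variable.
	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewManualReader()),
		sdkmetric.WithExemplarFilter(exemplar.AlwaysOffFilter),
	)
	defer func() { _ = mp.Shutdown(context.Background()) }()
}
```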

View File

@ -4,48 +4,49 @@
package metric // import "go.opentelemetry.io/otel/sdk/metric"
import (
"os"
"runtime"
"slices"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/exemplar"
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
)
// ExemplarReservoirProviderSelector selects the
// [exemplar.ReservoirProvider] to use
// based on the [Aggregation] of the metric.
type ExemplarReservoirProviderSelector func(Aggregation) exemplar.ReservoirProvider
// reservoirFunc returns the appropriately configured exemplar reservoir
// creation func based on the passed InstrumentKind and user defined
// environment variables.
//
// Note: This will only return non-nil values when the experimental exemplar
// feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable
// is not set to always_off.
func reservoirFunc[N int64 | float64](agg Aggregation) func() aggregate.FilteredExemplarReservoir[N] {
// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER"
var filter exemplar.Filter
switch os.Getenv(filterEnvKey) {
case "always_on":
filter = exemplar.AlwaysOnFilter
case "always_off":
return aggregate.DropReservoir
case "trace_based":
fallthrough
default:
filter = exemplar.TraceBasedFilter
// creation func based on the passed InstrumentKind and filter configuration.
func reservoirFunc[N int64 | float64](provider exemplar.ReservoirProvider, filter exemplar.Filter) func(attribute.Set) aggregate.FilteredExemplarReservoir[N] {
return func(attrs attribute.Set) aggregate.FilteredExemplarReservoir[N] {
return aggregate.NewFilteredExemplarReservoir[N](filter, provider(attrs))
}
}
// DefaultExemplarReservoirProviderSelector returns the default
// [exemplar.ReservoirProvider] for the
// provided [Aggregation].
//
// For explicit bucket histograms with more than 1 bucket, it uses the
// [exemplar.HistogramReservoirProvider].
// For exponential histograms, it uses the
// [exemplar.FixedSizeReservoirProvider]
// with a size of min(20, max_buckets).
// For all other aggregations, it uses the
// [exemplar.FixedSizeReservoirProvider]
// with a size equal to the number of CPUs.
//
// Exemplar default reservoirs MAY change in a minor version bump. No
// guarantees are made on the shape or statistical properties of returned
// exemplars.
func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.ReservoirProvider {
// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
// Explicit bucket histogram aggregation with more than 1 bucket will
// use AlignedHistogramBucketExemplarReservoir.
a, ok := agg.(AggregationExplicitBucketHistogram)
if ok && len(a.Boundaries) > 0 {
cp := slices.Clone(a.Boundaries)
return func() aggregate.FilteredExemplarReservoir[N] {
bounds := cp
return aggregate.NewFilteredExemplarReservoir[N](filter, exemplar.NewHistogramReservoir(bounds))
}
return exemplar.HistogramReservoirProvider(a.Boundaries)
}
var n int
@ -72,7 +73,5 @@ func reservoirFunc[N int64 | float64](agg Aggregation) func() aggregate.Filtered
}
}
return func() aggregate.FilteredExemplarReservoir[N] {
return aggregate.NewFilteredExemplarReservoir[N](filter, exemplar.NewFixedSizeReservoir(n))
}
return exemplar.FixedSizeReservoirProvider(n)
}
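Editorial note (not part of the vendored change): the selector above can be overridden per stream through a View. A hedged sketch using the new `Stream.ExemplarReservoirProviderSelector` field and `exemplar.FixedSizeReservoirProvider`; the instrument name is illustrative:

```go
package main

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

func main() {
	// Keep at most four exemplars per attribute set for this instrument;
	// unmatched streams fall back to DefaultExemplarReservoirProviderSelector.
	view := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "request.latency"},
		sdkmetric.Stream{
			ExemplarReservoirProviderSelector: func(sdkmetric.Aggregation) exemplar.ReservoirProvider {
				return exemplar.FixedSizeReservoirProvider(4)
			},
		},
	)
	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewManualReader()),
		sdkmetric.WithView(view),
	)
	_ = mp
}
```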

View File

@ -27,3 +27,8 @@ func TraceBasedFilter(ctx context.Context) bool {
func AlwaysOnFilter(ctx context.Context) bool {
return true
}
// AlwaysOffFilter is a [Filter] that never offers measurements.
func AlwaysOffFilter(ctx context.Context) bool {
return false
}

View File

@ -12,6 +12,13 @@ import (
"go.opentelemetry.io/otel/attribute"
)
// FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir].
func FixedSizeReservoirProvider(k int) ReservoirProvider {
return func(_ attribute.Set) Reservoir {
return NewFixedSizeReservoir(k)
}
}
// NewFixedSizeReservoir returns a [FixedSizeReservoir] that samples at most
// k exemplars. If there are k or less measurements made, the Reservoir will
// sample each one. If there are more than k, the Reservoir will then randomly

View File

@ -12,13 +12,21 @@ import (
"go.opentelemetry.io/otel/attribute"
)
// HistogramReservoirProvider is a provider of [HistogramReservoir].
func HistogramReservoirProvider(bounds []float64) ReservoirProvider {
cp := slices.Clone(bounds)
slices.Sort(cp)
return func(_ attribute.Set) Reservoir {
return NewHistogramReservoir(cp)
}
}
// NewHistogramReservoir returns a [HistogramReservoir] that samples the last
// measurement that falls within a histogram bucket. The histogram bucket
// upper-boundaries are defined by bounds.
//
// The passed bounds will be sorted by this function.
// The passed bounds must be sorted before calling this function.
func NewHistogramReservoir(bounds []float64) *HistogramReservoir {
slices.Sort(bounds)
return &HistogramReservoir{
bounds: bounds,
storage: newStorage(len(bounds) + 1),

View File

@ -30,3 +30,11 @@ type Reservoir interface {
// The Reservoir state is preserved after this call.
Collect(dest *[]Exemplar)
}
// ReservoirProvider creates new [Reservoir]s.
//
// The attributes provided are attributes which are kept by the aggregation, and
// are exclusive with attributes passed to Offer. The combination of these
// attributes and the attributes passed to Offer is the complete set of
// attributes a measurement was made with.
type ReservoirProvider func(attr attribute.Set) Reservoir
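Editorial note (not part of the vendored change): because a `ReservoirProvider` receives the attribute set kept by the aggregation, a custom provider can size reservoirs per attribute set. A hedged sketch; `tieredReservoirProvider` and the `priority` attribute are illustrative:

```go
package sketch

import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

// tieredReservoirProvider keeps more exemplars for high-priority attribute
// sets than for everything else.
func tieredReservoirProvider(attrs attribute.Set) exemplar.Reservoir {
	if v, ok := attrs.Value("priority"); ok && v.AsString() == "high" {
		return exemplar.NewFixedSizeReservoir(16)
	}
	return exemplar.NewFixedSizeReservoir(2)
}

// Compile-time check that the function satisfies ReservoirProvider.
var _ exemplar.ReservoirProvider = tieredReservoirProvider
```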

View File

@ -5,14 +5,14 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
import (
"context"
"fmt"
"errors"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// ErrExporterShutdown is returned if Export or Shutdown are called after an
// Exporter has been Shutdown.
var ErrExporterShutdown = fmt.Errorf("exporter is shutdown")
var ErrExporterShutdown = errors.New("exporter is shutdown")
// Exporter handles the delivery of metric data to external receivers. This is
// the final component in the metric push pipeline.

View File

@ -16,6 +16,7 @@ import (
"go.opentelemetry.io/otel/metric/embedded"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
"go.opentelemetry.io/otel/sdk/metric/internal/x"
)
var zeroScope instrumentation.Scope
@ -144,6 +145,12 @@ type Stream struct {
// Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to
// provide an allow-list of attribute keys here.
AttributeFilter attribute.Filter
// ExemplarReservoirProvider selects the
// [go.opentelemetry.io/otel/sdk/metric/exemplar.ReservoirProvider] based
// on the [Aggregation].
//
// If unspecified, [DefaultExemplarReservoirProviderSelector] is used.
ExemplarReservoirProviderSelector ExemplarReservoirProviderSelector
}
// instID are the identifying properties of an instrument.
@ -184,6 +191,7 @@ var (
_ metric.Int64UpDownCounter = (*int64Inst)(nil)
_ metric.Int64Histogram = (*int64Inst)(nil)
_ metric.Int64Gauge = (*int64Inst)(nil)
_ x.EnabledInstrument = (*int64Inst)(nil)
)
func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) {
@ -196,6 +204,10 @@ func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.Record
i.aggregate(ctx, val, c.Attributes())
}
func (i *int64Inst) Enabled(_ context.Context) bool {
return len(i.measures) != 0
}
func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method.
for _, in := range i.measures {
in(ctx, val, s)
@ -216,6 +228,7 @@ var (
_ metric.Float64UpDownCounter = (*float64Inst)(nil)
_ metric.Float64Histogram = (*float64Inst)(nil)
_ metric.Float64Gauge = (*float64Inst)(nil)
_ x.EnabledInstrument = (*float64Inst)(nil)
)
func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) {
@ -228,6 +241,10 @@ func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.Re
i.aggregate(ctx, val, c.Attributes())
}
func (i *float64Inst) Enabled(_ context.Context) bool {
return len(i.measures) != 0
}
func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) {
for _, in := range i.measures {
in(ctx, val, s)

View File

@ -37,8 +37,8 @@ type Builder[N int64 | float64] struct {
// create new exemplar reservoirs for a new seen attribute set.
//
// If this is not provided a default factory function that returns an
// DropReservoir reservoir will be used.
ReservoirFunc func() FilteredExemplarReservoir[N]
// dropReservoir reservoir will be used.
ReservoirFunc func(attribute.Set) FilteredExemplarReservoir[N]
// AggregationLimit is the cardinality limit of measurement attributes. Any
// measurement for new attributes once the limit has been reached will be
// aggregated into a single aggregate for the "otel.metric.overflow"
@ -49,12 +49,12 @@ type Builder[N int64 | float64] struct {
AggregationLimit int
}
func (b Builder[N]) resFunc() func() FilteredExemplarReservoir[N] {
func (b Builder[N]) resFunc() func(attribute.Set) FilteredExemplarReservoir[N] {
if b.ReservoirFunc != nil {
return b.ReservoirFunc
}
return DropReservoir
return dropReservoir
}
type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)

View File

@ -10,8 +10,10 @@ import (
"go.opentelemetry.io/otel/sdk/metric/exemplar"
)
// DropReservoir returns a [FilteredReservoir] that drops all measurements it is offered.
func DropReservoir[N int64 | float64]() FilteredExemplarReservoir[N] { return &dropRes[N]{} }
// dropReservoir returns a [FilteredReservoir] that drops all measurements it is offered.
func dropReservoir[N int64 | float64](attribute.Set) FilteredExemplarReservoir[N] {
return &dropRes[N]{}
}
type dropRes[N int64 | float64] struct{}
@ -20,5 +22,6 @@ func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {}
// Collect resets dest. No exemplars will ever be returned.
func (r *dropRes[N]) Collect(dest *[]exemplar.Exemplar) {
clear(*dest) // Erase elements to let GC collect objects
*dest = (*dest)[:0]
}

View File

@ -17,6 +17,7 @@ var exemplarPool = sync.Pool{
func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) {
dest := exemplarPool.Get().(*[]exemplar.Exemplar)
defer func() {
clear(*dest) // Erase elements to let GC collect objects.
*dest = (*dest)[:0]
exemplarPool.Put(dest)
}()

View File

@ -48,18 +48,18 @@ type expoHistogramDataPoint[N int64 | float64] struct {
zeroCount uint64
}
func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] { // nolint:revive // we need this control flag
f := math.MaxFloat64
max := N(f) // if N is int64, max will overflow to -9223372036854775808
min := N(-f)
ma := N(f) // if N is int64, max will overflow to -9223372036854775808
mi := N(-f)
if N(maxInt64) > N(f) {
max = N(maxInt64)
min = N(minInt64)
ma = N(maxInt64)
mi = N(minInt64)
}
return &expoHistogramDataPoint[N]{
attrs: attrs,
min: max,
max: min,
min: ma,
max: mi,
maxSize: maxSize,
noMinMax: noMinMax,
noSum: noSum,
@ -283,7 +283,7 @@ func (b *expoBuckets) downscale(delta int32) {
// newExponentialHistogram returns an Aggregator that summarizes a set of
// measurements as an exponential histogram. Each histogram is scoped by attributes
// and the aggregation cycle the measurements were made in.
func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() FilteredExemplarReservoir[N]) *expoHistogram[N] {
func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *expoHistogram[N] {
return &expoHistogram[N]{
noSum: noSum,
noMinMax: noMinMax,
@ -306,7 +306,7 @@ type expoHistogram[N int64 | float64] struct {
maxSize int
maxScale int32
newRes func() FilteredExemplarReservoir[N]
newRes func(attribute.Set) FilteredExemplarReservoir[N]
limit limiter[*expoHistogramDataPoint[N]]
values map[attribute.Distinct]*expoHistogramDataPoint[N]
valuesMu sync.Mutex
@ -327,7 +327,7 @@ func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attrib
v, ok := e.values[attr.Equivalent()]
if !ok {
v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum)
v.res = e.newRes()
v.res = e.newRes(attr)
e.values[attr.Equivalent()] = v
}

View File

@ -47,13 +47,13 @@ type histValues[N int64 | float64] struct {
noSum bool
bounds []float64
newRes func() FilteredExemplarReservoir[N]
newRes func(attribute.Set) FilteredExemplarReservoir[N]
limit limiter[*buckets[N]]
values map[attribute.Distinct]*buckets[N]
valuesMu sync.Mutex
}
func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() FilteredExemplarReservoir[N]) *histValues[N] {
func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histValues[N] {
// The responsibility of keeping all buckets correctly associated with the
// passed boundaries is ultimately this type's responsibility. Make a copy
// here so we can always guarantee this. Or, in the case of failure, have
@ -93,7 +93,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute
//
// buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
b = newBuckets[N](attr, len(s.bounds)+1)
b.res = s.newRes()
b.res = s.newRes(attr)
// Ensure min and max are recorded values (not zero), for new buckets.
b.min, b.max = value, value
@ -108,7 +108,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute
// newHistogram returns an Aggregator that summarizes a set of measurements as
// a histogram.
func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() FilteredExemplarReservoir[N]) *histogram[N] {
func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histogram[N] {
return &histogram[N]{
histValues: newHistValues[N](boundaries, noSum, limit, r),
noMinMax: noMinMax,

View File

@ -19,7 +19,7 @@ type datapoint[N int64 | float64] struct {
res FilteredExemplarReservoir[N]
}
func newLastValue[N int64 | float64](limit int, r func() FilteredExemplarReservoir[N]) *lastValue[N] {
func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *lastValue[N] {
return &lastValue[N]{
newRes: r,
limit: newLimiter[datapoint[N]](limit),
@ -32,7 +32,7 @@ func newLastValue[N int64 | float64](limit int, r func() FilteredExemplarReservo
type lastValue[N int64 | float64] struct {
sync.Mutex
newRes func() FilteredExemplarReservoir[N]
newRes func(attribute.Set) FilteredExemplarReservoir[N]
limit limiter[datapoint[N]]
values map[attribute.Distinct]datapoint[N]
start time.Time
@ -45,7 +45,7 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.
attr := s.limit.Attributes(fltrAttr, s.values)
d, ok := s.values[attr.Equivalent()]
if !ok {
d.res = s.newRes()
d.res = s.newRes(attr)
}
d.attrs = attr
@ -114,7 +114,7 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) in
// newPrecomputedLastValue returns an aggregator that summarizes a set of
// observations as the last one made.
func newPrecomputedLastValue[N int64 | float64](limit int, r func() FilteredExemplarReservoir[N]) *precomputedLastValue[N] {
func newPrecomputedLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedLastValue[N] {
return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)}
}

View File

@ -21,12 +21,12 @@ type sumValue[N int64 | float64] struct {
// valueMap is the storage for sums.
type valueMap[N int64 | float64] struct {
sync.Mutex
newRes func() FilteredExemplarReservoir[N]
newRes func(attribute.Set) FilteredExemplarReservoir[N]
limit limiter[sumValue[N]]
values map[attribute.Distinct]sumValue[N]
}
func newValueMap[N int64 | float64](limit int, r func() FilteredExemplarReservoir[N]) *valueMap[N] {
func newValueMap[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *valueMap[N] {
return &valueMap[N]{
newRes: r,
limit: newLimiter[sumValue[N]](limit),
@ -41,7 +41,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S
attr := s.limit.Attributes(fltrAttr, s.values)
v, ok := s.values[attr.Equivalent()]
if !ok {
v.res = s.newRes()
v.res = s.newRes(attr)
}
v.attrs = attr
@ -54,7 +54,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S
// newSum returns an aggregator that summarizes a set of measurements as their
// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
// the measurements were made in.
func newSum[N int64 | float64](monotonic bool, limit int, r func() FilteredExemplarReservoir[N]) *sum[N] {
func newSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *sum[N] {
return &sum[N]{
valueMap: newValueMap[N](limit, r),
monotonic: monotonic,
@ -143,7 +143,7 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
// newPrecomputedSum returns an aggregator that summarizes a set of
// observations as their arithmetic sum. Each sum is scoped by attributes and
// the aggregation cycle the measurements were made in.
func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() FilteredExemplarReservoir[N]) *precomputedSum[N] {
func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedSum[N] {
return &precomputedSum[N]{
valueMap: newValueMap[N](limit, r),
monotonic: monotonic,

View File

@ -10,6 +10,7 @@ See the [Compatibility and Stability](#compatibility-and-stability) section for
- [Cardinality Limit](#cardinality-limit)
- [Exemplars](#exemplars)
- [Instrument Enabled](#instrument-enabled)
### Cardinality Limit
@ -102,6 +103,24 @@ Revert to the default exemplar filter (`"trace_based"`)
unset OTEL_METRICS_EXEMPLAR_FILTER
```
### Instrument Enabled
To help users avoid performing computationally expensive operations when recording measurements, synchronous instruments provide an `Enabled` method.
#### Examples
The following example shows how to check whether an instrument implements the `EnabledInstrument` interface and use its `Enabled` method to skip an expensive computation when the measurement would not be recorded:

```go
type enabledInstrument interface{ Enabled(context.Context) bool }

ctr, err := m.Int64Counter("expensive-counter")
if err != nil {
	// handle error
}
c, ok := ctr.(enabledInstrument)
if !ok || c.Enabled(context.Background()) {
	ctr.Add(context.Background(), expensiveComputation())
}
```
## Compatibility and Stability
Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).

View File

@ -8,6 +8,7 @@
package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
import (
"context"
"os"
"strconv"
)
@ -67,3 +68,14 @@ func (f Feature[T]) Enabled() bool {
_, ok := f.Lookup()
return ok
}
// EnabledInstrument informs whether the instrument is enabled.
//
// EnabledInstrument interface is implemented by synchronous instruments.
type EnabledInstrument interface {
// Enabled returns whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
}

View File

@ -113,18 +113,17 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr
if err != nil {
return err
}
var errs []error
for _, producer := range mr.externalProducers.Load().([]Producer) {
externalMetrics, err := producer.Produce(ctx)
if err != nil {
errs = append(errs, err)
externalMetrics, e := producer.Produce(ctx)
if e != nil {
err = errors.Join(err, e)
}
rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
}
global.Debug("ManualReader collection", "Data", rm)
return unifyErrors(errs)
return err
}
// MarshalLog returns logging data about the ManualReader.

View File

@ -150,6 +150,11 @@ func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int6
continue
}
inst.appendMeasures(in)
// Add the measures to the pipeline. It is required to maintain
// measures per pipeline to avoid calling the measure that
// is not part of the pipeline.
insert.pipeline.addInt64Measure(inst.observableID, in)
for _, cback := range callbacks {
inst := int64Observer{measures: in}
fn := cback
@ -309,6 +314,11 @@ func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Fl
continue
}
inst.appendMeasures(in)
// Add the measures to the pipeline. It is required to maintain
// measures per pipeline to avoid calling the measure that
// is not part of the pipeline.
insert.pipeline.addFloat64Measure(inst.observableID, in)
for _, cback := range callbacks {
inst := float64Observer{measures: in}
fn := cback
@ -441,68 +451,75 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable)
return noopRegister{}, nil
}
reg := newObserver()
var errs multierror
var err error
validInstruments := make([]metric.Observable, 0, len(insts))
for _, inst := range insts {
// Unwrap any global.
if u, ok := inst.(interface {
Unwrap() metric.Observable
}); ok {
inst = u.Unwrap()
}
switch o := inst.(type) {
case int64Observable:
if err := o.registerable(m); err != nil {
if !errors.Is(err, errEmptyAgg) {
errs.append(err)
if e := o.registerable(m); e != nil {
if !errors.Is(e, errEmptyAgg) {
err = errors.Join(err, e)
}
continue
}
reg.registerInt64(o.observableID)
validInstruments = append(validInstruments, inst)
case float64Observable:
if err := o.registerable(m); err != nil {
if !errors.Is(err, errEmptyAgg) {
errs.append(err)
if e := o.registerable(m); e != nil {
if !errors.Is(e, errEmptyAgg) {
err = errors.Join(err, e)
}
continue
}
reg.registerFloat64(o.observableID)
validInstruments = append(validInstruments, inst)
default:
// Instrument external to the SDK.
return nil, fmt.Errorf("invalid observable: from different implementation")
return nil, errors.New("invalid observable: from different implementation")
}
}
err := errs.errorOrNil()
if reg.len() == 0 {
if len(validInstruments) == 0 {
// All insts use drop aggregation or are invalid.
return noopRegister{}, err
}
// Some or all instruments were valid.
cback := func(ctx context.Context) error { return f(ctx, reg) }
return m.pipes.registerMultiCallback(cback), err
unregs := make([]func(), len(m.pipes))
for ix, pipe := range m.pipes {
reg := newObserver(pipe)
for _, inst := range validInstruments {
switch o := inst.(type) {
case int64Observable:
reg.registerInt64(o.observableID)
case float64Observable:
reg.registerFloat64(o.observableID)
}
}
// Some or all instruments were valid.
cBack := func(ctx context.Context) error { return f(ctx, reg) }
unregs[ix] = pipe.addMultiCallback(cBack)
}
return unregisterFuncs{f: unregs}, err
}
type observer struct {
embedded.Observer
pipe *pipeline
float64 map[observableID[float64]]struct{}
int64 map[observableID[int64]]struct{}
}
func newObserver() observer {
func newObserver(p *pipeline) observer {
return observer{
pipe: p,
float64: make(map[observableID[float64]]struct{}),
int64: make(map[observableID[int64]]struct{}),
}
}
func (r observer) len() int {
return len(r.float64) + len(r.int64)
}
func (r observer) registerFloat64(id observableID[float64]) {
r.float64[id] = struct{}{}
}
@ -521,16 +538,6 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...
switch conv := o.(type) {
case float64Observable:
oImpl = conv
case interface {
Unwrap() metric.Observable
}:
// Unwrap any global.
async := conv.Unwrap()
var ok bool
if oImpl, ok = async.(float64Observable); !ok {
global.Error(errUnknownObserver, "failed to record asynchronous")
return
}
default:
global.Error(errUnknownObserver, "failed to record")
return
@ -548,7 +555,12 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...
return
}
c := metric.NewObserveConfig(opts)
oImpl.observe(v, c.Attributes())
// Access to r.pipe.float64Measures is already guarded by a lock in pipeline.produce.
// TODO (#5946): Refactor pipeline and observable measures.
measures := r.pipe.float64Measures[oImpl.observableID]
for _, m := range measures {
m(context.Background(), v, c.Attributes())
}
}
func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) {
@ -556,16 +568,6 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric
switch conv := o.(type) {
case int64Observable:
oImpl = conv
case interface {
Unwrap() metric.Observable
}:
// Unwrap any global.
async := conv.Unwrap()
var ok bool
if oImpl, ok = async.(int64Observable); !ok {
global.Error(errUnknownObserver, "failed to record asynchronous")
return
}
default:
global.Error(errUnknownObserver, "failed to record")
return
@ -583,7 +585,12 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric
return
}
c := metric.NewObserveConfig(opts)
oImpl.observe(v, c.Attributes())
// Access to r.pipe.int64Measures is already guarded by a lock in pipeline.produce.
// TODO (#5946): Refactor pipeline and observable measures.
measures := r.pipe.int64Measures[oImpl.observableID]
for _, m := range measures {
m(context.Background(), v, c.Attributes())
}
}
type noopRegister struct{ embedded.Registration }
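Editorial note (not part of the vendored change): with the rewrite above, a multi-instrument callback is registered once per pipeline and each observation is routed to that pipeline's measures. A hedged usage sketch; scope and instrument names and the placeholder values are illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	mp := sdkmetric.NewMeterProvider(sdkmetric.WithReader(sdkmetric.NewManualReader()))
	meter := mp.Meter("github.com/example/app")

	goroutines, _ := meter.Int64ObservableGauge("app.goroutines")
	heap, _ := meter.Int64ObservableUpDownCounter("app.heap_objects")

	// One callback serves both observables across every configured reader.
	reg, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(goroutines, 8)
		o.ObserveInt64(heap, 1024)
		return nil
	}, goroutines, heap)
	if err != nil {
		panic(err)
	}
	// Stop observing when no longer needed.
	defer func() { _ = reg.Unregister() }()
}
```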

View File

@ -251,18 +251,17 @@ func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricd
if err != nil {
return err
}
var errs []error
for _, producer := range r.externalProducers.Load().([]Producer) {
externalMetrics, err := producer.Produce(ctx)
if err != nil {
errs = append(errs, err)
externalMetrics, e := producer.Produce(ctx)
if e != nil {
err = errors.Join(err, e)
}
rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
}
global.Debug("PeriodicReader collection", "Data", rm)
return unifyErrors(errs)
return err
}
// export exports metric data m using r's exporter.

View File

@ -8,14 +8,13 @@ import (
"context"
"errors"
"fmt"
"strings"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/embedded"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/metric/exemplar"
"go.opentelemetry.io/otel/sdk/metric/internal"
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
"go.opentelemetry.io/otel/sdk/metric/internal/x"
@ -38,14 +37,17 @@ type instrumentSync struct {
compAgg aggregate.ComputeAggregation
}
func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline {
func newPipeline(res *resource.Resource, reader Reader, views []View, exemplarFilter exemplar.Filter) *pipeline {
if res == nil {
res = resource.Empty()
}
return &pipeline{
resource: res,
reader: reader,
views: views,
resource: res,
reader: reader,
views: views,
int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{},
float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{},
exemplarFilter: exemplarFilter,
// aggregations is lazy allocated when needed.
}
}
@ -63,9 +65,26 @@ type pipeline struct {
views []View
sync.Mutex
aggregations map[instrumentation.Scope][]instrumentSync
callbacks []func(context.Context) error
multiCallbacks list.List
int64Measures map[observableID[int64]][]aggregate.Measure[int64]
float64Measures map[observableID[float64]][]aggregate.Measure[float64]
aggregations map[instrumentation.Scope][]instrumentSync
callbacks []func(context.Context) error
multiCallbacks list.List
exemplarFilter exemplar.Filter
}
// addInt64Measure adds a new int64 measure to the pipeline for each observer.
func (p *pipeline) addInt64Measure(id observableID[int64], m []aggregate.Measure[int64]) {
p.Lock()
defer p.Unlock()
p.int64Measures[id] = m
}
// addFloat64Measure adds a new float64 measure to the pipeline for each observer.
func (p *pipeline) addFloat64Measure(id observableID[float64], m []aggregate.Measure[float64]) {
p.Lock()
defer p.Unlock()
p.float64Measures[id] = m
}
// addSync adds the instrumentSync to pipeline p with scope. This method is not
@ -105,14 +124,15 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics)
p.Lock()
defer p.Unlock()
var errs multierror
var err error
for _, c := range p.callbacks {
// TODO make the callbacks parallel. ( #3034 )
if err := c(ctx); err != nil {
errs.append(err)
if e := c(ctx); e != nil {
err = errors.Join(err, e)
}
if err := ctx.Err(); err != nil {
rm.Resource = nil
clear(rm.ScopeMetrics) // Erase elements to let GC collect objects.
rm.ScopeMetrics = rm.ScopeMetrics[:0]
return err
}
@ -120,12 +140,13 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics)
for e := p.multiCallbacks.Front(); e != nil; e = e.Next() {
// TODO make the callbacks parallel. ( #3034 )
f := e.Value.(multiCallback)
if err := f(ctx); err != nil {
errs.append(err)
if e := f(ctx); e != nil {
err = errors.Join(err, e)
}
if err := ctx.Err(); err != nil {
// This means the context expired before we finished running callbacks.
rm.Resource = nil
clear(rm.ScopeMetrics) // Erase elements to let GC collect objects.
rm.ScopeMetrics = rm.ScopeMetrics[:0]
return err
}
@ -157,7 +178,7 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics)
rm.ScopeMetrics = rm.ScopeMetrics[:i]
return errs.errorOrNil()
return err
}
// inserter facilitates inserting of new instruments from a single scope into a
@ -219,7 +240,7 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation)
measures []aggregate.Measure[N]
)
errs := &multierror{wrapped: errCreatingAggregators}
var err error
seen := make(map[uint64]struct{})
for _, v := range i.pipeline.views {
stream, match := v(inst)
@ -227,9 +248,9 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation)
continue
}
matched = true
in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
if err != nil {
errs.append(err)
in, id, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
if e != nil {
err = errors.Join(err, e)
}
if in == nil { // Drop aggregation.
continue
@ -242,8 +263,12 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation)
measures = append(measures, in)
}
if err != nil {
err = errors.Join(errCreatingAggregators, err)
}
if matched {
return measures, errs.errorOrNil()
return measures, err
}
// Apply implicit default view if no explicit matched.
@ -252,15 +277,18 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation)
Description: inst.Description,
Unit: inst.Unit,
}
in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
if err != nil {
errs.append(err)
in, _, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
if e != nil {
if err == nil {
err = errCreatingAggregators
}
err = errors.Join(err, e)
}
if in != nil {
// Ensured to have not seen given matched was false.
measures = append(measures, in)
}
return measures, errs.errorOrNil()
return measures, err
}
// addCallback registers a single instrument callback to be run when
@ -329,6 +357,9 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum
// The view explicitly requested the default aggregation.
stream.Aggregation = DefaultAggregationSelector(kind)
}
if stream.ExemplarReservoirProviderSelector == nil {
stream.ExemplarReservoirProviderSelector = DefaultExemplarReservoirProviderSelector
}
if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil {
return nil, 0, fmt.Errorf(
@ -349,7 +380,7 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum
cv := i.aggregators.Lookup(normID, func() aggVal[N] {
b := aggregate.Builder[N]{
Temporality: i.pipeline.reader.temporality(kind),
ReservoirFunc: reservoirFunc[N](stream.Aggregation),
ReservoirFunc: reservoirFunc[N](stream.ExemplarReservoirProviderSelector(stream.Aggregation), i.pipeline.exemplarFilter),
}
b.Filter = stream.AttributeFilter
// A value less than or equal to zero will disable the aggregation
@ -552,24 +583,16 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error {
// measurement.
type pipelines []*pipeline
func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines {
func newPipelines(res *resource.Resource, readers []Reader, views []View, exemplarFilter exemplar.Filter) pipelines {
pipes := make([]*pipeline, 0, len(readers))
for _, r := range readers {
p := newPipeline(res, r, views)
p := newPipeline(res, r, views, exemplarFilter)
r.register(p)
pipes = append(pipes, p)
}
return pipes
}
func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration {
unregs := make([]func(), len(p))
for i, pipe := range p {
unregs[i] = pipe.addMultiCallback(c)
}
return unregisterFuncs{f: unregs}
}
type unregisterFuncs struct {
embedded.Registration
f []func()
@ -602,15 +625,15 @@ func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) reso
func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) {
var measures []aggregate.Measure[N]
errs := &multierror{}
var err error
for _, i := range r.inserters {
in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind))
if err != nil {
errs.append(err)
in, e := i.Instrument(id, i.readerDefaultAggregation(id.Kind))
if e != nil {
err = errors.Join(err, e)
}
measures = append(measures, in...)
}
return measures, errs.errorOrNil()
return measures, err
}
// HistogramAggregators returns the histogram Aggregators that must be updated by the instrument
@ -619,37 +642,18 @@ func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error)
func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) {
var measures []aggregate.Measure[N]
errs := &multierror{}
var err error
for _, i := range r.inserters {
agg := i.readerDefaultAggregation(id.Kind)
if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 {
histAgg.Boundaries = boundaries
agg = histAgg
}
in, err := i.Instrument(id, agg)
if err != nil {
errs.append(err)
in, e := i.Instrument(id, agg)
if e != nil {
err = errors.Join(err, e)
}
measures = append(measures, in...)
}
return measures, errs.errorOrNil()
}
type multierror struct {
wrapped error
errors []string
}
func (m *multierror) errorOrNil() error {
if len(m.errors) == 0 {
return nil
}
if m.wrapped == nil {
return errors.New(strings.Join(m.errors, "; "))
}
return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; "))
}
func (m *multierror) append(err error) {
m.errors = append(m.errors, err.Error())
return measures, err
}

View File

@ -42,7 +42,7 @@ func NewMeterProvider(options ...Option) *MeterProvider {
flush, sdown := conf.readerSignals()
mp := &MeterProvider{
pipes: newPipelines(conf.res, conf.readers, conf.views),
pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter),
forceFlush: flush,
shutdown: sdown,
}
@ -76,15 +76,17 @@ func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metri
c := metric.NewMeterConfig(options...)
s := instrumentation.Scope{
Name: name,
Version: c.InstrumentationVersion(),
SchemaURL: c.SchemaURL(),
Name: name,
Version: c.InstrumentationVersion(),
SchemaURL: c.SchemaURL(),
Attributes: c.InstrumentationAttributes(),
}
global.Info("Meter created",
"Name", s.Name,
"Version", s.Version,
"SchemaURL", s.SchemaURL,
"Attributes", s.Attributes,
)
return mp.meters.Lookup(s, func() *meter {

View File

@ -5,26 +5,26 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
import (
"context"
"fmt"
"errors"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// errDuplicateRegister is logged by a Reader when an attempt to registered it
// more than once occurs.
var errDuplicateRegister = fmt.Errorf("duplicate reader registration")
var errDuplicateRegister = errors.New("duplicate reader registration")
// ErrReaderNotRegistered is returned if Collect or Shutdown are called before
// the reader is registered with a MeterProvider.
var ErrReaderNotRegistered = fmt.Errorf("reader is not registered")
var ErrReaderNotRegistered = errors.New("reader is not registered")
// ErrReaderShutdown is returned if Collect or Shutdown are called after a
// reader has been Shutdown once.
var ErrReaderShutdown = fmt.Errorf("reader is shutdown")
var ErrReaderShutdown = errors.New("reader is shutdown")
// errNonPositiveDuration is logged when an environmental variable
// has non-positive value.
var errNonPositiveDuration = fmt.Errorf("non-positive duration")
var errNonPositiveDuration = errors.New("non-positive duration")
// Reader is the interface used between the SDK and an
// exporter. Control flow is bi-directional through the
@ -60,8 +60,8 @@ type Reader interface {
aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type.
// Collect gathers and returns all metric data related to the Reader from
// the SDK and stores it in out. An error is returned if this is called
// after Shutdown or if out is nil.
// the SDK and stores it in rm. An error is returned if this is called
// after Shutdown or if rm is nil.
//
// This method needs to be concurrent safe, and the cancellation of the
// passed context is expected to be honored.

View File

@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
// version is the current release version of the metric SDK in use.
func version() string {
return "1.31.0"
return "1.35.0"
}

View File

@ -96,11 +96,12 @@ func NewView(criteria Instrument, mask Stream) View {
return func(i Instrument) (Stream, bool) {
if matchFunc(i) {
return Stream{
Name: nonZero(mask.Name, i.Name),
Description: nonZero(mask.Description, i.Description),
Unit: nonZero(mask.Unit, i.Unit),
Aggregation: agg,
AttributeFilter: mask.AttributeFilter,
Name: nonZero(mask.Name, i.Name),
Description: nonZero(mask.Description, i.Description),
Unit: nonZero(mask.Unit, i.Unit),
Aggregation: agg,
AttributeFilter: mask.AttributeFilter,
ExemplarReservoirProviderSelector: mask.ExemplarReservoirProviderSelector,
}, true
}
return Stream{}, false

View File

@ -7,7 +7,6 @@ import (
"context"
"errors"
"fmt"
"strings"
)
// ErrPartialResource is returned by a detector when complete source
@ -57,62 +56,37 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
// these errors will be returned. Otherwise, nil is returned.
func detect(ctx context.Context, res *Resource, detectors []Detector) error {
var (
r *Resource
errs detectErrs
err error
r *Resource
err error
e error
)
for _, detector := range detectors {
if detector == nil {
continue
}
r, err = detector.Detect(ctx)
if err != nil {
errs = append(errs, err)
if !errors.Is(err, ErrPartialResource) {
r, e = detector.Detect(ctx)
if e != nil {
err = errors.Join(err, e)
if !errors.Is(e, ErrPartialResource) {
continue
}
}
r, err = Merge(res, r)
if err != nil {
errs = append(errs, err)
r, e = Merge(res, r)
if e != nil {
err = errors.Join(err, e)
}
*res = *r
}
if len(errs) == 0 {
return nil
if err != nil {
if errors.Is(err, ErrSchemaURLConflict) {
// If there has been a merge conflict, ensure the resource has no
// schema URL.
res.schemaURL = ""
}
err = fmt.Errorf("error detecting resource: %w", err)
}
if errors.Is(errs, ErrSchemaURLConflict) {
// If there has been a merge conflict, ensure the resource has no
// schema URL.
res.schemaURL = ""
}
return errs
}
type detectErrs []error
func (e detectErrs) Error() string {
errStr := make([]string, len(e))
for i, err := range e {
errStr[i] = fmt.Sprintf("* %s", err)
}
format := "%d errors occurred detecting resource:\n\t%s"
return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t"))
}
func (e detectErrs) Unwrap() error {
switch len(e) {
case 0:
return nil
case 1:
return e[0]
}
return e[1:]
}
func (e detectErrs) Is(target error) bool {
return len(e) != 0 && errors.Is(e[0], target)
return err
}
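Editorial note (not part of the vendored change): with `detect` now joining detector errors, callers see a single error that still matches `ErrPartialResource` via `errors.Is`. A hedged usage sketch:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res, err := resource.New(context.Background(),
		resource.WithTelemetrySDK(),
		resource.WithHost(),
		resource.WithFromEnv(),
	)
	if err != nil {
		if errors.Is(err, resource.ErrPartialResource) {
			// Some detectors failed, but res still carries partial data.
			fmt.Println("using partial resource:", err)
		} else {
			fmt.Println("resource detection failed:", err)
		}
	}
	fmt.Println(res)
}
```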

View File

@ -20,15 +20,13 @@ type (
// telemetrySDK is a Detector that provides information about
// the OpenTelemetry SDK used. This Detector is included as a
// builtin. If these resource attributes are not wanted, use
// the WithTelemetrySDK(nil) or WithoutBuiltin() options to
// explicitly disable them.
// resource.New() to explicitly disable them.
telemetrySDK struct{}
// host is a Detector that provides information about the host
// being run on. This Detector is included as a builtin. If
// these resource attributes are not wanted, use the
// WithHost(nil) or WithoutBuiltin() options to explicitly
// disable them.
// resource.New() to explicitly disable them.
host struct{}
stringDetector struct {

View File

@ -5,6 +5,7 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource"
import (
"encoding/xml"
"errors"
"fmt"
"io"
"os"
@ -63,7 +64,7 @@ func parsePlistFile(file io.Reader) (map[string]string, error) {
}
if len(v.Dict.Key) != len(v.Dict.String) {
return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match")
return nil, errors.New("the number of <key> and <string> elements doesn't match")
}
properties := make(map[string]string, len(v.Dict.Key))

Some files were not shown because too many files have changed in this diff.