diff --git a/CHANGELOG.md b/CHANGELOG.md index d2dd844cfb6..2390da03225 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Add info and debug logging to the metric SDK. (#4315) - The `go.opentelemetry.io/otel/semconv/v1.21.0` package. The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362) +- Document that the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrent safe. (#4381) - Expand the set of units supported by the prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.op/otel/exporters/prometheus` (#4374) ### Changed @@ -38,7 +39,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - If an attribute set is Observed multiple times in an async callback, the values will be summed instead of the last observation winning. (#4289) - Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332) - Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333) -- `PeriodicReader.Shutdown` in `go.opentelemetry.io/otel/sdk/metric` now applies the periodic reader's timeout by default. (#4356) +- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377) ### Fixed diff --git a/bridge/opentracing/test/go.mod b/bridge/opentracing/test/go.mod index a6890d96be2..5fae281e791 100644 --- a/bridge/opentracing/test/go.mod +++ b/bridge/opentracing/test/go.mod @@ -14,7 +14,7 @@ require ( github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.16.0 go.opentelemetry.io/otel/bridge/opentracing v1.16.0 - google.golang.org/grpc v1.56.2 + google.golang.org/grpc v1.57.0 ) require ( @@ -28,7 +28,7 @@ require ( golang.org/x/net v0.9.0 // indirect golang.org/x/sys v0.10.0 // indirect golang.org/x/text v0.9.0 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/bridge/opentracing/test/go.sum b/bridge/opentracing/test/go.sum index 4ef826058b5..84b6433927a 100644 --- a/bridge/opentracing/test/go.sum +++ b/bridge/opentracing/test/go.sum @@ -51,11 +51,11 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= diff --git a/example/otel-collector/go.mod b/example/otel-collector/go.mod index c853a5090bd..a1e3e94e74e 100644 --- a/example/otel-collector/go.mod +++ b/example/otel-collector/go.mod @@ -12,7 +12,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 go.opentelemetry.io/otel/sdk v1.16.0 go.opentelemetry.io/otel/trace v1.16.0 - google.golang.org/grpc v1.56.2 + google.golang.org/grpc v1.57.0 ) require ( diff --git a/example/otel-collector/go.sum b/example/otel-collector/go.sum index e6c8f8ae224..5b599066e70 100644 --- a/example/otel-collector/go.sum +++ b/example/otel-collector/go.sum @@ -31,8 +31,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1: google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= diff --git a/exporters/otlp/otlpmetric/go.mod b/exporters/otlp/otlpmetric/go.mod index e6de4a05d04..dc15ccb98e4 100644 --- a/exporters/otlp/otlpmetric/go.mod +++ b/exporters/otlp/otlpmetric/go.mod @@ -10,7 +10,7 @@ require ( go.opentelemetry.io/otel/sdk v1.16.0 go.opentelemetry.io/otel/sdk/metric v0.39.0 go.opentelemetry.io/proto/otlp v1.0.0 - google.golang.org/grpc v1.56.2 + google.golang.org/grpc v1.57.0 google.golang.org/protobuf v1.31.0 ) diff --git a/exporters/otlp/otlpmetric/go.sum b/exporters/otlp/otlpmetric/go.sum index 5e842f2732d..e64a9be9ae2 100644 --- a/exporters/otlp/otlpmetric/go.sum +++ b/exporters/otlp/otlpmetric/go.sum @@ -40,8 +40,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1: google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= 
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod index e58dc4b5212..9da7b62bb1b 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod @@ -12,7 +12,7 @@ require ( go.opentelemetry.io/otel/sdk/metric v0.39.0 go.opentelemetry.io/proto/otlp v1.0.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc - google.golang.org/grpc v1.56.2 + google.golang.org/grpc v1.57.0 google.golang.org/protobuf v1.31.0 ) diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum index f50cfdefa80..395c2e0f273 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum @@ -37,8 +37,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1: google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod index 7c81578bfd6..93ad274ed7a 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod @@ -31,7 +31,7 @@ require ( golang.org/x/text v0.9.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect - google.golang.org/grpc v1.56.2 // indirect + google.golang.org/grpc v1.57.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum b/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum index f50cfdefa80..395c2e0f273 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum @@ -37,8 +37,8 @@ 
google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1: google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= diff --git a/exporters/otlp/otlptrace/go.mod b/exporters/otlp/otlptrace/go.mod index f14b3aca25e..0fb0bdc2eeb 100644 --- a/exporters/otlp/otlptrace/go.mod +++ b/exporters/otlp/otlptrace/go.mod @@ -10,7 +10,7 @@ require ( go.opentelemetry.io/otel/sdk v1.16.0 go.opentelemetry.io/otel/trace v1.16.0 go.opentelemetry.io/proto/otlp v1.0.0 - google.golang.org/grpc v1.56.2 + google.golang.org/grpc v1.57.0 google.golang.org/protobuf v1.31.0 ) diff --git a/exporters/otlp/otlptrace/go.sum b/exporters/otlp/otlptrace/go.sum index 5e842f2732d..e64a9be9ae2 100644 --- a/exporters/otlp/otlptrace/go.sum +++ b/exporters/otlp/otlptrace/go.sum @@ -40,8 +40,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1: google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= diff --git a/exporters/otlp/otlptrace/otlptracegrpc/go.mod b/exporters/otlp/otlptrace/otlptracegrpc/go.mod index f2f6d624c2e..d34741ad73b 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/go.mod +++ b/exporters/otlp/otlptrace/otlptracegrpc/go.mod @@ -11,7 +11,7 @@ require ( go.opentelemetry.io/proto/otlp v1.0.0 go.uber.org/goleak v1.2.1 google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc - google.golang.org/grpc v1.56.2 + google.golang.org/grpc v1.57.0 google.golang.org/protobuf v1.31.0 ) diff --git a/exporters/otlp/otlptrace/otlptracegrpc/go.sum b/exporters/otlp/otlptrace/otlptracegrpc/go.sum index 36b9e0b392a..0ec71e1edc1 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/go.sum +++ 
b/exporters/otlp/otlptrace/otlptracegrpc/go.sum @@ -38,8 +38,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1: google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= diff --git a/exporters/otlp/otlptrace/otlptracehttp/go.mod b/exporters/otlp/otlptrace/otlptracehttp/go.mod index e1701a39210..a681ca5632b 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/go.mod +++ b/exporters/otlp/otlptrace/otlptracehttp/go.mod @@ -27,7 +27,7 @@ require ( golang.org/x/text v0.9.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect - google.golang.org/grpc v1.56.2 // indirect + google.golang.org/grpc v1.57.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlptrace/otlptracehttp/go.sum b/exporters/otlp/otlptrace/otlptracehttp/go.sum index af8b14941b7..a361fa6f5a8 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/go.sum +++ b/exporters/otlp/otlptrace/otlptracehttp/go.sum @@ -36,8 +36,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1: google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= diff --git a/internal/tools/go.mod b/internal/tools/go.mod index 30e82b2be69..97b3643eae4 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -9,10 +9,10 @@ require ( github.com/itchyny/gojq v0.12.13 github.com/jcchavezs/porto v0.4.0 github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad - go.opentelemetry.io/build-tools/crosslink v0.9.0 - go.opentelemetry.io/build-tools/dbotconf v0.9.0 - go.opentelemetry.io/build-tools/multimod 
v0.9.0 - go.opentelemetry.io/build-tools/semconvgen v0.9.0 + go.opentelemetry.io/build-tools/crosslink v0.10.0 + go.opentelemetry.io/build-tools/dbotconf v0.10.0 + go.opentelemetry.io/build-tools/multimod v0.10.0 + go.opentelemetry.io/build-tools/semconvgen v0.10.0 golang.org/x/tools v0.11.0 ) @@ -63,7 +63,7 @@ require ( github.com/go-critic/go-critic v0.8.1 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.4.1 // indirect - github.com/go-git/go-git/v5 v5.7.0 // indirect + github.com/go-git/go-git/v5 v5.8.0 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect github.com/go-toolsmith/astequal v1.1.0 // indirect @@ -189,7 +189,7 @@ require ( github.com/yeya24/promlinter v0.2.0 // indirect github.com/ykadowak/zerologlint v0.1.2 // indirect gitlab.com/bosi/decorder v0.2.3 // indirect - go.opentelemetry.io/build-tools v0.9.0 // indirect + go.opentelemetry.io/build-tools v0.10.0 // indirect go.tmz.dev/musttag v0.7.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect diff --git a/internal/tools/go.sum b/internal/tools/go.sum index ece4c286518..efbe9f1b15b 100644 --- a/internal/tools/go.sum +++ b/internal/tools/go.sum @@ -167,8 +167,8 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmS github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4= github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8= -github.com/go-git/go-git/v5 v5.7.0 h1:t9AudWVLmqzlo+4bqdf7GY+46SUuRsx59SboFxkq2aE= -github.com/go-git/go-git/v5 v5.7.0/go.mod h1:coJHKEOk5kUClpsNlXrUvPrDxY3w3gjHvhcZd8Fodw8= +github.com/go-git/go-git/v5 v5.8.0 h1:Rc543s6Tyq+YcyPwZRvU4jzZGM8rB/wWu94TnTIYALQ= +github.com/go-git/go-git/v5 v5.8.0/go.mod h1:coJHKEOk5kUClpsNlXrUvPrDxY3w3gjHvhcZd8Fodw8= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -622,16 +622,16 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/build-tools v0.9.0 h1:P6WstNx1gmj3bMFJFrJThuxFQlKztxOSCPR/2hBgkUg= -go.opentelemetry.io/build-tools v0.9.0/go.mod h1:ZoM+TLPhEhTi09/nI9YKPlU563ocHoWrQXD994N5dMc= -go.opentelemetry.io/build-tools/crosslink v0.9.0 h1:LOeJzMxsxBG2qMKeO22fRs91QvDfY+BA5pF1skTjbx0= -go.opentelemetry.io/build-tools/crosslink v0.9.0/go.mod h1:VaSi2ahs+r+v//m6OpqTkD5siSFVta9eTHhKqPsfH/Q= -go.opentelemetry.io/build-tools/dbotconf v0.9.0 h1:lEIVW6aJYgVOt/Ifyiwf0gSQvENZSvO1Mfq+hMFLeek= -go.opentelemetry.io/build-tools/dbotconf v0.9.0/go.mod h1:z+oKEld1zmGI/thdGcF6eOB8Mj5Ahde01gyLhVzgss8= -go.opentelemetry.io/build-tools/multimod v0.9.0 h1:Im9PCGhfmKQC2XR0aTYzADNiOZLk9QEQgibDhadH+i0= -go.opentelemetry.io/build-tools/multimod v0.9.0/go.mod h1:9KdBtlVebuj00X4bIt6DX1zagilSzIQmkJo8XzQ9OTQ= 
-go.opentelemetry.io/build-tools/semconvgen v0.9.0 h1:Tsqd5KAVI2onoX+3Bjg7rD4kA8JdBxmxTUHcAZddYKw= -go.opentelemetry.io/build-tools/semconvgen v0.9.0/go.mod h1:2kP8f7p2ecJfxuC3z+bqg5bVWRVWVuDpxkzKIAvl+wA= +go.opentelemetry.io/build-tools v0.10.0 h1:5asgwud1lI/pMYQM9P/vwEgOjyv6G3nhYnwo0znqAvA= +go.opentelemetry.io/build-tools v0.10.0/go.mod h1:GFpz8YD/DG5shfY1J2f3uuK88zr61U5rVRGOhKMDE9M= +go.opentelemetry.io/build-tools/crosslink v0.10.0 h1:lyXOJP2z3G/o+e0w00q3vkzFZw48Av1yzPOV6JJXVMY= +go.opentelemetry.io/build-tools/crosslink v0.10.0/go.mod h1:B//a0+OAU8kjgLLJnxjDNnlGv8CJt9pF/BaGc0nChvU= +go.opentelemetry.io/build-tools/dbotconf v0.10.0 h1:BKk+Q2BoHqcA9lbrbVqYeQVHPJJb+jHePoO4TIyCy3Y= +go.opentelemetry.io/build-tools/dbotconf v0.10.0/go.mod h1:SOyfUbctj4X9QakCfGy/eTlYlFo/HTNtyjJs7hHP9gE= +go.opentelemetry.io/build-tools/multimod v0.10.0 h1:jYlpTXAJCZqjzYZdEdwdDIX90qnqQvZkgaPkMt9r6BA= +go.opentelemetry.io/build-tools/multimod v0.10.0/go.mod h1:jn2a5TTq9wSR9ragaruZ2ilqOV+VerkNtNPTqwtIMyM= +go.opentelemetry.io/build-tools/semconvgen v0.10.0 h1:enthiZ2ucgEh6GdSf2FjPJh+tW7lWTKzLPS+oxxWqDY= +go.opentelemetry.io/build-tools/semconvgen v0.10.0/go.mod h1:2vdk73CHKwYfwTWMR/hthGn/5Wbsk8jNiNTI/RthNt0= go.tmz.dev/musttag v0.7.0 h1:QfytzjTWGXZmChoX0L++7uQN+yRCPfyFm+whsM+lfGc= go.tmz.dev/musttag v0.7.0/go.mod h1:oTFPvgOkJmp5kYL02S8+jrH0eLrBIl57rzWeA26zDEM= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= diff --git a/sdk/metric/exporter.go b/sdk/metric/exporter.go index b4f7c7b72f9..7efb8bf2fbe 100644 --- a/sdk/metric/exporter.go +++ b/sdk/metric/exporter.go @@ -30,9 +30,15 @@ var ErrExporterShutdown = fmt.Errorf("exporter is shutdown") // the final component in the metric push pipeline. type Exporter interface { // Temporality returns the Temporality to use for an instrument kind. + // + // This method needs to be concurrent safe with itself and all the other + // Exporter methods. Temporality(InstrumentKind) metricdata.Temporality // Aggregation returns the Aggregation to use for an instrument kind. + // + // This method needs to be concurrent safe with itself and all the other + // Exporter methods. Aggregation(InstrumentKind) aggregation.Aggregation // Export serializes and transmits metric data to a receiver. diff --git a/sdk/metric/internal/aggregate/aggregate.go b/sdk/metric/internal/aggregate/aggregate.go index 76ea3636a27..a6cceb2c253 100644 --- a/sdk/metric/internal/aggregate/aggregate.go +++ b/sdk/metric/internal/aggregate/aggregate.go @@ -16,12 +16,17 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg import ( "context" + "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/aggregation" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) +// now is used to return the current local time while allowing tests to +// override the default time.Now function. +var now = time.Now + // Measure receives measurements to be aggregated. type Measure[N int64 | float64] func(context.Context, N, attribute.Set) @@ -53,19 +58,6 @@ func (b Builder[N]) filter(f Measure[N]) Measure[N] { return f } -func (b Builder[N]) input(agg aggregator[N]) Measure[N] { - if b.Filter != nil { - fltr := b.Filter // Copy to make it immutable after assignment. - return func(_ context.Context, n N, a attribute.Set) { - fAttr, _ := a.Filter(fltr) - agg.Aggregate(n, fAttr) - } - } - return func(_ context.Context, n N, a attribute.Set) { - agg.Aggregate(n, a) - } -} - // LastValue returns a last-value aggregate function input and output. 
// // The Builder.Temporality is ignored and delta is use always. @@ -111,19 +103,12 @@ func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) { // ExplicitBucketHistogram returns a histogram aggregate function input and // output. func (b Builder[N]) ExplicitBucketHistogram(cfg aggregation.ExplicitBucketHistogram, noSum bool) (Measure[N], ComputeAggregation) { - var h aggregator[N] + h := newHistogram[N](cfg, noSum) switch b.Temporality { case metricdata.DeltaTemporality: - h = newDeltaHistogram[N](cfg, noSum) + return b.filter(h.measure), h.delta default: - h = newCumulativeHistogram[N](cfg, noSum) - } - return b.input(h), func(dest *metricdata.Aggregation) int { - // TODO (#4220): optimize memory reuse here. - *dest = h.Aggregation() - - hData, _ := (*dest).(metricdata.Histogram[N]) - return len(hData.DataPoints) + return b.filter(h.measure), h.cumulative } } diff --git a/sdk/metric/internal/aggregate/aggregate_test.go b/sdk/metric/internal/aggregate/aggregate_test.go index 76ba3a26eaf..79031b40218 100644 --- a/sdk/metric/internal/aggregate/aggregate_test.go +++ b/sdk/metric/internal/aggregate/aggregate_test.go @@ -55,21 +55,12 @@ var ( } ) -type inputTester[N int64 | float64] struct { - aggregator[N] - - value N - attr attribute.Set +func TestBuilderFilter(t *testing.T) { + t.Run("Int64", testBuilderFilter[int64]()) + t.Run("Float64", testBuilderFilter[float64]()) } -func (it *inputTester[N]) Aggregate(v N, a attribute.Set) { it.value, it.attr = v, a } - -func TestBuilderInput(t *testing.T) { - t.Run("Int64", testBuilderInput[int64]()) - t.Run("Float64", testBuilderInput[float64]()) -} - -func testBuilderInput[N int64 | float64]() func(t *testing.T) { +func testBuilderFilter[N int64 | float64]() func(t *testing.T) { return func(t *testing.T) { t.Helper() @@ -78,12 +69,11 @@ func testBuilderInput[N int64 | float64]() func(t *testing.T) { return func(t *testing.T) { t.Helper() - it := &inputTester[N]{} - meas := b.input(it) + meas := b.filter(func(_ context.Context, v N, a attribute.Set) { + assert.Equal(t, value, v, "measured incorrect value") + assert.Equal(t, wantA, a, "measured incorrect attributes") + }) meas(context.Background(), value, attr) - - assert.Equal(t, value, it.value, "measured incorrect value") - assert.Equal(t, wantA, it.attr, "measured incorrect attributes") } } diff --git a/sdk/metric/internal/aggregate/aggregator.go b/sdk/metric/internal/aggregate/aggregator.go deleted file mode 100644 index fac0dfd901a..00000000000 --- a/sdk/metric/internal/aggregate/aggregator.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" - -import ( - "time" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/metricdata" -) - -// now is used to return the current local time while allowing tests to -// override the default time.Now function. 
-var now = time.Now - -// aggregator forms an aggregation from a collection of recorded measurements. -// -// Aggregators need to be comparable so they can be de-duplicated by the SDK -// when it creates them for multiple views. -type aggregator[N int64 | float64] interface { - // Aggregate records the measurement, scoped by attr, and aggregates it - // into an aggregation. - Aggregate(measurement N, attr attribute.Set) - - // Aggregation returns an Aggregation, for all the aggregated - // measurements made and ends an aggregation cycle. - Aggregation() metricdata.Aggregation -} diff --git a/sdk/metric/internal/aggregate/aggregator_test.go b/sdk/metric/internal/aggregate/aggregator_test.go deleted file mode 100644 index 4e16175159b..00000000000 --- a/sdk/metric/internal/aggregate/aggregator_test.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" - -import ( - "strconv" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/metricdata" - "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" -) - -const ( - defaultGoroutines = 5 - defaultMeasurements = 30 - defaultCycles = 3 -) - -var carol = attribute.NewSet(attribute.String("user", "carol"), attribute.Bool("admin", false)) - -func monoIncr[N int64 | float64]() setMap[N] { - return setMap[N]{alice: 1, bob: 10, carol: 2} -} - -// setMap maps attribute sets to a number. -type setMap[N int64 | float64] map[attribute.Set]N - -// expectFunc is a function that returns an Aggregation of expected values for -// a cycle that contains m measurements (total across all goroutines). Each -// call advances the cycle. -type expectFunc func(m int) metricdata.Aggregation - -// aggregatorTester runs an acceptance test on an Aggregator. It will ask an -// Aggregator to aggregate a set of values as if they were real measurements -// made MeasurementN number of times. This will be done in GoroutineN number -// of different goroutines. After the Aggregator has been asked to aggregate -// all these measurements, it is validated using a passed expecterFunc. This -// set of operation is a single cycle, and the the aggregatorTester will run -// CycleN number of cycles. -type aggregatorTester[N int64 | float64] struct { - // GoroutineN is the number of goroutines aggregatorTester will use to run - // the test with. - GoroutineN int - // MeasurementN is the number of measurements that are made each cycle a - // goroutine runs the test. - MeasurementN int - // CycleN is the number of times a goroutine will make a set of - // measurements. 
- CycleN int -} - -func (at *aggregatorTester[N]) Run(a aggregator[N], incr setMap[N], eFunc expectFunc) func(*testing.T) { - m := at.MeasurementN * at.GoroutineN - return func(t *testing.T) { - t.Run("Comparable", func(t *testing.T) { - assert.NotPanics(t, func() { - _ = map[aggregator[N]]struct{}{a: {}} - }) - }) - - t.Run("CorrectnessConcurrentSafe", func(t *testing.T) { - for i := 0; i < at.CycleN; i++ { - var wg sync.WaitGroup - wg.Add(at.GoroutineN) - for j := 0; j < at.GoroutineN; j++ { - go func() { - defer wg.Done() - for k := 0; k < at.MeasurementN; k++ { - for attrs, n := range incr { - a.Aggregate(N(n), attrs) - } - } - }() - } - wg.Wait() - - metricdatatest.AssertAggregationsEqual(t, eFunc(m), a.Aggregation()) - } - }) - } -} - -var bmarkResults metricdata.Aggregation - -func benchmarkAggregatorN[N int64 | float64](b *testing.B, factory func() aggregator[N], count int) { - attrs := make([]attribute.Set, count) - for i := range attrs { - attrs[i] = attribute.NewSet(attribute.Int("value", i)) - } - - b.Run("Aggregate", func(b *testing.B) { - agg := factory() - b.ReportAllocs() - b.ResetTimer() - - for n := 0; n < b.N; n++ { - for _, attr := range attrs { - agg.Aggregate(1, attr) - } - } - bmarkResults = agg.Aggregation() - }) - - b.Run("Aggregations", func(b *testing.B) { - aggs := make([]aggregator[N], b.N) - for n := range aggs { - a := factory() - for _, attr := range attrs { - a.Aggregate(1, attr) - } - aggs[n] = a - } - - b.ReportAllocs() - b.ResetTimer() - - for n := 0; n < b.N; n++ { - bmarkResults = aggs[n].Aggregation() - } - }) -} - -func benchmarkAggregator[N int64 | float64](factory func() aggregator[N]) func(*testing.B) { - counts := []int{1, 10, 100} - return func(b *testing.B) { - for _, n := range counts { - b.Run(strconv.Itoa(n), func(b *testing.B) { - benchmarkAggregatorN(b, factory, n) - }) - } - } -} diff --git a/sdk/metric/internal/aggregate/histogram.go b/sdk/metric/internal/aggregate/histogram.go index 0683ff2eb23..cd030076fdc 100644 --- a/sdk/metric/internal/aggregate/histogram.go +++ b/sdk/metric/internal/aggregate/histogram.go @@ -15,6 +15,7 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" import ( + "context" "sort" "sync" "time" @@ -75,7 +76,7 @@ func newHistValues[N int64 | float64](bounds []float64, noSum bool) *histValues[ // Aggregate records the measurement value, scoped by attr, and aggregates it // into a histogram. -func (s *histValues[N]) Aggregate(value N, attr attribute.Set) { +func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set) { // This search will return an index in the range [0, len(s.bounds)], where // it will return len(s.bounds) if value is greater than the last element // of s.bounds. This aligns with the buckets in that the length of buckets @@ -106,111 +107,93 @@ func (s *histValues[N]) Aggregate(value N, attr attribute.Set) { } } -// newDeltaHistogram returns an Aggregator that summarizes a set of -// measurements as an histogram. Each histogram is scoped by attributes and -// the aggregation cycle the measurements were made in. -// -// Each aggregation cycle is treated independently. When the returned -// Aggregator's Aggregations method is called it will reset all histogram -// counts to zero. -func newDeltaHistogram[N int64 | float64](cfg aggregation.ExplicitBucketHistogram, noSum bool) aggregator[N] { - return &deltaHistogram[N]{ +// newHistogram returns an Aggregator that summarizes a set of measurements as +// an histogram. 
+func newHistogram[N int64 | float64](cfg aggregation.ExplicitBucketHistogram, noSum bool) *histogram[N] { + return &histogram[N]{ histValues: newHistValues[N](cfg.Boundaries, noSum), noMinMax: cfg.NoMinMax, start: now(), } } -// deltaHistogram summarizes a set of measurements made in a single -// aggregation cycle as an histogram with explicitly defined buckets. -type deltaHistogram[N int64 | float64] struct { +// histogram summarizes a set of measurements as an histogram with explicitly +// defined buckets. +type histogram[N int64 | float64] struct { *histValues[N] noMinMax bool start time.Time } -func (s *deltaHistogram[N]) Aggregation() metricdata.Aggregation { +func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.Histogram, memory reuse is missed. In that + // case, use the zero-value h and hope for better alignment next cycle. + h, _ := (*dest).(metricdata.Histogram[N]) + h.Temporality = metricdata.DeltaTemporality + s.valuesMu.Lock() defer s.valuesMu.Unlock() - if len(s.values) == 0 { - return nil - } - - t := now() // Do not allow modification of our copy of bounds. bounds := make([]float64, len(s.bounds)) copy(bounds, s.bounds) - h := metricdata.Histogram[N]{ - Temporality: metricdata.DeltaTemporality, - DataPoints: make([]metricdata.HistogramDataPoint[N], 0, len(s.values)), - } + + n := len(s.values) + hDPts := reset(h.DataPoints, n, n) + + var i int for a, b := range s.values { - hdp := metricdata.HistogramDataPoint[N]{ - Attributes: a, - StartTime: s.start, - Time: t, - Count: b.count, - Bounds: bounds, - BucketCounts: b.counts, - } + hDPts[i].Attributes = a + hDPts[i].StartTime = s.start + hDPts[i].Time = t + hDPts[i].Count = b.count + hDPts[i].Bounds = bounds + hDPts[i].BucketCounts = b.counts + if !s.noSum { - hdp.Sum = b.total + hDPts[i].Sum = b.total } + if !s.noMinMax { - hdp.Min = metricdata.NewExtrema(b.min) - hdp.Max = metricdata.NewExtrema(b.max) + hDPts[i].Min = metricdata.NewExtrema(b.min) + hDPts[i].Max = metricdata.NewExtrema(b.max) } - h.DataPoints = append(h.DataPoints, hdp) // Unused attribute sets do not report. delete(s.values, a) + i++ } // The delta collection cycle resets. s.start = t - return h -} -// newCumulativeHistogram returns an Aggregator that summarizes a set of -// measurements as an histogram. Each histogram is scoped by attributes. -// -// Each aggregation cycle builds from the previous, the histogram counts are -// the bucketed counts of all values aggregated since the returned Aggregator -// was created. -func newCumulativeHistogram[N int64 | float64](cfg aggregation.ExplicitBucketHistogram, noSum bool) aggregator[N] { - return &cumulativeHistogram[N]{ - histValues: newHistValues[N](cfg.Boundaries, noSum), - noMinMax: cfg.NoMinMax, - start: now(), - } + h.DataPoints = hDPts + *dest = h + + return n } -// cumulativeHistogram summarizes a set of measurements made over all -// aggregation cycles as an histogram with explicitly defined buckets. -type cumulativeHistogram[N int64 | float64] struct { - *histValues[N] +func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() - noMinMax bool - start time.Time -} + // If *dest is not a metricdata.Histogram, memory reuse is missed. In that + // case, use the zero-value h and hope for better alignment next cycle. 
+ h, _ := (*dest).(metricdata.Histogram[N]) + h.Temporality = metricdata.CumulativeTemporality -func (s *cumulativeHistogram[N]) Aggregation() metricdata.Aggregation { s.valuesMu.Lock() defer s.valuesMu.Unlock() - if len(s.values) == 0 { - return nil - } - - t := now() // Do not allow modification of our copy of bounds. bounds := make([]float64, len(s.bounds)) copy(bounds, s.bounds) - h := metricdata.Histogram[N]{ - Temporality: metricdata.CumulativeTemporality, - DataPoints: make([]metricdata.HistogramDataPoint[N], 0, len(s.values)), - } + + n := len(s.values) + hDPts := reset(h.DataPoints, n, n) + + var i int for a, b := range s.values { // The HistogramDataPoint field values returned need to be copies of // the buckets value as we will keep updating them. @@ -220,26 +203,30 @@ func (s *cumulativeHistogram[N]) Aggregation() metricdata.Aggregation { counts := make([]uint64, len(b.counts)) copy(counts, b.counts) - hdp := metricdata.HistogramDataPoint[N]{ - Attributes: a, - StartTime: s.start, - Time: t, - Count: b.count, - Bounds: bounds, - BucketCounts: counts, - } + hDPts[i].Attributes = a + hDPts[i].StartTime = s.start + hDPts[i].Time = t + hDPts[i].Count = b.count + hDPts[i].Bounds = bounds + hDPts[i].BucketCounts = counts + if !s.noSum { - hdp.Sum = b.total + hDPts[i].Sum = b.total } + if !s.noMinMax { - hdp.Min = metricdata.NewExtrema(b.min) - hdp.Max = metricdata.NewExtrema(b.max) + hDPts[i].Min = metricdata.NewExtrema(b.min) + hDPts[i].Max = metricdata.NewExtrema(b.max) } - h.DataPoints = append(h.DataPoints, hdp) + i++ // TODO (#3006): This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not // overload the system. 
} - return h + + h.DataPoints = hDPts + *dest = h + + return n } diff --git a/sdk/metric/internal/aggregate/histogram_test.go b/sdk/metric/internal/aggregate/histogram_test.go index 8c75562198d..68a00f2a90f 100644 --- a/sdk/metric/internal/aggregate/histogram_test.go +++ b/sdk/metric/internal/aggregate/histogram_test.go @@ -15,6 +15,7 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" import ( + "context" "sort" "testing" @@ -37,74 +38,155 @@ var ( func TestHistogram(t *testing.T) { t.Cleanup(mockTime(now)) - t.Run("Int64", testHistogram[int64]) - t.Run("Float64", testHistogram[float64]) -} -func testHistogram[N int64 | float64](t *testing.T) { - tester := &aggregatorTester[N]{ - GoroutineN: defaultGoroutines, - MeasurementN: defaultMeasurements, - CycleN: defaultCycles, - } + t.Run("Int64/Delta/Sum", testDeltaHist[int64](conf[int64]{hPt: hPointSummed[int64]})) + t.Run("Int64/Delta/NoSum", testDeltaHist[int64](conf[int64]{noSum: true, hPt: hPoint[int64]})) + t.Run("Float64/Delta/Sum", testDeltaHist[float64](conf[float64]{hPt: hPointSummed[float64]})) + t.Run("Float64/Delta/NoSum", testDeltaHist[float64](conf[float64]{noSum: true, hPt: hPoint[float64]})) - incr := monoIncr[N]() - eFunc := deltaHistSummedExpecter[N](incr) - t.Run("Delta/Summed", tester.Run(newDeltaHistogram[N](histConf, false), incr, eFunc)) - eFunc = deltaHistExpecter[N](incr) - t.Run("Delta/NoSum", tester.Run(newDeltaHistogram[N](histConf, true), incr, eFunc)) - eFunc = cumuHistSummedExpecter[N](incr) - t.Run("Cumulative/Summed", tester.Run(newCumulativeHistogram[N](histConf, false), incr, eFunc)) - eFunc = cumuHistExpecter[N](incr) - t.Run("Cumulative/NoSum", tester.Run(newCumulativeHistogram[N](histConf, true), incr, eFunc)) + t.Run("Int64/Cumulative/Sum", testCumulativeHist[int64](conf[int64]{hPt: hPointSummed[int64]})) + t.Run("Int64/Cumulative/NoSum", testCumulativeHist[int64](conf[int64]{noSum: true, hPt: hPoint[int64]})) + t.Run("Float64/Cumulative/Sum", testCumulativeHist[float64](conf[float64]{hPt: hPointSummed[float64]})) + t.Run("Float64/Cumulative/NoSum", testCumulativeHist[float64](conf[float64]{noSum: true, hPt: hPoint[float64]})) } -func deltaHistSummedExpecter[N int64 | float64](incr setMap[N]) expectFunc { - h := metricdata.Histogram[N]{Temporality: metricdata.DeltaTemporality} - return func(m int) metricdata.Aggregation { - h.DataPoints = make([]metricdata.HistogramDataPoint[N], 0, len(incr)) - for a, v := range incr { - h.DataPoints = append(h.DataPoints, hPointSummed[N](a, v, uint64(m))) - } - return h - } +type conf[N int64 | float64] struct { + noSum bool + hPt func(attribute.Set, N, uint64) metricdata.HistogramDataPoint[N] } -func deltaHistExpecter[N int64 | float64](incr setMap[N]) expectFunc { - h := metricdata.Histogram[N]{Temporality: metricdata.DeltaTemporality} - return func(m int) metricdata.Aggregation { - h.DataPoints = make([]metricdata.HistogramDataPoint[N], 0, len(incr)) - for a, v := range incr { - h.DataPoints = append(h.DataPoints, hPoint[N](a, v, uint64(m))) - } - return h - } -} - -func cumuHistSummedExpecter[N int64 | float64](incr setMap[N]) expectFunc { - var cycle int - h := metricdata.Histogram[N]{Temporality: metricdata.CumulativeTemporality} - return func(m int) metricdata.Aggregation { - cycle++ - h.DataPoints = make([]metricdata.HistogramDataPoint[N], 0, len(incr)) - for a, v := range incr { - h.DataPoints = append(h.DataPoints, hPointSummed[N](a, v, uint64(cycle*m))) - } - return h - } +func testDeltaHist[N int64 | float64](c conf[N]) 
func(t *testing.T) { + in, out := Builder[N]{ + Temporality: metricdata.DeltaTemporality, + Filter: attrFltr, + }.ExplicitBucketHistogram(histConf, c.noSum) + ctx := context.Background() + return test[N](in, out, []teststep[N]{ + { + input: []arg[N]{}, + expect: output{ + n: 0, + agg: metricdata.Histogram[N]{ + Temporality: metricdata.DeltaTemporality, + DataPoints: []metricdata.HistogramDataPoint[N]{}, + }, + }, + }, + { + input: []arg[N]{ + {ctx, 2, alice}, + {ctx, 10, bob}, + {ctx, 2, alice}, + {ctx, 2, alice}, + {ctx, 10, bob}, + }, + expect: output{ + n: 2, + agg: metricdata.Histogram[N]{ + Temporality: metricdata.DeltaTemporality, + DataPoints: []metricdata.HistogramDataPoint[N]{ + c.hPt(fltrAlice, 2, 3), + c.hPt(fltrBob, 10, 2), + }, + }, + }, + }, + { + input: []arg[N]{ + {ctx, 10, alice}, + {ctx, 3, bob}, + }, + expect: output{ + n: 2, + agg: metricdata.Histogram[N]{ + Temporality: metricdata.DeltaTemporality, + DataPoints: []metricdata.HistogramDataPoint[N]{ + c.hPt(fltrAlice, 10, 1), + c.hPt(fltrBob, 3, 1), + }, + }, + }, + }, + { + input: []arg[N]{}, + // Delta histograms are expected to reset. + expect: output{ + n: 0, + agg: metricdata.Histogram[N]{ + Temporality: metricdata.DeltaTemporality, + DataPoints: []metricdata.HistogramDataPoint[N]{}, + }, + }, + }, + }) } -func cumuHistExpecter[N int64 | float64](incr setMap[N]) expectFunc { - var cycle int - h := metricdata.Histogram[N]{Temporality: metricdata.CumulativeTemporality} - return func(m int) metricdata.Aggregation { - cycle++ - h.DataPoints = make([]metricdata.HistogramDataPoint[N], 0, len(incr)) - for a, v := range incr { - h.DataPoints = append(h.DataPoints, hPoint[N](a, v, uint64(cycle*m))) - } - return h - } +func testCumulativeHist[N int64 | float64](c conf[N]) func(t *testing.T) { + in, out := Builder[N]{ + Temporality: metricdata.CumulativeTemporality, + Filter: attrFltr, + }.ExplicitBucketHistogram(histConf, c.noSum) + ctx := context.Background() + return test[N](in, out, []teststep[N]{ + { + input: []arg[N]{}, + expect: output{ + n: 0, + agg: metricdata.Histogram[N]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.HistogramDataPoint[N]{}, + }, + }, + }, + { + input: []arg[N]{ + {ctx, 2, alice}, + {ctx, 10, bob}, + {ctx, 2, alice}, + {ctx, 2, alice}, + {ctx, 10, bob}, + }, + expect: output{ + n: 2, + agg: metricdata.Histogram[N]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.HistogramDataPoint[N]{ + c.hPt(fltrAlice, 2, 3), + c.hPt(fltrBob, 10, 2), + }, + }, + }, + }, + { + input: []arg[N]{ + {ctx, 2, alice}, + {ctx, 10, bob}, + }, + expect: output{ + n: 2, + agg: metricdata.Histogram[N]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.HistogramDataPoint[N]{ + c.hPt(fltrAlice, 2, 4), + c.hPt(fltrBob, 10, 3), + }, + }, + }, + }, + { + input: []arg[N]{}, + expect: output{ + n: 2, + agg: metricdata.Histogram[N]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.HistogramDataPoint[N]{ + c.hPt(fltrAlice, 2, 4), + c.hPt(fltrBob, 10, 3), + }, + }, + }, + }, + }) } // hPointSummed returns an HistogramDataPoint that started and ended now with @@ -190,93 +272,89 @@ func testBucketsSum[N int64 | float64]() func(t *testing.T) { } } -func testHistImmutableBounds[N int64 | float64](newA func(aggregation.ExplicitBucketHistogram, bool) aggregator[N], getBounds func(aggregator[N]) []float64) func(t *testing.T) { +func TestHistogramImmutableBounds(t *testing.T) { b := []float64{0, 1, 2} cpB := make([]float64, len(b)) 
copy(cpB, b) - a := newA(aggregation.ExplicitBucketHistogram{Boundaries: b}, false) - return func(t *testing.T) { - require.Equal(t, cpB, getBounds(a)) + h := newHistogram[int64](aggregation.ExplicitBucketHistogram{Boundaries: b}, false) + require.Equal(t, cpB, h.bounds) - b[0] = 10 - assert.Equal(t, cpB, getBounds(a), "modifying the bounds argument should not change the bounds") + b[0] = 10 + assert.Equal(t, cpB, h.bounds, "modifying the bounds argument should not change the bounds") - a.Aggregate(5, alice) - hdp := a.Aggregation().(metricdata.Histogram[N]).DataPoints[0] - hdp.Bounds[1] = 10 - assert.Equal(t, cpB, getBounds(a), "modifying the Aggregation bounds should not change the bounds") - } -} - -func TestHistogramImmutableBounds(t *testing.T) { - t.Run("Delta", testHistImmutableBounds( - newDeltaHistogram[int64], - func(a aggregator[int64]) []float64 { - deltaH := a.(*deltaHistogram[int64]) - return deltaH.bounds - }, - )) + h.measure(context.Background(), 5, alice) - t.Run("Cumulative", testHistImmutableBounds( - newCumulativeHistogram[int64], - func(a aggregator[int64]) []float64 { - cumuH := a.(*cumulativeHistogram[int64]) - return cumuH.bounds - }, - )) + var data metricdata.Aggregation = metricdata.Histogram[int64]{} + h.cumulative(&data) + hdp := data.(metricdata.Histogram[int64]).DataPoints[0] + hdp.Bounds[1] = 10 + assert.Equal(t, cpB, h.bounds, "modifying the Aggregation bounds should not change the bounds") } func TestCumulativeHistogramImutableCounts(t *testing.T) { - a := newCumulativeHistogram[int64](histConf, false) - a.Aggregate(5, alice) - hdp := a.Aggregation().(metricdata.Histogram[int64]).DataPoints[0] + h := newHistogram[int64](histConf, false) + h.measure(context.Background(), 5, alice) + + var data metricdata.Aggregation = metricdata.Histogram[int64]{} + h.cumulative(&data) + hdp := data.(metricdata.Histogram[int64]).DataPoints[0] - cumuH := a.(*cumulativeHistogram[int64]) - require.Equal(t, hdp.BucketCounts, cumuH.values[alice].counts) + require.Equal(t, hdp.BucketCounts, h.values[alice].counts) cpCounts := make([]uint64, len(hdp.BucketCounts)) copy(cpCounts, hdp.BucketCounts) hdp.BucketCounts[0] = 10 - assert.Equal(t, cpCounts, cumuH.values[alice].counts, "modifying the Aggregator bucket counts should not change the Aggregator") + assert.Equal(t, cpCounts, h.values[alice].counts, "modifying the Aggregator bucket counts should not change the Aggregator") } func TestDeltaHistogramReset(t *testing.T) { t.Cleanup(mockTime(now)) - a := newDeltaHistogram[int64](histConf, false) - assert.Nil(t, a.Aggregation()) + h := newHistogram[int64](histConf, false) + + var data metricdata.Aggregation = metricdata.Histogram[int64]{} + require.Equal(t, 0, h.delta(&data)) + require.Len(t, data.(metricdata.Histogram[int64]).DataPoints, 0) + + h.measure(context.Background(), 1, alice) - a.Aggregate(1, alice) expect := metricdata.Histogram[int64]{Temporality: metricdata.DeltaTemporality} expect.DataPoints = []metricdata.HistogramDataPoint[int64]{hPointSummed[int64](alice, 1, 1)} - metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation()) + h.delta(&data) + metricdatatest.AssertAggregationsEqual(t, expect, data) // The attr set should be forgotten once Aggregations is called. expect.DataPoints = nil - assert.Nil(t, a.Aggregation()) + assert.Equal(t, 0, h.delta(&data)) + assert.Len(t, data.(metricdata.Histogram[int64]).DataPoints, 0) // Aggregating another set should not affect the original (alice). 
- a.Aggregate(1, bob) + h.measure(context.Background(), 1, bob) expect.DataPoints = []metricdata.HistogramDataPoint[int64]{hPointSummed[int64](bob, 1, 1)} - metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation()) -} - -func TestEmptyHistogramNilAggregation(t *testing.T) { - assert.Nil(t, newCumulativeHistogram[int64](histConf, false).Aggregation()) - assert.Nil(t, newCumulativeHistogram[float64](histConf, false).Aggregation()) - assert.Nil(t, newDeltaHistogram[int64](histConf, false).Aggregation()) - assert.Nil(t, newDeltaHistogram[float64](histConf, false).Aggregation()) + h.delta(&data) + metricdatatest.AssertAggregationsEqual(t, expect, data) } func BenchmarkHistogram(b *testing.B) { - b.Run("Int64", benchmarkHistogram[int64]) - b.Run("Float64", benchmarkHistogram[float64]) -} - -func benchmarkHistogram[N int64 | float64](b *testing.B) { - factory := func() aggregator[N] { return newDeltaHistogram[N](histConf, false) } - b.Run("Delta", benchmarkAggregator(factory)) - factory = func() aggregator[N] { return newCumulativeHistogram[N](histConf, false) } - b.Run("Cumulative", benchmarkAggregator(factory)) + b.Run("Int64/Cumulative", benchmarkAggregate(func() (Measure[int64], ComputeAggregation) { + return Builder[int64]{ + Temporality: metricdata.CumulativeTemporality, + }.ExplicitBucketHistogram(histConf, false) + })) + b.Run("Int64/Delta", benchmarkAggregate(func() (Measure[int64], ComputeAggregation) { + return Builder[int64]{ + Temporality: metricdata.DeltaTemporality, + }.ExplicitBucketHistogram(histConf, false) + })) + b.Run("Float64/Cumulative", benchmarkAggregate(func() (Measure[float64], ComputeAggregation) { + return Builder[float64]{ + Temporality: metricdata.CumulativeTemporality, + }.ExplicitBucketHistogram(histConf, false) + })) + b.Run("Float64/Delta", benchmarkAggregate(func() (Measure[float64], ComputeAggregation) { + return Builder[float64]{ + Temporality: metricdata.DeltaTemporality, + }.ExplicitBucketHistogram(histConf, false) + })) } diff --git a/sdk/metric/periodic_reader.go b/sdk/metric/periodic_reader.go index e04c1edbe18..f62a2ae41e3 100644 --- a/sdk/metric/periodic_reader.go +++ b/sdk/metric/periodic_reader.go @@ -68,7 +68,9 @@ func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) perio // WithTimeout configures the time a PeriodicReader waits for an export to // complete before canceling it. This includes an export which occurs as part -// of Shutdown. +// of Shutdown or ForceFlush if the user passed context does not have a +// deadline. If the user passed context does have a deadline, it will be used +// instead. // // This option overrides any value set for the // OTEL_METRIC_EXPORT_TIMEOUT environment variable. @@ -298,6 +300,13 @@ func (r *PeriodicReader) export(ctx context.Context, m *metricdata.ResourceMetri // // This method is safe to call concurrently. func (r *PeriodicReader) ForceFlush(ctx context.Context) error { + // Prioritize the ctx timeout if it is set. + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, r.timeout) + defer cancel() + } + errCh := make(chan error, 1) select { case r.flushCh <- errCh: @@ -324,8 +333,13 @@ func (r *PeriodicReader) ForceFlush(ctx context.Context) error { func (r *PeriodicReader) Shutdown(ctx context.Context) error { err := ErrReaderShutdown r.shutdownOnce.Do(func() { - ctx, cancel := context.WithTimeout(ctx, r.timeout) - defer cancel() + // Prioritize the ctx timeout if it is set. 
+ if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, r.timeout) + defer cancel() + } + // Stop the run loop. r.cancel() <-r.done diff --git a/sdk/metric/reader.go b/sdk/metric/reader.go index a281104bd08..da68ef33686 100644 --- a/sdk/metric/reader.go +++ b/sdk/metric/reader.go @@ -65,9 +65,15 @@ type Reader interface { RegisterProducer(Producer) // temporality reports the Temporality for the instrument kind provided. + // + // This method needs to be concurrent safe with itself and all the other + // Reader methods. temporality(InstrumentKind) metricdata.Temporality // aggregation returns what Aggregation to use for an instrument kind. + // + // This method needs to be concurrent safe with itself and all the other + // Reader methods. aggregation(InstrumentKind) aggregation.Aggregation // nolint:revive // import-shadow for method scoped by type. // Collect gathers and returns all metric data related to the Reader from diff --git a/sdk/metric/reader_test.go b/sdk/metric/reader_test.go index d375a1b4f6c..8904d68ee40 100644 --- a/sdk/metric/reader_test.go +++ b/sdk/metric/reader_test.go @@ -168,6 +168,18 @@ func (ts *readerTestSuite) TestMethodConcurrentSafe() { var wg sync.WaitGroup const threads = 2 for i := 0; i < threads; i++ { + wg.Add(1) + go func() { + defer wg.Done() + _ = ts.Reader.temporality(InstrumentKindCounter) + }() + + wg.Add(1) + go func() { + defer wg.Done() + _ = ts.Reader.aggregation(InstrumentKindCounter) + }() + wg.Add(1) go func() { defer wg.Done()