
Commit 77d109e

Merge branch 'main' into ft/exemplars

2 parents ecd03bc + d0fb920

File tree

5 files changed: +212, -152 lines

CHANGELOG.md

Lines changed: 5 additions & 3 deletions
```diff
@@ -7,11 +7,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## Unreleased
 
-- sdk: Implementation of exemplars
-  ([#4094](https://github.com/open-telemetry/opentelemetry-python/pull/4094))
+- Include metric info in encoding exceptions
+  ([#4154](https://github.com/open-telemetry/opentelemetry-python/pull/4154))
 - sdk: Add support for log formatting
   ([#4137](https://github.com/open-telemetry/opentelemetry-python/pull/4166))
-
+- sdk: Implementation of exemplars
+  ([#4094](https://github.com/open-telemetry/opentelemetry-python/pull/4094))
+
 ## Version 1.27.0/0.48b0 (2024-08-28)
 
 - Implementation of Events API
```

CONTRIBUTING.md

Lines changed: 17 additions & 0 deletions
```diff
@@ -224,6 +224,23 @@ updating the GitHub workflow to reference a PR in the Contrib repo
 * Trivial change (typo, cosmetic, doc, etc.) doesn't have to wait for one day.
 * Urgent fix can take exception as long as it has been actively communicated.
 
+#### Allow edits from maintainers
+
+Something _very important_ is to allow edits from maintainers when opening a PR. This
+lets maintainers rebase your PR against `main`, which is necessary in order to merge
+it. You could rebase it yourself too, but keep in mind that every time another PR gets
+merged, your PR will need rebasing again, and since only maintainers can merge your PR,
+it is almost impossible for them to catch it at the exact moment you have rebased it.
+Allowing maintainers to edit your PR also lets them help you get it merged by making
+minor fixes for issues that, while unrelated to your PR, can still block it.
+
+#### Fork from a personal GitHub account
+
+Right now GitHub [does not allow](https://github.com/orgs/community/discussions/5634) PRs
+to be edited by maintainers if the corresponding repo fork exists in a GitHub organization.
+Please fork this repo in a personal GitHub account instead.
+
 One of the maintainers will merge the PR once it is **ready to merge**.
 
 ## Design Choices
```

exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py

Lines changed: 153 additions & 147 deletions
```diff
@@ -175,156 +175,26 @@ def _get_aggregation(
     return instrument_class_aggregation
 
 
-def encode_metrics(data: MetricsData) -> ExportMetricsServiceRequest:
-    resource_metrics_dict = {}
-
-    for resource_metrics in data.resource_metrics:
-
-        resource = resource_metrics.resource
-
-        # It is safe to assume that each entry in data.resource_metrics is
-        # associated with an unique resource.
-        scope_metrics_dict = {}
-
-        resource_metrics_dict[resource] = scope_metrics_dict
-
-        for scope_metrics in resource_metrics.scope_metrics:
+class EncodingException(Exception):
+    """
+    Raised by encode_metrics() when an exception is caught during encoding. Contains the problematic metric so
+    the misbehaving metric name and details can be logged during exception handling.
+    """
 
-            instrumentation_scope = scope_metrics.scope
+    def __init__(self, original_exception, metric):
+        super().__init__()
+        self.original_exception = original_exception
+        self.metric = metric
 
-            # The SDK groups metrics in instrumentation scopes already so
-            # there is no need to check for existing instrumentation scopes
-            # here.
-            pb2_scope_metrics = pb2.ScopeMetrics(
-                scope=InstrumentationScope(
-                    name=instrumentation_scope.name,
-                    version=instrumentation_scope.version,
-                )
-            )
+    def __str__(self):
+        return f"{self.metric}\n{self.original_exception}"
 
-            scope_metrics_dict[instrumentation_scope] = pb2_scope_metrics
 
-            for metric in scope_metrics.metrics:
-                pb2_metric = pb2.Metric(
-                    name=metric.name,
-                    description=metric.description,
-                    unit=metric.unit,
-                )
+def encode_metrics(data: MetricsData) -> ExportMetricsServiceRequest:
+    resource_metrics_dict = {}
 
-                if isinstance(metric.data, Gauge):
-                    for data_point in metric.data.data_points:
-                        pt = pb2.NumberDataPoint(
-                            attributes=_encode_attributes(
-                                data_point.attributes
-                            ),
-                            time_unix_nano=data_point.time_unix_nano,
-                            exemplars=encode_exemplars(data_point.exemplars),
-                        )
-                        if isinstance(data_point.value, int):
-                            pt.as_int = data_point.value
-                        else:
-                            pt.as_double = data_point.value
-                        pb2_metric.gauge.data_points.append(pt)
-
-                elif isinstance(metric.data, HistogramType):
-                    for data_point in metric.data.data_points:
-                        pt = pb2.HistogramDataPoint(
-                            attributes=_encode_attributes(
-                                data_point.attributes
-                            ),
-                            time_unix_nano=data_point.time_unix_nano,
-                            start_time_unix_nano=(
-                                data_point.start_time_unix_nano
-                            ),
-                            exemplars=encode_exemplars(data_point.exemplars),
-                            count=data_point.count,
-                            sum=data_point.sum,
-                            bucket_counts=data_point.bucket_counts,
-                            explicit_bounds=data_point.explicit_bounds,
-                            max=data_point.max,
-                            min=data_point.min,
-                        )
-                        pb2_metric.histogram.aggregation_temporality = (
-                            metric.data.aggregation_temporality
-                        )
-                        pb2_metric.histogram.data_points.append(pt)
-
-                elif isinstance(metric.data, Sum):
-                    for data_point in metric.data.data_points:
-                        pt = pb2.NumberDataPoint(
-                            attributes=_encode_attributes(
-                                data_point.attributes
-                            ),
-                            start_time_unix_nano=(
-                                data_point.start_time_unix_nano
-                            ),
-                            time_unix_nano=data_point.time_unix_nano,
-                            exemplars=encode_exemplars(data_point.exemplars),
-                        )
-                        if isinstance(data_point.value, int):
-                            pt.as_int = data_point.value
-                        else:
-                            pt.as_double = data_point.value
-                        # note that because sum is a message type, the
-                        # fields must be set individually rather than
-                        # instantiating a pb2.Sum and setting it once
-                        pb2_metric.sum.aggregation_temporality = (
-                            metric.data.aggregation_temporality
-                        )
-                        pb2_metric.sum.is_monotonic = metric.data.is_monotonic
-                        pb2_metric.sum.data_points.append(pt)
-
-                elif isinstance(metric.data, ExponentialHistogramType):
-                    for data_point in metric.data.data_points:
-
-                        if data_point.positive.bucket_counts:
-                            positive = pb2.ExponentialHistogramDataPoint.Buckets(
-                                offset=data_point.positive.offset,
-                                bucket_counts=data_point.positive.bucket_counts,
-                            )
-                        else:
-                            positive = None
-
-                        if data_point.negative.bucket_counts:
-                            negative = pb2.ExponentialHistogramDataPoint.Buckets(
-                                offset=data_point.negative.offset,
-                                bucket_counts=data_point.negative.bucket_counts,
-                            )
-                        else:
-                            negative = None
-
-                        pt = pb2.ExponentialHistogramDataPoint(
-                            attributes=_encode_attributes(
-                                data_point.attributes
-                            ),
-                            time_unix_nano=data_point.time_unix_nano,
-                            start_time_unix_nano=(
-                                data_point.start_time_unix_nano
-                            ),
-                            exemplars=encode_exemplars(data_point.exemplars),
-                            count=data_point.count,
-                            sum=data_point.sum,
-                            scale=data_point.scale,
-                            zero_count=data_point.zero_count,
-                            positive=positive,
-                            negative=negative,
-                            flags=data_point.flags,
-                            max=data_point.max,
-                            min=data_point.min,
-                        )
-                        pb2_metric.exponential_histogram.aggregation_temporality = (
-                            metric.data.aggregation_temporality
-                        )
-                        pb2_metric.exponential_histogram.data_points.append(pt)
-
-                else:
-                    _logger.warning(
-                        "unsupported data type %s",
-                        metric.data.__class__.__name__,
-                    )
-                    continue
-
-                pb2_scope_metrics.metrics.append(pb2_metric)
+    for resource_metrics in data.resource_metrics:
+        _encode_resource_metrics(resource_metrics, resource_metrics_dict)
 
     resource_data = []
     for (
```
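With this change, a failure to encode any single metric surfaces as an `EncodingException` that carries the offending metric. As a minimal sketch of how a caller might use it (the `encode_with_logging` wrapper below is hypothetical, not part of the commit; the import path is the module this diff touches):

```python
import logging

from opentelemetry.exporter.otlp.proto.common._internal.metrics_encoder import (
    EncodingException,
    encode_metrics,
)

logger = logging.getLogger(__name__)


def encode_with_logging(metrics_data):
    # Hypothetical wrapper: EncodingException.__str__ already prints the
    # metric followed by the original exception, but the fields can also
    # be inspected individually, e.g. to log just the metric name.
    try:
        return encode_metrics(metrics_data)
    except EncodingException as ex:
        logger.error(
            "failed to encode metric %s: %s",
            ex.metric.name,
            ex.original_exception,
        )
        raise
```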
```diff
@@ -340,8 +210,144 @@ def encode_metrics(data: MetricsData) -> ExportMetricsServiceRequest:
                 schema_url=sdk_resource.schema_url,
             )
         )
-    resource_metrics = resource_data
-    return ExportMetricsServiceRequest(resource_metrics=resource_metrics)
+    return ExportMetricsServiceRequest(resource_metrics=resource_data)
+
+
+def _encode_resource_metrics(resource_metrics, resource_metrics_dict):
+    resource = resource_metrics.resource
+    # It is safe to assume that each entry in data.resource_metrics is
+    # associated with an unique resource.
+    scope_metrics_dict = {}
+    resource_metrics_dict[resource] = scope_metrics_dict
+    for scope_metrics in resource_metrics.scope_metrics:
+        instrumentation_scope = scope_metrics.scope
+
+        # The SDK groups metrics in instrumentation scopes already so
+        # there is no need to check for existing instrumentation scopes
+        # here.
+        pb2_scope_metrics = pb2.ScopeMetrics(
+            scope=InstrumentationScope(
+                name=instrumentation_scope.name,
+                version=instrumentation_scope.version,
+            )
+        )
+
+        scope_metrics_dict[instrumentation_scope] = pb2_scope_metrics
+
+        for metric in scope_metrics.metrics:
+            pb2_metric = pb2.Metric(
+                name=metric.name,
+                description=metric.description,
+                unit=metric.unit,
+            )
+
+            try:
+                _encode_metric(metric, pb2_metric)
+            except Exception as ex:
+                # `from None` so we don't get "During handling of the above exception, another exception occurred:"
+                raise EncodingException(ex, metric) from None
+
+            pb2_scope_metrics.metrics.append(pb2_metric)
```
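The `raise ... from None` above suppresses implicit exception chaining, so the traceback shows only the `EncodingException` rather than the original traceback followed by "During handling of the above exception, another exception occurred:". A self-contained sketch of the difference (illustrative names only, not from the commit):

```python
class WrapperError(Exception):
    pass


def chained():
    try:
        1 / 0
    except ZeroDivisionError as ex:
        # Implicit chaining: the ZeroDivisionError traceback prints first,
        # then "During handling of the above exception, another exception
        # occurred:" followed by the WrapperError traceback.
        raise WrapperError(ex)


def unchained():
    try:
        1 / 0
    except ZeroDivisionError as ex:
        # `from None` sets __suppress_context__, so only the WrapperError
        # traceback prints; the cause survives as the wrapped argument.
        raise WrapperError(ex) from None
```

The hunk continues with the new `_encode_metric` helper: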
```diff
+def _encode_metric(metric, pb2_metric):
+    if isinstance(metric.data, Gauge):
+        for data_point in metric.data.data_points:
+            pt = pb2.NumberDataPoint(
+                attributes=_encode_attributes(data_point.attributes),
+                time_unix_nano=data_point.time_unix_nano,
+                exemplars=encode_exemplars(data_point.exemplars),
+            )
+            if isinstance(data_point.value, int):
+                pt.as_int = data_point.value
+            else:
+                pt.as_double = data_point.value
+            pb2_metric.gauge.data_points.append(pt)
+
+    elif isinstance(metric.data, HistogramType):
+        for data_point in metric.data.data_points:
+            pt = pb2.HistogramDataPoint(
+                attributes=_encode_attributes(data_point.attributes),
+                time_unix_nano=data_point.time_unix_nano,
+                start_time_unix_nano=data_point.start_time_unix_nano,
+                exemplars=encode_exemplars(data_point.exemplars),
+                count=data_point.count,
+                sum=data_point.sum,
+                bucket_counts=data_point.bucket_counts,
+                explicit_bounds=data_point.explicit_bounds,
+                max=data_point.max,
+                min=data_point.min,
+            )
+            pb2_metric.histogram.aggregation_temporality = (
+                metric.data.aggregation_temporality
+            )
+            pb2_metric.histogram.data_points.append(pt)
+
+    elif isinstance(metric.data, Sum):
+        for data_point in metric.data.data_points:
+            pt = pb2.NumberDataPoint(
+                attributes=_encode_attributes(data_point.attributes),
+                start_time_unix_nano=data_point.start_time_unix_nano,
+                time_unix_nano=data_point.time_unix_nano,
+                exemplars=encode_exemplars(data_point.exemplars),
+            )
+            if isinstance(data_point.value, int):
+                pt.as_int = data_point.value
+            else:
+                pt.as_double = data_point.value
+            # note that because sum is a message type, the
+            # fields must be set individually rather than
+            # instantiating a pb2.Sum and setting it once
+            pb2_metric.sum.aggregation_temporality = (
+                metric.data.aggregation_temporality
+            )
+            pb2_metric.sum.is_monotonic = metric.data.is_monotonic
+            pb2_metric.sum.data_points.append(pt)
+
+    elif isinstance(metric.data, ExponentialHistogramType):
+        for data_point in metric.data.data_points:
+
+            if data_point.positive.bucket_counts:
+                positive = pb2.ExponentialHistogramDataPoint.Buckets(
+                    offset=data_point.positive.offset,
+                    bucket_counts=data_point.positive.bucket_counts,
+                )
+            else:
+                positive = None
+
+            if data_point.negative.bucket_counts:
+                negative = pb2.ExponentialHistogramDataPoint.Buckets(
+                    offset=data_point.negative.offset,
+                    bucket_counts=data_point.negative.bucket_counts,
+                )
+            else:
+                negative = None
+
+            pt = pb2.ExponentialHistogramDataPoint(
+                attributes=_encode_attributes(data_point.attributes),
+                time_unix_nano=data_point.time_unix_nano,
+                start_time_unix_nano=data_point.start_time_unix_nano,
+                exemplars=encode_exemplars(data_point.exemplars),
+                count=data_point.count,
+                sum=data_point.sum,
+                scale=data_point.scale,
+                zero_count=data_point.zero_count,
+                positive=positive,
+                negative=negative,
+                flags=data_point.flags,
+                max=data_point.max,
+                min=data_point.min,
+            )
+            pb2_metric.exponential_histogram.aggregation_temporality = (
+                metric.data.aggregation_temporality
+            )
+            pb2_metric.exponential_histogram.data_points.append(pt)
+
+    else:
+        _logger.warning(
+            "unsupported data type %s",
+            metric.data.__class__.__name__,
+        )
 
 
 def encode_exemplars(sdk_exemplars: list) -> list:
```
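Two protobuf-python behaviors the encoder leans on, shown as a hedged sketch (the metric name is made up; the pb2 classes and import are the ones the encoder itself uses): `NumberDataPoint.value` is a oneof, so exactly one of `as_int`/`as_double` is set, and an embedded message field such as `Metric.sum` cannot be assigned wholesale, which is why the code sets its fields one at a time.

```python
from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2

pb2_metric = pb2.Metric(name="example.counter")  # hypothetical metric name

pt = pb2.NumberDataPoint(time_unix_nano=0)
pt.as_int = 10  # assigning as_double afterwards would clear as_int (oneof)

# Direct assignment to an embedded message field raises AttributeError in
# protobuf's Python API (e.g. `pb2_metric.sum = pb2.Sum()`), so scalar
# fields are set individually and repeated fields are appended to:
pb2_metric.sum.is_monotonic = True
pb2_metric.sum.data_points.append(pt)
```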
