From 6d4ae8718d4808603c4a8e5ed5eae72b0cc8fb3f Mon Sep 17 00:00:00 2001 From: Lalit Kumar Bhasin Date: Wed, 22 May 2024 08:31:11 -0700 Subject: [PATCH 01/37] Nit fix for review comments from #1798 (#1803) --- .../examples/basic-otlp-http/src/main.rs | 14 +++++++++----- opentelemetry-otlp/examples/basic-otlp/src/main.rs | 14 +++++++++----- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs index 8b3e27316a..34a294f6af 100644 --- a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs @@ -89,13 +89,17 @@ async fn main() -> Result<(), Box> { // emitting. let logger_provider = init_logs().unwrap(); - // Create a new OpenTelemetryLogBridge using the above LoggerProvider. + // Create a new OpenTelemetryTracingBridge using the above LoggerProvider. let layer = OpenTelemetryTracingBridge::new(&logger_provider); - // add a tracing filter to filter the events generated from the crates used by opentelemetry-otlp - // Below filter level means: - // - Logs at `info` level and above are allowed by default. - // - Only `error` level logs from `hyper`, `tonic`, and `reqwest` crates are allowed. + // Add a tracing filter to filter events from crates used by opentelemetry-otlp. + // The filter levels are set as follows: + // - Allow `info` level and above by default. + // - Restrict `hyper`, `tonic`, and `reqwest` to `error` level logs only. + // This ensures events generated from these crates within the OTLP Exporter are not looped back, + // thus preventing infinite event generation. + // Note: This will also drop events from these crates used outside the OTLP Exporter. 
+ // For more details, see: https://github.com/open-telemetry/opentelemetry-rust/issues/761 let filter = EnvFilter::new("info") .add_directive("hyper=error".parse().unwrap()) .add_directive("tonic=error".parse().unwrap()) diff --git a/opentelemetry-otlp/examples/basic-otlp/src/main.rs b/opentelemetry-otlp/examples/basic-otlp/src/main.rs index da14a2a89b..580828f4aa 100644 --- a/opentelemetry-otlp/examples/basic-otlp/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp/src/main.rs @@ -92,13 +92,17 @@ async fn main() -> Result<(), Box> { // Initialize logs and save the logger_provider. let logger_provider = init_logs().unwrap(); - // Create a new OpenTelemetryLogBridge using the above LoggerProvider. + // Create a new OpenTelemetryTracingBridge using the above LoggerProvider. let layer = OpenTelemetryTracingBridge::new(&logger_provider); - // add a tracing filter to filter the events generated from the crates used by opentelemetry-otlp - // Below filter level means: - // - Logs at `info` level and above are allowed by default. - // - Only `error` level logs from `hyper`, `tonic`, and `reqwest` crates are allowed. + // Add a tracing filter to filter events from crates used by opentelemetry-otlp. + // The filter levels are set as follows: + // - Allow `info` level and above by default. + // - Restrict `hyper`, `tonic`, and `reqwest` to `error` level logs only. + // This ensures events generated from these crates within the OTLP Exporter are not looped back, + // thus preventing infinite event generation. + // Note: This will also drop events from these crates used outside the OTLP Exporter. 
+ // For more details, see: https://github.com/open-telemetry/opentelemetry-rust/issues/761 let filter = EnvFilter::new("info") .add_directive("hyper=error".parse().unwrap()) .add_directive("tonic=error".parse().unwrap()) From 6d3bea728208f9e00f3d3b5ce4111d60a501676f Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Wed, 22 May 2024 09:05:26 -0700 Subject: [PATCH 02/37] Add name to info logs in OTLP Example (#1802) --- opentelemetry-otlp/examples/basic-otlp/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/opentelemetry-otlp/examples/basic-otlp/src/main.rs b/opentelemetry-otlp/examples/basic-otlp/src/main.rs index 580828f4aa..9932faa8cf 100644 --- a/opentelemetry-otlp/examples/basic-otlp/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp/src/main.rs @@ -142,7 +142,7 @@ async fn main() -> Result<(), Box> { ); span.set_attribute(KeyValue::new("another.key", "yes")); - info!(target: "my-target", "hello from {}. My price is {}. I am also inside a Span!", "banana", 2.99); + info!(name: "my-event-inside-span", target: "my-target", "hello from {}. My price is {}. I am also inside a Span!", "banana", 2.99); tracer.in_span("Sub operation...", |cx| { let span = cx.span(); @@ -151,7 +151,7 @@ async fn main() -> Result<(), Box> { }); }); - info!(target: "my-target", "hello from {}. My price is {}", "apple", 1.99); + info!(name: "my-event", target: "my-target", "hello from {}. 
My price is {}", "apple", 1.99); global::shutdown_tracer_provider(); logger_provider.shutdown()?; From f64bdd2e1c5ed9a30682e3d97b4efe48afd1c000 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Wed, 22 May 2024 11:54:59 -0700 Subject: [PATCH 03/37] Use better random generation in benchmark and stress tests (#1800) Co-authored-by: Zhongyang Wu --- .../benches/logs.rs | 4 +- opentelemetry-sdk/benches/metric_counter.rs | 51 +++++++++++++++---- stress/src/logs.rs | 8 +++ stress/src/metrics.rs | 28 +++++++--- stress/src/random.rs | 25 +++++++-- stress/src/traces.rs | 8 +++ 6 files changed, 100 insertions(+), 24 deletions(-) diff --git a/opentelemetry-appender-tracing/benches/logs.rs b/opentelemetry-appender-tracing/benches/logs.rs index b37f3de024..f41f99e180 100644 --- a/opentelemetry-appender-tracing/benches/logs.rs +++ b/opentelemetry-appender-tracing/benches/logs.rs @@ -1,7 +1,7 @@ /* The benchmark results: criterion = "0.5.1" - OS: Ubuntu 22.04.2 LTS (5.10.102.1-microsoft-standard-WSL2) + OS: Ubuntu 22.04.3 LTS (5.15.146.1-microsoft-standard-WSL2) Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, RAM: 64.0 GB | Test | Average time| @@ -10,7 +10,7 @@ | noop_layer_disabled | 12 ns | | noop_layer_enabled | 25 ns | | ot_layer_disabled | 19 ns | - | ot_layer_enabled | 561 ns | + | ot_layer_enabled | 588 ns | */ use async_trait::async_trait; diff --git a/opentelemetry-sdk/benches/metric_counter.rs b/opentelemetry-sdk/benches/metric_counter.rs index 67672e8469..dd566b3673 100644 --- a/opentelemetry-sdk/benches/metric_counter.rs +++ b/opentelemetry-sdk/benches/metric_counter.rs @@ -1,10 +1,33 @@ +/* + The benchmark results: + criterion = "0.5.1" + OS: Ubuntu 22.04.3 LTS (5.15.146.1-microsoft-standard-WSL2) + Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, + RAM: 64.0 GB + | Test | Average time| + |--------------------------------|-------------| + | Counter_Add_Sorted | 586 ns | + | Counter_Add_Unsorted | 592 ns | + | Random_Generator_5 | 377 ns | 
+ | ThreadLocal_Random_Generator_5 | 37 ns | +*/ + use criterion::{criterion_group, criterion_main, Criterion}; use opentelemetry::{ metrics::{Counter, MeterProvider as _}, KeyValue, }; use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider}; -use rand::{rngs::SmallRng, Rng, SeedableRng}; +use rand::{ + rngs::{self, SmallRng}, + Rng, SeedableRng, +}; +use std::cell::RefCell; + +thread_local! { + /// Store random number generator for each thread + static CURRENT_RNG: RefCell = RefCell::new(rngs::SmallRng::from_entropy()); +} // Run this benchmark with: // cargo bench --bench metric_counter --features=metrics @@ -30,12 +53,12 @@ fn counter_add(c: &mut Criterion) { let counter = create_counter(); c.bench_function("Counter_Add_Sorted", |b| { b.iter(|| { - let mut rng = SmallRng::from_entropy(); // 4*4*10*10 = 1600 time series. - let index_first_attribute = rng.gen_range(0..4); - let index_second_attribute = rng.gen_range(0..4); - let index_third_attribute = rng.gen_range(0..10); - let index_forth_attribute = rng.gen_range(0..10); + let rands = CURRENT_RNG.with_borrow_mut(|rng| [rng.gen_range(0..4), rng.gen_range(0..4), rng.gen_range(0..10), rng.gen_range(0..10)]); + let index_first_attribute = rands[0]; + let index_second_attribute = rands[1]; + let index_third_attribute = rands[2]; + let index_forth_attribute = rands[3]; counter.add( 1, &[ @@ -50,12 +73,12 @@ fn counter_add(c: &mut Criterion) { c.bench_function("Counter_Add_Unsorted", |b| { b.iter(|| { - let mut rng = SmallRng::from_entropy(); // 4*4*10*10 = 1600 time series. 
- let index_first_attribute = rng.gen_range(0..4); - let index_second_attribute = rng.gen_range(0..4); - let index_third_attribute = rng.gen_range(0..10); - let index_forth_attribute = rng.gen_range(0..10); + let rands = CURRENT_RNG.with_borrow_mut(|rng| [rng.gen_range(0..4), rng.gen_range(0..4), rng.gen_range(0..10), rng.gen_range(0..10)]); + let index_first_attribute = rands[0]; + let index_second_attribute = rands[1]; + let index_third_attribute = rands[2]; + let index_forth_attribute = rands[3]; counter.add( 1, &[ @@ -78,6 +101,12 @@ fn counter_add(c: &mut Criterion) { let _i5 = rng.gen_range(0..10); }); }); + + c.bench_function("ThreadLocal_Random_Generator_5", |b| { + b.iter(|| { + let _i1 = CURRENT_RNG.with_borrow_mut(|rng| [rng.gen_range(0..4),rng.gen_range(0..4),rng.gen_range(0..10), rng.gen_range(0..10), rng.gen_range(0..10)]); + }); + }); } criterion_group!(benches, criterion_benchmark); diff --git a/stress/src/logs.rs b/stress/src/logs.rs index fdc08e9dae..a70e81014d 100644 --- a/stress/src/logs.rs +++ b/stress/src/logs.rs @@ -1,3 +1,11 @@ +/* + Stress test results: + OS: Ubuntu 22.04.3 LTS (5.15.146.1-microsoft-standard-WSL2) + Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, + RAM: 64.0 GB + 27 M/sec +*/ + use opentelemetry_appender_tracing::layer; use opentelemetry_sdk::logs::{LogProcessor, LoggerProvider}; use tracing::error; diff --git a/stress/src/metrics.rs b/stress/src/metrics.rs index 9f7d2d2349..265456fd17 100644 --- a/stress/src/metrics.rs +++ b/stress/src/metrics.rs @@ -1,11 +1,22 @@ +/* + Stress test results: + OS: Ubuntu 22.04.3 LTS (5.15.146.1-microsoft-standard-WSL2) + Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, + RAM: 64.0 GB + 3M /sec +*/ + use lazy_static::lazy_static; use opentelemetry::{ metrics::{Counter, MeterProvider as _}, KeyValue, }; use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider}; -use rand::{rngs::SmallRng, Rng, SeedableRng}; -use std::borrow::Cow; +use rand::{ + 
rngs::{self}, + Rng, SeedableRng, +}; +use std::{borrow::Cow, cell::RefCell}; mod throughput; @@ -23,16 +34,21 @@ lazy_static! { .init(); } +thread_local! { + /// Store random number generator for each thread + static CURRENT_RNG: RefCell = RefCell::new(rngs::SmallRng::from_entropy()); +} + fn main() { throughput::test_throughput(test_counter); } fn test_counter() { - let mut rng = SmallRng::from_entropy(); let len = ATTRIBUTE_VALUES.len(); - let index_first_attribute = rng.gen_range(0..len); - let index_second_attribute = rng.gen_range(0..len); - let index_third_attribute = rng.gen_range(0..len); + let rands = CURRENT_RNG.with_borrow_mut(|rng| [rng.gen_range(0..len), rng.gen_range(0..len), rng.gen_range(0..len)]); + let index_first_attribute = rands[0]; + let index_second_attribute = rands[1]; + let index_third_attribute = rands[2]; // each attribute has 10 possible values, so there are 1000 possible combinations (time-series) COUNTER.add( diff --git a/stress/src/random.rs b/stress/src/random.rs index 5cefe5af80..b1e57fe0e7 100644 --- a/stress/src/random.rs +++ b/stress/src/random.rs @@ -1,14 +1,29 @@ -use rand::{rngs::SmallRng, Rng, SeedableRng}; +/* + Stress test results: + OS: Ubuntu 22.04.3 LTS (5.15.146.1-microsoft-standard-WSL2) + Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, + RAM: 64.0 GB + 1.25 B/sec +*/ + +use rand::{ + rngs::{self}, + Rng, SeedableRng, +}; mod throughput; +use std::cell::RefCell; + +thread_local! 
{ + /// Store random number generator for each thread + static CURRENT_RNG: RefCell = RefCell::new(rngs::SmallRng::from_entropy()); +} + fn main() { throughput::test_throughput(test_random_generation); } fn test_random_generation() { - let mut rng = SmallRng::from_entropy(); - let _index_first_attribute = rng.gen_range(0..10); - let _index_second_attribute = rng.gen_range(0..10); - let _index_third_attribute = rng.gen_range(0..10); + let _i1 = CURRENT_RNG.with_borrow_mut(|rng| [rng.gen_range(0..10), rng.gen_range(0..10), rng.gen_range(0..10)]); } diff --git a/stress/src/traces.rs b/stress/src/traces.rs index 2f95135dbc..0dd992f708 100644 --- a/stress/src/traces.rs +++ b/stress/src/traces.rs @@ -1,3 +1,11 @@ +/* + Stress test results: + OS: Ubuntu 22.04.3 LTS (5.15.146.1-microsoft-standard-WSL2) + Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, + RAM: 64.0 GB + 9 M/sec +*/ + use lazy_static::lazy_static; use opentelemetry::{ trace::{Span, SpanBuilder, TraceResult, Tracer, TracerProvider as _}, From f82dd14d06944a8b3d8ff4211961db969b94cabb Mon Sep 17 00:00:00 2001 From: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com> Date: Wed, 22 May 2024 15:29:40 -0700 Subject: [PATCH 04/37] [opentelemetry-otlp] Update example output (#1807) --- .../examples/basic-otlp/README.md | 191 ++++++------------ .../examples/basic-otlp/src/main.rs | 2 +- 2 files changed, 59 insertions(+), 134 deletions(-) diff --git a/opentelemetry-otlp/examples/basic-otlp/README.md b/opentelemetry-otlp/examples/basic-otlp/README.md index e7e57bf815..28206793bc 100644 --- a/opentelemetry-otlp/examples/basic-otlp/README.md +++ b/opentelemetry-otlp/examples/basic-otlp/README.md @@ -59,208 +59,133 @@ You should be able to see something similar below with different time and ID in ### Span ```text -2023-09-08T21:50:35.884Z info ResourceSpans #0 +2024-05-22T20:25:42.892Z info TracesExporter {"kind": "exporter", "data_type": "traces", "name": "logging", "resource spans": 2, 
"spans": 2} +2024-05-22T20:25:42.892Z info ResourceSpans #0 Resource SchemaURL: Resource attributes: - -> service.name: Str(basic-otlp-tracing-example) + -> service.name: Str(basic-otlp-example) ScopeSpans #0 ScopeSpans SchemaURL: -InstrumentationScope ex.com/basic +InstrumentationScope basic +InstrumentationScope attributes: + -> scope-key: Str(scope-value) Span #0 - Trace ID : f8e7ea4dcab43689cea14f708309d682 - Parent ID : 8b560e2e7238eab5 - ID : 9e36b48dc07b32fe + Trace ID : f3f6a43579f63734fe866d34d9aa0b88 + Parent ID : b66eacd1fcc728d3 + ID : af01696ea60b9229 Name : Sub operation... Kind : Internal - Start time : 2023-09-08 21:50:35.872800345 +0000 UTC - End time : 2023-09-08 21:50:35.87282574 +0000 UTC + Start time : 2024-05-22 20:25:42.877134 +0000 UTC + End time : 2024-05-22 20:25:42.8771425 +0000 UTC Status code : Unset Status message : Attributes: - -> lemons: Str(five) + -> another.key: Str(yes) Events: SpanEvent #0 -> Name: Sub span event - -> Timestamp: 2023-09-08 21:50:35.872808684 +0000 UTC + -> Timestamp: 2024-05-22 20:25:42.8771371 +0000 UTC -> DroppedAttributesCount: 0 ResourceSpans #1 Resource SchemaURL: Resource attributes: - -> service.name: Str(basic-otlp-tracing-example) + -> service.name: Str(basic-otlp-example) ScopeSpans #0 ScopeSpans SchemaURL: -InstrumentationScope ex.com/basic +InstrumentationScope basic +InstrumentationScope attributes: + -> scope-key: Str(scope-value) Span #0 - Trace ID : f8e7ea4dcab43689cea14f708309d682 + Trace ID : f3f6a43579f63734fe866d34d9aa0b88 Parent ID : - ID : 8b560e2e7238eab5 - Name : operation + ID : b66eacd1fcc728d3 + Name : Main operation Kind : Internal - Start time : 2023-09-08 21:50:35.872735497 +0000 UTC - End time : 2023-09-08 21:50:35.872832026 +0000 UTC + Start time : 2024-05-22 20:25:42.8770371 +0000 UTC + End time : 2024-05-22 20:25:42.8771505 +0000 UTC Status code : Unset Status message : Attributes: - -> ex.com/another: Str(yes) + -> another.key: Str(yes) Events: SpanEvent #0 -> Name: Nice 
operation! - -> Timestamp: 2023-09-08 21:50:35.872750123 +0000 UTC + -> Timestamp: 2024-05-22 20:25:42.8770471 +0000 UTC -> DroppedAttributesCount: 0 -> Attributes:: -> bogons: Int(100) - {"kind": "exporter", "data_type": "traces", "name": "logging"} + {"kind": "exporter", "data_type": "traces", "name": "logging"} ``` ### Metric ```text -2023-09-08T19:14:12.522Z info ResourceMetrics #0 +2024-05-22T20:25:42.908Z info MetricsExporter {"kind": "exporter", "data_type": "metrics", "name": "logging", "resource metrics": 1, "metrics": 1, "data points": 1} +2024-05-22T20:25:42.908Z info ResourceMetrics #0 Resource SchemaURL: Resource attributes: - -> service.name: Str(basic-otlp-metrics-example) + -> service.name: Str(basic-otlp-example) ScopeMetrics #0 -ScopeMetrics SchemaURL: -InstrumentationScope ex.com/basic +ScopeMetrics SchemaURL: schema_url +InstrumentationScope basic v1.0 +InstrumentationScope attributes: + -> scope-key: Str(scope-value) Metric #0 Descriptor: - -> Name: ex.com.one - -> Description: A gauge set to 1.0 - -> Unit: - -> DataType: Gauge -NumberDataPoints #0 -Data point attributes: - -> A: Str(1) - -> B: Str(2) - -> C: Str(3) - -> lemons: Int(10) -StartTimestamp: 1970-01-01 00:00:00 +0000 UTC -Timestamp: 2023-09-08 19:14:12.468030127 +0000 UTC -Value: 1.000000 -Metric #1 -Descriptor: - -> Name: ex.com.two - -> Description: - -> Unit: - -> DataType: Histogram + -> Name: test_counter + -> Description: a simple counter for demo purposes. 
+ -> Unit: my_unit + -> DataType: Sum + -> IsMonotonic: true -> AggregationTemporality: Cumulative -HistogramDataPoints #0 +NumberDataPoints #0 Data point attributes: - -> A: Str(1) - -> B: Str(2) - -> C: Str(3) - -> lemons: Int(10) -StartTimestamp: 2023-09-08 19:14:12.466896812 +0000 UTC -Timestamp: 2023-09-08 19:14:12.468052807 +0000 UTC -Count: 1 -Sum: 5.500000 -Min: 5.500000 -Max: 5.500000 -ExplicitBounds #0: 0.000000 -ExplicitBounds #1: 5.000000 -ExplicitBounds #2: 10.000000 -ExplicitBounds #3: 25.000000 -ExplicitBounds #4: 50.000000 -ExplicitBounds #5: 75.000000 -ExplicitBounds #6: 100.000000 -ExplicitBounds #7: 250.000000 -ExplicitBounds #8: 500.000000 -ExplicitBounds #9: 750.000000 -ExplicitBounds #10: 1000.000000 -ExplicitBounds #11: 2500.000000 -ExplicitBounds #12: 5000.000000 -ExplicitBounds #13: 7500.000000 -ExplicitBounds #14: 10000.000000 -Buckets #0, Count: 0 -Buckets #1, Count: 0 -Buckets #2, Count: 1 -Buckets #3, Count: 0 -Buckets #4, Count: 0 -Buckets #5, Count: 0 -Buckets #6, Count: 0 -Buckets #7, Count: 0 -Buckets #8, Count: 0 -Buckets #9, Count: 0 -Buckets #10, Count: 0 -Buckets #11, Count: 0 -Buckets #12, Count: 0 -Buckets #13, Count: 0 -Buckets #14, Count: 0 -Buckets #15, Count: 0 -HistogramDataPoints #1 -StartTimestamp: 2023-09-08 19:14:12.466896812 +0000 UTC -Timestamp: 2023-09-08 19:14:12.468052807 +0000 UTC -Count: 1 -Sum: 1.300000 -Min: 1.300000 -Max: 1.300000 -ExplicitBounds #0: 0.000000 -ExplicitBounds #1: 5.000000 -ExplicitBounds #2: 10.000000 -ExplicitBounds #3: 25.000000 -ExplicitBounds #4: 50.000000 -ExplicitBounds #5: 75.000000 -ExplicitBounds #6: 100.000000 -ExplicitBounds #7: 250.000000 -ExplicitBounds #8: 500.000000 -ExplicitBounds #9: 750.000000 -ExplicitBounds #10: 1000.000000 -ExplicitBounds #11: 2500.000000 -ExplicitBounds #12: 5000.000000 -ExplicitBounds #13: 7500.000000 -ExplicitBounds #14: 10000.000000 -Buckets #0, Count: 0 -Buckets #1, Count: 1 -Buckets #2, Count: 0 -Buckets #3, Count: 0 -Buckets #4, Count: 0 -Buckets 
#5, Count: 0 -Buckets #6, Count: 0 -Buckets #7, Count: 0 -Buckets #8, Count: 0 -Buckets #9, Count: 0 -Buckets #10, Count: 0 -Buckets #11, Count: 0 -Buckets #12, Count: 0 -Buckets #13, Count: 0 -Buckets #14, Count: 0 -Buckets #15, Count: 0 - {"kind": "exporter", "data_type": "metrics", "name": "logging"} + -> test_key: Str(test_value) +StartTimestamp: 2024-05-22 20:25:42.8767804 +0000 UTC +Timestamp: 2024-05-22 20:25:42.8937799 +0000 UTC +Value: 10 + {"kind": "exporter", "data_type": "metrics", "name": "logging"} ``` ### Logs ```text -2023-09-08T21:50:35.884Z info ResourceLog #0 +2024-05-22T20:25:42.914Z info LogsExporter {"kind": "exporter", "data_type": "logs", "name": "logging", "resource logs": 2, "log records": 2} +2024-05-22T20:25:42.914Z info ResourceLog #0 Resource SchemaURL: Resource attributes: - -> service.name: Str(basic-otlp-logging-example) + -> service.name: Str(basic-otlp-example) ScopeLogs #0 ScopeLogs SchemaURL: -InstrumentationScope opentelemetry-log-appender 0.1.0 +InstrumentationScope opentelemetry-appender-tracing 0.4.0 LogRecord #0 -ObservedTimestamp: 2023-09-08 21:50:35.872759168 +0000 UTC +ObservedTimestamp: 2024-05-22 20:25:42.8771025 +0000 UTC Timestamp: 1970-01-01 00:00:00 +0000 UTC SeverityText: INFO SeverityNumber: Info(9) Body: Str(hello from banana. My price is 2.99. I am also inside a Span!) 
-Trace ID: f8e7ea4dcab43689cea14f708309d682 -Span ID: 8b560e2e7238eab5 +Attributes: + -> name: Str(my-event-inside-span) +Trace ID: f3f6a43579f63734fe866d34d9aa0b88 +Span ID: b66eacd1fcc728d3 Flags: 1 ResourceLog #1 Resource SchemaURL: Resource attributes: - -> service.name: Str(basic-otlp-logging-example) + -> service.name: Str(basic-otlp-example) ScopeLogs #0 ScopeLogs SchemaURL: -InstrumentationScope opentelemetry-log-appender 0.1.0 +InstrumentationScope opentelemetry-appender-tracing 0.4.0 LogRecord #0 -ObservedTimestamp: 2023-09-08 21:50:35.872833713 +0000 UTC +ObservedTimestamp: 2024-05-22 20:25:42.8771591 +0000 UTC Timestamp: 1970-01-01 00:00:00 +0000 UTC SeverityText: INFO SeverityNumber: Info(9) Body: Str(hello from apple. My price is 1.99) +Attributes: + -> name: Str(my-event) Trace ID: Span ID: Flags: 0 + {"kind": "exporter", "data_type": "logs", "name": "logging"} ``` diff --git a/opentelemetry-otlp/examples/basic-otlp/src/main.rs b/opentelemetry-otlp/examples/basic-otlp/src/main.rs index 9932faa8cf..507d6c6fce 100644 --- a/opentelemetry-otlp/examples/basic-otlp/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp/src/main.rs @@ -154,8 +154,8 @@ async fn main() -> Result<(), Box> { info!(name: "my-event", target: "my-target", "hello from {}. My price is {}", "apple", 1.99); global::shutdown_tracer_provider(); - logger_provider.shutdown()?; meter_provider.shutdown()?; + logger_provider.shutdown()?; Ok(()) } From 35f9a60d5c0533311055b75fa9b702eeb7e0bfb2 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Wed, 22 May 2024 16:09:27 -0700 Subject: [PATCH 05/37] Removes ordered-float dependency, and make own implementation for f64 hashing. 
(#1806) --- opentelemetry-sdk/CHANGELOG.md | 1 + opentelemetry-sdk/Cargo.toml | 1 - opentelemetry-sdk/benches/metric_counter.rs | 31 +++- opentelemetry-sdk/src/metrics/mod.rs | 114 +------------- opentelemetry/CHANGELOG.md | 3 + opentelemetry/Cargo.toml | 1 + opentelemetry/src/metrics/mod.rs | 155 +++++++++++++++++++- stress/src/metrics.rs | 9 +- stress/src/random.rs | 9 +- 9 files changed, 211 insertions(+), 113 deletions(-) diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 4c718ebc84..3d141e1ad5 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -4,6 +4,7 @@ - Add "metrics", "logs" to default features. With this, default feature list is "trace", "metrics" and "logs". +- Removed dependency on `ordered-float`. ## v0.23.0 diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index b57bd2f043..b535dd5438 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -18,7 +18,6 @@ futures-channel = "0.3" futures-executor = { workspace = true } futures-util = { workspace = true, features = ["std", "sink", "async-await-macro"] } once_cell = { workspace = true } -ordered-float = { workspace = true } percent-encoding = { version = "2.0", optional = true } rand = { workspace = true, features = ["std", "std_rng","small_rng"], optional = true } glob = { version = "0.3.1", optional =true} diff --git a/opentelemetry-sdk/benches/metric_counter.rs b/opentelemetry-sdk/benches/metric_counter.rs index dd566b3673..d6e42bb6d5 100644 --- a/opentelemetry-sdk/benches/metric_counter.rs +++ b/opentelemetry-sdk/benches/metric_counter.rs @@ -54,7 +54,15 @@ fn counter_add(c: &mut Criterion) { c.bench_function("Counter_Add_Sorted", |b| { b.iter(|| { // 4*4*10*10 = 1600 time series. 
- let rands = CURRENT_RNG.with_borrow_mut(|rng| [rng.gen_range(0..4), rng.gen_range(0..4), rng.gen_range(0..10), rng.gen_range(0..10)]); + let rands = CURRENT_RNG.with(|rng| { + let mut rng = rng.borrow_mut(); + [ + rng.gen_range(0..4), + rng.gen_range(0..4), + rng.gen_range(0..10), + rng.gen_range(0..10), + ] + }); let index_first_attribute = rands[0]; let index_second_attribute = rands[1]; let index_third_attribute = rands[2]; @@ -74,7 +82,15 @@ fn counter_add(c: &mut Criterion) { c.bench_function("Counter_Add_Unsorted", |b| { b.iter(|| { // 4*4*10*10 = 1600 time series. - let rands = CURRENT_RNG.with_borrow_mut(|rng| [rng.gen_range(0..4), rng.gen_range(0..4), rng.gen_range(0..10), rng.gen_range(0..10)]); + let rands = CURRENT_RNG.with(|rng| { + let mut rng = rng.borrow_mut(); + [ + rng.gen_range(0..4), + rng.gen_range(0..4), + rng.gen_range(0..10), + rng.gen_range(0..10), + ] + }); let index_first_attribute = rands[0]; let index_second_attribute = rands[1]; let index_third_attribute = rands[2]; @@ -104,7 +120,16 @@ fn counter_add(c: &mut Criterion) { c.bench_function("ThreadLocal_Random_Generator_5", |b| { b.iter(|| { - let _i1 = CURRENT_RNG.with_borrow_mut(|rng| [rng.gen_range(0..4),rng.gen_range(0..4),rng.gen_range(0..10), rng.gen_range(0..10), rng.gen_range(0..10)]); + let __i1 = CURRENT_RNG.with(|rng| { + let mut rng = rng.borrow_mut(); + [ + rng.gen_range(0..4), + rng.gen_range(0..4), + rng.gen_range(0..10), + rng.gen_range(0..10), + rng.gen_range(0..10), + ] + }); }); }); } diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index 534226ea53..830f7a7d35 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -66,71 +66,16 @@ pub use view::*; use std::collections::hash_map::DefaultHasher; use std::collections::HashSet; -use std::{ - cmp::Ordering, - hash::{Hash, Hasher}, -}; +use std::hash::{Hash, Hasher}; -use opentelemetry::{Array, Key, KeyValue, Value}; -use 
ordered_float::OrderedFloat; - -#[derive(Clone, Debug)] -struct HashKeyValue(KeyValue); - -impl Hash for HashKeyValue { - fn hash(&self, state: &mut H) { - self.0.key.hash(state); - match &self.0.value { - Value::F64(f) => OrderedFloat(*f).hash(state), - Value::Array(a) => match a { - Array::Bool(b) => b.hash(state), - Array::I64(i) => i.hash(state), - Array::F64(f) => f.iter().for_each(|f| OrderedFloat(*f).hash(state)), - Array::String(s) => s.hash(state), - }, - Value::Bool(b) => b.hash(state), - Value::I64(i) => i.hash(state), - Value::String(s) => s.hash(state), - }; - } -} - -impl PartialOrd for HashKeyValue { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for HashKeyValue { - fn cmp(&self, other: &Self) -> Ordering { - self.0.key.cmp(&other.0.key) - } -} - -impl PartialEq for HashKeyValue { - fn eq(&self, other: &Self) -> bool { - self.0.key == other.0.key - && match (&self.0.value, &other.0.value) { - (Value::F64(f), Value::F64(of)) => OrderedFloat(*f).eq(&OrderedFloat(*of)), - (Value::Array(Array::F64(f)), Value::Array(Array::F64(of))) => { - f.len() == of.len() - && f.iter() - .zip(of.iter()) - .all(|(f, of)| OrderedFloat(*f).eq(&OrderedFloat(*of))) - } - (non_float, other_non_float) => non_float.eq(other_non_float), - } - } -} - -impl Eq for HashKeyValue {} +use opentelemetry::{Key, KeyValue, Value}; /// A unique set of attributes that can be used as instrument identifiers. /// /// This must implement [Hash], [PartialEq], and [Eq] so it may be used as /// HashMap keys and other de-duplication methods. 
#[derive(Clone, Default, Debug, PartialEq, Eq)] -pub struct AttributeSet(Vec, u64); +pub struct AttributeSet(Vec, u64); impl From<&[KeyValue]> for AttributeSet { fn from(values: &[KeyValue]) -> Self { @@ -140,7 +85,7 @@ impl From<&[KeyValue]> for AttributeSet { .rev() .filter_map(|kv| { if seen_keys.insert(kv.key.clone()) { - Some(HashKeyValue(kv.clone())) + Some(kv.clone()) } else { None } @@ -151,7 +96,7 @@ impl From<&[KeyValue]> for AttributeSet { } } -fn calculate_hash(values: &[HashKeyValue]) -> u64 { +fn calculate_hash(values: &[KeyValue]) -> u64 { let mut hasher = DefaultHasher::new(); values.iter().fold(&mut hasher, |mut hasher, item| { item.hash(&mut hasher); @@ -161,7 +106,7 @@ fn calculate_hash(values: &[HashKeyValue]) -> u64 { } impl AttributeSet { - fn new(mut values: Vec) -> Self { + fn new(mut values: Vec) -> Self { values.sort_unstable(); let hash = calculate_hash(&values); AttributeSet(values, hash) @@ -177,7 +122,7 @@ impl AttributeSet { where F: Fn(&KeyValue) -> bool, { - self.0.retain(|kv| f(&kv.0)); + self.0.retain(|kv| f(kv)); // Recalculate the hash as elements are changed. 
self.1 = calculate_hash(&self.0); @@ -185,7 +130,7 @@ impl AttributeSet { /// Iterate over key value pairs in the set pub fn iter(&self) -> impl Iterator { - self.0.iter().map(|kv| (&kv.0.key, &kv.0.value)) + self.0.iter().map(|kv| (&kv.key, &kv.value)) } } @@ -209,52 +154,9 @@ mod tests { KeyValue, }; use std::borrow::Cow; - use std::hash::DefaultHasher; - use std::hash::{Hash, Hasher}; // Run all tests in this mod // cargo test metrics::tests --features=metrics,testing - - #[test] - fn equality_kv_float() { - let kv1 = HashKeyValue(KeyValue::new("key", 1.0)); - let kv2 = HashKeyValue(KeyValue::new("key", 1.0)); - assert_eq!(kv1, kv2); - - let kv1 = HashKeyValue(KeyValue::new("key", 1.0)); - let kv2 = HashKeyValue(KeyValue::new("key", 1.01)); - assert_ne!(kv1, kv2); - - let kv1 = HashKeyValue(KeyValue::new("key", std::f64::NAN)); - let kv2 = HashKeyValue(KeyValue::new("key", std::f64::NAN)); - assert_eq!(kv1, kv2); - - let kv1 = HashKeyValue(KeyValue::new("key", std::f64::INFINITY)); - let kv2 = HashKeyValue(KeyValue::new("key", std::f64::INFINITY)); - assert_eq!(kv1, kv2); - } - - #[test] - fn hash_kv_float() { - let kv1 = HashKeyValue(KeyValue::new("key", 1.0)); - let kv2 = HashKeyValue(KeyValue::new("key", 1.0)); - assert_eq!(hash_helper(&kv1), hash_helper(&kv2)); - - let kv1 = HashKeyValue(KeyValue::new("key", std::f64::NAN)); - let kv2 = HashKeyValue(KeyValue::new("key", std::f64::NAN)); - assert_eq!(hash_helper(&kv1), hash_helper(&kv2)); - - let kv1 = HashKeyValue(KeyValue::new("key", std::f64::INFINITY)); - let kv2 = HashKeyValue(KeyValue::new("key", std::f64::INFINITY)); - assert_eq!(hash_helper(&kv1), hash_helper(&kv2)); - } - - fn hash_helper(item: &T) -> u64 { - let mut hasher = DefaultHasher::new(); - item.hash(&mut hasher); - hasher.finish() - } - // Note for all tests from this point onwards in this mod: // "multi_thread" tokio flavor must be used else flush won't // be able to make progress! 
diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index 4641d4425e..396e48c388 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -4,6 +4,9 @@ - Add "metrics", "logs" to default features. With this, default feature list is "trace", "metrics" and "logs". +- When "metrics" feature is enabled, `KeyValue` implements `PartialEq`, `Eq`, + `PartialOrder`, `Order`, `Hash`. This is meant to be used for metrics + aggregation purposes only. ## v0.23.0 diff --git a/opentelemetry/Cargo.toml b/opentelemetry/Cargo.toml index 24ff2fe215..f9e7d9c42e 100644 --- a/opentelemetry/Cargo.toml +++ b/opentelemetry/Cargo.toml @@ -42,6 +42,7 @@ otel_unstable = [] [dev-dependencies] opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["logs_level_enabled"]} # for documentation tests criterion = { version = "0.3" } +rand = { workspace = true } [[bench]] name = "metrics" diff --git a/opentelemetry/src/metrics/mod.rs b/opentelemetry/src/metrics/mod.rs index 34ed16fa39..bd24d6368d 100644 --- a/opentelemetry/src/metrics/mod.rs +++ b/opentelemetry/src/metrics/mod.rs @@ -1,6 +1,8 @@ //! 
# OpenTelemetry Metrics API use std::any::Any; +use std::cmp::Ordering; +use std::hash::{Hash, Hasher}; use std::result; use std::sync::PoisonError; use std::{borrow::Cow, sync::Arc}; @@ -10,7 +12,7 @@ mod instruments; mod meter; pub mod noop; -use crate::ExportError; +use crate::{Array, ExportError, KeyValue, Value}; pub use instruments::{ counter::{Counter, ObservableCounter, SyncCounter}, gauge::{Gauge, ObservableGauge, SyncGauge}, @@ -81,6 +83,55 @@ impl AsRef for Unit { } } +struct F64Hashable(f64); + +impl PartialEq for F64Hashable { + fn eq(&self, other: &Self) -> bool { + self.0.to_bits() == other.0.to_bits() + } +} + +impl Eq for F64Hashable {} + +impl Hash for F64Hashable { + fn hash(&self, state: &mut H) { + self.0.to_bits().hash(state); + } +} + +impl Hash for KeyValue { + fn hash(&self, state: &mut H) { + self.key.hash(state); + match &self.value { + Value::F64(f) => F64Hashable(*f).hash(state), + Value::Array(a) => match a { + Array::Bool(b) => b.hash(state), + Array::I64(i) => i.hash(state), + Array::F64(f) => f.iter().for_each(|f| F64Hashable(*f).hash(state)), + Array::String(s) => s.hash(state), + }, + Value::Bool(b) => b.hash(state), + Value::I64(i) => i.hash(state), + Value::String(s) => s.hash(state), + }; + } +} + +impl PartialOrd for KeyValue { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +/// Ordering is based on the key only. +impl Ord for KeyValue { + fn cmp(&self, other: &Self) -> Ordering { + self.key.cmp(&other.key) + } +} + +impl Eq for KeyValue {} + /// SDK implemented trait for creating instruments pub trait InstrumentProvider { /// creates an instrument for recording increasing values. 
@@ -279,3 +330,105 @@ pub trait InstrumentProvider { } type MultiInstrumentCallback = dyn Fn(&dyn Observer) + Send + Sync; + +#[cfg(test)] +mod tests { + use rand::Rng; + + use crate::KeyValue; + use std::collections::hash_map::DefaultHasher; + use std::f64; + use std::hash::{Hash, Hasher}; + + #[test] + fn kv_float_equality() { + let kv1 = KeyValue::new("key", 1.0); + let kv2 = KeyValue::new("key", 1.0); + assert_eq!(kv1, kv2); + + let kv1 = KeyValue::new("key", 1.0); + let kv2 = KeyValue::new("key", 1.01); + assert_ne!(kv1, kv2); + + let kv1 = KeyValue::new("key", f64::NAN); + let kv2 = KeyValue::new("key", f64::NAN); + assert_ne!(kv1, kv2, "NAN is not equal to itself"); + + for float_val in [ + f64::INFINITY, + f64::NEG_INFINITY, + f64::MAX, + f64::MIN, + f64::MIN_POSITIVE, + ] + .iter() + { + let kv1 = KeyValue::new("key", *float_val); + let kv2 = KeyValue::new("key", *float_val); + assert_eq!(kv1, kv2); + } + + let mut rng = rand::thread_rng(); + + for _ in 0..100 { + let random_value = rng.gen::(); + let kv1 = KeyValue::new("key", random_value); + let kv2 = KeyValue::new("key", random_value); + assert_eq!(kv1, kv2); + } + } + + #[test] + fn kv_float_hash() { + for float_val in [ + f64::NAN, + f64::INFINITY, + f64::NEG_INFINITY, + f64::MAX, + f64::MIN, + f64::MIN_POSITIVE, + ] + .iter() + { + let kv1 = KeyValue::new("key", *float_val); + let kv2 = KeyValue::new("key", *float_val); + assert_eq!(hash_helper(&kv1), hash_helper(&kv2)); + } + + let mut rng = rand::thread_rng(); + + for _ in 0..100 { + let random_value = rng.gen::(); + let kv1 = KeyValue::new("key", random_value); + let kv2 = KeyValue::new("key", random_value); + assert_eq!(hash_helper(&kv1), hash_helper(&kv2)); + } + } + + #[test] + fn kv_float_order() { + // TODO: Extend this test to all value types, not just F64 + let float_vals = [ + 0.0, + 1.0, + -1.0, + f64::INFINITY, + f64::NEG_INFINITY, + f64::NAN, + f64::MIN, + f64::MAX, + ]; + + for v in float_vals { + let kv1 = KeyValue::new("a", v); + 
let kv2 = KeyValue::new("b", v); + assert!(kv1 < kv2, "Order is solely based on key!"); + } + } + + fn hash_helper(item: &T) -> u64 { + let mut hasher = DefaultHasher::new(); + item.hash(&mut hasher); + hasher.finish() + } +} diff --git a/stress/src/metrics.rs b/stress/src/metrics.rs index 265456fd17..2309d8eaaa 100644 --- a/stress/src/metrics.rs +++ b/stress/src/metrics.rs @@ -45,7 +45,14 @@ fn main() { fn test_counter() { let len = ATTRIBUTE_VALUES.len(); - let rands = CURRENT_RNG.with_borrow_mut(|rng| [rng.gen_range(0..len), rng.gen_range(0..len), rng.gen_range(0..len)]); + let rands = CURRENT_RNG.with(|rng| { + let mut rng = rng.borrow_mut(); + [ + rng.gen_range(0..len), + rng.gen_range(0..len), + rng.gen_range(0..len), + ] + }); let index_first_attribute = rands[0]; let index_second_attribute = rands[1]; let index_third_attribute = rands[2]; diff --git a/stress/src/random.rs b/stress/src/random.rs index b1e57fe0e7..8b08aa8b0d 100644 --- a/stress/src/random.rs +++ b/stress/src/random.rs @@ -25,5 +25,12 @@ fn main() { } fn test_random_generation() { - let _i1 = CURRENT_RNG.with_borrow_mut(|rng| [rng.gen_range(0..10), rng.gen_range(0..10), rng.gen_range(0..10)]); + let _i1 = CURRENT_RNG.with(|rng| { + let mut rng = rng.borrow_mut(); + [ + rng.gen_range(0..10), + rng.gen_range(0..10), + rng.gen_range(0..10), + ] + }); } From d21b13ae1ee5f2c2fef28f4b0743c2f0f2004b93 Mon Sep 17 00:00:00 2001 From: Lalit Kumar Bhasin Date: Thu, 23 May 2024 09:08:35 -0700 Subject: [PATCH 06/37] Integration tests for Logs (#1759) Co-authored-by: Zhongyang Wu --- .../tests/integration_test/Cargo.toml | 3 +- .../expected/failed_logs.json | 36 +++++++ .../tests/integration_test/expected/logs.json | 36 +++++++ .../otel-collector-config.yaml | 5 +- .../tests/integration_test/src/lib.rs | 3 +- .../integration_test/src/logs_asserter.rs | 102 ++++++++++++++++++ .../src/{asserter.rs => trace_asserter.rs} | 0 .../tests/integration_tests.rs | 69 +++++++++--- 
.../tests/integration_test/tests/logs.rs | 58 ++++++++++ .../tests/integration_test/tests/traces.rs | 2 +- .../tonic/opentelemetry.proto.logs.v1.rs | 7 ++ opentelemetry-proto/tests/grpc_build.rs | 6 +- 12 files changed, 307 insertions(+), 20 deletions(-) create mode 100644 opentelemetry-otlp/tests/integration_test/expected/failed_logs.json create mode 100644 opentelemetry-otlp/tests/integration_test/expected/logs.json create mode 100644 opentelemetry-otlp/tests/integration_test/src/logs_asserter.rs rename opentelemetry-otlp/tests/integration_test/src/{asserter.rs => trace_asserter.rs} (100%) create mode 100644 opentelemetry-otlp/tests/integration_test/tests/logs.rs diff --git a/opentelemetry-otlp/tests/integration_test/Cargo.toml b/opentelemetry-otlp/tests/integration_test/Cargo.toml index b21861d19d..662d7ea9fd 100644 --- a/opentelemetry-otlp/tests/integration_test/Cargo.toml +++ b/opentelemetry-otlp/tests/integration_test/Cargo.toml @@ -12,7 +12,8 @@ opentelemetry_sdk = { path = "../../../opentelemetry-sdk", features = ["rt-tokio opentelemetry-otlp = { path = "../../../opentelemetry-otlp", features = ["tonic", "metrics", "logs"] } opentelemetry-semantic-conventions = { path = "../../../opentelemetry-semantic-conventions" } opentelemetry-proto = { path = "../../../opentelemetry-proto", features = ["gen-tonic-messages", "trace", "with-serde"] } +opentelemetry-appender-log = { path = "../../../opentelemetry-appender-log", default-features = false} +log = { workspace = true } tokio = { version = "1.0", features = ["full"] } serde_json = "1" testcontainers = "0.15.0" -# env_logger = "0.11.3" // uncomment if needed for local debugging. 
diff --git a/opentelemetry-otlp/tests/integration_test/expected/failed_logs.json b/opentelemetry-otlp/tests/integration_test/expected/failed_logs.json new file mode 100644 index 0000000000..923316dfed --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/expected/failed_logs.json @@ -0,0 +1,36 @@ +{ + "resourceLogs": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { + "stringValue": "logs-integration-test" + } + } + ] + }, + "scopeLogs": [ + { + "scope": { + "name": "opentelemetry-log-appender", + "version": "0.3.0" + }, + "logRecords": [ + { + "observedTimeUnixNano": "1715753202587469939", + "severityNumber": 9, + "severityText": "INFO", + "body": { + "stringValue": "hello from apple. My price is 2.99." + }, + "traceId": "", + "spanId": "" + } + ] + } + ] + } + ] +} diff --git a/opentelemetry-otlp/tests/integration_test/expected/logs.json b/opentelemetry-otlp/tests/integration_test/expected/logs.json new file mode 100644 index 0000000000..4653189c82 --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/expected/logs.json @@ -0,0 +1,36 @@ +{ + "resourceLogs": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { + "stringValue": "logs-integration-test" + } + } + ] + }, + "scopeLogs": [ + { + "scope": { + "name": "opentelemetry-log-appender", + "version": "0.3.0" + }, + "logRecords": [ + { + "observedTimeUnixNano": "1715753202587469939", + "severityNumber": 9, + "severityText": "INFO", + "body": { + "stringValue": "hello from banana. My price is 2.99." 
+ }, + "traceId": "", + "spanId": "" + } + ] + } + ] + } + ] +} diff --git a/opentelemetry-otlp/tests/integration_test/otel-collector-config.yaml b/opentelemetry-otlp/tests/integration_test/otel-collector-config.yaml index bcd9d9a429..39d755cc9a 100644 --- a/opentelemetry-otlp/tests/integration_test/otel-collector-config.yaml +++ b/opentelemetry-otlp/tests/integration_test/otel-collector-config.yaml @@ -8,10 +8,13 @@ exporters: logging: loglevel: debug file: - path: /testresults/traces.json + path: /testresults/result.json service: pipelines: traces: receivers: [otlp] exporters: [file] + logs: + receivers: [otlp] + exporters: [file] diff --git a/opentelemetry-otlp/tests/integration_test/src/lib.rs b/opentelemetry-otlp/tests/integration_test/src/lib.rs index 54080e6844..d6b7196d84 100644 --- a/opentelemetry-otlp/tests/integration_test/src/lib.rs +++ b/opentelemetry-otlp/tests/integration_test/src/lib.rs @@ -1,2 +1,3 @@ -pub mod asserter; pub mod images; +pub mod logs_asserter; +pub mod trace_asserter; diff --git a/opentelemetry-otlp/tests/integration_test/src/logs_asserter.rs b/opentelemetry-otlp/tests/integration_test/src/logs_asserter.rs new file mode 100644 index 0000000000..da045691f5 --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/src/logs_asserter.rs @@ -0,0 +1,102 @@ +use opentelemetry_proto::tonic::logs::v1::{LogRecord, LogsData, ResourceLogs}; +use std::fs::File; + +// Given two ResourceLogs, assert that they are equal except for the timestamps +pub struct LogsAsserter { + results: Vec, + expected: Vec, +} + +impl LogsAsserter { + // Create a new LogsAsserter + pub fn new(results: Vec, expected: Vec) -> Self { + LogsAsserter { results, expected } + } + + pub fn assert(self) { + self.assert_resource_logs_eq(&self.results, &self.expected); + } + + fn assert_resource_logs_eq(&self, results: &[ResourceLogs], expected: &[ResourceLogs]) { + let mut results_logs = Vec::new(); + let mut expected_logs = Vec::new(); + + assert_eq!(results.len(), 
expected.len()); + for i in 0..results.len() { + let result_resource_logs = &results[i]; + let expected_resource_logs = &expected[i]; + assert_eq!( + result_resource_logs.resource, + expected_resource_logs.resource + ); + assert_eq!( + result_resource_logs.schema_url, + expected_resource_logs.schema_url + ); + + assert_eq!( + result_resource_logs.scope_logs.len(), + expected_resource_logs.scope_logs.len() + ); + + for i in 0..result_resource_logs.scope_logs.len() { + let result_scope_logs = &result_resource_logs.scope_logs[i]; + let expected_scope_logs = &expected_resource_logs.scope_logs[i]; + + results_logs.extend(result_scope_logs.log_records.clone()); + expected_logs.extend(expected_scope_logs.log_records.clone()); + } + } + + for (result_log, expected_log) in results_logs.iter().zip(expected_logs.iter()) { + assert_eq!( + LogRecordWrapper(result_log.clone()), + LogRecordWrapper(expected_log.clone()) + ); + } + } +} + +pub struct LogRecordWrapper(pub LogRecord); + +impl PartialEq for LogRecordWrapper { + fn eq(&self, other: &Self) -> bool { + let LogRecordWrapper(ref a) = *self; + let LogRecordWrapper(ref b) = *other; + + assert_eq!( + a.severity_number, b.severity_number, + "severity_number does not match" + ); + assert_eq!( + a.severity_text, b.severity_text, + "severity_text does not match" + ); + assert_eq!(a.body, b.body, "body does not match"); + assert_eq!(a.attributes, b.attributes, "attributes do not match"); + assert_eq!( + a.dropped_attributes_count, b.dropped_attributes_count, + "dropped_attributes_count does not match" + ); + assert_eq!(a.flags, b.flags, "flags do not match"); + assert_eq!(a.trace_id, b.trace_id, "trace_id does not match"); + assert_eq!(a.span_id, b.span_id, "span_id does not match"); + + true + } +} + +impl std::fmt::Debug for LogRecordWrapper { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let LogRecordWrapper(ref inner) = *self; + inner.fmt(f) + } +} + +// read a file contains ResourceSpans in json 
format +pub fn read_logs_from_json(file: File) -> Vec { + let reader = std::io::BufReader::new(file); + + let log_data: LogsData = serde_json::from_reader(reader).unwrap(); + log_data.resource_logs +} diff --git a/opentelemetry-otlp/tests/integration_test/src/asserter.rs b/opentelemetry-otlp/tests/integration_test/src/trace_asserter.rs similarity index 100% rename from opentelemetry-otlp/tests/integration_test/src/asserter.rs rename to opentelemetry-otlp/tests/integration_test/src/trace_asserter.rs diff --git a/opentelemetry-otlp/tests/integration_test/tests/integration_tests.rs b/opentelemetry-otlp/tests/integration_test/tests/integration_tests.rs index a2d4e5b7ab..01df65239a 100644 --- a/opentelemetry-otlp/tests/integration_test/tests/integration_tests.rs +++ b/opentelemetry-otlp/tests/integration_test/tests/integration_tests.rs @@ -8,34 +8,33 @@ use testcontainers::clients::Cli; use testcontainers::core::Port; use testcontainers::RunnableImage; +mod logs; mod traces; const COLLECTOR_CONTAINER_NAME: &str = "otel-collector"; const TEST_RESULT_DIR_IN_CONTAINER: &str = "testresults"; const EXPECTED_DIR: &str = "./expected"; +const RESULT_FILE_PATH: &str = "./result.json"; struct TestSuite { - result_file_path: &'static str, + expected_file_path: &'static str, } impl TestSuite { - fn new(result_file_path: &'static str) -> Self { - Self { result_file_path } + fn new(expected_file_path: &'static str) -> Self { + Self { expected_file_path } } pub fn expected_file_path(&self) -> String { - format!("{}/{}", EXPECTED_DIR, self.result_file_path) + format!("{}/{}", EXPECTED_DIR, self.expected_file_path) } pub fn result_file_path_in_container(&self) -> String { - format!( - "/{}/{}", - TEST_RESULT_DIR_IN_CONTAINER, self.result_file_path - ) + format!("/{}/{}", TEST_RESULT_DIR_IN_CONTAINER, RESULT_FILE_PATH) } pub fn result_file_path(&self) -> String { - format!("./{}", self.result_file_path) + format!("./{}", RESULT_FILE_PATH) } /// Create a empty file on localhost and copy 
it to container with proper permissions @@ -52,13 +51,12 @@ impl TestSuite { #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[ignore] // skip when running unit test async fn integration_tests() { - // Uncomment to see logs from testcontainers - // env_logger::init(); - // Run locally using the below command - // cargo test -- --ignored --nocapture + trace_integration_tests().await; + logs_integration_tests().await; +} +async fn trace_integration_tests() { let test_suites = [TestSuite::new("traces.json")]; - let mut collector_image = Collector::default(); for test in test_suites.as_ref() { let _ = test.create_temporary_result_file(); @@ -98,3 +96,46 @@ async fn integration_tests() { collector_container.stop(); } + +async fn logs_integration_tests() { + let test_suites = [TestSuite::new("logs.json")]; + + let mut collector_image = Collector::default(); + for test in test_suites.as_ref() { + let _ = test.create_temporary_result_file(); + collector_image = collector_image.with_volume( + test.result_file_path().as_str(), + test.result_file_path_in_container().as_str(), + ); + } + + let docker = Cli::default(); + let mut image = + RunnableImage::from(collector_image).with_container_name(COLLECTOR_CONTAINER_NAME); + + for port in [ + 4317, // gRPC port + 4318, // HTTP port + ] { + image = image.with_mapped_port(Port { + local: port, + internal: port, + }) + } + + let collector_container = docker.run(image); + + tokio::time::sleep(Duration::from_secs(5)).await; + logs::logs().await.unwrap(); + + // wait for file to flush to disks + // ideally we should use volume mount but otel collector file exporter doesn't handle permission too well + // bind mount mitigate the issue by set up the permission correctly on host system + tokio::time::sleep(Duration::from_secs(5)).await; + logs::assert_logs_results( + test_suites[0].result_file_path().as_str(), + test_suites[0].expected_file_path().as_str(), + ); + + collector_container.stop(); +} diff --git 
a/opentelemetry-otlp/tests/integration_test/tests/logs.rs b/opentelemetry-otlp/tests/integration_test/tests/logs.rs new file mode 100644 index 0000000000..22fabd0ae9 --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/tests/logs.rs @@ -0,0 +1,58 @@ +#![cfg(unix)] + +use integration_test_runner::logs_asserter::{read_logs_from_json, LogsAsserter}; +use log::{info, Level}; +use opentelemetry::logs::LogError; +use opentelemetry::KeyValue; +use opentelemetry_appender_log::OpenTelemetryLogBridge; +use opentelemetry_sdk::{logs as sdklogs, runtime, Resource}; +use std::error::Error; +use std::fs::File; +use std::os::unix::fs::MetadataExt; + +fn init_logs() -> Result { + opentelemetry_otlp::new_pipeline() + .logging() + .with_exporter(opentelemetry_otlp::new_exporter().tonic()) + .with_log_config( + sdklogs::config().with_resource(Resource::new(vec![KeyValue::new( + opentelemetry_semantic_conventions::resource::SERVICE_NAME, + "logs-integration-test", + )])), + ) + .install_batch(runtime::Tokio) +} + +pub async fn logs() -> Result<(), Box> { + let logger_provider = init_logs().unwrap(); + let otel_log_appender = OpenTelemetryLogBridge::new(&logger_provider); + log::set_boxed_logger(Box::new(otel_log_appender))?; + log::set_max_level(Level::Info.to_level_filter()); + + info!(target: "my-target", "hello from {}. 
My price is {}.", "banana", 2.99); + let _ = logger_provider.shutdown(); + Ok(()) +} + +pub fn assert_logs_results(result: &str, expected: &str) { + let left = read_logs_from_json(File::open(expected).unwrap()); + let right = read_logs_from_json(File::open(result).unwrap()); + + LogsAsserter::new(left, right).assert(); + + assert!(File::open(result).unwrap().metadata().unwrap().size() > 0) +} + +#[test] +#[should_panic(expected = "assertion `left == right` failed: body does not match")] +pub fn test_assert_logs_eq_failure() { + let left = read_logs_from_json(File::open("./expected/logs.json").unwrap()); + let right = read_logs_from_json(File::open("./expected/failed_logs.json").unwrap()); + LogsAsserter::new(right, left).assert(); +} + +#[test] +pub fn test_assert_logs_eq() { + let logs = read_logs_from_json(File::open("./expected/logs.json").unwrap()); + LogsAsserter::new(logs.clone(), logs).assert(); +} diff --git a/opentelemetry-otlp/tests/integration_test/tests/traces.rs b/opentelemetry-otlp/tests/integration_test/tests/traces.rs index 64d59e911e..e68e0d36a7 100644 --- a/opentelemetry-otlp/tests/integration_test/tests/traces.rs +++ b/opentelemetry-otlp/tests/integration_test/tests/traces.rs @@ -1,6 +1,6 @@ #![cfg(unix)] -use integration_test_runner::asserter::{read_spans_from_json, TraceAsserter}; +use integration_test_runner::trace_asserter::{read_spans_from_json, TraceAsserter}; use opentelemetry::global; use opentelemetry::global::shutdown_tracer_provider; use opentelemetry::trace::TraceError; diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs index 487261a893..9b737c5ead 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs @@ -126,6 +126,13 @@ pub struct LogRecord { /// string message (including multi-line) describing the event in a free form or it can /// be a 
structured data composed of arrays and maps of other values. \[Optional\]. #[prost(message, optional, tag = "5")] + #[cfg_attr( + feature = "with-serde", + serde( + serialize_with = "crate::proto::serializers::serialize_to_value", + deserialize_with = "crate::proto::serializers::deserialize_from_value" + ) + )] pub body: ::core::option::Option, /// Additional attributes that describe the specific event occurrence. \[Optional\]. /// Attribute keys MUST be unique (it is not allowed to have more than one diff --git a/opentelemetry-proto/tests/grpc_build.rs b/opentelemetry-proto/tests/grpc_build.rs index d9265ac4f7..7201a0f8f8 100644 --- a/opentelemetry-proto/tests/grpc_build.rs +++ b/opentelemetry-proto/tests/grpc_build.rs @@ -94,8 +94,10 @@ fn build_tonic() { } // add custom serializer and deserializer for AnyValue - builder = builder - .field_attribute("common.v1.KeyValue.value", "#[cfg_attr(feature =\"with-serde\", serde(serialize_with = \"crate::proto::serializers::serialize_to_value\", deserialize_with = \"crate::proto::serializers::deserialize_from_value\"))]"); + for path in ["common.v1.KeyValue.value", "logs.v1.LogRecord.body"] { + builder = builder + .field_attribute(path, "#[cfg_attr(feature =\"with-serde\", serde(serialize_with = \"crate::proto::serializers::serialize_to_value\", deserialize_with = \"crate::proto::serializers::deserialize_from_value\"))]"); + } builder .out_dir(out_dir.path()) From 33abef2a78884daaf3d23388fa1b19752394c198 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Thu, 23 May 2024 09:49:39 -0700 Subject: [PATCH 07/37] Log SDK, OTLP builders to accept Resource directly instead of wrapping in Config (#1788) Co-authored-by: Zhongyang Wu --- examples/logs-basic/src/main.rs | 12 ++- .../benches/logs.rs | 12 ++- .../examples/basic.rs | 15 ++-- opentelemetry-otlp/CHANGELOG.md | 5 ++ .../examples/basic-otlp-http/src/main.rs | 4 +- .../examples/basic-otlp/src/main.rs | 3 +- opentelemetry-otlp/src/logs.rs | 37 +++++---- 
.../tests/integration_test/tests/logs.rs | 10 +-- opentelemetry-sdk/CHANGELOG.md | 5 ++ opentelemetry-sdk/src/logs/config.rs | 23 ------ opentelemetry-sdk/src/logs/log_emitter.rs | 79 ++++++++----------- opentelemetry-sdk/src/logs/log_processor.rs | 11 ++- opentelemetry-sdk/src/logs/mod.rs | 4 +- opentelemetry-stdout/Cargo.toml | 2 + opentelemetry-stdout/examples/basic.rs | 17 +++- 15 files changed, 110 insertions(+), 129 deletions(-) delete mode 100644 opentelemetry-sdk/src/logs/config.rs diff --git a/examples/logs-basic/src/main.rs b/examples/logs-basic/src/main.rs index b19928b1e2..1bcf75a99d 100644 --- a/examples/logs-basic/src/main.rs +++ b/examples/logs-basic/src/main.rs @@ -1,7 +1,7 @@ use log::{error, Level}; use opentelemetry::KeyValue; use opentelemetry_appender_log::OpenTelemetryLogBridge; -use opentelemetry_sdk::logs::{Config, LoggerProvider}; +use opentelemetry_sdk::logs::LoggerProvider; use opentelemetry_sdk::Resource; use opentelemetry_semantic_conventions::resource::SERVICE_NAME; @@ -13,12 +13,10 @@ fn main() { // Ok(serde_json::to_writer_pretty(writer, &data).unwrap())) .build(); let logger_provider = LoggerProvider::builder() - .with_config( - Config::default().with_resource(Resource::new(vec![KeyValue::new( - SERVICE_NAME, - "logs-basic-example", - )])), - ) + .with_resource(Resource::new(vec![KeyValue::new( + SERVICE_NAME, + "logs-basic-example", + )])) .with_simple_exporter(exporter) .build(); diff --git a/opentelemetry-appender-tracing/benches/logs.rs b/opentelemetry-appender-tracing/benches/logs.rs index f41f99e180..9f73e9c890 100644 --- a/opentelemetry-appender-tracing/benches/logs.rs +++ b/opentelemetry-appender-tracing/benches/logs.rs @@ -19,7 +19,7 @@ use opentelemetry::logs::LogResult; use opentelemetry::KeyValue; use opentelemetry_appender_tracing::layer as tracing_layer; use opentelemetry_sdk::export::logs::{LogData, LogExporter}; -use opentelemetry_sdk::logs::{Config, LogProcessor, LoggerProvider}; +use 
opentelemetry_sdk::logs::{LogProcessor, LoggerProvider}; use opentelemetry_sdk::Resource; use tracing::error; use tracing_subscriber::prelude::*; @@ -125,12 +125,10 @@ fn benchmark_with_ot_layer(c: &mut Criterion, enabled: bool, bench_name: &str) { let exporter = NoopExporter { enabled }; let processor = NoopProcessor::new(Box::new(exporter)); let provider = LoggerProvider::builder() - .with_config( - Config::default().with_resource(Resource::new(vec![KeyValue::new( - "service.name", - "benchmark", - )])), - ) + .with_resource(Resource::new(vec![KeyValue::new( + "service.name", + "benchmark", + )])) .with_log_processor(processor) .build(); let ot_layer = tracing_layer::OpenTelemetryTracingBridge::new(&provider); diff --git a/opentelemetry-appender-tracing/examples/basic.rs b/opentelemetry-appender-tracing/examples/basic.rs index 9e8e8366b3..689c3f7550 100644 --- a/opentelemetry-appender-tracing/examples/basic.rs +++ b/opentelemetry-appender-tracing/examples/basic.rs @@ -2,22 +2,17 @@ use opentelemetry::KeyValue; use opentelemetry_appender_tracing::layer; -use opentelemetry_sdk::{ - logs::{Config, LoggerProvider}, - Resource, -}; +use opentelemetry_sdk::{logs::LoggerProvider, Resource}; use tracing::error; use tracing_subscriber::prelude::*; fn main() { let exporter = opentelemetry_stdout::LogExporter::default(); let provider: LoggerProvider = LoggerProvider::builder() - .with_config( - Config::default().with_resource(Resource::new(vec![KeyValue::new( - "service.name", - "log-appender-tracing-example", - )])), - ) + .with_resource(Resource::new(vec![KeyValue::new( + "service.name", + "log-appender-tracing-example", + )])) .with_simple_exporter(exporter) .build(); let layer = layer::OpenTelemetryTracingBridge::new(&provider); diff --git a/opentelemetry-otlp/CHANGELOG.md b/opentelemetry-otlp/CHANGELOG.md index f39e0f8cec..07213b96aa 100644 --- a/opentelemetry-otlp/CHANGELOG.md +++ b/opentelemetry-otlp/CHANGELOG.md @@ -7,6 +7,11 @@ - `OtlpMetricPipeline.build()` no 
longer invoke the `global::set_meter_provider`. User who setup the pipeline must do it themselves using `global::set_meter_provider(meter_provider.clone());`. +- Add `with_resource` on `OtlpLogPipeline`, replacing the `with_config` method. +Instead of using +`.with_config(Config::default().with_resource(RESOURCE::default()))` users must +now use `.with_resource(RESOURCE::default())` to configure Resource when using +`OtlpLogPipeline`. ## v0.16.0 diff --git a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs index 34a294f6af..7cc6490164 100644 --- a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs @@ -9,7 +9,7 @@ use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge; use opentelemetry_otlp::WithExportConfig; use opentelemetry_sdk::trace as sdktrace; use opentelemetry_sdk::{ - logs::{self as sdklogs, Config}, + logs::{self as sdklogs}, Resource, }; use tracing::info; @@ -28,7 +28,7 @@ static RESOURCE: Lazy = Lazy::new(|| { fn init_logs() -> Result { opentelemetry_otlp::new_pipeline() .logging() - .with_log_config(Config::default().with_resource(RESOURCE.clone())) + .with_resource(RESOURCE.clone()) .with_exporter( opentelemetry_otlp::new_exporter() .http() diff --git a/opentelemetry-otlp/examples/basic-otlp/src/main.rs b/opentelemetry-otlp/examples/basic-otlp/src/main.rs index 507d6c6fce..605916801d 100644 --- a/opentelemetry-otlp/examples/basic-otlp/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp/src/main.rs @@ -9,7 +9,6 @@ use opentelemetry::{ }; use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge; use opentelemetry_otlp::{ExportConfig, WithExportConfig}; -use opentelemetry_sdk::logs::Config; use opentelemetry_sdk::{runtime, trace as sdktrace, Resource}; use std::error::Error; use tracing::info; @@ -58,7 +57,7 @@ fn init_metrics() -> Result Result { opentelemetry_otlp::new_pipeline() 
.logging() - .with_log_config(Config::default().with_resource(RESOURCE.clone())) + .with_resource(RESOURCE.clone()) .with_exporter( opentelemetry_otlp::new_exporter() .tonic() diff --git a/opentelemetry-otlp/src/logs.rs b/opentelemetry-otlp/src/logs.rs index 714d4508a3..13e407a678 100644 --- a/opentelemetry-otlp/src/logs.rs +++ b/opentelemetry-otlp/src/logs.rs @@ -14,7 +14,7 @@ use std::fmt::Debug; use opentelemetry::logs::LogError; -use opentelemetry_sdk::{export::logs::LogData, runtime::RuntimeChannel}; +use opentelemetry_sdk::{export::logs::LogData, runtime::RuntimeChannel, Resource}; /// Compression algorithm to use, defaults to none. pub const OTEL_EXPORTER_OTLP_LOGS_COMPRESSION: &str = "OTEL_EXPORTER_OTLP_LOGS_COMPRESSION"; @@ -35,7 +35,7 @@ impl OtlpPipeline { /// Create a OTLP logging pipeline. pub fn logging(self) -> OtlpLogPipeline { OtlpLogPipeline { - log_config: None, + resource: None, exporter_builder: NoExporterConfig(()), batch_config: None, } @@ -111,15 +111,17 @@ impl opentelemetry_sdk::export::logs::LogExporter for LogExporter { #[derive(Debug)] pub struct OtlpLogPipeline { exporter_builder: EB, - log_config: Option, + resource: Option, batch_config: Option, } impl OtlpLogPipeline { - /// Set the log provider configuration. - pub fn with_log_config(mut self, log_config: opentelemetry_sdk::logs::Config) -> Self { - self.log_config = Some(log_config); - self + /// Set the Resource associated with log provider. + pub fn with_resource(self, resource: Resource) -> Self { + OtlpLogPipeline { + resource: Some(resource), + ..self + } } /// Set the batch log processor configuration, and it will override the env vars. 
@@ -137,7 +139,7 @@ impl OtlpLogPipeline { ) -> OtlpLogPipeline { OtlpLogPipeline { exporter_builder: pipeline.into(), - log_config: self.log_config, + resource: self.resource, batch_config: self.batch_config, } } @@ -152,7 +154,7 @@ impl OtlpLogPipeline { pub fn install_simple(self) -> Result { Ok(build_simple_with_exporter( self.exporter_builder.build_log_exporter()?, - self.log_config, + self.resource, )) } @@ -168,7 +170,7 @@ impl OtlpLogPipeline { ) -> Result { Ok(build_batch_with_exporter( self.exporter_builder.build_log_exporter()?, - self.log_config, + self.resource, runtime, self.batch_config, )) @@ -177,20 +179,21 @@ impl OtlpLogPipeline { fn build_simple_with_exporter( exporter: LogExporter, - log_config: Option, + resource: Option, ) -> opentelemetry_sdk::logs::LoggerProvider { let mut provider_builder = opentelemetry_sdk::logs::LoggerProvider::builder().with_simple_exporter(exporter); - if let Some(config) = log_config { - provider_builder = provider_builder.with_config(config); + if let Some(resource) = resource { + provider_builder = provider_builder.with_resource(resource); } - // logger would be created in the tracing appender + // logger would be created in the appenders like + // opentelemetry-appender-tracing, opentelemetry-appender-log etc. 
provider_builder.build() } fn build_batch_with_exporter( exporter: LogExporter, - log_config: Option, + resource: Option, runtime: R, batch_config: Option, ) -> opentelemetry_sdk::logs::LoggerProvider { @@ -200,8 +203,8 @@ fn build_batch_with_exporter( .build(); provider_builder = provider_builder.with_log_processor(batch_processor); - if let Some(config) = log_config { - provider_builder = provider_builder.with_config(config); + if let Some(resource) = resource { + provider_builder = provider_builder.with_resource(resource); } // logger would be created in the tracing appender provider_builder.build() diff --git a/opentelemetry-otlp/tests/integration_test/tests/logs.rs b/opentelemetry-otlp/tests/integration_test/tests/logs.rs index 22fabd0ae9..0c4fb773e9 100644 --- a/opentelemetry-otlp/tests/integration_test/tests/logs.rs +++ b/opentelemetry-otlp/tests/integration_test/tests/logs.rs @@ -14,12 +14,10 @@ fn init_logs() -> Result { opentelemetry_otlp::new_pipeline() .logging() .with_exporter(opentelemetry_otlp::new_exporter().tonic()) - .with_log_config( - sdklogs::config().with_resource(Resource::new(vec![KeyValue::new( - opentelemetry_semantic_conventions::resource::SERVICE_NAME, - "logs-integration-test", - )])), - ) + .with_resource(Resource::new(vec![KeyValue::new( + opentelemetry_semantic_conventions::resource::SERVICE_NAME, + "logs-integration-test", + )])) .install_batch(runtime::Tokio) } diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 3d141e1ad5..47c4a7f179 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -4,6 +4,11 @@ - Add "metrics", "logs" to default features. With this, default feature list is "trace", "metrics" and "logs". +- Add `with_resource` on Builder for LoggerProvider, replacing the `with_config` + method. 
Instead of using + `.with_config(Config::default().with_resource(RESOURCE::default()))` users + must now use `.with_resource(RESOURCE::default())` to configure Resource on + logger provider. - Removed dependency on `ordered-float`. ## v0.23.0 diff --git a/opentelemetry-sdk/src/logs/config.rs b/opentelemetry-sdk/src/logs/config.rs deleted file mode 100644 index d58efa9b13..0000000000 --- a/opentelemetry-sdk/src/logs/config.rs +++ /dev/null @@ -1,23 +0,0 @@ -use std::borrow::Cow; - -use crate::Resource; - -/// Default log configuration -pub fn config() -> Config { - Config::default() -} - -/// Log emitter configuration. -#[derive(Debug, Default)] -pub struct Config { - /// Contains attributes representing an entity that produces telemetry. - pub resource: Cow<'static, crate::Resource>, -} - -impl Config { - /// Specify the attributes representing the entity that produces telemetry - pub fn with_resource(mut self, resource: Resource) -> Self { - self.resource = Cow::Owned(resource); - self - } -} diff --git a/opentelemetry-sdk/src/logs/log_emitter.rs b/opentelemetry-sdk/src/logs/log_emitter.rs index ffa08d8ab1..3a552e4e57 100644 --- a/opentelemetry-sdk/src/logs/log_emitter.rs +++ b/opentelemetry-sdk/src/logs/log_emitter.rs @@ -1,7 +1,8 @@ -use super::{BatchLogProcessor, Config, LogProcessor, LogRecord, SimpleLogProcessor, TraceContext}; +use super::{BatchLogProcessor, LogProcessor, LogRecord, SimpleLogProcessor, TraceContext}; use crate::{ export::logs::{LogData, LogExporter}, runtime::RuntimeChannel, + Resource, }; use opentelemetry::{ global, @@ -25,7 +26,7 @@ use once_cell::sync::Lazy; static NOOP_LOGGER_PROVIDER: Lazy = Lazy::new(|| LoggerProvider { inner: Arc::new(LoggerProviderInner { processors: Vec::new(), - config: Config::default(), + resource: Resource::empty(), }), is_shutdown: Arc::new(AtomicBool::new(true)), }); @@ -89,9 +90,9 @@ impl LoggerProvider { Builder::default() } - /// Config associated with this provider. 
- pub fn config(&self) -> &Config { - &self.inner.config + /// Resource associated with this provider. + pub fn resource(&self) -> &Resource { + &self.inner.resource } /// Log processors associated with this provider. @@ -137,7 +138,7 @@ impl LoggerProvider { #[derive(Debug)] struct LoggerProviderInner { processors: Vec>, - config: Config, + resource: Resource, } impl Drop for LoggerProviderInner { @@ -154,7 +155,7 @@ impl Drop for LoggerProviderInner { /// Builder for provider attributes. pub struct Builder { processors: Vec>, - config: Config, + resource: Option, } impl Builder { @@ -184,21 +185,25 @@ impl Builder { Builder { processors, ..self } } - /// The `Config` that this provider should use. - pub fn with_config(self, config: Config) -> Self { - Builder { config, ..self } + /// The `Resource` to be associated with this Provider. + pub fn with_resource(self, resource: Resource) -> Self { + Builder { + resource: Some(resource), + ..self + } } /// Create a new provider from this configuration. 
pub fn build(self) -> LoggerProvider { + let resource = self.resource.unwrap_or_default(); // invoke set_resource on all the processors for processor in &self.processors { - processor.set_resource(&self.config.resource); + processor.set_resource(&resource); } LoggerProvider { inner: Arc::new(LoggerProviderInner { processors: self.processors, - config: self.config, + resource, }), is_shutdown: Arc::new(AtomicBool::new(false)), } @@ -357,8 +362,7 @@ mod tests { expect: Option<&'static str>| { assert_eq!( provider - .config() - .resource + .resource() .get(Key::from_static_str(resource_key)) .map(|v| v.to_string()), expect.map(|s| s.to_string()) @@ -366,18 +370,15 @@ mod tests { }; let assert_telemetry_resource = |provider: &super::LoggerProvider| { assert_eq!( - provider - .config() - .resource - .get(TELEMETRY_SDK_LANGUAGE.into()), + provider.resource().get(TELEMETRY_SDK_LANGUAGE.into()), Some(Value::from("rust")) ); assert_eq!( - provider.config().resource.get(TELEMETRY_SDK_NAME.into()), + provider.resource().get(TELEMETRY_SDK_NAME.into()), Some(Value::from("opentelemetry")) ); assert_eq!( - provider.config().resource.get(TELEMETRY_SDK_VERSION.into()), + provider.resource().get(TELEMETRY_SDK_VERSION.into()), Some(Value::from(env!("CARGO_PKG_VERSION"))) ); }; @@ -395,15 +396,13 @@ mod tests { // If user provided a resource, use that. 
let custom_config_provider = super::LoggerProvider::builder() - .with_config(Config { - resource: Cow::Owned(Resource::new(vec![KeyValue::new( - SERVICE_NAME, - "test_service", - )])), - }) + .with_resource(Resource::new(vec![KeyValue::new( + SERVICE_NAME, + "test_service", + )])) .build(); assert_resource(&custom_config_provider, SERVICE_NAME, Some("test_service")); - assert_eq!(custom_config_provider.config().resource.len(), 1); + assert_eq!(custom_config_provider.resource().len(), 1); // If `OTEL_RESOURCE_ATTRIBUTES` is set, read them automatically temp_env::with_var( @@ -419,7 +418,7 @@ mod tests { assert_resource(&env_resource_provider, "key1", Some("value1")); assert_resource(&env_resource_provider, "k3", Some("value2")); assert_telemetry_resource(&env_resource_provider); - assert_eq!(env_resource_provider.config().resource.len(), 6); + assert_eq!(env_resource_provider.resource().len(), 6); }, ); @@ -429,12 +428,10 @@ mod tests { Some("my-custom-key=env-val,k2=value2"), || { let user_provided_resource_config_provider = super::LoggerProvider::builder() - .with_config(Config { - resource: Cow::Owned(Resource::default().merge(&mut Resource::new(vec![ - KeyValue::new("my-custom-key", "my-custom-value"), - KeyValue::new("my-custom-key2", "my-custom-value2"), - ]))), - }) + .with_resource(Resource::default().merge(&mut Resource::new(vec![ + KeyValue::new("my-custom-key", "my-custom-value"), + KeyValue::new("my-custom-key2", "my-custom-value2"), + ]))) .build(); assert_resource( &user_provided_resource_config_provider, @@ -457,23 +454,15 @@ mod tests { Some("value2"), ); assert_telemetry_resource(&user_provided_resource_config_provider); - assert_eq!( - user_provided_resource_config_provider - .config() - .resource - .len(), - 7 - ); + assert_eq!(user_provided_resource_config_provider.resource().len(), 7); }, ); // If user provided a resource, it takes priority during collision. 
let no_service_name = super::LoggerProvider::builder() - .with_config(Config { - resource: Cow::Owned(Resource::empty()), - }) + .with_resource(Resource::empty()) .build(); - assert_eq!(no_service_name.config().resource.len(), 0); + assert_eq!(no_service_name.resource().len(), 0); } #[test] diff --git a/opentelemetry-sdk/src/logs/log_processor.rs b/opentelemetry-sdk/src/logs/log_processor.rs index fd0be00769..1aad790c64 100644 --- a/opentelemetry-sdk/src/logs/log_processor.rs +++ b/opentelemetry-sdk/src/logs/log_processor.rs @@ -503,8 +503,7 @@ mod tests { OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT, OTEL_BLRP_SCHEDULE_DELAY_DEFAULT, }, - BatchConfig, BatchConfigBuilder, Config, LogProcessor, LoggerProvider, - SimpleLogProcessor, + BatchConfig, BatchConfigBuilder, LogProcessor, LoggerProvider, SimpleLogProcessor, }, runtime, testing::logs::InMemoryLogsExporter, @@ -699,12 +698,12 @@ mod tests { let processor = SimpleLogProcessor::new(Box::new(exporter.clone())); let _ = LoggerProvider::builder() .with_log_processor(processor) - .with_config(Config::default().with_resource(Resource::new(vec![ + .with_resource(Resource::new(vec![ KeyValue::new("k1", "v1"), KeyValue::new("k2", "v3"), KeyValue::new("k3", "v3"), KeyValue::new("k4", "v4"), - ]))) + ])) .build(); assert_eq!(exporter.get_resource().unwrap().into_iter().count(), 4); } @@ -721,12 +720,12 @@ mod tests { ); let provider = LoggerProvider::builder() .with_log_processor(processor) - .with_config(Config::default().with_resource(Resource::new(vec![ + .with_resource(Resource::new(vec![ KeyValue::new("k1", "v1"), KeyValue::new("k2", "v3"), KeyValue::new("k3", "v3"), KeyValue::new("k4", "v4"), - ]))) + ])) .build(); assert_eq!(exporter.get_resource().unwrap().into_iter().count(), 4); let _ = provider.shutdown(); diff --git a/opentelemetry-sdk/src/logs/mod.rs b/opentelemetry-sdk/src/logs/mod.rs index 036b7ffe7e..207da4255c 100644 --- 
a/opentelemetry-sdk/src/logs/mod.rs +++ b/opentelemetry-sdk/src/logs/mod.rs @@ -1,11 +1,9 @@ //! # OpenTelemetry Log SDK -mod config; mod log_emitter; mod log_processor; mod record; -pub use config::{config, Config}; pub use log_emitter::{Builder, Logger, LoggerProvider}; pub use log_processor::{ BatchConfig, BatchConfigBuilder, BatchLogProcessor, BatchLogProcessorBuilder, LogProcessor, @@ -35,7 +33,7 @@ mod tests { ]); let exporter: InMemoryLogsExporter = InMemoryLogsExporter::default(); let logger_provider = LoggerProvider::builder() - .with_config(Config::default().with_resource(resource.clone())) + .with_resource(resource.clone()) .with_log_processor(SimpleLogProcessor::new(Box::new(exporter.clone()))) .build(); diff --git a/opentelemetry-stdout/Cargo.toml b/opentelemetry-stdout/Cargo.toml index 78a395c0a6..e139df2b9d 100644 --- a/opentelemetry-stdout/Cargo.toml +++ b/opentelemetry-stdout/Cargo.toml @@ -36,6 +36,8 @@ ordered-float = { workspace = true } opentelemetry = { path = "../opentelemetry", features = ["metrics"] } opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["rt-tokio", "metrics"] } opentelemetry-appender-tracing = { path = "../opentelemetry-appender-tracing"} +opentelemetry-semantic-conventions = { path = "../opentelemetry-semantic-conventions" } tracing = { workspace = true, features = ["std"]} tracing-subscriber = { workspace = true, features = ["registry", "std"] } tokio = { workspace = true, features = ["full"] } +once_cell = { workspace = true } diff --git a/opentelemetry-stdout/examples/basic.rs b/opentelemetry-stdout/examples/basic.rs index ae2b7f6ec9..baa2b41d18 100644 --- a/opentelemetry-stdout/examples/basic.rs +++ b/opentelemetry-stdout/examples/basic.rs @@ -1,5 +1,6 @@ //! 
run with `$ cargo run --example basic +use once_cell::sync::Lazy; use opentelemetry::{global, KeyValue}; #[cfg(feature = "trace")] @@ -11,14 +12,24 @@ use opentelemetry_sdk::runtime; #[cfg(feature = "metrics")] use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider}; +use opentelemetry_sdk::trace::Config; #[cfg(feature = "trace")] use opentelemetry_sdk::trace::TracerProvider; +use opentelemetry_sdk::Resource; + +static RESOURCE: Lazy = Lazy::new(|| { + Resource::default().merge(&Resource::new(vec![KeyValue::new( + opentelemetry_semantic_conventions::resource::SERVICE_NAME, + "basic-stdout-example", + )])) +}); #[cfg(feature = "trace")] fn init_trace() { let exporter = opentelemetry_stdout::SpanExporter::default(); let provider = TracerProvider::builder() .with_simple_exporter(exporter) + .with_config(Config::default().with_resource(RESOURCE.clone())) .build(); global::set_tracer_provider(provider); } @@ -27,7 +38,10 @@ fn init_trace() { fn init_metrics() -> opentelemetry_sdk::metrics::SdkMeterProvider { let exporter = opentelemetry_stdout::MetricsExporter::default(); let reader = PeriodicReader::builder(exporter, runtime::Tokio).build(); - let provider = SdkMeterProvider::builder().with_reader(reader).build(); + let provider = SdkMeterProvider::builder() + .with_reader(reader) + .with_resource(RESOURCE.clone()) + .build(); global::set_meter_provider(provider.clone()); provider } @@ -41,6 +55,7 @@ fn init_logs() -> opentelemetry_sdk::logs::LoggerProvider { let exporter = opentelemetry_stdout::LogExporter::default(); let provider: LoggerProvider = LoggerProvider::builder() .with_simple_exporter(exporter) + .with_resource(RESOURCE.clone()) .build(); let layer = layer::OpenTelemetryTracingBridge::new(&provider); tracing_subscriber::registry().with(layer).init(); From 4883d2d35061740795829060da12026bce0dc52c Mon Sep 17 00:00:00 2001 From: Lalit Kumar Bhasin Date: Thu, 23 May 2024 11:30:28 -0700 Subject: [PATCH 08/37] Support LogProcessors chaining through 
mutable reference (#1726) --- .../benches/logs.rs | 6 +- opentelemetry-otlp/src/exporter/http/logs.rs | 10 +- opentelemetry-otlp/src/exporter/tonic/logs.rs | 4 +- opentelemetry-otlp/src/logs.rs | 5 +- opentelemetry-sdk/CHANGELOG.md | 12 ++ opentelemetry-sdk/benches/log.rs | 7 +- opentelemetry-sdk/src/export/logs/mod.rs | 3 +- opentelemetry-sdk/src/logs/log_emitter.rs | 29 +-- opentelemetry-sdk/src/logs/log_processor.rs | 178 +++++++++++++++--- .../src/testing/logs/in_memory_exporter.rs | 12 +- opentelemetry-stdout/src/logs/exporter.rs | 9 +- stress/src/logs.rs | 4 +- 12 files changed, 225 insertions(+), 54 deletions(-) diff --git a/opentelemetry-appender-tracing/benches/logs.rs b/opentelemetry-appender-tracing/benches/logs.rs index 9f73e9c890..a65d4c2b3d 100644 --- a/opentelemetry-appender-tracing/benches/logs.rs +++ b/opentelemetry-appender-tracing/benches/logs.rs @@ -10,7 +10,7 @@ | noop_layer_disabled | 12 ns | | noop_layer_enabled | 25 ns | | ot_layer_disabled | 19 ns | - | ot_layer_enabled | 588 ns | + | ot_layer_enabled | 446 ns | */ use async_trait::async_trait; @@ -33,7 +33,7 @@ struct NoopExporter { #[async_trait] impl LogExporter for NoopExporter { - async fn export(&mut self, _: Vec) -> LogResult<()> { + async fn export<'a>(&mut self, _: Vec>) -> LogResult<()> { LogResult::Ok(()) } @@ -54,7 +54,7 @@ impl NoopProcessor { } impl LogProcessor for NoopProcessor { - fn emit(&self, _: LogData) { + fn emit(&self, _: &mut LogData) { // no-op } diff --git a/opentelemetry-otlp/src/exporter/http/logs.rs b/opentelemetry-otlp/src/exporter/http/logs.rs index 4bad1cc39e..3e59ffdd8e 100644 --- a/opentelemetry-otlp/src/exporter/http/logs.rs +++ b/opentelemetry-otlp/src/exporter/http/logs.rs @@ -9,7 +9,7 @@ use super::OtlpHttpClient; #[async_trait] impl LogExporter for OtlpHttpClient { - async fn export(&mut self, batch: Vec) -> LogResult<()> { + async fn export<'a>(&mut self, batch: Vec>) -> LogResult<()> { let client = self .client .lock() @@ -19,7 +19,13 @@ impl 
LogExporter for OtlpHttpClient { _ => Err(LogError::Other("exporter is already shut down".into())), })?; - let (body, content_type) = { self.build_logs_export_body(batch, &self.resource)? }; + //TODO: avoid cloning here. + let owned_batch = batch + .into_iter() + .map(|cow_log_data| cow_log_data.into_owned()) // Converts Cow to owned LogData + .collect::>(); + + let (body, content_type) = { self.build_logs_export_body(owned_batch, &self.resource)? }; let mut request = http::Request::builder() .method(Method::POST) .uri(&self.collector_endpoint) diff --git a/opentelemetry-otlp/src/exporter/tonic/logs.rs b/opentelemetry-otlp/src/exporter/tonic/logs.rs index 65759f7a4b..8a6637a5b0 100644 --- a/opentelemetry-otlp/src/exporter/tonic/logs.rs +++ b/opentelemetry-otlp/src/exporter/tonic/logs.rs @@ -52,7 +52,7 @@ impl TonicLogsClient { #[async_trait] impl LogExporter for TonicLogsClient { - async fn export(&mut self, batch: Vec) -> LogResult<()> { + async fn export<'a>(&mut self, batch: Vec>) -> LogResult<()> { let (mut client, metadata, extensions) = match &mut self.inner { Some(inner) => { let (m, e, _) = inner @@ -65,9 +65,11 @@ impl LogExporter for TonicLogsClient { None => return Err(LogError::Other("exporter is already shut down".into())), }; + // TODO: Avoid cloning here. 
let resource_logs = { batch .into_iter() + .map(|log_data_cow| (log_data_cow.into_owned())) .map(|log_data| (log_data, &self.resource)) .map(Into::into) .collect() diff --git a/opentelemetry-otlp/src/logs.rs b/opentelemetry-otlp/src/logs.rs index 13e407a678..72e3b352ec 100644 --- a/opentelemetry-otlp/src/logs.rs +++ b/opentelemetry-otlp/src/logs.rs @@ -98,7 +98,10 @@ impl LogExporter { #[async_trait] impl opentelemetry_sdk::export::logs::LogExporter for LogExporter { - async fn export(&mut self, batch: Vec) -> opentelemetry::logs::LogResult<()> { + async fn export<'a>( + &mut self, + batch: Vec>, + ) -> opentelemetry::logs::LogResult<()> { self.client.export(batch).await } diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 47c4a7f179..17e0483a5d 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -11,6 +11,18 @@ logger provider. - Removed dependency on `ordered-float`. +- **Breaking** [1726](https://github.com/open-telemetry/opentelemetry-rust/pull/1726) + Update `LogProcessor::emit() method to take mutable reference to LogData. This is breaking + change for LogProcessor developers. If the processor needs to invoke the exporter + asynchronously, it should clone the data to ensure it can be safely processed without + lifetime issues. Any changes made to the log data before cloning in this method will be + reflected in the next log processor in the chain, as well as to the exporter. +- **Breaking** [1726](https://github.com/open-telemetry/opentelemetry-rust/pull/1726) + Update `LogExporter::export() method to accept a batch of log data, which can be either a + reference or owned `LogData`. If the exporter needs to process the log data + asynchronously, it should clone the log data to ensure it can be safely processed without + lifetime issues. + ## v0.23.0 - Fix SimpleSpanProcessor to be consistent with log counterpart. 
Also removed diff --git a/opentelemetry-sdk/benches/log.rs b/opentelemetry-sdk/benches/log.rs index f70bb3e650..72f205c5e0 100644 --- a/opentelemetry-sdk/benches/log.rs +++ b/opentelemetry-sdk/benches/log.rs @@ -1,3 +1,8 @@ +//! run with `$ cargo bench --bench log --features=logs -- --exact ` to run specific test for logs +//! So to run test named "full-log-with-attributes/with-context" you would run `$ cargo bench --bench log --features=logs -- --exact full-log-with-attributes/with-context` +//! To run all tests for logs you would run `$ cargo bench --bench log --features=logs` +//! + use std::collections::HashMap; use std::time::SystemTime; @@ -19,7 +24,7 @@ struct VoidExporter; #[async_trait] impl LogExporter for VoidExporter { - async fn export(&mut self, _batch: Vec) -> LogResult<()> { + async fn export<'a>(&mut self, _batch: Vec>) -> LogResult<()> { LogResult::Ok(()) } } diff --git a/opentelemetry-sdk/src/export/logs/mod.rs b/opentelemetry-sdk/src/export/logs/mod.rs index 5c56e355b4..9588339462 100644 --- a/opentelemetry-sdk/src/export/logs/mod.rs +++ b/opentelemetry-sdk/src/export/logs/mod.rs @@ -8,13 +8,14 @@ use opentelemetry::{ logs::{LogError, LogResult}, InstrumentationLibrary, }; +use std::borrow::Cow; use std::fmt::Debug; /// `LogExporter` defines the interface that log exporters should implement. #[async_trait] pub trait LogExporter: Send + Sync + Debug { /// Exports a batch of [`LogData`]. - async fn export(&mut self, batch: Vec) -> LogResult<()>; + async fn export<'a>(&mut self, batch: Vec>) -> LogResult<()>; /// Shuts down the exporter. 
fn shutdown(&mut self) {} #[cfg(feature = "logs_level_enabled")] diff --git a/opentelemetry-sdk/src/logs/log_emitter.rs b/opentelemetry-sdk/src/logs/log_emitter.rs index 3a552e4e57..d20e602ae1 100644 --- a/opentelemetry-sdk/src/logs/log_emitter.rs +++ b/opentelemetry-sdk/src/logs/log_emitter.rs @@ -256,20 +256,21 @@ impl opentelemetry::logs::Logger for Logger { cx.has_active_span() .then(|| TraceContext::from(cx.span().span_context())) }); + let mut log_record = record; + if let Some(ref trace_context) = trace_context { + log_record.trace_context = Some(trace_context.clone()); + } + if log_record.observed_timestamp.is_none() { + log_record.observed_timestamp = Some(SystemTime::now()); + } + + let mut data = LogData { + record: log_record, + instrumentation: self.instrumentation_library().clone(), + }; for p in processors { - let mut cloned_record = record.clone(); - if let Some(ref trace_context) = trace_context { - cloned_record.trace_context = Some(trace_context.clone()); - } - if cloned_record.observed_timestamp.is_none() { - cloned_record.observed_timestamp = Some(SystemTime::now()); - } - let data = LogData { - record: cloned_record, - instrumentation: self.instrumentation_library().clone(), - }; - p.emit(data); + p.emit(&mut data); } } @@ -326,7 +327,7 @@ mod tests { } impl LogProcessor for ShutdownTestLogProcessor { - fn emit(&self, _data: LogData) { + fn emit(&self, _data: &mut LogData) { self.is_shutdown .lock() .map(|is_shutdown| { @@ -561,7 +562,7 @@ mod tests { } impl LogProcessor for LazyLogProcessor { - fn emit(&self, _data: LogData) { + fn emit(&self, _data: &mut LogData) { // nothing to do. 
} diff --git a/opentelemetry-sdk/src/logs/log_processor.rs b/opentelemetry-sdk/src/logs/log_processor.rs index 1aad790c64..34d7fbce14 100644 --- a/opentelemetry-sdk/src/logs/log_processor.rs +++ b/opentelemetry-sdk/src/logs/log_processor.rs @@ -14,6 +14,7 @@ use opentelemetry::{ global, logs::{LogError, LogResult}, }; +use std::borrow::Cow; use std::sync::atomic::AtomicBool; use std::{cmp::min, env, sync::Mutex}; use std::{ @@ -45,7 +46,16 @@ const OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT: usize = 512; /// [`Logger`]: crate::logs::Logger pub trait LogProcessor: Send + Sync + Debug { /// Called when a log record is ready to processed and exported. - fn emit(&self, data: LogData); + /// + /// This method receives a mutable reference to `LogData`. If the processor + /// needs to handle the export asynchronously, it should clone the data to + /// ensure it can be safely processed without lifetime issues. Any changes + /// made to the log data in this method will be reflected in the next log + /// processor in the chain. + /// + /// # Parameters + /// - `data`: A mutable reference to `LogData` representing the log record. + fn emit(&self, data: &mut LogData); /// Force the logs lying in the cache to be exported. fn force_flush(&self) -> LogResult<()>; /// Shuts down the processor. 
@@ -80,7 +90,7 @@ impl SimpleLogProcessor { } impl LogProcessor for SimpleLogProcessor { - fn emit(&self, data: LogData) { + fn emit(&self, data: &mut LogData) { // noop after shutdown if self.is_shutdown.load(std::sync::atomic::Ordering::Relaxed) { return; @@ -90,7 +100,9 @@ impl LogProcessor for SimpleLogProcessor { .exporter .lock() .map_err(|_| LogError::Other("simple logprocessor mutex poison".into())) - .and_then(|mut exporter| futures_executor::block_on(exporter.export(vec![data]))); + .and_then(|mut exporter| { + futures_executor::block_on(exporter.export(vec![Cow::Borrowed(data)])) + }); if let Err(err) = result { global::handle_error(err); } @@ -140,8 +152,10 @@ impl Debug for BatchLogProcessor { } impl LogProcessor for BatchLogProcessor { - fn emit(&self, data: LogData) { - let result = self.message_sender.try_send(BatchMessage::ExportLog(data)); + fn emit(&self, data: &mut LogData) { + let result = self + .message_sender + .try_send(BatchMessage::ExportLog(data.clone())); if let Err(err) = result { global::handle_error(LogError::Other(err.into())); @@ -201,7 +215,7 @@ impl BatchLogProcessor { match message { // Log has finished, add to buffer of pending logs. 
BatchMessage::ExportLog(log) => { - logs.push(log); + logs.push(Cow::Owned(log)); if logs.len() == config.max_export_batch_size { let result = export_with_timeout( @@ -285,11 +299,11 @@ impl BatchLogProcessor { } } -async fn export_with_timeout( +async fn export_with_timeout<'a, R, E>( time_out: Duration, exporter: &mut E, runtime: &R, - batch: Vec, + batch: Vec>, ) -> ExportResult where R: RuntimeChannel, @@ -510,8 +524,14 @@ mod tests { Resource, }; use async_trait::async_trait; + use opentelemetry::logs::AnyValue; + #[cfg(feature = "logs_level_enabled")] + use opentelemetry::logs::Severity; + use opentelemetry::logs::{Logger, LoggerProvider as _}; + use opentelemetry::Key; use opentelemetry::{logs::LogResult, KeyValue}; - use std::sync::Arc; + use std::borrow::Cow; + use std::sync::{Arc, Mutex}; use std::time::Duration; #[derive(Debug, Clone)] @@ -521,7 +541,7 @@ mod tests { #[async_trait] impl LogExporter for MockLogExporter { - async fn export(&mut self, _batch: Vec) -> LogResult<()> { + async fn export<'a>(&mut self, _batch: Vec>) -> LogResult<()> { Ok(()) } @@ -743,17 +763,15 @@ mod tests { BatchConfig::default(), runtime::Tokio, ); - processor.emit(LogData { + let mut log_data = LogData { record: Default::default(), instrumentation: Default::default(), - }); + }; + processor.emit(&mut log_data); processor.force_flush().unwrap(); processor.shutdown().unwrap(); // todo: expect to see errors here. How should we assert this? 
- processor.emit(LogData { - record: Default::default(), - instrumentation: Default::default(), - }); + processor.emit(&mut log_data); assert_eq!(1, exporter.get_emitted_logs().unwrap().len()) } @@ -764,10 +782,12 @@ mod tests { .build(); let processor = SimpleLogProcessor::new(Box::new(exporter.clone())); - processor.emit(LogData { + let mut log_data = LogData { record: Default::default(), instrumentation: Default::default(), - }); + }; + + processor.emit(&mut log_data); processor.shutdown().unwrap(); @@ -776,11 +796,125 @@ mod tests { .load(std::sync::atomic::Ordering::Relaxed); assert!(is_shutdown); - processor.emit(LogData { - record: Default::default(), - instrumentation: Default::default(), - }); + processor.emit(&mut log_data); assert_eq!(1, exporter.get_emitted_logs().unwrap().len()) } + + #[derive(Debug)] + struct FirstProcessor { + pub(crate) logs: Arc>>, + } + + impl LogProcessor for FirstProcessor { + fn emit(&self, data: &mut LogData) { + // add attribute + data.record.attributes.get_or_insert(vec![]).push(( + Key::from_static_str("processed_by"), + AnyValue::String("FirstProcessor".into()), + )); + // update body + data.record.body = Some("Updated by FirstProcessor".into()); + + self.logs.lock().unwrap().push(data.clone()); //clone as the LogProcessor is storing the data. 
+ } + + #[cfg(feature = "logs_level_enabled")] + fn event_enabled(&self, _level: Severity, _target: &str, _name: &str) -> bool { + true + } + + fn force_flush(&self) -> LogResult<()> { + Ok(()) + } + + fn shutdown(&self) -> LogResult<()> { + Ok(()) + } + } + + #[derive(Debug)] + struct SecondProcessor { + pub(crate) logs: Arc>>, + } + + impl LogProcessor for SecondProcessor { + fn emit(&self, data: &mut LogData) { + assert!(data.record.attributes.as_ref().map_or(false, |attrs| { + attrs.iter().any(|(key, value)| { + key.as_str() == "processed_by" + && value == &AnyValue::String("FirstProcessor".into()) + }) + })); + assert!( + data.record.body.clone().unwrap() + == AnyValue::String("Updated by FirstProcessor".into()) + ); + self.logs.lock().unwrap().push(data.clone()); + } + + #[cfg(feature = "logs_level_enabled")] + fn event_enabled(&self, _level: Severity, _target: &str, _name: &str) -> bool { + true + } + + fn force_flush(&self) -> LogResult<()> { + Ok(()) + } + + fn shutdown(&self) -> LogResult<()> { + Ok(()) + } + } + #[test] + fn test_log_data_modification_by_multiple_processors() { + let first_processor_logs = Arc::new(Mutex::new(Vec::new())); + let second_processor_logs = Arc::new(Mutex::new(Vec::new())); + + let first_processor = FirstProcessor { + logs: Arc::clone(&first_processor_logs), + }; + let second_processor = SecondProcessor { + logs: Arc::clone(&second_processor_logs), + }; + + let logger_provider = LoggerProvider::builder() + .with_log_processor(first_processor) + .with_log_processor(second_processor) + .build(); + + let logger = logger_provider.logger("test-logger"); + let mut log_record = logger.create_log_record(); + log_record.body = Some(AnyValue::String("Test log".into())); + + logger.emit(log_record); + + assert_eq!(first_processor_logs.lock().unwrap().len(), 1); + assert_eq!(second_processor_logs.lock().unwrap().len(), 1); + + let first_log = &first_processor_logs.lock().unwrap()[0]; + let second_log = 
&second_processor_logs.lock().unwrap()[0]; + + assert!(first_log.record.attributes.iter().any(|attrs| { + attrs.iter().any(|(key, value)| { + key.as_str() == "processed_by" + && value == &AnyValue::String("FirstProcessor".into()) + }) + })); + + assert!(second_log.record.attributes.iter().any(|attrs| { + attrs.iter().any(|(key, value)| { + key.as_str() == "processed_by" + && value == &AnyValue::String("FirstProcessor".into()) + }) + })); + assert!( + first_log.record.body.clone().unwrap() + == AnyValue::String("Updated by FirstProcessor".into()) + ); + assert!( + second_log.record.body.clone().unwrap() + == AnyValue::String("Updated by FirstProcessor".into()) + ); + } } diff --git a/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs b/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs index 233da0ccbb..8068fafaec 100644 --- a/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs +++ b/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs @@ -175,12 +175,14 @@ impl InMemoryLogsExporter { #[async_trait] impl LogExporter for InMemoryLogsExporter { - async fn export(&mut self, batch: Vec) -> LogResult<()> { - self.logs - .lock() - .map(|mut logs_guard| logs_guard.append(&mut batch.clone())) - .map_err(LogError::from) + async fn export<'a>(&mut self, batch: Vec>) -> LogResult<()> { + let mut logs_guard = self.logs.lock().map_err(LogError::from)?; + for log in batch.into_iter() { + logs_guard.push(log.into_owned()); + } + Ok(()) } + fn shutdown(&mut self) { if self.should_reset_on_shutdown { self.reset(); diff --git a/opentelemetry-stdout/src/logs/exporter.rs b/opentelemetry-stdout/src/logs/exporter.rs index fcea153133..dacefa3d8b 100644 --- a/opentelemetry-stdout/src/logs/exporter.rs +++ b/opentelemetry-stdout/src/logs/exporter.rs @@ -6,6 +6,7 @@ use opentelemetry::{ }; use opentelemetry_sdk::export::logs::{ExportResult, LogData}; use opentelemetry_sdk::Resource; +use std::borrow::Cow; use std::io::{stdout, Write}; type Encoder = @@ -44,9 +45,13 @@ 
impl fmt::Debug for LogExporter { #[async_trait] impl opentelemetry_sdk::export::logs::LogExporter for LogExporter { /// Export spans to stdout - async fn export(&mut self, batch: Vec) -> ExportResult { + async fn export<'a>(&mut self, batch: Vec>) -> ExportResult { if let Some(writer) = &mut self.writer { - let log_data = crate::logs::transform::LogData::from((batch, &self.resource)); + // TODO - Avoid cloning logdata if it is borrowed. + let log_data = crate::logs::transform::LogData::from(( + batch.into_iter().map(Cow::into_owned).collect(), + &self.resource, + )); let result = (self.encoder)(writer, log_data) as LogResult<()>; result.and_then(|_| writer.write_all(b"\n").map_err(|e| Error(e).into())) } else { diff --git a/stress/src/logs.rs b/stress/src/logs.rs index a70e81014d..a4d4d48ae7 100644 --- a/stress/src/logs.rs +++ b/stress/src/logs.rs @@ -3,7 +3,7 @@ OS: Ubuntu 22.04.3 LTS (5.15.146.1-microsoft-standard-WSL2) Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, RAM: 64.0 GB - 27 M/sec + 39 M/sec */ use opentelemetry_appender_tracing::layer; @@ -17,7 +17,7 @@ mod throughput; pub struct NoOpLogProcessor; impl LogProcessor for NoOpLogProcessor { - fn emit(&self, _data: opentelemetry_sdk::export::logs::LogData) {} + fn emit(&self, _data: &mut opentelemetry_sdk::export::logs::LogData) {} fn force_flush(&self) -> opentelemetry::logs::LogResult<()> { Ok(()) From 5c7c695c6cbbfe682c6a0d16f7d37e586d40a3b6 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Thu, 23 May 2024 12:15:13 -0700 Subject: [PATCH 09/37] Run semver CI on demand (#1813) --- .github/workflows/ci.yml | 13 ------------- .github/workflows/semver.yml | 22 ++++++++++++++++++++++ 2 files changed, 22 insertions(+), 13 deletions(-) create mode 100644 .github/workflows/semver.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index daafa68815..295c69bc6b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -138,19 +138,6 @@ jobs: env: CARGO_INCREMENTAL: 
'0' RUSTDOCFLAGS: -Dwarnings - semver: # This job uses the latest published crate as baseline for comparison. - runs-on: ubuntu-latest - name: semver - steps: - - uses: actions/checkout@v4 - with: - submodules: true - - name: Install stable - uses: dtolnay/rust-toolchain@stable - with: - components: rustfmt - - name: cargo-semver-checks - uses: obi1kenobi/cargo-semver-checks-action@v2.3 coverage: continue-on-error: true runs-on: ubuntu-latest diff --git a/.github/workflows/semver.yml b/.github/workflows/semver.yml new file mode 100644 index 0000000000..6d48652d60 --- /dev/null +++ b/.github/workflows/semver.yml @@ -0,0 +1,22 @@ +name: Semver compliance +env: + CI: true +on: + pull_request: + types: [ labeled, synchronize, opened, reopened ] + +jobs: + semver-compliance: # This job uses the latest published crate as baseline for comparison. + runs-on: ubuntu-latest + timeout-minutes: 10 + if: ${{ github.event.label.name == 'semver-check' || contains(github.event.pull_request.labels.*.name, 'semver-check') }} + steps: + - uses: actions/checkout@v4 + with: + submodules: true + - name: Install stable + uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt + - name: cargo-semver-checks + uses: obi1kenobi/cargo-semver-checks-action@v2.3 From aca4b52e20ee6728c91eb74744adfffa68e061d6 Mon Sep 17 00:00:00 2001 From: Lalit Kumar Bhasin Date: Thu, 23 May 2024 14:38:17 -0700 Subject: [PATCH 10/37] Remove temporary vector in tracer appender (#1814) --- .../benches/logs.rs | 2 +- opentelemetry-appender-tracing/src/layer.rs | 79 +++++++++---------- stress/src/logs.rs | 2 +- 3 files changed, 41 insertions(+), 42 deletions(-) diff --git a/opentelemetry-appender-tracing/benches/logs.rs b/opentelemetry-appender-tracing/benches/logs.rs index a65d4c2b3d..f7ef4d94ad 100644 --- a/opentelemetry-appender-tracing/benches/logs.rs +++ b/opentelemetry-appender-tracing/benches/logs.rs @@ -10,7 +10,7 @@ | noop_layer_disabled | 12 ns | | noop_layer_enabled | 25 ns | | ot_layer_disabled 
| 19 ns | - | ot_layer_enabled | 446 ns | + | ot_layer_enabled | 371 ns | */ use async_trait::async_trait; diff --git a/opentelemetry-appender-tracing/src/layer.rs b/opentelemetry-appender-tracing/src/layer.rs index fe28a786dc..72d9e276e9 100644 --- a/opentelemetry-appender-tracing/src/layer.rs +++ b/opentelemetry-appender-tracing/src/layer.rs @@ -11,10 +11,8 @@ use tracing_subscriber::Layer; const INSTRUMENTATION_LIBRARY_NAME: &str = "opentelemetry-appender-tracing"; /// Visitor to record the fields from the event record. -#[derive(Default)] -struct EventVisitor { - log_record_attributes: Vec<(Key, AnyValue)>, - log_record_body: Option, +struct EventVisitor<'a, LR: LogRecord> { + log_record: &'a mut LR, } /// Logs from the log crate have duplicated attributes that we removed here. @@ -37,10 +35,13 @@ fn get_filename(filepath: &str) -> &str { filepath } -impl EventVisitor { +impl<'a, LR: LogRecord> EventVisitor<'a, LR> { + fn new(log_record: &'a mut LR) -> Self { + EventVisitor { log_record } + } fn visit_metadata(&mut self, meta: &Metadata) { - self.log_record_attributes - .push(("name".into(), meta.name().into())); + self.log_record + .add_attribute(Key::new("name"), AnyValue::from(meta.name())); #[cfg(feature = "experimental_metadata_attributes")] self.visit_experimental_metadata(meta); @@ -48,48 +49,47 @@ impl EventVisitor { #[cfg(feature = "experimental_metadata_attributes")] fn visit_experimental_metadata(&mut self, meta: &Metadata) { - self.log_record_attributes - .push(("log.target".into(), meta.target().to_owned().into())); + self.log_record.add_attribute( + Key::new("log.target"), + AnyValue::from(meta.target().to_owned()), + ); if let Some(module_path) = meta.module_path() { - self.log_record_attributes - .push(("code.namespace".into(), module_path.to_owned().into())); + self.log_record.add_attribute( + Key::new("code.namespace"), + AnyValue::from(module_path.to_owned()), + ); } if let Some(filepath) = meta.file() { - self.log_record_attributes - 
.push(("code.filepath".into(), filepath.to_owned().into())); - self.log_record_attributes.push(( - "code.filename".into(), - get_filename(filepath).to_owned().into(), - )); + self.log_record.add_attribute( + Key::new("code.filepath"), + AnyValue::from(filepath.to_owned()), + ); + self.log_record.add_attribute( + Key::new("code.filename"), + AnyValue::from(get_filename(filepath).to_owned()), + ); } if let Some(line) = meta.line() { - self.log_record_attributes - .push(("code.lineno".into(), line.into())); - } - } - - fn push_to_otel_log_record(self, log_record: &mut LR) { - if let Some(body) = self.log_record_body { - log_record.set_body(body); + self.log_record + .add_attribute(Key::new("code.lineno"), AnyValue::from(line)); } - log_record.add_attributes(self.log_record_attributes); } } -impl tracing::field::Visit for EventVisitor { +impl<'a, LR: LogRecord> tracing::field::Visit for EventVisitor<'a, LR> { fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) { #[cfg(feature = "experimental_metadata_attributes")] if is_duplicated_metadata(field.name()) { return; } if field.name() == "message" { - self.log_record_body = Some(format!("{value:?}").into()); + self.log_record.set_body(format!("{:?}", value).into()); } else { - self.log_record_attributes - .push((field.name().into(), format!("{value:?}").into())); + self.log_record + .add_attribute(Key::new(field.name()), AnyValue::from(format!("{value:?}"))); } } @@ -98,18 +98,18 @@ impl tracing::field::Visit for EventVisitor { if is_duplicated_metadata(field.name()) { return; } - self.log_record_attributes - .push((field.name().into(), value.to_owned().into())); + self.log_record + .add_attribute(Key::new(field.name()), AnyValue::from(value.to_owned())); } fn record_bool(&mut self, field: &tracing_core::Field, value: bool) { - self.log_record_attributes - .push((field.name().into(), value.into())); + self.log_record + .add_attribute(Key::new(field.name()), AnyValue::from(value)); } fn 
record_f64(&mut self, field: &tracing::field::Field, value: f64) { - self.log_record_attributes - .push((field.name().into(), value.into())); + self.log_record + .add_attribute(Key::new(field.name()), AnyValue::from(value)); } fn record_i64(&mut self, field: &tracing::field::Field, value: i64) { @@ -117,8 +117,8 @@ impl tracing::field::Visit for EventVisitor { if is_duplicated_metadata(field.name()) { return; } - self.log_record_attributes - .push((field.name().into(), value.into())); + self.log_record + .add_attribute(Key::new(field.name()), AnyValue::from(value)); } // TODO: Remaining field types from AnyValue : Bytes, ListAny, Boolean @@ -173,11 +173,10 @@ where log_record.set_severity_number(severity_of_level(meta.level())); log_record.set_severity_text(meta.level().to_string().into()); - let mut visitor = EventVisitor::default(); + let mut visitor = EventVisitor::new(&mut log_record); visitor.visit_metadata(meta); // Visit fields. event.record(&mut visitor); - visitor.push_to_otel_log_record(&mut log_record); self.logger.emit(log_record); } diff --git a/stress/src/logs.rs b/stress/src/logs.rs index a4d4d48ae7..e973cc71a4 100644 --- a/stress/src/logs.rs +++ b/stress/src/logs.rs @@ -3,7 +3,7 @@ OS: Ubuntu 22.04.3 LTS (5.15.146.1-microsoft-standard-WSL2) Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, RAM: 64.0 GB - 39 M/sec + 53 M/sec */ use opentelemetry_appender_tracing::layer; From ca03c79ee1bfd49b05b3da746807968716bc5219 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Thu, 23 May 2024 19:20:09 -0700 Subject: [PATCH 11/37] Add bench for gauge instrument (#1816) --- opentelemetry-sdk/Cargo.toml | 5 +- opentelemetry-sdk/benches/metric_gauge.rs | 82 +++++++++++++++++++++++ 2 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 opentelemetry-sdk/benches/metric_gauge.rs diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index b535dd5438..ea402f38d8 100644 --- a/opentelemetry-sdk/Cargo.toml +++ 
b/opentelemetry-sdk/Cargo.toml @@ -65,7 +65,10 @@ harness = false [[bench]] name = "metric_counter" harness = false -required-features = ["metrics"] + +[[bench]] +name = "metric_gauge" +harness = false [[bench]] name = "attribute_set" diff --git a/opentelemetry-sdk/benches/metric_gauge.rs b/opentelemetry-sdk/benches/metric_gauge.rs new file mode 100644 index 0000000000..3217480760 --- /dev/null +++ b/opentelemetry-sdk/benches/metric_gauge.rs @@ -0,0 +1,82 @@ +/* + The benchmark results: + criterion = "0.5.1" + OS: Ubuntu 22.04.3 LTS (5.15.146.1-microsoft-standard-WSL2) + Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, + RAM: 64.0 GB + | Test | Average time| + |--------------------------------|-------------| + | Gauge_Add_4 | 586 ns | +*/ + +use criterion::{criterion_group, criterion_main, Criterion}; +use opentelemetry::{ + metrics::{Gauge, MeterProvider as _}, + KeyValue, +}; +use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider}; +use rand::{ + rngs::{self}, + Rng, SeedableRng, +}; +use std::cell::RefCell; + +thread_local! { + /// Store random number generator for each thread + static CURRENT_RNG: RefCell = RefCell::new(rngs::SmallRng::from_entropy()); +} + +// Run this benchmark with: +// cargo bench --bench metric_gauge +fn create_gauge() -> Gauge { + let meter_provider: SdkMeterProvider = SdkMeterProvider::builder() + .with_reader(ManualReader::builder().build()) + .build(); + let meter = meter_provider.meter("benchmarks"); + + meter.u64_gauge("gauge_bench").init() +} + +fn criterion_benchmark(c: &mut Criterion) { + gauge_record(c); +} + +fn gauge_record(c: &mut Criterion) { + let attribute_values = [ + "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9", + "value10", + ]; + + let gauge = create_gauge(); + c.bench_function("Gauge_Add_4", |b| { + b.iter(|| { + // 4*4*10*10 = 1600 time series. 
+ let rands = CURRENT_RNG.with(|rng| { + let mut rng = rng.borrow_mut(); + [ + rng.gen_range(0..4), + rng.gen_range(0..4), + rng.gen_range(0..10), + rng.gen_range(0..10), + ] + }); + let index_first_attribute = rands[0]; + let index_second_attribute = rands[1]; + let index_third_attribute = rands[2]; + let index_forth_attribute = rands[3]; + gauge.record( + 1, + &[ + KeyValue::new("attribute1", attribute_values[index_first_attribute]), + KeyValue::new("attribute2", attribute_values[index_second_attribute]), + KeyValue::new("attribute3", attribute_values[index_third_attribute]), + KeyValue::new("attribute4", attribute_values[index_forth_attribute]), + ], + ); + }); + }); +} + +criterion_group!(benches, criterion_benchmark); + +criterion_main!(benches); From bded5989ef21dac2217629939d79e1e190f05ec1 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Thu, 23 May 2024 20:43:18 -0700 Subject: [PATCH 12/37] Fix overflow for counter metric (#1815) Co-authored-by: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com> --- opentelemetry-sdk/benches/metric_counter.rs | 37 ++++++++++++---- .../src/metrics/internal/aggregate.rs | 2 +- opentelemetry-sdk/src/metrics/internal/sum.rs | 10 ++--- opentelemetry-sdk/src/metrics/mod.rs | 29 +++++++++++++ stress/Cargo.toml | 5 +++ stress/src/metrics_overflow.rs | 43 +++++++++++++++++++ 6 files changed, 112 insertions(+), 14 deletions(-) create mode 100644 stress/src/metrics_overflow.rs diff --git a/opentelemetry-sdk/benches/metric_counter.rs b/opentelemetry-sdk/benches/metric_counter.rs index d6e42bb6d5..8962daa125 100644 --- a/opentelemetry-sdk/benches/metric_counter.rs +++ b/opentelemetry-sdk/benches/metric_counter.rs @@ -19,7 +19,7 @@ use opentelemetry::{ }; use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider}; use rand::{ - rngs::{self, SmallRng}, + rngs::{self}, Rng, SeedableRng, }; use std::cell::RefCell; @@ -107,14 +107,35 @@ fn counter_add(c: &mut Criterion) { }); }); - c.bench_function("Random_Generator_5", 
|b| { + // Cause overflow. + for v in 0..2001 { + counter.add(100, &[KeyValue::new("A", v.to_string())]); + } + c.bench_function("Counter_Overflow", |b| { b.iter(|| { - let mut rng = SmallRng::from_entropy(); - let _i1 = rng.gen_range(0..4); - let _i2 = rng.gen_range(0..4); - let _i3 = rng.gen_range(0..10); - let _i4 = rng.gen_range(0..10); - let _i5 = rng.gen_range(0..10); + // 4*4*10*10 = 1600 time series. + let rands = CURRENT_RNG.with(|rng| { + let mut rng = rng.borrow_mut(); + [ + rng.gen_range(0..4), + rng.gen_range(0..4), + rng.gen_range(0..10), + rng.gen_range(0..10), + ] + }); + let index_first_attribute = rands[0]; + let index_second_attribute = rands[1]; + let index_third_attribute = rands[2]; + let index_forth_attribute = rands[3]; + counter.add( + 1, + &[ + KeyValue::new("attribute1", attribute_values[index_first_attribute]), + KeyValue::new("attribute2", attribute_values[index_second_attribute]), + KeyValue::new("attribute3", attribute_values[index_third_attribute]), + KeyValue::new("attribute4", attribute_values[index_forth_attribute]), + ], + ); }); }); diff --git a/opentelemetry-sdk/src/metrics/internal/aggregate.rs b/opentelemetry-sdk/src/metrics/internal/aggregate.rs index 13e63b33c3..93fcc6a69c 100644 --- a/opentelemetry-sdk/src/metrics/internal/aggregate.rs +++ b/opentelemetry-sdk/src/metrics/internal/aggregate.rs @@ -24,7 +24,7 @@ pub(crate) static STREAM_OVERFLOW_ATTRIBUTE_SET: Lazy = Lazy::new( /// Checks whether aggregator has hit cardinality limit for metric streams pub(crate) fn is_under_cardinality_limit(size: usize) -> bool { - size < STREAM_CARDINALITY_LIMIT as usize - 1 + size < STREAM_CARDINALITY_LIMIT as usize } /// Receives measurements to be aggregated. 
diff --git a/opentelemetry-sdk/src/metrics/internal/sum.rs b/opentelemetry-sdk/src/metrics/internal/sum.rs index b46f4c952c..27710b0743 100644 --- a/opentelemetry-sdk/src/metrics/internal/sum.rs +++ b/opentelemetry-sdk/src/metrics/internal/sum.rs @@ -55,12 +55,12 @@ impl> ValueMap { Entry::Vacant(vacant_entry) => { if is_under_cardinality_limit(size) { vacant_entry.insert(measurement); + } else if let Some(val) = values.get_mut(&STREAM_OVERFLOW_ATTRIBUTE_SET) { + *val += measurement; + return; } else { - values - .entry(STREAM_OVERFLOW_ATTRIBUTE_SET.clone()) - .and_modify(|val| *val += measurement) - .or_insert(measurement); - global::handle_error(MetricsError::Other("Warning: Maximum data points for metric stream exceeded. Entry added to overflow.".into())); + values.insert(STREAM_OVERFLOW_ATTRIBUTE_SET.clone(), measurement); + global::handle_error(MetricsError::Other("Warning: Maximum data points for metric stream exceeded. Entry added to overflow. Subsequent overflows to same metric until next collect will not be logged.".into())); } } } diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index 830f7a7d35..1bad037ad4 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -161,6 +161,35 @@ mod tests { // "multi_thread" tokio flavor must be used else flush won't // be able to make progress! + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn counter_overflow_delta() { + // Arrange + let mut test_context = TestContext::new(Temporality::Delta); + let counter = test_context.u64_counter("test", "my_counter", None); + + // Act + // Record measurements with A:0, A:1,.......A:1999, which just fits in the 2000 limit + for v in 0..2000 { + counter.add(100, &[KeyValue::new("A", v.to_string())]); + } + + // All of the below will now go into overflow. 
+ counter.add(100, &[KeyValue::new("A", "foo")]); + counter.add(100, &[KeyValue::new("A", "another")]); + counter.add(100, &[KeyValue::new("A", "yet_another")]); + test_context.flush_metrics(); + + let sum = test_context.get_aggregation::>("my_counter", None); + + // Expecting 2001 metric points. (2000 + 1 overflow) + assert_eq!(sum.data_points.len(), 2001); + + let data_point = + find_datapoint_with_key_value(&sum.data_points, "otel.metric.overflow", "true") + .expect("overflow point expected"); + assert_eq!(data_point.value, 300); + } + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn counter_aggregation_cumulative() { // Run this test with stdout enabled to see output. diff --git a/stress/Cargo.toml b/stress/Cargo.toml index c693eef984..b28a367350 100644 --- a/stress/Cargo.toml +++ b/stress/Cargo.toml @@ -9,6 +9,11 @@ name = "metrics" path = "src/metrics.rs" doc = false +[[bin]] # Bin to run the metrics overflow stress tests +name = "metrics_overflow" +path = "src/metrics_overflow.rs" +doc = false + [[bin]] # Bin to run the logs stress tests name = "logs" path = "src/logs.rs" diff --git a/stress/src/metrics_overflow.rs b/stress/src/metrics_overflow.rs new file mode 100644 index 0000000000..ab8b6ac28c --- /dev/null +++ b/stress/src/metrics_overflow.rs @@ -0,0 +1,43 @@ +/* + Stress test results: + OS: Ubuntu 22.04.3 LTS (5.15.146.1-microsoft-standard-WSL2) + Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, + RAM: 64.0 GB + 4.5M /sec +*/ + +use lazy_static::lazy_static; +use opentelemetry::{ + metrics::{Counter, MeterProvider as _}, + KeyValue, +}; +use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider}; +use rand::{rngs::SmallRng, Rng, SeedableRng}; +use std::borrow::Cow; + +mod throughput; + +lazy_static! 
{ + static ref PROVIDER: SdkMeterProvider = SdkMeterProvider::builder() + .with_reader(ManualReader::builder().build()) + .build(); + static ref ATTRIBUTE_VALUES: [&'static str; 10] = [ + "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9", + "value10" + ]; + static ref COUNTER: Counter = PROVIDER + .meter(<&str as Into>>::into("test")) + .u64_counter("hello") + .init(); +} + +fn main() { + for v in 0..2001 { + COUNTER.add(100, &[KeyValue::new("A", v.to_string())]); + } + throughput::test_throughput(test_counter); +} + +fn test_counter() { + COUNTER.add(1, &[KeyValue::new("A", "2001")]); +} From 74b0ad41eb08315e06807a0b356f1a8b2cbf80ce Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Fri, 24 May 2024 08:48:02 -0700 Subject: [PATCH 13/37] Fix metrics overflow stress test (#1819) --- stress/src/metrics_overflow.rs | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/stress/src/metrics_overflow.rs b/stress/src/metrics_overflow.rs index ab8b6ac28c..0e27c96b24 100644 --- a/stress/src/metrics_overflow.rs +++ b/stress/src/metrics_overflow.rs @@ -12,8 +12,11 @@ use opentelemetry::{ KeyValue, }; use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider}; -use rand::{rngs::SmallRng, Rng, SeedableRng}; -use std::borrow::Cow; +use rand::{ + rngs::{self}, + Rng, SeedableRng, +}; +use std::{borrow::Cow, cell::RefCell}; mod throughput; @@ -21,23 +24,26 @@ lazy_static! { static ref PROVIDER: SdkMeterProvider = SdkMeterProvider::builder() .with_reader(ManualReader::builder().build()) .build(); - static ref ATTRIBUTE_VALUES: [&'static str; 10] = [ - "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9", - "value10" - ]; static ref COUNTER: Counter = PROVIDER .meter(<&str as Into>>::into("test")) .u64_counter("hello") .init(); } +thread_local! 
{ + /// Store random number generator for each thread + static CURRENT_RNG: RefCell = RefCell::new(rngs::SmallRng::from_entropy()); +} + fn main() { - for v in 0..2001 { - COUNTER.add(100, &[KeyValue::new("A", v.to_string())]); - } throughput::test_throughput(test_counter); } fn test_counter() { - COUNTER.add(1, &[KeyValue::new("A", "2001")]); + // The main goal of this test is to ensure that OTel SDK is not growing its + // memory usage indefinitely even when user code misbehaves by producing + // unbounded metric points (unique time series). + // It also checks that SDK's internal logging is also done in a bounded way. + let rand = CURRENT_RNG.with(|rng| rng.borrow_mut().gen_range(0..100000000)); + COUNTER.add(1, &[KeyValue::new("A", rand)]); } From 98741c7cc7b4aabd0a2c4f28ec2c26afb1117cd9 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Fri, 24 May 2024 09:35:29 -0700 Subject: [PATCH 14/37] [Metrics] - Remove unit abstraction (#1821) Co-authored-by: Zhongyang Wu --- .cspell.json | 4 + examples/metrics-advanced/src/main.rs | 7 +- examples/metrics-basic/src/main.rs | 9 +- .../examples/basic-otlp-http/src/main.rs | 4 +- .../examples/basic-otlp/src/main.rs | 4 +- opentelemetry-proto/src/transform/metrics.rs | 2 +- opentelemetry-sdk/src/metrics/data/mod.rs | 4 +- opentelemetry-sdk/src/metrics/instrument.rs | 24 +-- opentelemetry-sdk/src/metrics/meter.rs | 179 +++++++----------- opentelemetry-sdk/src/metrics/mod.rs | 49 ++--- opentelemetry-sdk/src/metrics/pipeline.rs | 4 +- opentelemetry-sdk/src/metrics/view.rs | 2 +- opentelemetry-stdout/src/metrics/transform.rs | 21 +- opentelemetry/CHANGELOG.md | 3 + opentelemetry/src/metrics/instruments/mod.rs | 14 +- opentelemetry/src/metrics/mod.rs | 58 ++---- 16 files changed, 150 insertions(+), 238 deletions(-) diff --git a/.cspell.json b/.cspell.json index 57a1c7bd34..a091c48706 100644 --- a/.cspell.json +++ b/.cspell.json @@ -43,6 +43,8 @@ "Lalit", "LIBCLANG", "msrv", + "mykey", + "myvalue", "Ochtman", "opentelemetry", 
"OTLP", @@ -53,8 +55,10 @@ "runtimes", "rustc", "shoppingcart", + "struct", "Tescher", "tracerprovider", + "updown", "Zhongyang", "zipkin" ], diff --git a/examples/metrics-advanced/src/main.rs b/examples/metrics-advanced/src/main.rs index 6731071555..b99fc8d4e4 100644 --- a/examples/metrics-advanced/src/main.rs +++ b/examples/metrics-advanced/src/main.rs @@ -1,5 +1,4 @@ use opentelemetry::global; -use opentelemetry::metrics::Unit; use opentelemetry::Key; use opentelemetry::KeyValue; use opentelemetry_sdk::metrics::{ @@ -15,7 +14,7 @@ fn init_meter_provider() -> opentelemetry_sdk::metrics::SdkMeterProvider { Some( Stream::new() .name("my_histogram_renamed") - .unit(Unit::new("milliseconds")), + .unit("milliseconds"), ) } else { None @@ -76,7 +75,7 @@ async fn main() -> Result<(), Box> { // using view. let histogram = meter .f64_histogram("my_histogram") - .with_unit(Unit::new("ms")) + .with_unit("ms") .with_description("My histogram example description") .init(); @@ -114,7 +113,7 @@ async fn main() -> Result<(), Box> { // use a custom set of boundaries, and min/max values will not be recorded. 
let histogram2 = meter .f64_histogram("my_second_histogram") - .with_unit(Unit::new("ms")) + .with_unit("ms") .with_description("My histogram example description") .init(); diff --git a/examples/metrics-basic/src/main.rs b/examples/metrics-basic/src/main.rs index e965ab6efa..996b16a008 100644 --- a/examples/metrics-basic/src/main.rs +++ b/examples/metrics-basic/src/main.rs @@ -1,5 +1,4 @@ use opentelemetry::global; -use opentelemetry::metrics::Unit; use opentelemetry::KeyValue; use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider}; use opentelemetry_sdk::{runtime, Resource}; @@ -47,7 +46,7 @@ async fn main() -> Result<(), Box> { let _observable_counter = meter .u64_observable_counter("my_observable_counter") .with_description("My observable counter example description") - .with_unit(Unit::new("myunit")) + .with_unit("myunit") .with_callback(|observer| { observer.observe( 100, @@ -75,7 +74,7 @@ async fn main() -> Result<(), Box> { let _observable_up_down_counter = meter .i64_observable_up_down_counter("my_observable_updown_counter") .with_description("My observable updown counter example description") - .with_unit(Unit::new("myunit")) + .with_unit("myunit") .with_callback(|observer| { observer.observe( 100, @@ -108,7 +107,7 @@ async fn main() -> Result<(), Box> { let gauge = meter .f64_gauge("my_gauge") .with_description("A gauge set to 1.0") - .with_unit(Unit::new("myunit")) + .with_unit("myunit") .init(); gauge.record( @@ -123,7 +122,7 @@ async fn main() -> Result<(), Box> { let _observable_gauge = meter .f64_observable_gauge("my_observable_gauge") .with_description("An observable gauge set to 1.0") - .with_unit(Unit::new("myunit")) + .with_unit("myunit") .with_callback(|observer| { observer.observe( 1.0, diff --git a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs index 7cc6490164..4bb3f77c8f 100644 --- a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs +++ 
b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs @@ -1,7 +1,7 @@ use once_cell::sync::Lazy; use opentelemetry::{ global, - metrics::{MetricsError, Unit}, + metrics::MetricsError, trace::{TraceContextExt, TraceError, Tracer, TracerProvider as _}, Key, KeyValue, }; @@ -125,7 +125,7 @@ async fn main() -> Result<(), Box> { let counter = meter .u64_counter("test_counter") .with_description("a simple counter for demo purposes.") - .with_unit(Unit::new("my_unit")) + .with_unit("my_unit") .init(); for _ in 0..10 { counter.add(1, &[KeyValue::new("test_key", "test_value")]); diff --git a/opentelemetry-otlp/examples/basic-otlp/src/main.rs b/opentelemetry-otlp/examples/basic-otlp/src/main.rs index 605916801d..50c95723fc 100644 --- a/opentelemetry-otlp/examples/basic-otlp/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp/src/main.rs @@ -1,7 +1,7 @@ use once_cell::sync::Lazy; use opentelemetry::global; use opentelemetry::logs::LogError; -use opentelemetry::metrics::{MetricsError, Unit}; +use opentelemetry::metrics::MetricsError; use opentelemetry::trace::{TraceError, TracerProvider}; use opentelemetry::{ trace::{TraceContextExt, Tracer}, @@ -127,7 +127,7 @@ async fn main() -> Result<(), Box> { let counter = meter .u64_counter("test_counter") .with_description("a simple counter for demo purposes.") - .with_unit(Unit::new("my_unit")) + .with_unit("my_unit") .init(); for _ in 0..10 { counter.add(1, &[KeyValue::new("test_key", "test_value")]); diff --git a/opentelemetry-proto/src/transform/metrics.rs b/opentelemetry-proto/src/transform/metrics.rs index 610348baaf..f49fec06db 100644 --- a/opentelemetry-proto/src/transform/metrics.rs +++ b/opentelemetry-proto/src/transform/metrics.rs @@ -148,7 +148,7 @@ pub mod tonic { TonicMetric { name: metric.name.to_string(), description: metric.description.to_string(), - unit: metric.unit.as_str().to_string(), + unit: metric.unit.to_string(), metadata: vec![], // internal and currently unused data: 
metric.data.as_any().try_into().ok(), } diff --git a/opentelemetry-sdk/src/metrics/data/mod.rs b/opentelemetry-sdk/src/metrics/data/mod.rs index fd4c85c35f..64a1879d26 100644 --- a/opentelemetry-sdk/src/metrics/data/mod.rs +++ b/opentelemetry-sdk/src/metrics/data/mod.rs @@ -2,7 +2,7 @@ use std::{any, borrow::Cow, fmt, time::SystemTime}; -use opentelemetry::{metrics::Unit, KeyValue}; +use opentelemetry::KeyValue; use crate::{instrumentation::Scope, Resource}; @@ -38,7 +38,7 @@ pub struct Metric { /// The description of the instrument, which can be used in documentation. pub description: Cow<'static, str>, /// The unit in which the instrument reports. - pub unit: Unit, + pub unit: Cow<'static, str>, /// The aggregated data from an instrument. pub data: Box, } diff --git a/opentelemetry-sdk/src/metrics/instrument.rs b/opentelemetry-sdk/src/metrics/instrument.rs index 5b8ef5c6e5..72097d504d 100644 --- a/opentelemetry-sdk/src/metrics/instrument.rs +++ b/opentelemetry-sdk/src/metrics/instrument.rs @@ -3,7 +3,7 @@ use std::{any::Any, borrow::Cow, collections::HashSet, hash::Hash, marker, sync: use opentelemetry::{ metrics::{ AsyncInstrument, MetricsError, Result, SyncCounter, SyncGauge, SyncHistogram, - SyncUpDownCounter, Unit, + SyncUpDownCounter, }, Key, KeyValue, }; @@ -69,7 +69,7 @@ pub struct Instrument { /// The functional group of the instrument. pub kind: Option, /// Unit is the unit of measurement recorded by the instrument. - pub unit: Unit, + pub unit: Cow<'static, str>, /// The instrumentation that created the instrument. pub scope: Scope, } @@ -93,8 +93,8 @@ impl Instrument { } /// Set the instrument unit. 
- pub fn unit(mut self, unit: Unit) -> Self { - self.unit = unit; + pub fn unit(mut self, unit: impl Into>) -> Self { + self.unit = unit.into(); self } @@ -109,7 +109,7 @@ impl Instrument { self.name == "" && self.description == "" && self.kind.is_none() - && self.unit.as_str() == "" + && self.unit == "" && self.scope == Scope::default() } @@ -134,7 +134,7 @@ impl Instrument { } pub(crate) fn matches_unit(&self, other: &Instrument) -> bool { - self.unit.as_str() == "" || self.unit == other.unit + self.unit.is_empty() || self.unit.as_ref() == other.unit.as_ref() } pub(crate) fn matches_scope(&self, other: &Instrument) -> bool { @@ -171,7 +171,7 @@ pub struct Stream { /// Describes the purpose of the data. pub description: Cow<'static, str>, /// the unit of measurement recorded. - pub unit: Unit, + pub unit: Cow<'static, str>, /// Aggregation the stream uses for an instrument. pub aggregation: Option, /// An allow-list of attribute keys that will be preserved for the stream. @@ -201,8 +201,8 @@ impl Stream { } /// Set the stream unit. - pub fn unit(mut self, unit: Unit) -> Self { - self.unit = unit; + pub fn unit(mut self, unit: impl Into>) -> Self { + self.unit = unit.into(); self } @@ -233,7 +233,7 @@ pub(crate) struct InstrumentId { /// Defines the functional group of the instrument. pub(crate) kind: InstrumentKind, /// The unit of measurement recorded. - pub(crate) unit: Unit, + pub(crate) unit: Cow<'static, str>, /// Number is the underlying data type of the instrument. pub(crate) number: Cow<'static, str>, } @@ -306,7 +306,7 @@ pub(crate) struct IdInner { /// The functional group of the instrument. kind: InstrumentKind, /// The unit of measurement recorded by the instrument. - pub(crate) unit: Unit, + pub(crate) unit: Cow<'static, str>, /// The instrumentation that created the instrument. 
scope: Scope, } @@ -337,7 +337,7 @@ impl Observable { kind: InstrumentKind, name: Cow<'static, str>, description: Cow<'static, str>, - unit: Unit, + unit: Cow<'static, str>, measures: Vec>>, ) -> Self { Self { diff --git a/opentelemetry-sdk/src/metrics/meter.rs b/opentelemetry-sdk/src/metrics/meter.rs index 542c6e1281..26cf8167dd 100644 --- a/opentelemetry-sdk/src/metrics/meter.rs +++ b/opentelemetry-sdk/src/metrics/meter.rs @@ -7,7 +7,7 @@ use opentelemetry::{ noop::{NoopAsyncInstrument, NoopRegistration}, AsyncInstrument, Callback, CallbackRegistration, Counter, Gauge, Histogram, InstrumentProvider, MetricsError, ObservableCounter, ObservableGauge, - ObservableUpDownCounter, Observer as ApiObserver, Result, Unit, UpDownCounter, + ObservableUpDownCounter, Observer as ApiObserver, Result, UpDownCounter, }, KeyValue, }; @@ -84,50 +84,40 @@ impl InstrumentProvider for SdkMeter { &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.u64_resolver); - p.lookup( - InstrumentKind::Counter, - name, - description, - unit.unwrap_or_default(), - ) - .map(|i| Counter::new(Arc::new(i))) + p.lookup(InstrumentKind::Counter, name, description, unit) + .map(|i| Counter::new(Arc::new(i))) } fn f64_counter( &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.f64_resolver); - p.lookup( - InstrumentKind::Counter, - name, - description, - unit.unwrap_or_default(), - ) - .map(|i| Counter::new(Arc::new(i))) + p.lookup(InstrumentKind::Counter, name, description, unit) + .map(|i| 
Counter::new(Arc::new(i))) } fn u64_observable_counter( &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, callbacks: Vec>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.u64_resolver); let ms = p.measures( InstrumentKind::ObservableCounter, name.clone(), description.clone(), - unit.clone().unwrap_or_default(), + unit.clone(), )?; if ms.is_empty() { return Ok(ObservableCounter::new(Arc::new(NoopAsyncInstrument::new()))); @@ -155,16 +145,16 @@ impl InstrumentProvider for SdkMeter { &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, callbacks: Vec>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.f64_resolver); let ms = p.measures( InstrumentKind::ObservableCounter, name.clone(), description.clone(), - unit.clone().unwrap_or_default(), + unit.clone(), )?; if ms.is_empty() { return Ok(ObservableCounter::new(Arc::new(NoopAsyncInstrument::new()))); @@ -191,50 +181,40 @@ impl InstrumentProvider for SdkMeter { &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.i64_resolver); - p.lookup( - InstrumentKind::UpDownCounter, - name, - description, - unit.unwrap_or_default(), - ) - .map(|i| UpDownCounter::new(Arc::new(i))) + p.lookup(InstrumentKind::UpDownCounter, name, description, unit) + .map(|i| UpDownCounter::new(Arc::new(i))) } fn f64_up_down_counter( &self, name: Cow<'static, str>, description: Option>, - 
unit: Option, + unit: Option>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.f64_resolver); - p.lookup( - InstrumentKind::UpDownCounter, - name, - description, - unit.unwrap_or_default(), - ) - .map(|i| UpDownCounter::new(Arc::new(i))) + p.lookup(InstrumentKind::UpDownCounter, name, description, unit) + .map(|i| UpDownCounter::new(Arc::new(i))) } fn i64_observable_up_down_counter( &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, callbacks: Vec>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.i64_resolver); let ms = p.measures( InstrumentKind::ObservableUpDownCounter, name.clone(), description.clone(), - unit.clone().unwrap_or_default(), + unit.clone(), )?; if ms.is_empty() { return Ok(ObservableUpDownCounter::new(Arc::new( @@ -264,16 +244,16 @@ impl InstrumentProvider for SdkMeter { &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, callbacks: Vec>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.f64_resolver); let ms = p.measures( InstrumentKind::ObservableUpDownCounter, name.clone(), description.clone(), - unit.clone().unwrap_or_default(), + unit.clone(), )?; if ms.is_empty() { return Ok(ObservableUpDownCounter::new(Arc::new( @@ -303,67 +283,52 @@ impl InstrumentProvider for SdkMeter { &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + 
validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.u64_resolver); - p.lookup( - InstrumentKind::Gauge, - name, - description, - unit.unwrap_or_default(), - ) - .map(|i| Gauge::new(Arc::new(i))) + p.lookup(InstrumentKind::Gauge, name, description, unit) + .map(|i| Gauge::new(Arc::new(i))) } fn f64_gauge( &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.f64_resolver); - p.lookup( - InstrumentKind::Gauge, - name, - description, - unit.unwrap_or_default(), - ) - .map(|i| Gauge::new(Arc::new(i))) + p.lookup(InstrumentKind::Gauge, name, description, unit) + .map(|i| Gauge::new(Arc::new(i))) } fn i64_gauge( &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.i64_resolver); - p.lookup( - InstrumentKind::Gauge, - name, - description, - unit.unwrap_or_default(), - ) - .map(|i| Gauge::new(Arc::new(i))) + p.lookup(InstrumentKind::Gauge, name, description, unit) + .map(|i| Gauge::new(Arc::new(i))) } fn u64_observable_gauge( &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, callbacks: Vec>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.u64_resolver); let ms = p.measures( InstrumentKind::ObservableGauge, name.clone(), description.clone(), - unit.clone().unwrap_or_default(), + unit.clone(), )?; if 
ms.is_empty() { return Ok(ObservableGauge::new(Arc::new(NoopAsyncInstrument::new()))); @@ -391,16 +356,16 @@ impl InstrumentProvider for SdkMeter { &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, callbacks: Vec>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.i64_resolver); let ms = p.measures( InstrumentKind::ObservableGauge, name.clone(), description.clone(), - unit.clone().unwrap_or_default(), + unit.clone(), )?; if ms.is_empty() { return Ok(ObservableGauge::new(Arc::new(NoopAsyncInstrument::new()))); @@ -428,16 +393,16 @@ impl InstrumentProvider for SdkMeter { &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, callbacks: Vec>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.f64_resolver); let ms = p.measures( InstrumentKind::ObservableGauge, name.clone(), description.clone(), - unit.clone().unwrap_or_default(), + unit.clone(), )?; if ms.is_empty() { return Ok(ObservableGauge::new(Arc::new(NoopAsyncInstrument::new()))); @@ -465,34 +430,24 @@ impl InstrumentProvider for SdkMeter { &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.f64_resolver); - p.lookup( - InstrumentKind::Histogram, - name, - description, - unit.unwrap_or_default(), - ) - .map(|i| Histogram::new(Arc::new(i))) + p.lookup(InstrumentKind::Histogram, name, description, unit) + .map(|i| Histogram::new(Arc::new(i))) } fn 
u64_histogram( &self, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, ) -> Result> { - validate_instrument_config(name.as_ref(), unit.as_ref(), self.validation_policy)?; + validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; let p = InstrumentResolver::new(self, &self.u64_resolver); - p.lookup( - InstrumentKind::Histogram, - name, - description, - unit.unwrap_or_default(), - ) - .map(|i| Histogram::new(Arc::new(i))) + p.lookup(InstrumentKind::Histogram, name, description, unit) + .map(|i| Histogram::new(Arc::new(i))) } fn register_callback( @@ -563,7 +518,7 @@ enum InstrumentValidationPolicy { fn validate_instrument_config( name: &str, - unit: Option<&Unit>, + unit: &Option>, policy: InstrumentValidationPolicy, ) -> Result<()> { match validate_instrument_name(name).and_then(|_| validate_instrument_unit(unit)) { @@ -605,14 +560,14 @@ fn validate_instrument_name(name: &str) -> Result<()> { Ok(()) } -fn validate_instrument_unit(unit: Option<&Unit>) -> Result<()> { +fn validate_instrument_unit(unit: &Option>) -> Result<()> { if let Some(unit) = unit { - if unit.as_str().len() > INSTRUMENT_UNIT_NAME_MAX_LENGTH { + if unit.len() > INSTRUMENT_UNIT_NAME_MAX_LENGTH { return Err(MetricsError::InvalidInstrumentConfiguration( INSTRUMENT_UNIT_LENGTH, )); } - if unit.as_str().contains(|c: char| !c.is_ascii()) { + if unit.contains(|c: char| !c.is_ascii()) { return Err(MetricsError::InvalidInstrumentConfiguration( INSTRUMENT_UNIT_INVALID_CHAR, )); @@ -731,7 +686,7 @@ where kind: InstrumentKind, name: Cow<'static, str>, description: Option>, - unit: Unit, + unit: Option>, ) -> Result> { let aggregators = self.measures(kind, name, description, unit)?; Ok(ResolvedMeasures { @@ -744,12 +699,12 @@ where kind: InstrumentKind, name: Cow<'static, str>, description: Option>, - unit: Unit, + unit: Option>, ) -> Result>>> { let inst = Instrument { name, description: description.unwrap_or_default(), - unit, + unit: 
unit.unwrap_or_default(), kind: Some(kind), scope: self.meter.scope.clone(), }; @@ -762,7 +717,7 @@ where mod tests { use std::sync::Arc; - use opentelemetry::metrics::{InstrumentProvider, MeterProvider, MetricsError, Unit}; + use opentelemetry::metrics::{InstrumentProvider, MeterProvider, MetricsError}; use super::{ InstrumentValidationPolicy, SdkMeter, INSTRUMENT_NAME_FIRST_ALPHABETIC, @@ -895,7 +850,7 @@ mod tests { )); } }; - let unit = Some(Unit::new(unit)); + let unit = Some(unit.into()); assert( meter .u64_counter("test".into(), None, unit.clone()) diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index 1bad037ad4..e7a51213b0 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -10,10 +10,7 @@ //! //! ``` //! use opentelemetry::global; -//! use opentelemetry::{ -//! metrics::Unit, -//! KeyValue, -//! }; +//! use opentelemetry::KeyValue; //! use opentelemetry_sdk::{metrics::SdkMeterProvider, Resource}; //! //! // Generate SDK configuration, resource, views, etc @@ -29,7 +26,7 @@ //! // Create instruments scoped to the meter //! let counter = meter //! .u64_counter("power_consumption") -//! .with_unit(Unit::new("kWh")) +//! .with_unit("kWh") //! .init(); //! //! 
// use instruments to record measurements @@ -149,10 +146,7 @@ mod tests { use crate::testing::metrics::InMemoryMetricsExporterBuilder; use crate::{runtime, testing::metrics::InMemoryMetricsExporter}; use opentelemetry::metrics::{Counter, UpDownCounter}; - use opentelemetry::{ - metrics::{MeterProvider as _, Unit}, - KeyValue, - }; + use opentelemetry::{metrics::MeterProvider as _, KeyValue}; use std::borrow::Cow; // Run all tests in this mod @@ -232,7 +226,7 @@ mod tests { let meter = meter_provider.meter("test"); let _counter = meter .u64_observable_counter("my_observable_counter") - .with_unit(Unit::new("my_unit")) + .with_unit("my_unit") .with_callback(|observer| { observer.observe(100, &[KeyValue::new("key1", "value1")]); observer.observe(200, &[KeyValue::new("key1", "value2")]); @@ -248,7 +242,7 @@ mod tests { assert!(!resource_metrics.is_empty()); let metric = &resource_metrics[0].scope_metrics[0].metrics[0]; assert_eq!(metric.name, "my_observable_counter"); - assert_eq!(metric.unit.as_str(), "my_unit"); + assert_eq!(metric.unit, "my_unit"); let sum = metric .data .as_any() @@ -312,13 +306,13 @@ mod tests { let meter = meter_provider.meter("test"); let counter = meter .u64_counter("my_counter") - .with_unit(Unit::new("my_unit")) + .with_unit("my_unit") .with_description("my_description") .init(); let counter_duplicated = meter .u64_counter("my_counter") - .with_unit(Unit::new("my_unit")) + .with_unit("my_unit") .with_description("my_description") .init(); @@ -338,7 +332,7 @@ mod tests { ); let metric = &resource_metrics[0].scope_metrics[0].metrics[0]; assert_eq!(metric.name, "my_counter"); - assert_eq!(metric.unit.as_str(), "my_unit"); + assert_eq!(metric.unit, "my_unit"); let sum = metric .data .as_any() @@ -364,13 +358,13 @@ mod tests { let meter2 = meter_provider.meter("test.meter2"); let counter1 = meter1 .u64_counter("my_counter") - .with_unit(Unit::new("my_unit")) + .with_unit("my_unit") .with_description("my_description") .init(); let counter2 = 
meter2 .u64_counter("my_counter") - .with_unit(Unit::new("my_unit")) + .with_unit("my_unit") .with_description("my_description") .init(); @@ -403,7 +397,7 @@ mod tests { if let Some(scope1) = scope1 { let metric1 = &scope1.metrics[0]; assert_eq!(metric1.name, "my_counter"); - assert_eq!(metric1.unit.as_str(), "my_unit"); + assert_eq!(metric1.unit, "my_unit"); assert_eq!(metric1.description, "my_description"); let sum1 = metric1 .data @@ -423,7 +417,7 @@ mod tests { if let Some(scope2) = scope2 { let metric2 = &scope2.metrics[0]; assert_eq!(metric2.name, "my_counter"); - assert_eq!(metric2.unit.as_str(), "my_unit"); + assert_eq!(metric2.unit, "my_unit"); assert_eq!(metric2.description, "my_description"); let sum2 = metric2 .data @@ -465,13 +459,13 @@ mod tests { ); let counter1 = meter1 .u64_counter("my_counter") - .with_unit(Unit::new("my_unit")) + .with_unit("my_unit") .with_description("my_description") .init(); let counter2 = meter2 .u64_counter("my_counter") - .with_unit(Unit::new("my_unit")) + .with_unit("my_unit") .with_description("my_description") .init(); @@ -506,7 +500,7 @@ mod tests { let metric = &resource_metrics[0].scope_metrics[0].metrics[0]; assert_eq!(metric.name, "my_counter"); - assert_eq!(metric.unit.as_str(), "my_unit"); + assert_eq!(metric.unit, "my_unit"); assert_eq!(metric.description, "my_description"); let sum = metric .data @@ -536,7 +530,7 @@ mod tests { record_min_max: false, }) .name("test_histogram_renamed") - .unit(Unit::new("test_unit_renamed")); + .unit("test_unit_renamed"); let view = new_view(criteria, stream_invalid_aggregation).expect("Expected to create a new view"); @@ -549,7 +543,7 @@ mod tests { let meter = meter_provider.meter("test"); let histogram = meter .f64_histogram("test_histogram") - .with_unit(Unit::new("test_unit")) + .with_unit("test_unit") .init(); histogram.record(1.5, &[KeyValue::new("key1", "value1")]); @@ -566,8 +560,7 @@ mod tests { "View rename should be ignored and original name retained." 
); assert_eq!( - metric.unit.as_str(), - "test_unit", + metric.unit, "test_unit", "View rename of unit should be ignored and original unit retained." ); } @@ -1157,7 +1150,7 @@ mod tests { let meter = self.meter_provider.meter(meter_name); let mut counter_builder = meter.u64_counter(counter_name); if let Some(unit_name) = unit { - counter_builder = counter_builder.with_unit(Unit::new(unit_name)); + counter_builder = counter_builder.with_unit(unit_name); } counter_builder.init() } @@ -1171,7 +1164,7 @@ mod tests { let meter = self.meter_provider.meter(meter_name); let mut updown_counter_builder = meter.i64_up_down_counter(counter_name); if let Some(unit_name) = unit { - updown_counter_builder = updown_counter_builder.with_unit(Unit::new(unit_name)); + updown_counter_builder = updown_counter_builder.with_unit(unit_name); } updown_counter_builder.init() } @@ -1217,7 +1210,7 @@ mod tests { let metric = &resource_metric.scope_metrics[0].metrics[0]; assert_eq!(metric.name, counter_name); if let Some(expected_unit) = unit_name { - assert_eq!(metric.unit.as_str(), expected_unit); + assert_eq!(metric.unit, expected_unit); } metric diff --git a/opentelemetry-sdk/src/metrics/pipeline.rs b/opentelemetry-sdk/src/metrics/pipeline.rs index cbb942b17e..49415c15e4 100644 --- a/opentelemetry-sdk/src/metrics/pipeline.rs +++ b/opentelemetry-sdk/src/metrics/pipeline.rs @@ -7,7 +7,7 @@ use std::{ use opentelemetry::{ global, - metrics::{CallbackRegistration, MetricsError, Result, Unit}, + metrics::{CallbackRegistration, MetricsError, Result}, KeyValue, }; @@ -192,7 +192,7 @@ impl SdkProducer for Pipeline { struct InstrumentSync { name: Cow<'static, str>, description: Cow<'static, str>, - unit: Unit, + unit: Cow<'static, str>, comp_agg: Box, } diff --git a/opentelemetry-sdk/src/metrics/view.rs b/opentelemetry-sdk/src/metrics/view.rs index 139ec05018..8184613589 100644 --- a/opentelemetry-sdk/src/metrics/view.rs +++ b/opentelemetry-sdk/src/metrics/view.rs @@ -160,7 +160,7 @@ pub fn 
new_view(criteria: Instrument, mask: Stream) -> Result> { } else { i.description.clone() }, - unit: if !mask.unit.as_str().is_empty() { + unit: if !mask.unit.is_empty() { mask.unit.clone() } else { i.unit.clone() diff --git a/opentelemetry-stdout/src/metrics/transform.rs b/opentelemetry-stdout/src/metrics/transform.rs index 49aa662696..9df88ff7da 100644 --- a/opentelemetry-stdout/src/metrics/transform.rs +++ b/opentelemetry-stdout/src/metrics/transform.rs @@ -60,29 +60,14 @@ impl From for ScopeMetrics { } } -#[derive(Serialize, Debug, Clone)] -struct Unit(Cow<'static, str>); - -impl Unit { - fn is_empty(&self) -> bool { - self.0.is_empty() - } -} - -impl From for Unit { - fn from(unit: opentelemetry::metrics::Unit) -> Self { - Unit(unit.as_str().to_string().into()) - } -} - #[derive(Serialize, Debug, Clone)] #[serde(rename_all = "camelCase")] struct Metric { name: Cow<'static, str>, #[serde(skip_serializing_if = "str::is_empty")] description: Cow<'static, str>, - #[serde(skip_serializing_if = "Unit::is_empty")] - unit: Unit, + #[serde(skip_serializing_if = "str::is_empty")] + unit: Cow<'static, str>, #[serde(flatten)] data: Option, } @@ -92,7 +77,7 @@ impl From for Metric { Metric { name: value.name, description: value.description, - unit: value.unit.into(), + unit: value.unit, data: map_data(value.data.as_any()), } } diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index 396e48c388..1b578d9a2e 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -7,6 +7,9 @@ - When "metrics" feature is enabled, `KeyValue` implements `PartialEq`, `Eq`, `PartialOrder`, `Order`, `Hash`. This is meant to be used for metrics aggregation purposes only. +- Removed `Unit` struct for specifying Instrument units. Unit is treated as an + opaque string. Migration: Replace `.with_unit(Unit::new("myunit"))` with + `.with_unit("myunit")`. 
## v0.23.0 diff --git a/opentelemetry/src/metrics/instruments/mod.rs b/opentelemetry/src/metrics/instruments/mod.rs index 67158cf565..3ee530453a 100644 --- a/opentelemetry/src/metrics/instruments/mod.rs +++ b/opentelemetry/src/metrics/instruments/mod.rs @@ -1,4 +1,4 @@ -use crate::metrics::{Meter, MetricsError, Result, Unit}; +use crate::metrics::{Meter, MetricsError, Result}; use crate::KeyValue; use core::fmt; use std::any::Any; @@ -29,7 +29,7 @@ pub struct InstrumentBuilder<'a, T> { instrument_provider: &'a dyn InstrumentProvider, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, _marker: marker::PhantomData, } @@ -61,8 +61,8 @@ where /// Unit must be: /// - ASCII string /// - No longer than 63 characters - pub fn with_unit(mut self, unit: Unit) -> Self { - self.unit = Some(unit); + pub fn with_unit>>(mut self, unit: S) -> Self { + self.unit = Some(unit.into()); self } @@ -112,7 +112,7 @@ where meter: &'a Meter, name: Cow<'static, str>, description: Option>, - unit: Option, + unit: Option>, _inst: marker::PhantomData, callbacks: Vec>, } @@ -147,8 +147,8 @@ where /// Unit must be: /// - ASCII string /// - No longer than 63 characters - pub fn with_unit(mut self, unit: Unit) -> Self { - self.unit = Some(unit); + pub fn with_unit>>(mut self, unit: S) -> Self { + self.unit = Some(unit.into()); self } diff --git a/opentelemetry/src/metrics/mod.rs b/opentelemetry/src/metrics/mod.rs index bd24d6368d..1bc17e476e 100644 --- a/opentelemetry/src/metrics/mod.rs +++ b/opentelemetry/src/metrics/mod.rs @@ -57,32 +57,6 @@ impl From> for MetricsError { } } -/// Units denote underlying data units tracked by `Meter`s. 
-#[derive(Clone, Default, Debug, PartialEq, Eq, Hash)] -pub struct Unit(Cow<'static, str>); - -impl Unit { - /// Create a new `Unit` from an `Into` - pub fn new(value: S) -> Self - where - S: Into>, - { - Unit(value.into()) - } - - /// View unit as &str - pub fn as_str(&self) -> &str { - self.0.as_ref() - } -} - -impl AsRef for Unit { - #[inline] - fn as_ref(&self) -> &str { - self.0.as_ref() - } -} - struct F64Hashable(f64); impl PartialEq for F64Hashable { @@ -139,7 +113,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, ) -> Result> { Ok(Counter::new(Arc::new(noop::NoopSyncInstrument::new()))) } @@ -149,7 +123,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, ) -> Result> { Ok(Counter::new(Arc::new(noop::NoopSyncInstrument::new()))) } @@ -159,7 +133,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, _callback: Vec>, ) -> Result> { Ok(ObservableCounter::new(Arc::new( @@ -172,7 +146,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, _callback: Vec>, ) -> Result> { Ok(ObservableCounter::new(Arc::new( @@ -185,7 +159,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, ) -> Result> { Ok(UpDownCounter::new( Arc::new(noop::NoopSyncInstrument::new()), @@ -197,7 +171,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, ) -> Result> { Ok(UpDownCounter::new( Arc::new(noop::NoopSyncInstrument::new()), @@ -209,7 +183,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, _callback: Vec>, ) -> Result> { Ok(ObservableUpDownCounter::new(Arc::new( @@ -222,7 +196,7 
@@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, _callback: Vec>, ) -> Result> { Ok(ObservableUpDownCounter::new(Arc::new( @@ -235,7 +209,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, ) -> Result> { Ok(Gauge::new(Arc::new(noop::NoopSyncInstrument::new()))) } @@ -245,7 +219,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, ) -> Result> { Ok(Gauge::new(Arc::new(noop::NoopSyncInstrument::new()))) } @@ -255,7 +229,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, ) -> Result> { Ok(Gauge::new(Arc::new(noop::NoopSyncInstrument::new()))) } @@ -265,7 +239,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, _callback: Vec>, ) -> Result> { Ok(ObservableGauge::new(Arc::new( @@ -278,7 +252,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, _callback: Vec>, ) -> Result> { Ok(ObservableGauge::new(Arc::new( @@ -291,7 +265,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, _callback: Vec>, ) -> Result> { Ok(ObservableGauge::new(Arc::new( @@ -304,7 +278,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, ) -> Result> { Ok(Histogram::new(Arc::new(noop::NoopSyncInstrument::new()))) } @@ -314,7 +288,7 @@ pub trait InstrumentProvider { &self, _name: Cow<'static, str>, _description: Option>, - _unit: Option, + _unit: Option>, ) -> Result> { Ok(Histogram::new(Arc::new(noop::NoopSyncInstrument::new()))) } From 1b410d08dd9cd048fc349b7d8837fd7d114f7714 Mon Sep 17 00:00:00 2001 From: Cijo 
Thomas Date: Fri, 24 May 2024 09:45:17 -0700 Subject: [PATCH 15/37] Remove AWSid generator (#1820) Co-authored-by: Zhongyang Wu --- opentelemetry-sdk/CHANGELOG.md | 15 +-- .../src/trace/id_generator/aws.rs | 98 ------------------- .../src/trace/id_generator/mod.rs | 3 - opentelemetry-sdk/src/trace/mod.rs | 5 - 4 files changed, 9 insertions(+), 112 deletions(-) delete mode 100644 opentelemetry-sdk/src/trace/id_generator/aws.rs diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 17e0483a5d..8460c89133 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -10,18 +10,21 @@ must now use `.with_resource(RESOURCE::default())` to configure Resource on logger provider. - Removed dependency on `ordered-float`. +- Removed `XrayIdGenerator`, which was marked deprecated since 0.21.3. Use + [`opentelemetry-aws`](https://crates.io/crates/opentelemetry-aws), version + 0.10.0 or newer. - **Breaking** [1726](https://github.com/open-telemetry/opentelemetry-rust/pull/1726) Update `LogProcessor::emit() method to take mutable reference to LogData. This is breaking - change for LogProcessor developers. If the processor needs to invoke the exporter - asynchronously, it should clone the data to ensure it can be safely processed without + change for LogProcessor developers. If the processor needs to invoke the exporter + asynchronously, it should clone the data to ensure it can be safely processed without lifetime issues. Any changes made to the log data before cloning in this method will be reflected in the next log processor in the chain, as well as to the exporter. - **Breaking** [1726](https://github.com/open-telemetry/opentelemetry-rust/pull/1726) - Update `LogExporter::export() method to accept a batch of log data, which can be either a - reference or owned `LogData`. If the exporter needs to process the log data - asynchronously, it should clone the log data to ensure it can be safely processed without - lifetime issues. 
+ Update `LogExporter::export() method to accept a batch of log data, which can be either a + reference or owned`LogData`. If the exporter needs to process the log data + asynchronously, it should clone the log data to ensure it can be safely processed without + lifetime issues. ## v0.23.0 diff --git a/opentelemetry-sdk/src/trace/id_generator/aws.rs b/opentelemetry-sdk/src/trace/id_generator/aws.rs deleted file mode 100644 index 278467e230..0000000000 --- a/opentelemetry-sdk/src/trace/id_generator/aws.rs +++ /dev/null @@ -1,98 +0,0 @@ -use crate::trace::{IdGenerator, RandomIdGenerator}; -use opentelemetry::trace::{SpanId, TraceId}; -use std::time::{Duration, UNIX_EPOCH}; - -/// Generates AWS X-Ray compliant Trace and Span ids. -/// -/// Generates OpenTelemetry formatted `TraceId`'s and `SpanId`'s. The `TraceId`'s are generated so -/// they can be backed out into X-Ray format by the [AWS X-Ray Exporter][xray-exporter] in the -/// [OpenTelemetry Collector][otel-collector]. -/// -/// ## Trace ID Format -/// -/// A `trace_id` consists of three numbers separated by hyphens. For example, `1-58406520-a006649127e371903a2de979`. -/// This includes: -/// -/// * The version number, that is, 1. -/// * The time of the original request, in Unix epoch time, in 8 hexadecimal digits. -/// * For example, 10:00AM December 1st, 2016 PST in epoch time is 1480615200 seconds, or 58406520 in hexadecimal digits. -/// * A 96-bit identifier for the trace, globally unique, in 24 hexadecimal digits. -/// -/// See the [AWS X-Ray Documentation][xray-trace-id] for more details. 
-/// -/// ## Example -/// -/// ``` -/// use opentelemetry_sdk::trace::{self, TracerProvider, XrayIdGenerator}; -/// -/// let _provider: TracerProvider = TracerProvider::builder() -/// .with_config(trace::Config::default().with_id_generator(XrayIdGenerator::default())) -/// .build(); -/// ``` -/// -/// [otel-collector]: https://github.com/open-telemetry/opentelemetry-collector-contrib#opentelemetry-collector-contrib -/// [xray-exporter]: https://godoc.org/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter -/// [xray-trace-id]: https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html#xray-api-traceids -#[derive(Debug, Default)] -#[deprecated( - since = "0.21.3", - note = "XrayId Generator has been migrated to the opentelemetry-aws crate" -)] -pub struct XrayIdGenerator { - sdk_default_generator: RandomIdGenerator, -} - -impl IdGenerator for XrayIdGenerator { - /// Generates a new `TraceId` that can be converted to an X-Ray Trace ID - fn new_trace_id(&self) -> TraceId { - let mut default_trace_id: String = - format!("{:024x}", self.sdk_default_generator.new_trace_id()); - - default_trace_id.truncate(24); - - let epoch_time_seconds: u64 = opentelemetry::time::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_else(|_| Duration::from_secs(0)) - .as_secs(); - - TraceId::from_hex(format!("{:08x}{}", epoch_time_seconds, default_trace_id).as_str()) - .unwrap_or(TraceId::INVALID) - } - - /// Generates a new `SpanId` that can be converted to an X-Ray Segment ID - fn new_span_id(&self) -> SpanId { - self.sdk_default_generator.new_span_id() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::thread::sleep; - - #[test] - fn test_trace_id_generation() { - let before: u64 = opentelemetry::time::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs(); - sleep(Duration::from_secs(1)); - - let generator: XrayIdGenerator = XrayIdGenerator::default(); - let trace_id: TraceId = generator.new_trace_id(); - - 
sleep(Duration::from_secs(1)); - let after: u64 = opentelemetry::time::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs(); - - let trace_as_hex = trace_id.to_string(); - let (timestamp, _xray_id) = trace_as_hex.split_at(8_usize); - - let trace_time: u64 = u64::from_str_radix(timestamp, 16).unwrap(); - - assert!(before <= trace_time); - assert!(after >= trace_time); - } -} diff --git a/opentelemetry-sdk/src/trace/id_generator/mod.rs b/opentelemetry-sdk/src/trace/id_generator/mod.rs index 27cea4cb19..1cf483118e 100644 --- a/opentelemetry-sdk/src/trace/id_generator/mod.rs +++ b/opentelemetry-sdk/src/trace/id_generator/mod.rs @@ -1,6 +1,3 @@ -//! Id Generator -pub(super) mod aws; - use opentelemetry::trace::{SpanId, TraceId}; use rand::{rngs, Rng, SeedableRng}; use std::cell::RefCell; diff --git a/opentelemetry-sdk/src/trace/mod.rs b/opentelemetry-sdk/src/trace/mod.rs index 36c017dc7d..d6c4ceae2e 100644 --- a/opentelemetry-sdk/src/trace/mod.rs +++ b/opentelemetry-sdk/src/trace/mod.rs @@ -20,11 +20,6 @@ mod tracer; pub use config::{config, Config}; pub use events::SpanEvents; -#[deprecated( - since = "0.21.3", - note = "XrayId Generator has been migrated to the opentelemetry-aws crate" -)] -pub use id_generator::aws::XrayIdGenerator; pub use id_generator::{IdGenerator, RandomIdGenerator}; pub use links::SpanLinks; pub use provider::{Builder, TracerProvider}; From f8ee5518d75ea67e0a3be3a7b9c0b45274cbf8ed Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Fri, 24 May 2024 10:10:20 -0700 Subject: [PATCH 16/37] Improve CI speed by removing duplicate run of tests (#1822) --- scripts/test.sh | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/scripts/test.sh b/scripts/test.sh index cd02a14994..dfcb925659 100755 --- a/scripts/test.sh +++ b/scripts/test.sh @@ -2,13 +2,15 @@ set -eu -cargo test --workspace --all-features "$@" -- --test-threads=1 +echo "Running tests for all packages in workspace with --all-features" +cargo test --workspace 
--all-features # See https://github.com/rust-lang/cargo/issues/5364 +echo "Running tests for opentelemetry package with --no-default-features" cargo test --manifest-path=opentelemetry/Cargo.toml --no-default-features # Run global tracer provider test in single thread -cargo test --manifest-path=opentelemetry/Cargo.toml --all-features -- --ignored --test-threads=1 - -cargo test --manifest-path=opentelemetry/Cargo.toml --all-features -cargo test --manifest-path=opentelemetry-zipkin/Cargo.toml --all-features +# //TODO: This tests were not running for a while. Need to find out how to run +# run them. Using --ignored will run other tests as well, so that cannot be used. +# echo "Running global tracer provider for opentelemetry-sdk package with single thread." +# cargo test --manifest-path=opentelemetry-sdk/Cargo.toml --all-features -- --test-threads=1 From 57f87d68ffd098877e327aada5b9ad7499aa0b9b Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Fri, 24 May 2024 10:55:24 -0700 Subject: [PATCH 17/37] Add more observable counter tests (#1818) --- .cspell.json | 3 + opentelemetry-sdk/src/metrics/mod.rs | 152 +++++++++++++++------------ 2 files changed, 85 insertions(+), 70 deletions(-) diff --git a/.cspell.json b/.cspell.json index a091c48706..47fc5d18e8 100644 --- a/.cspell.json +++ b/.cspell.json @@ -32,8 +32,10 @@ "Cijo", "clippy", "codecov", + "datapoint", "deque", "Dirkjan", + "EPYC", "hasher", "isahc", "Isobel", @@ -45,6 +47,7 @@ "msrv", "mykey", "myvalue", + "nocapture", "Ochtman", "opentelemetry", "OTLP", diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index e7a51213b0..ca5076763d 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -145,9 +145,10 @@ mod tests { use crate::metrics::reader::TemporalitySelector; use crate::testing::metrics::InMemoryMetricsExporterBuilder; use crate::{runtime, testing::metrics::InMemoryMetricsExporter}; - use opentelemetry::metrics::{Counter, 
UpDownCounter}; + use opentelemetry::metrics::{Counter, Meter, UpDownCounter}; use opentelemetry::{metrics::MeterProvider as _, KeyValue}; use std::borrow::Cow; + use std::sync::{Arc, Mutex}; // Run all tests in this mod // cargo test metrics::tests --features=metrics,testing @@ -213,86 +214,94 @@ mod tests { } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] - async fn observable_counter_aggregation() { + async fn observable_counter_aggregation_cumulative_non_zero_increment() { // Run this test with stdout enabled to see output. - // cargo test observable_counter_aggregation --features=metrics,testing -- --nocapture + // cargo test observable_counter_aggregation_cumulative_non_zero_increment --features=testing -- --nocapture + observable_counter_aggregation_helper(Temporality::Cumulative, 100, 10, 4); + } - // Arrange - let exporter = InMemoryMetricsExporter::default(); - let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build(); - let meter_provider = SdkMeterProvider::builder().with_reader(reader).build(); + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn observable_counter_aggregation_delta_non_zero_increment() { + // Run this test with stdout enabled to see output. + // cargo test observable_counter_aggregation_delta_non_zero_increment --features=testing -- --nocapture + observable_counter_aggregation_helper(Temporality::Delta, 100, 10, 4); + } - // Act - let meter = meter_provider.meter("test"); - let _counter = meter + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn observable_counter_aggregation_cumulative_zero_increment() { + // Run this test with stdout enabled to see output. + // cargo test observable_counter_aggregation_cumulative_zero_increment --features=testing -- --nocapture + observable_counter_aggregation_helper(Temporality::Cumulative, 100, 0, 4); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + #[ignore = "Aggregation bug! 
https://github.com/open-telemetry/opentelemetry-rust/issues/1517"] + async fn observable_counter_aggregation_delta_zero_increment() { + // Run this test with stdout enabled to see output. + // cargo test observable_counter_aggregation_delta_zero_increment --features=testing -- --nocapture + observable_counter_aggregation_helper(Temporality::Delta, 100, 0, 4); + } + + fn observable_counter_aggregation_helper( + temporality: Temporality, + start: u64, + increment: u64, + length: u64, + ) { + // Arrange + let mut test_context = TestContext::new(temporality); + // The Observable counter reports values[0], values[1],....values[n] on each flush. + let values: Vec = (0..length).map(|i| start + i * increment).collect(); + println!("Testing with observable values: {:?}", values); + let values = Arc::new(values); + let values_clone = values.clone(); + let i = Arc::new(Mutex::new(0)); + let _observable_counter = test_context + .meter() .u64_observable_counter("my_observable_counter") .with_unit("my_unit") - .with_callback(|observer| { - observer.observe(100, &[KeyValue::new("key1", "value1")]); - observer.observe(200, &[KeyValue::new("key1", "value2")]); + .with_callback(move |observer| { + let mut index = i.lock().unwrap(); + if *index < values.len() { + observer.observe(values[*index], &[KeyValue::new("key1", "value1")]); + *index += 1; + } }) .init(); - meter_provider.force_flush().unwrap(); - - // Assert - let resource_metrics = exporter - .get_finished_metrics() - .expect("metrics are expected to be exported."); - assert!(!resource_metrics.is_empty()); - let metric = &resource_metrics[0].scope_metrics[0].metrics[0]; - assert_eq!(metric.name, "my_observable_counter"); - assert_eq!(metric.unit, "my_unit"); - let sum = metric - .data - .as_any() - .downcast_ref::>() - .expect("Sum aggregation expected for ObservableCounter instruments by default"); - - // Expecting 2 time-series. 
- assert_eq!(sum.data_points.len(), 2); - assert!(sum.is_monotonic, "Counter should produce monotonic."); - assert_eq!( - sum.temporality, - data::Temporality::Cumulative, - "Should produce cumulative by default." - ); - - // find and validate key1=value1 datapoint - let mut data_point1 = None; - for datapoint in &sum.data_points { - if datapoint - .attributes - .iter() - .any(|kv| kv.key.as_str() == "key1" && kv.value.as_str() == "value1") - { - data_point1 = Some(datapoint); + for (iter, v) in values_clone.iter().enumerate() { + test_context.flush_metrics(); + let sum = test_context.get_aggregation::>("my_observable_counter", None); + assert_eq!(sum.data_points.len(), 1); + assert!(sum.is_monotonic, "Counter should produce monotonic."); + if let Temporality::Cumulative = temporality { + assert_eq!( + sum.temporality, + Temporality::Cumulative, + "Should produce cumulative" + ); + } else { + assert_eq!(sum.temporality, Temporality::Delta, "Should produce delta"); } - } - assert_eq!( - data_point1 - .expect("datapoint with key1=value1 expected") - .value, - 100 - ); - // find and validate key1=value2 datapoint - let mut data_point1 = None; - for datapoint in &sum.data_points { - if datapoint - .attributes - .iter() - .any(|kv| kv.key.as_str() == "key1" && kv.value.as_str() == "value2") - { - data_point1 = Some(datapoint); + // find and validate key1=value1 datapoint + let data_point = find_datapoint_with_key_value(&sum.data_points, "key1", "value1") + .expect("datapoint with key1=value1 expected"); + if let Temporality::Cumulative = temporality { + // Cumulative counter should have the value as is. + assert_eq!(data_point.value, *v); + } else { + // Delta counter should have the increment value. + // Except for the first value which should be the start value. 
+ if iter == 0 { + assert_eq!(data_point.value, start); + } else { + assert_eq!(data_point.value, increment); + } } + + test_context.reset_metrics(); } - assert_eq!( - data_point1 - .expect("datapoint with key1=value2 expected") - .value, - 200 - ); } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -566,7 +575,6 @@ mod tests { } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] - // #[ignore = "Spatial aggregation is not yet implemented."] async fn spatial_aggregation_when_view_drops_attributes_observable_counter() { // cargo test spatial_aggregation_when_view_drops_attributes_observable_counter --features=metrics,testing @@ -1169,6 +1177,10 @@ mod tests { updown_counter_builder.init() } + fn meter(&self) -> Meter { + self.meter_provider.meter("test") + } + fn flush_metrics(&self) { self.meter_provider.force_flush().unwrap(); } From ac741c2186014498168315664e090118df616d5a Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Fri, 24 May 2024 11:45:49 -0700 Subject: [PATCH 18/37] Remove env_logger from cargo (#1825) --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 0c8e3839b4..5f16c4957d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,6 @@ debug = 1 async-std = "1.10" async-trait = "0.1" bytes = "1" -env_logger = { version = "0.10", default-features = false } # env_logger requires a newer MSRV futures-core = "0.3" futures-executor = "0.3" futures-util = { version = "0.3", default-features = false } From ec9fd62d2ef6ebdcc736d26258758c11c8650b5f Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Fri, 24 May 2024 12:52:35 -0700 Subject: [PATCH 19/37] Run msrv check for all crates (#1826) --- .github/workflows/ci.yml | 6 ++---- scripts/msrv.sh | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 4 deletions(-) create mode 100755 scripts/msrv.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 295c69bc6b..18a18f849d 100644 --- a/.github/workflows/ci.yml +++ 
b/.github/workflows/ci.yml @@ -110,10 +110,8 @@ jobs: - uses: dtolnay/rust-toolchain@1.65.0 - name: Patch dependencies versions # some dependencies bump MSRV without major version bump run: bash ./scripts/patch_dependencies.sh - - name: Run tests - run: cargo --version && - cargo test --manifest-path=opentelemetry/Cargo.toml --features trace,metrics,testing && - cargo test --manifest-path=opentelemetry-zipkin/Cargo.toml + - name: Check MSRV for all crates + run: bash ./scripts/msrv.sh cargo-deny: runs-on: ubuntu-latest # This uses the step `EmbarkStudios/cargo-deny-action@v1` which is only supported on Linux continue-on-error: true # Prevent sudden announcement of a new advisory from failing ci diff --git a/scripts/msrv.sh b/scripts/msrv.sh new file mode 100755 index 0000000000..eb9386cfa3 --- /dev/null +++ b/scripts/msrv.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -eu + +echo "Running msrv check for opentelemetry package" +cargo check --manifest-path=opentelemetry/Cargo.toml --all-features + +echo "Running msrv check for opentelemetry-sdk package" +cargo check --manifest-path=opentelemetry-sdk/Cargo.toml --all-features + +echo "Running msrv check for opentelemetry-stdout package" +cargo check --manifest-path=opentelemetry-stdout/Cargo.toml --all-features + +# TODO: Ignoring as this is failing with the following error: +# error: package `prost-derive v0.12.6` cannot be built because it requires rustc 1.70 or newer, while the currently active rustc version is 1.65.0 +#echo "Running msrv check for opentelemetry-otlp package" +# cargo check --manifest-path=opentelemetry-otlp/Cargo.toml --all-features + +echo "Running msrv check for opentelemetry-http package" +cargo check --manifest-path=opentelemetry-http/Cargo.toml --all-features + +echo "Running msrv check for opentelemetry-jaeger-propagator package" +cargo check --manifest-path=opentelemetry-jaeger-propagator/Cargo.toml --all-features + +echo "Running msrv check for opentelemetry-zipkin package" +cargo check 
--manifest-path=opentelemetry-zipkin/Cargo.toml --all-features + +echo "Running msrv check for opentelemetry-appender-log package" +cargo check --manifest-path=opentelemetry-appender-log/Cargo.toml --all-features + +echo "Running msrv check for opentelemetry-appender-tracing package" +cargo check --manifest-path=opentelemetry-appender-tracing/Cargo.toml --all-features + From 28f8dbca39896e94cb7c238bc3504aa71ec10f0f Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Fri, 24 May 2024 13:01:56 -0700 Subject: [PATCH 20/37] Add note about using newer protoc in contributing doc (#1824) --- CONTRIBUTING.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4d0d9e9e9b..fe44403566 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -37,6 +37,11 @@ You can provide the protocol compiler protoc path programmatically (only works w export PROTOC=$(which protoc) ``` +It is recommended to use "3.15" or newer of protoc, as some of the proto +definitions include "optional" fields, that are not supported in older versions, +resulting in errors as shown +[here](https://github.com/open-telemetry/opentelemetry-proto/issues/451). 
+ Prerequisites to build the protocol compiler protoc from source - [protoc](https://github.com/protocolbuffers/protobuf) From b1a80eed9fc65e339e6c636f4755515bcc2b098d Mon Sep 17 00:00:00 2001 From: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com> Date: Fri, 24 May 2024 13:22:29 -0700 Subject: [PATCH 21/37] OtlpTracePipeline fixed to no longer set global provider (#1812) Co-authored-by: Zhongyang Wu Co-authored-by: Cijo Thomas --- examples/tracing-jaeger/src/main.rs | 5 ++- opentelemetry-otlp/CHANGELOG.md | 2 + .../examples/basic-otlp-http/src/main.rs | 7 +++- .../examples/basic-otlp/src/main.rs | 6 ++- opentelemetry-otlp/src/lib.rs | 6 ++- opentelemetry-otlp/src/logs.rs | 8 ++-- opentelemetry-otlp/src/span.rs | 38 ++++++------------- .../tests/integration_test/tests/traces.rs | 8 ++-- opentelemetry-otlp/tests/smoke.rs | 9 ++++- 9 files changed, 44 insertions(+), 45 deletions(-) diff --git a/examples/tracing-jaeger/src/main.rs b/examples/tracing-jaeger/src/main.rs index 9a76fca48f..5ddb5a843c 100644 --- a/examples/tracing-jaeger/src/main.rs +++ b/examples/tracing-jaeger/src/main.rs @@ -10,7 +10,7 @@ use opentelemetry_semantic_conventions::resource::SERVICE_NAME; use std::error::Error; -fn init_tracer() -> Result { +fn init_tracer_provider() -> Result { opentelemetry_otlp::new_pipeline() .tracing() .with_exporter( @@ -29,7 +29,8 @@ fn init_tracer() -> Result { #[tokio::main] async fn main() -> Result<(), Box> { - let _tracer = init_tracer().expect("Failed to initialize tracer."); + let tracer_provider = init_tracer_provider().expect("Failed to initialize tracer provider."); + global::set_tracer_provider(tracer_provider.clone()); let tracer = global::tracer("tracing-jaeger"); tracer.in_span("main-operation", |cx| { diff --git a/opentelemetry-otlp/CHANGELOG.md b/opentelemetry-otlp/CHANGELOG.md index 07213b96aa..a2a598f4ac 100644 --- a/opentelemetry-otlp/CHANGELOG.md +++ b/opentelemetry-otlp/CHANGELOG.md @@ -12,6 +12,8 @@ Instead of using 
`.with_config(Config::default().with_resource(RESOURCE::default()))` users must now use `.with_resource(RESOURCE::default())` to configure Resource when using `OtlpLogPipeline`. +- **Breaking** The methods `OtlpTracePipeline::install_simple()` and `OtlpTracePipeline::install_batch()` would now return `TracerProvider` instead of `Tracer`. + These methods would also no longer set the global tracer provider. It would now be the responsibility of users to set it by calling `global::set_tracer_provider(tracer_provider.clone());`. Refer to the [basic-otlp](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-otlp/examples/basic-otlp/src/main.rs) and [basic-otlp-http](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs) examples on how to initialize OTLP Trace Exporter. ## v0.16.0 diff --git a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs index 4bb3f77c8f..c237c484e0 100644 --- a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs @@ -37,7 +37,7 @@ fn init_logs() -> Result .install_batch(opentelemetry_sdk::runtime::Tokio) } -fn init_tracer() -> Result { +fn init_tracer_provider() -> Result { opentelemetry_otlp::new_pipeline() .tracing() .with_exporter( @@ -67,13 +67,16 @@ fn init_metrics() -> Result Result<(), Box> { - let result = init_tracer(); + let result = init_tracer_provider(); assert!( result.is_ok(), "Init tracer failed with error: {:?}", result.err() ); + let tracer_provider = result.unwrap(); + global::set_tracer_provider(tracer_provider.clone()); + let result = init_metrics(); assert!( result.is_ok(), diff --git a/opentelemetry-otlp/examples/basic-otlp/src/main.rs b/opentelemetry-otlp/examples/basic-otlp/src/main.rs index 50c95723fc..2b12c11212 100644 --- a/opentelemetry-otlp/examples/basic-otlp/src/main.rs +++ 
b/opentelemetry-otlp/examples/basic-otlp/src/main.rs @@ -22,7 +22,7 @@ static RESOURCE: Lazy = Lazy::new(|| { )]) }); -fn init_tracer() -> Result { +fn init_tracer_provider() -> Result { opentelemetry_otlp::new_pipeline() .tracing() .with_exporter( @@ -72,12 +72,14 @@ async fn main() -> Result<(), Box> { // matches the containing block, reporting traces and metrics during the whole // execution. - let result = init_tracer(); + let result = init_tracer_provider(); assert!( result.is_ok(), "Init tracer failed with error: {:?}", result.err() ); + let tracer_provider = result.unwrap(); + global::set_tracer_provider(tracer_provider.clone()); let result = init_metrics(); assert!( diff --git a/opentelemetry-otlp/src/lib.rs b/opentelemetry-otlp/src/lib.rs index 76e3493dab..a3d9b02e2d 100644 --- a/opentelemetry-otlp/src/lib.rs +++ b/opentelemetry-otlp/src/lib.rs @@ -120,7 +120,7 @@ //! on the choice of exporters. //! //! ```no_run -//! use opentelemetry::{KeyValue, trace::Tracer}; +//! use opentelemetry::{global, KeyValue, trace::Tracer}; //! use opentelemetry_sdk::{trace::{self, RandomIdGenerator, Sampler}, Resource}; //! # #[cfg(feature = "metrics")] //! use opentelemetry_sdk::metrics::reader::{DefaultAggregationSelector, DefaultTemporalitySelector}; @@ -138,7 +138,7 @@ //! map.insert("x-number", "123".parse().unwrap()); //! map.insert_bin("trace-proto-bin", MetadataValue::from_bytes(b"[binary data]")); //! -//! let tracer = opentelemetry_otlp::new_pipeline() +//! let tracer_provider = opentelemetry_otlp::new_pipeline() //! .tracing() //! .with_exporter( //! opentelemetry_otlp::new_exporter() @@ -157,6 +157,8 @@ //! .with_resource(Resource::new(vec![KeyValue::new("service.name", "example")])), //! ) //! .install_batch(opentelemetry_sdk::runtime::Tokio)?; +//! global::set_tracer_provider(tracer_provider); +//! let tracer = global::tracer("tracer-name"); //! # tracer //! # }; //! 
diff --git a/opentelemetry-otlp/src/logs.rs b/opentelemetry-otlp/src/logs.rs index 72e3b352ec..3f21697fb0 100644 --- a/opentelemetry-otlp/src/logs.rs +++ b/opentelemetry-otlp/src/logs.rs @@ -151,9 +151,9 @@ impl OtlpLogPipeline { impl OtlpLogPipeline { /// Install the configured log exporter. /// - /// Returns a [`Logger`] with the name `opentelemetry-otlp` and the current crate version. + /// Returns a [`LoggerProvider`]. /// - /// [`Logger`]: opentelemetry_sdk::logs::Logger + /// [`LoggerProvider`]: opentelemetry_sdk::logs::LoggerProvider pub fn install_simple(self) -> Result { Ok(build_simple_with_exporter( self.exporter_builder.build_log_exporter()?, @@ -164,9 +164,9 @@ impl OtlpLogPipeline { /// Install the configured log exporter and a batch log processor using the /// specified runtime. /// - /// Returns a [`Logger`] with the name `opentelemetry-otlp` and the current crate version. + /// Returns a [`LoggerProvider`]. /// - /// [`Logger`]: opentelemetry_sdk::logs::Logger + /// [`LoggerProvider`]: opentelemetry_sdk::logs::LoggerProvider pub fn install_batch( self, runtime: R, diff --git a/opentelemetry-otlp/src/span.rs b/opentelemetry-otlp/src/span.rs index 4b658a89e2..ba70f0825e 100644 --- a/opentelemetry-otlp/src/span.rs +++ b/opentelemetry-otlp/src/span.rs @@ -5,10 +5,7 @@ use std::fmt::Debug; use futures_core::future::BoxFuture; -use opentelemetry::{ - global, - trace::{TraceError, TracerProvider}, -}; +use opentelemetry::trace::TraceError; use opentelemetry_sdk::{ self as sdk, export::trace::{ExportResult, SpanData}, @@ -99,10 +96,10 @@ impl OtlpTracePipeline { impl OtlpTracePipeline { /// Install the configured span exporter. /// - /// Returns a [`Tracer`] with the name `opentelemetry-otlp` and current crate version. + /// Returns a [`TracerProvider`]. 
/// - /// [`Tracer`]: opentelemetry::trace::Tracer - pub fn install_simple(self) -> Result { + /// [`TracerProvider`]: opentelemetry::trace::TracerProvider + pub fn install_simple(self) -> Result { Ok(build_simple_with_exporter( self.exporter_builder.build_span_exporter()?, self.trace_config, @@ -112,15 +109,15 @@ impl OtlpTracePipeline { /// Install the configured span exporter and a batch span processor using the /// specified runtime. /// - /// Returns a [`Tracer`] with the name `opentelemetry-otlp` and current crate version. + /// Returns a [`TracerProvider`]. /// /// `install_batch` will panic if not called within a tokio runtime /// - /// [`Tracer`]: opentelemetry::trace::Tracer + /// [`TracerProvider`]: opentelemetry::trace::TracerProvider pub fn install_batch( self, runtime: R, - ) -> Result { + ) -> Result { Ok(build_batch_with_exporter( self.exporter_builder.build_span_exporter()?, self.trace_config, @@ -133,18 +130,13 @@ impl OtlpTracePipeline { fn build_simple_with_exporter( exporter: SpanExporter, trace_config: Option, -) -> sdk::trace::Tracer { +) -> sdk::trace::TracerProvider { let mut provider_builder = sdk::trace::TracerProvider::builder().with_simple_exporter(exporter); if let Some(config) = trace_config { provider_builder = provider_builder.with_config(config); } - let provider = provider_builder.build(); - let tracer = provider - .tracer_builder("opentelemetry-otlp") - .with_version(env!("CARGO_PKG_VERSION")) - .build(); - let _ = global::set_tracer_provider(provider); - tracer + + provider_builder.build() } fn build_batch_with_exporter( @@ -152,7 +144,7 @@ fn build_batch_with_exporter( trace_config: Option, runtime: R, batch_config: Option, -) -> sdk::trace::Tracer { +) -> sdk::trace::TracerProvider { let mut provider_builder = sdk::trace::TracerProvider::builder(); let batch_processor = sdk::trace::BatchSpanProcessor::builder(exporter, runtime) .with_batch_config(batch_config.unwrap_or_default()) @@ -162,13 +154,7 @@ fn 
build_batch_with_exporter( if let Some(config) = trace_config { provider_builder = provider_builder.with_config(config); } - let provider = provider_builder.build(); - let tracer = provider - .tracer_builder("opentelemetry-otlp") - .with_version(env!("CARGO_PKG_VERSION")) - .build(); - let _ = global::set_tracer_provider(provider); - tracer + provider_builder.build() } /// OTLP span exporter builder. diff --git a/opentelemetry-otlp/tests/integration_test/tests/traces.rs b/opentelemetry-otlp/tests/integration_test/tests/traces.rs index e68e0d36a7..0c357e26b8 100644 --- a/opentelemetry-otlp/tests/integration_test/tests/traces.rs +++ b/opentelemetry-otlp/tests/integration_test/tests/traces.rs @@ -13,7 +13,7 @@ use std::error::Error; use std::fs::File; use std::os::unix::fs::MetadataExt; -fn init_tracer() -> Result { +fn init_tracer_provider() -> Result { opentelemetry_otlp::new_pipeline() .tracing() .with_exporter(opentelemetry_otlp::new_exporter().tonic()) @@ -30,10 +30,8 @@ const LEMONS_KEY: Key = Key::from_static_str("lemons"); const ANOTHER_KEY: Key = Key::from_static_str("ex.com/another"); pub async fn traces() -> Result<(), Box> { - // By binding the result to an unused variable, the lifetime of the variable - // matches the containing block, reporting traces and metrics during the whole - // execution. 
- let _ = init_tracer()?; + let tracer_provider = init_tracer_provider().expect("Failed to initialize tracer provider."); + global::set_tracer_provider(tracer_provider.clone()); let tracer = global::tracer("ex.com/basic"); diff --git a/opentelemetry-otlp/tests/smoke.rs b/opentelemetry-otlp/tests/smoke.rs index ba2d69837c..c217f8f9d6 100644 --- a/opentelemetry-otlp/tests/smoke.rs +++ b/opentelemetry-otlp/tests/smoke.rs @@ -1,4 +1,5 @@ use futures_util::StreamExt; +use opentelemetry::global; use opentelemetry::global::shutdown_tracer_provider; use opentelemetry::trace::{Span, SpanKind, Tracer}; use opentelemetry_otlp::WithExportConfig; @@ -80,10 +81,10 @@ async fn smoke_tracer() { let (addr, mut req_rx) = setup().await; { - println!("Installing tracer..."); + println!("Installing tracer provider..."); let mut metadata = tonic::metadata::MetadataMap::new(); metadata.insert("x-header-key", "header-value".parse().unwrap()); - let tracer = opentelemetry_otlp::new_pipeline() + let tracer_provider = opentelemetry_otlp::new_pipeline() .tracing() .with_exporter( #[cfg(feature = "gzip-tonic")] @@ -101,6 +102,10 @@ async fn smoke_tracer() { .install_batch(opentelemetry_sdk::runtime::Tokio) .expect("failed to install"); + global::set_tracer_provider(tracer_provider); + + let tracer = global::tracer("smoke"); + println!("Sending span..."); let mut span = tracer .span_builder("my-test-span") From e699233bcc4e8e7e00d4963652538c4d7174f432 Mon Sep 17 00:00:00 2001 From: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com> Date: Fri, 24 May 2024 14:44:21 -0700 Subject: [PATCH 22/37] Fix typos (#1828) Co-authored-by: Cijo Thomas --- opentelemetry-jaeger-propagator/src/propagator.rs | 2 +- opentelemetry-sdk/benches/metric_counter.rs | 12 ++++++------ opentelemetry-sdk/benches/metric_gauge.rs | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/opentelemetry-jaeger-propagator/src/propagator.rs b/opentelemetry-jaeger-propagator/src/propagator.rs index 
4f5491d932..4dbd0b16fc 100644 --- a/opentelemetry-jaeger-propagator/src/propagator.rs +++ b/opentelemetry-jaeger-propagator/src/propagator.rs @@ -138,7 +138,7 @@ impl Propagator { /// First bit controls whether to sample /// Second bit controls whether it's a debug trace /// Third bit is not used. - /// Forth bit is firehose flag, which is not supported in OT now. + /// Fourth bit is firehose flag, which is not supported in OT now. fn extract_trace_flags(&self, flag: &str) -> Result { if flag.len() > 2 { return Err(()); diff --git a/opentelemetry-sdk/benches/metric_counter.rs b/opentelemetry-sdk/benches/metric_counter.rs index 8962daa125..d3b7bb1a65 100644 --- a/opentelemetry-sdk/benches/metric_counter.rs +++ b/opentelemetry-sdk/benches/metric_counter.rs @@ -66,14 +66,14 @@ fn counter_add(c: &mut Criterion) { let index_first_attribute = rands[0]; let index_second_attribute = rands[1]; let index_third_attribute = rands[2]; - let index_forth_attribute = rands[3]; + let index_fourth_attribute = rands[3]; counter.add( 1, &[ KeyValue::new("attribute1", attribute_values[index_first_attribute]), KeyValue::new("attribute2", attribute_values[index_second_attribute]), KeyValue::new("attribute3", attribute_values[index_third_attribute]), - KeyValue::new("attribute4", attribute_values[index_forth_attribute]), + KeyValue::new("attribute4", attribute_values[index_fourth_attribute]), ], ); }); @@ -94,14 +94,14 @@ fn counter_add(c: &mut Criterion) { let index_first_attribute = rands[0]; let index_second_attribute = rands[1]; let index_third_attribute = rands[2]; - let index_forth_attribute = rands[3]; + let index_fourth_attribute = rands[3]; counter.add( 1, &[ KeyValue::new("attribute2", attribute_values[index_second_attribute]), KeyValue::new("attribute3", attribute_values[index_third_attribute]), KeyValue::new("attribute1", attribute_values[index_first_attribute]), - KeyValue::new("attribute4", attribute_values[index_forth_attribute]), + KeyValue::new("attribute4", 
attribute_values[index_fourth_attribute]), ], ); }); @@ -126,14 +126,14 @@ fn counter_add(c: &mut Criterion) { let index_first_attribute = rands[0]; let index_second_attribute = rands[1]; let index_third_attribute = rands[2]; - let index_forth_attribute = rands[3]; + let index_fourth_attribute = rands[3]; counter.add( 1, &[ KeyValue::new("attribute1", attribute_values[index_first_attribute]), KeyValue::new("attribute2", attribute_values[index_second_attribute]), KeyValue::new("attribute3", attribute_values[index_third_attribute]), - KeyValue::new("attribute4", attribute_values[index_forth_attribute]), + KeyValue::new("attribute4", attribute_values[index_fourth_attribute]), ], ); }); diff --git a/opentelemetry-sdk/benches/metric_gauge.rs b/opentelemetry-sdk/benches/metric_gauge.rs index 3217480760..e455c5a577 100644 --- a/opentelemetry-sdk/benches/metric_gauge.rs +++ b/opentelemetry-sdk/benches/metric_gauge.rs @@ -63,14 +63,14 @@ fn gauge_record(c: &mut Criterion) { let index_first_attribute = rands[0]; let index_second_attribute = rands[1]; let index_third_attribute = rands[2]; - let index_forth_attribute = rands[3]; + let index_fourth_attribute = rands[3]; gauge.record( 1, &[ KeyValue::new("attribute1", attribute_values[index_first_attribute]), KeyValue::new("attribute2", attribute_values[index_second_attribute]), KeyValue::new("attribute3", attribute_values[index_third_attribute]), - KeyValue::new("attribute4", attribute_values[index_forth_attribute]), + KeyValue::new("attribute4", attribute_values[index_fourth_attribute]), ], ); }); From 91397fc64f382696fec08ac0faa2f3332f747e9b Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Sat, 25 May 2024 09:50:28 -0700 Subject: [PATCH 23/37] Add memory and cpu stats to Stress test (#1832) --- stress/Cargo.toml | 4 ++++ stress/README.md | 8 ++++++++ stress/src/throughput.rs | 30 +++++++++++++++++++++++++++++- 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/stress/Cargo.toml b/stress/Cargo.toml index 
b28a367350..6520e27ca0 100644 --- a/stress/Cargo.toml +++ b/stress/Cargo.toml @@ -41,3 +41,7 @@ tracing = { workspace = true, features = ["std"]} tracing-core = { workspace = true } tracing-subscriber = { workspace = true, features = ["registry", "std"] } num-format = "0.4.4" +sysinfo = { version = "0.30.12", optional = true } + +[features] +stats = ["sysinfo"] \ No newline at end of file diff --git a/stress/README.md b/stress/README.md index 975fd6f0af..16832c3bdd 100644 --- a/stress/README.md +++ b/stress/README.md @@ -36,3 +36,11 @@ Throughput: 3,905,200 iterations/sec Throughput: 4,106,600 iterations/sec Throughput: 5,075,400 iterations/sec ``` + +## Feature flags + +"stats" - Prints memory and CPU usage. Has slight impact on throughput. + +```sh +cargo run --release --bin metrics --feature=stats +``` diff --git a/stress/src/throughput.rs b/stress/src/throughput.rs index 821a5eff74..95f233b33a 100644 --- a/stress/src/throughput.rs +++ b/stress/src/throughput.rs @@ -3,6 +3,8 @@ use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::Arc; use std::thread; use std::time::{Duration, Instant}; +#[cfg(feature = "stats")] +use sysinfo::{Pid, System}; const SLIDING_WINDOW_SIZE: u64 = 2; // In seconds const BATCH_SIZE: u64 = 1000; @@ -27,7 +29,7 @@ where }) .expect("Error setting Ctrl-C handler"); let num_threads = num_cpus::get(); - println!("Number of threads: {}", num_threads); + println!("Number of threads: {}\n", num_threads); let mut handles = Vec::with_capacity(num_threads); let func_arc = Arc::new(func); let mut worker_stats_vec: Vec = Vec::new(); @@ -42,6 +44,12 @@ where let mut start_time = Instant::now(); let mut end_time = start_time; let mut total_count_old: u64 = 0; + + #[cfg(feature = "stats")] + let pid = Pid::from(std::process::id() as usize); + #[cfg(feature = "stats")] + let mut system = System::new_all(); + loop { let elapsed = end_time.duration_since(start_time).as_secs(); if elapsed >= SLIDING_WINDOW_SIZE { @@ -56,6 +64,26 @@ where 
"Throughput: {} iterations/sec", throughput.to_formatted_string(&Locale::en) ); + + #[cfg(feature = "stats")] + { + system.refresh_all(); + if let Some(process) = system.process(pid) { + println!( + "Memory usage: {:.2} MB", + process.memory() as f64 / (1024.0 * 1024.0) + ); + println!("CPU usage: {}%", process.cpu_usage() / num_threads as f32); + println!( + "Virtual memory usage: {:.2} MB", + process.virtual_memory() as f64 / (1024.0 * 1024.0) + ); + } else { + println!("Process not found"); + } + } + + println!("\n"); start_time = Instant::now(); } From 0ce6a6dc36229e6cdfaf64d01ab19810da387506 Mon Sep 17 00:00:00 2001 From: Zhongyang Wu Date: Tue, 28 May 2024 07:04:06 -0700 Subject: [PATCH 24/37] fix: make `shutdown` `&self` in span processor (#1836) --- opentelemetry-sdk/CHANGELOG.md | 2 ++ opentelemetry-sdk/src/trace/provider.rs | 2 +- opentelemetry-sdk/src/trace/span_processor.rs | 16 ++++++++-------- stress/src/traces.rs | 2 +- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 8460c89133..153ba72064 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -26,6 +26,8 @@ asynchronously, it should clone the log data to ensure it can be safely processed without lifetime issues. +- **Breaking** [1836](https://github.com/open-telemetry/opentelemetry-rust/pull/1836) `SpanProcessor::shutdown` now takes an immutable reference to self. Any reference can call shutdown on the processor. After the first call to `shutdown` the processor will not process any new spans. + ## v0.23.0 - Fix SimpleSpanProcessor to be consistent with log counterpart. 
Also removed diff --git a/opentelemetry-sdk/src/trace/provider.rs b/opentelemetry-sdk/src/trace/provider.rs index 1fbe22d628..1c6342d3b9 100644 --- a/opentelemetry-sdk/src/trace/provider.rs +++ b/opentelemetry-sdk/src/trace/provider.rs @@ -264,7 +264,7 @@ mod tests { } } - fn shutdown(&mut self) -> TraceResult<()> { + fn shutdown(&self) -> TraceResult<()> { self.force_flush() } } diff --git a/opentelemetry-sdk/src/trace/span_processor.rs b/opentelemetry-sdk/src/trace/span_processor.rs index ac24cff1b0..86381119b0 100644 --- a/opentelemetry-sdk/src/trace/span_processor.rs +++ b/opentelemetry-sdk/src/trace/span_processor.rs @@ -91,7 +91,7 @@ pub trait SpanProcessor: Send + Sync + std::fmt::Debug { fn force_flush(&self) -> TraceResult<()>; /// Shuts down the processor. Called when SDK is shut down. This is an /// opportunity for processors to do any cleanup required. - fn shutdown(&mut self) -> TraceResult<()>; + fn shutdown(&self) -> TraceResult<()>; } /// A [SpanProcessor] that passes finished spans to the configured @@ -137,7 +137,7 @@ impl SpanProcessor for SimpleSpanProcessor { Ok(()) } - fn shutdown(&mut self) -> TraceResult<()> { + fn shutdown(&self) -> TraceResult<()> { if let Ok(mut exporter) = self.exporter.lock() { exporter.shutdown(); Ok(()) @@ -249,7 +249,7 @@ impl SpanProcessor for BatchSpanProcessor { .and_then(|identity| identity) } - fn shutdown(&mut self) -> TraceResult<()> { + fn shutdown(&self) -> TraceResult<()> { let (res_sender, res_receiver) = oneshot::channel(); self.message_sender .try_send(BatchMessage::Shutdown(res_sender)) @@ -687,7 +687,7 @@ mod tests { #[test] fn simple_span_processor_on_end_calls_export() { let exporter = InMemorySpanExporterBuilder::new().build(); - let mut processor = SimpleSpanProcessor::new(Box::new(exporter.clone())); + let processor = SimpleSpanProcessor::new(Box::new(exporter.clone())); let span_data = new_test_export_span_data(); processor.on_end(span_data.clone()); 
assert_eq!(exporter.get_finished_spans().unwrap()[0], span_data); @@ -720,7 +720,7 @@ mod tests { #[test] fn simple_span_processor_shutdown_calls_shutdown() { let exporter = InMemorySpanExporterBuilder::new().build(); - let mut processor = SimpleSpanProcessor::new(Box::new(exporter.clone())); + let processor = SimpleSpanProcessor::new(Box::new(exporter.clone())); let span_data = new_test_export_span_data(); processor.on_end(span_data.clone()); assert!(!exporter.get_finished_spans().unwrap().is_empty()); @@ -876,7 +876,7 @@ mod tests { scheduled_delay: Duration::from_secs(60 * 60 * 24), // set the tick to 24 hours so we know the span must be exported via force_flush ..Default::default() }; - let mut processor = + let processor = BatchSpanProcessor::new(Box::new(exporter), config, runtime::TokioCurrentThread); let handle = tokio::spawn(async move { loop { @@ -976,7 +976,7 @@ mod tests { delay_for: Duration::from_millis(if !time_out { 5 } else { 60 }), delay_fn: async_std::task::sleep, }; - let mut processor = BatchSpanProcessor::new(Box::new(exporter), config, runtime::AsyncStd); + let processor = BatchSpanProcessor::new(Box::new(exporter), config, runtime::AsyncStd); processor.on_end(new_test_export_span_data()); let flush_res = processor.force_flush(); if time_out { @@ -1000,7 +1000,7 @@ mod tests { delay_for: Duration::from_millis(if !time_out { 5 } else { 60 }), delay_fn: tokio::time::sleep, }; - let mut processor = + let processor = BatchSpanProcessor::new(Box::new(exporter), config, runtime::TokioCurrentThread); tokio::time::sleep(Duration::from_secs(1)).await; // skip the first processor.on_end(new_test_export_span_data()); diff --git a/stress/src/traces.rs b/stress/src/traces.rs index 0dd992f708..9f9065d1a5 100644 --- a/stress/src/traces.rs +++ b/stress/src/traces.rs @@ -42,7 +42,7 @@ impl SpanProcessor for NoOpSpanProcessor { Ok(()) } - fn shutdown(&mut self) -> TraceResult<()> { + fn shutdown(&self) -> TraceResult<()> { Ok(()) } } From 
14c038d5bf6a5c276bc66b0cc54598a780e10420 Mon Sep 17 00:00:00 2001 From: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com> Date: Tue, 28 May 2024 17:18:39 -0700 Subject: [PATCH 25/37] Fix external-types check (#1838) --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 18a18f849d..4332693776 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -71,7 +71,7 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@nightly with: - toolchain: nightly-2024-02-07 + toolchain: nightly-2024-05-01 components: rustfmt - name: external-type-check run: | From 6ee55798384e78b60326d115a24fb9cd43491ae8 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Tue, 28 May 2024 18:12:16 -0700 Subject: [PATCH 26/37] Add tests for Histogram metrics (#1834) --- opentelemetry-sdk/src/metrics/mod.rs | 89 ++++++++++++++++++- stress/Cargo.toml | 9 +- stress/src/{metrics.rs => metrics_counter.rs} | 0 stress/src/metrics_histogram.rs | 69 ++++++++++++++ 4 files changed, 164 insertions(+), 3 deletions(-) rename stress/src/{metrics.rs => metrics_counter.rs} (100%) create mode 100644 stress/src/metrics_histogram.rs diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index ca5076763d..03ee78b05f 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -139,7 +139,7 @@ impl Hash for AttributeSet { #[cfg(all(test, feature = "testing"))] mod tests { - use self::data::{DataPoint, ScopeMetrics}; + use self::data::{DataPoint, HistogramDataPoint, ScopeMetrics}; use super::*; use crate::metrics::data::{ResourceMetrics, Temporality}; use crate::metrics::reader::TemporalitySelector; @@ -147,6 +147,7 @@ mod tests { use crate::{runtime, testing::metrics::InMemoryMetricsExporter}; use opentelemetry::metrics::{Counter, Meter, UpDownCounter}; use opentelemetry::{metrics::MeterProvider as _, KeyValue}; + 
use rand::{rngs, Rng, SeedableRng}; use std::borrow::Cow; use std::sync::{Arc, Mutex}; @@ -199,6 +200,20 @@ mod tests { counter_aggregation_helper(Temporality::Delta); } + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn histogram_aggregation_cumulative() { + // Run this test with stdout enabled to see output. + // cargo test histogram_aggregation_cumulative --features=metrics,testing -- --nocapture + histogram_aggregation_helper(Temporality::Cumulative); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn histogram_aggregation_delta() { + // Run this test with stdout enabled to see output. + // cargo test histogram_aggregation_delta --features=metrics,testing -- --nocapture + histogram_aggregation_helper(Temporality::Delta); + } + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn updown_counter_aggregation_cumulative() { // Run this test with stdout enabled to see output. @@ -1007,6 +1022,65 @@ mod tests { assert!(resource_metrics.is_empty(), "No metrics should be exported as no new measurements were recorded since last collect."); } + fn histogram_aggregation_helper(temporality: Temporality) { + // Arrange + let mut test_context = TestContext::new(temporality); + let histogram = test_context.meter().u64_histogram("my_histogram").init(); + + // Act + let mut rand = rngs::SmallRng::from_entropy(); + let values_kv1 = (0..50) + .map(|_| rand.gen_range(0..100)) + .collect::>(); + for value in values_kv1.iter() { + histogram.record(*value, &[KeyValue::new("key1", "value1")]); + } + + let values_kv2 = (0..30) + .map(|_| rand.gen_range(0..100)) + .collect::>(); + for value in values_kv2.iter() { + histogram.record(*value, &[KeyValue::new("key1", "value2")]); + } + + test_context.flush_metrics(); + + // Assert + let histogram = test_context.get_aggregation::>("my_histogram", None); + // Expecting 2 time-series. 
+ assert_eq!(histogram.data_points.len(), 2); + if let Temporality::Cumulative = temporality { + assert_eq!( + histogram.temporality, + Temporality::Cumulative, + "Should produce cumulative" + ); + } else { + assert_eq!( + histogram.temporality, + Temporality::Delta, + "Should produce delta" + ); + } + + // find and validate key1=value2 datapoint + let data_point1 = + find_histogram_datapoint_with_key_value(&histogram.data_points, "key1", "value1") + .expect("datapoint with key1=value1 expected"); + assert_eq!(data_point1.count, values_kv1.len() as u64); + assert_eq!(data_point1.sum, values_kv1.iter().sum::()); + assert_eq!(data_point1.min.unwrap(), *values_kv1.iter().min().unwrap()); + assert_eq!(data_point1.max.unwrap(), *values_kv1.iter().max().unwrap()); + + let data_point2 = + find_histogram_datapoint_with_key_value(&histogram.data_points, "key1", "value2") + .expect("datapoint with key1=value2 expected"); + assert_eq!(data_point2.count, values_kv2.len() as u64); + assert_eq!(data_point2.sum, values_kv2.iter().sum::()); + assert_eq!(data_point2.min.unwrap(), *values_kv2.iter().min().unwrap()); + assert_eq!(data_point2.max.unwrap(), *values_kv2.iter().max().unwrap()); + } + fn counter_aggregation_helper(temporality: Temporality) { // Arrange let mut test_context = TestContext::new(temporality); @@ -1109,6 +1183,19 @@ mod tests { }) } + fn find_histogram_datapoint_with_key_value<'a, T>( + data_points: &'a [HistogramDataPoint], + key: &str, + value: &str, + ) -> Option<&'a HistogramDataPoint> { + data_points.iter().find(|&datapoint| { + datapoint + .attributes + .iter() + .any(|kv| kv.key.as_str() == key && kv.value.as_str() == value) + }) + } + fn find_scope_metric<'a>( metrics: &'a [ScopeMetrics], name: &'a str, diff --git a/stress/Cargo.toml b/stress/Cargo.toml index 6520e27ca0..43aa8adee7 100644 --- a/stress/Cargo.toml +++ b/stress/Cargo.toml @@ -4,9 +4,14 @@ version = "0.1.0" edition = "2021" publish = false -[[bin]] # Bin to run the metrics stress tests 
+[[bin]] # Bin to run the metrics stress tests for Counter name = "metrics" -path = "src/metrics.rs" +path = "src/metrics_counter.rs" +doc = false + +[[bin]] # Bin to run the metrics stress tests for Histogram +name = "metrics_histogram" +path = "src/metrics_histogram.rs" doc = false [[bin]] # Bin to run the metrics overflow stress tests diff --git a/stress/src/metrics.rs b/stress/src/metrics_counter.rs similarity index 100% rename from stress/src/metrics.rs rename to stress/src/metrics_counter.rs diff --git a/stress/src/metrics_histogram.rs b/stress/src/metrics_histogram.rs new file mode 100644 index 0000000000..8b7690b5ae --- /dev/null +++ b/stress/src/metrics_histogram.rs @@ -0,0 +1,69 @@ +/* + Stress test results: + OS: Ubuntu 22.04.3 LTS (5.15.146.1-microsoft-standard-WSL2) + Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, + RAM: 64.0 GB + 4.6M /sec +*/ + +use lazy_static::lazy_static; +use opentelemetry::{ + metrics::{Histogram, MeterProvider as _}, + KeyValue, +}; +use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider}; +use rand::{ + rngs::{self}, + Rng, SeedableRng, +}; +use std::{borrow::Cow, cell::RefCell}; + +mod throughput; + +lazy_static! { + static ref PROVIDER: SdkMeterProvider = SdkMeterProvider::builder() + .with_reader(ManualReader::builder().build()) + .build(); + static ref ATTRIBUTE_VALUES: [&'static str; 10] = [ + "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9", + "value10" + ]; + static ref HISTOGRAM: Histogram = PROVIDER + .meter(<&str as Into>>::into("test")) + .u64_histogram("hello") + .init(); +} + +thread_local! 
{ + /// Store random number generator for each thread + static CURRENT_RNG: RefCell = RefCell::new(rngs::SmallRng::from_entropy()); +} + +fn main() { + throughput::test_throughput(test_counter); +} + +fn test_counter() { + let len = ATTRIBUTE_VALUES.len(); + let rands = CURRENT_RNG.with(|rng| { + let mut rng = rng.borrow_mut(); + [ + rng.gen_range(0..len), + rng.gen_range(0..len), + rng.gen_range(0..len), + ] + }); + let index_first_attribute = rands[0]; + let index_second_attribute = rands[1]; + let index_third_attribute = rands[2]; + + // each attribute has 10 possible values, so there are 1000 possible combinations (time-series) + HISTOGRAM.record( + 1, + &[ + KeyValue::new("attribute1", ATTRIBUTE_VALUES[index_first_attribute]), + KeyValue::new("attribute2", ATTRIBUTE_VALUES[index_second_attribute]), + KeyValue::new("attribute3", ATTRIBUTE_VALUES[index_third_attribute]), + ], + ); +} From 0f6de5a946774f032578635565d889d57ad1cdf9 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Tue, 28 May 2024 18:26:50 -0700 Subject: [PATCH 27/37] Metrics Aggregation - Improve throughput by 10x (#1833) Co-authored-by: Zhongyang Wu --- opentelemetry-sdk/CHANGELOG.md | 2 + opentelemetry-sdk/src/metrics/internal/sum.rs | 64 +++++++++++-------- stress/src/metrics_counter.rs | 2 +- 3 files changed, 40 insertions(+), 28 deletions(-) diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 153ba72064..ff0fa8cdb8 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -13,6 +13,8 @@ - Removed `XrayIdGenerator`, which was marked deprecated since 0.21.3. Use [`opentelemetry-aws`](https://crates.io/crates/opentelemetry-aws), version 0.10.0 or newer. +- Performance Improvement - Counter/UpDownCounter instruments internally use + `RwLock` instead of `Mutex` to reduce contention. - **Breaking** [1726](https://github.com/open-telemetry/opentelemetry-rust/pull/1726) Update `LogProcessor::emit() method to take mutable reference to LogData. 
This is breaking diff --git a/opentelemetry-sdk/src/metrics/internal/sum.rs b/opentelemetry-sdk/src/metrics/internal/sum.rs index 27710b0743..19d008c366 100644 --- a/opentelemetry-sdk/src/metrics/internal/sum.rs +++ b/opentelemetry-sdk/src/metrics/internal/sum.rs @@ -1,8 +1,8 @@ use std::sync::atomic::{AtomicBool, Ordering}; use std::vec; use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Mutex, + collections::HashMap, + sync::{Mutex, RwLock}, time::SystemTime, }; @@ -18,7 +18,7 @@ use super::{ /// The storage for sums. struct ValueMap> { - values: Mutex>, + values: RwLock>, has_no_value_attribute_value: AtomicBool, no_attribute_value: T::AtomicTracker, } @@ -32,7 +32,7 @@ impl> Default for ValueMap { impl> ValueMap { fn new() -> Self { ValueMap { - values: Mutex::new(HashMap::new()), + values: RwLock::new(HashMap::new()), has_no_value_attribute_value: AtomicBool::new(false), no_attribute_value: T::new_atomic_tracker(), } @@ -45,21 +45,31 @@ impl> ValueMap { self.no_attribute_value.add(measurement); self.has_no_value_attribute_value .store(true, Ordering::Release); - } else if let Ok(mut values) = self.values.lock() { - let size = values.len(); - match values.entry(attrs) { - Entry::Occupied(mut occupied_entry) => { - let sum = occupied_entry.get_mut(); - *sum += measurement; - } - Entry::Vacant(vacant_entry) => { - if is_under_cardinality_limit(size) { - vacant_entry.insert(measurement); - } else if let Some(val) = values.get_mut(&STREAM_OVERFLOW_ATTRIBUTE_SET) { - *val += measurement; + } else if let Ok(values) = self.values.read() { + if let Some(value_to_update) = values.get(&attrs) { + value_to_update.add(measurement); + return; + } else { + drop(values); + if let Ok(mut values) = self.values.write() { + // Recheck after acquiring write lock, in case another + // thread has added the value. 
+ if let Some(value_to_update) = values.get(&attrs) { + value_to_update.add(measurement); + return; + } else if is_under_cardinality_limit(values.len()) { + let new_value = T::new_atomic_tracker(); + new_value.add(measurement); + values.insert(attrs, new_value); + } else if let Some(overflow_value) = + values.get_mut(&STREAM_OVERFLOW_ATTRIBUTE_SET) + { + overflow_value.add(measurement); return; } else { - values.insert(STREAM_OVERFLOW_ATTRIBUTE_SET.clone(), measurement); + let new_value = T::new_atomic_tracker(); + new_value.add(measurement); + values.insert(STREAM_OVERFLOW_ATTRIBUTE_SET.clone(), new_value); global::handle_error(MetricsError::Other("Warning: Maximum data points for metric stream exceeded. Entry added to overflow. Subsequent overflows to same metric until next collect will not be logged.".into())); } } @@ -114,7 +124,7 @@ impl> Sum { s_data.is_monotonic = self.monotonic; s_data.data_points.clear(); - let mut values = match self.value_map.values.lock() { + let mut values = match self.value_map.values.write() { Ok(v) => v, Err(_) => return (0, None), }; @@ -149,7 +159,7 @@ impl> Sum { .collect(), start_time: Some(prev_start), time: Some(t), - value, + value: value.get_value(), exemplars: vec![], }); } @@ -186,7 +196,7 @@ impl> Sum { s_data.is_monotonic = self.monotonic; s_data.data_points.clear(); - let values = match self.value_map.values.lock() { + let values = match self.value_map.values.write() { Ok(v) => v, Err(_) => return (0, None), }; @@ -226,7 +236,7 @@ impl> Sum { .collect(), start_time: Some(prev_start), time: Some(t), - value: *value, + value: value.get_value(), exemplars: vec![], }); } @@ -282,7 +292,7 @@ impl> PrecomputedSum { s_data.temporality = Temporality::Delta; s_data.is_monotonic = self.monotonic; - let mut values = match self.value_map.values.lock() { + let mut values = match self.value_map.values.write() { Ok(v) => v, Err(_) => return (0, None), }; @@ -315,9 +325,9 @@ impl> PrecomputedSum { let default = T::default(); for 
(attrs, value) in values.drain() { - let delta = value - *reported.get(&attrs).unwrap_or(&default); + let delta = value.get_value() - *reported.get(&attrs).unwrap_or(&default); if delta != default { - new_reported.insert(attrs.clone(), value); + new_reported.insert(attrs.clone(), value.get_value()); } s_data.data_points.push(DataPoint { attributes: attrs @@ -367,7 +377,7 @@ impl> PrecomputedSum { s_data.temporality = Temporality::Cumulative; s_data.is_monotonic = self.monotonic; - let values = match self.value_map.values.lock() { + let values = match self.value_map.values.write() { Ok(v) => v, Err(_) => return (0, None), }; @@ -400,9 +410,9 @@ impl> PrecomputedSum { let default = T::default(); for (attrs, value) in values.iter() { - let delta = *value - *reported.get(attrs).unwrap_or(&default); + let delta = value.get_value() - *reported.get(attrs).unwrap_or(&default); if delta != default { - new_reported.insert(attrs.clone(), *value); + new_reported.insert(attrs.clone(), value.get_value()); } s_data.data_points.push(DataPoint { attributes: attrs diff --git a/stress/src/metrics_counter.rs b/stress/src/metrics_counter.rs index 2309d8eaaa..2cc3dbf7cd 100644 --- a/stress/src/metrics_counter.rs +++ b/stress/src/metrics_counter.rs @@ -3,7 +3,7 @@ OS: Ubuntu 22.04.3 LTS (5.15.146.1-microsoft-standard-WSL2) Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, RAM: 64.0 GB - 3M /sec + 35 M /sec */ use lazy_static::lazy_static; From e521083cd27ae00302c840d1f75d85d31f6593c7 Mon Sep 17 00:00:00 2001 From: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com> Date: Tue, 28 May 2024 21:27:21 -0700 Subject: [PATCH 28/37] Minor refactoring (#1839) --- stress/src/metrics_counter.rs | 7 ++----- stress/src/metrics_histogram.rs | 7 ++----- stress/src/metrics_overflow.rs | 7 ++----- 3 files changed, 6 insertions(+), 15 deletions(-) diff --git a/stress/src/metrics_counter.rs b/stress/src/metrics_counter.rs index 2cc3dbf7cd..7a38bb2938 100644 --- 
a/stress/src/metrics_counter.rs +++ b/stress/src/metrics_counter.rs @@ -16,7 +16,7 @@ use rand::{ rngs::{self}, Rng, SeedableRng, }; -use std::{borrow::Cow, cell::RefCell}; +use std::cell::RefCell; mod throughput; @@ -28,10 +28,7 @@ lazy_static! { "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9", "value10" ]; - static ref COUNTER: Counter = PROVIDER - .meter(<&str as Into>>::into("test")) - .u64_counter("hello") - .init(); + static ref COUNTER: Counter = PROVIDER.meter("test").u64_counter("hello").init(); } thread_local! { diff --git a/stress/src/metrics_histogram.rs b/stress/src/metrics_histogram.rs index 8b7690b5ae..321deb57ec 100644 --- a/stress/src/metrics_histogram.rs +++ b/stress/src/metrics_histogram.rs @@ -16,7 +16,7 @@ use rand::{ rngs::{self}, Rng, SeedableRng, }; -use std::{borrow::Cow, cell::RefCell}; +use std::cell::RefCell; mod throughput; @@ -28,10 +28,7 @@ lazy_static! { "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9", "value10" ]; - static ref HISTOGRAM: Histogram = PROVIDER - .meter(<&str as Into>>::into("test")) - .u64_histogram("hello") - .init(); + static ref HISTOGRAM: Histogram = PROVIDER.meter("test").u64_histogram("hello").init(); } thread_local! { diff --git a/stress/src/metrics_overflow.rs b/stress/src/metrics_overflow.rs index 0e27c96b24..dfb13170a8 100644 --- a/stress/src/metrics_overflow.rs +++ b/stress/src/metrics_overflow.rs @@ -16,7 +16,7 @@ use rand::{ rngs::{self}, Rng, SeedableRng, }; -use std::{borrow::Cow, cell::RefCell}; +use std::cell::RefCell; mod throughput; @@ -24,10 +24,7 @@ lazy_static! { static ref PROVIDER: SdkMeterProvider = SdkMeterProvider::builder() .with_reader(ManualReader::builder().build()) .build(); - static ref COUNTER: Counter = PROVIDER - .meter(<&str as Into>>::into("test")) - .u64_counter("hello") - .init(); + static ref COUNTER: Counter = PROVIDER.meter("test").u64_counter("hello").init(); } thread_local! 
{ From 2dd47fd5782c5e9d21a0e559dcd4a6f7a552d83e Mon Sep 17 00:00:00 2001 From: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com> Date: Tue, 28 May 2024 21:46:38 -0700 Subject: [PATCH 29/37] Fix tests to make precommit.sh run on Windows (#1831) Co-authored-by: Zhongyang Wu Co-authored-by: Cijo Thomas --- opentelemetry-prometheus/tests/integration_test.rs | 9 +++++---- opentelemetry-proto/tests/grpc_build.rs | 11 +++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/opentelemetry-prometheus/tests/integration_test.rs b/opentelemetry-prometheus/tests/integration_test.rs index 51aa496b62..149185e440 100644 --- a/opentelemetry-prometheus/tests/integration_test.rs +++ b/opentelemetry-prometheus/tests/integration_test.rs @@ -406,6 +406,7 @@ fn gather_and_compare(registry: prometheus::Registry, expected: String, name: &' let metric_families = registry.gather(); encoder.encode(&metric_families, &mut output).unwrap(); + let expected = get_platform_specific_string(expected); let output_string = get_platform_specific_string(String::from_utf8(output).unwrap()); assert_eq!(output_string, expected, "{name}"); @@ -413,11 +414,10 @@ fn gather_and_compare(registry: prometheus::Registry, expected: String, name: &' /// Returns a String which uses the platform specific new line feed character. 
fn get_platform_specific_string(input: String) -> String { - if cfg!(windows) { - input.replace('\n', "\r\n") - } else { - input + if cfg!(windows) && !input.ends_with("\r\n") && input.ends_with('\n') { + return input.replace('\n', "\r\n"); } + input } #[test] @@ -812,6 +812,7 @@ fn duplicate_metrics() { .expected_files .into_iter() .map(|f| fs::read_to_string(Path::new("./tests/data").join(f)).expect(f)) + .map(get_platform_specific_string) .collect(); gather_and_compare_multi(registry, possible_matches, tc.name); } diff --git a/opentelemetry-proto/tests/grpc_build.rs b/opentelemetry-proto/tests/grpc_build.rs index 7201a0f8f8..8477a4e626 100644 --- a/opentelemetry-proto/tests/grpc_build.rs +++ b/opentelemetry-proto/tests/grpc_build.rs @@ -18,7 +18,7 @@ const TONIC_INCLUDES: &[&str] = &["src/proto/opentelemetry-proto", "src/proto"]; #[test] fn build_tonic() { - let before_build = build_content_map(TONIC_OUT_DIR, false); + let before_build = build_content_map(TONIC_OUT_DIR, true); let out_dir = TempDir::new().expect("failed to create temp dir to store the generated files"); @@ -130,13 +130,12 @@ fn build_content_map(path: impl AsRef, normalize_line_feed: bool) -> HashM .collect() } -/// Returns a String with the platform specific new line feed character. +/// Returns a String which uses the platform specific new line feed character. 
fn get_platform_specific_string(input: String) -> String { - if cfg!(windows) { - input.replace('\n', "\r\n") - } else { - input + if cfg!(windows) && !input.ends_with("\r\n") && input.ends_with('\n') { + return input.replace('\n', "\r\n"); } + input } fn ensure_files_are_same( From d0c81054467c8a3b211f68ee5fd3de8c5fb712fa Mon Sep 17 00:00:00 2001 From: Zhongyang Wu Date: Wed, 29 May 2024 05:22:34 -0700 Subject: [PATCH 30/37] chore: bump the msrv of OTLP to 1.70 (#1840) --- opentelemetry-otlp/CHANGELOG.md | 2 ++ opentelemetry-otlp/Cargo.toml | 2 +- opentelemetry-otlp/README.md | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/opentelemetry-otlp/CHANGELOG.md b/opentelemetry-otlp/CHANGELOG.md index a2a598f4ac..e8f75bffec 100644 --- a/opentelemetry-otlp/CHANGELOG.md +++ b/opentelemetry-otlp/CHANGELOG.md @@ -14,6 +14,8 @@ now use `.with_resource(RESOURCE::default())` to configure Resource when using `OtlpLogPipeline`. - **Breaking** The methods `OtlpTracePipeline::install_simple()` and `OtlpTracePipeline::install_batch()` would now return `TracerProvider` instead of `Tracer`. These methods would also no longer set the global tracer provider. It would now be the responsibility of users to set it by calling `global::set_tracer_provider(tracer_provider.clone());`. Refer to the [basic-otlp](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-otlp/examples/basic-otlp/src/main.rs) and [basic-otlp-http](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs) examples on how to initialize OTLP Trace Exporter. 
+- Bump MSRV to 1.70 [#1840](https://github.com/open-telemetry/opentelemetry-rust/pull/1840) + ## v0.16.0 diff --git a/opentelemetry-otlp/Cargo.toml b/opentelemetry-otlp/Cargo.toml index b18609c30c..f93a8d1c3a 100644 --- a/opentelemetry-otlp/Cargo.toml +++ b/opentelemetry-otlp/Cargo.toml @@ -13,7 +13,7 @@ categories = [ keywords = ["opentelemetry", "otlp", "logging", "tracing", "metrics"] license = "Apache-2.0" edition = "2021" -rust-version = "1.65" +rust-version = "1.70" autotests = false [[test]] diff --git a/opentelemetry-otlp/README.md b/opentelemetry-otlp/README.md index 7ab3632a33..c9fb019de2 100644 --- a/opentelemetry-otlp/README.md +++ b/opentelemetry-otlp/README.md @@ -30,7 +30,7 @@ can easily instrument your applications or systems, no matter their language, infrastructure, or runtime environment. Crucially, the storage and visualization of telemetry is intentionally left to other tools. -*Compiler support: [requires `rustc` 1.65+][msrv]* +*Compiler support: [requires `rustc` 1.70+][msrv]* [Prometheus]: https://prometheus.io [Jaeger]: https://www.jaegertracing.io @@ -43,7 +43,7 @@ See [docs](https://docs.rs/opentelemetry-otlp). ## Supported Rust Versions OpenTelemetry is built against the latest stable release. The minimum supported -version is 1.65. The current OpenTelemetry version is not guaranteed to build +version is 1.70. The current OpenTelemetry version is not guaranteed to build on Rust versions earlier than the minimum supported version. 
The current stable Rust compiler and the three most recent minor versions From 78943fd674cfde59945d6ebd6d9163b987318c9a Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Wed, 29 May 2024 12:41:38 -0700 Subject: [PATCH 31/37] Add more validations to Metrics tests (#1841) --- opentelemetry-sdk/src/metrics/mod.rs | 192 +++++++++++++++++++++++++-- 1 file changed, 184 insertions(+), 8 deletions(-) diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index 03ee78b05f..94ab2345f6 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -228,6 +228,13 @@ mod tests { updown_counter_aggregation_helper(Temporality::Delta); } + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn gauge_aggregation() { + // Run this test with stdout enabled to see output. + // cargo test gauge_aggregation --features=metrics,testing -- --nocapture + gauge_aggregation_helper(); + } + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn observable_counter_aggregation_cumulative_non_zero_increment() { // Run this test with stdout enabled to see output. @@ -1046,18 +1053,19 @@ mod tests { test_context.flush_metrics(); // Assert - let histogram = test_context.get_aggregation::>("my_histogram", None); + let histogram_data = + test_context.get_aggregation::>("my_histogram", None); // Expecting 2 time-series. 
- assert_eq!(histogram.data_points.len(), 2); + assert_eq!(histogram_data.data_points.len(), 2); if let Temporality::Cumulative = temporality { assert_eq!( - histogram.temporality, + histogram_data.temporality, Temporality::Cumulative, "Should produce cumulative" ); } else { assert_eq!( - histogram.temporality, + histogram_data.temporality, Temporality::Delta, "Should produce delta" ); @@ -1065,7 +1073,7 @@ mod tests { // find and validate key1=value2 datapoint let data_point1 = - find_histogram_datapoint_with_key_value(&histogram.data_points, "key1", "value1") + find_histogram_datapoint_with_key_value(&histogram_data.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected"); assert_eq!(data_point1.count, values_kv1.len() as u64); assert_eq!(data_point1.sum, values_kv1.iter().sum::()); @@ -1073,12 +1081,116 @@ mod tests { assert_eq!(data_point1.max.unwrap(), *values_kv1.iter().max().unwrap()); let data_point2 = - find_histogram_datapoint_with_key_value(&histogram.data_points, "key1", "value2") + find_histogram_datapoint_with_key_value(&histogram_data.data_points, "key1", "value2") .expect("datapoint with key1=value2 expected"); assert_eq!(data_point2.count, values_kv2.len() as u64); assert_eq!(data_point2.sum, values_kv2.iter().sum::()); assert_eq!(data_point2.min.unwrap(), *values_kv2.iter().min().unwrap()); assert_eq!(data_point2.max.unwrap(), *values_kv2.iter().max().unwrap()); + + // Reset and report more measurements + test_context.reset_metrics(); + for value in values_kv1.iter() { + histogram.record(*value, &[KeyValue::new("key1", "value1")]); + } + + for value in values_kv2.iter() { + histogram.record(*value, &[KeyValue::new("key1", "value2")]); + } + + test_context.flush_metrics(); + + let histogram_data = + test_context.get_aggregation::>("my_histogram", None); + assert_eq!(histogram_data.data_points.len(), 2); + let data_point1 = + find_histogram_datapoint_with_key_value(&histogram_data.data_points, "key1", "value1") + 
.expect("datapoint with key1=value1 expected"); + if temporality == Temporality::Cumulative { + assert_eq!(data_point1.count, 2 * (values_kv1.len() as u64)); + assert_eq!(data_point1.sum, 2 * (values_kv1.iter().sum::())); + assert_eq!(data_point1.min.unwrap(), *values_kv1.iter().min().unwrap()); + assert_eq!(data_point1.max.unwrap(), *values_kv1.iter().max().unwrap()); + } else { + assert_eq!(data_point1.count, values_kv1.len() as u64); + assert_eq!(data_point1.sum, values_kv1.iter().sum::()); + assert_eq!(data_point1.min.unwrap(), *values_kv1.iter().min().unwrap()); + assert_eq!(data_point1.max.unwrap(), *values_kv1.iter().max().unwrap()); + } + + let data_point1 = + find_histogram_datapoint_with_key_value(&histogram_data.data_points, "key1", "value2") + .expect("datapoint with key1=value1 expected"); + if temporality == Temporality::Cumulative { + assert_eq!(data_point1.count, 2 * (values_kv2.len() as u64)); + assert_eq!(data_point1.sum, 2 * (values_kv2.iter().sum::())); + assert_eq!(data_point1.min.unwrap(), *values_kv2.iter().min().unwrap()); + assert_eq!(data_point1.max.unwrap(), *values_kv2.iter().max().unwrap()); + } else { + assert_eq!(data_point1.count, values_kv2.len() as u64); + assert_eq!(data_point1.sum, values_kv2.iter().sum::()); + assert_eq!(data_point1.min.unwrap(), *values_kv2.iter().min().unwrap()); + assert_eq!(data_point1.max.unwrap(), *values_kv2.iter().max().unwrap()); + } + } + + fn gauge_aggregation_helper() { + // Arrange + let mut test_context = TestContext::new(Temporality::Delta); + let gauge = test_context.meter().i64_gauge("my_gauge").init(); + + // Act + gauge.record(1, &[KeyValue::new("key1", "value1")]); + gauge.record(2, &[KeyValue::new("key1", "value1")]); + gauge.record(1, &[KeyValue::new("key1", "value1")]); + gauge.record(3, &[KeyValue::new("key1", "value1")]); + gauge.record(4, &[KeyValue::new("key1", "value1")]); + + gauge.record(11, &[KeyValue::new("key1", "value2")]); + gauge.record(13, &[KeyValue::new("key1", "value2")]); 
+ gauge.record(6, &[KeyValue::new("key1", "value2")]); + + test_context.flush_metrics(); + + // Assert + let gauge_data_point = test_context.get_aggregation::>("my_gauge", None); + // Expecting 2 time-series. + assert_eq!(gauge_data_point.data_points.len(), 2); + + // find and validate key1=value2 datapoint + let data_point1 = + find_datapoint_with_key_value(&gauge_data_point.data_points, "key1", "value1") + .expect("datapoint with key1=value1 expected"); + assert_eq!(data_point1.value, 4); + + let data_point1 = + find_datapoint_with_key_value(&gauge_data_point.data_points, "key1", "value2") + .expect("datapoint with key1=value2 expected"); + assert_eq!(data_point1.value, 6); + + // Reset and report more measurements + test_context.reset_metrics(); + gauge.record(1, &[KeyValue::new("key1", "value1")]); + gauge.record(2, &[KeyValue::new("key1", "value1")]); + gauge.record(11, &[KeyValue::new("key1", "value1")]); + gauge.record(3, &[KeyValue::new("key1", "value1")]); + gauge.record(41, &[KeyValue::new("key1", "value1")]); + + gauge.record(34, &[KeyValue::new("key1", "value2")]); + gauge.record(12, &[KeyValue::new("key1", "value2")]); + gauge.record(54, &[KeyValue::new("key1", "value2")]); + + test_context.flush_metrics(); + + let sum = test_context.get_aggregation::>("my_gauge", None); + assert_eq!(sum.data_points.len(), 2); + let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value1") + .expect("datapoint with key1=value1 expected"); + assert_eq!(data_point1.value, 41); + + let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value2") + .expect("datapoint with key1=value2 expected"); + assert_eq!(data_point1.value, 54); } fn counter_aggregation_helper(temporality: Temporality) { @@ -1122,12 +1234,44 @@ mod tests { let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value2") .expect("datapoint with key1=value2 expected"); assert_eq!(data_point1.value, 3); + + // Reset and report more measurements + 
test_context.reset_metrics(); + counter.add(1, &[KeyValue::new("key1", "value1")]); + counter.add(1, &[KeyValue::new("key1", "value1")]); + counter.add(1, &[KeyValue::new("key1", "value1")]); + counter.add(1, &[KeyValue::new("key1", "value1")]); + counter.add(1, &[KeyValue::new("key1", "value1")]); + + counter.add(1, &[KeyValue::new("key1", "value2")]); + counter.add(1, &[KeyValue::new("key1", "value2")]); + counter.add(1, &[KeyValue::new("key1", "value2")]); + + test_context.flush_metrics(); + + let sum = test_context.get_aggregation::>("my_counter", None); + assert_eq!(sum.data_points.len(), 2); + let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value1") + .expect("datapoint with key1=value1 expected"); + if temporality == Temporality::Cumulative { + assert_eq!(data_point1.value, 10); + } else { + assert_eq!(data_point1.value, 5); + } + + let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value2") + .expect("datapoint with key1=value2 expected"); + if temporality == Temporality::Cumulative { + assert_eq!(data_point1.value, 6); + } else { + assert_eq!(data_point1.value, 3); + } } fn updown_counter_aggregation_helper(temporality: Temporality) { // Arrange let mut test_context = TestContext::new(temporality); - let counter = test_context.i64_up_down_counter("test", "my_counter", None); + let counter = test_context.i64_up_down_counter("test", "my_updown_counter", None); // Act counter.add(1, &[KeyValue::new("key1", "value1")]); @@ -1143,7 +1287,7 @@ mod tests { test_context.flush_metrics(); // Assert - let sum = test_context.get_aggregation::>("my_counter", None); + let sum = test_context.get_aggregation::>("my_updown_counter", None); // Expecting 2 time-series. 
assert_eq!(sum.data_points.len(), 2); assert!( @@ -1168,6 +1312,38 @@ mod tests { let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value2") .expect("datapoint with key1=value2 expected"); assert_eq!(data_point1.value, 3); + + // Reset and report more measurements + test_context.reset_metrics(); + counter.add(1, &[KeyValue::new("key1", "value1")]); + counter.add(1, &[KeyValue::new("key1", "value1")]); + counter.add(1, &[KeyValue::new("key1", "value1")]); + counter.add(1, &[KeyValue::new("key1", "value1")]); + counter.add(1, &[KeyValue::new("key1", "value1")]); + + counter.add(1, &[KeyValue::new("key1", "value2")]); + counter.add(1, &[KeyValue::new("key1", "value2")]); + counter.add(1, &[KeyValue::new("key1", "value2")]); + + test_context.flush_metrics(); + + let sum = test_context.get_aggregation::>("my_updown_counter", None); + assert_eq!(sum.data_points.len(), 2); + let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value1") + .expect("datapoint with key1=value1 expected"); + if temporality == Temporality::Cumulative { + assert_eq!(data_point1.value, 10); + } else { + assert_eq!(data_point1.value, 5); + } + + let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value2") + .expect("datapoint with key1=value2 expected"); + if temporality == Temporality::Cumulative { + assert_eq!(data_point1.value, 6); + } else { + assert_eq!(data_point1.value, 3); + } } fn find_datapoint_with_key_value<'a, T>( From 3825edcc8597d1ff7e95a07575da2d8c555c3dd7 Mon Sep 17 00:00:00 2001 From: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com> Date: Wed, 29 May 2024 16:00:08 -0700 Subject: [PATCH 32/37] Update metrics tests (#1844) --- opentelemetry-sdk/src/metrics/mod.rs | 47 +++++++++++++++------------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index 94ab2345f6..5e75b0c7c4 100644 --- 
a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -217,14 +217,14 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn updown_counter_aggregation_cumulative() { // Run this test with stdout enabled to see output. - // cargo test counter_aggregation_cumulative --features=metrics,testing -- --nocapture + // cargo test updown_counter_aggregation_cumulative --features=metrics,testing -- --nocapture updown_counter_aggregation_helper(Temporality::Cumulative); } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn updown_counter_aggregation_delta() { // Run this test with stdout enabled to see output. - // cargo test counter_aggregation_delta --features=metrics,testing -- --nocapture + // cargo test updown_counter_aggregation_delta --features=metrics,testing -- --nocapture updown_counter_aggregation_helper(Temporality::Delta); } @@ -232,7 +232,10 @@ mod tests { async fn gauge_aggregation() { // Run this test with stdout enabled to see output. // cargo test gauge_aggregation --features=metrics,testing -- --nocapture - gauge_aggregation_helper(); + + // Gauge should use last value aggregation regardless of the aggregation temporality used. 
+ gauge_aggregation_helper(Temporality::Delta); + gauge_aggregation_helper(Temporality::Cumulative); } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -1134,9 +1137,9 @@ mod tests { } } - fn gauge_aggregation_helper() { + fn gauge_aggregation_helper(temporality: Temporality) { // Arrange - let mut test_context = TestContext::new(Temporality::Delta); + let mut test_context = TestContext::new(temporality); let gauge = test_context.meter().i64_gauge("my_gauge").init(); // Act @@ -1274,15 +1277,15 @@ mod tests { let counter = test_context.i64_up_down_counter("test", "my_updown_counter", None); // Act - counter.add(1, &[KeyValue::new("key1", "value1")]); - counter.add(1, &[KeyValue::new("key1", "value1")]); - counter.add(1, &[KeyValue::new("key1", "value1")]); - counter.add(1, &[KeyValue::new("key1", "value1")]); + counter.add(10, &[KeyValue::new("key1", "value1")]); + counter.add(-1, &[KeyValue::new("key1", "value1")]); + counter.add(-5, &[KeyValue::new("key1", "value1")]); + counter.add(0, &[KeyValue::new("key1", "value1")]); counter.add(1, &[KeyValue::new("key1", "value1")]); - counter.add(1, &[KeyValue::new("key1", "value2")]); - counter.add(1, &[KeyValue::new("key1", "value2")]); - counter.add(1, &[KeyValue::new("key1", "value2")]); + counter.add(10, &[KeyValue::new("key1", "value2")]); + counter.add(0, &[KeyValue::new("key1", "value2")]); + counter.add(-3, &[KeyValue::new("key1", "value2")]); test_context.flush_metrics(); @@ -1311,19 +1314,19 @@ mod tests { let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value2") .expect("datapoint with key1=value2 expected"); - assert_eq!(data_point1.value, 3); + assert_eq!(data_point1.value, 7); // Reset and report more measurements test_context.reset_metrics(); - counter.add(1, &[KeyValue::new("key1", "value1")]); - counter.add(1, &[KeyValue::new("key1", "value1")]); - counter.add(1, &[KeyValue::new("key1", "value1")]); - counter.add(1, &[KeyValue::new("key1", "value1")]); + 
counter.add(10, &[KeyValue::new("key1", "value1")]); + counter.add(-1, &[KeyValue::new("key1", "value1")]); + counter.add(-5, &[KeyValue::new("key1", "value1")]); + counter.add(0, &[KeyValue::new("key1", "value1")]); counter.add(1, &[KeyValue::new("key1", "value1")]); - counter.add(1, &[KeyValue::new("key1", "value2")]); - counter.add(1, &[KeyValue::new("key1", "value2")]); - counter.add(1, &[KeyValue::new("key1", "value2")]); + counter.add(10, &[KeyValue::new("key1", "value2")]); + counter.add(0, &[KeyValue::new("key1", "value2")]); + counter.add(-3, &[KeyValue::new("key1", "value2")]); test_context.flush_metrics(); @@ -1340,9 +1343,9 @@ mod tests { let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value2") .expect("datapoint with key1=value2 expected"); if temporality == Temporality::Cumulative { - assert_eq!(data_point1.value, 6); + assert_eq!(data_point1.value, 14); } else { - assert_eq!(data_point1.value, 3); + assert_eq!(data_point1.value, 7); } } From e2e6a6e2a86f6258511e0a9fdd638b828fe670a4 Mon Sep 17 00:00:00 2001 From: Utkarsh Umesan Pillai <66651184+utpilla@users.noreply.github.com> Date: Wed, 29 May 2024 16:17:18 -0700 Subject: [PATCH 33/37] Add multi-threaded test for Counter (#1845) Co-authored-by: Cijo Thomas --- opentelemetry-sdk/src/metrics/mod.rs | 56 ++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index 5e75b0c7c4..e342311c30 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -150,6 +150,7 @@ mod tests { use rand::{rngs, Rng, SeedableRng}; use std::borrow::Cow; use std::sync::{Arc, Mutex}; + use std::thread; // Run all tests in this mod // cargo test metrics::tests --features=metrics,testing @@ -1032,6 +1033,61 @@ mod tests { assert!(resource_metrics.is_empty(), "No metrics should be exported as no new measurements were recorded since last collect."); } + #[tokio::test(flavor 
= "multi_thread", worker_threads = 1)] + async fn counter_multithreaded() { + // Run this test with stdout enabled to see output. + // cargo test counter_multithreaded --features=metrics,testing -- --nocapture + + counter_multithreaded_aggregation_helper(Temporality::Delta); + counter_multithreaded_aggregation_helper(Temporality::Cumulative); + } + + fn counter_multithreaded_aggregation_helper(temporality: Temporality) { + // Arrange + let mut test_context = TestContext::new(temporality); + let counter = Arc::new(test_context.u64_counter("test", "my_counter", None)); + + let mut update_threads = vec![]; + for _ in 0..10 { + let counter = Arc::clone(&counter); + + update_threads.push(thread::spawn(move || { + counter.add(1, &[KeyValue::new("key1", "value1")]); + counter.add(1, &[KeyValue::new("key1", "value1")]); + counter.add(1, &[KeyValue::new("key1", "value1")]); + counter.add(1, &[KeyValue::new("key1", "value1")]); + counter.add(1, &[KeyValue::new("key1", "value1")]); + })); + } + + for thread in update_threads { + thread.join().unwrap(); + } + + test_context.flush_metrics(); + + // Assert + let sum = test_context.get_aggregation::>("my_counter", None); + // Expecting 1 time-series. + assert_eq!(sum.data_points.len(), 1); + assert!(sum.is_monotonic, "Counter should produce monotonic."); + assert_eq!(sum.temporality, temporality); + if let Temporality::Cumulative = temporality { + assert_eq!( + sum.temporality, + Temporality::Cumulative, + "Should produce cumulative" + ); + } else { + assert_eq!(sum.temporality, Temporality::Delta, "Should produce delta"); + } + + // find and validate key1=value1 datapoint + let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value1") + .expect("datapoint with key1=value1 expected"); + assert_eq!(data_point1.value, 50); // Each of the 10 update threads record measurements summing up to 5.
+ } + fn histogram_aggregation_helper(temporality: Temporality) { // Arrange let mut test_context = TestContext::new(temporality); From 7cad751cab59f2c0950b60a48791f1efba0fa804 Mon Sep 17 00:00:00 2001 From: Expyron <5100376+Expyron@users.noreply.github.com> Date: Thu, 30 May 2024 16:29:34 +0200 Subject: [PATCH 34/37] Remove unused dependency (#1847) --- opentelemetry-sdk/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index ea402f38d8..16b95f704b 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -28,7 +28,6 @@ url = { workspace = true, optional = true } tokio = { workspace = true, features = ["rt", "time"], optional = true } tokio-stream = { workspace = true, optional = true } http = { workspace = true, optional = true } -lazy_static = "1.4.0" [package.metadata.docs.rs] all-features = true From 1beffd2149411dff03ff0d07e7865b8f00f02d23 Mon Sep 17 00:00:00 2001 From: EAimTY Date: Fri, 31 May 2024 01:24:09 +0900 Subject: [PATCH 35/37] Fix typos in webpki feature name (#1842) Signed-off-by: EAimTY Co-authored-by: Zhongyang Wu Co-authored-by: Cijo Thomas --- opentelemetry-http/CHANGELOG.md | 2 ++ opentelemetry-http/Cargo.toml | 2 +- opentelemetry-otlp/CHANGELOG.md | 2 +- opentelemetry-otlp/Cargo.toml | 4 ++-- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/opentelemetry-http/CHANGELOG.md b/opentelemetry-http/CHANGELOG.md index 0cda2a1f8c..c19fd56dea 100644 --- a/opentelemetry-http/CHANGELOG.md +++ b/opentelemetry-http/CHANGELOG.md @@ -2,6 +2,8 @@ ## vNext +- **Breaking** Correct the misspelling of "webkpi" to "webpki" in features [#1842](https://github.com/open-telemetry/opentelemetry-rust/pull/1842) + ## v0.12.0 - Add `reqwest-rustls-webkpi-roots` feature flag to configure [`reqwest`](https://docs.rs/reqwest/0.11.27/reqwest/index.html#optional-features) to use embedded `webkpi-roots`. 
diff --git a/opentelemetry-http/Cargo.toml b/opentelemetry-http/Cargo.toml index 29ba96d795..a3fda11719 100644 --- a/opentelemetry-http/Cargo.toml +++ b/opentelemetry-http/Cargo.toml @@ -11,7 +11,7 @@ rust-version = "1.65" [features] reqwest-rustls = ["reqwest", "reqwest/rustls-tls-native-roots"] -reqwest-rustls-webkpi-roots = ["reqwest", "reqwest/rustls-tls-webpki-roots"] +reqwest-rustls-webpki-roots = ["reqwest", "reqwest/rustls-tls-webpki-roots"] [dependencies] async-trait = { workspace = true } diff --git a/opentelemetry-otlp/CHANGELOG.md b/opentelemetry-otlp/CHANGELOG.md index e8f75bffec..5ec7699d2a 100644 --- a/opentelemetry-otlp/CHANGELOG.md +++ b/opentelemetry-otlp/CHANGELOG.md @@ -14,9 +14,9 @@ now use `.with_resource(RESOURCE::default())` to configure Resource when using `OtlpLogPipeline`. - **Breaking** The methods `OtlpTracePipeline::install_simple()` and `OtlpTracePipeline::install_batch()` would now return `TracerProvider` instead of `Tracer`. These methods would also no longer set the global tracer provider. It would now be the responsibility of users to set it by calling `global::set_tracer_provider(tracer_provider.clone());`. Refer to the [basic-otlp](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-otlp/examples/basic-otlp/src/main.rs) and [basic-otlp-http](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs) examples on how to initialize OTLP Trace Exporter. 
+- **Breaking** Correct the misspelling of "webkpi" to "webpki" in features [#1842](https://github.com/open-telemetry/opentelemetry-rust/pull/1842) - Bump MSRV to 1.70 [#1840](https://github.com/open-telemetry/opentelemetry-rust/pull/1840) - ## v0.16.0 ### Fixed diff --git a/opentelemetry-otlp/Cargo.toml b/opentelemetry-otlp/Cargo.toml index f93a8d1c3a..f031791aa6 100644 --- a/opentelemetry-otlp/Cargo.toml +++ b/opentelemetry-otlp/Cargo.toml @@ -67,7 +67,7 @@ grpc-tonic = ["tonic", "prost", "http", "tokio", "opentelemetry-proto/gen-tonic" gzip-tonic = ["tonic/gzip"] tls = ["tonic/tls"] tls-roots = ["tls", "tonic/tls-roots"] -tls-webkpi-roots = ["tls", "tonic/tls-webpki-roots"] +tls-webpki-roots = ["tls", "tonic/tls-webpki-roots"] # http binary http-proto = ["prost", "opentelemetry-http", "opentelemetry-proto/gen-tonic-messages", "http", "trace", "metrics"] @@ -76,7 +76,7 @@ http-json = ["serde_json", "prost", "opentelemetry-http", "opentelemetry-proto/g reqwest-blocking-client = ["reqwest/blocking", "opentelemetry-http/reqwest"] reqwest-client = ["reqwest", "opentelemetry-http/reqwest"] reqwest-rustls = ["reqwest", "opentelemetry-http/reqwest-rustls"] -reqwest-rustls-webkpi-roots = ["reqwest", "opentelemetry-http/reqwest-rustls-webkpi-roots"] +reqwest-rustls-webpki-roots = ["reqwest", "opentelemetry-http/reqwest-rustls-webpki-roots"] # test integration-testing = ["tonic", "prost", "tokio/full", "trace"] From 84c23a301777fbc94510a78d3489f57ec7e2dd44 Mon Sep 17 00:00:00 2001 From: Lalit Kumar Bhasin Date: Thu, 30 May 2024 09:40:14 -0700 Subject: [PATCH 36/37] [Trace SDK] Send resource once to processor and exporter, and not with every span (#1830) Co-authored-by: Cijo Thomas Co-authored-by: Zhongyang Wu --- opentelemetry-otlp/src/exporter/http/mod.rs | 14 +++++++--- opentelemetry-otlp/src/exporter/http/trace.rs | 6 ++++- opentelemetry-otlp/src/exporter/tonic/logs.rs | 2 +- .../src/exporter/tonic/trace.rs | 21 ++++++++++++--- opentelemetry-otlp/src/span.rs | 4 +++ 
opentelemetry-proto/src/transform/trace.rs | 14 ++++------ opentelemetry-sdk/CHANGELOG.md | 9 +++++++ .../benches/batch_span_processor.rs | 3 --- opentelemetry-sdk/src/export/trace.rs | 5 ++-- .../src/testing/trace/in_memory_exporter.rs | 10 +++++++ .../src/testing/trace/span_exporters.rs | 4 +-- opentelemetry-sdk/src/trace/provider.rs | 13 +++++++--- opentelemetry-sdk/src/trace/span.rs | 9 +------ opentelemetry-sdk/src/trace/span_processor.rs | 26 ++++++++++++++++--- opentelemetry-stdout/src/trace/exporter.rs | 19 ++++++++++---- opentelemetry-stdout/src/trace/transform.rs | 13 ++++++---- .../src/exporter/model/span.rs | 4 +-- 17 files changed, 122 insertions(+), 54 deletions(-) diff --git a/opentelemetry-otlp/src/exporter/http/mod.rs b/opentelemetry-otlp/src/exporter/http/mod.rs index a5ced848d4..4a45379121 100644 --- a/opentelemetry-otlp/src/exporter/http/mod.rs +++ b/opentelemetry-otlp/src/exporter/http/mod.rs @@ -307,12 +307,18 @@ impl OtlpHttpClient { fn build_trace_export_body( &self, spans: Vec, + resource: &opentelemetry_proto::transform::common::tonic::ResourceAttributesWithSchema, ) -> opentelemetry::trace::TraceResult<(Vec, &'static str)> { - use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest; - - let req = ExportTraceServiceRequest { - resource_spans: spans.into_iter().map(Into::into).collect(), + use opentelemetry_proto::tonic::{ + collector::trace::v1::ExportTraceServiceRequest, trace::v1::ResourceSpans, }; + + let resource_spans = spans + .into_iter() + .map(|span| ResourceSpans::new(span, resource)) + .collect::>(); + + let req = ExportTraceServiceRequest { resource_spans }; match self.protocol { #[cfg(feature = "http-json")] Protocol::HttpJson => match serde_json::to_string_pretty(&req) { diff --git a/opentelemetry-otlp/src/exporter/http/trace.rs b/opentelemetry-otlp/src/exporter/http/trace.rs index 8e272c93cf..8d6c3116cd 100644 --- a/opentelemetry-otlp/src/exporter/http/trace.rs +++ 
b/opentelemetry-otlp/src/exporter/http/trace.rs @@ -21,7 +21,7 @@ impl SpanExporter for OtlpHttpClient { Err(err) => return Box::pin(std::future::ready(Err(err))), }; - let (body, content_type) = match self.build_trace_export_body(batch) { + let (body, content_type) = match self.build_trace_export_body(batch, &self.resource) { Ok(body) => body, Err(e) => return Box::pin(std::future::ready(Err(e))), }; @@ -66,4 +66,8 @@ impl SpanExporter for OtlpHttpClient { fn shutdown(&mut self) { let _ = self.client.lock().map(|mut c| c.take()); } + + fn set_resource(&mut self, resource: &opentelemetry_sdk::Resource) { + self.resource = resource.into(); + } } diff --git a/opentelemetry-otlp/src/exporter/tonic/logs.rs b/opentelemetry-otlp/src/exporter/tonic/logs.rs index 8a6637a5b0..6cefd611ff 100644 --- a/opentelemetry-otlp/src/exporter/tonic/logs.rs +++ b/opentelemetry-otlp/src/exporter/tonic/logs.rs @@ -12,7 +12,7 @@ use super::BoxInterceptor; pub(crate) struct TonicLogsClient { inner: Option, #[allow(dead_code)] - // would be removed once we support set_resource for metrics and traces. + // would be removed once we support set_resource for metrics. 
resource: opentelemetry_proto::transform::common::tonic::ResourceAttributesWithSchema, } diff --git a/opentelemetry-otlp/src/exporter/tonic/trace.rs b/opentelemetry-otlp/src/exporter/tonic/trace.rs index b328dfba5f..a0dbe0e76b 100644 --- a/opentelemetry-otlp/src/exporter/tonic/trace.rs +++ b/opentelemetry-otlp/src/exporter/tonic/trace.rs @@ -5,6 +5,7 @@ use opentelemetry::trace::TraceError; use opentelemetry_proto::tonic::collector::trace::v1::{ trace_service_client::TraceServiceClient, ExportTraceServiceRequest, }; +use opentelemetry_proto::tonic::trace::v1::ResourceSpans; use opentelemetry_sdk::export::trace::{ExportResult, SpanData, SpanExporter}; use tonic::{codegen::CompressionEncoding, service::Interceptor, transport::Channel, Request}; @@ -12,6 +13,9 @@ use super::BoxInterceptor; pub(crate) struct TonicTracesClient { inner: Option, + #[allow(dead_code)] + // would be removed once we support set_resource for metrics. + resource: opentelemetry_proto::transform::common::tonic::ResourceAttributesWithSchema, } struct ClientInner { @@ -43,6 +47,7 @@ impl TonicTracesClient { client, interceptor, }), + resource: Default::default(), } } } @@ -66,14 +71,20 @@ impl SpanExporter for TonicTracesClient { } }; + // TODO: Avoid cloning here. 
+ let resource_spans = { + batch + .into_iter() + .map(|log_data| ResourceSpans::new(log_data, &self.resource)) + .collect() + }; + Box::pin(async move { client .export(Request::from_parts( metadata, extensions, - ExportTraceServiceRequest { - resource_spans: batch.into_iter().map(Into::into).collect(), - }, + ExportTraceServiceRequest { resource_spans }, )) .await .map_err(crate::Error::from)?; @@ -85,4 +96,8 @@ impl SpanExporter for TonicTracesClient { fn shutdown(&mut self) { let _ = self.inner.take(); } + + fn set_resource(&mut self, resource: &opentelemetry_sdk::Resource) { + self.resource = resource.into(); + } } diff --git a/opentelemetry-otlp/src/span.rs b/opentelemetry-otlp/src/span.rs index ba70f0825e..6e61cfd1a2 100644 --- a/opentelemetry-otlp/src/span.rs +++ b/opentelemetry-otlp/src/span.rs @@ -213,4 +213,8 @@ impl opentelemetry_sdk::export::trace::SpanExporter for SpanExporter { fn export(&mut self, batch: Vec) -> BoxFuture<'static, ExportResult> { self.0.export(batch) } + + fn set_resource(&mut self, resource: &opentelemetry_sdk::Resource) { + self.0.set_resource(resource); + } } diff --git a/opentelemetry-proto/src/transform/trace.rs b/opentelemetry-proto/src/transform/trace.rs index 77f4f18de2..3f0003d44e 100644 --- a/opentelemetry-proto/src/transform/trace.rs +++ b/opentelemetry-proto/src/transform/trace.rs @@ -4,7 +4,7 @@ pub mod tonic { use crate::proto::tonic::trace::v1::{span, status, ResourceSpans, ScopeSpans, Span, Status}; use crate::transform::common::{ to_nanos, - tonic::{resource_attributes, Attributes}, + tonic::{Attributes, ResourceAttributesWithSchema}, }; use opentelemetry::trace; use opentelemetry::trace::{Link, SpanId, SpanKind}; @@ -45,19 +45,15 @@ pub mod tonic { } } - impl From for ResourceSpans { - fn from(source_span: SpanData) -> Self { + impl ResourceSpans { + pub fn new(source_span: SpanData, resource: &ResourceAttributesWithSchema) -> Self { let span_kind: span::SpanKind = source_span.span_kind.into(); ResourceSpans { 
resource: Some(Resource { - attributes: resource_attributes(&source_span.resource).0, + attributes: resource.attributes.0.clone(), dropped_attributes_count: 0, }), - schema_url: source_span - .resource - .schema_url() - .map(|url| url.to_string()) - .unwrap_or_default(), + schema_url: resource.schema_url.clone().unwrap_or_default(), scope_spans: vec![ScopeSpans { schema_url: source_span .instrumentation_lib diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index ff0fa8cdb8..ccd7fd1e36 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -28,6 +28,15 @@ asynchronously, it should clone the log data to ensure it can be safely processed without lifetime issues. +- **Breaking** [#1830](https://github.com/open-telemetry/opentelemetry-rust/pull/1830/files) [Traces SDK] Improves + performance by sending Resource information to processors (and exporters) once, instead of sending with every log. If you are an author + of Processor, Exporter, the following are *BREAKING* changes. + - Implement `set_resource` method in your custom SpanProcessor, which invokes exporter's `set_resource`. + - Implement `set_resource` method in your custom SpanExporter. This method should save the resource object + in original or serialized format, to be merged with every span event during export. + - `SpanData` doesn't have the resource attributes. The `SpanExporter::export()` method needs to merge it + with the earlier preserved resource before export. + - **Breaking** [1836](https://github.com/open-telemetry/opentelemetry-rust/pull/1836) `SpanProcessor::shutdown` now takes an immutable reference to self. Any reference can call shutdown on the processor. After the first call to `shutdown` the processor will not process any new spans. 
## v0.23.0 diff --git a/opentelemetry-sdk/benches/batch_span_processor.rs b/opentelemetry-sdk/benches/batch_span_processor.rs index 7b6c096f4e..abc7d0df02 100644 --- a/opentelemetry-sdk/benches/batch_span_processor.rs +++ b/opentelemetry-sdk/benches/batch_span_processor.rs @@ -8,8 +8,6 @@ use opentelemetry_sdk::testing::trace::NoopSpanExporter; use opentelemetry_sdk::trace::{ BatchConfigBuilder, BatchSpanProcessor, SpanEvents, SpanLinks, SpanProcessor, }; -use opentelemetry_sdk::Resource; -use std::borrow::Cow; use std::sync::Arc; use std::time::SystemTime; use tokio::runtime::Runtime; @@ -34,7 +32,6 @@ fn get_span_data() -> Vec { events: SpanEvents::default(), links: SpanLinks::default(), status: Status::Unset, - resource: Cow::Owned(Resource::empty()), instrumentation_lib: Default::default(), }) .collect::>() diff --git a/opentelemetry-sdk/src/export/trace.rs b/opentelemetry-sdk/src/export/trace.rs index b3d99c9a13..4b43e00c36 100644 --- a/opentelemetry-sdk/src/export/trace.rs +++ b/opentelemetry-sdk/src/export/trace.rs @@ -63,6 +63,9 @@ pub trait SpanExporter: Send + Sync + Debug { fn force_flush(&mut self) -> BoxFuture<'static, ExportResult> { Box::pin(async { Ok(()) }) } + + /// Set the resource for the exporter. + fn set_resource(&mut self, _resource: &Resource) {} } /// `SpanData` contains all the information collected by a `Span` and can be used @@ -92,8 +95,6 @@ pub struct SpanData { pub links: crate::trace::SpanLinks, /// Span status pub status: Status, - /// Resource contains attributes representing an entity that produced this span. 
- pub resource: Cow<'static, Resource>, /// Instrumentation library that produced this span pub instrumentation_lib: crate::InstrumentationLibrary, } diff --git a/opentelemetry-sdk/src/testing/trace/in_memory_exporter.rs b/opentelemetry-sdk/src/testing/trace/in_memory_exporter.rs index d10a4c1922..5853558436 100644 --- a/opentelemetry-sdk/src/testing/trace/in_memory_exporter.rs +++ b/opentelemetry-sdk/src/testing/trace/in_memory_exporter.rs @@ -1,4 +1,5 @@ use crate::export::trace::{ExportResult, SpanData, SpanExporter}; +use crate::resource::Resource; use futures_util::future::BoxFuture; use opentelemetry::trace::{TraceError, TraceResult}; use std::sync::{Arc, Mutex}; @@ -51,6 +52,7 @@ use std::sync::{Arc, Mutex}; #[derive(Clone, Debug)] pub struct InMemorySpanExporter { spans: Arc>>, + resource: Arc>, } impl Default for InMemorySpanExporter { @@ -85,6 +87,7 @@ impl InMemorySpanExporterBuilder { pub fn build(&self) -> InMemorySpanExporter { InMemorySpanExporter { spans: Arc::new(Mutex::new(Vec::new())), + resource: Arc::new(Mutex::new(Resource::default())), } } } @@ -142,4 +145,11 @@ impl SpanExporter for InMemorySpanExporter { fn shutdown(&mut self) { self.reset() } + + fn set_resource(&mut self, resource: &Resource) { + self.resource + .lock() + .map(|mut res_guard| *res_guard = resource.clone()) + .expect("Resource lock poisoned"); + } } diff --git a/opentelemetry-sdk/src/testing/trace/span_exporters.rs b/opentelemetry-sdk/src/testing/trace/span_exporters.rs index 48ea2c6b43..c92a64f399 100644 --- a/opentelemetry-sdk/src/testing/trace/span_exporters.rs +++ b/opentelemetry-sdk/src/testing/trace/span_exporters.rs @@ -3,7 +3,7 @@ use crate::{ trace::{ExportResult, SpanData, SpanExporter}, ExportError, }, - trace::{Config, SpanEvents, SpanLinks}, + trace::{SpanEvents, SpanLinks}, InstrumentationLibrary, }; use futures_util::future::BoxFuture; @@ -14,7 +14,6 @@ use opentelemetry::trace::{ use std::fmt::{Display, Formatter}; pub fn new_test_export_span_data() -> 
SpanData { - let config = Config::default(); SpanData { span_context: SpanContext::new( TraceId::from_u128(1), @@ -33,7 +32,6 @@ pub fn new_test_export_span_data() -> SpanData { events: SpanEvents::default(), links: SpanLinks::default(), status: Status::Unset, - resource: config.resource, instrumentation_lib: InstrumentationLibrary::default(), } } diff --git a/opentelemetry-sdk/src/trace/provider.rs b/opentelemetry-sdk/src/trace/provider.rs index 1c6342d3b9..560d37dae9 100644 --- a/opentelemetry-sdk/src/trace/provider.rs +++ b/opentelemetry-sdk/src/trace/provider.rs @@ -218,11 +218,16 @@ impl Builder { } } + // Create a new vector to hold the modified processors + let mut processors = self.processors; + + // Set the resource for each processor + for p in &mut processors { + p.set_resource(config.resource.as_ref()); + } + TracerProvider { - inner: Arc::new(TracerProviderInner { - processors: self.processors, - config, - }), + inner: Arc::new(TracerProviderInner { processors, config }), } } } diff --git a/opentelemetry-sdk/src/trace/span.rs b/opentelemetry-sdk/src/trace/span.rs index 2ff3079cda..d672348885 100644 --- a/opentelemetry-sdk/src/trace/span.rs +++ b/opentelemetry-sdk/src/trace/span.rs @@ -9,7 +9,6 @@ //! is possible to change its name, set its `Attributes`, and add `Links` and `Events`. //! These cannot be changed after the `Span`'s end time has been set. use crate::trace::SpanLimits; -use crate::Resource; use opentelemetry::trace::{Event, Link, SpanContext, SpanId, SpanKind, Status}; use opentelemetry::KeyValue; use std::borrow::Cow; @@ -77,11 +76,10 @@ impl Span { /// overhead. 
pub fn exported_data(&self) -> Option { let (span_context, tracer) = (self.span_context.clone(), &self.tracer); - let resource = self.tracer.provider()?.config().resource.clone(); self.data .as_ref() - .map(|data| build_export_data(data.clone(), span_context, resource, tracer)) + .map(|data| build_export_data(data.clone(), span_context, tracer)) } } @@ -225,17 +223,14 @@ impl Span { processor.on_end(build_export_data( data, self.span_context.clone(), - provider.config().resource.clone(), &self.tracer, )); } processors => { - let config = provider.config(); for processor in processors { processor.on_end(build_export_data( data.clone(), self.span_context.clone(), - config.resource.clone(), &self.tracer, )); } @@ -254,7 +249,6 @@ impl Drop for Span { fn build_export_data( data: SpanData, span_context: SpanContext, - resource: Cow<'static, Resource>, tracer: &crate::trace::Tracer, ) -> crate::export::trace::SpanData { crate::export::trace::SpanData { @@ -269,7 +263,6 @@ fn build_export_data( events: data.events, links: data.links, status: data.status, - resource, instrumentation_lib: tracer.instrumentation_library().clone(), } } diff --git a/opentelemetry-sdk/src/trace/span_processor.rs b/opentelemetry-sdk/src/trace/span_processor.rs index 86381119b0..30fb543768 100644 --- a/opentelemetry-sdk/src/trace/span_processor.rs +++ b/opentelemetry-sdk/src/trace/span_processor.rs @@ -35,6 +35,7 @@ //! [`TracerProvider`]: opentelemetry::trace::TracerProvider use crate::export::trace::{ExportResult, SpanData, SpanExporter}; +use crate::resource::Resource; use crate::runtime::{RuntimeChannel, TrySend}; use crate::trace::Span; use futures_channel::oneshot; @@ -50,7 +51,7 @@ use opentelemetry::{ Context, }; use std::cmp::min; -use std::sync::Mutex; +use std::sync::{Arc, Mutex}; use std::{env, fmt, str::FromStr, time::Duration}; /// Delay interval between two consecutive exports. @@ -92,6 +93,8 @@ pub trait SpanProcessor: Send + Sync + std::fmt::Debug { /// Shuts down the processor. 
Called when SDK is shut down. This is an /// opportunity for processors to do any cleanup required. fn shutdown(&self) -> TraceResult<()>; + /// Set the resource for the span processor. + fn set_resource(&mut self, _resource: &Resource) {} } /// A [SpanProcessor] that passes finished spans to the configured @@ -147,6 +150,12 @@ impl SpanProcessor for SimpleSpanProcessor { )) } } + + fn set_resource(&mut self, resource: &Resource) { + if let Ok(mut exporter) = self.exporter.lock() { + exporter.set_resource(resource); + } + } } /// A [`SpanProcessor`] that asynchronously buffers finished spans and reports @@ -259,6 +268,13 @@ impl SpanProcessor for BatchSpanProcessor { .map_err(|err| TraceError::Other(err.into())) .and_then(|identity| identity) } + + fn set_resource(&mut self, resource: &Resource) { + let resource = Arc::new(resource.clone()); + let _ = self + .message_sender + .try_send(BatchMessage::SetResource(resource)); + } } /// Messages sent between application thread and batch span processor's work thread. @@ -275,6 +291,8 @@ enum BatchMessage { Flush(Option>), /// Shut down the worker thread, push all spans in buffer to the backend. Shutdown(oneshot::Sender), + /// Set the resource for the exporter.
+ SetResource(Arc), } struct BatchSpanProcessorInternal { @@ -375,8 +393,11 @@ impl BatchSpanProcessorInternal { self.exporter.shutdown(); return false; } + // propagate the resource + BatchMessage::SetResource(resource) => { + self.exporter.set_resource(&resource); + } } - true } @@ -710,7 +731,6 @@ mod tests { events: SpanEvents::default(), links: SpanLinks::default(), status: Status::Unset, - resource: Default::default(), instrumentation_lib: Default::default(), }; processor.on_end(unsampled); diff --git a/opentelemetry-stdout/src/trace/exporter.rs b/opentelemetry-stdout/src/trace/exporter.rs index 774920267a..c4d319ff31 100644 --- a/opentelemetry-stdout/src/trace/exporter.rs +++ b/opentelemetry-stdout/src/trace/exporter.rs @@ -5,6 +5,7 @@ use opentelemetry_sdk::export::{self, trace::ExportResult}; use std::io::{stdout, Write}; use crate::trace::transform::SpanData; +use opentelemetry_sdk::resource::Resource; type Encoder = Box TraceResult<()> + Send + Sync>; @@ -12,6 +13,7 @@ type Encoder = Box TraceResult<()> + Send + pub struct SpanExporter { writer: Option>, encoder: Encoder, + resource: Resource, } impl fmt::Debug for SpanExporter { @@ -36,11 +38,13 @@ impl Default for SpanExporter { impl opentelemetry_sdk::export::trace::SpanExporter for SpanExporter { fn export(&mut self, batch: Vec) -> BoxFuture<'static, ExportResult> { let res = if let Some(writer) = &mut self.writer { - (self.encoder)(writer, crate::trace::SpanData::from(batch)).and_then(|_| { - writer - .write_all(b"\n") - .map_err(|err| TraceError::Other(Box::new(err))) - }) + (self.encoder)(writer, crate::trace::SpanData::new(batch, &self.resource)).and_then( + |_| { + writer + .write_all(b"\n") + .map_err(|err| TraceError::Other(Box::new(err))) + }, + ) } else { Err("exporter is shut down".into()) }; @@ -51,6 +55,10 @@ impl opentelemetry_sdk::export::trace::SpanExporter for SpanExporter { fn shutdown(&mut self) { self.writer.take(); } + + fn set_resource(&mut self, res: 
&opentelemetry_sdk::Resource) { + self.resource = res.clone(); + } } /// Configuration for the stdout trace exporter @@ -107,6 +115,7 @@ impl SpanExporterBuilder { pub fn build(self) -> SpanExporter { SpanExporter { writer: Some(self.writer.unwrap_or_else(|| Box::new(stdout()))), + resource: Resource::empty(), encoder: self.encoder.unwrap_or_else(|| { Box::new(|writer, spans| { serde_json::to_writer(writer, &spans) diff --git a/opentelemetry-stdout/src/trace/transform.rs b/opentelemetry-stdout/src/trace/transform.rs index 66a659de07..484a222f42 100644 --- a/opentelemetry-stdout/src/trace/transform.rs +++ b/opentelemetry-stdout/src/trace/transform.rs @@ -9,17 +9,20 @@ pub struct SpanData { resource_spans: Vec, } -impl From> for SpanData { - fn from(sdk_spans: Vec) -> Self { +impl SpanData { + pub(crate) fn new( + sdk_spans: Vec, + sdk_resource: &opentelemetry_sdk::Resource, + ) -> Self { let mut resource_spans = HashMap::::new(); for sdk_span in sdk_spans { - let resource_schema_url = sdk_span.resource.schema_url().map(|s| s.to_string().into()); + let resource_schema_url = sdk_resource.schema_url().map(|s| s.to_string().into()); let schema_url = sdk_span.instrumentation_lib.schema_url.clone(); let scope = sdk_span.instrumentation_lib.clone().into(); - let resource = sdk_span.resource.as_ref().into(); + let resource: Resource = sdk_resource.into(); let rs = resource_spans - .entry(sdk_span.resource.as_ref().into()) + .entry(sdk_resource.into()) .or_insert_with(move || ResourceSpans { resource, scope_spans: Vec::with_capacity(1), diff --git a/opentelemetry-zipkin/src/exporter/model/span.rs b/opentelemetry-zipkin/src/exporter/model/span.rs index 6e21b52588..8c9c7fd5a1 100644 --- a/opentelemetry-zipkin/src/exporter/model/span.rs +++ b/opentelemetry-zipkin/src/exporter/model/span.rs @@ -60,9 +60,8 @@ mod tests { use crate::exporter::model::span::{Kind, Span}; use crate::exporter::model::{into_zipkin_span, OTEL_ERROR_DESCRIPTION, OTEL_STATUS_CODE}; use 
opentelemetry::trace::{SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId}; + use opentelemetry_sdk::export::trace::SpanData; use opentelemetry_sdk::trace::{SpanEvents, SpanLinks}; - use opentelemetry_sdk::{export::trace::SpanData, Resource}; - use std::borrow::Cow; use std::collections::HashMap; use std::net::Ipv4Addr; use std::time::SystemTime; @@ -166,7 +165,6 @@ mod tests { events: SpanEvents::default(), links: SpanLinks::default(), status, - resource: Cow::Owned(Resource::default()), instrumentation_lib: Default::default(), }; let local_endpoint = Endpoint::new("test".into(), None); From 82fa485359745e5bda9eadc9c080bb84f98a4649 Mon Sep 17 00:00:00 2001 From: Zhongyang Wu Date: Thu, 30 May 2024 10:04:08 -0700 Subject: [PATCH 37/37] feat: clean up trace SDK APIs (#1755) Co-authored-by: Cijo Thomas --- examples/tracing-jaeger/src/main.rs | 2 +- opentelemetry-appender-tracing/src/layer.rs | 7 ++++--- .../tests/integration_test/tests/traces.rs | 2 +- opentelemetry-sdk/CHANGELOG.md | 4 ++++ opentelemetry-sdk/benches/context.rs | 7 +++++-- opentelemetry-sdk/benches/log.rs | 5 +++-- opentelemetry-sdk/benches/span_builder.rs | 2 +- opentelemetry-sdk/benches/trace.rs | 4 ++-- opentelemetry-sdk/src/trace/config.rs | 2 ++ opentelemetry-sdk/src/trace/provider.rs | 4 ++-- opentelemetry-sdk/src/trace/tracer.rs | 4 ++-- opentelemetry-zipkin/src/exporter/mod.rs | 11 ++--------- scripts/integration_tests.sh | 4 ---- stress/src/traces.rs | 2 +- 14 files changed, 30 insertions(+), 30 deletions(-) diff --git a/examples/tracing-jaeger/src/main.rs b/examples/tracing-jaeger/src/main.rs index 5ddb5a843c..e6c3dfdb2b 100644 --- a/examples/tracing-jaeger/src/main.rs +++ b/examples/tracing-jaeger/src/main.rs @@ -19,7 +19,7 @@ fn init_tracer_provider() -> Result Result { .tracing() .with_exporter(opentelemetry_otlp::new_exporter().tonic()) .with_trace_config( - sdktrace::config().with_resource(Resource::new(vec![KeyValue::new( + 
sdktrace::Config::default().with_resource(Resource::new(vec![KeyValue::new( opentelemetry_semantic_conventions::resource::SERVICE_NAME, "basic-otlp-tracing-example", )])), diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index ccd7fd1e36..25c4775284 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -27,6 +27,10 @@ reference or owned`LogData`. If the exporter needs to process the log data asynchronously, it should clone the log data to ensure it can be safely processed without lifetime issues. +- Clean up public methods in SDK. + - [`TracerProvider::span_processors`] and [`TracerProvider::config`] was removed as it's not part of the spec. + - Added `non_exhaustive` annotation to [`trace::Config`]. Marked [`config`] as deprecated since it's only a wrapper for `Config::default` + - Removed [`Tracer::tracer_provder`] and [`Tracer::instrument_libraries`] as it's not part of the spec. - **Breaking** [#1830](https://github.com/open-telemetry/opentelemetry-rust/pull/1830/files) [Traces SDK] Improves performance by sending Resource information to processors (and exporters) once, instead of sending with every log. 
If you are an author diff --git a/opentelemetry-sdk/benches/context.rs b/opentelemetry-sdk/benches/context.rs index f5a1f7e2df..3ef494990c 100644 --- a/opentelemetry-sdk/benches/context.rs +++ b/opentelemetry-sdk/benches/context.rs @@ -10,7 +10,8 @@ use opentelemetry::{ }; use opentelemetry_sdk::{ export::trace::{ExportResult, SpanData, SpanExporter}, - trace::{config, Sampler, TracerProvider}, + trace, + trace::{Sampler, TracerProvider}, }; #[cfg(not(target_os = "windows"))] use pprof::criterion::{Output, PProfProfiler}; @@ -126,7 +127,9 @@ impl Display for Environment { fn parent_sampled_tracer(inner_sampler: Sampler) -> (TracerProvider, BoxedTracer) { let provider = TracerProvider::builder() - .with_config(config().with_sampler(Sampler::ParentBased(Box::new(inner_sampler)))) + .with_config( + trace::Config::default().with_sampler(Sampler::ParentBased(Box::new(inner_sampler))), + ) .with_simple_exporter(NoopExporter) .build(); let tracer = provider.tracer(module_path!()); diff --git a/opentelemetry-sdk/benches/log.rs b/opentelemetry-sdk/benches/log.rs index 72f205c5e0..21b807cbf7 100644 --- a/opentelemetry-sdk/benches/log.rs +++ b/opentelemetry-sdk/benches/log.rs @@ -17,7 +17,8 @@ use opentelemetry::trace::TracerProvider as _; use opentelemetry::Key; use opentelemetry_sdk::export::logs::{LogData, LogExporter}; use opentelemetry_sdk::logs::{Logger, LoggerProvider}; -use opentelemetry_sdk::trace::{config, Sampler, TracerProvider}; +use opentelemetry_sdk::trace; +use opentelemetry_sdk::trace::{Sampler, TracerProvider}; #[derive(Debug)] struct VoidExporter; @@ -51,7 +52,7 @@ fn log_benchmark_group(c: &mut Criterion, name: &str, f: F) { // setup tracing as well. 
let tracer_provider = TracerProvider::builder() - .with_config(config().with_sampler(Sampler::AlwaysOn)) + .with_config(trace::Config::default().with_sampler(Sampler::AlwaysOn)) .build(); let tracer = tracer_provider.tracer("bench-tracer"); diff --git a/opentelemetry-sdk/benches/span_builder.rs b/opentelemetry-sdk/benches/span_builder.rs index de5c1fd235..10fd4addfe 100644 --- a/opentelemetry-sdk/benches/span_builder.rs +++ b/opentelemetry-sdk/benches/span_builder.rs @@ -54,7 +54,7 @@ fn span_builder_benchmark_group(c: &mut Criterion) { fn not_sampled_provider() -> (sdktrace::TracerProvider, sdktrace::Tracer) { let provider = sdktrace::TracerProvider::builder() - .with_config(sdktrace::config().with_sampler(sdktrace::Sampler::AlwaysOff)) + .with_config(sdktrace::Config::default().with_sampler(sdktrace::Sampler::AlwaysOff)) .with_simple_exporter(NoopExporter) .build(); let tracer = provider.tracer("not-sampled"); diff --git a/opentelemetry-sdk/benches/trace.rs b/opentelemetry-sdk/benches/trace.rs index 93bdf3859a..ed4d3add82 100644 --- a/opentelemetry-sdk/benches/trace.rs +++ b/opentelemetry-sdk/benches/trace.rs @@ -70,7 +70,7 @@ fn trace_benchmark_group(c: &mut Criterion, name: &str group.bench_function("always-sample", |b| { let provider = sdktrace::TracerProvider::builder() - .with_config(sdktrace::config().with_sampler(sdktrace::Sampler::AlwaysOn)) + .with_config(sdktrace::Config::default().with_sampler(sdktrace::Sampler::AlwaysOn)) .with_simple_exporter(VoidExporter) .build(); let always_sample = provider.tracer("always-sample"); @@ -80,7 +80,7 @@ fn trace_benchmark_group(c: &mut Criterion, name: &str group.bench_function("never-sample", |b| { let provider = sdktrace::TracerProvider::builder() - .with_config(sdktrace::config().with_sampler(sdktrace::Sampler::AlwaysOff)) + .with_config(sdktrace::Config::default().with_sampler(sdktrace::Sampler::AlwaysOff)) .with_simple_exporter(VoidExporter) .build(); let never_sample = provider.tracer("never-sample"); diff 
--git a/opentelemetry-sdk/src/trace/config.rs b/opentelemetry-sdk/src/trace/config.rs index 88aa925286..368f69dacd 100644 --- a/opentelemetry-sdk/src/trace/config.rs +++ b/opentelemetry-sdk/src/trace/config.rs @@ -10,12 +10,14 @@ use std::env; use std::str::FromStr; /// Default trace configuration +#[deprecated(since = "0.23.0", note = "Use Config::default() instead")] pub fn config() -> Config { Config::default() } /// Tracer configuration #[derive(Debug)] +#[non_exhaustive] pub struct Config { /// The sampler that the sdk should use pub sampler: Box, diff --git a/opentelemetry-sdk/src/trace/provider.rs b/opentelemetry-sdk/src/trace/provider.rs index 560d37dae9..701a784f90 100644 --- a/opentelemetry-sdk/src/trace/provider.rs +++ b/opentelemetry-sdk/src/trace/provider.rs @@ -62,12 +62,12 @@ impl TracerProvider { } /// Span processors associated with this provider - pub fn span_processors(&self) -> &[Box] { + pub(crate) fn span_processors(&self) -> &[Box] { &self.inner.processors } /// Config associated with this tracer - pub fn config(&self) -> &crate::trace::Config { + pub(crate) fn config(&self) -> &crate::trace::Config { &self.inner.config } diff --git a/opentelemetry-sdk/src/trace/tracer.rs b/opentelemetry-sdk/src/trace/tracer.rs index 2ca1af30c0..0749937cd7 100644 --- a/opentelemetry-sdk/src/trace/tracer.rs +++ b/opentelemetry-sdk/src/trace/tracer.rs @@ -55,12 +55,12 @@ impl Tracer { } /// TracerProvider associated with this tracer. - pub fn provider(&self) -> Option { + pub(crate) fn provider(&self) -> Option { self.provider.upgrade().map(TracerProvider::new) } /// Instrumentation library information of this tracer. 
- pub fn instrumentation_library(&self) -> &InstrumentationLibrary { + pub(crate) fn instrumentation_library(&self) -> &InstrumentationLibrary { &self.instrumentation_lib } diff --git a/opentelemetry-zipkin/src/exporter/mod.rs b/opentelemetry-zipkin/src/exporter/mod.rs index 0bd2d19016..88edcfb025 100644 --- a/opentelemetry-zipkin/src/exporter/mod.rs +++ b/opentelemetry-zipkin/src/exporter/mod.rs @@ -106,10 +106,7 @@ impl ZipkinPipelineBuilder { )); cfg } else { - Config { - resource: Cow::Owned(Resource::empty()), - ..Default::default() - } + Config::default().with_resource(Resource::empty()) }; (config, Endpoint::new(service_name, self.service_addr)) } else { @@ -119,11 +116,7 @@ impl ZipkinPipelineBuilder { .unwrap() .to_string(); ( - Config { - // use a empty resource to prevent TracerProvider to assign a service name. - resource: Cow::Owned(Resource::empty()), - ..Default::default() - }, + Config::default().with_resource(Resource::empty()), Endpoint::new(service_name, self.service_addr), ) } diff --git a/scripts/integration_tests.sh b/scripts/integration_tests.sh index cff6834f84..361098a5fc 100755 --- a/scripts/integration_tests.sh +++ b/scripts/integration_tests.sh @@ -1,5 +1 @@ -#COMPOSE_FILE=./opentelemetry-jaeger/tests/docker-compose.yaml -#docker-compose -f $COMPOSE_FILE down -v && -#docker-compose -f $COMPOSE_FILE up --build --abort-on-container-exit --exit-code-from opentelemetry-jaeger - cargo test ./opentelemetry-otlp/tests/integration_test/tests -- --ignored diff --git a/stress/src/traces.rs b/stress/src/traces.rs index 9f9065d1a5..3924dc9e2b 100644 --- a/stress/src/traces.rs +++ b/stress/src/traces.rs @@ -20,7 +20,7 @@ mod throughput; lazy_static! 
{ static ref PROVIDER: sdktrace::TracerProvider = sdktrace::TracerProvider::builder() - .with_config(sdktrace::config().with_sampler(sdktrace::Sampler::AlwaysOn)) + .with_config(sdktrace::Config::default().with_sampler(sdktrace::Sampler::AlwaysOn)) .with_span_processor(NoOpSpanProcessor {}) .build(); static ref TRACER: sdktrace::Tracer = PROVIDER.tracer("stress");