diff --git a/CHANGELOG.md b/CHANGELOG.md index 347bfd6573..4fb738a2b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ **Internal**: - Treat arrays of pairs as key-value mappings during PII scrubbing. ([#3639](https://github.com/getsentry/relay/pull/3639)) +- Rate limit envelopes instead of metrics for sampled/indexed items. ([#3716](https://github.com/getsentry/relay/pull/3716)) - Improve flush time calculation in metrics aggregator. ([#3726](https://github.com/getsentry/relay/pull/3726)) ## 24.5.1 diff --git a/relay-base-schema/src/data_category.rs b/relay-base-schema/src/data_category.rs index b38ae5ccd7..35840ca19a 100644 --- a/relay-base-schema/src/data_category.rs +++ b/relay-base-schema/src/data_category.rs @@ -169,9 +169,19 @@ impl DataCategory { pub fn index_category(self) -> Option<Self> { match self { Self::Transaction => Some(Self::TransactionIndexed), + Self::Span => Some(Self::SpanIndexed), + Self::Profile => Some(Self::ProfileIndexed), _ => None, } } + + /// Returns `true` if this data category is an indexed data category. + pub fn is_indexed(self) -> bool { + matches!( + self, + Self::TransactionIndexed | Self::SpanIndexed | Self::ProfileIndexed + ) + } } impl fmt::Display for DataCategory { diff --git a/relay-metrics/src/aggregator.rs b/relay-metrics/src/aggregator.rs index 374b1bd9c9..b68012f7db 100644 --- a/relay-metrics/src/aggregator.rs +++ b/relay-metrics/src/aggregator.rs @@ -64,6 +64,7 @@ struct BucketKey { timestamp: UnixTimestamp, metric_name: MetricName, tags: BTreeMap<String, String>, + extracted_from_indexed: bool, } impl BucketKey { @@ -725,6 +726,7 @@ impl Aggregator { timestamp, metric_name: bucket.name, tags: bucket.tags, + extracted_from_indexed: bucket.metadata.extracted_from_indexed, }; let key = validate_bucket_key(key, &self.config)?; @@ -1012,6 +1014,7 @@ mod tests { "c:transactions/foo@none", ), tags: {}, + extracted_from_indexed: false, }, Counter( 85.0, @@ -1061,10 +1064,11 @@ mod tests { ("hello".to_owned(), "world".to_owned()), ("answer".to_owned(), "42".to_owned()), ]), + extracted_from_indexed: false, }; assert_eq!( bucket_key.cost(), - 80 + // BucketKey + 88 + // BucketKey 5 + // name (5 + 5 + 6 + 2) // tags ); @@ -1107,6 +1111,7 @@ mod tests { "c:transactions/foo@none", ), tags: {}, + extracted_from_indexed: false, }, Counter( 84.0, @@ -1120,6 +1125,7 @@ mod tests { "c:transactions/foo@none", ), tags: {}, + extracted_from_indexed: false, }, Counter( 42.0, @@ -1242,6 +1248,7 @@ mod tests { timestamp: UnixTimestamp::now(), metric_name: "c:transactions/foo@none".into(), tags: BTreeMap::new(), + extracted_from_indexed: false, }; let fixed_cost = bucket_key.cost() + mem::size_of::<BucketValue>(); for (metric_name, metric_value, expected_added_cost) in [ @@ -1402,6 +1409,7 @@ mod tests { tags.insert("another\0garbage".to_owned(), "bye".to_owned()); tags }, + extracted_from_indexed: false, }; let mut bucket_key = validate_bucket_key(bucket_key, &test_config()).unwrap(); @@ -1427,6 +1435,7 @@ mod tests { timestamp: UnixTimestamp::now(), metric_name: "c:transactions/a_short_metric".into(), tags: BTreeMap::new(), + extracted_from_indexed: false, }; assert!(validate_bucket_key(short_metric, &test_config()).is_ok()); @@ -1435,6 +1444,7 @@ mod tests { timestamp: UnixTimestamp::now(), metric_name:
"c:transactions/long_name_a_very_long_name_its_super_long_really_but_like_super_long_probably_the_longest_name_youve_seen_and_even_the_longest_name_ever_its_extremly_long_i_cant_tell_how_long_it_is_because_i_dont_have_that_many_fingers_thus_i_cant_count_the_many_characters_this_long_name_is".into(), tags: BTreeMap::new(), + extracted_from_indexed: false, }; let validation = validate_bucket_key(long_metric, &test_config()); @@ -1450,6 +1460,7 @@ mod tests { timestamp: UnixTimestamp::now(), metric_name: "c:transactions/a_short_metric_with_long_tag_key".into(), tags: BTreeMap::from([("i_run_out_of_creativity_so_here_we_go_Lorem_Ipsum_is_simply_dummy_text_of_the_printing_and_typesetting_industry_Lorem_Ipsum_has_been_the_industrys_standard_dummy_text_ever_since_the_1500s_when_an_unknown_printer_took_a_galley_of_type_and_scrambled_it_to_make_a_type_specimen_book".into(), "tag_value".into())]), + extracted_from_indexed: false, }; let validation = validate_bucket_key(short_metric_long_tag_key, &test_config()).unwrap(); assert_eq!(validation.tags.len(), 0); @@ -1459,6 +1470,7 @@ mod tests { timestamp: UnixTimestamp::now(), metric_name: "c:transactions/a_short_metric_with_long_tag_value".into(), tags: BTreeMap::from([("tag_key".into(), "i_run_out_of_creativity_so_here_we_go_Lorem_Ipsum_is_simply_dummy_text_of_the_printing_and_typesetting_industry_Lorem_Ipsum_has_been_the_industrys_standard_dummy_text_ever_since_the_1500s_when_an_unknown_printer_took_a_galley_of_type_and_scrambled_it_to_make_a_type_specimen_book".into())]), + extracted_from_indexed: false, }; let validation = validate_bucket_key(short_metric_long_tag_value, &test_config()).unwrap(); assert_eq!(validation.tags.len(), 0); @@ -1476,6 +1488,7 @@ mod tests { timestamp: UnixTimestamp::now(), metric_name: "c:transactions/a_short_metric".into(), tags: BTreeMap::from([("foo".into(), tag_value.clone())]), + extracted_from_indexed: false, }; let validated_bucket = validate_metric_tags(short_metric, &test_config()); assert_eq!(validated_bucket.tags["foo"], tag_value); @@ -1583,6 +1596,7 @@ mod tests { received_at: Some( UnixTimestamp(999994711), ), + extracted_from_indexed: false, }, ] "###); @@ -1609,6 +1623,7 @@ mod tests { timestamp, metric_name: "c:transactions/foo".into(), tags: BTreeMap::new(), + extracted_from_indexed: false, }; // Second bucket has a timestamp in this hour. @@ -1618,6 +1633,7 @@ mod tests { timestamp, metric_name: "c:transactions/foo".into(), tags: BTreeMap::new(), + extracted_from_indexed: false, }; let flush_time_1 = get_flush_time(&config, reference_time, &bucket_key_1); diff --git a/relay-metrics/src/bucket.rs b/relay-metrics/src/bucket.rs index 0230978cd5..674af4eb2c 100644 --- a/relay-metrics/src/bucket.rs +++ b/relay-metrics/src/bucket.rs @@ -759,6 +759,17 @@ pub struct BucketMetadata { /// This field should be set to the time in which the first metric of a specific bucket was /// received in the outermost internal Relay. pub received_at: Option, + + /// Is `true` if this metric was extracted from a sampled/indexed envelope item. + /// + /// The final dynamic sampling decision is always made in processing Relays. + /// If a metric was extracted from an item which is sampled (i.e. retained by dynamic sampling), this flag is `true`. + /// + /// Since these metrics from samples carry additional information, e.g. they don't + /// require rate limiting since the sample they've been extracted from was already + /// rate limited, this flag must be included in the aggregation key when aggregation buckets. 
+ #[serde(skip)] + pub extracted_from_indexed: bool, } impl BucketMetadata { @@ -769,6 +780,7 @@ impl BucketMetadata { Self { merges: 1, received_at: Some(received_at), + extracted_from_indexed: false, } } @@ -793,6 +805,7 @@ impl Default for BucketMetadata { Self { merges: 1, received_at: None, + extracted_from_indexed: false, } } } @@ -927,6 +940,7 @@ mod tests { metadata: BucketMetadata { merges: 1, received_at: None, + extracted_from_indexed: false, }, } "###); @@ -961,6 +975,7 @@ mod tests { metadata: BucketMetadata { merges: 1, received_at: None, + extracted_from_indexed: false, }, } "###); @@ -1013,6 +1028,7 @@ mod tests { metadata: BucketMetadata { merges: 1, received_at: None, + extracted_from_indexed: false, }, } "###); @@ -1073,6 +1089,7 @@ mod tests { metadata: BucketMetadata { merges: 1, received_at: None, + extracted_from_indexed: false, }, } "###); @@ -1103,6 +1120,7 @@ mod tests { metadata: BucketMetadata { merges: 1, received_at: None, + extracted_from_indexed: false, }, } "###); @@ -1127,6 +1145,7 @@ mod tests { metadata: BucketMetadata { merges: 1, received_at: None, + extracted_from_indexed: false, }, } "###); @@ -1330,6 +1349,7 @@ mod tests { received_at: Some( UnixTimestamp(1615889440), ), + extracted_from_indexed: false, }, }, ] @@ -1371,6 +1391,7 @@ mod tests { received_at: Some( UnixTimestamp(1615889440), ), + extracted_from_indexed: false, }, }, ] @@ -1459,7 +1480,8 @@ mod tests { metadata, BucketMetadata { merges: 2, - received_at: None + received_at: None, + extracted_from_indexed: false, } ); @@ -1469,7 +1491,8 @@ mod tests { metadata, BucketMetadata { merges: 3, - received_at: Some(UnixTimestamp::from_secs(10)) + received_at: Some(UnixTimestamp::from_secs(10)), + extracted_from_indexed: false, } ); @@ -1479,7 +1502,8 @@ mod tests { metadata, BucketMetadata { merges: 4, - received_at: Some(UnixTimestamp::from_secs(10)) + received_at: Some(UnixTimestamp::from_secs(10)), + extracted_from_indexed: false, } ); } diff --git a/relay-metrics/src/view.rs b/relay-metrics/src/view.rs index 7130e621ec..adea53d178 100644 --- a/relay-metrics/src/view.rs +++ b/relay-metrics/src/view.rs @@ -478,7 +478,7 @@ impl<'a> BucketView<'a> { BucketMetadata { merges, - received_at: self.inner.metadata.received_at, + ..self.inner.metadata } } diff --git a/relay-quotas/src/rate_limit.rs b/relay-quotas/src/rate_limit.rs index 2086dccabf..b77c612952 100644 --- a/relay-quotas/src/rate_limit.rs +++ b/relay-quotas/src/rate_limit.rs @@ -332,6 +332,14 @@ impl RateLimits { pub fn longest(&self) -> Option<&RateLimit> { self.iter().max_by_key(|limit| limit.retry_after) } + + /// Returns `true` if there are no rate limits contained. + /// + /// This is equivalent to checking whether [`Self::longest`] returns `None` + /// or [`Self::iter`] returns an empty iterator. + pub fn is_empty(&self) -> bool { + self.limits.is_empty() + } } /// Immutable rate limits iterator. @@ -385,6 +393,56 @@ impl<'a> IntoIterator for &'a RateLimits { } } +/// Like [`RateLimits`], a collection of scoped rate limits, but with all the checks +/// necessary to cache the limits. +/// +/// The data structure makes sure that no expired rate limits are enforced and +/// that any indexed rate limits are removed. +/// +/// Cached rate limits don't enforce indexed rate limits because, at the time of the check, +/// the decision whether an envelope is sampled or not is not yet known.
Additionally, +/// even if the item is later dropped by dynamic sampling, it must still be around to extract metrics +/// and cannot be dropped too early. +#[derive(Debug, Default)] +pub struct CachedRateLimits(RateLimits); + +impl CachedRateLimits { + /// Creates a new, empty instance without any rate limits enforced. + pub fn new() -> Self { + Self::default() + } + + /// Adds a limit to this collection. + /// + /// See also: [`RateLimits::add`]. + pub fn add(&mut self, mut limit: RateLimit) { + if !limit.categories.is_empty() { + limit.categories.retain(|category| !category.is_indexed()); + if limit.categories.is_empty() { + return; + } + } + self.0.add(limit); + } + + /// Merges more rate limits into this instance. + /// + /// See also: [`RateLimits::merge`]. + pub fn merge(&mut self, rate_limits: RateLimits) { + for limit in rate_limits { + self.add(limit) + } + } + + /// Returns a reference to the contained rate limits. + /// + /// This call guarantees that, at the time of the call, no returned rate limit is expired. + pub fn current_limits(&mut self) -> &RateLimits { + self.0.clean_expired(); + &self.0 + } +} + #[cfg(test)] mod tests { use smallvec::smallvec; @@ -1067,4 +1125,84 @@ mod tests { ) "###); } + + #[test] + fn test_cached_rate_limits_expired() { + let mut cached = CachedRateLimits::new(); + + // Active error limit + cached.add(RateLimit { + categories: smallvec![DataCategory::Error], + scope: RateLimitScope::Organization(42), + reason_code: None, + retry_after: RetryAfter::from_secs(1), + namespaces: smallvec![], + }); + + // Inactive error limit with distinct scope + cached.add(RateLimit { + categories: smallvec![DataCategory::Error], + scope: RateLimitScope::Project(ProjectId::new(21)), + reason_code: None, + retry_after: RetryAfter::from_secs(0), + namespaces: smallvec![], + }); + + let rate_limits = cached.current_limits(); + + insta::assert_ron_snapshot!(rate_limits, @r###" + RateLimits( + limits: [ + RateLimit( + categories: [ + error, + ], + scope: Organization(42), + reason_code: None, + retry_after: RetryAfter(1), + namespaces: [], + ), + ], + ) + "###); + } + + #[test] + fn test_cached_rate_limits_indexed() { + let mut cached = CachedRateLimits::new(); + + cached.add(RateLimit { + categories: smallvec![DataCategory::Transaction, DataCategory::TransactionIndexed], + scope: RateLimitScope::Organization(42), + reason_code: None, + retry_after: RetryAfter::from_secs(5), + namespaces: smallvec![], + }); + + cached.add(RateLimit { + categories: smallvec![DataCategory::TransactionIndexed], + scope: RateLimitScope::Project(ProjectId::new(21)), + reason_code: None, + retry_after: RetryAfter::from_secs(5), + namespaces: smallvec![], + }); + + let rate_limits = cached.current_limits(); + + insta::assert_ron_snapshot!(rate_limits, @r###" + RateLimits( + limits: [ + RateLimit( + categories: [ + transaction, + ], + scope: Organization(42), + reason_code: None, + retry_after: RetryAfter(5), + namespaces: [], + ), + ], + ) + "###); + } } diff --git a/relay-sampling/src/evaluation.rs b/relay-sampling/src/evaluation.rs index 8f29bae309..7f08dc8fa8 100644 --- a/relay-sampling/src/evaluation.rs +++ b/relay-sampling/src/evaluation.rs @@ -245,11 +245,11 @@ impl<'a> SamplingEvaluator<'a> { } } -fn sampling_match(sample_rate: f64, seed: Uuid) -> bool { - if sample_rate == 0.0 { - return false; - } else if sample_rate == 1.0 { - return true; +fn sampling_match(sample_rate: f64, seed: Uuid) -> SamplingDecision { + if sample_rate <= 0.0 { + return SamplingDecision::Drop; + } else if sample_rate
>= 1.0 { + return SamplingDecision::Keep; } let random_number = pseudo_random_from_uuid(seed); @@ -261,10 +261,45 @@ if random_number >= sample_rate { relay_log::trace!("dropping event that matched the configuration"); - false + SamplingDecision::Drop } else { relay_log::trace!("keeping event that matched the configuration"); - true + SamplingDecision::Keep + } +} + +/// A sampling decision. +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum SamplingDecision { + /// The item is sampled and should not be dropped. + Keep, + /// The item is not sampled and should be dropped. + Drop, +} + +impl SamplingDecision { + /// Returns `true` if the sampling decision is [`Self::Keep`]. + pub fn is_keep(self) -> bool { + matches!(self, Self::Keep) + } + + /// Returns `true` if the sampling decision is [`Self::Drop`]. + pub fn is_drop(self) -> bool { + matches!(self, Self::Drop) + } + + /// Returns a string representation of the sampling decision. + pub fn as_str(self) -> &'static str { + match self { + Self::Keep => "keep", + Self::Drop => "drop", + } + } +} + +impl fmt::Display for SamplingDecision { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.as_str()) } } @@ -284,19 +319,19 @@ pub struct SamplingMatch { /// Whether this sampling match results in the item getting sampled. /// It's essentially a cache, as the value can be deterministically derived from /// the sample rate and the seed. - should_keep: bool, + decision: SamplingDecision, } impl SamplingMatch { fn new(sample_rate: f64, seed: Uuid, matched_rules: Vec<RuleId>) -> Self { let matched_rules = MatchedRuleIds(matched_rules); - let should_keep = sampling_match(sample_rate, seed); + let decision = sampling_match(sample_rate, seed); Self { sample_rate, seed, matched_rules, - should_keep, + decision, } } @@ -313,14 +348,9 @@ impl SamplingMatch { self.matched_rules } - /// Returns true if event should be kept. - pub fn should_keep(&self) -> bool { - self.should_keep - } - - /// Returns true if event should be dropped. - pub fn should_drop(&self) -> bool { - !self.should_keep() + /// Returns the sampling decision. + pub fn decision(&self) -> SamplingDecision { + self.decision + } } diff --git a/relay-server/src/envelope.rs b/relay-server/src/envelope.rs index be95a1006e..4ddf19e5f7 100644 --- a/relay-server/src/envelope.rs +++ b/relay-server/src/envelope.rs @@ -678,14 +678,10 @@ impl Item { /// Returns the data category used for generating outcomes. /// /// Returns `None` if outcomes are not generated for this type (e.g. sessions).
- pub fn outcome_category(&self, indexed: bool) -> Option<DataCategory> { + pub fn outcome_category(&self) -> Option<DataCategory> { match self.ty() { ItemType::Event => Some(DataCategory::Error), - ItemType::Transaction => Some(if indexed { - DataCategory::TransactionIndexed - } else { - DataCategory::Transaction - }), + ItemType::Transaction => Some(DataCategory::Transaction), ItemType::Security | ItemType::RawSecurity => Some(DataCategory::Security), ItemType::Nel => None, ItemType::UnrealReport => Some(DataCategory::Error), @@ -695,21 +691,13 @@ impl Item { ItemType::FormData => None, ItemType::UserReport => None, ItemType::UserReportV2 => Some(DataCategory::UserReportV2), - ItemType::Profile => Some(if indexed { - DataCategory::ProfileIndexed - } else { - DataCategory::Profile - }), + ItemType::Profile => Some(DataCategory::Profile), ItemType::ReplayEvent | ItemType::ReplayRecording | ItemType::ReplayVideo => { Some(DataCategory::Replay) } ItemType::ClientReport => None, ItemType::CheckIn => Some(DataCategory::Monitor), - ItemType::Span | ItemType::OtelSpan => Some(if indexed { - DataCategory::SpanIndexed - } else { - DataCategory::Span - }), + ItemType::Span | ItemType::OtelSpan => Some(DataCategory::Span), ItemType::ProfileChunk => Some(DataCategory::ProfileChunk), ItemType::Unknown(_) => None, } diff --git a/relay-server/src/metrics/rate_limits.rs b/relay-server/src/metrics/rate_limits.rs index 50bcc9a2ed..1bb9f5cb20 100644 --- a/relay-server/src/metrics/rate_limits.rs +++ b/relay-server/src/metrics/rate_limits.rs @@ -112,7 +112,11 @@ impl<Q: AsRef<Vec<Quota>>> MetricsLimiter<Q> { let buckets: Vec<_> = buckets .into_iter() .map(|bucket| { - let summary = bucket.summary(); + // Sampled buckets are not rate limited, because the sample has already been rate limited. + let summary = match bucket.metadata.extracted_from_indexed { + false => bucket.summary(), + true => Default::default(), + }; SummarizedBucket { bucket, summary } }) .collect(); diff --git a/relay-server/src/metrics_extraction/generic.rs b/relay-server/src/metrics_extraction/generic.rs index 18ca1a9c94..f4215e00c7 100644 --- a/relay-server/src/metrics_extraction/generic.rs +++ b/relay-server/src/metrics_extraction/generic.rs @@ -218,6 +218,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] @@ -268,6 +269,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] @@ -320,6 +322,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] @@ -386,6 +389,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] @@ -449,6 +453,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] @@ -516,6 +521,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] @@ -591,6 +597,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] diff --git a/relay-server/src/metrics_extraction/sessions/mod.rs b/relay-server/src/metrics_extraction/sessions/mod.rs index 38f8f5e8ef..8681941e21 100644 --- a/relay-server/src/metrics_extraction/sessions/mod.rs +++ b/relay-server/src/metrics_extraction/sessions/mod.rs @@ -496,6 +496,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -518,6 +519,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -540,6 +542,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ),
+ extracted_from_indexed: false, }, }, Bucket { @@ -562,6 +565,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -584,6 +588,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -606,6 +611,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -630,6 +636,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] diff --git a/relay-server/src/metrics_extraction/snapshots/relay_server__metrics_extraction__event__tests__both_feature_flags_enabled.snap b/relay-server/src/metrics_extraction/snapshots/relay_server__metrics_extraction__event__tests__both_feature_flags_enabled.snap index c51a96e7d7..a3c8bf0ad6 100644 --- a/relay-server/src/metrics_extraction/snapshots/relay_server__metrics_extraction__event__tests__both_feature_flags_enabled.snap +++ b/relay-server/src/metrics_extraction/snapshots/relay_server__metrics_extraction__event__tests__both_feature_flags_enabled.snap @@ -18,6 +18,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -42,6 +43,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -66,6 +68,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -83,6 +86,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -107,6 +111,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -131,6 +136,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -148,6 +154,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -177,6 +184,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -203,6 +211,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -232,6 +241,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -249,6 +259,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -278,6 +289,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -304,6 +316,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -333,6 +346,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -350,6 +364,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -379,6 +394,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -405,6 +421,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -434,6 +451,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -451,6 +469,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -479,6 +498,7 @@ expression: metrics 
received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -504,6 +524,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -532,6 +553,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -549,6 +571,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -578,6 +601,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -604,6 +628,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -633,6 +658,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -650,6 +676,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -680,6 +707,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -707,6 +735,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -737,6 +766,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -754,6 +784,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -784,6 +815,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -811,6 +843,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -841,6 +874,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -858,6 +892,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -888,6 +923,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -915,6 +951,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -945,6 +982,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -962,6 +1000,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -992,6 +1031,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1019,6 +1059,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1049,6 +1090,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1066,6 +1108,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1096,6 +1139,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1123,6 +1167,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1153,6 +1198,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1170,6 +1216,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, 
Bucket { @@ -1200,6 +1247,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1227,6 +1275,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1257,6 +1306,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1274,6 +1324,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1304,6 +1355,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1331,6 +1383,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1361,6 +1414,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1378,6 +1432,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1408,6 +1463,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1435,6 +1491,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1465,6 +1522,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1482,6 +1540,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1510,6 +1569,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1535,6 +1595,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1563,6 +1624,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1580,6 +1642,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1610,6 +1673,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1637,6 +1701,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1667,6 +1732,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1684,6 +1750,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1712,6 +1779,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1737,6 +1805,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1765,6 +1834,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1782,6 +1852,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1812,6 +1883,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1839,6 +1911,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1869,6 +1942,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1886,6 +1960,7 @@ expression: 
metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1916,6 +1991,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1943,6 +2019,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1973,6 +2050,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1990,6 +2068,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2018,6 +2097,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2043,6 +2123,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2071,6 +2152,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2088,6 +2170,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2116,6 +2199,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2141,6 +2225,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2169,6 +2254,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2186,6 +2272,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2215,6 +2302,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2241,6 +2329,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2270,6 +2359,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2287,6 +2377,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2312,6 +2403,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2338,6 +2430,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2363,6 +2456,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2387,6 +2481,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2404,6 +2499,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2429,6 +2525,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2455,6 +2552,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2480,6 +2578,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2504,6 +2603,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2521,6 +2621,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2545,6 +2646,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), 
), + extracted_from_indexed: false, }, }, Bucket { @@ -2569,6 +2671,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2586,6 +2689,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2610,6 +2714,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2634,6 +2739,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2651,6 +2757,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2675,6 +2782,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2699,6 +2807,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2716,6 +2825,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2745,6 +2855,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2772,6 +2883,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2801,6 +2913,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2818,6 +2931,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2842,6 +2956,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2866,6 +2981,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2883,6 +2999,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2911,6 +3028,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2936,6 +3054,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2964,6 +3083,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2981,6 +3101,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3005,6 +3126,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3029,6 +3151,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3046,6 +3169,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3074,6 +3198,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3099,6 +3224,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3127,6 +3253,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3144,6 +3271,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3168,6 +3296,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, 
Bucket { @@ -3192,6 +3321,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3209,6 +3339,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3233,6 +3364,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3257,6 +3389,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3274,6 +3407,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3301,6 +3435,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3328,6 +3463,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3345,6 +3481,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3375,6 +3512,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3403,6 +3541,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3433,6 +3572,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3461,6 +3601,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3488,6 +3629,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3515,6 +3657,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3532,6 +3675,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3562,6 +3706,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3590,6 +3735,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3620,6 +3766,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3648,6 +3795,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3675,6 +3823,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3702,6 +3851,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3719,6 +3869,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3743,6 +3894,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3767,6 +3919,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3784,6 +3937,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3813,6 +3967,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3839,6 +3994,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3868,6 +4024,7 @@ expression: 
metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3885,6 +4042,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3914,6 +4072,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3940,6 +4099,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3969,6 +4129,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3986,6 +4147,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4015,6 +4177,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4041,6 +4204,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4070,6 +4234,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4087,6 +4252,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4115,6 +4281,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4140,6 +4307,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4168,6 +4336,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4185,6 +4354,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4215,6 +4385,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4242,6 +4413,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4272,6 +4444,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4289,6 +4462,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4319,6 +4493,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4346,6 +4521,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4376,6 +4552,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4393,6 +4570,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4423,6 +4601,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4450,6 +4629,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4480,6 +4660,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4497,6 +4678,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4527,6 +4709,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4554,6 +4737,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), 
), + extracted_from_indexed: false, }, }, Bucket { @@ -4584,6 +4768,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4601,6 +4786,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4631,6 +4817,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4658,6 +4845,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4688,6 +4876,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4705,6 +4894,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4735,6 +4925,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4762,6 +4953,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4792,6 +4984,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4809,6 +5002,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4839,6 +5033,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4866,6 +5061,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4896,6 +5092,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4913,6 +5110,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4943,6 +5141,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4970,6 +5169,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5000,6 +5200,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5017,6 +5218,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5045,6 +5247,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5070,6 +5273,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5098,6 +5302,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5115,6 +5320,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5145,6 +5351,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5172,6 +5379,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5202,6 +5410,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5219,6 +5428,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5247,6 +5457,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, 
Bucket { @@ -5272,6 +5483,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5300,6 +5512,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5317,6 +5530,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5347,6 +5561,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5374,6 +5589,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5404,6 +5620,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5421,6 +5638,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5451,6 +5669,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5478,6 +5697,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5508,6 +5728,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5525,6 +5746,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5553,6 +5775,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5578,6 +5801,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5606,6 +5830,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5623,6 +5848,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5651,6 +5877,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5676,6 +5903,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5704,6 +5932,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5721,6 +5950,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5750,6 +5980,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5776,6 +6007,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5805,6 +6037,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5822,6 +6055,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5847,6 +6081,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5873,6 +6108,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5898,6 +6134,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5922,6 +6159,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5939,6 +6177,7 @@ expression: 
metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5964,6 +6203,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5990,6 +6230,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6015,6 +6256,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6032,6 +6274,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6056,6 +6299,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6080,6 +6324,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6097,6 +6342,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6121,6 +6367,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6145,6 +6392,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6162,6 +6410,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6186,6 +6435,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6210,6 +6460,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6227,6 +6478,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6255,6 +6507,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6281,6 +6534,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6309,6 +6563,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6326,6 +6581,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6355,6 +6611,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6382,6 +6639,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6411,6 +6669,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6428,6 +6687,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6458,6 +6718,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6486,6 +6747,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6516,6 +6778,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6544,6 +6807,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6571,6 +6835,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6598,6 +6863,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), 
), + extracted_from_indexed: false, }, }, Bucket { @@ -6615,6 +6881,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6642,6 +6909,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6667,6 +6935,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6694,6 +6963,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6711,6 +6981,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6735,6 +7006,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6758,6 +7030,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6782,6 +7055,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6799,6 +7073,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6826,6 +7101,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6851,6 +7127,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6878,6 +7155,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6906,6 +7184,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6923,6 +7202,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6950,6 +7230,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6975,6 +7256,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7002,6 +7284,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7030,6 +7313,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7047,6 +7331,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7074,6 +7359,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7099,6 +7385,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7126,6 +7413,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7154,6 +7442,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7171,6 +7460,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7198,6 +7488,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7223,6 +7514,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7250,6 +7542,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, 
Bucket { @@ -7278,6 +7571,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7295,6 +7589,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7322,6 +7617,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7346,6 +7642,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7373,6 +7670,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7399,6 +7697,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7425,6 +7724,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7442,6 +7742,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7469,6 +7770,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7493,6 +7795,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7520,6 +7823,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -7545,6 +7849,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] diff --git a/relay-server/src/metrics_extraction/snapshots/relay_server__metrics_extraction__event__tests__extract_span_metrics_mobile.snap b/relay-server/src/metrics_extraction/snapshots/relay_server__metrics_extraction__event__tests__extract_span_metrics_mobile.snap index 48ef885df0..bcac833962 100644 --- a/relay-server/src/metrics_extraction/snapshots/relay_server__metrics_extraction__event__tests__extract_span_metrics_mobile.snap +++ b/relay-server/src/metrics_extraction/snapshots/relay_server__metrics_extraction__event__tests__extract_span_metrics_mobile.snap @@ -837,6 +837,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -867,6 +868,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -894,6 +896,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -924,6 +927,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -941,6 +945,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -971,6 +976,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -997,6 +1003,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1027,6 +1034,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1044,6 +1052,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), 
), + extracted_from_indexed: false, }, }, Bucket { @@ -1073,6 +1082,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1098,6 +1108,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1127,6 +1138,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1157,6 +1169,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1187,6 +1200,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1217,6 +1231,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1247,6 +1262,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1264,6 +1280,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1296,6 +1313,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1324,6 +1342,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1356,6 +1375,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1373,6 +1393,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1396,6 +1417,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1419,6 +1441,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1436,6 +1459,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1467,6 +1491,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1494,6 +1519,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1525,6 +1551,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1542,6 +1569,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1573,6 +1601,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1600,6 +1629,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1631,6 +1661,7 @@ expression: 
"(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1648,6 +1679,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1679,6 +1711,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1706,6 +1739,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1737,6 +1771,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1754,6 +1789,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1783,6 +1819,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1808,6 +1845,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1837,6 +1875,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1854,6 +1893,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1886,6 +1926,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1914,6 +1955,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1946,6 +1988,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1963,6 +2006,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1994,6 +2038,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2021,6 +2066,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2052,6 +2098,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2069,6 +2116,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2100,6 +2148,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2127,6 +2176,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2158,6 +2208,7 @@ expression: "(&event.value().unwrap().spans, metrics)" received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ], diff --git a/relay-server/src/metrics_extraction/snapshots/relay_server__metrics_extraction__event__tests__only_common.snap 
b/relay-server/src/metrics_extraction/snapshots/relay_server__metrics_extraction__event__tests__only_common.snap index ed3d6b1f91..0901a39895 100644 --- a/relay-server/src/metrics_extraction/snapshots/relay_server__metrics_extraction__event__tests__only_common.snap +++ b/relay-server/src/metrics_extraction/snapshots/relay_server__metrics_extraction__event__tests__only_common.snap @@ -18,6 +18,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -42,6 +43,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -66,6 +68,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -83,6 +86,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -107,6 +111,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -131,6 +136,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -148,6 +154,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -177,6 +184,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -203,6 +211,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -232,6 +241,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -249,6 +259,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -278,6 +289,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -304,6 +316,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -333,6 +346,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -350,6 +364,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -379,6 +394,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -405,6 +421,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -434,6 +451,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -451,6 +469,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -479,6 +498,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -504,6 +524,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -532,6 +553,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -549,6 +571,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -578,6 +601,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -604,6 +628,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: 
false, }, }, Bucket { @@ -633,6 +658,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -650,6 +676,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -680,6 +707,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -707,6 +735,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -737,6 +766,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -754,6 +784,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -784,6 +815,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -811,6 +843,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -841,6 +874,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -858,6 +892,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -888,6 +923,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -915,6 +951,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -945,6 +982,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -962,6 +1000,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -992,6 +1031,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1019,6 +1059,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1049,6 +1090,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1066,6 +1108,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1096,6 +1139,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1123,6 +1167,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1153,6 +1198,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1170,6 +1216,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1200,6 +1247,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1227,6 +1275,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1257,6 +1306,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1274,6 +1324,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1304,6 +1355,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1331,6 +1383,7 @@ expression: metrics 
received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1361,6 +1414,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1378,6 +1432,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1408,6 +1463,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1435,6 +1491,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1465,6 +1522,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1482,6 +1540,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1510,6 +1569,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1535,6 +1595,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1563,6 +1624,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1580,6 +1642,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1610,6 +1673,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1637,6 +1701,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1667,6 +1732,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1684,6 +1750,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1712,6 +1779,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1737,6 +1805,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1765,6 +1834,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1782,6 +1852,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1812,6 +1883,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1839,6 +1911,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1869,6 +1942,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1886,6 +1960,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1916,6 +1991,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1943,6 +2019,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1973,6 +2050,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1990,6 +2068,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2018,6 +2097,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + 
extracted_from_indexed: false, }, }, Bucket { @@ -2043,6 +2123,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2071,6 +2152,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2088,6 +2170,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2116,6 +2199,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2141,6 +2225,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2169,6 +2254,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2186,6 +2272,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2215,6 +2302,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2241,6 +2329,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2270,6 +2359,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2287,6 +2377,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2304,6 +2395,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2321,6 +2413,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2345,6 +2438,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2369,6 +2463,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2386,6 +2481,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2410,6 +2506,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2434,6 +2531,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2451,6 +2549,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2475,6 +2574,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2499,6 +2599,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2516,6 +2617,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2545,6 +2647,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2572,6 +2675,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2601,6 +2705,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2618,6 +2723,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2642,6 +2748,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket 
{ @@ -2666,6 +2773,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2683,6 +2791,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2711,6 +2820,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2736,6 +2846,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2764,6 +2875,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2781,6 +2893,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2805,6 +2918,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2829,6 +2943,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2846,6 +2961,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2874,6 +2990,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2899,6 +3016,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2927,6 +3045,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2944,6 +3063,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2968,6 +3088,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2992,6 +3113,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3009,6 +3131,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3033,6 +3156,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3057,6 +3181,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3074,6 +3199,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3101,6 +3227,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3128,6 +3255,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3145,6 +3273,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3175,6 +3304,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3203,6 +3333,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3233,6 +3364,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3261,6 +3393,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3288,6 +3421,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3315,6 +3449,7 @@ expression: metrics 
received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3332,6 +3467,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3362,6 +3498,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3390,6 +3527,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3420,6 +3558,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3448,6 +3587,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3475,6 +3615,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3502,6 +3643,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3519,6 +3661,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3543,6 +3686,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3567,6 +3711,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3584,6 +3729,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3613,6 +3759,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3639,6 +3786,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3668,6 +3816,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3685,6 +3834,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3714,6 +3864,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3740,6 +3891,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3769,6 +3921,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3786,6 +3939,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3815,6 +3969,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3841,6 +3996,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3870,6 +4026,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3887,6 +4044,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3915,6 +4073,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3940,6 +4099,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3968,6 +4128,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -3985,6 +4146,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + 
extracted_from_indexed: false, }, }, Bucket { @@ -4015,6 +4177,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4042,6 +4205,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4072,6 +4236,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4089,6 +4254,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4119,6 +4285,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4146,6 +4313,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4176,6 +4344,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4193,6 +4362,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4223,6 +4393,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4250,6 +4421,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4280,6 +4452,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4297,6 +4470,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4327,6 +4501,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4354,6 +4529,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4384,6 +4560,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4401,6 +4578,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4431,6 +4609,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4458,6 +4637,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4488,6 +4668,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4505,6 +4686,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4535,6 +4717,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4562,6 +4745,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4592,6 +4776,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4609,6 +4794,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4639,6 +4825,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4666,6 +4853,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4696,6 +4884,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket 
{ @@ -4713,6 +4902,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4743,6 +4933,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4770,6 +4961,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4800,6 +4992,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4817,6 +5010,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4845,6 +5039,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4870,6 +5065,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4898,6 +5094,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4915,6 +5112,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4945,6 +5143,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -4972,6 +5171,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5002,6 +5202,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5019,6 +5220,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5047,6 +5249,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5072,6 +5275,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5100,6 +5304,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5117,6 +5322,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5147,6 +5353,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5174,6 +5381,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5204,6 +5412,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5221,6 +5430,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5251,6 +5461,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5278,6 +5489,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5308,6 +5520,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5325,6 +5538,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5353,6 +5567,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5378,6 +5593,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5406,6 +5622,7 @@ expression: metrics 
received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5423,6 +5640,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5451,6 +5669,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5476,6 +5695,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5504,6 +5724,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5521,6 +5742,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5550,6 +5772,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5576,6 +5799,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5605,6 +5829,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5622,6 +5847,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5639,6 +5865,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5656,6 +5883,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5680,6 +5908,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5704,6 +5933,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5721,6 +5951,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5745,6 +5976,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5769,6 +6001,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5786,6 +6019,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5810,6 +6044,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5834,6 +6069,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5851,6 +6087,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5879,6 +6116,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5905,6 +6143,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5933,6 +6172,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5950,6 +6190,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -5979,6 +6220,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6006,6 +6248,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6035,6 +6278,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + 
extracted_from_indexed: false, }, }, Bucket { @@ -6052,6 +6296,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6082,6 +6327,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6110,6 +6356,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6140,6 +6387,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6168,6 +6416,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6195,6 +6444,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6222,6 +6472,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6239,6 +6490,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6266,6 +6518,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6291,6 +6544,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6318,6 +6572,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6335,6 +6590,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6359,6 +6615,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6382,6 +6639,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6406,6 +6664,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6423,6 +6682,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6440,6 +6700,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6457,6 +6718,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6474,6 +6736,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6491,6 +6754,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -6508,6 +6772,7 @@ expression: metrics received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] diff --git a/relay-server/src/metrics_extraction/transactions/mod.rs b/relay-server/src/metrics_extraction/transactions/mod.rs index 08d919796e..02818d8668 100644 --- a/relay-server/src/metrics_extraction/transactions/mod.rs +++ b/relay-server/src/metrics_extraction/transactions/mod.rs @@ -9,6 +9,7 @@ use relay_event_schema::protocol::{ TransactionSource, }; use relay_metrics::{Bucket, DurationUnit, FiniteF64}; +use relay_sampling::evaluation::SamplingDecision; use crate::metrics_extraction::generic; use crate::metrics_extraction::transactions::types::{ @@ -17,7 +18,7 @@ use crate::metrics_extraction::transactions::types::{ }; use crate::metrics_extraction::IntoMetric; use crate::statsd::RelayCounters; -use crate::utils::{self, SamplingResult}; +use crate::utils; pub mod 
types; @@ -236,20 +237,12 @@ pub struct ExtractedMetrics { pub sampling_metrics: Vec, } -impl ExtractedMetrics { - /// Extends the set of metrics with the supplied newly extracted metrics. - pub fn extend(&mut self, other: Self) { - self.project_metrics.extend(other.project_metrics); - self.sampling_metrics.extend(other.sampling_metrics); - } -} - /// A utility that extracts metrics from transactions. pub struct TransactionExtractor<'a> { pub config: &'a TransactionMetricsConfig, pub generic_config: Option>, pub transaction_from_dsc: Option<&'a str>, - pub sampling_result: &'a SamplingResult, + pub sampling_decision: SamplingDecision, pub has_profile: bool, } @@ -426,14 +419,9 @@ impl TransactionExtractor<'_> { .0 .insert(CommonTag::Transaction, transaction_from_dsc.to_string()); } - let decision = if self.sampling_result.should_keep() { - "keep" - } else { - "drop" - }; TransactionCPRTags { - decision: decision.to_owned(), + decision: self.sampling_decision.to_string(), universal_tags, } }; @@ -617,7 +605,7 @@ mod tests { config: &config, generic_config: None, transaction_from_dsc: Some("test_transaction"), - sampling_result: &SamplingResult::Pending, + sampling_decision: SamplingDecision::Keep, has_profile: false, }; @@ -693,6 +681,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -726,6 +715,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -753,6 +743,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -780,6 +771,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -807,6 +799,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -839,6 +832,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -856,6 +850,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -888,6 +883,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -910,6 +906,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -942,6 +939,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] @@ -978,7 +976,7 @@ mod tests { config: &config, generic_config: None, transaction_from_dsc: Some("test_transaction"), - sampling_result: &SamplingResult::Pending, + sampling_decision: SamplingDecision::Keep, has_profile: false, }; @@ -1007,6 +1005,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1030,6 +1029,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1053,6 +1053,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1070,6 +1071,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1093,6 +1095,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1114,6 +1117,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] @@ -1147,7 +1151,7 @@ mod tests { config: &config, generic_config: None, transaction_from_dsc: Some("test_transaction"), - sampling_result: &SamplingResult::Pending, 
+ sampling_decision: SamplingDecision::Keep, has_profile: false, }; @@ -1176,6 +1180,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1200,6 +1205,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1217,6 +1223,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1240,6 +1247,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1261,6 +1269,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] @@ -1293,7 +1302,7 @@ mod tests { config: &config, generic_config: None, transaction_from_dsc: Some("test_transaction"), - sampling_result: &SamplingResult::Pending, + sampling_decision: SamplingDecision::Keep, has_profile: false, }; @@ -1368,7 +1377,7 @@ mod tests { config: &config, generic_config: None, transaction_from_dsc: Some("test_transaction"), - sampling_result: &SamplingResult::Pending, + sampling_decision: SamplingDecision::Keep, has_profile: false, }; @@ -1396,6 +1405,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1420,6 +1430,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1443,6 +1454,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1460,6 +1472,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1483,6 +1496,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1504,6 +1518,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] @@ -1527,7 +1542,7 @@ mod tests { config: &config, generic_config: None, transaction_from_dsc: Some("test_transaction"), - sampling_result: &SamplingResult::Pending, + sampling_decision: SamplingDecision::Keep, has_profile: false, }; @@ -1566,7 +1581,7 @@ mod tests { config: &config, generic_config: None, transaction_from_dsc: Some("test_transaction"), - sampling_result: &SamplingResult::Pending, + sampling_decision: SamplingDecision::Keep, has_profile: false, }; @@ -1634,7 +1649,7 @@ mod tests { config: &config, generic_config: None, transaction_from_dsc: Some("test_transaction"), - sampling_result: &SamplingResult::Pending, + sampling_decision: SamplingDecision::Keep, has_profile: false, }; @@ -1656,6 +1671,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1679,6 +1695,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -1698,6 +1715,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] @@ -1737,7 +1755,7 @@ mod tests { config: &config, generic_config: None, transaction_from_dsc: Some("test_transaction"), - sampling_result: &SamplingResult::Pending, + sampling_decision: SamplingDecision::Keep, has_profile: false, }; @@ -1770,7 +1788,7 @@ mod tests { config: &config, generic_config: None, transaction_from_dsc: Some("test_transaction"), - sampling_result: &SamplingResult::Pending, + sampling_decision: SamplingDecision::Keep, has_profile: false, }; @@ -1807,7 +1825,7 @@ mod tests { config: &config, generic_config: None, transaction_from_dsc: Some("root_transaction"), - sampling_result: 
&SamplingResult::Pending, + sampling_decision: SamplingDecision::Keep, has_profile: false, }; @@ -1832,6 +1850,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] @@ -2075,7 +2094,7 @@ mod tests { config: &config, generic_config: None, transaction_from_dsc: Some("test_transaction"), - sampling_result: &SamplingResult::Pending, + sampling_decision: SamplingDecision::Keep, has_profile: false, }; @@ -2176,7 +2195,7 @@ mod tests { config: &config, generic_config: Some(combined_config), transaction_from_dsc: Some("test_transaction"), - sampling_result: &SamplingResult::Pending, + sampling_decision: SamplingDecision::Keep, has_profile: false, }; @@ -2203,6 +2222,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2220,6 +2240,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2242,6 +2263,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, Bucket { @@ -2261,6 +2283,7 @@ mod tests { received_at: Some( UnixTimestamp(0), ), + extracted_from_indexed: false, }, }, ] diff --git a/relay-server/src/metrics_extraction/transactions/types.rs b/relay-server/src/metrics_extraction/transactions/types.rs index 07f09e82b7..c44f1db2ca 100644 --- a/relay-server/src/metrics_extraction/transactions/types.rs +++ b/relay-server/src/metrics_extraction/transactions/types.rs @@ -161,7 +161,7 @@ impl From for BTreeMap { fn from(tags: UsageTags) -> Self { let mut map = BTreeMap::new(); if tags.has_profile { - map.insert("has_profile".to_string(), "true".to_string()); + map.insert("has_profile".to_owned(), "true".to_owned()); } map } @@ -178,11 +178,11 @@ impl From for BTreeMap { fn from(value: TransactionMeasurementTags) -> Self { let mut map: BTreeMap = value.universal_tags.into(); if let Some(decision) = value.measurement_rating { - map.insert("measurement_rating".to_string(), decision); + map.insert("measurement_rating".to_owned(), decision); } if let Some(score_profile_version) = value.score_profile_version { map.insert( - "sentry.score_profile_version".to_string(), + "sentry.score_profile_version".to_owned(), score_profile_version, ); } diff --git a/relay-server/src/services/processor.rs b/relay-server/src/services/processor.rs index 05bc2c34a4..0d9c68a79e 100644 --- a/relay-server/src/services/processor.rs +++ b/relay-server/src/services/processor.rs @@ -36,7 +36,7 @@ use relay_profiling::ProfileId; use relay_protocol::{Annotated, Value}; use relay_quotas::{DataCategory, Scoping}; use relay_sampling::config::RuleId; -use relay_sampling::evaluation::{ReservoirCounters, ReservoirEvaluator}; +use relay_sampling::evaluation::{ReservoirCounters, ReservoirEvaluator, SamplingDecision}; use relay_statsd::metric; use relay_system::{Addr, FromMessage, NoResponse, Service}; use reqwest::header; @@ -55,7 +55,7 @@ use { }, relay_dynamic_config::{CardinalityLimiterMode, GlobalConfig, MetricExtractionGroups}, relay_metrics::{Aggregator, MergeBuckets, RedisMetricMetaStore}, - relay_quotas::{Quota, RateLimitingError, RedisRateLimiter}, + relay_quotas::{Quota, RateLimitingError, RateLimits, RedisRateLimiter}, relay_redis::RedisPool, std::iter::Chain, std::slice::Iter, @@ -581,18 +581,93 @@ impl From for ProcessingError { type ExtractedEvent = (Annotated, usize); -impl ExtractedMetrics { +/// A container for extracted metrics during processing. 
+/// +/// The container enforces that the extracted metrics are correctly tagged +/// with the dynamic sampling decision. +#[derive(Default, Debug)] +pub struct ProcessingExtractedMetrics(ExtractedMetrics); + +impl ProcessingExtractedMetrics { + /// Extends the contained metrics with [`ExtractedMetrics`]. + pub fn extend( + &mut self, + extracted: ExtractedMetrics, + sampling_decision: Option, + ) { + self.extend_project_metrics(extracted.project_metrics, sampling_decision); + self.extend_sampling_metrics(extracted.sampling_metrics, sampling_decision); + } + + /// Extends the contained project metrics. + pub fn extend_project_metrics( + &mut self, + mut buckets: Vec, + sampling_decision: Option, + ) { + for bucket in &mut buckets { + bucket.metadata.extracted_from_indexed = + sampling_decision == Some(SamplingDecision::Keep); + } + self.0.project_metrics.extend(buckets); + } + + /// Extends the contained sampling metrics. + pub fn extend_sampling_metrics( + &mut self, + mut buckets: Vec, + sampling_decision: Option, + ) { + for bucket in &mut buckets { + bucket.metadata.extracted_from_indexed = + sampling_decision == Some(SamplingDecision::Keep); + } + self.0.sampling_metrics.extend(buckets); + } + + /// Returns `true` if any project metrics are extracted. + pub fn has_project_metrics(&self) -> bool { + !self.0.project_metrics.is_empty() + } + + /// Applies rate limits to the contained metrics. + /// + /// This is used to apply rate limits which have been enforced on sampled items of an envelope + /// to also consistently apply to the metrics extracted from these items. + #[cfg(feature = "processing")] + fn enforce_limits(&mut self, scoping: Scoping, limits: &RateLimits) { + for (category, namespace) in [ + (DataCategory::Transaction, MetricNamespace::Transactions), + (DataCategory::Span, MetricNamespace::Spans), + ] { + if !limits.check(scoping.item(category)).is_empty() { + relay_log::trace!( + "dropping {namespace} metrics, due to enforced limit on envelope" + ); + self.retain(|bucket| bucket.name.try_namespace() != Some(namespace)); + } + } + } + + #[cfg(feature = "processing")] + fn retain(&mut self, mut f: impl FnMut(&Bucket) -> bool) { + self.0.project_metrics.retain(&mut f); + self.0.sampling_metrics.retain(&mut f); + } + fn send_metrics(self, envelope: &Envelope, project_cache: Addr) { let project_key = envelope.meta().public_key(); - if !self.project_metrics.is_empty() { - project_cache.send(AddMetricBuckets::internal( - project_key, - self.project_metrics, - )); + let ExtractedMetrics { + project_metrics, + sampling_metrics, + } = self.0; + + if !project_metrics.is_empty() { + project_cache.send(AddMetricBuckets::internal(project_key, project_metrics)); } - if !self.sampling_metrics.is_empty() { + if !sampling_metrics.is_empty() { // If no sampling project state is available, we associate the sampling // metrics with the current project. // @@ -602,7 +677,7 @@ impl ExtractedMetrics { let sampling_project_key = utils::get_sampling_key(envelope).unwrap_or(project_key); project_cache.send(AddMetricBuckets::internal( sampling_project_key, - self.sampling_metrics, + sampling_metrics, )); } } @@ -642,7 +717,7 @@ struct ProcessEnvelopeState<'a, Group> { /// /// Relay can extract metrics for sessions and transactions, which is controlled by /// configuration objects in the project config. - extracted_metrics: ExtractedMetrics, + extracted_metrics: ProcessingExtractedMetrics, /// The state of the project that this envelope belongs to. 
project_state: Arc, @@ -706,18 +781,13 @@ impl<'a, Group> ProcessEnvelopeState<'a, Group> { fn remove_event(&mut self) { self.event = Annotated::empty(); } - - fn reject_event(&mut self, outcome: Outcome) { - self.remove_event(); - self.managed_envelope.reject_event(outcome); - } } /// The view out of the [`ProcessEnvelopeState`] after processing. #[derive(Debug)] struct ProcessingStateResult { managed_envelope: TypedEnvelope, - extracted_metrics: ExtractedMetrics, + extracted_metrics: ProcessingExtractedMetrics, } /// Response of the [`ProcessEnvelope`] message. @@ -1167,15 +1237,14 @@ impl EnvelopeProcessorService { // When invoking the rate limiter, capture if the event item has been rate limited to also // remove it from the processing state eventually. - let mut envelope_limiter = - EnvelopeLimiter::new(Some(&project_state.config), |item_scope, quantity| { - rate_limiter.is_rate_limited(quotas, item_scope, quantity, false) - }); + let mut envelope_limiter = EnvelopeLimiter::new(|item_scope, quantity| { + rate_limiter.is_rate_limited(quotas, item_scope, quantity, false) + }); // Tell the envelope limiter about the event, since it has been removed from the Envelope at // this stage in processing. if let Some(category) = event_category { - envelope_limiter.assume_event(category, state.event_metrics_extracted); + envelope_limiter.assume_event(category); } let scoping = state.managed_envelope.scoping(); @@ -1183,7 +1252,12 @@ impl EnvelopeProcessorService { envelope_limiter.enforce(state.managed_envelope.envelope_mut(), &scoping)? }); - if limits.is_limited() { + // Use the same rate limits as used for the envelope on the metrics. + // Those rate limits should not be checked for expiry or similar to ensure a consistent + // limiting of envelope items and metrics. + state.extracted_metrics.enforce_limits(scoping, &limits); + + if !limits.is_empty() { self.inner .addrs .project_cache @@ -1208,7 +1282,7 @@ impl EnvelopeProcessorService { fn extract_transaction_metrics( &self, state: &mut ProcessEnvelopeState, - sampling_result: &SamplingResult, + sampling_decision: SamplingDecision, profile_id: Option, ) -> Result<(), ProcessingError> { if state.event_metrics_extracted { @@ -1284,7 +1358,9 @@ impl EnvelopeProcessorService { .max_tag_value_length, global.options.span_extraction_sample_rate, ); - state.extracted_metrics.project_metrics.extend(metrics); + state + .extracted_metrics + .extend_project_metrics(metrics, Some(sampling_decision)); if !state.project_state.has_feature(Feature::DiscardTransaction) { let transaction_from_dsc = state @@ -1297,15 +1373,16 @@ impl EnvelopeProcessorService { config: tx_config, generic_config: Some(combined_config), transaction_from_dsc, - sampling_result, + sampling_decision, has_profile: profile_id.is_some(), }; - state.extracted_metrics.extend(extractor.extract(event)?); + state + .extracted_metrics + .extend(extractor.extract(event)?, Some(sampling_decision)); } state.event_metrics_extracted = true; - state.managed_envelope.set_event_metrics_extracted(); Ok(()) } @@ -1533,20 +1610,18 @@ impl EnvelopeProcessorService { // Always run dynamic sampling on processing Relays, // but delay decision until inbound filters have been fully processed. 
- let sampling_result = - if self.inner.config.processing_enabled() || matches!(filter_run, FiltersStatus::Ok) { - dynamic_sampling::run(state, &self.inner.config) - } else { - SamplingResult::NoMatch - }; + let run_dynamic_sampling = + matches!(filter_run, FiltersStatus::Ok) || self.inner.config.processing_enabled(); - // We avoid extracting metrics if we are not sampling the event while in non-processing - // Relays, in order to synchronize rate limits on indexed and processed transactions. - if self.inner.config.processing_enabled() || sampling_result.should_drop() { - self.extract_transaction_metrics(state, &sampling_result, profile_id)?; - } + let sampling_result = match run_dynamic_sampling { + true => dynamic_sampling::run(state, &self.inner.config), + false => SamplingResult::Pending, + }; if let Some(outcome) = sampling_result.into_dropped_outcome() { + // Extract metrics here, we're about to drop the event/transaction. + self.extract_transaction_metrics(state, SamplingDecision::Drop, profile_id)?; + let keep_profiles = dynamic_sampling::forward_unsampled_profiles(state, &global_config); // Process profiles before dropping the transaction, if necessary. @@ -1574,6 +1649,9 @@ impl EnvelopeProcessorService { attachment::scrub(state); if_processing!(self.inner.config, { + // Always extract metrics in processing Relays for sampled items. + self.extract_transaction_metrics(state, SamplingDecision::Keep, profile_id)?; + profile::process(state, &self.inner.config); if state @@ -1584,6 +1662,7 @@ impl EnvelopeProcessorService { } self.enforce_quotas(state)?; + span::maybe_discard_transaction(state); }); @@ -1864,7 +1943,7 @@ impl EnvelopeProcessorService { // requires recomputation of the context. state.managed_envelope.update(); - let has_metrics = !state.extracted_metrics.project_metrics.is_empty(); + let has_metrics = state.extracted_metrics.has_project_metrics(); state.extracted_metrics.send_metrics( state.managed_envelope.envelope(), diff --git a/relay-server/src/services/processor/dynamic_sampling.rs b/relay-server/src/services/processor/dynamic_sampling.rs index 31736474aa..69e0b7a709 100644 --- a/relay-server/src/services/processor/dynamic_sampling.rs +++ b/relay-server/src/services/processor/dynamic_sampling.rs @@ -16,7 +16,7 @@ use crate::services::outcome::Outcome; use crate::services::processor::{ EventProcessing, ProcessEnvelopeState, Sampling, TransactionGroup, }; -use crate::utils::{self, sample, ItemAction, SamplingResult}; +use crate::utils::{self, sample, SamplingResult}; /// Ensures there is a valid dynamic sampling context and corresponding project state. /// @@ -96,19 +96,38 @@ pub fn drop_unsampled_items( outcome: Outcome, keep_profiles: bool, ) { - state.managed_envelope.retain_items(|item| { - if keep_profiles && item.ty() == &ItemType::Profile { - // Remember on the item that this profile belongs to an transaction which was not - // sampled. - // Upstream Relays can use that information to allow standalone unsampled profiles. - item.set_sampled(false); - ItemAction::Keep - } else { - ItemAction::Drop(outcome.clone()) - } + // Remove all items from the envelope which need to be dropped due to dynamic sampling. 
+ let dropped_items = state.envelope_mut().take_items_by(|item| match item.ty() { + ItemType::Profile => !keep_profiles, + _ => true, }); - // The event is no longer in the envelope, so we need to handle it separately: - state.reject_event(outcome); + + for item in dropped_items { + let Some(category) = item.outcome_category() else { + continue; + }; + + // Dynamic sampling only drops indexed items. Upgrade the category to the index + // category if one exists for this category, for example profiles will be upgraded to profiles indexed, + // but attachments are still emitted as attachments. + let category = category.index_category().unwrap_or(category); + + state + .managed_envelope + .track_outcome(outcome.clone(), category, item.quantity()); + } + + // Mark all remaining items in the envelope as unsampled. + for item in state.envelope_mut().items_mut() { + item.set_sampled(false); + } + + // All items have been dropped, now make sure the event is also handled and dropped. + if let Some(category) = state.event_category() { + let category = category.index_category().unwrap_or(category); + state.managed_envelope.track_outcome(outcome, category, 1) + } + state.remove_event(); } /// Computes the sampling decision on the incoming envelope. @@ -251,7 +270,7 @@ mod tests { use relay_sampling::config::{ DecayingFunction, RuleId, SamplingRule, SamplingValue, TimeRange, }; - use relay_sampling::evaluation::{ReservoirCounters, SamplingMatch}; + use relay_sampling::evaluation::{ReservoirCounters, SamplingDecision, SamplingMatch}; use relay_system::Addr; use uuid::Uuid; @@ -392,7 +411,7 @@ mod tests { None, None, ); - assert_eq!(res.should_keep(), should_keep); + assert_eq!(res.decision().is_keep(), should_keep); } } @@ -459,17 +478,17 @@ mod tests { // None represents no TransactionMetricsConfig, DS will not be run let mut state = get_state(None); let sampling_result = run(&mut state, &config); - assert!(sampling_result.should_keep()); + assert_eq!(sampling_result.decision(), SamplingDecision::Keep); // Current version is 3, so it won't run DS if it's outdated let mut state = get_state(Some(2)); let sampling_result = run(&mut state, &config); - assert!(sampling_result.should_keep()); + assert_eq!(sampling_result.decision(), SamplingDecision::Keep); // Dynamic sampling is run, as the transactionmetrics version is up to date. 
let mut state = get_state(Some(3)); let sampling_result = run(&mut state, &config); - assert!(sampling_result.should_drop()); + assert_eq!(sampling_result.decision(), SamplingDecision::Drop); } fn project_state_with_single_rule(sample_rate: f64) -> ProjectState { @@ -758,13 +777,13 @@ mod tests { fn test_reservoir_applied_for_transactions() { let result = run_with_reservoir_rule::(ProcessingGroup::Transaction); // Default sampling rate is 0.0, but transaction is retained because of reservoir: - assert!(result.should_keep()); + assert_eq!(result.decision(), SamplingDecision::Keep); } #[test] fn test_reservoir_not_applied_for_spans() { let result = run_with_reservoir_rule::(ProcessingGroup::Span); // Default sampling rate is 0.0, and the reservoir does not apply to spans: - assert!(result.should_drop()); + assert_eq!(result.decision(), SamplingDecision::Drop); } } diff --git a/relay-server/src/services/processor/session.rs b/relay-server/src/services/processor/session.rs index 44493f3b16..8565130676 100644 --- a/relay-server/src/services/processor/session.rs +++ b/relay-server/src/services/processor/session.rs @@ -24,7 +24,6 @@ use crate::utils::ItemAction; /// are out of range after clock drift correction. pub fn process(state: &mut ProcessEnvelopeState, config: &Config) { let received = state.managed_envelope.received_at(); - let extracted_metrics = &mut state.extracted_metrics.project_metrics; let metrics_config = state.project_state.config().session_metrics; let envelope = state.managed_envelope.envelope_mut(); let client = envelope.meta().client().map(|x| x.to_owned()); @@ -33,6 +32,7 @@ pub fn process(state: &mut ProcessEnvelopeState, config: &Config) let clock_drift_processor = ClockDriftProcessor::new(envelope.sent_at(), received).at_least(MINIMUM_CLOCK_DRIFT); + let mut extracted_metrics = Vec::new(); state.managed_envelope.retain_items(|item| { let should_keep = match item.ty() { ItemType::Session => process_session( @@ -43,7 +43,7 @@ pub fn process(state: &mut ProcessEnvelopeState, config: &Config) client_addr, metrics_config, &clock_drift_processor, - extracted_metrics, + &mut extracted_metrics, ), ItemType::Sessions => process_session_aggregates( item, @@ -53,7 +53,7 @@ pub fn process(state: &mut ProcessEnvelopeState, config: &Config) client_addr, metrics_config, &clock_drift_processor, - extracted_metrics, + &mut extracted_metrics, ), _ => true, // Keep all other item types }; @@ -63,6 +63,10 @@ pub fn process(state: &mut ProcessEnvelopeState, config: &Config) ItemAction::DropSilently // sessions never log outcomes. } }); + + state + .extracted_metrics + .extend_project_metrics(extracted_metrics, None); } /// Returns Ok(true) if attributes were modified. 
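The span-processing, project, and rate-limiting changes that follow all build on the mechanism introduced in processor.rs above: every metric bucket extracted during processing is tagged with the dynamic sampling decision of the item it was extracted from, so that rate limits enforced on the envelope can later be applied consistently to those metrics. The sketch below is purely illustrative and not part of the patch; it uses simplified stand-ins for the real Bucket, BucketMetadata, and SamplingDecision types from relay-metrics and relay-sampling, and only mirrors, in reduced form, the tagging behaviour of extend_project_metrics.

// Illustrative sketch only (not part of the diff): simplified stand-ins for the
// real `Bucket`, `BucketMetadata`, and `SamplingDecision` types.
#[derive(Clone, Copy, PartialEq, Eq)]
enum SamplingDecision {
    Keep,
    Drop,
}

#[derive(Default)]
struct BucketMetadata {
    extracted_from_indexed: bool,
}

#[derive(Default)]
struct Bucket {
    name: String,
    metadata: BucketMetadata,
}

#[derive(Default)]
struct ProcessingExtractedMetrics {
    project_metrics: Vec<Bucket>,
}

impl ProcessingExtractedMetrics {
    // Mirrors the `extend_project_metrics` added in processor.rs: the sampling
    // decision is folded into each bucket's metadata before the bucket is stored.
    fn extend_project_metrics(
        &mut self,
        mut buckets: Vec<Bucket>,
        sampling_decision: Option<SamplingDecision>,
    ) {
        for bucket in &mut buckets {
            bucket.metadata.extracted_from_indexed =
                sampling_decision == Some(SamplingDecision::Keep);
        }
        self.project_metrics.extend(buckets);
    }
}

fn main() {
    let mut extracted = ProcessingExtractedMetrics::default();

    // Session metrics are extracted without a sampling decision; the flag stays false.
    extracted.extend_project_metrics(
        vec![Bucket { name: "c:sessions/session@none".into(), ..Default::default() }],
        None,
    );

    // Metrics extracted from a kept (sampled/indexed) transaction carry the flag, so a
    // rate limit enforced on the envelope can later be mirrored onto these buckets.
    extracted.extend_project_metrics(
        vec![Bucket { name: "c:transactions/usage@none".into(), ..Default::default() }],
        Some(SamplingDecision::Keep),
    );

    // Metrics extracted from a dropped transaction do not carry the flag: the indexed
    // payload is gone and only the metric remains.
    extracted.extend_project_metrics(
        vec![Bucket { name: "d:transactions/duration@millisecond".into(), ..Default::default() }],
        Some(SamplingDecision::Drop),
    );

    for bucket in &extracted.project_metrics {
        println!("{}: extracted_from_indexed = {}", bucket.name, bucket.metadata.extracted_from_indexed);
    }
}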
diff --git a/relay-server/src/services/processor/span/processing.rs b/relay-server/src/services/processor/span/processing.rs index 46003d1060..b2374d0213 100644 --- a/relay-server/src/services/processor/span/processing.rs +++ b/relay-server/src/services/processor/span/processing.rs @@ -21,18 +21,19 @@ use relay_log::protocol::{Attachment, AttachmentType}; use relay_metrics::{aggregator::AggregatorConfig, MetricNamespace, UnixTimestamp}; use relay_pii::PiiProcessor; use relay_protocol::{Annotated, Empty}; +use relay_quotas::DataCategory; use relay_spans::{otel_to_sentry_span, otel_trace::Span as OtelSpan}; use crate::envelope::{ContentType, Envelope, Item, ItemType}; use crate::metrics_extraction::generic::extract_metrics; -use crate::services::outcome::{DiscardReason, Outcome, RuleCategories}; +use crate::services::outcome::{DiscardReason, Outcome}; use crate::services::processor::span::extract_transaction_span; use crate::services::processor::{ dynamic_sampling, Addrs, ProcessEnvelope, ProcessEnvelopeState, ProcessingError, ProcessingGroup, SpanGroup, TransactionGroup, }; use crate::statsd::{RelayCounters, RelayHistograms}; -use crate::utils::{sample, BufferGuard, ItemAction, SamplingResult}; +use crate::utils::{sample, BufferGuard, ItemAction}; use relay_event_normalization::span::ai::extract_ai_measurements; use thiserror::Error; @@ -51,12 +52,7 @@ pub fn process( // We only implement trace-based sampling rules for now, which can be computed // once for all spans in the envelope. - let sampling_outcome = match dynamic_sampling::run(state, &config) { - SamplingResult::Match(sampling_match) if sampling_match.should_drop() => Some( - Outcome::FilteredSampling(RuleCategories::from(sampling_match.into_matched_rules())), - ), - _ => None, - }; + let sampling_result = dynamic_sampling::run(state, &config); let span_metrics_extraction_config = match state.project_state.config.metric_extraction { ErrorBoundary::Ok(ref config) if config.is_enabled() => Some(config), @@ -93,6 +89,7 @@ pub fn process( let client_ip = state.managed_envelope.envelope().meta().client_addr(); let filter_settings = &state.project_state.config.filter_settings; + let mut dynamic_sampling_dropped_spans = 0; state.managed_envelope.retain_items(|item| { let mut annotated_span = match item.ty() { ItemType::OtelSpan => match serde_json::from_slice::(&item.payload()) { @@ -161,16 +158,19 @@ pub fn process( span, CombinedMetricExtractionConfig::new(global_metrics_config, config), ); - state.extracted_metrics.project_metrics.extend(metrics); + state + .extracted_metrics + .extend_project_metrics(metrics, Some(sampling_result.decision())); item.set_metrics_extracted(true); } - if let Some(sampling_outcome) = &sampling_outcome { - relay_log::trace!( - "Dropping span because of sampling rule {}", - sampling_outcome - ); - return ItemAction::Drop(sampling_outcome.clone()); + if sampling_result.decision().is_drop() { + relay_log::trace!("Dropping span because of sampling rule {sampling_result:?}"); + dynamic_sampling_dropped_spans += 1; + // Drop silently and not with an outcome, we only want to emit an outcome for the + // indexed category if the span was dropped by dynamic sampling. + // Dropping through the envelope will emit for both categories. 
+ return ItemAction::DropSilently; } if let Err(e) = scrub(&mut annotated_span, &state.project_state.config) { @@ -227,6 +227,14 @@ pub fn process( ItemAction::Keep }); + if let Some(outcome) = sampling_result.into_dropped_outcome() { + state.managed_envelope.track_outcome( + outcome, + DataCategory::SpanIndexed, + dynamic_sampling_dropped_spans, + ); + } + let mut transaction_count = 0; for transaction in extracted_transactions { // Enqueue a full processing request for every extracted transaction item. diff --git a/relay-server/src/services/project.rs b/relay-server/src/services/project.rs index c7e16db4bb..8f2d37e9c2 100644 --- a/relay-server/src/services/project.rs +++ b/relay-server/src/services/project.rs @@ -13,7 +13,9 @@ use relay_metrics::aggregator::AggregatorConfig; use relay_metrics::{ aggregator, Aggregator, Bucket, MergeBuckets, MetaAggregator, MetricMeta, MetricNamespace, }; -use relay_quotas::{DataCategory, MetricNamespaceScoping, Quota, RateLimits, Scoping}; +use relay_quotas::{ + CachedRateLimits, DataCategory, MetricNamespaceScoping, Quota, RateLimits, Scoping, +}; use relay_sampling::evaluation::ReservoirCounters; use relay_statsd::metric; use relay_system::{Addr, BroadcastChannel}; @@ -467,7 +469,7 @@ pub struct Project { config: Arc, state: State, state_channel: Option, - rate_limits: RateLimits, + rate_limits: CachedRateLimits, last_no_cache: Instant, reservoir_counters: ReservoirCounters, metric_meta_aggregator: MetaAggregator, @@ -484,7 +486,7 @@ impl Project { project_key: key, state: State::new(config.permissive_aggregator_config()), state_channel: None, - rate_limits: RateLimits::new(), + rate_limits: CachedRateLimits::new(), last_no_cache: Instant::now(), reservoir_counters: Arc::default(), metric_meta_aggregator: MetaAggregator::new(config.metrics_meta_locations_max()), @@ -561,11 +563,6 @@ impl Project { self.next_fetch_attempt } - /// The rate limits that are active for this project. - pub fn rate_limits(&self) -> &RateLimits { - &self.rate_limits - } - /// The last time the project state was updated pub fn last_updated_at(&self) -> Instant { self.last_updated_at @@ -579,14 +576,28 @@ impl Project { } fn merge_buckets_into_aggregator( - &self, + &mut self, aggregator: &Addr, #[allow(unused_variables)] envelope_processor: &Addr, outcome_aggregator: &Addr, metric_outcomes: &MetricOutcomes, - state: &ProjectState, buckets: Buckets, ) { + let state = match self.state { + State::Cached(ref state) => { + // TODO: When the state is present but expired, we should send buckets + // to the metrics buffer instead. In practice, the project state should be + // refreshed at the time when the buckets emerge from the aggregator though. + state + } + State::Pending(ref mut inner_agg) => { + // We need to queue the metrics in a temporary aggregator until the project state becomes available. 
+ relay_log::debug!("sending metrics to metrics-buffer"); + inner_agg.merge_all(self.project_key, buckets, None); + return; + } + }; + let Some(scoping) = self.scoping() else { relay_log::error!( "there is no scoping due to missing project id: dropping {} buckets", @@ -611,10 +622,10 @@ impl Project { let quotas = state.config.quotas.clone(); let buckets = match MetricsLimiter::create(buckets, quotas, scoping) { Ok(mut bucket_limiter) => { - let cached_rate_limits = self.rate_limits().clone(); + let current_limits = self.rate_limits.current_limits(); #[allow(unused_variables)] let was_rate_limited = bucket_limiter.enforce_limits( - &cached_rate_limits, + current_limits, metric_outcomes, outcome_aggregator, ); @@ -654,28 +665,13 @@ impl Project { } let buckets = Buckets::new(buckets).filter_namespaces(source); - - match self.state { - State::Cached(ref state) => { - // TODO: When the state is present but expired, we should send buckets - // to the metrics buffer instead. In practice, the project state should be - // refreshed at the time when the buckets emerge from the aggregator though. - - self.merge_buckets_into_aggregator( - aggregator, - envelope_processor, - outcome_aggregator, - metric_outcomes, - state, - buckets, - ); - } - State::Pending(ref mut inner_agg) => { - // We need to queue the metrics in a temporary aggregator until the project state becomes available. - relay_log::debug!("sending metrics to metrics-buffer"); - inner_agg.merge_all(self.project_key, buckets, None); - } - } + self.merge_buckets_into_aggregator( + aggregator, + envelope_processor, + outcome_aggregator, + metric_outcomes, + buckets, + ); } pub fn add_metric_meta( @@ -901,7 +897,6 @@ impl Project { envelope_processor, outcome_aggregator, metric_outcomes, - &state, buckets, ); } @@ -1051,12 +1046,11 @@ impl Project { } } - self.rate_limits.clean_expired(); + let current_limits = self.rate_limits.current_limits(); - let config = state.as_deref().map(|s| &s.config); let quotas = state.as_deref().map(|s| s.get_quotas()).unwrap_or(&[]); - let envelope_limiter = EnvelopeLimiter::new(config, |item_scoping, _| { - Ok(self.rate_limits.check_with_quotas(quotas, item_scoping)) + let envelope_limiter = EnvelopeLimiter::new(|item_scoping, _| { + Ok(current_limits.check_with_quotas(quotas, item_scoping)) }); let (enforcement, mut rate_limits) = @@ -1070,7 +1064,7 @@ impl Project { if envelope.envelope().items().any(|i| i.ty().is_metrics()) { let mut metrics_scoping = scoping.item(DataCategory::MetricBucket); metrics_scoping.namespace = MetricNamespaceScoping::Any; - rate_limits.merge(self.rate_limits.check_with_quotas(quotas, metrics_scoping)); + rate_limits.merge(current_limits.check_with_quotas(quotas, metrics_scoping)); } let envelope = if envelope.envelope().is_empty() { @@ -1124,8 +1118,9 @@ impl Project { .filter_map(|bucket| bucket.name.try_namespace()) .collect(); + let current_limits = self.rate_limits.current_limits(); for namespace in namespaces { - let limits = self.rate_limits().check_with_quotas( + let limits = current_limits.check_with_quotas( project_state.get_quotas(), scoping.item(DataCategory::MetricBucket), ); @@ -1387,13 +1382,12 @@ mod tests { let metric_outcomes = MetricOutcomes::new(MetricStats::test().0, outcome_aggregator.clone()); - let project = create_project(None); + let mut project = create_project(None); project.merge_buckets_into_aggregator( &aggregator, &envelope_processor, &outcome_aggregator, &metric_outcomes, - &project.state_value().unwrap(), 
Buckets::test(vec![create_transaction_metric()]), ); drop(aggregator); @@ -1416,7 +1410,7 @@ mod tests { let metric_outcomes = MetricOutcomes::new(MetricStats::test().0, outcome_aggregator.clone()); - let project = create_project(Some(json!({ + let mut project = create_project(Some(json!({ "quotas": [{ "id": "foo", "categories": ["transaction"], @@ -1430,7 +1424,6 @@ mod tests { &envelope_processor, &outcome_aggregator, &metric_outcomes, - &project.state_value().unwrap(), Buckets::test(vec![create_transaction_metric()]), ); drop(aggregator); @@ -1458,13 +1451,12 @@ mod tests { let metric_outcomes = MetricOutcomes::new(MetricStats::test().0, outcome_aggregator.clone()); - let project = create_project(None); + let mut project = create_project(None); project.merge_buckets_into_aggregator( &aggregator, &envelope_processor, &outcome_aggregator, &metric_outcomes, - &project.state_value().unwrap(), Buckets::test(vec![create_transaction_bucket()]), ); drop(aggregator); @@ -1488,7 +1480,7 @@ mod tests { let metric_outcomes = MetricOutcomes::new(MetricStats::test().0, outcome_aggregator.clone()); - let project = create_project(Some(json!({ + let mut project = create_project(Some(json!({ "quotas": [{ "id": "foo", "categories": ["transaction"], @@ -1502,7 +1494,6 @@ mod tests { &envelope_processor, &outcome_aggregator, &metric_outcomes, - &project.state_value().unwrap(), Buckets::test(vec![create_transaction_bucket()]), ); drop(aggregator); diff --git a/relay-server/src/utils/dynamic_sampling.rs b/relay-server/src/utils/dynamic_sampling.rs index 05d4624e02..851ab99653 100644 --- a/relay-server/src/utils/dynamic_sampling.rs +++ b/relay-server/src/utils/dynamic_sampling.rs @@ -8,7 +8,7 @@ use relay_base_schema::project::ProjectKey; use relay_event_schema::protocol::{Event, TraceContext}; use relay_sampling::config::{RuleType, SamplingConfig}; use relay_sampling::dsc::{DynamicSamplingContext, TraceUserContext}; -use relay_sampling::evaluation::{SamplingEvaluator, SamplingMatch}; +use relay_sampling::evaluation::{SamplingDecision, SamplingEvaluator, SamplingMatch}; use crate::envelope::{Envelope, ItemType}; use crate::services::outcome::Outcome; @@ -151,25 +151,18 @@ impl SamplingResult { matches!(self, &Self::Match(_)) } - /// Returns `true` if the event should be dropped. - pub fn should_drop(&self) -> bool { - !self.should_keep() - } - - /// Returns `true` if the event should be kept. - pub fn should_keep(&self) -> bool { + /// Boolean decision whether to keep or drop the item. + pub fn decision(&self) -> SamplingDecision { match self { - SamplingResult::Match(sampling_match) => sampling_match.should_keep(), - // If no rules matched on an event, we want to keep it. - SamplingResult::NoMatch => true, - SamplingResult::Pending => true, + Self::Match(sampling_match) => sampling_match.decision(), + _ => SamplingDecision::Keep, } } /// Consumes the sampling results and returns and outcome if the sampling decision is drop. 
pub fn into_dropped_outcome(self) -> Option { match self { - SamplingResult::Match(sampling_match) if sampling_match.should_drop() => Some( + SamplingResult::Match(sampling_match) if sampling_match.decision().is_drop() => Some( Outcome::FilteredSampling(sampling_match.into_matched_rules().into()), ), SamplingResult::Match(_) => None, @@ -208,7 +201,7 @@ pub fn is_trace_fully_sampled( let rules = root_project_config.filter_rules(RuleType::Trace); let evaluation = evaluator.match_rules(dsc.trace_id, dsc, rules); - Some(SamplingResult::from(evaluation).should_keep()) + Some(SamplingResult::from(evaluation).decision().is_keep()) } /// Returns the project key defined in the `trace` header of the envelope. @@ -373,7 +366,7 @@ mod tests { .into(); assert!(result.is_match()); - assert!(result.should_keep()); + assert!(result.decision().is_keep()); } #[test] /// Tests that an event is dropped when there is a match and we have 0% sample rate. @@ -387,7 +380,7 @@ mod tests { .into(); assert!(result.is_match()); - assert!(result.should_drop()); + assert!(result.decision().is_drop()); } #[test] @@ -410,7 +403,7 @@ mod tests { .into(); assert!(result.is_no_match()); - assert!(result.should_keep()); + assert!(result.decision().is_keep()); } #[test] @@ -424,7 +417,7 @@ mod tests { .into(); assert!(result.is_match()); - assert!(result.should_keep()); + assert!(result.decision().is_keep()); } #[test] diff --git a/relay-server/src/utils/managed_envelope.rs b/relay-server/src/utils/managed_envelope.rs index ee48ba48dc..79bd5cb230 100644 --- a/relay-server/src/utils/managed_envelope.rs +++ b/relay-server/src/utils/managed_envelope.rs @@ -10,7 +10,7 @@ use chrono::{DateTime, Utc}; use relay_quotas::{DataCategory, Scoping}; use relay_system::Addr; -use crate::envelope::{Envelope, Item, ItemType}; +use crate::envelope::{Envelope, Item}; use crate::extractors::RequestMeta; use crate::services::outcome::{DiscardReason, Outcome, TrackOutcome}; use crate::services::processor::{Processed, ProcessingGroup}; @@ -292,18 +292,15 @@ impl ManagedEnvelope { where F: FnMut(&mut Item) -> ItemAction, { - let mut outcomes = vec![]; - let use_indexed = self.use_index_category(); + let mut outcomes = Vec::new(); self.envelope.retain_items(|item| match f(item) { ItemAction::Keep => true, ItemAction::DropSilently => false, ItemAction::Drop(outcome) => { - let use_indexed = if item.ty() == &ItemType::Span { - item.metrics_extracted() - } else { - use_indexed - }; - if let Some(category) = item.outcome_category(use_indexed) { + if let Some(category) = item.outcome_category() { + if let Some(indexed) = category.index_category() { + outcomes.push((outcome.clone(), indexed, item.quantity())); + }; outcomes.push((outcome, category, item.quantity())); }; false @@ -320,16 +317,6 @@ impl ManagedEnvelope { self.envelope.drop_items_silently(); } - /// Record that event metrics have been extracted. - /// - /// This is usually done automatically as part of `EnvelopeContext::new` or `update`. However, - /// if the context needs to be updated in-flight without recomputing the entire summary, this - /// method can record that metric extraction for the event item has occurred. - pub fn set_event_metrics_extracted(&mut self) -> &mut Self { - self.context.summary.transaction_metrics_extracted = true; - self - } - /// Re-scopes this context to the given scoping. 
pub fn scope(&mut self, scoping: Scoping) -> &mut Self { self.context.scoping = scoping; @@ -342,6 +329,9 @@ impl ManagedEnvelope { pub fn reject_event(&mut self, outcome: Outcome) { if let Some(event_category) = self.event_category() { self.envelope.retain_items(|item| !item.creates_event()); + if let Some(indexed) = event_category.index_category() { + self.track_outcome(outcome.clone(), indexed, 1); + } self.track_outcome(outcome, event_category, 1); } } @@ -375,28 +365,9 @@ impl ManagedEnvelope { } } - /// Returns `true` if the indexed data category should be used for reporting. - /// - /// If metrics have been extracted from the event item, we use the indexed category - /// (for example, [TransactionIndexed](`DataCategory::TransactionIndexed`)) for reporting - /// rate limits and outcomes, because reporting of the main category - /// (for example, [Transaction](`DataCategory::Transaction`) for processed transactions) - /// will be handled by the metrics aggregator. - fn use_index_category(&self) -> bool { - self.context.summary.transaction_metrics_extracted - } - /// Returns the data category of the event item in the envelope. - /// - /// If metrics have been extracted from the event item, this will return the indexing category. - /// Outcomes for metrics (the base data category) will be logged by the metrics aggregator. fn event_category(&self) -> Option { - let category = self.context.summary.event_category?; - - match category.index_category() { - Some(index_category) if self.use_index_category() => Some(index_category), - _ => Some(category), - } + self.context.summary.event_category } /// Records rejection outcomes for all items stored in this context. @@ -437,6 +408,9 @@ impl ManagedEnvelope { .send(Capture::rejected(self.envelope.event_id(), &outcome)); if let Some(category) = self.event_category() { + if let Some(category) = category.index_category() { + self.track_outcome(outcome.clone(), category, 1); + } self.track_outcome(outcome.clone(), category, 1); } @@ -451,11 +425,12 @@ impl ManagedEnvelope { if self.context.summary.profile_quantity > 0 { self.track_outcome( outcome.clone(), - if self.use_index_category() { - DataCategory::ProfileIndexed - } else { - DataCategory::Profile - }, + DataCategory::Profile, + self.context.summary.profile_quantity, + ); + self.track_outcome( + outcome.clone(), + DataCategory::ProfileIndexed, self.context.summary.profile_quantity, ); } @@ -463,10 +438,12 @@ impl ManagedEnvelope { if self.context.summary.span_quantity > 0 { self.track_outcome( outcome.clone(), - match self.context.summary.span_metrics_extracted { - true => DataCategory::SpanIndexed, - false => DataCategory::Span, - }, + DataCategory::Span, + self.context.summary.span_quantity, + ); + self.track_outcome( + outcome.clone(), + DataCategory::SpanIndexed, self.context.summary.span_quantity, ); } @@ -606,7 +583,6 @@ mod tests { test_store, ProcessingGroup::Ungrouped, ); - env.context.summary.span_metrics_extracted = true; env.context.summary.span_quantity = 123; env.context.summary.secondary_span_quantity = 456; @@ -614,6 +590,11 @@ mod tests { rx.close(); + let outcome = rx.blocking_recv().unwrap(); + assert_eq!(outcome.category, DataCategory::Span); + assert_eq!(outcome.quantity, 123); + assert_eq!(outcome.outcome, Outcome::Abuse); + let outcome = rx.blocking_recv().unwrap(); assert_eq!(outcome.category, DataCategory::SpanIndexed); assert_eq!(outcome.quantity, 123); diff --git a/relay-server/src/utils/rate_limits.rs b/relay-server/src/utils/rate_limits.rs index 
0c537e380c..90c20b70c5 100644 --- a/relay-server/src/utils/rate_limits.rs +++ b/relay-server/src/utils/rate_limits.rs @@ -1,6 +1,5 @@ use std::fmt::{self, Write}; -use relay_dynamic_config::{ErrorBoundary, ProjectConfig}; use relay_quotas::{ DataCategories, DataCategory, ItemScoping, QuotaScope, RateLimit, RateLimitScope, RateLimits, ReasonCode, Scoping, @@ -181,12 +180,6 @@ pub struct EnvelopeSummary { /// Indicates that the envelope contains regular attachments that do not create event payloads. pub has_plain_attachments: bool, - /// Whether the envelope contains a transaction which already had the metrics extracted. - pub transaction_metrics_extracted: bool, - - /// Whether the envelope contains spans which already had metrics extracted. - pub span_metrics_extracted: bool, - /// The payload size of this envelope. pub payload_size: usize, @@ -212,15 +205,6 @@ impl EnvelopeSummary { summary.has_plain_attachments = true; } - if *item.ty() == ItemType::Transaction && item.metrics_extracted() { - summary.transaction_metrics_extracted = true; - } - - if item.is_span() && item.metrics_extracted() { - // This assumes that if one span had metrics extracted, all of them have. - summary.span_metrics_extracted = true; - } - // If the item has been rate limited before, the quota has been consumed and outcomes // emitted. We can skip it here. if item.rate_limited() { @@ -300,6 +284,15 @@ impl CategoryLimit { } } + /// Recreates the category limit for a new category with the same reason. + fn clone_for(&self, category: DataCategory, quantity: usize) -> CategoryLimit { + Self { + category, + quantity, + reason_code: self.reason_code.clone(), + } + } + /// Returns `true` if this is an active limit. /// /// This indicates that the category is limited and a certain quantity is removed from the @@ -324,22 +317,24 @@ impl Default for CategoryLimit { pub struct Enforcement { /// The event item rate limit. event: CategoryLimit, + /// The rate limit for the indexed category of the event. + event_indexed: CategoryLimit, /// The combined attachment item rate limit. attachments: CategoryLimit, /// The combined session item rate limit. sessions: CategoryLimit, /// The combined profile item rate limit. profiles: CategoryLimit, + /// The rate limit for the indexed profiles category. + profiles_indexed: CategoryLimit, /// The combined replay item rate limit. replays: CategoryLimit, /// The combined check-in item rate limit. check_ins: CategoryLimit, - /// The combined rate limit for metrics extracted from transactions. - event_metrics: CategoryLimit, /// The combined spans rate limit. spans: CategoryLimit, - /// The combined rate limit for metrics extracted from spans. - span_metrics: CategoryLimit, + /// The rate limit for the indexed span category. + spans_indexed: CategoryLimit, /// The combined rate limit for user-reports. user_reports_v2: CategoryLimit, /// The combined profile chunk item rate limit. @@ -348,9 +343,8 @@ pub struct Enforcement { impl Enforcement { /// Returns `true` if the event should be rate limited. - #[cfg(feature = "processing")] pub fn event_active(&self) -> bool { - self.event.is_active() + self.event.is_active() || self.event_indexed.is_active() } /// Helper for `track_outcomes`. @@ -366,27 +360,29 @@ impl Enforcement { let Self { event, + event_indexed, attachments, sessions: _, // Do not report outcomes for sessions. 
profiles, + profiles_indexed, replays, check_ins, - event_metrics, spans, - span_metrics, + spans_indexed, user_reports_v2, profile_chunks, } = self; let limits = [ event, + event_indexed, attachments, profiles, + profiles_indexed, replays, check_ins, - event_metrics, spans, - span_metrics, + spans_indexed, user_reports_v2, profile_chunks, ]; @@ -433,22 +429,20 @@ impl Enforcement { /// - If the event is removed, all items depending on the event are removed (e.g. attachments). /// - Attachments are not removed if they create events (e.g. minidumps). /// - Sessions are handled separate to all of the above. -pub struct EnvelopeLimiter<'a, F> { +pub struct EnvelopeLimiter { check: F, - event_category: Option<(DataCategory, bool)>, - config: Option<&'a ProjectConfig>, + event_category: Option, } -impl<'a, E, F> EnvelopeLimiter<'a, F> +impl EnvelopeLimiter where F: FnMut(ItemScoping<'_>, usize) -> Result, { /// Create a new `EnvelopeLimiter` with the given `check` function. - pub fn new(config: Option<&'a ProjectConfig>, check: F) -> Self { + pub fn new(check: F) -> Self { Self { check, event_category: None, - config, } } @@ -458,8 +452,8 @@ where /// matching item in the envelope. Other items are handled according to the rules as if the /// event item were present. #[cfg(feature = "processing")] - pub fn assume_event(&mut self, category: DataCategory, metrics_extracted: bool) { - self.event_category = Some((category, metrics_extracted)); + pub fn assume_event(&mut self, category: DataCategory) { + self.event_category = Some(category); } /// Process rate limits for the envelope, removing offending items and returning applied limits. @@ -511,36 +505,13 @@ where scoping: &Scoping, ) -> Result<(Enforcement, RateLimits), E> { let mut summary = EnvelopeSummary::compute(envelope); - if let Some((event_category, metrics_extracted)) = self.event_category { - summary.event_category = Some(event_category); - summary.transaction_metrics_extracted = metrics_extracted; - } + summary.event_category = self.event_category.or(summary.event_category); let (enforcement, rate_limits) = self.execute(&summary, scoping)?; envelope.retain_items(|item| self.retain_item(item, &enforcement)); Ok((enforcement, rate_limits)) } - /// Returns a dedicated data category for indexing if metrics are to be extracted. - /// - /// This is similar to [`DataCategory::index_category`], with an additional check if metrics - /// extraction is enabled for this category. At this point, this is only true for transactions: - /// - /// - `DataCategory::Transaction` counts the transaction metrics. If quotas with this category - /// are exhausted, both the event and metrics are dropped. - /// - `DataCategory::TransactionIndexed` counts ingested and stored events. If quotas with this - /// category are exhausted, just the event payload is dropped, but metrics are kept. - fn transaction_index_category(&self, category: DataCategory) -> Option { - if category != DataCategory::Transaction { - return None; - } - - match self.config?.transaction_metrics { - Some(ErrorBoundary::Ok(ref c)) if c.is_enabled() => category.index_category(), - _ => None, - } - } - fn execute( &mut self, summary: &EnvelopeSummary, @@ -550,73 +521,29 @@ where let mut enforcement = Enforcement::default(); if let Some(category) = summary.event_category { - let mut longest; - let mut event_limits; - - if let Some(index_category) = self.transaction_index_category(category) { - // Check for rate limits on the main category (e.g. transaction) but do not consume - // quota. 
Quota will be consumed by metrics in the metrics aggregator instead. - event_limits = (self.check)(scoping.item(category), 0)?; - longest = event_limits.longest(); - - // Only enforce and record an outcome if metrics haven't been extracted yet. - // Otherwise, the outcome is logged at a different place. - if !summary.transaction_metrics_extracted { - enforcement.event_metrics = CategoryLimit::new(category, 1, longest); - if summary.span_quantity > 0 { - enforcement.span_metrics = - CategoryLimit::new(DataCategory::Span, summary.span_quantity, longest); - } - } - - // If the main category is rate limited, we drop both the event and metrics. If - // there's no rate limit, check for specific indexing quota and drop just the event. - if summary.transaction_metrics_extracted && longest.is_none() { - event_limits = (self.check)(scoping.item(index_category), 1)?; - longest = event_limits.longest(); - } - - enforcement.event = CategoryLimit::new(index_category, 1, longest); - if summary.span_quantity > 0 { - enforcement.spans = CategoryLimit::new( - DataCategory::SpanIndexed, - summary.span_quantity, - longest, - ); + // Check the broad category for limits. + let mut event_limits = (self.check)(scoping.item(category), 1)?; + enforcement.event = CategoryLimit::new(category, 1, event_limits.longest()); + + if let Some(index_category) = category.index_category() { + // Check the specific/indexed category for limits only if the broad category does not + // already have an enforced limit. + if event_limits.is_empty() { + event_limits.merge((self.check)(scoping.item(index_category), 1)?); } - } else { - event_limits = (self.check)(scoping.item(category), 1)?; - longest = event_limits.longest(); - enforcement.event = CategoryLimit::new(category, 1, longest); - if summary.span_quantity > 0 { - enforcement.spans = - CategoryLimit::new(DataCategory::Span, summary.span_quantity, longest); - } - } - - // Record the same reason for attachments, if there are any. - enforcement.attachments = CategoryLimit::new( - DataCategory::Attachment, - summary.attachment_quantity, - longest, - ); - // It makes no sense to store profiles without transactions, so if the event - // is rate limited, rate limit profiles as well.
- enforcement.profiles = CategoryLimit::new( - if summary.transaction_metrics_extracted { - DataCategory::ProfileIndexed - } else { - DataCategory::Profile - }, - summary.profile_quantity, - longest, - ); + enforcement.event_indexed = + CategoryLimit::new(index_category, 1, event_limits.longest()); + }; rate_limits.merge(event_limits); } - if !enforcement.event.is_active() && summary.attachment_quantity > 0 { + if enforcement.event_active() { + enforcement.attachments = enforcement + .event + .clone_for(DataCategory::Attachment, summary.attachment_quantity); + } else if summary.attachment_quantity > 0 { let item_scoping = scoping.item(DataCategory::Attachment); let attachment_limits = (self.check)(item_scoping, summary.attachment_quantity)?; enforcement.attachments = CategoryLimit::new( @@ -644,18 +571,38 @@ where rate_limits.merge(session_limits); } - if !enforcement.event.is_active() && summary.profile_quantity > 0 { - let item_scoping = scoping.item(DataCategory::Profile); - let profile_limits = (self.check)(item_scoping, summary.profile_quantity)?; + if enforcement.event_active() { + enforcement.profiles = enforcement + .event + .clone_for(DataCategory::Profile, summary.profile_quantity); + + enforcement.profiles_indexed = enforcement + .event_indexed + .clone_for(DataCategory::ProfileIndexed, summary.profile_quantity) + } else if summary.profile_quantity > 0 { + let mut profile_limits = (self.check)( + scoping.item(DataCategory::Profile), + summary.profile_quantity, + )?; enforcement.profiles = CategoryLimit::new( - if summary.transaction_metrics_extracted { - DataCategory::ProfileIndexed - } else { - DataCategory::Profile - }, + DataCategory::Profile, summary.profile_quantity, profile_limits.longest(), ); + + if profile_limits.is_empty() { + profile_limits.merge((self.check)( + scoping.item(DataCategory::ProfileIndexed), + summary.profile_quantity, + )?); + } + + enforcement.profiles_indexed = CategoryLimit::new( + DataCategory::ProfileIndexed, + summary.profile_quantity, + profile_limits.longest(), + ); + rate_limits.merge(profile_limits); } @@ -681,35 +628,35 @@ where rate_limits.merge(checkin_limits); } - // We want to process spans rate limits only if they were not already applied because a - // rate limited transaction has also rate limited its child spans. - if !enforcement.event.is_active() && summary.span_quantity > 0 { - // Check for rate limits on the main category but do not consume - // quota. Quota will be consumed by the metrics rate limiter instead. - let mut span_limits = (self.check)(scoping.item(DataCategory::Span), 0)?; - let mut longest = span_limits.longest(); - - // Only enforce and record an outcome if metrics haven't been extracted yet. - // Otherwise, the outcome is logged by the metrics rate limiter. - if !summary.span_metrics_extracted { - enforcement.span_metrics = - CategoryLimit::new(DataCategory::Span, summary.span_quantity, longest); - } + if enforcement.event_active() { + enforcement.spans = enforcement + .event + .clone_for(DataCategory::Span, summary.span_quantity); + + enforcement.spans_indexed = enforcement + .event_indexed + .clone_for(DataCategory::SpanIndexed, summary.span_quantity) + } else if summary.span_quantity > 0 { + let mut span_limits = + (self.check)(scoping.item(DataCategory::Span), summary.span_quantity)?; + enforcement.spans = CategoryLimit::new( + DataCategory::Span, + summary.span_quantity, + span_limits.longest(), + ); - // If the main category is rate limited, we drop both the spans and metrics. 
If - // there's no rate limit, check for specific indexing quota and drop just the event. - if summary.span_metrics_extracted && longest.is_none() { - // Metrics were extracted and aren't rate limited. Check if there - // is a separate rate limit for indexed spans: - span_limits = (self.check)( + if span_limits.is_empty() { + span_limits.merge((self.check)( scoping.item(DataCategory::SpanIndexed), summary.span_quantity, - )?; - longest = span_limits.longest(); + )?); } - enforcement.spans = - CategoryLimit::new(DataCategory::SpanIndexed, summary.span_quantity, longest); + enforcement.spans_indexed = CategoryLimit::new( + DataCategory::SpanIndexed, + summary.span_quantity, + span_limits.longest(), + ); rate_limits.merge(span_limits); } @@ -734,47 +681,50 @@ where return false; } - // Remove attachments, except those required for processing - if enforcement.attachments.is_active() && item.ty() == &ItemType::Attachment { - if item.creates_event() { - item.set_rate_limited(true); - return true; + // When checking limits for categories that have an indexed variant, + // we only have to check the more specific, the indexed, variant + // to determine whether an item is limited. + match item.ty() { + ItemType::Attachment => { + if !enforcement.attachments.is_active() { + return true; + } + if item.creates_event() { + item.set_rate_limited(true); + true + } else { + false + } } - - return false; + ItemType::Session => !enforcement.sessions.is_active(), + ItemType::Profile => !enforcement.profiles_indexed.is_active(), + ItemType::ReplayEvent => !enforcement.replays.is_active(), + ItemType::ReplayVideo => !enforcement.replays.is_active(), + ItemType::ReplayRecording => !enforcement.replays.is_active(), + ItemType::CheckIn => !enforcement.check_ins.is_active(), + ItemType::Span => !enforcement.spans_indexed.is_active(), + ItemType::OtelSpan => !enforcement.spans_indexed.is_active(), + ItemType::Event + | ItemType::Transaction + | ItemType::Security + | ItemType::FormData + | ItemType::RawSecurity + | ItemType::Nel + | ItemType::UnrealReport + | ItemType::UserReport + | ItemType::Sessions + | ItemType::Statsd + | ItemType::MetricBuckets + | ItemType::MetricMeta + | ItemType::ClientReport + | ItemType::UserReportV2 + | ItemType::ProfileChunk + | ItemType::Unknown(_) => true, } - - // Remove sessions independently of events - if enforcement.sessions.is_active() && item.ty() == &ItemType::Session { - return false; - } - - // Remove profiles even if the transaction is not rate limited - if enforcement.profiles.is_active() && item.ty() == &ItemType::Profile { - return false; - } - - // Remove replays independently of events. 
- if enforcement.replays.is_active() - && matches!(item.ty(), ItemType::ReplayEvent | ItemType::ReplayRecording) - { - return false; - } - - if enforcement.check_ins.is_active() && item.ty() == &ItemType::CheckIn { - return false; - } - - if (enforcement.spans.is_active() || enforcement.span_metrics.is_active()) && item.is_span() - { - return false; - } - - true } } -impl fmt::Debug for EnvelopeLimiter<'_, F> { +impl fmt::Debug for EnvelopeLimiter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("EnvelopeLimiter") .field("event_category", &self.event_category) @@ -784,10 +734,9 @@ impl fmt::Debug for EnvelopeLimiter<'_, F> { #[cfg(test)] mod tests { - use std::collections::BTreeMap; + use std::collections::{BTreeMap, BTreeSet}; use relay_base_schema::project::{ProjectId, ProjectKey}; - use relay_dynamic_config::TransactionMetricsConfig; use relay_metrics::MetricNamespace; use relay_quotas::RetryAfter; use smallvec::smallvec; @@ -1031,6 +980,7 @@ mod tests { struct MockLimiter { denied: Vec, called: BTreeMap, + checked: BTreeSet, } impl MockLimiter { @@ -1055,138 +1005,155 @@ mod tests { Ok(limits) } - pub fn assert_call(&self, category: DataCategory, quantity: Option) { - assert_eq!(self.called.get(&category), quantity.as_ref()); + #[track_caller] + pub fn assert_call(&mut self, category: DataCategory, expected: usize) { + self.checked.insert(category); + + let quantity = self.called.get(&category).copied(); + assert_eq!( + quantity, + Some(expected), + "Expected quantity `{expected}` for data category `{category}`, got {quantity:?}." + ); + } + } + + impl Drop for MockLimiter { + fn drop(&mut self) { + if std::thread::panicking() { + return; + } + + for checked in &self.checked { + self.called.remove(checked); + } + + if self.called.is_empty() { + return; + } + + let not_asserted = self + .called + .iter() + .map(|(k, v)| format!("- {k}: {v}")) + .collect::>() + .join("\n"); + + panic!("Following calls to the limiter were not asserted:\n{not_asserted}"); } } #[test] fn test_enforce_pass_empty() { let mut envelope = envelope![]; - let config = ProjectConfig::default(); let mut mock = MockLimiter::default(); - let (_, limits) = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)) + let (_, limits) = EnvelopeLimiter::new(|s, q| mock.check(s, q)) .enforce(&mut envelope, &scoping()) .unwrap(); assert!(!limits.is_limited()); assert!(envelope.is_empty()); - mock.assert_call(DataCategory::Error, None); - mock.assert_call(DataCategory::Attachment, None); - mock.assert_call(DataCategory::Session, None); } #[test] fn test_enforce_limit_error_event() { let mut envelope = envelope![Event]; - let config = ProjectConfig::default(); let mut mock = MockLimiter::default().deny(DataCategory::Error); - let (_, limits) = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)) + let (_, limits) = EnvelopeLimiter::new(|s, q| mock.check(s, q)) .enforce(&mut envelope, &scoping()) .unwrap(); assert!(limits.is_limited()); assert!(envelope.is_empty()); - mock.assert_call(DataCategory::Error, Some(1)); - mock.assert_call(DataCategory::Attachment, None); - mock.assert_call(DataCategory::Session, None); + mock.assert_call(DataCategory::Error, 1); } #[test] fn test_enforce_limit_error_with_attachments() { let mut envelope = envelope![Event, Attachment]; - let config = ProjectConfig::default(); let mut mock = MockLimiter::default().deny(DataCategory::Error); - let (_, limits) = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)) + let (_, limits) = EnvelopeLimiter::new(|s, 
q| mock.check(s, q)) .enforce(&mut envelope, &scoping()) .unwrap(); assert!(limits.is_limited()); assert!(envelope.is_empty()); - mock.assert_call(DataCategory::Error, Some(1)); - // Error is limited, so no need to call the attachment quota - mock.assert_call(DataCategory::Attachment, None); - mock.assert_call(DataCategory::Session, None); + mock.assert_call(DataCategory::Error, 1); } #[test] fn test_enforce_limit_minidump() { let mut envelope = envelope![Attachment::Minidump]; - let config = ProjectConfig::default(); let mut mock = MockLimiter::default().deny(DataCategory::Error); - let (_, limits) = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)) + let (_, limits) = EnvelopeLimiter::new(|s, q| mock.check(s, q)) .enforce(&mut envelope, &scoping()) .unwrap(); assert!(limits.is_limited()); assert!(envelope.is_empty()); - mock.assert_call(DataCategory::Error, Some(1)); - // Error is limited, so no need to call the attachment quota - mock.assert_call(DataCategory::Attachment, None); - mock.assert_call(DataCategory::Session, None); + mock.assert_call(DataCategory::Error, 1); } #[test] fn test_enforce_limit_attachments() { let mut envelope = envelope![Attachment::Minidump, Attachment]; - let config = ProjectConfig::default(); let mut mock = MockLimiter::default().deny(DataCategory::Attachment); - let (_, limits) = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)) + let (_, limits) = EnvelopeLimiter::new(|s, q| mock.check(s, q)) .enforce(&mut envelope, &scoping()) .unwrap(); // Attachments would be limited, but crash reports create events and are thus allowed. assert!(limits.is_limited()); assert_eq!(envelope.len(), 1); - mock.assert_call(DataCategory::Error, Some(1)); - mock.assert_call(DataCategory::Attachment, Some(20)); - mock.assert_call(DataCategory::Session, None); + mock.assert_call(DataCategory::Error, 1); + mock.assert_call(DataCategory::Attachment, 20); } /// Limit stand-alone profiles. #[test] fn test_enforce_limit_profiles() { let mut envelope = envelope![Profile, Profile]; - let config = ProjectConfig::default(); let mut mock = MockLimiter::default().deny(DataCategory::Profile); - let (enforcement, limits) = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)) + let (enforcement, limits) = EnvelopeLimiter::new(|s, q| mock.check(s, q)) .enforce(&mut envelope, &scoping()) .unwrap(); assert!(limits.is_limited()); assert_eq!(envelope.len(), 0); - assert_eq!(mock.called, BTreeMap::from([(DataCategory::Profile, 2)])); + mock.assert_call(DataCategory::Profile, 2); assert_eq!( get_outcomes(&envelope, enforcement), - vec![(DataCategory::Profile, 2),] + vec![ + (DataCategory::Profile, 2), + (DataCategory::ProfileIndexed, 2) + ] ); } /// Limit replays. 
#[test] fn test_enforce_limit_replays() { - let mut envelope = envelope![ReplayEvent, ReplayRecording]; - let config = ProjectConfig::default(); + let mut envelope = envelope![ReplayEvent, ReplayRecording, ReplayVideo]; let mut mock = MockLimiter::default().deny(DataCategory::Replay); - let (enforcement, limits) = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)) + let (enforcement, limits) = EnvelopeLimiter::new(|s, q| mock.check(s, q)) .enforce(&mut envelope, &scoping()) .unwrap(); assert!(limits.is_limited()); assert_eq!(envelope.len(), 0); - assert_eq!(mock.called, BTreeMap::from([(DataCategory::Replay, 2)])); + mock.assert_call(DataCategory::Replay, 3); assert_eq!( get_outcomes(&envelope, enforcement), - vec![(DataCategory::Replay, 2),] + vec![(DataCategory::Replay, 3),] ); } @@ -1194,43 +1161,36 @@ mod tests { #[test] fn test_enforce_limit_monitor_checkins() { let mut envelope = envelope![CheckIn]; - let config = ProjectConfig::default(); let mut mock = MockLimiter::default().deny(DataCategory::Monitor); - let (enforcement, limits) = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)) + let (enforcement, limits) = EnvelopeLimiter::new(|s, q| mock.check(s, q)) .enforce(&mut envelope, &scoping()) .unwrap(); assert!(limits.is_limited()); assert_eq!(envelope.len(), 0); - assert_eq!(mock.called, BTreeMap::from([(DataCategory::Monitor, 1)])); + mock.assert_call(DataCategory::Monitor, 1); - let outcomes = enforcement - .get_outcomes(&envelope, &scoping()) - .map(|outcome| (outcome.outcome, outcome.category, outcome.quantity)) - .collect::>(); assert_eq!( - outcomes, - vec![(Outcome::RateLimited(None), DataCategory::Monitor, 1)] + get_outcomes(&envelope, enforcement), + vec![(DataCategory::Monitor, 1)] ) } #[test] fn test_enforce_pass_minidump() { let mut envelope = envelope![Attachment::Minidump]; - let config = ProjectConfig::default(); let mut mock = MockLimiter::default().deny(DataCategory::Attachment); - let (_, limits) = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)) + let (_, limits) = EnvelopeLimiter::new(|s, q| mock.check(s, q)) .enforce(&mut envelope, &scoping()) .unwrap(); // If only crash report attachments are present, we don't emit a rate limit. assert!(!limits.is_limited()); assert_eq!(envelope.len(), 1); - mock.assert_call(DataCategory::Error, Some(1)); - mock.assert_call(DataCategory::Attachment, Some(10)); - mock.assert_call(DataCategory::Session, None); + mock.assert_call(DataCategory::Error, 1); + mock.assert_call(DataCategory::Attachment, 10); } #[test] @@ -1241,179 +1201,126 @@ mod tests { item.set_payload(ContentType::OctetStream, "0123456789"); item.set_rate_limited(true); envelope.add_item(item); - let config = ProjectConfig::default(); let mut mock = MockLimiter::default().deny(DataCategory::Error); - let (_, limits) = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)) + let (_, limits) = EnvelopeLimiter::new(|s, q| mock.check(s, q)) .enforce(&mut envelope, &scoping()) .unwrap(); assert!(!limits.is_limited()); // No new rate limits applied. 
assert_eq!(envelope.len(), 1); // The item was retained - mock.assert_call(DataCategory::Error, None); - mock.assert_call(DataCategory::Attachment, None); // Limiter not invoked - mock.assert_call(DataCategory::Session, None); } #[test] fn test_enforce_pass_sessions() { let mut envelope = envelope![Session, Session, Session]; - let config = ProjectConfig::default(); let mut mock = MockLimiter::default().deny(DataCategory::Error); - let (_, limits) = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)) + let (_, limits) = EnvelopeLimiter::new(|s, q| mock.check(s, q)) .enforce(&mut envelope, &scoping()) .unwrap(); // If only crash report attachments are present, we don't emit a rate limit. assert!(!limits.is_limited()); assert_eq!(envelope.len(), 3); - mock.assert_call(DataCategory::Error, None); - mock.assert_call(DataCategory::Attachment, None); - mock.assert_call(DataCategory::Session, Some(3)); + mock.assert_call(DataCategory::Session, 3); } #[test] fn test_enforce_limit_sessions() { let mut envelope = envelope![Session, Session, Event]; - let config = ProjectConfig::default(); let mut mock = MockLimiter::default().deny(DataCategory::Session); - let (_, limits) = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)) + let (_, limits) = EnvelopeLimiter::new(|s, q| mock.check(s, q)) .enforce(&mut envelope, &scoping()) .unwrap(); // If only crash report attachments are present, we don't emit a rate limit. assert!(limits.is_limited()); assert_eq!(envelope.len(), 1); - mock.assert_call(DataCategory::Error, Some(1)); - mock.assert_call(DataCategory::Attachment, None); - mock.assert_call(DataCategory::Session, Some(2)); + mock.assert_call(DataCategory::Error, 1); + mock.assert_call(DataCategory::Session, 2); } #[test] #[cfg(feature = "processing")] fn test_enforce_limit_assumed_event() { let mut envelope = envelope![]; - let config = ProjectConfig::default(); let mut mock = MockLimiter::default().deny(DataCategory::Transaction); - let mut limiter = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)); - limiter.assume_event(DataCategory::Transaction, false); + let mut limiter = EnvelopeLimiter::new(|s, q| mock.check(s, q)); + limiter.assume_event(DataCategory::Transaction); let (_, limits) = limiter.enforce(&mut envelope, &scoping()).unwrap(); assert!(limits.is_limited()); assert!(envelope.is_empty()); // obviously - mock.assert_call(DataCategory::Transaction, Some(1)); - mock.assert_call(DataCategory::Attachment, None); - mock.assert_call(DataCategory::Session, None); + mock.assert_call(DataCategory::Transaction, 1); } #[test] #[cfg(feature = "processing")] fn test_enforce_limit_assumed_attachments() { let mut envelope = envelope![Attachment, Attachment]; - let config = ProjectConfig::default(); let mut mock = MockLimiter::default().deny(DataCategory::Error); - let mut limiter = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)); - limiter.assume_event(DataCategory::Error, false); + let mut limiter = EnvelopeLimiter::new(|s, q| mock.check(s, q)); + limiter.assume_event(DataCategory::Error); let (_, limits) = limiter.enforce(&mut envelope, &scoping()).unwrap(); assert!(limits.is_limited()); assert!(envelope.is_empty()); - mock.assert_call(DataCategory::Error, Some(1)); - mock.assert_call(DataCategory::Attachment, None); - mock.assert_call(DataCategory::Session, None); - } - - fn config_with_tx_metrics() -> ProjectConfig { - ProjectConfig { - transaction_metrics: Some(ErrorBoundary::Ok(TransactionMetricsConfig::new())), - ..ProjectConfig::default() - } + 
mock.assert_call(DataCategory::Error, 1); } #[test] - fn test_enforce_transaction_no_metrics_extracted() { + fn test_enforce_transaction() { let mut envelope = envelope![Transaction]; - let config = config_with_tx_metrics(); let mut mock = MockLimiter::default().deny(DataCategory::Transaction); - let limiter = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)); + let limiter = EnvelopeLimiter::new(|s, q| mock.check(s, q)); let (enforcement, limits) = limiter.enforce(&mut envelope, &scoping()).unwrap(); assert!(limits.is_limited()); - assert!(enforcement.event_metrics.is_active()); + assert!(enforcement.event_indexed.is_active()); assert!(enforcement.event.is_active()); - mock.assert_call(DataCategory::Transaction, Some(0)); - } + mock.assert_call(DataCategory::Transaction, 1); - #[test] - fn test_enforce_event_metrics_extracted() { - let mut envelope = envelope![Transaction]; - set_extracted(&mut envelope, ItemType::Transaction); - let config = config_with_tx_metrics(); - - let mut mock = MockLimiter::default().deny(DataCategory::Transaction); - let limiter = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)); - let (enforcement, limits) = limiter.enforce(&mut envelope, &scoping()).unwrap(); - - assert!(limits.is_limited()); - assert!(!enforcement.event_metrics.is_active()); - assert!(enforcement.event.is_active()); + assert_eq!( + get_outcomes(&envelope, enforcement), + vec![ + (DataCategory::Transaction, 1), + (DataCategory::TransactionIndexed, 1), + ] + ); } #[test] fn test_enforce_transaction_no_indexing_quota() { let mut envelope = envelope![Transaction]; - let config = config_with_tx_metrics(); - - let mut mock = MockLimiter::default().deny(DataCategory::TransactionIndexed); - let limiter = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)); - let (enforcement, limits) = limiter.enforce(&mut envelope, &scoping()).unwrap(); - - // NOTE: Since metrics have not been extracted on this item, we do not check the indexing - // quota. Basic processing quota is not denied, so the item must pass rate limiting. The - // indexing quota will be checked again after metrics extraction. 
- - assert!(!limits.is_limited()); - assert!(!enforcement.event_metrics.is_active()); - assert!(!enforcement.event.is_active()); - mock.assert_call(DataCategory::Transaction, Some(0)); - } - - #[test] - fn test_enforce_event_metrics_extracted_no_indexing_quota() { - let mut envelope = envelope![Transaction]; - set_extracted(&mut envelope, ItemType::Transaction); - let config = config_with_tx_metrics(); let mut mock = MockLimiter::default().deny(DataCategory::TransactionIndexed); - let limiter = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)); + let limiter = EnvelopeLimiter::new(|s, q| mock.check(s, q)); let (enforcement, limits) = limiter.enforce(&mut envelope, &scoping()).unwrap(); assert!(limits.is_limited()); - assert!(!enforcement.event_metrics.is_active()); - assert!(enforcement.event.is_active()); - mock.assert_call(DataCategory::Transaction, Some(0)); - mock.assert_call(DataCategory::TransactionIndexed, Some(1)); + assert!(enforcement.event_indexed.is_active()); + assert!(!enforcement.event.is_active()); + mock.assert_call(DataCategory::Transaction, 1); + mock.assert_call(DataCategory::TransactionIndexed, 1); } #[test] fn test_enforce_transaction_attachment_enforced() { let mut envelope = envelope![Transaction, Attachment]; - let config = config_with_tx_metrics(); let mut mock = MockLimiter::default().deny(DataCategory::Transaction); - let limiter = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)); + let limiter = EnvelopeLimiter::new(|s, q| mock.check(s, q)); let (enforcement, _limits) = limiter.enforce(&mut envelope, &scoping()).unwrap(); assert!(enforcement.event.is_active()); assert!(enforcement.attachments.is_active()); - mock.assert_call(DataCategory::Transaction, Some(0)); - mock.assert_call(DataCategory::Attachment, None); + mock.assert_call(DataCategory::Transaction, 1); } fn get_outcomes(envelope: &Envelope, enforcement: Enforcement) -> Vec<(DataCategory, u32)> { @@ -1426,122 +1333,105 @@ mod tests { #[test] fn test_enforce_transaction_profile_enforced() { let mut envelope = envelope![Transaction, Profile]; - let config = config_with_tx_metrics(); let mut mock = MockLimiter::default().deny(DataCategory::Transaction); - let limiter = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)); + let limiter = EnvelopeLimiter::new(|s, q| mock.check(s, q)); let (enforcement, _limits) = limiter.enforce(&mut envelope, &scoping()).unwrap(); assert!(enforcement.event.is_active()); assert!(enforcement.profiles.is_active()); - mock.assert_call(DataCategory::Transaction, Some(0)); - mock.assert_call(DataCategory::Profile, None); + mock.assert_call(DataCategory::Transaction, 1); assert_eq!( get_outcomes(&envelope, enforcement), vec![ + (DataCategory::Transaction, 1), (DataCategory::TransactionIndexed, 1), (DataCategory::Profile, 1), - (DataCategory::Transaction, 1) + (DataCategory::ProfileIndexed, 1), ] ); } #[test] - fn test_enforce_transaction_attachment_enforced_metrics_extracted_indexing_quota() { + fn test_enforce_transaction_attachment_enforced_indexing_quota() { let mut envelope = envelope![Transaction, Attachment]; set_extracted(&mut envelope, ItemType::Transaction); - let config = config_with_tx_metrics(); let mut mock = MockLimiter::default().deny(DataCategory::TransactionIndexed); - let limiter = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)); + let limiter = EnvelopeLimiter::new(|s, q| mock.check(s, q)); let (enforcement, _limits) = limiter.enforce(&mut envelope, &scoping()).unwrap(); - assert!(enforcement.event.is_active()); + 
assert!(!enforcement.event.is_active()); + assert!(enforcement.event_indexed.is_active()); assert!(enforcement.attachments.is_active()); - mock.assert_call(DataCategory::Transaction, Some(0)); - mock.assert_call(DataCategory::TransactionIndexed, Some(1)); - mock.assert_call(DataCategory::Attachment, None); - } - - #[test] - fn test_enforce_span_no_metrics_extracted() { - let mut envelope = envelope![Span, Span]; - let config = config_with_tx_metrics(); - - let mut mock = MockLimiter::default().deny(DataCategory::Span); - let limiter = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)); - let (enforcement, limits) = limiter.enforce(&mut envelope, &scoping()).unwrap(); - - assert!(limits.is_limited()); - assert!(enforcement.span_metrics.is_active()); - assert!(enforcement.spans.is_active()); - mock.assert_call(DataCategory::Span, Some(0)); + mock.assert_call(DataCategory::Transaction, 1); + mock.assert_call(DataCategory::TransactionIndexed, 1); assert_eq!( get_outcomes(&envelope, enforcement), - vec![(DataCategory::SpanIndexed, 2), (DataCategory::Span, 2),] + vec![ + (DataCategory::TransactionIndexed, 1), + (DataCategory::Attachment, 10) + ] ); } #[test] - fn test_enforce_span_metrics_extracted() { - let mut envelope = envelope![Span]; - set_extracted(&mut envelope, ItemType::Span); - let config = config_with_tx_metrics(); + fn test_enforce_span() { + let mut envelope = envelope![Span, OtelSpan]; let mut mock = MockLimiter::default().deny(DataCategory::Span); - let limiter = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)); + let limiter = EnvelopeLimiter::new(|s, q| mock.check(s, q)); let (enforcement, limits) = limiter.enforce(&mut envelope, &scoping()).unwrap(); assert!(limits.is_limited()); - assert!(!enforcement.span_metrics.is_active()); + assert!(enforcement.spans_indexed.is_active()); assert!(enforcement.spans.is_active()); + mock.assert_call(DataCategory::Span, 2); assert_eq!( get_outcomes(&envelope, enforcement), - vec![(DataCategory::SpanIndexed, 1)] + vec![(DataCategory::Span, 2), (DataCategory::SpanIndexed, 2)] ); } #[test] fn test_enforce_span_no_indexing_quota() { - let mut envelope = envelope![OtelSpan]; - let config = config_with_tx_metrics(); + let mut envelope = envelope![OtelSpan, Span]; let mut mock = MockLimiter::default().deny(DataCategory::SpanIndexed); - let limiter = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)); + let limiter = EnvelopeLimiter::new(|s, q| mock.check(s, q)); let (enforcement, limits) = limiter.enforce(&mut envelope, &scoping()).unwrap(); - // NOTE: Since metrics have not been extracted on this item, we do not check the indexing - // quota. Basic processing quota is not denied, so the item must pass rate limiting. The - // indexing quota will be checked again after metrics extraction. 
- - assert!(!limits.is_limited()); - assert!(!enforcement.span_metrics.is_active()); + assert!(limits.is_limited()); + assert!(enforcement.spans_indexed.is_active()); assert!(!enforcement.spans.is_active()); - mock.assert_call(DataCategory::Span, Some(0)); + mock.assert_call(DataCategory::Span, 2); + mock.assert_call(DataCategory::SpanIndexed, 2); - assert_eq!(get_outcomes(&envelope, enforcement), vec![]); + assert_eq!( + get_outcomes(&envelope, enforcement), + vec![(DataCategory::SpanIndexed, 2)] + ); } #[test] fn test_enforce_span_metrics_extracted_no_indexing_quota() { let mut envelope = envelope![Span, OtelSpan]; set_extracted(&mut envelope, ItemType::Span); - let config = config_with_tx_metrics(); let mut mock = MockLimiter::default().deny(DataCategory::SpanIndexed); - let limiter = EnvelopeLimiter::new(Some(&config), |s, q| mock.check(s, q)); + let limiter = EnvelopeLimiter::new(|s, q| mock.check(s, q)); let (enforcement, limits) = limiter.enforce(&mut envelope, &scoping()).unwrap(); assert!(limits.is_limited()); - assert!(!enforcement.span_metrics.is_active()); - assert!(enforcement.spans.is_active()); - mock.assert_call(DataCategory::Span, Some(0)); - mock.assert_call(DataCategory::SpanIndexed, Some(2)); + assert!(enforcement.spans_indexed.is_active()); + assert!(!enforcement.spans.is_active()); + mock.assert_call(DataCategory::Span, 2); + mock.assert_call(DataCategory::SpanIndexed, 2); assert_eq!( get_outcomes(&envelope, enforcement), diff --git a/tests/integration/asserts/time.py b/tests/integration/asserts/time.py index fd1c1c981c..e432e8249f 100644 --- a/tests/integration/asserts/time.py +++ b/tests/integration/asserts/time.py @@ -1,35 +1,47 @@ from datetime import timedelta, datetime, timezone -class _WithinBounds: +def _to_datetime(v): + if isinstance(v, datetime): + return v + elif isinstance(v, int): + return datetime.fromtimestamp(v, timezone.utc) + elif isinstance(v, float): + return datetime.utcfromtimestamp(v) + elif isinstance(v, str): + return datetime.fromisoformat(v) + else: + assert False, f"cannot convert {v} to datetime" + +class _WithinBounds: def __init__(self, lower_bound, upper_bound): self._lower_bound = lower_bound self._upper_bound = upper_bound def __eq__(self, other): - assert isinstance(other, int) + other = _to_datetime(other) return self._lower_bound <= other <= self._upper_bound def __str__(self): return f"{self._lower_bound} <= x <= {self._upper_bound}" + def __repr__(self) -> str: + return str(self) + def time_after(lower_bound): - upper_bound = int(datetime.now(tz=timezone.utc).timestamp()) + upper_bound = datetime.now(tz=timezone.utc) return time_within(lower_bound, upper_bound) def time_within(lower_bound, upper_bound): + lower_bound = _to_datetime(lower_bound) + upper_bound = _to_datetime(upper_bound) assert lower_bound <= upper_bound return _WithinBounds(lower_bound, upper_bound) -def time_within_delta(timestamp, delta=None): - if delta is None: - delta = timedelta(seconds=5) - - lower_bound = (datetime.fromtimestamp(timestamp) - delta).timestamp() - upper_bound = (datetime.fromtimestamp(timestamp) + delta).timestamp() - - return _WithinBounds(lower_bound, upper_bound) +def time_within_delta(time=None, delta=timedelta(seconds=30)): + time = _to_datetime(time) if time is not None else datetime.now(tz=timezone.utc) + return _WithinBounds(time - delta, time + delta) diff --git a/tests/integration/test_metrics.py b/tests/integration/test_metrics.py index f70818472b..d9d5472d12 100644 --- a/tests/integration/test_metrics.py +++ 
b/tests/integration/test_metrics.py @@ -7,6 +7,7 @@ import signal import time import queue +from itertools import chain from .consts import ( TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION, @@ -1214,9 +1215,17 @@ def test_no_transaction_metrics_when_filtered(mini_sentry, relay): relay = relay(mini_sentry, options=TEST_CONFIG) relay.send_transaction(project_id, tx) - # The only envelope received should be outcomes: - envelope = mini_sentry.captured_events.get(timeout=3) - assert {item.type for item in envelope.items} == {"client_report"} + # The only two envelopes received should be outcomes for Transaction and TransactionIndexed: + reports = [mini_sentry.get_client_report(), mini_sentry.get_client_report()] + filtered_events = list( + chain.from_iterable(report["filtered_events"] for report in reports) + ) + filtered_events.sort(key=lambda x: x["category"]) + + assert filtered_events == [ + {"reason": "release-version", "category": "transaction", "quantity": 1}, + {"reason": "release-version", "category": "transaction_indexed", "quantity": 1}, + ] assert mini_sentry.captured_events.qsize() == 0 diff --git a/tests/integration/test_outcome.py b/tests/integration/test_outcome.py index d1aa578c0a..67969b2d2f 100644 --- a/tests/integration/test_outcome.py +++ b/tests/integration/test_outcome.py @@ -16,6 +16,7 @@ import requests from requests.exceptions import HTTPError from sentry_sdk.envelope import Envelope, Item, PayloadRef +from .asserts import time_within_delta from .test_metrics import metrics_by_name @@ -252,7 +253,7 @@ def _send_event(relay, project_id=42, event_type="error", event_id=None, trace_i return event_id -@pytest.mark.parametrize("event_type", ["error", "transaction"]) +@pytest.mark.parametrize("event_type", ["transaction"]) def test_outcomes_non_processing(relay, mini_sentry, event_type): """ Test basic outcome functionality. 
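With metrics always extracted, a dropped transaction is now reported in two data categories: the total (`transaction`, code 2) and the indexed one (`transaction_indexed`, code 9). A minimal sketch of the per-category expectation list the updated assertions build; the helper name is illustrative only, and `time_within_delta` is the matcher from `asserts/time.py` reworked earlier in this patch:

```python
# Hypothetical helper mirroring the expectation lists asserted in this diff.
from .asserts import time_within_delta  # matcher changed earlier in this patch


def expected_invalid_outcomes(categories, project_id=42, reason="project_id"):
    """Build one expected outcome per affected data category."""
    return [
        {
            "project_id": project_id,
            "outcome": 3,  # invalid
            "reason": reason,
            "category": category,
            "quantity": 1,
            "timestamp": time_within_delta(),
        }
        for category in categories
    ]


# A dropped transaction now yields two outcomes: 2 = transaction, 9 = transaction_indexed.
assert len(expected_invalid_outcomes([2, 9])) == 2
```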
@@ -266,27 +267,31 @@ def test_outcomes_non_processing(relay, mini_sentry, event_type): _send_event(relay, event_type=event_type) - outcomes_batch = mini_sentry.captured_outcomes.get(timeout=0.2) - assert mini_sentry.captured_outcomes.qsize() == 0 # we had only one batch - - outcomes = outcomes_batch.get("outcomes") - assert len(outcomes) == 1 + expected_categories = [2, 9] if event_type == "transaction" else [1] # Error - outcome = outcomes[0] + outcomes = [] + for _ in expected_categories: + outcomes.extend(mini_sentry.captured_outcomes.get(timeout=3).get("outcomes")) + assert len(outcomes) == len(expected_categories) + outcomes.sort(key=lambda x: x["category"]) - del outcome["timestamp"] # 'timestamp': '2020-06-03T16:18:59.259447Z' + expected_outcomes = [ + { + "project_id": 42, + "outcome": 3, # invalid + "reason": "project_id", # missing project id + "category": category, + "quantity": 1, + "timestamp": time_within_delta(), + } + for category in expected_categories + ] - expected_outcome = { - "project_id": 42, - "outcome": 3, # invalid - "reason": "project_id", # missing project id - "category": 2 if event_type == "transaction" else 1, - "quantity": 1, - } - assert outcome == expected_outcome + assert outcomes == expected_outcomes # no events received since all have been for an invalid project id assert mini_sentry.captured_events.empty() + assert mini_sentry.captured_outcomes.empty() def test_outcomes_not_sent_when_disabled(relay, mini_sentry): @@ -428,7 +433,7 @@ def test_outcome_forwarding( Tests that Relay forwards outcomes from a chain of relays Have a chain of many relays that eventually connect to Sentry - and verify that the outcomes sent by the first (downstream relay) + and verify that the outcomes sent by the first (downstream relay) are properly forwarded up to sentry. """ outcomes_consumer = outcomes_consumer(timeout=2) @@ -466,19 +471,22 @@ def test_outcome_forwarding( _send_event(downstream_relay, event_type=event_type) - outcome = outcomes_consumer.get_outcome() + expected_categories = [1] if event_type == "error" else [2, 9] + outcomes = outcomes_consumer.get_outcomes(n=len(expected_categories)) + outcomes.sort(key=lambda x: x["category"]) - expected_outcome = { - "project_id": 42, - "outcome": 3, - "source": "downstream-layer", - "reason": "project_id", - "category": 2 if event_type == "transaction" else 1, - "quantity": 1, - } - outcome.pop("timestamp") - - assert outcome == expected_outcome + assert outcomes == [ + { + "project_id": 42, + "outcome": 3, # Invalid + "source": "downstream-layer", + "reason": "project_id", + "category": category, + "quantity": 1, + "timestamp": time_within_delta(), + } + for category in expected_categories + ] def test_outcomes_forwarding_rate_limited( @@ -733,11 +741,15 @@ def _get_span_payload(): @pytest.mark.parametrize( - "category,is_outcome_expected", - [("session", False), ("transaction", True), ("user_report_v2", True)], + "category,outcome_categories", + [ + ("session", []), + ("transaction", ["transaction", "transaction_indexed"]), + ("user_report_v2", ["user_report_v2"]), + ], ) def test_outcomes_rate_limit( - relay_with_processing, mini_sentry, outcomes_consumer, category, is_outcome_expected + relay_with_processing, mini_sentry, outcomes_consumer, category, outcome_categories ): """ Tests that outcomes are emitted or not, depending on the type of message. 
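The parametrization above encodes which outcome categories a rate limit is expected to produce: sessions produce none, while categories with an indexed counterpart produce a pair. A small sketch of that mapping under those assumptions; the helper name is illustrative only:

```python
# Which outcome categories a rate-limited item should produce, per the
# parametrization above.
INDEXED_COUNTERPART = {"transaction": "transaction_indexed"}


def outcome_categories_for(category):
    if category == "session":
        return []  # rate-limited sessions do not emit outcomes
    counterpart = INDEXED_COUNTERPART.get(category)
    return [category] + ([counterpart] if counterpart else [])


assert outcome_categories_for("session") == []
assert outcome_categories_for("transaction") == ["transaction", "transaction_indexed"]
assert outcome_categories_for("user_report_v2") == ["user_report_v2"]
```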
@@ -782,11 +794,10 @@ def test_outcomes_rate_limit( else: relay.send_event(project_id, _get_event_payload(category)) - # give relay some to handle the request (and send any outcomes it needs to send) - time.sleep(1) - - if is_outcome_expected: - outcomes_consumer.assert_rate_limited(reason_code, categories=[category]) + if outcome_categories: + outcomes_consumer.assert_rate_limited( + reason_code, categories=outcome_categories + ) else: outcomes_consumer.assert_empty() @@ -1288,13 +1299,11 @@ def make_envelope(transaction_name): assert outcomes == expected_outcomes, outcomes -@pytest.mark.parametrize("metrics_already_extracted", [False, True]) def test_profile_outcomes_invalid( mini_sentry, relay_with_processing, outcomes_consumer, metrics_consumer, - metrics_already_extracted, ): """ Tests that Relay reports correct outcomes for invalid profiles as `Profile`. @@ -1334,7 +1343,6 @@ def make_envelope(): Item( payload=PayloadRef(bytes=json.dumps(payload).encode()), type="transaction", - headers={"metrics_extracted": metrics_already_extracted}, ) ) envelope.add_item(Item(payload=PayloadRef(bytes=b""), type="profile")) @@ -1347,12 +1355,9 @@ def make_envelope(): outcomes = outcomes_consumer.get_outcomes() outcomes.sort(key=lambda o: sorted(o.items())) - # Expect ProfileIndexed if metrics have been extracted, else Profile - expected_category = 11 if metrics_already_extracted else 6 - - expected_outcomes = [ + assert outcomes == [ { - "category": expected_category, + "category": category, "key_id": 123, "org_id": 1, "outcome": 3, # Invalid @@ -1360,20 +1365,15 @@ def make_envelope(): "quantity": 1, "reason": "profiling_invalid_json", "source": "pop-relay", - }, + "timestamp": time_within_delta(), + } + for category in [6, 11] # Profile, ProfileIndexed ] - for outcome in outcomes: - outcome.pop("timestamp") - assert outcomes == expected_outcomes, outcomes - - if not metrics_already_extracted: - # Make sure the profile will not be counted as accepted: - metrics = metrics_by_name(metrics_consumer, 4) - assert ( - "has_profile" not in metrics["d:transactions/duration@millisecond"]["tags"] - ) - assert "has_profile" not in metrics["c:transactions/usage@none"]["tags"] + # Make sure the profile will not be counted as accepted: + metrics = metrics_by_name(metrics_consumer, 4) + assert "has_profile" not in metrics["d:transactions/duration@millisecond"]["tags"] + assert "has_profile" not in metrics["c:transactions/usage@none"]["tags"] def test_profile_outcomes_too_many( @@ -1439,9 +1439,9 @@ def make_envelope(): outcomes = outcomes_consumer.get_outcomes() outcomes.sort(key=lambda o: sorted(o.items())) - expected_outcomes = [ + assert outcomes == [ { - "category": 6, # Profile + "category": category, "key_id": 123, "org_id": 1, "outcome": 3, # Invalid @@ -1449,12 +1449,10 @@ def make_envelope(): "quantity": 1, "reason": "profiling_too_many_profiles", "source": "pop-relay", - }, + "timestamp": time_within_delta(), + } + for category in [6, 11] # Profile, ProfileIndexed ] - for outcome in outcomes: - outcome.pop("timestamp") - - assert outcomes == expected_outcomes, outcomes # Make sure one profile will not be counted as accepted metrics = metrics_by_name(metrics_consumer, 4) @@ -1519,12 +1517,12 @@ def make_envelope(): envelope = make_envelope() upstream.send_envelope(project_id, envelope) - outcomes = outcomes_consumer.get_outcomes() + outcomes = outcomes_consumer.get_outcomes(n=2) outcomes.sort(key=lambda o: sorted(o.items())) - expected_outcomes = [ + assert outcomes == [ { - "category": 11, # 
ProfileIndexed + "category": category, "key_id": 123, "org_id": 1, "outcome": 3, @@ -1532,12 +1530,10 @@ def make_envelope(): "quantity": 1, "reason": "profiling_invalid_json", "source": "processing-relay", - }, + "timestamp": time_within_delta(), + } + for category in [6, 11] # Profile, ProfileIndexed ] - for outcome in outcomes: - outcome.pop("timestamp") - - assert outcomes == expected_outcomes, outcomes # Because invalid data is detected _after_ metrics extraction, there is still a metric: metrics = metrics_by_name(metrics_consumer, 4) @@ -1545,13 +1541,11 @@ def make_envelope(): assert metrics["c:transactions/usage@none"]["tags"]["has_profile"] == "true" -@pytest.mark.parametrize("metrics_already_extracted", [False, True]) @pytest.mark.parametrize("quota_category", ["transaction", "profile"]) def test_profile_outcomes_rate_limited( mini_sentry, relay_with_processing, outcomes_consumer, - metrics_already_extracted, quota_category, ): """ @@ -1601,7 +1595,6 @@ def test_profile_outcomes_rate_limited( Item( payload=PayloadRef(bytes=json.dumps(payload).encode()), type="transaction", - headers={"metrics_extracted": metrics_already_extracted}, ) ) envelope.add_item(Item(payload=PayloadRef(bytes=profile), type="profile")) @@ -1610,34 +1603,25 @@ def test_profile_outcomes_rate_limited( outcomes = outcomes_consumer.get_outcomes() outcomes.sort(key=lambda o: sorted(o.items())) - expected_outcomes = [] + expected_categories = [6, 11] # Profile, ProfileIndexed if quota_category == "transaction": # Transaction got rate limited as well: - expected_outcomes += [ - { - "category": 2, # Transaction - "key_id": 123, - "org_id": 1, - "outcome": 2, # RateLimited - "project_id": 42, - "quantity": 1, - "reason": "profiles_exceeded", - }, - ] + expected_categories += [2, 9] # Transaction, TransactionIndexed + expected_categories.sort() - expected_outcomes += [ + expected_outcomes = [ { - "category": 11 if metrics_already_extracted else 6, + "category": category, "key_id": 123, "org_id": 1, "outcome": 2, # RateLimited "project_id": 42, "quantity": 1, "reason": "profiles_exceeded", - }, + "timestamp": time_within_delta(), + } + for category in expected_categories ] - for outcome in outcomes: - outcome.pop("timestamp") assert outcomes == expected_outcomes, outcomes @@ -1850,12 +1834,10 @@ def make_envelope(transaction_name): assert outcomes == expected_outcomes, outcomes -@pytest.mark.parametrize("metrics_already_extracted", [False, True]) def test_span_outcomes_invalid( mini_sentry, relay_with_processing, outcomes_consumer, - metrics_already_extracted, ): """ Tests that Relay reports correct outcomes for invalid spans as `Span` or `Transaction`. 
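In the profile rate-limit test above, hitting the transaction quota also drops the attached profile, so the expected categories grow from the profile pair to include the transaction pair. A sketch of that bookkeeping, using the numeric codes asserted above and an illustrative function name:

```python
# Numeric data categories as asserted above.
TRANSACTION, PROFILE, TRANSACTION_INDEXED, PROFILE_INDEXED = 2, 6, 9, 11


def expected_rate_limited_categories(quota_category):
    """Categories receiving a RateLimited outcome for a transaction+profile envelope."""
    categories = [PROFILE, PROFILE_INDEXED]
    if quota_category == "transaction":
        # The transaction quota also drops the transaction itself.
        categories += [TRANSACTION, TRANSACTION_INDEXED]
    return sorted(categories)


assert expected_rate_limited_categories("profile") == [6, 11]
assert expected_rate_limited_categories("transaction") == [2, 6, 9, 11]
```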
@@ -1889,7 +1871,6 @@ def test_span_outcomes_invalid( }, "aggregator": {"bucket_interval": 1, "initial_delay": 0, "debounce_delay": 0}, } - upstream = relay_with_processing(config) # Create an envelope with an invalid profile: @@ -1901,7 +1882,6 @@ def make_envelope(): Item( payload=PayloadRef(bytes=json.dumps(payload).encode()), type="transaction", - headers={"metrics_extracted": metrics_already_extracted}, ) ) payload = _get_span_payload() @@ -1910,7 +1890,6 @@ def make_envelope(): Item( payload=PayloadRef(bytes=json.dumps(payload).encode()), type="span", - headers={"metrics_extracted": metrics_already_extracted}, ) ) return envelope @@ -1918,35 +1897,28 @@ def make_envelope(): envelope = make_envelope() upstream.send_envelope(project_id, envelope) - outcomes = outcomes_consumer.get_outcomes(timeout=10.0, n=2) + outcomes = outcomes_consumer.get_outcomes(timeout=10.0, n=4) outcomes.sort(key=lambda o: sorted(o.items())) - expected_outcomes = [ - { - "category": 9 if metrics_already_extracted else 2, - "key_id": 123, - "org_id": 1, - "outcome": 3, # Invalid - "project_id": 42, - "quantity": 1, - "reason": "invalid_transaction", - "source": "pop-relay", - }, + assert outcomes == [ { - "category": 16 if metrics_already_extracted else 12, + "category": category, "key_id": 123, "org_id": 1, "outcome": 3, # Invalid "project_id": 42, "quantity": 1, - "reason": "internal", + "reason": reason, "source": "pop-relay", - }, + "timestamp": time_within_delta(), + } + for (category, reason) in [ + (2, "invalid_transaction"), + (9, "invalid_transaction"), + (12, "internal"), + (16, "internal"), + ] ] - for outcome in outcomes: - outcome.pop("timestamp") - - assert outcomes == expected_outcomes, outcomes def test_global_rate_limit_by_namespace( diff --git a/tests/integration/test_spans.py b/tests/integration/test_spans.py index c2db4e10e5..7275b9e435 100644 --- a/tests/integration/test_spans.py +++ b/tests/integration/test_spans.py @@ -17,7 +17,7 @@ from sentry_relay.consts import DataCategory from sentry_sdk.envelope import Envelope, Item, PayloadRef -from .asserts import time_after +from .asserts import time_after, time_within_delta from .test_metrics import TEST_CONFIG from .test_store import make_transaction @@ -396,14 +396,11 @@ def envelope_with_spans( return envelope -def envelope_with_transaction_and_spans( - start: datetime, end: datetime, metrics_extracted: bool = False -) -> Envelope: +def envelope_with_transaction_and_spans(start: datetime, end: datetime) -> Envelope: envelope = Envelope() envelope.add_item( Item( type="transaction", - headers={"metrics_extracted": metrics_extracted}, payload=PayloadRef( bytes=json.dumps( { @@ -956,10 +953,25 @@ def test_otel_endpoint_disabled(mini_sentry, relay): json=make_otel_span(start, end), ) - (outcome,) = mini_sentry.captured_outcomes.get()["outcomes"] - assert outcome["category"] == DataCategory.SPAN - assert outcome["outcome"] == 3 # invalid - assert outcome["reason"] == "feature_disabled" + outcomes = [] + for _ in range(2): + outcomes.extend(mini_sentry.captured_outcomes.get(timeout=3).get("outcomes")) + outcomes.sort(key=lambda x: x["category"]) + + assert outcomes == [ + { + "org_id": 1, + "key_id": 123, + "project_id": 42, + "outcome": 3, + "reason": "feature_disabled", + "category": category.value, + "quantity": 1, + "source": "relay", + "timestamp": time_within_delta(), + } + for category in [DataCategory.SPAN, DataCategory.SPAN_INDEXED] + ] # Second attempt will cause a 403 response: with pytest.raises(HTTPError) as exc_info: @@ -1654,12 
+1666,7 @@ def summarize_outcomes(): } metrics = metrics_consumer.get_metrics(timeout=1) if category == "span": - expected_outcomes.update( - { - (12, 2): 2, # Span, RateLimited - (15, 2): 6, # MetricBucket, RateLimited - } - ) + expected_outcomes.update({(12, 2): 2}), # Span, RateLimited assert len(metrics) == 4 assert all(m[0]["name"][2:14] == "transactions" for m in metrics), metrics else: @@ -1673,14 +1680,14 @@ def summarize_outcomes(): outcomes_consumer.assert_empty() -def test_rate_limit_metrics_consistent( +def test_rate_limit_spans_in_envelope( mini_sentry, relay_with_processing, spans_consumer, metrics_consumer, outcomes_consumer, ): - """Rate limits for total spans (i.e. metrics) are enforced consistently after metrics extraction.""" + """Rate limits for total spans are enforced and no metrics are emitted.""" relay = relay_with_processing(options=TEST_CONFIG) project_id = 42 project_config = mini_sentry.add_full_project_config(project_id) @@ -1709,44 +1716,24 @@ def test_rate_limit_metrics_consistent( def summarize_outcomes(): counter = Counter() - for outcome in outcomes_consumer.get_outcomes(): + for outcome in outcomes_consumer.get_outcomes(timeout=10, n=2): counter[(outcome["category"], outcome["outcome"])] += outcome["quantity"] return dict(counter) - # First batch passes (we over-accept once) relay.send_envelope(project_id, envelope) - spans = spans_consumer.get_spans(n=4, timeout=10) - assert len(spans) == 4 - metrics = metrics_consumer.get_metrics() - assert len(metrics) > 0 - assert all(headers == [("namespace", b"spans")] for _, headers in metrics), metrics - # Accepted outcomes for main category are logged in sentry. - assert summarize_outcomes() == {(16, 0): 4} # SpanIndexed, Accepted - - # Second batch is limited - relay.send_envelope(project_id, envelope) - metrics = metrics_consumer.get_metrics() - assert len(metrics) == 0 - outcomes = summarize_outcomes() - assert outcomes.pop((15, 2)) > 0 # Metric Bucket, RateLimited - assert outcomes == { - (16, 2): 4, # SpanIndexed, RateLimited - (12, 2): 4, # Span, RateLimited - } + assert summarize_outcomes() == {(12, 2): 4, (16, 2): 4} spans_consumer.assert_empty() - outcomes_consumer.assert_empty() + metrics_consumer.assert_empty() @pytest.mark.parametrize( - "category, metrics_enabled, metrics_extracted, raises_rate_limited", + "category,raises_rate_limited", [ - ("transaction", False, False, False), - ("transaction", False, True, False), - ("transaction", True, False, True), - ("transaction_indexed", True, False, False), - ("transaction_indexed", True, True, False), + ("transaction", True), + ("transaction", True), + ("transaction_indexed", False), ], ) def test_rate_limit_is_consistent_between_transaction_and_spans( @@ -1755,8 +1742,6 @@ def test_rate_limit_is_consistent_between_transaction_and_spans( transactions_consumer, spans_consumer, category, - metrics_enabled, - metrics_extracted, raises_rate_limited, ): """Rate limits for indexed are enforced consistently after metrics extraction. 
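Several tests above and below reduce the consumer output to a `{(category, outcome): quantity}` map before comparing. A self-contained version of that pattern, with example data matching the span-filter assertion below (12 = span, 16 = span_indexed, outcome 1 = filtered):

```python
from collections import Counter


def summarize_outcomes(outcomes):
    """Reduce outcome rows to {(category, outcome): total quantity}."""
    counter = Counter()
    for outcome in outcomes:
        counter[(outcome["category"], outcome["outcome"])] += outcome["quantity"]
    return dict(counter)


outcomes = [
    {"category": 12, "outcome": 1, "quantity": 1},  # span, filtered
    {"category": 16, "outcome": 1, "quantity": 1},  # span_indexed, filtered
]
assert summarize_outcomes(outcomes) == {(12, 1): 1, (16, 1): 1}
```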
@@ -1781,10 +1766,9 @@ def test_rate_limit_is_consistent_between_transaction_and_spans( "reasonCode": "exceeded", }, ] - if metrics_enabled: - project_config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } + project_config["config"]["transactionMetrics"] = { + "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, + } transactions_consumer = transactions_consumer() spans_consumer = spans_consumer() @@ -1792,7 +1776,7 @@ def test_rate_limit_is_consistent_between_transaction_and_spans( start = datetime.now(timezone.utc) end = start + timedelta(seconds=1) - envelope = envelope_with_transaction_and_spans(start, end, metrics_extracted) + envelope = envelope_with_transaction_and_spans(start, end) # First batch passes relay.send_envelope(project_id, envelope) @@ -1802,13 +1786,15 @@ def test_rate_limit_is_consistent_between_transaction_and_spans( spans = spans_consumer.get_spans(n=2, timeout=10) assert len(spans) == 2 + relay.send_envelope(project_id, envelope) + transactions_consumer.assert_empty() + spans_consumer.assert_empty() + maybe_raises = ( pytest.raises(HTTPError, match="429 Client Error") if raises_rate_limited else contextlib.nullcontext() ) - - # Second batch is limited with maybe_raises: relay.send_envelope(project_id, envelope) @@ -1929,11 +1915,11 @@ def test_span_filtering_with_generic_inbound_filter( def summarize_outcomes(): counter = Counter() - for outcome in outcomes_consumer.get_outcomes(timeout=10, n=1): + for outcome in outcomes_consumer.get_outcomes(timeout=10, n=2): counter[(outcome["category"], outcome["outcome"])] += outcome["quantity"] return counter - assert summarize_outcomes() == {(12, 1): 1} + assert summarize_outcomes() == {(12, 1): 1, (16, 1): 1} spans_consumer.assert_empty() outcomes_consumer.assert_empty() @@ -2028,8 +2014,8 @@ def summarize_outcomes(outcomes): outcomes = outcomes_consumer.get_outcomes(timeout=10, n=4) assert summarize_outcomes(outcomes) == {(16, 0): 4} # SpanIndexed, Accepted else: - outcomes = outcomes_consumer.get_outcomes(timeout=10, n=4) - assert summarize_outcomes(outcomes) == {(12, 1): 4} # Span, Filtered + outcomes = outcomes_consumer.get_outcomes(timeout=10, n=1) + assert summarize_outcomes(outcomes) == {(16, 1): 4} # SpanIndexed, Filtered assert {o["reason"] for o in outcomes} == {"Sampled:3000"} spans_consumer.assert_empty() diff --git a/tests/integration/test_store.py index 36e81b699d..4a7312d49f 100644 --- a/tests/integration/test_store.py +++ b/tests/integration/test_store.py @@ -876,9 +876,11 @@ def test_processing_quota_transaction_indexing( buckets = list(metrics_consumer.get_metrics()) assert len(buckets) > 0 + relay.send_event(project_id, make_transaction({"message": "3rd tx"})) + tx_consumer.assert_empty() + with pytest.raises(HTTPError) as exc_info: relay.send_event(project_id, make_transaction({"message": "4th tx"})) - assert exc_info.value.response.status_code == 429, "Expected a 429 status code"
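The store test at the end exercises the enforcement order for transactions: an exhausted indexing quota drops the payload silently (the consumer stays empty), and only an exhausted total quota is surfaced to the SDK as HTTP 429. A sketch of that flow, assuming the fixtures used in the test above (`relay`, `make_transaction`, `tx_consumer` are not defined here):

```python
import pytest
from requests.exceptions import HTTPError


def assert_indexing_then_total_limit(relay, project_id, make_transaction, tx_consumer):
    # Indexing quota exhausted: the request is accepted, but nothing reaches the consumer.
    relay.send_event(project_id, make_transaction({"message": "3rd tx"}))
    tx_consumer.assert_empty()

    # Total transaction quota exhausted: the request itself is rejected.
    with pytest.raises(HTTPError) as exc_info:
        relay.send_event(project_id, make_transaction({"message": "4th tx"}))
    assert exc_info.value.response.status_code == 429, "Expected a 429 status code"
```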