diff --git a/CHANGELOG.md b/CHANGELOG.md
index 233eec2ea34..6af0fb8895b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@
 - Accept incoming requests even if there was an error fetching their project config. ([#4140](https://github.com/getsentry/relay/pull/4140))
 - Rate limit profiles when transaction was sampled. ([#4195](https://github.com/getsentry/relay/pull/4195))
 - Fix scrubbing user paths in minidump debug module names. ([#4351](https://github.com/getsentry/relay/pull/4351))
+- Stop collecting the `has_profile` metrics tag & reporting outcomes for it. ([#4365](https://github.com/getsentry/relay/pull/4365))
 - Scrub user fields in span.sentry_tags. ([#4364](https://github.com/getsentry/relay/pull/4364)), ([#4370](https://github.com/getsentry/relay/pull/4370))

 **Features**:
diff --git a/relay-server/src/metrics/minimal.rs b/relay-server/src/metrics/minimal.rs
index 37889ad6e13..f200c31e015 100644
--- a/relay-server/src/metrics/minimal.rs
+++ b/relay-server/src/metrics/minimal.rs
@@ -16,8 +16,6 @@ pub struct MinimalTrackableBucket {
     #[serde(flatten)]
     value: MinimalValue,
     #[serde(default)]
-    tags: Tags,
-    #[serde(default)]
     metadata: BucketMetadata,
 }

@@ -42,9 +40,7 @@ impl TrackableBucket for MinimalTrackableBucket {
                     MinimalValue::Counter(c) if mri.name == "usage" => c.to_f64() as usize,
                     _ => 0,
                 };
-                let has_profile = matches!(mri.name.as_ref(), "usage" | "duration")
-                    && self.tags.has_profile.is_some();
-                BucketSummary::Transactions { count, has_profile }
+                BucketSummary::Transactions(count)
             }
             MetricNamespace::Spans => BucketSummary::Spans(match self.value {
                 MinimalValue::Counter(c) if mri.name == "usage" => c.to_f64() as usize,
@@ -83,12 +79,6 @@ impl MinimalValue {
     }
 }

-#[derive(Clone, Copy, Debug, Default, Deserialize)]
-#[serde(default)]
-struct Tags {
-    has_profile: Option<IgnoredAny>,
-}
-
 #[cfg(test)]
 mod tests {
     use insta::assert_debug_snapshot;
@@ -172,14 +162,12 @@ mod tests {
        let summary = min_buckets.iter().map(|b| b.summary()).collect::<Vec<_>>();
        assert_debug_snapshot!(summary, @r###"
        [
-           Transactions {
-               count: 0,
-               has_profile: true,
-           },
-           Transactions {
-               count: 3,
-               has_profile: false,
-           },
+           Transactions(
+               0,
+           ),
+           Transactions(
+               3,
+           ),
            Spans(
                3,
            ),
diff --git a/relay-server/src/metrics/outcomes.rs b/relay-server/src/metrics/outcomes.rs
index 97aea4f582f..5d0e2ff40f3 100644
--- a/relay-server/src/metrics/outcomes.rs
+++ b/relay-server/src/metrics/outcomes.rs
@@ -12,8 +12,6 @@ use crate::services::outcome::{Outcome, TrackOutcome};
 #[cfg(feature = "processing")]
 use relay_cardinality::{CardinalityLimit, CardinalityReport};

-pub const PROFILE_TAG: &str = "has_profile";
-
 /// [`MetricOutcomes`] takes care of creating the right outcomes for metrics at the end of their
 /// lifecycle.
 ///
@@ -99,10 +97,7 @@ impl MetricOutcomes {
 /// Contains the count of total transactions or spans that went into this bucket.
 #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
 pub enum BucketSummary {
-    Transactions {
-        count: usize,
-        has_profile: bool,
-    },
+    Transactions(usize),
     Spans(usize),
     #[default]
     None,
@@ -185,9 +180,7 @@ impl TrackableBucket for BucketView<'_> {
                     BucketViewValue::Counter(c) if mri.name == "usage" => c.to_f64() as usize,
                     _ => 0,
                 };
-                let has_profile = matches!(mri.name.as_ref(), "usage" | "duration")
-                    && self.tag(PROFILE_TAG) == Some("true");
-                BucketSummary::Transactions { count, has_profile }
+                BucketSummary::Transactions(count)
             }
             MetricNamespace::Spans => BucketSummary::Spans(match self.value() {
                 BucketViewValue::Counter(c) if mri.name == "usage" => c.to_f64() as usize,
@@ -217,11 +210,8 @@ where
         quantities.buckets += 1;

         let summary = bucket.summary();
         match summary {
-            BucketSummary::Transactions { count, has_profile } => {
+            BucketSummary::Transactions(count) => {
                 quantities.transactions += count;
-                if has_profile {
-                    quantities.profiles += count;
-                }
             }
             BucketSummary::Spans(count) => quantities.spans += count,
             BucketSummary::None => continue,
diff --git a/relay-server/src/metrics/rate_limits.rs b/relay-server/src/metrics/rate_limits.rs
index 174831746ee..993008ae8bb 100644
--- a/relay-server/src/metrics/rate_limits.rs
+++ b/relay-server/src/metrics/rate_limits.rs
@@ -1,14 +1,11 @@
 //! Quota and rate limiting helpers for metrics and metrics buckets.

-use chrono::{DateTime, Utc};
 use itertools::Either;
-use relay_common::time::UnixTimestamp;
 use relay_metrics::Bucket;
 use relay_quotas::{DataCategory, Quota, RateLimits, Scoping};
-use relay_system::Addr;

-use crate::metrics::outcomes::{BucketSummary, MetricOutcomes, TrackableBucket, PROFILE_TAG};
-use crate::services::outcome::{Outcome, TrackOutcome};
+use crate::metrics::outcomes::{BucketSummary, MetricOutcomes, TrackableBucket};
+use crate::services::outcome::Outcome;
 use crate::utils;

 /// Contains all data necessary to rate limit metrics or metrics buckets.
@@ -24,20 +21,19 @@ pub struct MetricsLimiter<Q: AsRef<Vec<Quota>> = Vec<Quota>> {
     scoping: Scoping,

     /// The number of performance items (transactions, spans, profiles) contributing to these metrics.
+    #[cfg(feature = "processing")]
     counts: EntityCounts,
 }

 fn to_counts(summary: &BucketSummary) -> EntityCounts {
     match *summary {
-        BucketSummary::Transactions { count, has_profile } => EntityCounts {
+        BucketSummary::Transactions(count) => EntityCounts {
             transactions: Some(count),
             spans: None,
-            profiles: if has_profile { count } else { 0 },
         },
         BucketSummary::Spans(count) => EntityCounts {
             transactions: None,
             spans: Some(count),
-            profiles: 0,
         },
         BucketSummary::None => EntityCounts::default(),
     }
@@ -70,11 +66,6 @@ struct EntityCounts {
     /// The distinction between `None` and `Some(0)` is needed to decide whether or not a rate limit
     /// must be checked.
     spans: Option<usize>,
-    /// The number of profiles represented in the current batch.
-    ///
-    /// We do not explicitly check the rate limiter for profiles, so there is no need to
-    /// distinguish between `None` and `Some(0)`.
-    profiles: usize,
 }

 impl std::ops::Add for EntityCounts {
@@ -84,7 +75,6 @@
         Self {
             transactions: add_some(self.transactions, rhs.transactions),
             spans: add_some(self.spans, rhs.spans),
-            profiles: self.profiles + rhs.profiles,
         }
     }
 }
@@ -126,12 +116,13 @@ impl<Q: AsRef<Vec<Quota>>> MetricsLimiter<Q> {
             .iter()
             .map(|b| to_counts(&b.summary))
             .reduce(|a, b| a + b);
-        if let Some(counts) = total_counts {
+        if let Some(_counts) = total_counts {
             Ok(Self {
                 buckets,
                 quotas,
                 scoping,
-                counts,
+                #[cfg(feature = "processing")]
+                counts: _counts,
             })
         } else {
             Err(buckets.into_iter().map(|s| s.bucket).collect())
@@ -187,35 +178,6 @@ impl<Q: AsRef<Vec<Quota>>> MetricsLimiter<Q> {
         metric_outcomes.track(self.scoping, &dropped, outcome);
     }

-    fn drop_profiles(
-        &mut self,
-        outcome: Outcome,
-        timestamp: DateTime<Utc>,
-        outcome_aggregator: &Addr<TrackOutcome>,
-    ) {
-        if self.counts.profiles == 0 {
-            return;
-        }
-
-        for SummarizedBucket { bucket, summary } in self.buckets.iter_mut() {
-            if let BucketSummary::Transactions { has_profile, .. } = summary {
-                if *has_profile {
-                    bucket.remove_tag(PROFILE_TAG);
-                }
-            }
-        }
-
-        outcome_aggregator.send(TrackOutcome {
-            timestamp,
-            scoping: self.scoping,
-            outcome,
-            event_id: None,
-            remote_addr: None,
-            category: DataCategory::Profile,
-            quantity: self.counts.profiles as u32,
-        });
-    }
-
     // Drop transaction-related metrics and create outcomes for any active rate limits.
     //
     // Returns true if any metrics were dropped.
@@ -223,7 +185,6 @@ impl<Q: AsRef<Vec<Quota>>> MetricsLimiter<Q> {
         &mut self,
         rate_limits: &RateLimits,
         metric_outcomes: &MetricOutcomes,
-        outcome_aggregator: &Addr<TrackOutcome>,
     ) -> bool {
         for category in [DataCategory::Transaction, DataCategory::Span] {
             let active_rate_limits =
@@ -238,20 +199,6 @@ impl<Q: AsRef<Vec<Quota>>> MetricsLimiter<Q> {
                 );

                 return true;
-            } else if category == DataCategory::Transaction {
-                // Also check profiles:
-                let active_rate_limits = rate_limits.check_with_quotas(
-                    self.quotas.as_ref(),
-                    self.scoping.item(DataCategory::Profile),
-                );
-
-                if let Some(limit) = active_rate_limits.longest() {
-                    self.drop_profiles(
-                        Outcome::RateLimited(limit.reason_code.clone()),
-                        UnixTimestamp::now().as_datetime().unwrap_or_else(Utc::now),
-                        outcome_aggregator,
-                    )
-                }
             }
         }

@@ -268,8 +215,9 @@
 mod tests {
     use relay_base_schema::organization::OrganizationId;
     use relay_base_schema::project::{ProjectId, ProjectKey};
-    use relay_metrics::{BucketMetadata, BucketValue};
+    use relay_metrics::{BucketMetadata, BucketValue, UnixTimestamp};
     use relay_quotas::QuotaScope;
+    use relay_system::Addr;
     use smallvec::smallvec;

     use crate::metrics::MetricStats;
@@ -309,7 +257,7 @@
         )
         .unwrap();

-        limiter.enforce_limits(&RateLimits::new(), &metric_outcomes, &outcome_sink);
+        limiter.enforce_limits(&RateLimits::new(), &metric_outcomes);
         let metrics = limiter.into_buckets();

         rx.close();
@@ -325,137 +273,6 @@
         (metrics, outcomes)
     }

-    #[test]
-    fn profiles_limits_are_reported() {
-        let metrics = vec![
-            Bucket {
-                // transaction without profile
-                timestamp: UnixTimestamp::now(),
-                width: 0,
-                name: "d:transactions/duration@millisecond".into(),
-                tags: Default::default(),
-                value: BucketValue::distribution(123.into()),
-                metadata: Default::default(),
-            },
-            Bucket {
-                // transaction with profile
-                timestamp: UnixTimestamp::now(),
-                width: 0,
-                name: "d:transactions/duration@millisecond".into(),
-                tags: [("has_profile".to_string(), "true".to_string())].into(),
-                value: BucketValue::distribution(456.into()),
-                metadata: Default::default(),
-            },
-            Bucket {
-                // transaction without profile
-                timestamp: UnixTimestamp::now(),
-                width: 0,
-                name: "c:transactions/usage@none".into(),
-                tags: Default::default(),
-                value: BucketValue::counter(1.into()),
-                metadata: Default::default(),
-            },
-            Bucket {
-                // transaction with profile
-                timestamp: UnixTimestamp::now(),
-                width: 0,
-                name: "c:transactions/usage@none".into(),
-                tags: [("has_profile".to_string(), "true".to_string())].into(),
-                value: BucketValue::counter(1.into()),
-                metadata: Default::default(),
-            },
-            Bucket {
-                // unrelated metric
-                timestamp: UnixTimestamp::now(),
-                width: 0,
-                name: "something_else".into(),
-                tags: [("has_profile".to_string(), "true".to_string())].into(),
-                value: BucketValue::distribution(123.into()),
-                metadata: Default::default(),
-            },
-        ];
-        let (metrics, outcomes) = run_limiter(metrics, deny(DataCategory::Transaction));
-
-        assert_eq!(metrics.len(), 1);
-        assert_eq!(&*metrics[0].name, "something_else");
-
-        assert_eq!(
-            outcomes,
-            vec![
-                (Outcome::RateLimited(None), DataCategory::Transaction, 2),
-                (Outcome::RateLimited(None), DataCategory::Profile, 1)
-            ]
-        )
-    }
-
-    #[test]
-    fn profiles_quota_is_enforced() {
-        let metrics = vec![
-            Bucket {
-                // transaction without profile
-                timestamp: UnixTimestamp::now(),
-                width: 0,
-                name: "d:transactions/duration@millisecond".into(),
-                tags: Default::default(),
-                value: BucketValue::distribution(123.into()),
-                metadata: Default::default(),
-            },
-            Bucket {
-                // transaction with profile
-                timestamp: UnixTimestamp::now(),
-                width: 0,
-                name: "d:transactions/duration@millisecond".into(),
-                tags: [("has_profile".to_string(), "true".to_string())].into(),
-                value: BucketValue::distribution(456.into()),
-                metadata: Default::default(),
-            },
-            Bucket {
-                // transaction without profile
-                timestamp: UnixTimestamp::now(),
-                width: 0,
-                name: "c:transactions/usage@none".into(),
-                tags: Default::default(),
-                value: BucketValue::counter(1.into()),
-                metadata: Default::default(),
-            },
-            Bucket {
-                // transaction with profile
-                timestamp: UnixTimestamp::now(),
-                width: 0,
-                name: "c:transactions/usage@none".into(),
-                tags: [("has_profile".to_string(), "true".to_string())].into(),
-                value: BucketValue::counter(1.into()),
-                metadata: Default::default(),
-            },
-            Bucket {
-                // unrelated metric
-                timestamp: UnixTimestamp::now(),
-                width: 0,
-                name: "something_else".into(),
-                tags: [("has_profile".to_string(), "true".to_string())].into(),
-                value: BucketValue::distribution(123.into()),
-                metadata: Default::default(),
-            },
-        ];
-
-        let (metrics, outcomes) = run_limiter(metrics, deny(DataCategory::Profile));
-
-        // All metrics have been preserved:
-        assert_eq!(metrics.len(), 5);
-
-        // Profile tag has been removed:
-        assert!(metrics[0].tags.is_empty());
-        assert!(metrics[1].tags.is_empty());
-        assert!(metrics[2].tags.is_empty());
-        assert!(metrics[3].tags.is_empty());
-        assert!(!metrics[4].tags.is_empty()); // unrelated metric still has it
-
-        assert_eq!(
-            outcomes,
-            vec![(Outcome::RateLimited(None), DataCategory::Profile, 1)]
-        );
-    }
-
     /// A few different bucket types
     fn mixed_bag() -> Vec<Bucket> {
         vec![
diff --git a/relay-server/src/metrics_extraction/transactions/mod.rs b/relay-server/src/metrics_extraction/transactions/mod.rs
index 50c9a20e612..adae13d9192 100644
--- a/relay-server/src/metrics_extraction/transactions/mod.rs
+++ b/relay-server/src/metrics_extraction/transactions/mod.rs
@@ -16,7 +16,7 @@ use relay_sampling::evaluation::SamplingDecision;
 use crate::metrics_extraction::generic;
 use crate::metrics_extraction::transactions::types::{
     CommonTag, CommonTags, ExtractMetricsError, LightTransactionTags, TransactionCPRTags,
-    TransactionMeasurementTags, TransactionMetric, UsageTags,
+    TransactionMeasurementTags, TransactionMetric,
 };
 use crate::metrics_extraction::IntoMetric;
 use crate::statsd::RelayCounters;
@@ -257,7 +256,6 @@ pub struct TransactionExtractor<'a> {
     pub transaction_from_dsc: Option<&'a str>,
     pub sampling_decision: SamplingDecision,
     pub target_project_id: ProjectId,
-    pub has_profile: bool,
 }

 impl TransactionExtractor<'_> {
@@ -389,14 +388,9 @@ impl TransactionExtractor<'_> {
         }

         // Internal usage counter
-        metrics.project_metrics.push(
-            TransactionMetric::Usage {
-                tags: UsageTags {
-                    has_profile: self.has_profile,
-                },
-            }
-            .into_metric(timestamp),
-        );
+        metrics
+            .project_metrics
+            .push(TransactionMetric::Usage.into_metric(timestamp));

         // Duration
         let duration = relay_common::time::chrono_to_positive_millis(end - start);
@@ -619,7 +613,6 @@ mod tests {
             transaction_from_dsc: Some("test_transaction"),
             sampling_decision: SamplingDecision::Keep,
             target_project_id: ProjectId::new(4711),
-            has_profile: false,
         };

         let extracted = extractor.extract(event.value().unwrap()).unwrap();
@@ -998,7 +991,6 @@ mod tests {
             transaction_from_dsc: Some("test_transaction"),
             sampling_decision: SamplingDecision::Keep,
             target_project_id: ProjectId::new(4711),
-            has_profile: false,
         };

         let extracted = extractor.extract(event.value().unwrap()).unwrap();
@@ -1174,7 +1166,6 @@ mod tests {
             transaction_from_dsc: Some("test_transaction"),
             sampling_decision: SamplingDecision::Keep,
             target_project_id: ProjectId::new(4711),
-            has_profile: false,
         };

         let extracted = extractor.extract(event.value().unwrap()).unwrap();
@@ -1326,7 +1317,6 @@ mod tests {
             transaction_from_dsc: Some("test_transaction"),
             sampling_decision: SamplingDecision::Keep,
             target_project_id: ProjectId::new(4711),
-            has_profile: false,
         };

         let extracted = extractor.extract(event.value().unwrap()).unwrap();
@@ -1402,7 +1392,6 @@ mod tests {
             transaction_from_dsc: Some("test_transaction"),
             sampling_decision: SamplingDecision::Keep,
             target_project_id: ProjectId::new(4711),
-            has_profile: false,
         };

         let extracted = extractor.extract(event.value().unwrap()).unwrap();
@@ -1568,7 +1557,6 @@ mod tests {
             transaction_from_dsc: Some("test_transaction"),
             sampling_decision: SamplingDecision::Keep,
             target_project_id: ProjectId::new(4711),
-            has_profile: false,
         };

         let extracted = extractor.extract(event.value().unwrap()).unwrap();
@@ -1608,7 +1596,6 @@ mod tests {
             transaction_from_dsc: Some("test_transaction"),
             sampling_decision: SamplingDecision::Keep,
             target_project_id: ProjectId::new(4711),
-            has_profile: false,
         };

         let extracted = extractor.extract(event.value().unwrap()).unwrap();
@@ -1677,7 +1664,6 @@ mod tests {
             transaction_from_dsc: Some("test_transaction"),
             sampling_decision: SamplingDecision::Keep,
             target_project_id: ProjectId::new(4711),
-            has_profile: false,
         };

         let extracted = extractor.extract(event.value().unwrap()).unwrap();
@@ -1784,7 +1770,6 @@ mod tests {
             transaction_from_dsc: Some("test_transaction"),
             sampling_decision: SamplingDecision::Keep,
             target_project_id: ProjectId::new(4711),
-            has_profile: false,
         };

         let extracted = extractor.extract(event.value().unwrap()).unwrap();
@@ -1818,7 +1803,6 @@ mod tests {
             transaction_from_dsc: Some("test_transaction"),
             sampling_decision: SamplingDecision::Keep,
             target_project_id: ProjectId::new(4711),
-            has_profile: false,
         };

         let extracted = extractor.extract(event.value().unwrap()).unwrap();
@@ -1856,7 +1840,6 @@ mod tests {
Some("root_transaction"), sampling_decision: SamplingDecision::Keep, target_project_id: ProjectId::new(4711), - has_profile: false, }; let extracted = extractor.extract(event.value().unwrap()).unwrap(); @@ -2127,7 +2110,6 @@ mod tests { transaction_from_dsc: Some("test_transaction"), sampling_decision: SamplingDecision::Keep, target_project_id: ProjectId::new(4711), - has_profile: false, }; let extracted = extractor.extract(event.value().unwrap()).unwrap(); @@ -2229,7 +2211,6 @@ mod tests { transaction_from_dsc: Some("test_transaction"), sampling_decision: SamplingDecision::Keep, target_project_id: ProjectId::new(4711), - has_profile: false, }; let extracted = extractor.extract(event.value().unwrap()).unwrap(); diff --git a/relay-server/src/metrics_extraction/transactions/types.rs b/relay-server/src/metrics_extraction/transactions/types.rs index 6a8ce105d3c..a7f5ff69175 100644 --- a/relay-server/src/metrics_extraction/transactions/types.rs +++ b/relay-server/src/metrics_extraction/transactions/types.rs @@ -34,7 +34,7 @@ pub enum TransactionMetric { /// /// This metric does not have any of the common tags for the performance product, but instead /// carries internal information for accounting purposes. - Usage { tags: UsageTags }, + Usage, /// An internal counter metric used to compute dynamic sampling biases. /// /// See ''. @@ -77,11 +77,11 @@ impl IntoMetric for TransactionMetric { MetricUnit::Duration(unit), tags.into(), ), - Self::Usage { tags } => ( + Self::Usage => ( Cow::Borrowed("usage"), BucketValue::counter(1.into()), MetricUnit::None, - tags.into(), + Default::default(), ), Self::CountPerRootProject { tags } => ( Cow::Borrowed("count_per_root_project"), @@ -153,21 +153,6 @@ impl From for BTreeMap { } } -#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)] -pub struct UsageTags { - pub has_profile: bool, -} - -impl From for BTreeMap { - fn from(tags: UsageTags) -> Self { - let mut map = BTreeMap::new(); - if tags.has_profile { - map.insert("has_profile".to_owned(), "true".to_owned()); - } - map - } -} - #[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)] pub struct TransactionMeasurementTags { pub measurement_rating: Option, diff --git a/relay-server/src/services/processor.rs b/relay-server/src/services/processor.rs index b473a8c9798..fb4772ea2e1 100644 --- a/relay-server/src/services/processor.rs +++ b/relay-server/src/services/processor.rs @@ -31,7 +31,6 @@ use relay_event_schema::protocol::{ use relay_filter::FilterStatKey; use relay_metrics::{Bucket, BucketMetadata, BucketView, BucketsView, MetricNamespace}; use relay_pii::PiiConfigError; -use relay_profiling::ProfileId; use relay_protocol::Annotated; use relay_quotas::{DataCategory, RateLimits, Scoping}; use relay_sampling::config::RuleId; @@ -1345,7 +1344,6 @@ impl EnvelopeProcessorService { &self, state: &mut ProcessEnvelopeState, sampling_decision: SamplingDecision, - profile_id: Option, ) -> Result<(), ProcessingError> { if state.event_metrics_extracted { return Ok(()); @@ -1445,7 +1443,6 @@ impl EnvelopeProcessorService { transaction_from_dsc, sampling_decision, target_project_id: state.project_id, - has_profile: profile_id.is_some(), }; state @@ -1690,9 +1687,9 @@ impl EnvelopeProcessorService { if let Some(outcome) = sampling_result.into_dropped_outcome() { // Process profiles before dropping the transaction, if necessary. // Before metric extraction to make sure the profile count is reflected correctly. 
-            let profile_id = profile::process(state, &global_config);
+            profile::process(state, &global_config);
             // Extract metrics here, we're about to drop the event/transaction.
-            self.extract_transaction_metrics(state, SamplingDecision::Drop, profile_id)?;
+            self.extract_transaction_metrics(state, SamplingDecision::Drop)?;

             dynamic_sampling::drop_unsampled_items(state, outcome);

@@ -1719,7 +1716,7 @@ impl EnvelopeProcessorService {
         profile::transfer_id(state, profile_id);

         // Always extract metrics in processing Relays for sampled items.
-        self.extract_transaction_metrics(state, SamplingDecision::Keep, profile_id)?;
+        self.extract_transaction_metrics(state, SamplingDecision::Keep)?;

         if state
             .project_info
@@ -2315,11 +2312,7 @@ impl EnvelopeProcessorService {
                 let quotas = project_info.config.quotas.clone();
                 match MetricsLimiter::create(buckets, quotas, scoping) {
                     Ok(mut bucket_limiter) => {
-                        bucket_limiter.enforce_limits(
-                            rate_limits,
-                            &self.inner.metric_outcomes,
-                            &self.inner.addrs.outcome_aggregator,
-                        );
+                        bucket_limiter.enforce_limits(rate_limits, &self.inner.metric_outcomes);
                         bucket_limiter.into_buckets()
                     }
                     Err(buckets) => buckets,
@@ -2442,11 +2435,8 @@ impl EnvelopeProcessorService {
         }

         if rate_limits.is_limited() {
-            let was_enforced = bucket_limiter.enforce_limits(
-                &rate_limits,
-                &self.inner.metric_outcomes,
-                &self.inner.addrs.outcome_aggregator,
-            );
+            let was_enforced =
+                bucket_limiter.enforce_limits(&rate_limits, &self.inner.metric_outcomes);

             if was_enforced {
                 // Update the rate limits in the project cache.
diff --git a/tests/integration/test_outcome.py b/tests/integration/test_outcome.py
index 5ee3d553a8b..818ebf661af 100644
--- a/tests/integration/test_outcome.py
+++ b/tests/integration/test_outcome.py
@@ -1173,8 +1173,8 @@ def test_profile_outcomes(
     are properly forwarded up to sentry.
     """
     outcomes_consumer = outcomes_consumer(timeout=5)
-    metrics_consumer = metrics_consumer()
     profiles_consumer = profiles_consumer()
+    metrics_consumer = metrics_consumer()

     project_id = 42
     project_config = mini_sentry.add_full_project_config(project_id)["config"]
@@ -1297,7 +1297,6 @@ def make_envelope(transaction_name):
         for m, _ in metrics_consumer.get_metrics()
         if m["name"] == "c:transactions/usage@none"
     ]
-    assert all(metric["tags"]["has_profile"] == "true" for metric in metrics)
     assert sum(metric["value"] for metric in metrics) == 2

     assert outcomes == expected_outcomes, outcomes
@@ -1406,13 +1405,13 @@ def test_profile_outcomes_too_many(
     mini_sentry,
     relay_with_processing,
     outcomes_consumer,
-    metrics_consumer,
+    profiles_consumer,
 ):
     """
     Tests that Relay reports duplicate profiles as invalid
     """
     outcomes_consumer = outcomes_consumer(timeout=2)
-    metrics_consumer = metrics_consumer()
+    profiles_consumer = profiles_consumer()

     project_id = 42
     project_config = mini_sentry.add_full_project_config(project_id)["config"]
@@ -1483,10 +1482,8 @@ def make_envelope():
         for category in [6, 11]  # Profile, ProfileIndexed
     ]

-    # Make sure one profile will not be counted as accepted
-    metrics = metrics_by_name(metrics_consumer, 4)
-    assert "has_profile" not in metrics["d:transactions/duration@millisecond"]["tags"]
-    assert metrics["c:transactions/usage@none"]["tags"]["has_profile"] == "true"
+    # One profile was accepted
+    assert profiles_consumer.get_profile()


 def test_profile_outcomes_data_invalid(
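Reviewer note (not part of the patch): after this change, `BucketSummary::Transactions` carries only a count, so outcome accounting can no longer branch on a per-bucket profile flag. Below is a minimal sketch of the resulting aggregation, assuming a `Quantities` stand-in for the real `SourceQuantities` struct; it mirrors the `match` in `outcomes.rs` above but is otherwise illustrative only.

```rust
// Sketch under stated assumptions: `BucketSummary` is copied from the diff,
// `Quantities` is a hypothetical stand-in for `SourceQuantities`.

#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum BucketSummary {
    Transactions(usize),
    Spans(usize),
    #[default]
    None,
}

#[derive(Debug, Default, PartialEq, Eq)]
struct Quantities {
    buckets: usize,
    transactions: usize,
    spans: usize,
}

/// Sums bucket summaries the way the updated `outcomes.rs` does:
/// every bucket is counted, but profiles are no longer tracked at all.
fn sum(summaries: &[BucketSummary]) -> Quantities {
    let mut q = Quantities::default();
    for summary in summaries {
        q.buckets += 1;
        match *summary {
            // The old `has_profile` branch is gone; only the count remains.
            BucketSummary::Transactions(count) => q.transactions += count,
            BucketSummary::Spans(count) => q.spans += count,
            BucketSummary::None => {}
        }
    }
    q
}

fn main() {
    let q = sum(&[
        BucketSummary::Transactions(3),
        BucketSummary::Spans(2),
        BucketSummary::None,
    ]);
    assert_eq!((q.buckets, q.transactions, q.spans), (3, 3, 2));
}
```

Switching from a struct variant to a tuple variant removes the per-bucket `has_profile` bookkeeping entirely, which appears to be superseded by the direct profile rate limiting referenced in the changelog entry for #4195.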