From fd4f306ea6220b24d5a78c278c68e27472748cac Mon Sep 17 00:00:00 2001 From: Martin Linzmayer Date: Tue, 22 Oct 2024 16:56:46 +0200 Subject: [PATCH 1/9] ref: Replace organization id type with newtype --- relay-base-schema/src/lib.rs | 1 + relay-base-schema/src/organization.rs | 27 ++++++ relay-cardinality/benches/redis_impl.rs | 3 +- relay-cardinality/src/lib.rs | 3 - relay-cardinality/src/limiter.rs | 5 +- relay-cardinality/src/redis/cache.rs | 13 +-- relay-cardinality/src/redis/limiter.rs | 9 +- relay-cardinality/src/redis/quota.rs | 7 +- relay-cardinality/src/redis/state.rs | 4 +- relay-metrics/src/meta/redis.rs | 7 +- relay-quotas/src/global.rs | 3 +- relay-quotas/src/quota.rs | 29 +++--- relay-quotas/src/rate_limit.rs | 97 ++++++++++--------- relay-quotas/src/redis.rs | 35 +++---- relay-sampling/src/evaluation.rs | 5 +- relay-sampling/src/redis_sampling.rs | 3 +- relay-server/src/extractors/request_meta.rs | 3 +- relay-server/src/metrics/metric_stats.rs | 7 +- relay-server/src/metrics/rate_limits.rs | 3 +- relay-server/src/services/outcome.rs | 7 +- relay-server/src/services/processor.rs | 12 ++- .../src/services/processor/metrics.rs | 5 +- relay-server/src/services/processor/replay.rs | 7 +- .../services/projects/project/state/info.rs | 9 +- relay-server/src/services/store.rs | 17 ++-- relay-server/src/utils/rate_limits.rs | 27 +++--- 26 files changed, 199 insertions(+), 149 deletions(-) create mode 100644 relay-base-schema/src/organization.rs diff --git a/relay-base-schema/src/lib.rs b/relay-base-schema/src/lib.rs index b65ffb6ea90..52e67bf93db 100644 --- a/relay-base-schema/src/lib.rs +++ b/relay-base-schema/src/lib.rs @@ -9,5 +9,6 @@ pub mod data_category; pub mod events; pub mod metrics; +pub mod organization; pub mod project; pub mod spans; diff --git a/relay-base-schema/src/organization.rs b/relay-base-schema/src/organization.rs new file mode 100644 index 00000000000..4318a2e5b50 --- /dev/null +++ b/relay-base-schema/src/organization.rs @@ -0,0 +1,27 @@ 
+//! Contains [`OrganizationId`] which is the ID of a Sentry organization and is currently a +wrapper over `u64`. + +use serde::{Deserialize, Serialize}; + +/// The unique identifier of a Sentry organization +#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub struct OrganizationId(u64); + +impl OrganizationId { + /// Creates a new organization ID from its numeric value + #[inline] + pub fn new(id: u64) -> Self { + OrganizationId(id) + } + + /// Returns the numeric value of the organization ID + pub fn value(self) -> u64 { + self.0 + } +} + +impl std::fmt::Display for OrganizationId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.value()) + } +} diff --git a/relay-cardinality/benches/redis_impl.rs b/relay-cardinality/benches/redis_impl.rs index 07f1375887c..9892a7aba19 100644 --- a/relay-cardinality/benches/redis_impl.rs +++ b/relay-cardinality/benches/redis_impl.rs @@ -6,6 +6,7 @@ use std::{ use criterion::{BatchSize, BenchmarkId, Criterion}; use relay_base_schema::{ metrics::{MetricName, MetricNamespace}, + organization::OrganizationId, project::ProjectId, }; use relay_cardinality::{ @@ -83,7 +84,7 @@ impl Params { namespace: None, }], scoping: Scoping { - organization_id: 1, + organization_id: OrganizationId::new(1), project_id: ProjectId::new(100), }, rounds, diff --git a/relay-cardinality/src/lib.rs b/relay-cardinality/src/lib.rs index 6d3a3dcaf4e..595a59db7bd 100644 --- a/relay-cardinality/src/lib.rs +++ b/relay-cardinality/src/lib.rs @@ -26,6 +26,3 @@ pub use self::window::SlidingWindow; /// Redis Set based cardinality limiter. #[cfg(feature = "redis")] pub type CardinalityLimiter = self::limiter::CardinalityLimiter; - -/// Internal alias for better readability.
-type OrganizationId = u64; diff --git a/relay-cardinality/src/limiter.rs b/relay-cardinality/src/limiter.rs index ffb66f09228..d872bf9b5d9 100644 --- a/relay-cardinality/src/limiter.rs +++ b/relay-cardinality/src/limiter.rs @@ -5,12 +5,13 @@ use std::collections::BTreeMap; use hashbrown::{HashMap, HashSet}; use relay_base_schema::metrics::{MetricName, MetricNamespace, MetricType}; +use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::ProjectId; use relay_common::time::UnixTimestamp; use relay_statsd::metric; use crate::statsd::CardinalityLimiterTimers; -use crate::{CardinalityLimit, Error, OrganizationId, Result}; +use crate::{CardinalityLimit, Error, Result}; /// Data scoping information. /// @@ -394,7 +395,7 @@ mod tests { fn build_scoping() -> Scoping { Scoping { - organization_id: 1, + organization_id: OrganizationId::new(1), project_id: ProjectId::new(1), } } diff --git a/relay-cardinality/src/redis/cache.rs b/relay-cardinality/src/redis/cache.rs index 51d0742a149..a0f63cf807d 100644 --- a/relay-cardinality/src/redis/cache.rs +++ b/relay-cardinality/src/redis/cache.rs @@ -197,11 +197,12 @@ impl ScopedCache { #[cfg(test)] mod tests { use relay_base_schema::metrics::{MetricName, MetricNamespace}; + use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::ProjectId; use crate::limiter::{Entry, EntryId}; use crate::redis::quota::PartialQuotaScoping; - use crate::{CardinalityLimit, CardinalityScope, OrganizationId, Scoping, SlidingWindow}; + use crate::{CardinalityLimit, CardinalityScope, Scoping, SlidingWindow}; use super::*; @@ -238,7 +239,7 @@ mod tests { window_seconds: 100, granularity_seconds: 10, }; - let scope = build_scoping(1, window); + let scope = build_scoping(OrganizationId::new(1), window); let now = UnixTimestamp::now(); let future = now + Duration::from_secs(window.granularity_seconds + 1); @@ -298,8 +299,8 @@ mod tests { window_seconds: 100, granularity_seconds: 10, }; - let scope1 = 
build_scoping(1, window); - let scope2 = build_scoping(2, window); + let scope1 = build_scoping(OrganizationId::new(1), window); + let scope2 = build_scoping(OrganizationId::new(2), window); let now = UnixTimestamp::now(); @@ -336,8 +337,8 @@ mod tests { window_seconds: vacuum_interval.as_secs() * 10, granularity_seconds: vacuum_interval.as_secs() * 2, }; - let scope1 = build_scoping(1, window); - let scope2 = build_scoping(2, window); + let scope1 = build_scoping(OrganizationId::new(1), window); + let scope2 = build_scoping(OrganizationId::new(2), window); let now = UnixTimestamp::now(); let in_interval = now + Duration::from_secs(vacuum_interval.as_secs() - 1); diff --git a/relay-cardinality/src/redis/limiter.rs b/relay-cardinality/src/redis/limiter.rs index 8a1c9222b81..6e80886684c 100644 --- a/relay-cardinality/src/redis/limiter.rs +++ b/relay-cardinality/src/redis/limiter.rs @@ -262,6 +262,7 @@ mod tests { use std::sync::atomic::AtomicU64; use relay_base_schema::metrics::{MetricName, MetricNamespace::*, MetricType}; + use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::ProjectId; use relay_redis::{redis, RedisConfigOptions}; @@ -293,7 +294,9 @@ mod tests { static ORGS: AtomicU64 = AtomicU64::new(100); let scoping = Scoping { - organization_id: ORGS.fetch_add(1, std::sync::atomic::Ordering::SeqCst), + organization_id: OrganizationId::new( + ORGS.fetch_add(1, std::sync::atomic::Ordering::SeqCst), + ), project_id: ProjectId::new(1), }; @@ -593,12 +596,12 @@ mod tests { let granularity_seconds = 10_000; let scoping1 = Scoping { - organization_id: granularity_seconds, + organization_id: OrganizationId::new(granularity_seconds), project_id: ProjectId::new(1), }; let scoping2 = Scoping { // Time shift relative to `scoping1` should be half the granularity. 
- organization_id: granularity_seconds / 2, + organization_id: OrganizationId::new(granularity_seconds / 2), project_id: ProjectId::new(1), }; diff --git a/relay-cardinality/src/redis/quota.rs b/relay-cardinality/src/redis/quota.rs index 3e98798cec7..b892e4a6161 100644 --- a/relay-cardinality/src/redis/quota.rs +++ b/relay-cardinality/src/redis/quota.rs @@ -3,13 +3,14 @@ use std::fmt::{self, Write}; use std::hash::Hash; use relay_base_schema::metrics::{MetricName, MetricNamespace, MetricType}; +use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::ProjectId; use relay_common::time::UnixTimestamp; use crate::limiter::Entry; use crate::redis::{KEY_PREFIX, KEY_VERSION}; use crate::window::Slot; -use crate::{CardinalityLimit, CardinalityScope, OrganizationId, Scoping, SlidingWindow}; +use crate::{CardinalityLimit, CardinalityScope, Scoping, SlidingWindow}; /// A quota scoping extracted from a [`CardinalityLimit`] and a [`Scoping`]. /// @@ -70,7 +71,7 @@ impl PartialQuotaScoping { fn shifted(&self, timestamp: UnixTimestamp) -> UnixTimestamp { let shift = self .organization_id - .map(|o| o % self.window.granularity_seconds) + .map(|o| o.value() % self.window.granularity_seconds) .unwrap_or(0); UnixTimestamp::from_secs(timestamp.as_secs() + shift) @@ -119,7 +120,7 @@ impl QuotaScoping { /// Turns the scoping into a Redis key for the passed slot. 
pub fn to_redis_key(&self, slot: Slot) -> String { - let organization_id = self.organization_id.unwrap_or(0); + let organization_id = self.organization_id.unwrap_or(OrganizationId::new(0)); let project_id = self.project_id.map(|p| p.value()).unwrap_or(0); let namespace = self.namespace.map(|ns| ns.as_str()).unwrap_or(""); let metric_type = DisplayOptMinus(self.metric_type); diff --git a/relay-cardinality/src/redis/state.rs b/relay-cardinality/src/redis/state.rs index d09e17a1e23..8c42fd9c462 100644 --- a/relay-cardinality/src/redis/state.rs +++ b/relay-cardinality/src/redis/state.rs @@ -162,10 +162,10 @@ impl<'a> Drop for LimitState<'a> { passive = passive, ); - let organization_id = self.scoping.organization_id as i64; + let organization_id = self.scoping.organization_id; let status = if self.rejections > 0 { "limited" } else { "ok" }; metric!( - set(CardinalityLimiterSets::Organizations) = organization_id, + set(CardinalityLimiterSets::Organizations) = organization_id.value() as i64, id = &self.cardinality_limit.id, passive = passive, status = status, diff --git a/relay-metrics/src/meta/redis.rs b/relay-metrics/src/meta/redis.rs index 60cda8d50fb..5fda5710a6f 100644 --- a/relay-metrics/src/meta/redis.rs +++ b/relay-metrics/src/meta/redis.rs @@ -1,6 +1,7 @@ use std::time::Duration; use hash32::{FnvHasher, Hasher as _}; +use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::ProjectId; use relay_common::time::UnixTimestamp; use relay_redis::{RedisError, RedisPool}; @@ -23,7 +24,7 @@ impl RedisMetricMetaStore { /// Stores metric metadata in Redis. 
pub fn store( &self, - organization_id: u64, + organization_id: OrganizationId, project_id: ProjectId, meta: MetricMeta, ) -> Result<(), RedisError> { @@ -67,7 +68,7 @@ impl RedisMetricMetaStore { fn build_redis_key( &self, - organization_id: u64, + organization_id: OrganizationId, project_id: ProjectId, timestamp: UnixTimestamp, mri: &MetricResourceIdentifier<'_>, @@ -113,7 +114,7 @@ mod tests { fn test_store() { let store = build_store(); - let organization_id = 1000; + let organization_id = OrganizationId::new(1000); let project_id = ProjectId::new(2); let mri = MetricResourceIdentifier::parse("c:foo").unwrap(); diff --git a/relay-quotas/src/global.rs b/relay-quotas/src/global.rs index ce598a4e764..8b563f494f6 100644 --- a/relay-quotas/src/global.rs +++ b/relay-quotas/src/global.rs @@ -245,6 +245,7 @@ mod tests { use super::*; use relay_base_schema::data_category::DataCategory; + use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::{ProjectId, ProjectKey}; use relay_common::time::UnixTimestamp; use relay_redis::{RedisConfigOptions, RedisPool}; @@ -273,7 +274,7 @@ mod tests { fn build_scoping() -> Scoping { Scoping { - organization_id: 69420, + organization_id: OrganizationId::new(69420), project_id: ProjectId::new(42), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(4711), diff --git a/relay-quotas/src/quota.rs b/relay-quotas/src/quota.rs index a49acffb2a8..2af89b2398b 100644 --- a/relay-quotas/src/quota.rs +++ b/relay-quotas/src/quota.rs @@ -2,6 +2,7 @@ use std::fmt; use std::str::FromStr; use relay_base_schema::metrics::MetricNamespace; +use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::{ProjectId, ProjectKey}; use serde::{Deserialize, Serialize}; use smallvec::SmallVec; @@ -15,7 +16,7 @@ pub use relay_base_schema::data_category::DataCategory; #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct Scoping { /// The organization 
id. - pub organization_id: u64, + pub organization_id: OrganizationId, /// The project id. pub project_id: ProjectId, @@ -129,7 +130,7 @@ impl ItemScoping<'_> { pub fn scope_id(&self, scope: QuotaScope) -> Option { match scope { QuotaScope::Global => None, - QuotaScope::Organization => Some(self.organization_id), + QuotaScope::Organization => Some(self.organization_id.value()), QuotaScope::Project => Some(self.project_id.value()), QuotaScope::Key => self.key_id, QuotaScope::Unknown => None, @@ -726,7 +727,7 @@ mod tests { assert!(quota.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -751,7 +752,7 @@ mod tests { assert!(!quota.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -776,7 +777,7 @@ mod tests { assert!(quota.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -787,7 +788,7 @@ mod tests { assert!(!quota.matches(ItemScoping { category: DataCategory::Transaction, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -812,7 +813,7 @@ mod tests { assert!(!quota.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: 
ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -837,7 +838,7 @@ mod tests { assert!(quota.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -848,7 +849,7 @@ mod tests { assert!(!quota.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 0, + organization_id: OrganizationId::new(0), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -873,7 +874,7 @@ mod tests { assert!(quota.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -884,7 +885,7 @@ mod tests { assert!(!quota.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(0), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -909,7 +910,7 @@ mod tests { assert!(quota.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -920,7 +921,7 @@ mod tests { assert!(!quota.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(0), @@ -931,7 +932,7 @@ mod tests { 
assert!(!quota.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: None, diff --git a/relay-quotas/src/rate_limit.rs b/relay-quotas/src/rate_limit.rs index c1162d340ea..421886c0d60 100644 --- a/relay-quotas/src/rate_limit.rs +++ b/relay-quotas/src/rate_limit.rs @@ -3,6 +3,7 @@ use std::str::FromStr; use std::time::{Duration, Instant}; use relay_base_schema::metrics::MetricNamespace; +use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::{ProjectId, ProjectKey}; use smallvec::SmallVec; @@ -128,7 +129,7 @@ pub enum RateLimitScope { /// Global scope. Global, /// An organization with identifier. - Organization(u64), + Organization(OrganizationId), /// A project with identifier. Project(ProjectId), /// A DSN public key. @@ -492,7 +493,7 @@ mod tests { fn test_rate_limit_matches_categories() { let rate_limit = RateLimit { categories: smallvec![DataCategory::Unknown, DataCategory::Error], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -501,7 +502,7 @@ mod tests { assert!(rate_limit.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: None, @@ -512,7 +513,7 @@ mod tests { assert!(!rate_limit.matches(ItemScoping { category: DataCategory::Transaction, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: None, @@ -525,7 +526,7 @@ mod tests { fn 
test_rate_limit_matches_organization() { let rate_limit = RateLimit { categories: DataCategories::new(), - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -534,7 +535,7 @@ mod tests { assert!(rate_limit.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: None, @@ -545,7 +546,7 @@ mod tests { assert!(!rate_limit.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 0, + organization_id: OrganizationId::new(0), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: None, @@ -567,7 +568,7 @@ mod tests { assert!(rate_limit.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: None, @@ -578,7 +579,7 @@ mod tests { assert!(!rate_limit.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(0), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: None, @@ -591,14 +592,14 @@ mod tests { fn test_rate_limit_matches_namespaces() { let rate_limit = RateLimit { categories: smallvec![], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![MetricNamespace::Custom], }; let scoping = Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: 
ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: None, @@ -618,7 +619,7 @@ mod tests { let general_rate_limit = RateLimit { categories: smallvec![], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], // all namespaces @@ -652,7 +653,7 @@ mod tests { assert!(rate_limit.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: None, @@ -663,7 +664,7 @@ mod tests { assert!(!rate_limit.matches(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 0, + organization_id: OrganizationId::new(0), project_id: ProjectId::new(21), project_key: ProjectKey::parse("deadbeefdeadbeefdeadbeefdeadbeef").unwrap(), key_id: None, @@ -678,7 +679,7 @@ mod tests { rate_limits.add(RateLimit { categories: smallvec![DataCategory::Default, DataCategory::Error], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("first")), retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -687,7 +688,7 @@ mod tests { // longer rate limit shadows shorter one rate_limits.add(RateLimit { categories: smallvec![DataCategory::Error, DataCategory::Default], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("second")), retry_after: RetryAfter::from_secs(10), namespaces: smallvec![], @@ -701,7 +702,7 @@ mod tests { default, error, ], - scope: Organization(42), + scope: Organization(OrganizationId(42)), reason_code: Some(ReasonCode("second")), retry_after: RetryAfter(10), namespaces: [], @@ -717,7 +718,7 @@ mod 
tests { rate_limits.add(RateLimit { categories: smallvec![DataCategory::Default, DataCategory::Error], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("first")), retry_after: RetryAfter::from_secs(10), namespaces: smallvec![], @@ -726,7 +727,7 @@ mod tests { // shorter rate limit is shadowed by existing one rate_limits.add(RateLimit { categories: smallvec![DataCategory::Error, DataCategory::Default], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("second")), retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -740,7 +741,7 @@ mod tests { default, error, ], - scope: Organization(42), + scope: Organization(OrganizationId(42)), reason_code: Some(ReasonCode("first")), retry_after: RetryAfter(10), namespaces: [], @@ -756,7 +757,7 @@ mod tests { rate_limits.add(RateLimit { categories: smallvec![DataCategory::Error], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -765,7 +766,7 @@ mod tests { // Same scope but different categories rate_limits.add(RateLimit { categories: smallvec![DataCategory::Transaction], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -787,7 +788,7 @@ mod tests { categories: [ error, ], - scope: Organization(42), + scope: Organization(OrganizationId(42)), reason_code: None, retry_after: RetryAfter(1), namespaces: [], @@ -796,7 +797,7 @@ mod tests { categories: [ transaction, ], - scope: Organization(42), + scope: Organization(OrganizationId(42)), reason_code: None, retry_after: RetryAfter(1), namespaces: [], @@ -822,7 +823,7 @@ mod tests { rate_limits.add(RateLimit { 
categories: smallvec![DataCategory::MetricBucket], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![MetricNamespace::Custom], @@ -831,7 +832,7 @@ mod tests { // Same category but different namespaces rate_limits.add(RateLimit { categories: smallvec![DataCategory::MetricBucket], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![MetricNamespace::Spans], @@ -844,7 +845,7 @@ mod tests { categories: [ metric_bucket, ], - scope: Organization(42), + scope: Organization(OrganizationId(42)), reason_code: None, retry_after: RetryAfter(1), namespaces: [ @@ -855,7 +856,7 @@ mod tests { categories: [ metric_bucket, ], - scope: Organization(42), + scope: Organization(OrganizationId(42)), reason_code: None, retry_after: RetryAfter(1), namespaces: [ @@ -873,7 +874,7 @@ mod tests { rate_limits.add(RateLimit { categories: smallvec![DataCategory::Error], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("first")), retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -882,7 +883,7 @@ mod tests { // Distinct scope to prevent deduplication rate_limits.add(RateLimit { categories: smallvec![DataCategory::Transaction], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("second")), retry_after: RetryAfter::from_secs(10), namespaces: smallvec![], @@ -894,7 +895,7 @@ mod tests { categories: [ transaction, ], - scope: Organization(42), + scope: Organization(OrganizationId(42)), reason_code: Some(ReasonCode("second")), retry_after: RetryAfter(10), namespaces: [], @@ -909,7 +910,7 @@ mod tests { // Active error limit 
rate_limits.add(RateLimit { categories: smallvec![DataCategory::Error], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -937,7 +938,7 @@ mod tests { categories: [ error, ], - scope: Organization(42), + scope: Organization(OrganizationId(42)), reason_code: None, retry_after: RetryAfter(1), namespaces: [], @@ -954,7 +955,7 @@ mod tests { // Active error limit rate_limits.add(RateLimit { categories: smallvec![DataCategory::Error], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -963,7 +964,7 @@ mod tests { // Active transaction limit rate_limits.add(RateLimit { categories: smallvec![DataCategory::Transaction], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -972,7 +973,7 @@ mod tests { let applied_limits = rate_limits.check(ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: None, @@ -988,7 +989,7 @@ mod tests { categories: [ error, ], - scope: Organization(42), + scope: Organization(OrganizationId(42)), reason_code: None, retry_after: RetryAfter(1), namespaces: [], @@ -1005,7 +1006,7 @@ mod tests { // Active error limit rate_limits.add(RateLimit { categories: smallvec![DataCategory::Error], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -1014,7 +1015,7 @@ mod tests { // Active transaction limit 
rate_limits.add(RateLimit { categories: smallvec![DataCategory::Transaction], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -1023,7 +1024,7 @@ mod tests { let item_scoping = ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: None, @@ -1051,7 +1052,7 @@ mod tests { categories: [ error, ], - scope: Organization(42), + scope: Organization(OrganizationId(42)), reason_code: Some(ReasonCode("zero")), retry_after: RetryAfter(60), namespaces: [], @@ -1068,7 +1069,7 @@ mod tests { rate_limits1.add(RateLimit { categories: smallvec![DataCategory::Error], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("first")), retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -1076,7 +1077,7 @@ mod tests { rate_limits1.add(RateLimit { categories: smallvec![DataCategory::TransactionIndexed], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -1084,7 +1085,7 @@ mod tests { rate_limits2.add(RateLimit { categories: smallvec![DataCategory::Error], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("second")), retry_after: RetryAfter::from_secs(10), namespaces: smallvec![], @@ -1099,7 +1100,7 @@ mod tests { categories: [ error, ], - scope: Organization(42), + scope: Organization(OrganizationId(42)), reason_code: Some(ReasonCode("second")), retry_after: RetryAfter(10), namespaces: [], @@ -1108,7 +1109,7 @@ mod tests { 
categories: [ transaction_indexed, ], - scope: Organization(42), + scope: Organization(OrganizationId(42)), reason_code: None, retry_after: RetryAfter(1), namespaces: [], @@ -1125,7 +1126,7 @@ mod tests { // Active error limit cached.add(RateLimit { categories: smallvec![DataCategory::Error], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(1), namespaces: smallvec![], @@ -1149,7 +1150,7 @@ mod tests { categories: [ error, ], - scope: Organization(42), + scope: Organization(OrganizationId(42)), reason_code: None, retry_after: RetryAfter(1), namespaces: [], diff --git a/relay-quotas/src/redis.rs b/relay-quotas/src/redis.rs index 9ee63a54e58..13d4bf8e060 100644 --- a/relay-quotas/src/redis.rs +++ b/relay-quotas/src/redis.rs @@ -99,7 +99,7 @@ impl<'a> RedisQuota<'a> { if self.quota.scope == QuotaScope::Global { 0 } else { - self.scoping.organization_id % self.window + self.scoping.organization_id.value() % self.window } } @@ -305,6 +305,7 @@ mod tests { use std::time::{SystemTime, UNIX_EPOCH}; use relay_base_schema::metrics::MetricNamespace; + use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::{ProjectId, ProjectKey}; use relay_redis::redis::Commands; use relay_redis::RedisConfigOptions; @@ -355,7 +356,7 @@ mod tests { let scoping = ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(43), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(44), @@ -373,7 +374,7 @@ mod tests { rate_limits, vec![RateLimit { categories: DataCategories::new(), - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("get_lost")), retry_after: rate_limits[0].retry_after, namespaces: smallvec![], @@ -404,7 +405,7 @@ mod 
tests { let scoping = ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(43), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(44), @@ -467,7 +468,7 @@ mod tests { let scoping = ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(43), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(44), @@ -489,7 +490,7 @@ mod tests { rate_limits, vec![RateLimit { categories: DataCategories::new(), - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("get_lost")), retry_after: rate_limits[0].retry_after, namespaces: smallvec![], @@ -517,7 +518,7 @@ mod tests { let scoping = ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(43), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(44), @@ -567,7 +568,7 @@ mod tests { let scoping = ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(43), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(44), @@ -618,7 +619,7 @@ mod tests { let scoping = ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(43), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(44), @@ -662,7 +663,7 @@ mod tests { let scoping = ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), 
project_id: ProjectId::new(43), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(44), @@ -707,7 +708,7 @@ mod tests { let scoping = ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(43), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(44), @@ -731,7 +732,7 @@ mod tests { rate_limits, vec![RateLimit { categories: DataCategories::new(), - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("project_quota1")), retry_after: rate_limits[0].retry_after, namespaces: smallvec![], @@ -757,7 +758,7 @@ mod tests { let scoping = ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(43), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(44), @@ -779,7 +780,7 @@ mod tests { rate_limits, vec![RateLimit { categories: DataCategories::new(), - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("get_lost")), retry_after: rate_limits[0].retry_after, namespaces: smallvec![], @@ -807,7 +808,7 @@ mod tests { let scoping = ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 69420, + organization_id: OrganizationId::new(69420), project_id: ProjectId::new(42), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(4711), @@ -836,7 +837,7 @@ mod tests { let scoping = ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 69420, + organization_id: OrganizationId::new(69420), project_id: ProjectId::new(42), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: 
Some(4711), @@ -865,7 +866,7 @@ mod tests { let scoping = ItemScoping { category: DataCategory::Error, scoping: &Scoping { - organization_id: 69420, + organization_id: OrganizationId::new(69420), project_id: ProjectId::new(42), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(4711), diff --git a/relay-sampling/src/evaluation.rs b/relay-sampling/src/evaluation.rs index 7f08dc8fa88..1a30d80bbab 100644 --- a/relay-sampling/src/evaluation.rs +++ b/relay-sampling/src/evaluation.rs @@ -10,6 +10,7 @@ use chrono::{DateTime, Utc}; use rand::distributions::Uniform; use rand::Rng; use rand_pcg::Pcg32; +use relay_base_schema::organization::OrganizationId; use relay_protocol::Getter; #[cfg(feature = "redis")] use relay_redis::RedisPool; @@ -49,7 +50,7 @@ pub type ReservoirCounters = Arc>>; pub struct ReservoirEvaluator<'a> { counters: ReservoirCounters, #[cfg(feature = "redis")] - org_id_and_redis_pool: Option<(u64, &'a RedisPool)>, + org_id_and_redis_pool: Option<(OrganizationId, &'a RedisPool)>, // Using PhantomData because the lifetimes are behind a feature flag. _phantom: std::marker::PhantomData<&'a ()>, } @@ -74,7 +75,7 @@ impl<'a> ReservoirEvaluator<'a> { /// /// These values are needed to synchronize with Redis. 
#[cfg(feature = "redis")] - pub fn set_redis(&mut self, org_id: u64, redis_pool: &'a RedisPool) { + pub fn set_redis(&mut self, org_id: OrganizationId, redis_pool: &'a RedisPool) { self.org_id_and_redis_pool = Some((org_id, redis_pool)); } diff --git a/relay-sampling/src/redis_sampling.rs b/relay-sampling/src/redis_sampling.rs index 154c9a1b7bb..12f63d42e78 100644 --- a/relay-sampling/src/redis_sampling.rs +++ b/relay-sampling/src/redis_sampling.rs @@ -1,11 +1,12 @@ use chrono::{DateTime, Utc}; +use relay_base_schema::organization::OrganizationId; use crate::config::RuleId; pub struct ReservoirRuleKey(String); impl ReservoirRuleKey { - pub fn new(org_id: u64, rule_id: RuleId) -> Self { + pub fn new(org_id: OrganizationId, rule_id: RuleId) -> Self { Self(format!("reservoir:{}:{}", org_id, rule_id)) } diff --git a/relay-server/src/extractors/request_meta.rs b/relay-server/src/extractors/request_meta.rs index 012a4d95b26..a42b3635665 100644 --- a/relay-server/src/extractors/request_meta.rs +++ b/relay-server/src/extractors/request_meta.rs @@ -13,6 +13,7 @@ use axum::response::{IntoResponse, Response}; use axum::RequestPartsExt; use data_encoding::BASE64; use relay_auth::RelayId; +use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::{ParseProjectKeyError, ProjectId, ProjectKey}; use relay_common::{Auth, Dsn, ParseAuthError, ParseDsnError, Scheme}; use relay_config::UpstreamDescriptor; @@ -397,7 +398,7 @@ impl RequestMeta { /// state. To fetch full scoping information, invoke the `GetScoping` message on `Project`. 
pub fn get_partial_scoping(&self) -> Scoping { Scoping { - organization_id: 0, + organization_id: OrganizationId::new(0), project_id: self.project_id().unwrap_or_else(|| ProjectId::new(0)), project_key: self.public_key(), key_id: None, diff --git a/relay-server/src/metrics/metric_stats.rs b/relay-server/src/metrics/metric_stats.rs index 0121dfbdc74..4e1aa6aafb9 100644 --- a/relay-server/src/metrics/metric_stats.rs +++ b/relay-server/src/metrics/metric_stats.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use std::sync::{Arc, OnceLock}; +use relay_base_schema::organization::OrganizationId; #[cfg(feature = "processing")] use relay_cardinality::{CardinalityLimit, CardinalityReport}; use relay_config::Config; @@ -111,14 +112,14 @@ impl MetricStats { self.config.metric_stats_enabled() && self.is_rolled_out(scoping.organization_id) } - fn is_rolled_out(&self, organization_id: u64) -> bool { + fn is_rolled_out(&self, organization_id: OrganizationId) -> bool { let rate = self .global_config .current() .options .metric_stats_rollout_rate; - is_rolled_out(organization_id, rate) + is_rolled_out(organization_id.value(), rate) } fn to_volume_metric(&self, bucket: impl TrackableBucket, outcome: &Outcome) -> Option { @@ -245,7 +246,7 @@ mod tests { fn scoping() -> Scoping { Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), diff --git a/relay-server/src/metrics/rate_limits.rs b/relay-server/src/metrics/rate_limits.rs index de05d00c6b5..174831746ee 100644 --- a/relay-server/src/metrics/rate_limits.rs +++ b/relay-server/src/metrics/rate_limits.rs @@ -266,6 +266,7 @@ impl>> MetricsLimiter { #[cfg(test)] mod tests { + use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::{ProjectId, ProjectKey}; use relay_metrics::{BucketMetadata, BucketValue}; use relay_quotas::QuotaScope; @@ -300,7 +301,7 @@ mod 
tests { metrics, quotas, Scoping { - organization_id: 1, + organization_id: OrganizationId::new(1), project_id: ProjectId::new(1), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: None, diff --git a/relay-server/src/services/outcome.rs b/relay-server/src/services/outcome.rs index 55a4a8b73a4..56f6f57720b 100644 --- a/relay-server/src/services/outcome.rs +++ b/relay-server/src/services/outcome.rs @@ -15,6 +15,7 @@ use std::{fmt, mem}; #[cfg(feature = "processing")] use anyhow::Context; use chrono::{DateTime, SecondsFormat, Utc}; +use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::ProjectId; use relay_common::time::UnixTimestamp; use relay_config::{Config, EmitOutcomes}; @@ -523,7 +524,7 @@ pub struct TrackRawOutcome { timestamp: String, /// Organization id. #[serde(default, skip_serializing_if = "Option::is_none")] - org_id: Option, + org_id: Option, /// Project id. project_id: ProjectId, /// The DSN project key id. @@ -559,9 +560,9 @@ impl TrackRawOutcome { // e.g. 
something like: "2019-09-29T09:46:40.123456Z" let timestamp = msg.timestamp.to_rfc3339_opts(SecondsFormat::Micros, true); - let org_id = match msg.scoping.organization_id { + let org_id = match msg.scoping.organization_id.value() { 0 => None, - id => Some(id), + id => Some(OrganizationId::new(id)), }; // since TrackOutcome objects come only from this Relay (and not any downstream diff --git a/relay-server/src/services/processor.rs b/relay-server/src/services/processor.rs index 494630a01bc..c5acdf40b54 100644 --- a/relay-server/src/services/processor.rs +++ b/relay-server/src/services/processor.rs @@ -2567,7 +2567,7 @@ impl EnvelopeProcessorService { if !limits.exceeded_limits().is_empty() && sample(error_sample_rate) { for limit in limits.exceeded_limits() { relay_log::error!( - tags.organization_id = scoping.organization_id, + tags.organization_id = scoping.organization_id.value(), tags.limit_id = limit.id, tags.passive = limit.passive, "Cardinality Limit" @@ -3389,8 +3389,10 @@ mod tests { #[cfg(feature = "processing")] #[tokio::test] async fn test_ratelimit_per_batch() { - let rate_limited_org = 1; - let not_ratelimited_org = 2; + use relay_base_schema::organization::OrganizationId; + + let rate_limited_org = OrganizationId::new(1); + let not_ratelimited_org = OrganizationId::new(2); let message = { let project_info = { @@ -3427,7 +3429,7 @@ mod tests { project_info, }; - let scoping_by_org_id = |org_id: u64| Scoping { + let scoping_by_org_id = |org_id: OrganizationId| Scoping { organization_id: org_id, project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), @@ -3464,7 +3466,7 @@ mod tests { }; let (store, handle) = { - let f = |org_ids: &mut Vec, msg: Store| { + let f = |org_ids: &mut Vec, msg: Store| { let org_id = match msg { Store::Metrics(x) => x.scoping.organization_id, _ => panic!("received envelope when expecting only metrics"), diff --git a/relay-server/src/services/processor/metrics.rs 
b/relay-server/src/services/processor/metrics.rs index 0d28285e77b..165713d754c 100644 --- a/relay-server/src/services/processor/metrics.rs +++ b/relay-server/src/services/processor/metrics.rs @@ -103,6 +103,7 @@ fn remove_matching_bucket_tags(metric_config: &Metrics, bucket: &mut Bucket) { mod tests { use std::collections::BTreeMap; + use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::{ProjectId, ProjectKey}; use relay_dynamic_config::TagBlock; use relay_metrics::{BucketValue, UnixTimestamp}; @@ -186,7 +187,7 @@ mod tests { &metric_outcomes, &project_info, Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(43), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(44), @@ -223,7 +224,7 @@ mod tests { &metric_outcomes, &ProjectInfo::default(), Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(43), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(44), diff --git a/relay-server/src/services/processor/replay.rs b/relay-server/src/services/processor/replay.rs index 848d08bb375..8466d442ba2 100644 --- a/relay-server/src/services/processor/replay.rs +++ b/relay-server/src/services/processor/replay.rs @@ -3,6 +3,7 @@ use std::error::Error; use std::net::IpAddr; use bytes::Bytes; +use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::ProjectId; use relay_dynamic_config::{Feature, GlobalConfig, ProjectConfig}; use relay_event_normalization::replay::{self, ReplayError}; @@ -118,7 +119,7 @@ struct ReplayProcessingConfig<'a> { pub geoip_lookup: Option<&'a GeoIpLookup>, pub event_id: Option, pub project_id: Option, - pub organization_id: Option, + pub organization_id: Option, pub client_addr: Option, pub user_agent: RawUserAgentInfo, } @@ -149,7 +150,7 @@ fn handle_replay_event_item( relay_log::debug!( event_id = ?config.event_id, 
project_id = config.project_id.map(|v| v.value()), - organization_id = config.organization_id, + organization_id = config.organization_id.map(|o| o.value()), segment_id = segment_id, "replay segment-exceeded-limit" ); @@ -174,7 +175,7 @@ fn handle_replay_event_item( error = &error as &dyn Error, event_id = ?config.event_id, project_id = config.project_id.map(|v| v.value()), - organization_id = config.organization_id, + organization_id = config.organization_id.map(|o| o.value()), "invalid replay event" ); Err(match error { diff --git a/relay-server/src/services/projects/project/state/info.rs b/relay-server/src/services/projects/project/state/info.rs index eab9b8fb620..04854c1aaed 100644 --- a/relay-server/src/services/projects/project/state/info.rs +++ b/relay-server/src/services/projects/project/state/info.rs @@ -1,5 +1,6 @@ use chrono::{DateTime, Utc}; +use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::{ProjectId, ProjectKey}; #[cfg(feature = "processing")] use relay_cardinality::CardinalityLimit; @@ -48,7 +49,7 @@ pub struct ProjectInfo { pub config: ProjectConfig, /// The organization id. #[serde(default)] - pub organization_id: Option, + pub organization_id: Option, } /// Controls how we serialize a ProjectState for an external Relay @@ -62,7 +63,7 @@ pub struct LimitedProjectInfo { pub slug: Option, #[serde(with = "LimitedProjectConfig")] pub config: ProjectConfig, - pub organization_id: Option, + pub organization_id: Option, } impl ProjectInfo { @@ -76,7 +77,7 @@ impl ProjectInfo { /// Returns `Some` if the project contains a project identifier otherwise `None`. pub fn scoping(&self, project_key: ProjectKey) -> Option { Some(Scoping { - organization_id: self.organization_id.unwrap_or(0), + organization_id: self.organization_id.unwrap_or(OrganizationId::new(0)), project_id: self.project_id?, project_key, key_id: self @@ -218,7 +219,7 @@ impl ProjectInfo { // 3. 
An organization id is available and can be matched against rate limits. In this // project, all organizations will match automatically, unless the organization id // has changed since the last fetch. - scoping.organization_id = self.organization_id.unwrap_or(0); + scoping.organization_id = self.organization_id.unwrap_or(OrganizationId::new(0)); scoping } diff --git a/relay-server/src/services/store.rs b/relay-server/src/services/store.rs index 1af723ed687..89b7254ea56 100644 --- a/relay-server/src/services/store.rs +++ b/relay-server/src/services/store.rs @@ -2,6 +2,7 @@ //! The service uses Kafka topics to forward data to Sentry use anyhow::Context; +use relay_base_schema::organization::OrganizationId; use std::borrow::Cow; use std::collections::BTreeMap; use std::error::Error; @@ -432,7 +433,7 @@ impl StoreService { fn create_metric_message<'a>( &self, - organization_id: u64, + organization_id: OrganizationId, project_id: ProjectId, encoder: &'a mut BucketEncoder, namespace: MetricNamespace, @@ -682,7 +683,7 @@ impl StoreService { fn produce_profile( &self, - organization_id: u64, + organization_id: OrganizationId, project_id: ProjectId, key_id: Option, start_time: Instant, @@ -887,7 +888,7 @@ impl StoreService { span.duration_ms = ((span.end_timestamp_precise - span.start_timestamp_precise) * 1e3) as u32; span.event_id = event_id; - span.organization_id = scoping.organization_id; + span.organization_id = scoping.organization_id.value(); span.project_id = scoping.project_id.value(); span.retention_days = retention_days; span.start_timestamp_ms = (span.start_timestamp_precise * 1e3) as u64; @@ -1023,7 +1024,7 @@ impl StoreService { fn produce_profile_chunk( &self, - organization_id: u64, + organization_id: OrganizationId, project_id: ProjectId, start_time: Instant, retention_days: u16, @@ -1185,7 +1186,7 @@ struct AttachmentKafkaMessage { struct ReplayRecordingNotChunkedKafkaMessage<'a> { replay_id: EventId, key_id: Option, - org_id: u64, + org_id: OrganizationId, 
project_id: ProjectId, received: u64, retention_days: u16, @@ -1214,7 +1215,7 @@ struct UserReportKafkaMessage { #[derive(Clone, Debug, Serialize)] struct MetricKafkaMessage<'a> { - org_id: u64, + org_id: OrganizationId, project_id: ProjectId, name: &'a MetricName, #[serde(flatten)] @@ -1260,7 +1261,7 @@ impl<'a> MetricValue<'a> { #[derive(Clone, Debug, Serialize)] struct ProfileKafkaMessage { - organization_id: u64, + organization_id: OrganizationId, project_id: ProjectId, key_id: Option, received: u64, @@ -1418,7 +1419,7 @@ struct MetricsSummaryKafkaMessage<'a> { #[derive(Clone, Debug, Serialize)] struct ProfileChunkKafkaMessage { - organization_id: u64, + organization_id: OrganizationId, project_id: ProjectId, received: u64, retention_days: u16, diff --git a/relay-server/src/utils/rate_limits.rs b/relay-server/src/utils/rate_limits.rs index 2dabb2ed3e9..137567d8fa3 100644 --- a/relay-server/src/utils/rate_limits.rs +++ b/relay-server/src/utils/rate_limits.rs @@ -783,6 +783,7 @@ impl fmt::Debug for EnvelopeLimiter { mod tests { use std::collections::{BTreeMap, BTreeSet}; + use relay_base_schema::organization::OrganizationId; use relay_base_schema::project::{ProjectId, ProjectKey}; use relay_metrics::MetricNamespace; use relay_quotas::RetryAfter; @@ -803,7 +804,7 @@ mod tests { // Add a generic rate limit for all categories. rate_limits.add(RateLimit { categories: DataCategories::new(), - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("my_limit")), retry_after: RetryAfter::from_secs(42), namespaces: smallvec![], @@ -830,7 +831,7 @@ mod tests { // Rate limit with reason code and namespace. 
rate_limits.add(RateLimit { categories: smallvec![DataCategory::MetricBucket], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("my_limit")), retry_after: RetryAfter::from_secs(42), namespaces: smallvec![MetricNamespace::Custom, MetricNamespace::Spans], @@ -839,7 +840,7 @@ mod tests { // Rate limit without reason code. rate_limits.add(RateLimit { categories: smallvec![DataCategory::MetricBucket], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(42), namespaces: smallvec![MetricNamespace::Spans], @@ -854,7 +855,7 @@ mod tests { #[test] fn test_parse_invalid_rate_limits() { let scoping = Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -868,7 +869,7 @@ mod tests { #[test] fn test_parse_rate_limits() { let scoping = Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -885,7 +886,7 @@ mod tests { vec![ RateLimit { categories: DataCategories::new(), - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("my_limit")), retry_after: rate_limits[0].retry_after, namespaces: smallvec![], @@ -911,7 +912,7 @@ mod tests { #[test] fn test_parse_rate_limits_namespace() { let scoping = Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -925,7 +926,7 @@ mod tests { rate_limits, vec![RateLimit { categories: 
smallvec![DataCategory::MetricBucket], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: rate_limits[0].retry_after, namespaces: smallvec![MetricNamespace::Custom, MetricNamespace::Spans], @@ -936,7 +937,7 @@ mod tests { #[test] fn test_parse_rate_limits_empty_namespace() { let scoping = Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -951,7 +952,7 @@ mod tests { rate_limits, vec![RateLimit { categories: smallvec![DataCategory::MetricBucket], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: Some(ReasonCode::new("some_reason")), retry_after: rate_limits[0].retry_after, namespaces: smallvec![], @@ -962,7 +963,7 @@ mod tests { #[test] fn test_parse_rate_limits_only_unknown() { let scoping = Scoping { - organization_id: 42, + organization_id: OrganizationId::new(42), project_id: ProjectId::new(21), project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(17), @@ -976,7 +977,7 @@ mod tests { rate_limits, vec![RateLimit { categories: smallvec![DataCategory::Unknown, DataCategory::Unknown], - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: rate_limits[0].retry_after, namespaces: smallvec![], @@ -1018,7 +1019,7 @@ mod tests { fn rate_limit(category: DataCategory) -> RateLimit { RateLimit { categories: vec![category].into(), - scope: RateLimitScope::Organization(42), + scope: RateLimitScope::Organization(OrganizationId::new(42)), reason_code: None, retry_after: RetryAfter::from_secs(60), namespaces: smallvec![], From fe10112edd209fdd85f66fb3fd86d76ce6a544b9 Mon Sep 17 00:00:00 2001 From: Martin Linzmayer Date: Tue, 22 Oct 2024 17:21:45 +0200 
Subject: [PATCH 2/9] ref: Add cfg(feature) to import to satisfy clippy --- relay-sampling/src/evaluation.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/relay-sampling/src/evaluation.rs b/relay-sampling/src/evaluation.rs index 1a30d80bbab..a09091441f5 100644 --- a/relay-sampling/src/evaluation.rs +++ b/relay-sampling/src/evaluation.rs @@ -10,6 +10,7 @@ use chrono::{DateTime, Utc}; use rand::distributions::Uniform; use rand::Rng; use rand_pcg::Pcg32; +#[cfg(feature = "redis")] use relay_base_schema::organization::OrganizationId; use relay_protocol::Getter; #[cfg(feature = "redis")] From be4740057560a848dad1b9e4fb1686ed08524426 Mon Sep 17 00:00:00 2001 From: Martin Linzmayer Date: Wed, 23 Oct 2024 09:26:55 +0200 Subject: [PATCH 3/9] docs: Add Changelog entry --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d6004291635..51a8c511831 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ - Feature flags of graduated features are now hard-coded in Relay so they can be removed from Sentry. ([#4076](https://github.com/getsentry/relay/pull/4076), [#4080](https://github.com/getsentry/relay/pull/4080)) - Add parallelization in Redis commands. ([#4118](https://github.com/getsentry/relay/pull/4118)) - Extract user ip for spans. ([#4144](https://github.com/getsentry/relay/pull/4144)) +- Replace u64 with OrganizationId new-type struct for organization id. 
([#4159](https://github.com/getsentry/relay/pull/4159)) ## 24.9.0 From 5656626ac1cf8741cc8edc8b6ff1e0ffa5cfc1e6 Mon Sep 17 00:00:00 2001 From: Martin Linzmayer Date: Wed, 23 Oct 2024 13:25:58 +0200 Subject: [PATCH 4/9] docs: add backticks to struct in Changelog Co-authored-by: Riccardo Busetti --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 51a8c511831..1230836c677 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,7 +32,7 @@ - Feature flags of graduated features are now hard-coded in Relay so they can be removed from Sentry. ([#4076](https://github.com/getsentry/relay/pull/4076), [#4080](https://github.com/getsentry/relay/pull/4080)) - Add parallelization in Redis commands. ([#4118](https://github.com/getsentry/relay/pull/4118)) - Extract user ip for spans. ([#4144](https://github.com/getsentry/relay/pull/4144)) -- Replace u64 with OrganizationId new-type struct for organization id. ([#4159](https://github.com/getsentry/relay/pull/4159)) +- Replace u64 with `OrganizationId` new-type struct for organization id. ([#4159](https://github.com/getsentry/relay/pull/4159)) ## 24.9.0 From 8af6c282c5fa8f88641d9def20ca5282d226bb71 Mon Sep 17 00:00:00 2001 From: Martin Linzmayer Date: Wed, 23 Oct 2024 14:36:42 +0200 Subject: [PATCH 5/9] ref: replace struct name with Self, update docs --- relay-base-schema/src/organization.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/relay-base-schema/src/organization.rs b/relay-base-schema/src/organization.rs index 4318a2e5b50..98bb54549f9 100644 --- a/relay-base-schema/src/organization.rs +++ b/relay-base-schema/src/organization.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; -/// The unique identifier of a Sentry organization +/// The unique identifier of a Sentry organization. 
#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub struct OrganizationId(u64); @@ -11,10 +11,10 @@ impl OrganizationId { /// Creates a new organization ID from its numeric value #[inline] pub fn new(id: u64) -> Self { - OrganizationId(id) + Self(id) } - /// returns the numeric value of the organization ID + /// Returns the numeric value of the organization ID. pub fn value(self) -> u64 { self.0 } From 3955a15b209abc59ff628ee93041578d1e387ab8 Mon Sep 17 00:00:00 2001 From: Martin Linzmayer Date: Wed, 23 Oct 2024 14:55:04 +0200 Subject: [PATCH 6/9] docs: move release note into Unreleased section --- CHANGELOG.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1230836c677..7c0a2b3a5ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## Unreleased + +**Internal** + +- Replace u64 with OrganizationId new-type struct for organization id. ([#4159](https://github.com/getsentry/relay/pull/4159)) + ## 24.10.0 **Breaking Changes:** @@ -32,7 +38,6 @@ - Feature flags of graduated features are now hard-coded in Relay so they can be removed from Sentry. ([#4076](https://github.com/getsentry/relay/pull/4076), [#4080](https://github.com/getsentry/relay/pull/4080)) - Add parallelization in Redis commands. ([#4118](https://github.com/getsentry/relay/pull/4118)) - Extract user ip for spans. ([#4144](https://github.com/getsentry/relay/pull/4144)) -- Replace u64 with `OrganizationId` new-type struct for organization id. 
([#4159](https://github.com/getsentry/relay/pull/4159)) ## 24.9.0 From 5c2791baf79c31f16ce96661b5eab6d4c30950c2 Mon Sep 17 00:00:00 2001 From: Martin Linzmayer Date: Wed, 23 Oct 2024 14:59:55 +0200 Subject: [PATCH 7/9] test: add tests to verify that new-type is de/serialized properly --- relay-base-schema/src/organization.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/relay-base-schema/src/organization.rs b/relay-base-schema/src/organization.rs index 98bb54549f9..12fec9b1fb6 100644 --- a/relay-base-schema/src/organization.rs +++ b/relay-base-schema/src/organization.rs @@ -25,3 +25,23 @@ impl std::fmt::Display for OrganizationId { write!(f, "{}", self.value()) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_deserialize() { + let json = r#"[42]"#; + let ids: Vec = + serde_json::from_str(json).expect("deserialize organization ids"); + assert_eq!(ids, vec![OrganizationId::new(42)]); + } + + #[test] + fn test_serialize() { + let ids = vec![OrganizationId::new(42)]; + let json = serde_json::to_string(&ids).expect("serialize organization ids"); + assert_eq!(json, r#"[42]"#); + } +} From ff7afd472c08ae1e4dd879f5f9d8fbefdc3b9859 Mon Sep 17 00:00:00 2001 From: Martin Linzmayer Date: Wed, 30 Oct 2024 14:05:46 +0100 Subject: [PATCH 8/9] fix broken value call --- relay-server/src/services/processor.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/relay-server/src/services/processor.rs b/relay-server/src/services/processor.rs index 5b9dae42f6f..1d6d14adec9 100644 --- a/relay-server/src/services/processor.rs +++ b/relay-server/src/services/processor.rs @@ -2592,7 +2592,7 @@ impl EnvelopeProcessorService { }, || { relay_log::error!( - tags.organization_id = scoping.organization_idvalue(), + tags.organization_id = scoping.organization_id.value(), tags.limit_id = limit.id, tags.passive = limit.passive, "Cardinality Limit" From aebab72280ffedec2e5af41e39bd896dec7b0efa Mon Sep 17 00:00:00 2001 From: Martin 
Linzmayer Date: Thu, 31 Oct 2024 09:54:25 +0100 Subject: [PATCH 9/9] fix merge errors --- relay-server/src/services/processor/replay.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/relay-server/src/services/processor/replay.rs b/relay-server/src/services/processor/replay.rs index faaff51243d..40db6d49d46 100644 --- a/relay-server/src/services/processor/replay.rs +++ b/relay-server/src/services/processor/replay.rs @@ -269,7 +269,7 @@ fn handle_replay_recording_item( error = &error as &dyn Error, event_id = ?config.event_id, project_id = config.project_id.map(|v| v.value()), - organization_id = config.organization_id, + organization_id = config.organization_id.map(|o| o.value()), "invalid replay recording" ); ProcessingError::InvalidReplay(DiscardReason::InvalidReplayRecordingEvent)