Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(metrics): Add received_at timestamp in bucket metadata #3488

Merged
merged 32 commits into from
May 6, 2024
Merged
Show file tree
Hide file tree
Changes from 9 commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions relay-config/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -524,13 +524,17 @@ struct SentryMetrics {
///
/// Defaults to 5.
pub meta_locations_max: usize,
/// Whether to override the [`received_at`] field in the [`BucketMetadata`] with the current
/// receive time of the instance.
pub override_received_at_metadata: bool,
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In production, we would like to set this option to `true` only for PoP (point-of-presence) Relays, since we want the `received_at` metadata field to be set to the timestamp of the outermost Relay that receives a given bucket.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We already have that handling in the processor — see `keep_metadata`, for example in `handle_process_metrics` — which should already do the right thing.

}

impl Default for SentryMetrics {
fn default() -> Self {
Self {
meta_locations_expiry: 15 * 24 * 60 * 60,
meta_locations_max: 5,
override_received_at_metadata: false,
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

By default, we do not want to override the metadata, in order to avoid unexpected behavior when the config parameter is missing.

}
}
}
Expand Down Expand Up @@ -1947,6 +1951,11 @@ impl Config {
Duration::from_secs(self.values.sentry_metrics.meta_locations_expiry)
}

/// Returns whether we want to override the [`received_at`] field of [`BucketMetadata`].
pub fn metrics_override_received_at_metadata(&self) -> bool {
self.values.sentry_metrics.override_received_at_metadata
}

/// Returns the default timeout for all upstream HTTP requests.
pub fn http_timeout(&self) -> Duration {
Duration::from_secs(self.values.http.timeout.into())
Expand Down
69 changes: 45 additions & 24 deletions relay-metrics/src/aggregator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -899,14 +899,15 @@ mod tests {
}
}

fn some_bucket() -> Bucket {
fn some_bucket(timestamp: Option<UnixTimestamp>) -> Bucket {
let timestamp = timestamp.map_or(UnixTimestamp::from_secs(999994711), |t| t);
Bucket {
timestamp: UnixTimestamp::from_secs(999994711),
timestamp,
width: 0,
name: "c:transactions/foo".into(),
value: BucketValue::counter(42.into()),
tags: BTreeMap::new(),
metadata: BucketMetadata::new(),
metadata: BucketMetadata::new(timestamp),
}
}

Expand All @@ -916,7 +917,7 @@ mod tests {
let project_key = ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap();
let mut aggregator = Aggregator::new(test_config());

let bucket1 = some_bucket();
let bucket1 = some_bucket(None);

let mut bucket2 = bucket1.clone();
bucket2.value = BucketValue::counter(43.into());
Expand Down Expand Up @@ -1006,7 +1007,7 @@ mod tests {
let project_key = ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap();
let mut aggregator = Aggregator::new(config);

let bucket1 = some_bucket();
let bucket1 = some_bucket(None);

let mut bucket2 = bucket1.clone();
bucket2.timestamp = UnixTimestamp::from_secs(999994712);
Expand Down Expand Up @@ -1069,8 +1070,12 @@ mod tests {
let mut aggregator = Aggregator::new(config);

// It's OK to have same metric with different projects:
aggregator.merge(project_key1, some_bucket(), None).unwrap();
aggregator.merge(project_key2, some_bucket(), None).unwrap();
aggregator
.merge(project_key1, some_bucket(None), None)
.unwrap();
aggregator
.merge(project_key2, some_bucket(None), None)
.unwrap();

assert_eq!(aggregator.buckets.len(), 2);
}
Expand Down Expand Up @@ -1151,13 +1156,14 @@ mod tests {
let mut aggregator = Aggregator::new(test_config());
let project_key = ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fed").unwrap();

let timestamp = UnixTimestamp::from_secs(999994711);
let bucket = Bucket {
timestamp: UnixTimestamp::from_secs(999994711),
timestamp,
width: 0,
name: "c:transactions/foo@none".into(),
value: BucketValue::counter(42.into()),
tags: BTreeMap::new(),
metadata: BucketMetadata::new(),
metadata: BucketMetadata::new(timestamp),
};
let bucket_key = BucketKey {
project_key,
Expand Down Expand Up @@ -1407,13 +1413,14 @@ mod tests {

#[test]
fn test_aggregator_cost_enforcement_total() {
let timestamp = UnixTimestamp::from_secs(999994711);
let bucket = Bucket {
timestamp: UnixTimestamp::from_secs(999994711),
timestamp,
width: 0,
name: "c:transactions/foo".into(),
value: BucketValue::counter(42.into()),
tags: BTreeMap::new(),
metadata: BucketMetadata::new(),
metadata: BucketMetadata::new(timestamp),
};

let mut aggregator = Aggregator::new(test_config());
Expand All @@ -1438,13 +1445,14 @@ mod tests {
let mut config = test_config();
config.max_project_key_bucket_bytes = Some(1);

let timestamp = UnixTimestamp::from_secs(999994711);
let bucket = Bucket {
timestamp: UnixTimestamp::from_secs(999994711),
timestamp,
width: 0,
name: "c:transactions/foo".into(),
value: BucketValue::counter(42.into()),
tags: BTreeMap::new(),
metadata: BucketMetadata::new(),
metadata: BucketMetadata::new(timestamp),
};

let mut aggregator = Aggregator::new(config);
Expand Down Expand Up @@ -1475,23 +1483,36 @@ mod tests {
let project_key = ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap();
let mut aggregator = Aggregator::new(config);

let bucket1 = some_bucket();
let bucket2 = some_bucket();
let bucket1 = some_bucket(Some(UnixTimestamp::from_secs(999994711)));
let bucket2 = some_bucket(Some(UnixTimestamp::from_secs(999994711)));

// Create a bucket with already 3 merges.
let mut bucket3 = some_bucket();
bucket3.metadata.merge(BucketMetadata::new());
bucket3.metadata.merge(BucketMetadata::new());
// We create a bucket with 3 merges and monotonically increasing timestamps.
let mut bucket3 = some_bucket(Some(UnixTimestamp::from_secs(999994711)));
bucket3
.metadata
.merge(BucketMetadata::new(UnixTimestamp::from_secs(999997811)));
bucket3
.metadata
.merge(BucketMetadata::new(UnixTimestamp::from_secs(999999811)));

aggregator.merge(project_key, bucket1, None).unwrap();
aggregator.merge(project_key, bucket2, None).unwrap();
aggregator.merge(project_key, bucket3, None).unwrap();
aggregator
.merge(project_key, bucket1.clone(), None)
.unwrap();
aggregator
.merge(project_key, bucket2.clone(), None)
.unwrap();
aggregator
.merge(project_key, bucket3.clone(), None)
.unwrap();

let buckets: Vec<_> = aggregator.buckets.values().map(|v| &v.metadata).collect();
insta::assert_debug_snapshot!(buckets, @r###"
let buckets_metadata: Vec<_> = aggregator.buckets.values().map(|v| &v.metadata).collect();
insta::assert_debug_snapshot!(buckets_metadata, @r###"
[
BucketMetadata {
merges: 5,
received_at: Some(
UnixTimestamp(999994711),
),
},
]
"###);
Expand Down
5 changes: 3 additions & 2 deletions relay-metrics/src/aggregatorservice.rs
Original file line number Diff line number Diff line change
Expand Up @@ -471,13 +471,14 @@ mod tests {
}

/// Creates the canonical bucket used by the aggregator-service tests: a
/// counter metric `c:transactions/foo` with value 42, no tags, and a fixed
/// timestamp shared by the bucket and its metadata.
fn some_bucket() -> Bucket {
    // A single binding keeps the bucket timestamp and the metadata's
    // `received_at` in sync.
    let received = UnixTimestamp::from_secs(999994711);
    Bucket {
        timestamp: received,
        width: 0,
        name: "c:transactions/foo".into(),
        value: BucketValue::counter(42.into()),
        tags: BTreeMap::new(),
        metadata: BucketMetadata::new(received),
    }
}

Expand Down
Loading
Loading