diff --git a/docs/DEPRECATIONS.md b/docs/DEPRECATIONS.md
index b2e1faedd6996..5bf7447aef2f4 100644
--- a/docs/DEPRECATIONS.md
+++ b/docs/DEPRECATIONS.md
@@ -18,5 +18,4 @@ For example:
 
 ## To be removed
 
-- datadog_v1_metrics v0.35.0 Support for `v1` series endpoint in the `datadog_metrics` sink should be removed.
 - http_internal_metrics v0.35.0 `requests_completed_total`, `request_duration_seconds`, and `requests_received_total` internal metrics should be removed.
diff --git a/src/sinks/datadog/metrics/config.rs b/src/sinks/datadog/metrics/config.rs
index c0817a456a675..86b7f4818ec9a 100644
--- a/src/sinks/datadog/metrics/config.rs
+++ b/src/sinks/datadog/metrics/config.rs
@@ -22,18 +22,15 @@ use crate::{
     tls::{MaybeTlsSettings, TlsEnableableConfig},
 };
 
+#[derive(Clone, Copy, Debug, Default)]
+pub struct DatadogMetricsDefaultBatchSettings;
+
 // This default is centered around "series" data, which should be the lion's share of what we
 // process. Given that a single series, when encoded, is in the 150-300 byte range, we can fit a
 // lot of these into a single request, something like 150-200K series. Simply to be a little more
 // conservative, though, we use 100K here. This will also get a little more tricky when it comes to
 // distributions and sketches, but we're going to have to implement incremental encoding to handle
 // "we've exceeded our maximum payload size, split this batch" scenarios anyways.
-pub const MAXIMUM_PAYLOAD_COMPRESSED_SIZE: usize = 3_200_000;
-pub const MAXIMUM_PAYLOAD_SIZE: usize = 62_914_560;
-
-#[derive(Clone, Copy, Debug, Default)]
-pub struct DatadogMetricsDefaultBatchSettings;
-
 impl SinkBatchSettings for DatadogMetricsDefaultBatchSettings {
     const MAX_EVENTS: Option<usize> = Some(100_000);
     const MAX_BYTES: Option<usize> = None;
@@ -44,9 +41,6 @@ pub(super) const SERIES_V1_PATH: &str = "/api/v1/series";
 pub(super) const SERIES_V2_PATH: &str = "/api/v2/series";
 pub(super) const SKETCHES_PATH: &str = "/api/beta/sketches";
 
-// TODO: the series V1 endpoint support is considered deprecated and should be removed in a future release.
-// At that time when the V1 support is removed, the SeriesApiVersion stops being useful and can be removed.
-
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
 pub enum SeriesApiVersion {
     V1,
     V2,
 }
@@ -60,12 +54,12 @@ impl SeriesApiVersion {
             Self::V2 => SERIES_V2_PATH,
         }
     }
-    fn get_api_version_backwards_compatible() -> Self {
+    fn get_api_version() -> Self {
         static API_VERSION: OnceLock<SeriesApiVersion> = OnceLock::new();
         *API_VERSION.get_or_init(|| {
-            match std::env::var("VECTOR_TEMP_USE_DD_METRICS_SERIES_V1_API") {
-                Ok(_) => Self::V1,
-                Err(_) => Self::V2,
+            match std::env::var("VECTOR_TEMP_USE_DD_METRICS_SERIES_V2_API") {
+                Ok(_) => Self::V2,
+                Err(_) => Self::V1,
             }
         })
     }
 }
@@ -80,6 +74,12 @@ pub enum DatadogMetricsEndpoint {
     Sketches,
 }
 
+/// Payload limits for metrics are endpoint-dependent.
+pub(super) struct DatadogMetricsPayloadLimits {
+    pub(super) uncompressed: usize,
+    pub(super) compressed: usize,
+}
+
 impl DatadogMetricsEndpoint {
     /// Gets the content type associated with the specific encoder for a given metric endpoint.
     pub const fn content_type(self) -> &'static str {
@@ -96,7 +96,29 @@ impl DatadogMetricsEndpoint {
     // Creates an instance of the `Series` variant with the default API version.
     pub fn series() -> Self {
-        Self::Series(SeriesApiVersion::get_api_version_backwards_compatible())
+        Self::Series(SeriesApiVersion::get_api_version())
+    }
+
+    pub(super) const fn payload_limits(self) -> DatadogMetricsPayloadLimits {
+        // from https://docs.datadoghq.com/api/latest/metrics/#submit-metrics
+
+        let (uncompressed, compressed) = match self {
+            // Sketches use the same payload size limits as v1 series
+            DatadogMetricsEndpoint::Series(SeriesApiVersion::V1)
+            | DatadogMetricsEndpoint::Sketches => (
+                62_914_560, // 60 MiB
+                3_200_000,  // 3.2 MB
+            ),
+            DatadogMetricsEndpoint::Series(SeriesApiVersion::V2) => (
+                5_242_880, // 5 MiB
+                512_000,   // 512 KB
+            ),
+        };
+
+        DatadogMetricsPayloadLimits {
+            uncompressed,
+            compressed,
+        }
     }
 }
@@ -197,12 +219,7 @@ impl DatadogMetricsConfig {
     ) -> crate::Result<DatadogMetricsEndpointConfiguration> {
         let base_uri = self.get_base_agent_endpoint();
 
-        // TODO: the V1 endpoint support is considered deprecated and should be removed in a future release.
-        //       At that time, the get_api_version_backwards_compatible() should be replaced with statically using the v2.
-        let series_endpoint = build_uri(
-            &base_uri,
-            SeriesApiVersion::get_api_version_backwards_compatible().get_path(),
-        )?;
+        let series_endpoint = build_uri(&base_uri, SeriesApiVersion::get_api_version().get_path())?;
         let sketches_endpoint = build_uri(&base_uri, SKETCHES_PATH)?;
 
         Ok(DatadogMetricsEndpointConfiguration::new(
diff --git a/src/sinks/datadog/metrics/encoder.rs b/src/sinks/datadog/metrics/encoder.rs
index 5cc265992440c..edb6d4725efc8 100644
--- a/src/sinks/datadog/metrics/encoder.rs
+++ b/src/sinks/datadog/metrics/encoder.rs
@@ -17,9 +17,7 @@ use vector_lib::{
     EstimatedJsonEncodedSizeOf,
 };
 
-use super::config::{
-    DatadogMetricsEndpoint, SeriesApiVersion, MAXIMUM_PAYLOAD_COMPRESSED_SIZE, MAXIMUM_PAYLOAD_SIZE,
-};
+use super::config::{DatadogMetricsEndpoint, SeriesApiVersion};
 use crate::{
     common::datadog::{
         DatadogMetricType, DatadogPoint, DatadogSeriesMetric, DatadogSeriesMetricMetadata,
@@ -171,13 +169,12 @@ impl DatadogMetricsEncoder {
         endpoint: DatadogMetricsEndpoint,
         default_namespace: Option<String>,
     ) -> Result<Self, CreateError> {
-        // According to the datadog-agent code, sketches use the same payload size limits as series
-        // data. We're just gonna go with that for now.
+        let payload_limits = endpoint.payload_limits();
+
         Self::with_payload_limits(
             endpoint,
             default_namespace,
-            MAXIMUM_PAYLOAD_SIZE,
-            MAXIMUM_PAYLOAD_COMPRESSED_SIZE,
+            payload_limits.uncompressed,
+            payload_limits.compressed,
         )
     }
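
For reviewers, here is a minimal standalone sketch of the behaviour this patch introduces. It is not code from the repository: `SeriesApiVersion`, `PayloadLimits`, and `payload_limits` below are simplified stand-ins for the real items in `src/sinks/datadog/metrics/config.rs`. It models the two changes made above: the series API defaults back to v1 unless `VECTOR_TEMP_USE_DD_METRICS_SERIES_V2_API` is set, and the payload limits are now chosen per endpoint.

```rust
// Simplified, self-contained model of the selection logic in this patch.
use std::sync::OnceLock;

#[derive(Clone, Copy, Debug)]
enum SeriesApiVersion {
    V1,
    V2,
}

impl SeriesApiVersion {
    // v1 is the default again; exporting VECTOR_TEMP_USE_DD_METRICS_SERIES_V2_API
    // (with any value) opts back into the v2 series endpoint.
    fn get_api_version() -> Self {
        static API_VERSION: OnceLock<SeriesApiVersion> = OnceLock::new();
        *API_VERSION.get_or_init(|| {
            match std::env::var("VECTOR_TEMP_USE_DD_METRICS_SERIES_V2_API") {
                Ok(_) => Self::V2,
                Err(_) => Self::V1,
            }
        })
    }
}

// Endpoint-dependent limits, matching the numbers added in config.rs
// (see https://docs.datadoghq.com/api/latest/metrics/#submit-metrics).
struct PayloadLimits {
    uncompressed: usize,
    compressed: usize,
}

fn payload_limits(version: SeriesApiVersion) -> PayloadLimits {
    match version {
        // v1 series (and sketches) keep the previous 60 MiB / 3.2 MB limits.
        SeriesApiVersion::V1 => PayloadLimits {
            uncompressed: 62_914_560,
            compressed: 3_200_000,
        },
        // v2 series is capped far lower: 5 MiB uncompressed, 512 KB compressed.
        SeriesApiVersion::V2 => PayloadLimits {
            uncompressed: 5_242_880,
            compressed: 512_000,
        },
    }
}

fn main() {
    let version = SeriesApiVersion::get_api_version();
    let limits = payload_limits(version);
    println!(
        "series API {:?}: <= {} bytes uncompressed, <= {} bytes compressed",
        version, limits.uncompressed, limits.compressed
    );
}
```

The numeric limits mirror the ones added in the patch; making them endpoint-dependent is what lets the v2 series path honour its much smaller 512 KB compressed cap without shrinking the payloads used for v1 series and sketches.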