From 38c2a15174b39bb59f26abc2c15b4798adbe2573 Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Thu, 6 Jul 2023 19:02:02 +0100 Subject: [PATCH 1/9] Get byte size after transforming event Signed-off-by: Stephen Wakely --- src/sinks/amqp/encoder.rs | 13 +++++-- src/sinks/azure_blob/test.rs | 24 ++++++++++--- src/sinks/datadog/logs/sink.rs | 18 +++++++--- src/sinks/datadog/metrics/encoder.rs | 12 +++++-- src/sinks/elasticsearch/encoder.rs | 12 ++++--- src/sinks/elasticsearch/tests.rs | 12 +++---- src/sinks/gcp/chronicle_unstructured.rs | 13 ++++--- src/sinks/gcp/cloud_storage.rs | 8 ++++- src/sinks/loki/event.rs | 10 ++++-- src/sinks/new_relic/encoding.rs | 10 ++++-- src/sinks/pulsar/encoder.rs | 14 ++++++-- src/sinks/splunk_hec/logs/encoder.rs | 9 +++-- src/sinks/splunk_hec/metrics/encoder.rs | 11 ++++-- src/sinks/statsd/request_builder.rs | 11 ++++-- src/sinks/util/encoding.rs | 48 ++++++++++++++++++------- src/sinks/util/request_builder.rs | 19 ++++++---- src/sinks/webhdfs/test.rs | 4 ++- 17 files changed, 187 insertions(+), 61 deletions(-) diff --git a/src/sinks/amqp/encoder.rs b/src/sinks/amqp/encoder.rs index d3d449811372f..a1af7ec54e77b 100644 --- a/src/sinks/amqp/encoder.rs +++ b/src/sinks/amqp/encoder.rs @@ -3,6 +3,7 @@ use crate::sinks::prelude::*; use bytes::BytesMut; use std::io; use tokio_util::codec::Encoder as _; +use vector_core::config::telemetry; #[derive(Clone, Debug)] pub(super) struct AmqpEncoder { @@ -11,9 +12,17 @@ pub(super) struct AmqpEncoder { } impl encoding::Encoder for AmqpEncoder { - fn encode_input(&self, mut input: Event, writer: &mut dyn io::Write) -> io::Result { + fn encode_input( + &self, + mut input: Event, + writer: &mut dyn io::Write, + ) -> io::Result<(usize, GroupedCountByteSize)> { let mut body = BytesMut::new(); self.transformer.transform(&mut input); + + let mut byte_size = telemetry().create_request_count_byte_size(); + byte_size.add_event(&input, input.estimated_json_encoded_size_of()); + let mut encoder = self.encoder.clone(); encoder .encode(input, &mut body) @@ -22,6 +31,6 @@ impl encoding::Encoder for AmqpEncoder { let body = body.freeze(); write_all(writer, 1, body.as_ref())?; - Ok(body.len()) + Ok((body.len(), byte_size)) } } diff --git a/src/sinks/azure_blob/test.rs b/src/sinks/azure_blob/test.rs index de47fd8bdebe4..0c4ad1f38dd53 100644 --- a/src/sinks/azure_blob/test.rs +++ b/src/sinks/azure_blob/test.rs @@ -4,7 +4,8 @@ use codecs::{ encoding::{Framer, FramingConfig}, NewlineDelimitedEncoder, TextSerializerConfig, }; -use vector_core::partition::Partitioner; +use vector_common::request_metadata::GroupedCountByteSize; +use vector_core::{partition::Partitioner, EstimatedJsonEncodedSizeOf}; use super::config::AzureBlobSinkConfig; use super::request_builder::AzureBlobRequestOptions; @@ -68,10 +69,13 @@ fn azure_blob_build_request_without_compression() { compression, }; + let mut byte_size = GroupedCountByteSize::new_untagged(); + byte_size.add_event(&log, log.estimated_json_encoded_size_of()); + let (metadata, request_metadata_builder, _events) = request_options.split_input((key, vec![log])); - let payload = EncodeResult::uncompressed(Bytes::new()); + let payload = EncodeResult::uncompressed(Bytes::new(), byte_size); let request_metadata = request_metadata_builder.build(&payload); let request = request_options.build_request(metadata, request_metadata, payload); @@ -112,10 +116,14 @@ fn azure_blob_build_request_with_compression() { ), compression, }; + + let mut byte_size = GroupedCountByteSize::new_untagged(); + byte_size.add_event(&log, 
log.estimated_json_encoded_size_of()); + let (metadata, request_metadata_builder, _events) = request_options.split_input((key, vec![log])); - let payload = EncodeResult::uncompressed(Bytes::new()); + let payload = EncodeResult::uncompressed(Bytes::new(), byte_size); let request_metadata = request_metadata_builder.build(&payload); let request = request_options.build_request(metadata, request_metadata, payload); @@ -157,10 +165,13 @@ fn azure_blob_build_request_with_time_format() { compression, }; + let mut byte_size = GroupedCountByteSize::new_untagged(); + byte_size.add_event(&log, log.estimated_json_encoded_size_of()); + let (metadata, request_metadata_builder, _events) = request_options.split_input((key, vec![log])); - let payload = EncodeResult::uncompressed(Bytes::new()); + let payload = EncodeResult::uncompressed(Bytes::new(), byte_size); let request_metadata = request_metadata_builder.build(&payload); let request = request_options.build_request(metadata, request_metadata, payload); @@ -205,10 +216,13 @@ fn azure_blob_build_request_with_uuid() { compression, }; + let mut byte_size = GroupedCountByteSize::new_untagged(); + byte_size.add_event(&log, log.estimated_json_encoded_size_of()); + let (metadata, request_metadata_builder, _events) = request_options.split_input((key, vec![log])); - let payload = EncodeResult::uncompressed(Bytes::new()); + let payload = EncodeResult::uncompressed(Bytes::new(), byte_size); let request_metadata = request_metadata_builder.build(&payload); let request = request_options.build_request(metadata, request_metadata, payload); diff --git a/src/sinks/datadog/logs/sink.rs b/src/sinks/datadog/logs/sink.rs index fd2f132d13f85..ca1bb60e8de7a 100644 --- a/src/sinks/datadog/logs/sink.rs +++ b/src/sinks/datadog/logs/sink.rs @@ -7,7 +7,7 @@ use futures::stream::{BoxStream, StreamExt}; use lookup::event_path; use snafu::Snafu; use tower::Service; -use vector_common::request_metadata::RequestMetadata; +use vector_common::request_metadata::{GroupedCountByteSize, RequestMetadata}; use vector_core::{ event::{Event, EventFinalizers, Finalizable, Value}, partition::Partitioner, @@ -125,7 +125,11 @@ impl JsonEncoding { } impl crate::sinks::util::encoding::Encoder> for JsonEncoding { - fn encode_input(&self, mut input: Vec, writer: &mut dyn io::Write) -> io::Result { + fn encode_input( + &self, + mut input: Vec, + writer: &mut dyn io::Write, + ) -> io::Result<(usize, GroupedCountByteSize)> { for event in input.iter_mut() { let log = event.as_mut_log(); let message_path = log @@ -219,7 +223,7 @@ impl RequestBuilder<(Option>, Vec)> for LogRequestBuilder { // to (un)compressed size limitations. 
let mut buf = Vec::new(); let n_events = events.len(); - let uncompressed_size = self.encoder().encode_input(events, &mut buf)?; + let (uncompressed_size, byte_size) = self.encoder().encode_input(events, &mut buf)?; if uncompressed_size > MAX_PAYLOAD_BYTES { return Err(RequestBuildError::PayloadTooBig); } @@ -230,9 +234,13 @@ impl RequestBuilder<(Option>, Vec)> for LogRequestBuilder { let bytes = compressor.into_inner().freeze(); if self.compression.is_compressed() { - Ok(EncodeResult::compressed(bytes, uncompressed_size)) + Ok(EncodeResult::compressed( + bytes, + uncompressed_size, + byte_size, + )) } else { - Ok(EncodeResult::uncompressed(bytes)) + Ok(EncodeResult::uncompressed(bytes, byte_size)) } } diff --git a/src/sinks/datadog/metrics/encoder.rs b/src/sinks/datadog/metrics/encoder.rs index 0dd6c393e31b5..338a91392b3b6 100644 --- a/src/sinks/datadog/metrics/encoder.rs +++ b/src/sinks/datadog/metrics/encoder.rs @@ -10,10 +10,12 @@ use chrono::{DateTime, Utc}; use once_cell::sync::OnceCell; use prost::Message; use snafu::{ResultExt, Snafu}; +use vector_common::request_metadata::GroupedCountByteSize; use vector_core::{ - config::{log_schema, LogSchema}, + config::{log_schema, telemetry, LogSchema}, event::{metric::MetricSketch, Metric, MetricTags, MetricValue}, metrics::AgentDDSketch, + EstimatedJsonEncodedSizeOf, }; use super::config::{ @@ -122,6 +124,7 @@ struct EncoderState { written: usize, buf: Vec, processed: Vec, + byte_size: GroupedCountByteSize, } impl Default for EncoderState { @@ -131,6 +134,7 @@ impl Default for EncoderState { written: 0, buf: Vec::with_capacity(1024), processed: Vec::new(), + byte_size: telemetry().create_request_count_byte_size(), } } } @@ -202,6 +206,10 @@ impl DatadogMetricsEncoder { // Clear our temporary buffer before any encoding. self.state.buf.clear(); + self.state + .byte_size + .add_event(&metric, metric.estimated_json_encoded_size_of()); + match self.endpoint { // Series metrics are encoded via JSON, in an incremental fashion. DatadogMetricsEndpoint::Series => { @@ -349,7 +357,7 @@ impl DatadogMetricsEncoder { if recommended_splits == 1 { // "One" split means no splits needed: our payload didn't exceed either of the limits. 
Ok(( - EncodeResult::compressed(payload, raw_bytes_written), + EncodeResult::compressed(payload, raw_bytes_written, self.state.byte_size.clone()), processed, )) } else { diff --git a/src/sinks/elasticsearch/encoder.rs b/src/sinks/elasticsearch/encoder.rs index f5b39a52b23a0..8f136bd02a195 100644 --- a/src/sinks/elasticsearch/encoder.rs +++ b/src/sinks/elasticsearch/encoder.rs @@ -4,9 +4,9 @@ use serde::Serialize; use vector_buffers::EventCount; use vector_common::{ json_size::JsonSize, - request_metadata::{EventCountTags, GetEventCountTags}, + request_metadata::{EventCountTags, GetEventCountTags, GroupedCountByteSize}, }; -use vector_core::{event::Event, ByteSizeOf, EstimatedJsonEncodedSizeOf}; +use vector_core::{config::telemetry, event::Event, ByteSizeOf, EstimatedJsonEncodedSizeOf}; use crate::{ codecs::Transformer, @@ -68,12 +68,15 @@ impl Encoder> for ElasticsearchEncoder { &self, input: Vec, writer: &mut dyn Write, - ) -> std::io::Result { + ) -> std::io::Result<(usize, GroupedCountByteSize)> { let mut written_bytes = 0; + let mut byte_size = telemetry().create_request_count_byte_size(); for event in input { let log = { let mut event = Event::from(event.log); self.transformer.transform(&mut event); + byte_size.add_event(&event, event.estimated_json_encoded_size_of()); + event.into_log() }; written_bytes += write_bulk_action( @@ -92,7 +95,8 @@ impl Encoder> for ElasticsearchEncoder { Ok(()) })?; } - Ok(written_bytes) + + Ok((written_bytes, byte_size)) } } diff --git a/src/sinks/elasticsearch/tests.rs b/src/sinks/elasticsearch/tests.rs index da0d3cecea965..a9cc8576a6843 100644 --- a/src/sinks/elasticsearch/tests.rs +++ b/src/sinks/elasticsearch/tests.rs @@ -49,7 +49,7 @@ async fn sets_create_action_when_configured() { log.insert("action", "crea"); let mut encoded = vec![]; - let encoded_size = es + let (encoded_size, _json_size) = es .request_builder .encoder .encode_input( @@ -103,7 +103,7 @@ async fn encode_datastream_mode() { log.insert("data_stream", data_stream_body()); let mut encoded = vec![]; - let encoded_size = es + let (encoded_size, _json_size) = es .request_builder .encoder .encode_input( @@ -154,7 +154,7 @@ async fn encode_datastream_mode_no_routing() { .expect("invalid timestamp"), ); let mut encoded = vec![]; - let encoded_size = es + let (encoded_size, _json_size) = es .request_builder .encoder .encode_input( @@ -299,7 +299,7 @@ async fn encode_datastream_mode_no_sync() { ); let mut encoded = vec![]; - let encoded_size = es + let (encoded_size, _json_size) = es .request_builder .encoder .encode_input( @@ -339,7 +339,7 @@ async fn allows_using_except_fields() { log.insert("idx", "purple"); let mut encoded = vec![]; - let encoded_size = es + let (encoded_size, _json_size) = es .request_builder .encoder .encode_input( @@ -374,7 +374,7 @@ async fn allows_using_only_fields() { log.insert("idx", "purple"); let mut encoded = vec![]; - let encoded_size = es + let (encoded_size, _json_size) = es .request_builder .encoder .encode_input( diff --git a/src/sinks/gcp/chronicle_unstructured.rs b/src/sinks/gcp/chronicle_unstructured.rs index 2e91b15ca3548..5b65089852ea0 100644 --- a/src/sinks/gcp/chronicle_unstructured.rs +++ b/src/sinks/gcp/chronicle_unstructured.rs @@ -12,12 +12,13 @@ use snafu::Snafu; use std::io; use tokio_util::codec::Encoder as _; use tower::{Service, ServiceBuilder}; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; +use vector_common::request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}; use 
vector_config::configurable_component; use vector_core::{ - config::{AcknowledgementsConfig, Input}, + config::{telemetry, AcknowledgementsConfig, Input}, event::{Event, EventFinalizers, Finalizable}, sink::VectorSink, + EstimatedJsonEncodedSizeOf, }; use vrl::value::Kind; @@ -307,9 +308,10 @@ impl Encoder<(String, Vec)> for ChronicleEncoder { &self, input: (String, Vec), writer: &mut dyn io::Write, - ) -> io::Result { + ) -> io::Result<(usize, GroupedCountByteSize)> { let (partition_key, events) = input; let mut encoder = self.encoder.clone(); + let mut byte_size = telemetry().create_request_count_byte_size(); let events = events .into_iter() .filter_map(|mut event| { @@ -320,6 +322,9 @@ impl Encoder<(String, Vec)> for ChronicleEncoder { .cloned(); let mut bytes = BytesMut::new(); self.transformer.transform(&mut event); + + byte_size.add_event(&event, event.estimated_json_encoded_size_of()); + encoder.encode(event, &mut bytes).ok()?; let mut value = json!({ @@ -349,7 +354,7 @@ impl Encoder<(String, Vec)> for ChronicleEncoder { Ok(()) })?; - Ok(size) + Ok((size, byte_size)) } } diff --git a/src/sinks/gcp/cloud_storage.rs b/src/sinks/gcp/cloud_storage.rs index ff4f6bb378a21..45b051a87ca3d 100644 --- a/src/sinks/gcp/cloud_storage.rs +++ b/src/sinks/gcp/cloud_storage.rs @@ -410,7 +410,9 @@ mod tests { use codecs::encoding::FramingConfig; use codecs::{JsonSerializerConfig, NewlineDelimitedEncoderConfig, TextSerializerConfig}; use futures_util::{future::ready, stream}; + use vector_common::request_metadata::GroupedCountByteSize; use vector_core::partition::Partitioner; + use vector_core::EstimatedJsonEncodedSizeOf; use crate::event::LogEvent; use crate::test_util::{ @@ -491,10 +493,14 @@ mod tests { .unwrap() .partition(&log) .expect("key wasn't provided"); + + let mut byte_size = GroupedCountByteSize::new_untagged(); + byte_size.add_event(&log, log.estimated_json_encoded_size_of()); + let request_settings = request_settings(&sink_config); let (metadata, metadata_request_builder, _events) = request_settings.split_input((key, vec![log])); - let payload = EncodeResult::uncompressed(Bytes::new()); + let payload = EncodeResult::uncompressed(Bytes::new(), byte_size); let request_metadata = metadata_request_builder.build(&payload); request_settings.build_request(metadata, request_metadata, payload) diff --git a/src/sinks/loki/event.rs b/src/sinks/loki/event.rs index 22d399f970710..28a115243c3e8 100644 --- a/src/sinks/loki/event.rs +++ b/src/sinks/loki/event.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, io}; use crate::sinks::{prelude::*, util::encoding::Encoder}; use bytes::Bytes; use serde::{ser::SerializeSeq, Serialize}; +use vector_core::config::telemetry; pub type Labels = Vec<(String, String)>; @@ -20,8 +21,13 @@ impl Encoder> for LokiBatchEncoder { &self, input: Vec, writer: &mut dyn io::Write, - ) -> io::Result { + ) -> io::Result<(usize, GroupedCountByteSize)> { let count = input.len(); + let mut byte_size = telemetry().create_request_count_byte_size(); + for event in &input { + byte_size.add_event(event, event.estimated_json_encoded_size_of()); + } + let batch = LokiBatch::from(input); let body = match self.0 { LokiBatchEncoding::Json => { @@ -52,7 +58,7 @@ impl Encoder> for LokiBatchEncoder { batch.encode() } }; - write_all(writer, count, &body).map(|()| body.len()) + write_all(writer, count, &body).map(|()| (body.len(), byte_size)) } } diff --git a/src/sinks/new_relic/encoding.rs b/src/sinks/new_relic/encoding.rs index ec71aa4f40879..0188761d011a7 100644 --- 
a/src/sinks/new_relic/encoding.rs +++ b/src/sinks/new_relic/encoding.rs @@ -1,6 +1,8 @@ use std::io; use serde::Serialize; +use vector_common::request_metadata::GroupedCountByteSize; +use vector_core::config::telemetry; use super::{NewRelicApiModel, NewRelicSinkError}; use crate::sinks::util::encoding::{as_tracked_write, Encoder}; @@ -12,7 +14,7 @@ impl Encoder> for NewRelicEncoder { &self, input: Result, writer: &mut dyn io::Write, - ) -> io::Result { + ) -> io::Result<(usize, GroupedCountByteSize)> { let json = match input? { NewRelicApiModel::Events(ev_api_model) => to_json(&ev_api_model)?, NewRelicApiModel::Metrics(met_api_model) => to_json(&met_api_model)?, @@ -22,7 +24,11 @@ impl Encoder> for NewRelicEncoder { writer.write_all(json)?; Ok(()) })?; - io::Result::Ok(size) + + // TODO This should not be zero. + let byte_size = telemetry().create_request_count_byte_size(); + + io::Result::Ok((size, byte_size)) } } diff --git a/src/sinks/pulsar/encoder.rs b/src/sinks/pulsar/encoder.rs index 63d25f2e05711..e324c4b0ea376 100644 --- a/src/sinks/pulsar/encoder.rs +++ b/src/sinks/pulsar/encoder.rs @@ -6,6 +6,8 @@ use crate::{ use bytes::BytesMut; use std::io; use tokio_util::codec::Encoder as _; +use vector_common::request_metadata::GroupedCountByteSize; +use vector_core::{config::telemetry, EstimatedJsonEncodedSizeOf}; #[derive(Clone, Debug)] pub(super) struct PulsarEncoder { @@ -14,9 +16,17 @@ pub(super) struct PulsarEncoder { } impl Encoder for PulsarEncoder { - fn encode_input(&self, mut input: Event, writer: &mut dyn io::Write) -> io::Result { + fn encode_input( + &self, + mut input: Event, + writer: &mut dyn io::Write, + ) -> io::Result<(usize, GroupedCountByteSize)> { let mut body = BytesMut::new(); self.transformer.transform(&mut input); + + let mut byte_size = telemetry().create_request_count_byte_size(); + byte_size.add_event(&input, input.estimated_json_encoded_size_of()); + let mut encoder = self.encoder.clone(); encoder .encode(input, &mut body) @@ -25,6 +35,6 @@ impl Encoder for PulsarEncoder { let body = body.freeze(); write_all(writer, 1, body.as_ref())?; - Ok(body.len()) + Ok((body.len(), byte_size)) } } diff --git a/src/sinks/splunk_hec/logs/encoder.rs b/src/sinks/splunk_hec/logs/encoder.rs index 695294cefae70..0f0270ce28a42 100644 --- a/src/sinks/splunk_hec/logs/encoder.rs +++ b/src/sinks/splunk_hec/logs/encoder.rs @@ -3,6 +3,8 @@ use std::borrow::Cow; use bytes::BytesMut; use serde::Serialize; use tokio_util::codec::Encoder as _; +use vector_common::request_metadata::GroupedCountByteSize; +use vector_core::{config::telemetry, EstimatedJsonEncodedSizeOf}; use super::sink::HecProcessedEvent; use crate::{ @@ -63,8 +65,9 @@ impl Encoder> for HecLogsEncoder { &self, input: Vec, writer: &mut dyn std::io::Write, - ) -> std::io::Result { + ) -> std::io::Result<(usize, GroupedCountByteSize)> { let mut encoder = self.encoder.clone(); + let mut byte_size = telemetry().create_request_count_byte_size(); let encoded_input: Vec = input .into_iter() .filter_map(|processed_event| { @@ -72,6 +75,8 @@ impl Encoder> for HecLogsEncoder { let metadata = processed_event.metadata; self.transformer.transform(&mut event); + byte_size.add_event(&event, event.estimated_json_encoded_size_of()); + let mut bytes = BytesMut::new(); match metadata.endpoint_target { @@ -128,6 +133,6 @@ impl Encoder> for HecLogsEncoder { let encoded_size = encoded_input.len(); writer.write_all(encoded_input.as_slice())?; - Ok(encoded_size) + Ok((encoded_size, byte_size)) } } diff --git 
a/src/sinks/splunk_hec/metrics/encoder.rs b/src/sinks/splunk_hec/metrics/encoder.rs index fea4c058ea72c..5b0198825f9eb 100644 --- a/src/sinks/splunk_hec/metrics/encoder.rs +++ b/src/sinks/splunk_hec/metrics/encoder.rs @@ -1,6 +1,8 @@ use std::{collections::BTreeMap, iter}; use serde::Serialize; +use vector_common::request_metadata::GroupedCountByteSize; +use vector_core::{config::telemetry, EstimatedJsonEncodedSizeOf}; use super::sink::HecProcessedEvent; use crate::{internal_events::SplunkEventEncodeError, sinks::util::encoding::Encoder}; @@ -95,7 +97,12 @@ impl Encoder> for HecMetricsEncoder { &self, input: Vec, writer: &mut dyn std::io::Write, - ) -> std::io::Result { + ) -> std::io::Result<(usize, GroupedCountByteSize)> { + let mut byte_size = telemetry().create_request_count_byte_size(); + for event in &input { + byte_size.add_event(event, event.estimated_json_encoded_size_of()); + } + let encoded_input: Vec = input .into_iter() .filter_map(Self::encode_event) @@ -103,6 +110,6 @@ impl Encoder> for HecMetricsEncoder { .collect(); let encoded_size = encoded_input.len(); writer.write_all(encoded_input.as_slice())?; - Ok(encoded_size) + Ok((encoded_size, byte_size)) } } diff --git a/src/sinks/statsd/request_builder.rs b/src/sinks/statsd/request_builder.rs index 9cfdf119a08d3..08034cb101f5b 100644 --- a/src/sinks/statsd/request_builder.rs +++ b/src/sinks/statsd/request_builder.rs @@ -4,7 +4,11 @@ use bytes::BytesMut; use snafu::Snafu; use tokio_util::codec::Encoder; use vector_common::request_metadata::RequestMetadata; -use vector_core::event::{EventFinalizers, Finalizable, Metric}; +use vector_core::{ + config::telemetry, + event::{EventFinalizers, Finalizable, Metric}, + EstimatedJsonEncodedSizeOf, +}; use super::{encoder::StatsdEncoder, service::StatsdRequest}; use crate::{ @@ -79,6 +83,7 @@ impl IncrementalRequestBuilder> for StatsdRequestBuilder { let mut metrics = input.drain(..); while metrics.len() != 0 || pending.is_some() { + let mut byte_size = telemetry().create_request_count_byte_size(); let mut n = 0; let mut request_buf = Vec::new(); @@ -95,6 +100,8 @@ impl IncrementalRequestBuilder> for StatsdRequestBuilder { }, }; + byte_size.add_event(&metric, metric.estimated_json_encoded_size_of()); + // Encode the metric. Once we've done that, see if it can fit into the request // buffer without exceeding the maximum request size limit. // @@ -131,7 +138,7 @@ impl IncrementalRequestBuilder> for StatsdRequestBuilder { // If we encoded one or more metrics this pass, finalize the request. if n > 0 { - let encode_result = EncodeResult::uncompressed(request_buf); + let encode_result = EncodeResult::uncompressed(request_buf, byte_size); let request_metadata = request_metadata_builder.build(&encode_result); results.push(Ok(( diff --git a/src/sinks/util/encoding.rs b/src/sinks/util/encoding.rs index 00dc6944bdbad..b60e7ef177410 100644 --- a/src/sinks/util/encoding.rs +++ b/src/sinks/util/encoding.rs @@ -3,6 +3,8 @@ use std::io; use bytes::BytesMut; use codecs::encoding::Framer; use tokio_util::codec::Encoder as _; +use vector_common::request_metadata::GroupedCountByteSize; +use vector_core::{config::telemetry, EstimatedJsonEncodedSizeOf}; use crate::{codecs::Transformer, event::Event, internal_events::EncoderWriteError}; @@ -12,7 +14,11 @@ pub trait Encoder { /// # Errors /// /// If an I/O error is encountered while encoding the input, an error variant will be returned. 
- fn encode_input(&self, input: T, writer: &mut dyn io::Write) -> io::Result; + fn encode_input( + &self, + input: T, + writer: &mut dyn io::Write, + ) -> io::Result<(usize, GroupedCountByteSize)>; } impl Encoder> for (Transformer, crate::codecs::Encoder) { @@ -20,13 +26,16 @@ impl Encoder> for (Transformer, crate::codecs::Encoder) { &self, mut events: Vec, writer: &mut dyn io::Write, - ) -> io::Result { + ) -> io::Result<(usize, GroupedCountByteSize)> { let mut encoder = self.1.clone(); let mut bytes_written = 0; let mut n_events_pending = events.len(); let batch_prefix = encoder.batch_prefix(); write_all(writer, n_events_pending, batch_prefix)?; bytes_written += batch_prefix.len(); + + let mut byte_size = telemetry().create_request_count_byte_size(); + if let Some(last) = events.pop() { for mut event in events { self.0.transform(&mut event); @@ -40,6 +49,11 @@ impl Encoder> for (Transformer, crate::codecs::Encoder) { } let mut event = last; self.0.transform(&mut event); + + // Ensure the json size is calculated after any fields have been removed + // by the transformer. + byte_size.add_event(&event, event.estimated_json_encoded_size_of()); + let mut bytes = BytesMut::new(); encoder .serialize(event, &mut bytes) @@ -53,20 +67,28 @@ impl Encoder> for (Transformer, crate::codecs::Encoder) { write_all(writer, 0, batch_suffix)?; bytes_written += batch_suffix.len(); - Ok(bytes_written) + Ok((bytes_written, byte_size)) } } impl Encoder for (Transformer, crate::codecs::Encoder<()>) { - fn encode_input(&self, mut event: Event, writer: &mut dyn io::Write) -> io::Result { + fn encode_input( + &self, + mut event: Event, + writer: &mut dyn io::Write, + ) -> io::Result<(usize, GroupedCountByteSize)> { let mut encoder = self.1.clone(); self.0.transform(&mut event); + + let mut byte_size = telemetry().create_request_count_byte_size(); + byte_size.add_event(&event, event.estimated_json_encoded_size_of()); + let mut bytes = BytesMut::new(); encoder .serialize(event, &mut bytes) .map_err(|error| io::Error::new(io::ErrorKind::InvalidData, error))?; write_all(writer, 1, &bytes)?; - Ok(bytes.len()) + Ok((bytes.len(), byte_size)) } } @@ -144,7 +166,7 @@ mod tests { ); let mut writer = Vec::new(); - let written = encoding.encode_input(vec![], &mut writer).unwrap(); + let (written, _json_size) = encoding.encode_input(vec![], &mut writer).unwrap(); assert_eq!(written, 2); assert_eq!(String::from_utf8(writer).unwrap(), "[]"); @@ -161,7 +183,7 @@ mod tests { ); let mut writer = Vec::new(); - let written = encoding + let (written, _json_size) = encoding .encode_input( vec![Event::Log(LogEvent::from(BTreeMap::from([( String::from("key"), @@ -186,7 +208,7 @@ mod tests { ); let mut writer = Vec::new(); - let written = encoding + let (written, _json_size) = encoding .encode_input( vec![ Event::Log(LogEvent::from(BTreeMap::from([( @@ -224,7 +246,7 @@ mod tests { ); let mut writer = Vec::new(); - let written = encoding.encode_input(vec![], &mut writer).unwrap(); + let (written, _json_size) = encoding.encode_input(vec![], &mut writer).unwrap(); assert_eq!(written, 0); assert_eq!(String::from_utf8(writer).unwrap(), ""); @@ -241,7 +263,7 @@ mod tests { ); let mut writer = Vec::new(); - let written = encoding + let (written, _json_size) = encoding .encode_input( vec![Event::Log(LogEvent::from(BTreeMap::from([( String::from("key"), @@ -266,7 +288,7 @@ mod tests { ); let mut writer = Vec::new(); - let written = encoding + let (written, _json_size) = encoding .encode_input( vec![ Event::Log(LogEvent::from(BTreeMap::from([( @@ 
-301,7 +323,7 @@ mod tests { ); let mut writer = Vec::new(); - let written = encoding + let (written, _json_size) = encoding .encode_input( Event::Log(LogEvent::from(BTreeMap::from([( String::from("key"), @@ -323,7 +345,7 @@ mod tests { ); let mut writer = Vec::new(); - let written = encoding + let (written, _json_size) = encoding .encode_input( Event::Log(LogEvent::from(BTreeMap::from([( String::from("message"), diff --git a/src/sinks/util/request_builder.rs b/src/sinks/util/request_builder.rs index 87280d2ec966e..86501f3a96a04 100644 --- a/src/sinks/util/request_builder.rs +++ b/src/sinks/util/request_builder.rs @@ -1,13 +1,14 @@ use std::io; use bytes::Bytes; -use vector_common::request_metadata::RequestMetadata; +use vector_common::request_metadata::{GroupedCountByteSize, RequestMetadata}; use super::{encoding::Encoder, metadata::RequestMetadataBuilder, Compression, Compressor}; pub struct EncodeResult
<P>
{ pub payload: P, pub uncompressed_byte_size: usize, + pub transformed_json_size: GroupedCountByteSize, pub compressed_byte_size: Option, } @@ -15,20 +16,26 @@ impl
<P>
EncodeResult<P>
where P: AsRef<[u8]>, { - pub fn uncompressed(payload: P) -> Self { + pub fn uncompressed(payload: P, transformed_json_size: GroupedCountByteSize) -> Self { let uncompressed_byte_size = payload.as_ref().len(); Self { payload, uncompressed_byte_size, + transformed_json_size, compressed_byte_size: None, } } - pub fn compressed(payload: P, uncompressed_byte_size: usize) -> Self { + pub fn compressed( + payload: P, + uncompressed_byte_size: usize, + transformed_json_size: GroupedCountByteSize, + ) -> Self { let compressed_byte_size = payload.as_ref().len(); Self { payload, uncompressed_byte_size, + transformed_json_size, compressed_byte_size: Some(compressed_byte_size), } } @@ -74,14 +81,14 @@ pub trait RequestBuilder { // of clash-y with `Self::Metadata`. let mut compressor = Compressor::from(self.compression()); let is_compressed = compressor.is_compressed(); - _ = self.encoder().encode_input(events, &mut compressor)?; + let (_, json_size) = self.encoder().encode_input(events, &mut compressor)?; let payload = compressor.into_inner().freeze(); let result = if is_compressed { let compressed_byte_size = payload.len(); - EncodeResult::compressed(payload.into(), compressed_byte_size) + EncodeResult::compressed(payload.into(), compressed_byte_size, json_size) } else { - EncodeResult::uncompressed(payload.into()) + EncodeResult::uncompressed(payload.into(), json_size) }; Ok(result) diff --git a/src/sinks/webhdfs/test.rs b/src/sinks/webhdfs/test.rs index 0a558c3470225..7d4aa66e2e251 100644 --- a/src/sinks/webhdfs/test.rs +++ b/src/sinks/webhdfs/test.rs @@ -1,5 +1,6 @@ use bytes::Bytes; use codecs::{encoding::Framer, JsonSerializerConfig, NewlineDelimitedEncoderConfig}; +use vector_common::request_metadata::GroupedCountByteSize; use vector_core::partition::Partitioner; use super::config::WebHdfsConfig; @@ -66,7 +67,8 @@ fn build_request(compression: Compression) -> OpenDalRequest { let request_builder = request_builder(&sink_config); let (metadata, metadata_request_builder, _events) = request_builder.split_input((key, vec![log])); - let payload = EncodeResult::uncompressed(Bytes::new()); + let byte_size = GroupedCountByteSize::new_untagged(); + let payload = EncodeResult::uncompressed(Bytes::new(), byte_size); let request_metadata = metadata_request_builder.build(&payload); request_builder.build_request(metadata, request_metadata, payload) From 7f13f70d4f7852801e530d7bf057f6b2ee04097b Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Tue, 11 Jul 2023 11:05:53 +0100 Subject: [PATCH 2/9] Ensure dropped fields are still available Signed-off-by: Stephen Wakely --- lib/vector-core/src/event/log_event.rs | 12 ++- lib/vector-core/src/event/metadata.rs | 18 ++++ src/codecs/encoding/transformer.rs | 121 +++++++++++++++++++++++-- src/sinks/kafka/request_builder.rs | 6 +- src/sinks/util/metadata.rs | 10 +- 5 files changed, 154 insertions(+), 13 deletions(-) diff --git a/lib/vector-core/src/event/log_event.rs b/lib/vector-core/src/event/log_event.rs index e5755f12d7e66..4f210f10795cb 100644 --- a/lib/vector-core/src/event/log_event.rs +++ b/lib/vector-core/src/event/log_event.rs @@ -293,10 +293,14 @@ impl LogEvent { } pub fn get_by_meaning(&self, meaning: impl AsRef) -> Option<&Value> { - self.metadata() - .schema_definition() - .meaning_path(meaning.as_ref()) - .and_then(|path| self.get(path)) + if let Some(dropped) = self.metadata().dropped_field(&meaning) { + Some(dropped) + } else { + self.metadata() + .schema_definition() + .meaning_path(meaning.as_ref()) + .and_then(|path| self.get(path)) + } } // 
TODO(Jean): Once the event API uses `Lookup`, the allocation here can be removed. diff --git a/lib/vector-core/src/event/metadata.rs b/lib/vector-core/src/event/metadata.rs index d86884be7582c..042fda57c5eaa 100644 --- a/lib/vector-core/src/event/metadata.rs +++ b/lib/vector-core/src/event/metadata.rs @@ -46,6 +46,12 @@ pub struct EventMetadata { /// TODO(Jean): must not skip serialization to track schemas across restarts. #[serde(default = "default_schema_definition", skip)] schema_definition: Arc, + + /// A store of values that may be dropped during the encoding process but may be needed + /// later on. The map is indexed by meaning. + /// Currently this is just used for the `service`. If the service field is dropped by `only_fields` + /// we need to ensure it is still available later on for emitting metrics tagged by the service. + dropped_fields: BTreeMap, } fn default_metadata_value() -> Value { @@ -123,6 +129,17 @@ impl EventMetadata { pub fn set_splunk_hec_token(&mut self, secret: Arc) { self.secrets.insert(SPLUNK_HEC_TOKEN, secret); } + + /// Adds the value to the dropped fields list, returning the + /// position the value will be found in the list. + pub fn add_dropped_field(&mut self, meaning: String, value: Value) { + self.dropped_fields.insert(meaning, value); + } + + /// Fetches the dropped field by meaning. + pub fn dropped_field(&self, meaning: impl AsRef) -> Option<&Value> { + self.dropped_fields.get(meaning.as_ref()) + } } impl Default for EventMetadata { @@ -134,6 +151,7 @@ impl Default for EventMetadata { schema_definition: default_schema_definition(), source_id: None, upstream_id: None, + dropped_fields: BTreeMap::new(), } } } diff --git a/src/codecs/encoding/transformer.rs b/src/codecs/encoding/transformer.rs index 79137020e6a10..d51ca4e04960d 100644 --- a/src/codecs/encoding/transformer.rs +++ b/src/codecs/encoding/transformer.rs @@ -128,11 +128,23 @@ impl Transformer { fn apply_only_fields(&self, log: &mut LogEvent) { if let Some(only_fields) = self.only_fields.as_ref() { - let old_value = std::mem::replace(log.value_mut(), Value::Object(BTreeMap::new())); + let mut old_value = std::mem::replace(log.value_mut(), Value::Object(BTreeMap::new())); for field in only_fields { - if let Some(value) = old_value.get(field) { - log.insert((PathPrefix::Event, field), value.clone()); + if let Some(value) = old_value.remove(field, true) { + log.insert((PathPrefix::Event, field), value); + } + } + + // We may need the service field to apply tags to emitted metrics after the log message has been pruned. If there + // is a service meaning, we move this value to `dropped_fields` in the metadata. + // If the field is still in the new log message after pruning it will have been removed from `old_value` above. + let service_path = log.metadata().schema_definition().meaning_path("service"); + if let Some(service_path) = service_path { + let mut new_log = LogEvent::from(old_value); + if let Some(service) = new_log.remove(service_path) { + log.metadata_mut() + .add_dropped_field("service".to_string(), service); } } } @@ -141,7 +153,20 @@ impl Transformer { fn apply_except_fields(&self, log: &mut LogEvent) { if let Some(except_fields) = self.except_fields.as_ref() { for field in except_fields { - log.remove(field.as_str()); + let value = log.remove(field.as_str()); + + // If we are removing the service field we need to store this in a `dropped_fields` list as we may need to + // refer to this later when emitting metrics. 
+ if let Some(v) = value { + if let Some(path) = log.metadata().schema_definition().meaning_path("service") { + use lookup::path::TargetPath; + + if &path.value_path().to_string() == field { + log.metadata_mut() + .add_dropped_field("service".to_string(), v); + } + } + } } } } @@ -213,10 +238,15 @@ pub enum TimestampFormat { #[cfg(test)] mod tests { use indoc::indoc; - use vector_core::config::log_schema; + use lookup::path::parse_target_path; + use vector_common::btreemap; + use vector_core::config::{log_schema, LogNamespace}; + use vrl::value::Kind; + + use crate::config::schema; use super::*; - use std::collections::BTreeMap; + use std::{collections::BTreeMap, sync::Arc}; #[test] fn serialize() { @@ -374,4 +404,83 @@ mod tests { "#}); assert!(config.is_err()) } + + #[test] + fn only_fields_with_service() { + let transformer: Transformer = toml::from_str(r#"only_fields = ["message"]"#).unwrap(); + let mut log = LogEvent::default(); + { + log.insert("message", 1); + log.insert("thing.service", "carrot"); + } + + let schema = schema::Definition::new_with_default_metadata( + Kind::object(btreemap! { + "thing" => Kind::object(btreemap! { + "service" => Kind::bytes(), + }) + }), + [LogNamespace::Vector], + ); + + let schema = schema.with_meaning(parse_target_path("thing.service").unwrap(), "service"); + + let mut event = Event::from(log); + + event + .metadata_mut() + .set_schema_definition(&Arc::new(schema)); + + transformer.transform(&mut event); + assert!(event.as_mut_log().contains("message")); + + // Event no longer contains the service field. + assert!(!event.as_mut_log().contains("thing.service")); + + // But we can still get the service by meaning. + assert_eq!( + &Value::from("carrot"), + event.as_log().get_by_meaning("service").unwrap() + ); + } + + #[test] + fn except_fields_with_service() { + let transformer: Transformer = + toml::from_str(r#"except_fields = ["thing.service"]"#).unwrap(); + let mut log = LogEvent::default(); + { + log.insert("message", 1); + log.insert("thing.service", "carrot"); + } + + let schema = schema::Definition::new_with_default_metadata( + Kind::object(btreemap! { + "thing" => Kind::object(btreemap! { + "service" => Kind::bytes(), + }) + }), + [LogNamespace::Vector], + ); + + let schema = schema.with_meaning(parse_target_path("thing.service").unwrap(), "service"); + + let mut event = Event::from(log); + + event + .metadata_mut() + .set_schema_definition(&Arc::new(schema)); + + transformer.transform(&mut event); + assert!(event.as_mut_log().contains("message")); + + // Event no longer contains the service field. + assert!(!event.as_mut_log().contains("thing.service")); + + // But we can still get the service by meaning. + assert_eq!( + &Value::from("carrot"), + event.as_log().get_by_meaning("service").unwrap() + ); + } } diff --git a/src/sinks/kafka/request_builder.rs b/src/sinks/kafka/request_builder.rs index 9d1edd0d97c43..c2d3c7aaa9219 100644 --- a/src/sinks/kafka/request_builder.rs +++ b/src/sinks/kafka/request_builder.rs @@ -37,8 +37,6 @@ impl KafkaRequestBuilder { }) .ok()?; - let metadata_builder = RequestMetadataBuilder::from_event(&event); - let metadata = KafkaRequestMetadata { finalizers: event.take_finalizers(), key: get_key(&event, &self.key_field), @@ -48,6 +46,10 @@ impl KafkaRequestBuilder { }; self.transformer.transform(&mut event); let mut body = BytesMut::new(); + + // Ensure the metadata builder is built after transforming the event so we have the event + // size taking into account any dropped fields. 
+ let metadata_builder = RequestMetadataBuilder::from_event(&event); self.encoder.encode(event, &mut body).ok()?; let body = body.freeze(); diff --git a/src/sinks/util/metadata.rs b/src/sinks/util/metadata.rs index e6f4e7739e4d2..4230b7b100d88 100644 --- a/src/sinks/util/metadata.rs +++ b/src/sinks/util/metadata.rs @@ -79,6 +79,8 @@ impl RequestMetadataBuilder { .add_event(&event, json_size); } + /// Builds the [`RequestMetadata`] with the given size. + /// This is used when there is no encoder in the process to provide an `EncodeResult` pub fn with_request_size(&self, size: NonZeroUsize) -> RequestMetadata { let size = size.get(); @@ -91,6 +93,10 @@ impl RequestMetadataBuilder { ) } + /// Builds the [`RequestMetadata`] from the results of encoding. + /// `EncodeResult` provides is with the byte size before and after compression + /// and the json size of the events after transforming (dropping unwanted fields) but + /// before encoding. pub fn build(&self, result: &EncodeResult) -> RequestMetadata { RequestMetadata::new( self.event_count, @@ -99,7 +105,9 @@ impl RequestMetadataBuilder { result .compressed_byte_size .unwrap_or(result.uncompressed_byte_size), - self.events_estimated_json_encoded_byte_size.clone(), + // Building from an encoded result, we take the json size from the encoded since that has the size + // after transforming the event. + result.transformed_json_size.clone(), ) } } From acf5cd38595cc1e5a7ecde1e61e0cdf88db3432c Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Tue, 11 Jul 2023 11:28:57 +0100 Subject: [PATCH 3/9] Comment about static str Signed-off-by: Stephen Wakely --- lib/vector-core/src/event/metadata.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/vector-core/src/event/metadata.rs b/lib/vector-core/src/event/metadata.rs index 042fda57c5eaa..0ab76350b6475 100644 --- a/lib/vector-core/src/event/metadata.rs +++ b/lib/vector-core/src/event/metadata.rs @@ -51,6 +51,8 @@ pub struct EventMetadata { /// later on. The map is indexed by meaning. /// Currently this is just used for the `service`. If the service field is dropped by `only_fields` /// we need to ensure it is still available later on for emitting metrics tagged by the service. + /// This field could almost be keyed by `&'static str`, but because it needs to be deserializable + /// we have to use `String`. dropped_fields: BTreeMap, } From 6a808c9160a45bd84021f319d4243c5c6242d7be Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Wed, 12 Jul 2023 11:46:16 +0100 Subject: [PATCH 4/9] Responding to feedback Signed-off-by: Stephen Wakely --- lib/vector-core/src/event/metadata.rs | 3 +-- lib/vector-core/src/schema/meanings.rs | 17 +++++++++++++++++ lib/vector-core/src/schema/mod.rs | 1 + src/codecs/encoding/transformer.rs | 26 +++++++++++++++++--------- src/sinks/datadog/logs/config.rs | 16 ++++++++-------- src/sinks/new_relic/sink.rs | 5 +++++ src/sinks/util/metadata.rs | 2 +- src/sources/splunk_hec/mod.rs | 7 ++++--- 8 files changed, 54 insertions(+), 23 deletions(-) create mode 100644 lib/vector-core/src/schema/meanings.rs diff --git a/lib/vector-core/src/event/metadata.rs b/lib/vector-core/src/event/metadata.rs index 0ab76350b6475..c71b4287264bb 100644 --- a/lib/vector-core/src/event/metadata.rs +++ b/lib/vector-core/src/event/metadata.rs @@ -132,8 +132,7 @@ impl EventMetadata { self.secrets.insert(SPLUNK_HEC_TOKEN, secret); } - /// Adds the value to the dropped fields list, returning the - /// position the value will be found in the list. + /// Adds the value to the dropped fields list. 
pub fn add_dropped_field(&mut self, meaning: String, value: Value) { self.dropped_fields.insert(meaning, value); } diff --git a/lib/vector-core/src/schema/meanings.rs b/lib/vector-core/src/schema/meanings.rs new file mode 100644 index 0000000000000..119db13a5e6bd --- /dev/null +++ b/lib/vector-core/src/schema/meanings.rs @@ -0,0 +1,17 @@ +///! Constants for commonly used meanings. + +/// The service typically represents the application that generated the event. +pub const SERVICE: &'static str = "service"; + +/// The main text message of the event. +pub const MESSAGE: &'static str = "message"; + +/// The main timestamp of the event. +pub const TIMESTAMP: &'static str = "timestamp"; + +/// The hostname of the machine where the event was generated. +pub const HOST: &'static str = "host"; + +pub const SOURCE: &'static str = "source"; +pub const SEVERITY: &'static str = "severity"; +pub const TRACE_ID: &'static str = "trace_id"; diff --git a/lib/vector-core/src/schema/mod.rs b/lib/vector-core/src/schema/mod.rs index 96f6d99442fa8..87a2418ec323a 100644 --- a/lib/vector-core/src/schema/mod.rs +++ b/lib/vector-core/src/schema/mod.rs @@ -1,4 +1,5 @@ mod definition; +pub mod meanings; mod requirement; pub use definition::Definition; diff --git a/src/codecs/encoding/transformer.rs b/src/codecs/encoding/transformer.rs index d51ca4e04960d..4b26438dfa8f2 100644 --- a/src/codecs/encoding/transformer.rs +++ b/src/codecs/encoding/transformer.rs @@ -12,6 +12,7 @@ use lookup::{ use serde::{Deserialize, Deserializer}; use vector_config::configurable_component; use vector_core::event::{LogEvent, MaybeAsLogMut}; +use vector_core::schema::meanings; use vrl::value::Value; use crate::{event::Event, serde::skip_serializing_if_default}; @@ -139,32 +140,39 @@ impl Transformer { // We may need the service field to apply tags to emitted metrics after the log message has been pruned. If there // is a service meaning, we move this value to `dropped_fields` in the metadata. // If the field is still in the new log message after pruning it will have been removed from `old_value` above. - let service_path = log.metadata().schema_definition().meaning_path("service"); + let service_path = log + .metadata() + .schema_definition() + .meaning_path(meanings::SERVICE); if let Some(service_path) = service_path { let mut new_log = LogEvent::from(old_value); if let Some(service) = new_log.remove(service_path) { log.metadata_mut() - .add_dropped_field("service".to_string(), service); + .add_dropped_field(meanings::SERVICE.to_string(), service); } } } } fn apply_except_fields(&self, log: &mut LogEvent) { + use lookup::path::TargetPath; + if let Some(except_fields) = self.except_fields.as_ref() { + let service_path = log + .metadata() + .schema_definition() + .meaning_path(meanings::SERVICE) + .map(|path| path.value_path().to_string()); + for field in except_fields { let value = log.remove(field.as_str()); // If we are removing the service field we need to store this in a `dropped_fields` list as we may need to // refer to this later when emitting metrics. 
if let Some(v) = value { - if let Some(path) = log.metadata().schema_definition().meaning_path("service") { - use lookup::path::TargetPath; - - if &path.value_path().to_string() == field { - log.metadata_mut() - .add_dropped_field("service".to_string(), v); - } + if matches!(service_path.as_ref(), Some(path) if path == field) { + log.metadata_mut() + .add_dropped_field(meanings::SERVICE.to_string(), v); } } } diff --git a/src/sinks/datadog/logs/config.rs b/src/sinks/datadog/logs/config.rs index 80fb5293257a1..19c3c3fff9475 100644 --- a/src/sinks/datadog/logs/config.rs +++ b/src/sinks/datadog/logs/config.rs @@ -3,7 +3,7 @@ use std::{convert::TryFrom, sync::Arc}; use indoc::indoc; use tower::ServiceBuilder; use vector_config::configurable_component; -use vector_core::config::proxy::ProxyConfig; +use vector_core::{config::proxy::ProxyConfig, schema::meanings}; use vrl::value::Kind; use super::{service::LogApiRetry, sink::LogSinkBuilder}; @@ -176,13 +176,13 @@ impl SinkConfig for DatadogLogsConfig { fn input(&self) -> Input { let requirement = schema::Requirement::empty() - .required_meaning("message", Kind::bytes()) - .required_meaning("timestamp", Kind::timestamp()) - .optional_meaning("host", Kind::bytes()) - .optional_meaning("source", Kind::bytes()) - .optional_meaning("severity", Kind::bytes()) - .optional_meaning("service", Kind::bytes()) - .optional_meaning("trace_id", Kind::bytes()); + .required_meaning(meanings::MESSAGE, Kind::bytes()) + .required_meaning(meanings::TIMESTAMP, Kind::timestamp()) + .optional_meaning(meanings::HOST, Kind::bytes()) + .optional_meaning(meanings::SOURCE, Kind::bytes()) + .optional_meaning(meanings::SEVERITY, Kind::bytes()) + .optional_meaning(meanings::SERVICE, Kind::bytes()) + .optional_meaning(meanings::TRACE_ID, Kind::bytes()); Input::log().with_schema_requirement(requirement) } diff --git a/src/sinks/new_relic/sink.rs b/src/sinks/new_relic/sink.rs index c56c4d4b9f425..7b90c499f0136 100644 --- a/src/sinks/new_relic/sink.rs +++ b/src/sinks/new_relic/sink.rs @@ -6,8 +6,10 @@ use futures::stream::{BoxStream, StreamExt}; use tower::Service; use vector_common::request_metadata::RequestMetadata; use vector_core::{ + config::telemetry, event::{EventFinalizers, Finalizable}, stream::{BatcherSettings, DriverResponse}, + EstimatedJsonEncodedSizeOf, }; use super::{ @@ -95,8 +97,11 @@ impl RequestBuilder> for NewRelicRequestBuilder { &self, mut input: Vec, ) -> (Self::Metadata, RequestMetadataBuilder, Self::Events) { + let mut byte_size = telemetry().create_request_count_byte_size(); + for event in input.iter_mut() { self.transformer.transform(event); + byte_size.add_event(event, event.estimated_json_encoded_size_of()); } let builder = RequestMetadataBuilder::from_events(&input); diff --git a/src/sinks/util/metadata.rs b/src/sinks/util/metadata.rs index 4230b7b100d88..ccf39542c6bd3 100644 --- a/src/sinks/util/metadata.rs +++ b/src/sinks/util/metadata.rs @@ -94,7 +94,7 @@ impl RequestMetadataBuilder { } /// Builds the [`RequestMetadata`] from the results of encoding. - /// `EncodeResult` provides is with the byte size before and after compression + /// `EncodeResult` provides us with the byte size before and after compression /// and the json size of the events after transforming (dropping unwanted fields) but /// before encoding. 
pub fn build(&self, result: &EncodeResult) -> RequestMetadata { diff --git a/src/sources/splunk_hec/mod.rs b/src/sources/splunk_hec/mod.rs index 222ffc2487041..713a6b82833ab 100644 --- a/src/sources/splunk_hec/mod.rs +++ b/src/sources/splunk_hec/mod.rs @@ -21,6 +21,7 @@ use vector_config::configurable_component; use vector_core::{ config::{LegacyKey, LogNamespace}, event::BatchNotifier, + schema::meanings, EstimatedJsonEncodedSizeOf, }; use vrl::value::{kind::Collection, Kind}; @@ -183,7 +184,7 @@ impl SourceConfig for SplunkConfig { .with_event_field( &owned_value_path!(log_schema().message_key()), Kind::bytes().or_undefined(), - Some("message"), + Some(meanings::MESSAGE), ) .with_event_field( &owned_value_path!("line"), @@ -205,7 +206,7 @@ impl SourceConfig for SplunkConfig { ))), &owned_value_path!("host"), Kind::bytes(), - Some("host"), + Some(meanings::HOST), ) .with_source_metadata( SplunkConfig::NAME, @@ -226,7 +227,7 @@ impl SourceConfig for SplunkConfig { Some(LegacyKey::Overwrite(owned_value_path!(SOURCE))), &owned_value_path!("source"), Kind::bytes(), - Some("service"), + Some(meanings::SERVICE), ) // Not to be confused with `source_type`. .with_source_metadata( From 64b7d8c9f89a5ed248f6582db2266e315fd5876f Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Wed, 12 Jul 2023 12:30:37 +0100 Subject: [PATCH 5/9] Restructure new relic sink to perform encoding in the Encoder Signed-off-by: Stephen Wakely --- src/sinks/new_relic/config.rs | 19 ++++-------- src/sinks/new_relic/encoding.rs | 42 +++++++++++++++++++------- src/sinks/new_relic/service.rs | 8 +---- src/sinks/new_relic/sink.rs | 52 ++++----------------------------- 4 files changed, 44 insertions(+), 77 deletions(-) diff --git a/src/sinks/new_relic/config.rs b/src/sinks/new_relic/config.rs index 5900755b12207..cea53b06151d1 100644 --- a/src/sinks/new_relic/config.rs +++ b/src/sinks/new_relic/config.rs @@ -1,25 +1,15 @@ use std::{fmt::Debug, sync::Arc}; -use futures::FutureExt; use http::Uri; use tower::ServiceBuilder; use vector_common::sensitive_string::SensitiveString; -use vector_config::configurable_component; use super::{ healthcheck, NewRelicApiResponse, NewRelicApiService, NewRelicEncoder, NewRelicSink, NewRelicSinkError, }; -use crate::{ - codecs::Transformer, - config::{AcknowledgementsConfig, DataType, Input, SinkConfig, SinkContext}, - http::HttpClient, - sinks::util::{ - retries::RetryLogic, service::ServiceBuilderExt, BatchConfig, Compression, - SinkBatchSettings, TowerRequestConfig, - }, - tls::TlsSettings, -}; + +use crate::{http::HttpClient, sinks::prelude::*}; /// New Relic region. 
#[configurable_component] @@ -165,7 +155,10 @@ impl SinkConfig for NewRelicConfig { let sink = NewRelicSink { service, transformer: self.encoding.clone(), - encoder: NewRelicEncoder, + encoder: NewRelicEncoder { + transformer: self.encoding.clone(), + credentials: Arc::clone(&credentials), + }, credentials, compression: self.compression, batcher_settings, diff --git a/src/sinks/new_relic/encoding.rs b/src/sinks/new_relic/encoding.rs index 0188761d011a7..f058161d20fb9 100644 --- a/src/sinks/new_relic/encoding.rs +++ b/src/sinks/new_relic/encoding.rs @@ -1,33 +1,53 @@ -use std::io; +use std::{io, sync::Arc}; use serde::Serialize; use vector_common::request_metadata::GroupedCountByteSize; -use vector_core::config::telemetry; +use vector_core::{config::telemetry, event::Event, EstimatedJsonEncodedSizeOf}; -use super::{NewRelicApiModel, NewRelicSinkError}; -use crate::sinks::util::encoding::{as_tracked_write, Encoder}; +use super::{ + EventsApiModel, LogsApiModel, MetricsApiModel, NewRelicApi, NewRelicApiModel, + NewRelicCredentials, NewRelicSinkError, +}; +use crate::sinks::{ + prelude::*, + util::encoding::{as_tracked_write, Encoder}, +}; -pub struct NewRelicEncoder; +pub struct NewRelicEncoder { + pub(super) transformer: Transformer, + pub(super) credentials: Arc, +} -impl Encoder> for NewRelicEncoder { +impl Encoder> for NewRelicEncoder { fn encode_input( &self, - input: Result, + mut input: Vec, writer: &mut dyn io::Write, ) -> io::Result<(usize, GroupedCountByteSize)> { - let json = match input? { + let mut byte_size = telemetry().create_request_count_byte_size(); + + for event in input.iter_mut() { + self.transformer.transform(event); + byte_size.add_event(event, event.estimated_json_encoded_size_of()); + } + + let api_model = match self.credentials.api { + NewRelicApi::Events => NewRelicApiModel::Events(EventsApiModel::try_from(input)?), + NewRelicApi::Metrics => NewRelicApiModel::Metrics(MetricsApiModel::try_from(input)?), + NewRelicApi::Logs => NewRelicApiModel::Logs(LogsApiModel::try_from(input)?), + }; + + let json = match api_model { NewRelicApiModel::Events(ev_api_model) => to_json(&ev_api_model)?, NewRelicApiModel::Metrics(met_api_model) => to_json(&met_api_model)?, NewRelicApiModel::Logs(log_api_model) => to_json(&log_api_model)?, }; + let size = as_tracked_write::<_, _, io::Error>(writer, &json, |writer, json| { writer.write_all(json)?; Ok(()) })?; - // TODO This should not be zero. 
- let byte_size = telemetry().create_request_count_byte_size(); - io::Result::Ok((size, byte_size)) } } diff --git a/src/sinks/new_relic/service.rs b/src/sinks/new_relic/service.rs index 290276b72b5cf..2b69aae492348 100644 --- a/src/sinks/new_relic/service.rs +++ b/src/sinks/new_relic/service.rs @@ -5,21 +5,15 @@ use std::{ }; use bytes::Bytes; -use futures::future::BoxFuture; use http::{ header::{CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_TYPE}, Request, }; use hyper::Body; -use tower::Service; use tracing::Instrument; -use vector_common::request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}; -use vector_core::{ - event::{EventFinalizers, EventStatus, Finalizable}, - stream::DriverResponse, -}; use super::{NewRelicCredentials, NewRelicSinkError}; +use crate::sinks::prelude::*; use crate::{http::HttpClient, sinks::util::Compression}; #[derive(Debug, Clone)] diff --git a/src/sinks/new_relic/sink.rs b/src/sinks/new_relic/sink.rs index 7b90c499f0136..48057d346c746 100644 --- a/src/sinks/new_relic/sink.rs +++ b/src/sinks/new_relic/sink.rs @@ -1,30 +1,11 @@ -use std::{convert::TryFrom, fmt::Debug, num::NonZeroUsize, sync::Arc}; +use std::{fmt::Debug, num::NonZeroUsize, sync::Arc}; use async_trait::async_trait; use bytes::Bytes; -use futures::stream::{BoxStream, StreamExt}; -use tower::Service; -use vector_common::request_metadata::RequestMetadata; -use vector_core::{ - config::telemetry, - event::{EventFinalizers, Finalizable}, - stream::{BatcherSettings, DriverResponse}, - EstimatedJsonEncodedSizeOf, -}; -use super::{ - EventsApiModel, LogsApiModel, MetricsApiModel, NewRelicApi, NewRelicApiModel, - NewRelicApiRequest, NewRelicCredentials, NewRelicEncoder, -}; +use super::{NewRelicApiRequest, NewRelicCredentials, NewRelicEncoder}; use crate::{ - codecs::Transformer, - event::Event, - http::get_http_scheme_from_uri, - internal_events::SinkRequestBuildError, - sinks::util::{ - builder::SinkBuilderExt, metadata::RequestMetadataBuilder, request_builder::EncodeResult, - Compression, RequestBuilder, StreamSink, - }, + http::get_http_scheme_from_uri, internal_events::SinkRequestBuildError, sinks::prelude::*, }; #[derive(Debug)] @@ -71,7 +52,6 @@ impl From for std::io::Error { } struct NewRelicRequestBuilder { - transformer: Transformer, encoder: NewRelicEncoder, compression: Compression, credentials: Arc, @@ -79,7 +59,7 @@ struct NewRelicRequestBuilder { impl RequestBuilder> for NewRelicRequestBuilder { type Metadata = EventFinalizers; - type Events = Result; + type Events = Vec; type Encoder = NewRelicEncoder; type Payload = Bytes; type Request = NewRelicApiRequest; @@ -97,29 +77,10 @@ impl RequestBuilder> for NewRelicRequestBuilder { &self, mut input: Vec, ) -> (Self::Metadata, RequestMetadataBuilder, Self::Events) { - let mut byte_size = telemetry().create_request_count_byte_size(); - - for event in input.iter_mut() { - self.transformer.transform(event); - byte_size.add_event(event, event.estimated_json_encoded_size_of()); - } - let builder = RequestMetadataBuilder::from_events(&input); - let finalizers = input.take_finalizers(); - let api_model = || -> Result { - match self.credentials.api { - NewRelicApi::Events => { - Ok(NewRelicApiModel::Events(EventsApiModel::try_from(input)?)) - } - NewRelicApi::Metrics => { - Ok(NewRelicApiModel::Metrics(MetricsApiModel::try_from(input)?)) - } - NewRelicApi::Logs => Ok(NewRelicApiModel::Logs(LogsApiModel::try_from(input)?)), - } - }(); - - (finalizers, builder, api_model) + + (finalizers, builder, input) } fn build_request( @@ -157,7 
+118,6 @@ where async fn run_inner(self: Box, input: BoxStream<'_, Event>) -> Result<(), ()> { let builder_limit = NonZeroUsize::new(64); let request_builder = NewRelicRequestBuilder { - transformer: self.transformer, encoder: self.encoder, compression: self.compression, credentials: Arc::clone(&self.credentials), From c825ccdb17c063857e9a66e9782e157eb0eb1179 Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Wed, 12 Jul 2023 13:00:17 +0100 Subject: [PATCH 6/9] Clippy Signed-off-by: Stephen Wakely --- lib/vector-core/src/schema/meanings.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/vector-core/src/schema/meanings.rs b/lib/vector-core/src/schema/meanings.rs index 119db13a5e6bd..d77c3d36091a8 100644 --- a/lib/vector-core/src/schema/meanings.rs +++ b/lib/vector-core/src/schema/meanings.rs @@ -1,17 +1,17 @@ -///! Constants for commonly used meanings. +//! Constants for commonly used meanings. /// The service typically represents the application that generated the event. -pub const SERVICE: &'static str = "service"; +pub const SERVICE: &str = "service"; /// The main text message of the event. -pub const MESSAGE: &'static str = "message"; +pub const MESSAGE: &str = "message"; /// The main timestamp of the event. -pub const TIMESTAMP: &'static str = "timestamp"; +pub const TIMESTAMP: &str = "timestamp"; /// The hostname of the machine where the event was generated. -pub const HOST: &'static str = "host"; +pub const HOST: &str = "host"; -pub const SOURCE: &'static str = "source"; -pub const SEVERITY: &'static str = "severity"; -pub const TRACE_ID: &'static str = "trace_id"; +pub const SOURCE: &str = "source"; +pub const SEVERITY: &str = "severity"; +pub const TRACE_ID: &str = "trace_id"; From 1dc163797bf941fe1dda53faed313edc284a7aa9 Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Fri, 14 Jul 2023 16:53:11 +0100 Subject: [PATCH 7/9] Rename meanings to meaning Signed-off-by: Stephen Wakely --- lib/vector-core/src/schema/meanings.rs | 17 ----------------- lib/vector-core/src/schema/mod.rs | 2 +- src/codecs/encoding/transformer.rs | 10 +++++----- src/sinks/datadog/logs/config.rs | 16 ++++++++-------- src/sinks/new_relic/config.rs | 1 - src/sinks/new_relic/sink.rs | 1 - src/sources/splunk_hec/mod.rs | 8 ++++---- 7 files changed, 18 insertions(+), 37 deletions(-) delete mode 100644 lib/vector-core/src/schema/meanings.rs diff --git a/lib/vector-core/src/schema/meanings.rs b/lib/vector-core/src/schema/meanings.rs deleted file mode 100644 index d77c3d36091a8..0000000000000 --- a/lib/vector-core/src/schema/meanings.rs +++ /dev/null @@ -1,17 +0,0 @@ -//! Constants for commonly used meanings. - -/// The service typically represents the application that generated the event. -pub const SERVICE: &str = "service"; - -/// The main text message of the event. -pub const MESSAGE: &str = "message"; - -/// The main timestamp of the event. -pub const TIMESTAMP: &str = "timestamp"; - -/// The hostname of the machine where the event was generated. 
-pub const HOST: &str = "host"; - -pub const SOURCE: &str = "source"; -pub const SEVERITY: &str = "severity"; -pub const TRACE_ID: &str = "trace_id"; diff --git a/lib/vector-core/src/schema/mod.rs b/lib/vector-core/src/schema/mod.rs index 87a2418ec323a..2d1c01b8d281f 100644 --- a/lib/vector-core/src/schema/mod.rs +++ b/lib/vector-core/src/schema/mod.rs @@ -1,5 +1,5 @@ mod definition; -pub mod meanings; +pub mod meaning; mod requirement; pub use definition::Definition; diff --git a/src/codecs/encoding/transformer.rs b/src/codecs/encoding/transformer.rs index 4b26438dfa8f2..df711bd8f065b 100644 --- a/src/codecs/encoding/transformer.rs +++ b/src/codecs/encoding/transformer.rs @@ -12,7 +12,7 @@ use lookup::{ use serde::{Deserialize, Deserializer}; use vector_config::configurable_component; use vector_core::event::{LogEvent, MaybeAsLogMut}; -use vector_core::schema::meanings; +use vector_core::schema::meaning; use vrl::value::Value; use crate::{event::Event, serde::skip_serializing_if_default}; @@ -143,12 +143,12 @@ impl Transformer { let service_path = log .metadata() .schema_definition() - .meaning_path(meanings::SERVICE); + .meaning_path(meaning::SERVICE); if let Some(service_path) = service_path { let mut new_log = LogEvent::from(old_value); if let Some(service) = new_log.remove(service_path) { log.metadata_mut() - .add_dropped_field(meanings::SERVICE.to_string(), service); + .add_dropped_field(meaning::SERVICE.to_string(), service); } } } @@ -161,7 +161,7 @@ impl Transformer { let service_path = log .metadata() .schema_definition() - .meaning_path(meanings::SERVICE) + .meaning_path(meaning::SERVICE) .map(|path| path.value_path().to_string()); for field in except_fields { @@ -172,7 +172,7 @@ impl Transformer { if let Some(v) = value { if matches!(service_path.as_ref(), Some(path) if path == field) { log.metadata_mut() - .add_dropped_field(meanings::SERVICE.to_string(), v); + .add_dropped_field(meaning::SERVICE.to_string(), v); } } } diff --git a/src/sinks/datadog/logs/config.rs b/src/sinks/datadog/logs/config.rs index 19c3c3fff9475..c6c93da795fc3 100644 --- a/src/sinks/datadog/logs/config.rs +++ b/src/sinks/datadog/logs/config.rs @@ -3,7 +3,7 @@ use std::{convert::TryFrom, sync::Arc}; use indoc::indoc; use tower::ServiceBuilder; use vector_config::configurable_component; -use vector_core::{config::proxy::ProxyConfig, schema::meanings}; +use vector_core::{config::proxy::ProxyConfig, schema::meaning}; use vrl::value::Kind; use super::{service::LogApiRetry, sink::LogSinkBuilder}; @@ -176,13 +176,13 @@ impl SinkConfig for DatadogLogsConfig { fn input(&self) -> Input { let requirement = schema::Requirement::empty() - .required_meaning(meanings::MESSAGE, Kind::bytes()) - .required_meaning(meanings::TIMESTAMP, Kind::timestamp()) - .optional_meaning(meanings::HOST, Kind::bytes()) - .optional_meaning(meanings::SOURCE, Kind::bytes()) - .optional_meaning(meanings::SEVERITY, Kind::bytes()) - .optional_meaning(meanings::SERVICE, Kind::bytes()) - .optional_meaning(meanings::TRACE_ID, Kind::bytes()); + .required_meaning(meaning::MESSAGE, Kind::bytes()) + .required_meaning(meaning::TIMESTAMP, Kind::timestamp()) + .optional_meaning(meaning::HOST, Kind::bytes()) + .optional_meaning(meaning::SOURCE, Kind::bytes()) + .optional_meaning(meaning::SEVERITY, Kind::bytes()) + .optional_meaning(meaning::SERVICE, Kind::bytes()) + .optional_meaning(meaning::TRACE_ID, Kind::bytes()); Input::log().with_schema_requirement(requirement) } diff --git a/src/sinks/new_relic/config.rs b/src/sinks/new_relic/config.rs index 
cea53b06151d1..af892849f0964 100644 --- a/src/sinks/new_relic/config.rs +++ b/src/sinks/new_relic/config.rs @@ -154,7 +154,6 @@ impl SinkConfig for NewRelicConfig { let sink = NewRelicSink { service, - transformer: self.encoding.clone(), encoder: NewRelicEncoder { transformer: self.encoding.clone(), credentials: Arc::clone(&credentials), diff --git a/src/sinks/new_relic/sink.rs b/src/sinks/new_relic/sink.rs index 48057d346c746..384a1b7a54e52 100644 --- a/src/sinks/new_relic/sink.rs +++ b/src/sinks/new_relic/sink.rs @@ -101,7 +101,6 @@ impl RequestBuilder> for NewRelicRequestBuilder { pub struct NewRelicSink { pub service: S, - pub transformer: Transformer, pub encoder: NewRelicEncoder, pub credentials: Arc, pub compression: Compression, diff --git a/src/sources/splunk_hec/mod.rs b/src/sources/splunk_hec/mod.rs index 713a6b82833ab..886d15ab6fb53 100644 --- a/src/sources/splunk_hec/mod.rs +++ b/src/sources/splunk_hec/mod.rs @@ -21,7 +21,7 @@ use vector_config::configurable_component; use vector_core::{ config::{LegacyKey, LogNamespace}, event::BatchNotifier, - schema::meanings, + schema::meaning, EstimatedJsonEncodedSizeOf, }; use vrl::value::{kind::Collection, Kind}; @@ -184,7 +184,7 @@ impl SourceConfig for SplunkConfig { .with_event_field( &owned_value_path!(log_schema().message_key()), Kind::bytes().or_undefined(), - Some(meanings::MESSAGE), + Some(meaning::MESSAGE), ) .with_event_field( &owned_value_path!("line"), @@ -206,7 +206,7 @@ impl SourceConfig for SplunkConfig { ))), &owned_value_path!("host"), Kind::bytes(), - Some(meanings::HOST), + Some(meaning::HOST), ) .with_source_metadata( SplunkConfig::NAME, @@ -227,7 +227,7 @@ impl SourceConfig for SplunkConfig { Some(LegacyKey::Overwrite(owned_value_path!(SOURCE))), &owned_value_path!("source"), Kind::bytes(), - Some(meanings::SERVICE), + Some(meaning::SERVICE), ) // Not to be confused with `source_type`. .with_source_metadata( From 553223d85f1da93988bd5e4af3486376ce6bfef2 Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Mon, 17 Jul 2023 10:02:00 +0100 Subject: [PATCH 8/9] Add meaning Signed-off-by: Stephen Wakely --- lib/vector-core/src/schema/meaning.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 lib/vector-core/src/schema/meaning.rs diff --git a/lib/vector-core/src/schema/meaning.rs b/lib/vector-core/src/schema/meaning.rs new file mode 100644 index 0000000000000..ab766b0986924 --- /dev/null +++ b/lib/vector-core/src/schema/meaning.rs @@ -0,0 +1,17 @@ +//! Constants for commonly used semantic meanings. + +/// The service typically represents the application that generated the event. +pub const SERVICE: &str = "service"; + +/// The main text message of the event. +pub const MESSAGE: &str = "message"; + +/// The main timestamp of the event. +pub const TIMESTAMP: &str = "timestamp"; + +/// The hostname of the machine where the event was generated. 
+pub const HOST: &str = "host"; + +pub const SOURCE: &str = "source"; +pub const SEVERITY: &str = "severity"; +pub const TRACE_ID: &str = "trace_id"; From cd7a4f80b50fdb1d67cf4668e616074d42f5bd00 Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Mon, 17 Jul 2023 10:55:04 +0100 Subject: [PATCH 9/9] Add comments re dropping and then re-adding fields Signed-off-by: Stephen Wakely --- lib/vector-core/src/event/log_event.rs | 4 ++++ lib/vector-core/src/event/metadata.rs | 3 +++ 2 files changed, 7 insertions(+) diff --git a/lib/vector-core/src/event/log_event.rs b/lib/vector-core/src/event/log_event.rs index 4f210f10795cb..a4bedd51476b7 100644 --- a/lib/vector-core/src/event/log_event.rs +++ b/lib/vector-core/src/event/log_event.rs @@ -292,6 +292,10 @@ impl LogEvent { } } + /// Retrieves the value of a field based on its meaning. + /// This will first check if the value has previously been dropped. It is worth being + /// aware that if the field has been dropped and then somehow re-added, we still fetch + /// the dropped value here. pub fn get_by_meaning(&self, meaning: impl AsRef<str>) -> Option<&Value> { if let Some(dropped) = self.metadata().dropped_field(&meaning) { Some(dropped) diff --git a/lib/vector-core/src/event/metadata.rs b/lib/vector-core/src/event/metadata.rs index c71b4287264bb..f577147e11ded 100644 --- a/lib/vector-core/src/event/metadata.rs +++ b/lib/vector-core/src/event/metadata.rs @@ -133,6 +133,9 @@ impl EventMetadata { } /// Adds the value to the dropped fields list. + /// There is currently no way to remove a field from this list, so if a field is dropped + /// and then the field is re-added with a new value, the dropped value will still be + /// retrieved. pub fn add_dropped_field(&mut self, meaning: String, value: Value) { self.dropped_fields.insert(meaning, value); }
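
Editor's note: to make the caveat documented in the final patch concrete, here is a minimal, self-contained Rust sketch of the behaviour. It deliberately does not use vector-core's real types; `MockEvent` and its fields are illustrative stand-ins, and the lookup-by-meaning is simplified to a direct map lookup rather than going through the schema definition's meaning path.

use std::collections::HashMap;

#[derive(Default)]
struct MockEvent {
    // Live event fields, keyed here by meaning for brevity.
    fields: HashMap<String, String>,
    // Mirrors the dropped-fields map: meaning -> value stashed when a field is dropped.
    dropped_fields: HashMap<String, String>,
}

impl MockEvent {
    // Mirrors add_dropped_field: stash the value under its semantic meaning.
    fn add_dropped_field(&mut self, meaning: String, value: String) {
        self.dropped_fields.insert(meaning, value);
    }

    // Mirrors get_by_meaning: a previously dropped value shadows the live field.
    fn get_by_meaning(&self, meaning: &str) -> Option<&String> {
        self.dropped_fields
            .get(meaning)
            .or_else(|| self.fields.get(meaning))
    }
}

fn main() {
    let mut event = MockEvent::default();
    event.fields.insert("service".into(), "api".into());

    // The transformer drops `service` and stashes it by meaning...
    let dropped = event.fields.remove("service").unwrap();
    event.add_dropped_field("service".into(), dropped);

    // ...and even if the field is later re-added with a new value,
    // lookups by meaning still return the dropped value, as the comments warn.
    event.fields.insert("service".into(), "other".into());
    assert_eq!(event.get_by_meaning("service"), Some(&"api".to_string()));
}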