diff --git a/.chloggen/add-logdedupe-processor.yaml b/.chloggen/add-logdedupe-processor.yaml new file mode 100644 index 000000000000..faebe3b8e873 --- /dev/null +++ b/.chloggen/add-logdedupe-processor.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: new_component + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: logdedupeprocessor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add new logdedupeprocessor that deduplicates log entries. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [34118] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 06421d601024..bf8c381f4bc6 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -174,6 +174,7 @@ processor/groupbyattrsprocessor/ @open-teleme processor/groupbytraceprocessor/ @open-telemetry/collector-contrib-approvers @jpkrohling processor/intervalprocessor/ @open-telemetry/collector-contrib-approvers @RichieSams @sh0rez @djaglowski processor/k8sattributesprocessor/ @open-telemetry/collector-contrib-approvers @dmitryax @rmfitzpatrick @fatsheep9146 @TylerHelmuth +processor/logdeduplicationprocessor/ @open-telemetry/collector-contrib-approvers @BinaryFissionGames @MikeGoldsmith @djaglowski processor/logstransformprocessor/ @open-telemetry/collector-contrib-approvers @djaglowski @dehaansa processor/metricsgenerationprocessor/ @open-telemetry/collector-contrib-approvers @Aneurysm9 processor/metricstransformprocessor/ @open-telemetry/collector-contrib-approvers @dmitryax diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index 7a40a922837d..3124015ddeaf 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -169,6 +169,7 @@ body: - processor/groupbytrace - processor/interval - processor/k8sattributes + - processor/logdeduplication - processor/logstransform - processor/metricsgeneration - processor/metricstransform diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml index 90126dd48049..b270c4419cb0 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -163,6 +163,7 @@ body: - processor/groupbytrace - processor/interval - processor/k8sattributes + - processor/logdeduplication - processor/logstransform - processor/metricsgeneration - processor/metricstransform diff --git a/.github/ISSUE_TEMPLATE/other.yaml b/.github/ISSUE_TEMPLATE/other.yaml index f1d39931d1a9..d540aac44fdc 100644 --- a/.github/ISSUE_TEMPLATE/other.yaml
+++ b/.github/ISSUE_TEMPLATE/other.yaml @@ -163,6 +163,7 @@ body: - processor/groupbytrace - processor/interval - processor/k8sattributes + - processor/logdeduplication - processor/logstransform - processor/metricsgeneration - processor/metricstransform diff --git a/.github/ISSUE_TEMPLATE/unmaintained.yaml b/.github/ISSUE_TEMPLATE/unmaintained.yaml index 34d58a058c6f..64a7f243ebe1 100644 --- a/.github/ISSUE_TEMPLATE/unmaintained.yaml +++ b/.github/ISSUE_TEMPLATE/unmaintained.yaml @@ -168,6 +168,7 @@ body: - processor/groupbytrace - processor/interval - processor/k8sattributes + - processor/logdeduplication - processor/logstransform - processor/metricsgeneration - processor/metricstransform diff --git a/processor/logdeduplicationprocessor/Makefile b/processor/logdeduplicationprocessor/Makefile new file mode 100644 index 000000000000..ded7a36092dc --- /dev/null +++ b/processor/logdeduplicationprocessor/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/processor/logdeduplicationprocessor/README.md b/processor/logdeduplicationprocessor/README.md new file mode 100644 index 000000000000..bcd62c4a898e --- /dev/null +++ b/processor/logdeduplicationprocessor/README.md @@ -0,0 +1,75 @@ +# Log Deduplication Processor +This processor deduplicates logs by detecting identical logs over a range of time and emitting a single log with the count of logs that were deduplicated. + +## Supported pipelines +- Logs + +## How It Works +1. The user configures the log deduplication processor in the desired logs pipeline. +2. All logs sent to the processor are aggregated over the configured `interval`. Logs are considered identical if they have the same body, resource attributes, severity, and log attributes. +3. After the interval, the processor emits a single log with the count of logs that were deduplicated. The emitted log will have the same body, resource attributes, severity, and log attributes as the original log. The emitted log will also have the following new attributes: + + - `log_count`: The count of logs that were deduplicated over the interval. The name of the attribute is configurable via the `log_count_attribute` parameter. + - `first_observed_timestamp`: The timestamp of the first log that was observed during the aggregation interval. + - `last_observed_timestamp`: The timestamp of the last log that was observed during the aggregation interval. + +**Note**: The `ObservedTimestamp` and `Timestamp` of the emitted log will be the time that the aggregated log was emitted and will not be the same as the `ObservedTimestamp` and `Timestamp` of the original logs. + +## Configuration +| Field | Type | Default | Description | +| --- | --- | --- | --- | +| interval | duration | `10s` | The interval at which logs are aggregated. The counter will reset after each interval. | +| log_count_attribute | string | `log_count` | The name of the count attribute of deduplicated logs that will be added to the emitted aggregated log. | +| timezone | string | `UTC` | The timezone of the `first_observed_timestamp` and `last_observed_timestamp` timestamps on the emitted aggregated log. The available locations depend on the local IANA Time Zone database. [This page](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) contains many examples, such as `America/New_York`. | +| exclude_fields | []string | `[]` | Fields to exclude from duplication matching. Fields can be excluded from the log `body` or `attributes`. These fields will not be present in the emitted aggregated log.
Nested fields must be `.` delimited. If a field name contains a `.`, it can be escaped with a `\`; see the [example config](#example-config-with-excluded-fields).

**Note**: The entire `body` cannot be excluded. If the body is a map then fields within it can be excluded. | + + +### Example Config +The following config is an example configuration for the log deduplication processor. It is configured with an aggregation interval of `60 seconds`, a timezone of `America/Los_Angeles`, and a log count attribute of `dedup_count`. No fields are excluded. +```yaml +receivers: + filelog: + include: [./example/*.log] +processors: + logdedup: + interval: 60s + log_count_attribute: dedup_count + timezone: 'America/Los_Angeles' +exporters: + googlecloud: + +service: + pipelines: + logs: + receivers: [filelog] + processors: [logdedup] + exporters: [googlecloud] +``` + +### Example Config with Excluded Fields +The following config is an example configuration that excludes the following fields from being considered when searching for duplicate logs: + +- `timestamp` field from the body +- `host.name` field from attributes +- `ip` nested attribute inside a map attribute named `src` + +```yaml +receivers: + filelog: + include: [./example/*.log] +processors: + logdedup: + exclude_fields: + - body.timestamp + - attributes.host\.name + - attributes.src.ip +exporters: + googlecloud: + +service: + pipelines: + logs: + receivers: [filelog] + processors: [logdedup] + exporters: [googlecloud] +``` diff --git a/processor/logdeduplicationprocessor/config.go b/processor/logdeduplicationprocessor/config.go new file mode 100644 index 000000000000..af9bfa9ead70 --- /dev/null +++ b/processor/logdeduplicationprocessor/config.go @@ -0,0 +1,102 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package logdeduplicationprocessor provides a processor that deduplicates logs by aggregating identical records over a configured interval. +package logdeduplicationprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdeduplicationprocessor" + +import ( + "errors" + "fmt" + "strings" + "time" + + "go.opentelemetry.io/collector/component" +) + +// Config defaults +const ( + // defaultInterval is the default export interval. + defaultInterval = 10 * time.Second + + // defaultLogCountAttribute is the default log count attribute + defaultLogCountAttribute = "log_count" + + // defaultTimezone is the default timezone + defaultTimezone = "UTC" + + // bodyField is the name of the body field + bodyField = "body" + + // attributeField is the name of the attribute field + attributeField = "attributes" +) + +// Config errors +var ( + errInvalidLogCountAttribute = errors.New("log_count_attribute must be set") + errInvalidInterval = errors.New("interval must be greater than 0") + errCannotExcludeBody = errors.New("cannot exclude the entire body") +) + +// Config is the config of the processor. +type Config struct { + LogCountAttribute string `mapstructure:"log_count_attribute"` + Interval time.Duration `mapstructure:"interval"` + Timezone string `mapstructure:"timezone"` + ExcludeFields []string `mapstructure:"exclude_fields"` +} + +// createDefaultConfig returns the default config for the processor.
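+// +// A minimal usage sketch (assumes the defaults above; only the interval is overridden): +// +// cfg := createDefaultConfig().(*Config) +// cfg.Interval = 30 * time.Second +// err := cfg.Validate() // nil: any positive interval is valid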
+func createDefaultConfig() component.Config { + return &Config{ + LogCountAttribute: defaultLogCountAttribute, + Interval: defaultInterval, + Timezone: defaultTimezone, + ExcludeFields: []string{}, + } +} + +// Validate validates the configuration +func (c Config) Validate() error { + if c.Interval <= 0 { + return errInvalidInterval + } + + if c.LogCountAttribute == "" { + return errInvalidLogCountAttribute + } + + _, err := time.LoadLocation(c.Timezone) + if err != nil { + return fmt.Errorf("timezone is invalid: %w", err) + } + + return c.validateExcludeFields() +} + +// validateExcludeFields validates the configured exclude fields +func (c Config) validateExcludeFields() error { + knownExcludeFields := make(map[string]struct{}) + + for _, field := range c.ExcludeFields { + // Special check to make sure the entire body is not excluded + if field == bodyField { + return errCannotExcludeBody + } + + // Split and ensure the field starts with `body` or `attributes` + parts := strings.Split(field, fieldDelimiter) + if parts[0] != bodyField && parts[0] != attributeField { + return fmt.Errorf("an exclude_field must start with %s or %s", bodyField, attributeField) + } + + // If a field is valid, make sure we haven't already seen it + if _, ok := knownExcludeFields[field]; ok { + return fmt.Errorf("duplicate exclude_field %s", field) + } + + knownExcludeFields[field] = struct{}{} + } + + return nil +} diff --git a/processor/logdeduplicationprocessor/config_test.go b/processor/logdeduplicationprocessor/config_test.go new file mode 100644 index 000000000000..fab69dde82f1 --- /dev/null +++ b/processor/logdeduplicationprocessor/config_test.go @@ -0,0 +1,109 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logdeduplicationprocessor + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCreateDefaultProcessorConfig(t *testing.T) { + cfg := createDefaultConfig().(*Config) + require.Equal(t, defaultInterval, cfg.Interval) + require.Equal(t, defaultLogCountAttribute, cfg.LogCountAttribute) + require.Equal(t, defaultTimezone, cfg.Timezone) + require.Equal(t, []string{}, cfg.ExcludeFields) +} + +func TestValidateConfig(t *testing.T) { + testCases := []struct { + desc string + cfg *Config + expectedErr error + }{ + { + desc: "invalid LogCountAttribute config", + cfg: &Config{ + LogCountAttribute: "", + Interval: defaultInterval, + Timezone: defaultTimezone, + ExcludeFields: []string{}, + }, + expectedErr: errInvalidLogCountAttribute, + }, + { + desc: "invalid Interval config", + cfg: &Config{ + LogCountAttribute: defaultLogCountAttribute, + Interval: -1, + Timezone: defaultTimezone, + ExcludeFields: []string{}, + }, + expectedErr: errInvalidInterval, + }, + { + desc: "invalid Timezone config", + cfg: &Config{ + LogCountAttribute: defaultLogCountAttribute, + Interval: defaultInterval, + Timezone: "not a timezone", + ExcludeFields: []string{}, + }, + expectedErr: errors.New("timezone is invalid"), + }, + { + desc: "invalid exclude entire body", + cfg: &Config{ + LogCountAttribute: defaultLogCountAttribute, + Interval: defaultInterval, + Timezone: defaultTimezone, + ExcludeFields: []string{bodyField}, + }, + expectedErr: errCannotExcludeBody, + }, + { + desc: "invalid exclude field prefix", + cfg: &Config{ + LogCountAttribute: defaultLogCountAttribute, + Interval: defaultInterval, + Timezone: defaultTimezone, + ExcludeFields: []string{"not.value"}, + }, + expectedErr: errors.New("an exclude_field must start with"), + }, + { + desc:
"invalid duplice exclude field", + cfg: &Config{ + LogCountAttribute: defaultLogCountAttribute, + Interval: defaultInterval, + Timezone: defaultTimezone, + ExcludeFields: []string{"body.thing", "body.thing"}, + }, + expectedErr: errors.New("duplicate exclude_field"), + }, + { + desc: "valid config", + cfg: &Config{ + LogCountAttribute: defaultLogCountAttribute, + Interval: defaultInterval, + Timezone: defaultTimezone, + ExcludeFields: []string{"body.thing", "attributes.otherthing"}, + }, + expectedErr: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + err := tc.cfg.Validate() + if tc.expectedErr != nil { + require.ErrorContains(t, err, tc.expectedErr.Error()) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/processor/logdeduplicationprocessor/counter.go b/processor/logdeduplicationprocessor/counter.go new file mode 100644 index 000000000000..7bfc264793d9 --- /dev/null +++ b/processor/logdeduplicationprocessor/counter.go @@ -0,0 +1,164 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logdeduplicationprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdeduplicationprocessor" + +import ( + "hash/fnv" + "time" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil" +) + +// Attributes names for first and last observed timestamps +const ( + firstObservedTSAttr = "first_observed_timestamp" + lastObservedTSAttr = "last_observed_timestamp" +) + +// timeNow can be reassigned for testing +var timeNow = time.Now + +// logAggregator tracks the number of times a specific logRecord has been seen. +type logAggregator struct { + resources map[[16]byte]*resourceAggregator + logCountAttribute string + timezone *time.Location +} + +// newLogAggregator creates a new LogCounter. 
+func newLogAggregator(logCountAttribute string, timezone *time.Location) *logAggregator { + return &logAggregator{ + resources: make(map[[16]byte]*resourceAggregator), + logCountAttribute: logCountAttribute, + timezone: timezone, + } +} + +// Export exports the aggregated log counters as plog.Logs +func (l *logAggregator) Export() plog.Logs { + logs := plog.NewLogs() + + for _, resource := range l.resources { + resourceLogs := logs.ResourceLogs().AppendEmpty() + resourceAttrs := resourceLogs.Resource().Attributes() + resourceAttrs.EnsureCapacity(resource.attributes.Len()) + resource.attributes.CopyTo(resourceAttrs) + + scopeLogs := resourceLogs.ScopeLogs().AppendEmpty() + for _, lc := range resource.logCounters { + lr := scopeLogs.LogRecords().AppendEmpty() + + baseRecord := lc.logRecord + + // Copy contents of base record + baseRecord.SetObservedTimestamp(pcommon.NewTimestampFromTime(lc.firstObservedTimestamp)) + baseRecord.Body().CopyTo(lr.Body()) + + lr.Attributes().EnsureCapacity(baseRecord.Attributes().Len()) + baseRecord.Attributes().CopyTo(lr.Attributes()) + + lr.SetSeverityNumber(baseRecord.SeverityNumber()) + lr.SetSeverityText(baseRecord.SeverityText()) + + // Add attributes for log count and timestamps + lr.SetObservedTimestamp(pcommon.NewTimestampFromTime(lc.firstObservedTimestamp)) + lr.SetTimestamp(pcommon.NewTimestampFromTime(timeNow())) + lr.Attributes().PutInt(l.logCountAttribute, lc.count) + + firstTimestampStr := lc.firstObservedTimestamp.In(l.timezone).Format(time.RFC3339) + lastTimestampStr := lc.lastObservedTimestamp.In(l.timezone).Format(time.RFC3339) + lr.Attributes().PutStr(firstObservedTSAttr, firstTimestampStr) + lr.Attributes().PutStr(lastObservedTSAttr, lastTimestampStr) + } + } + + return logs +} + +// Add adds the logRecord to the resource aggregator that is identified by the resource attributes +func (l *logAggregator) Add(resourceKey [16]byte, resourceAttrs pcommon.Map, logRecord plog.LogRecord) { + resourceCounter, ok := l.resources[resourceKey] + if !ok { + resourceCounter = newResourceAggregator(resourceAttrs) + l.resources[resourceKey] = resourceCounter + } + + resourceCounter.Add(logRecord) +} + +// Reset resets the aggregator. +func (l *logAggregator) Reset() { + l.resources = make(map[[16]byte]*resourceAggregator) +} + +// resourceAggregator groups log counters by resource. +type resourceAggregator struct { + attributes pcommon.Map + logCounters map[[8]byte]*logCounter +} + +// newResourceAggregator creates a new resourceAggregator. +func newResourceAggregator(attributes pcommon.Map) *resourceAggregator { + return &resourceAggregator{ + attributes: attributes, + logCounters: make(map[[8]byte]*logCounter), + } +} + +// Add increments the counter that the logRecord matches. +func (r *resourceAggregator) Add(logRecord plog.LogRecord) { + key := getLogKey(logRecord) + lc, ok := r.logCounters[key] + if !ok { + lc = newLogCounter(logRecord) + r.logCounters[key] = lc + } + lc.Increment() +} + +// logCounter is a counter for a log record. +type logCounter struct { + logRecord plog.LogRecord + firstObservedTimestamp time.Time + lastObservedTimestamp time.Time + count int64 +} + +// newLogCounter creates a new logCounter. +func newLogCounter(logRecord plog.LogRecord) *logCounter { + return &logCounter{ + logRecord: logRecord, + count: 0, + firstObservedTimestamp: timeNow().UTC(), + } +} + +// Increment increments the counter.
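+// For example, three identical records observed in one interval leave the counter with count == 3 and lastObservedTimestamp set by the final call.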
+func (a *logCounter) Increment() { + a.lastObservedTimestamp = timeNow().UTC() + a.count++ +} + +// getLogKey creates a unique hash for the log record to use as a map key +/* #nosec G104 -- According to Hash interface write can never return an error */ +func getLogKey(logRecord plog.LogRecord) [8]byte { + hasher := fnv.New64() + attrHash := pdatautil.MapHash(logRecord.Attributes()) + + hasher.Write(attrHash[:]) + bodyHash := pdatautil.ValueHash(logRecord.Body()) + hasher.Write(bodyHash[:]) + hasher.Write([]byte(logRecord.SeverityNumber().String())) + hasher.Write([]byte(logRecord.SeverityText())) + hash := hasher.Sum(nil) + + // convert from slice to fixed size array to use as key + var key [8]byte + copy(key[:], hash) + return key +} diff --git a/processor/logdeduplicationprocessor/counter_test.go b/processor/logdeduplicationprocessor/counter_test.go new file mode 100644 index 000000000000..1d8778b1f7ee --- /dev/null +++ b/processor/logdeduplicationprocessor/counter_test.go @@ -0,0 +1,231 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logdeduplicationprocessor + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil" +) + +func Test_newLogAggregator(t *testing.T) { + cfg := createDefaultConfig().(*Config) + aggregator := newLogAggregator(cfg.LogCountAttribute, time.UTC) + require.Equal(t, cfg.LogCountAttribute, aggregator.logCountAttribute) + require.Equal(t, time.UTC, aggregator.timezone) + require.NotNil(t, aggregator.resources) +} + +func Test_logAggregatorAdd(t *testing.T) { + oldTimeNow := timeNow + defer func() { + timeNow = oldTimeNow + }() + + // Set timeNow to return a known value + firstExpectedTimestamp := time.Now().UTC() + timeNow = func() time.Time { + return firstExpectedTimestamp + } + + // Setup aggregator + aggregator := newLogAggregator("log_count", time.UTC) + logRecord := plog.NewLogRecord() + resourceAttrs := pcommon.NewMap() + resourceAttrs.PutStr("one", "two") + + expectedResourceKey := pdatautil.MapHash(resourceAttrs) + expectedLogKey := getLogKey(logRecord) + + // Add logRecord + resourceKey := pdatautil.MapHash(resourceAttrs) + aggregator.Add(resourceKey, resourceAttrs, logRecord) + + // Check resourceCounter was set + resourceCounter, ok := aggregator.resources[expectedResourceKey] + require.True(t, ok) + require.Equal(t, resourceAttrs, resourceCounter.attributes) + + // Check logCounter was set + lc, ok := resourceCounter.logCounters[expectedLogKey] + require.True(t, ok) + + // Check fields on logCounter + require.Equal(t, logRecord, lc.logRecord) + require.Equal(t, int64(1), lc.count) + require.Equal(t, firstExpectedTimestamp, lc.firstObservedTimestamp) + require.Equal(t, firstExpectedTimestamp, lc.lastObservedTimestamp) + + // Add a matching logRecord to update counter and last observedTimestamp + secondExpectedTimestamp := time.Now().Add(2 * time.Minute).UTC() + timeNow = func() time.Time { + return secondExpectedTimestamp + } + + aggregator.Add(resourceKey, resourceAttrs, logRecord) + require.Equal(t, int64(2), lc.count) + require.Equal(t, secondExpectedTimestamp, lc.lastObservedTimestamp) +} + +func Test_logAggregatorReset(t *testing.T) { + aggregator := newLogAggregator("log_count", time.UTC) + for i := 0; i < 2; i++ { + resourceAttrs := pcommon.NewMap() + resourceAttrs.PutInt("i", int64(i)) + key := 
pdatautil.MapHash(resourceAttrs) + aggregator.resources[key] = newResourceAggregator(resourceAttrs) + } + + require.Len(t, aggregator.resources, 2) + + aggregator.Reset() + + require.Len(t, aggregator.resources, 0) +} + +func Test_logAggregatorExport(t *testing.T) { + oldTimeNow := timeNow + defer func() { + timeNow = oldTimeNow + }() + + location, err := time.LoadLocation("America/New_York") + require.NoError(t, err) + + // Set timeNow to return a known value + expectedTimestamp := time.Now().UTC() + expectedTimestampStr := expectedTimestamp.In(location).Format(time.RFC3339) + timeNow = func() time.Time { + return expectedTimestamp + } + + // Setup aggregator + + aggregator := newLogAggregator(defaultLogCountAttribute, location) + resourceAttrs := pcommon.NewMap() + resourceAttrs.PutStr("one", "two") + expectedHash := pdatautil.MapHash(resourceAttrs) + + logRecord := generateTestLogRecord(t, "body string") + + // Add logRecord + resourceKey := pdatautil.MapHash(resourceAttrs) + aggregator.Add(resourceKey, resourceAttrs, logRecord) + + exportedLogs := aggregator.Export() + require.Equal(t, 1, exportedLogs.LogRecordCount()) + require.Equal(t, 1, exportedLogs.ResourceLogs().Len()) + + // Check resource + rl := exportedLogs.ResourceLogs().At(0) + actualAttrs := rl.Resource().Attributes() + actualHash := pdatautil.MapHash(actualAttrs) + require.Equal(t, expectedHash, actualHash) + + require.Equal(t, 1, rl.ScopeLogs().Len()) + sl := rl.ScopeLogs().At(0) + + require.Equal(t, 1, sl.LogRecords().Len()) + actualLogRecord := sl.LogRecords().At(0) + + // Check logRecord + require.Equal(t, logRecord.Body().AsString(), actualLogRecord.Body().AsString()) + require.Equal(t, logRecord.SeverityNumber(), actualLogRecord.SeverityNumber()) + require.Equal(t, logRecord.SeverityText(), actualLogRecord.SeverityText()) + require.Equal(t, expectedTimestamp.UnixMilli(), actualLogRecord.ObservedTimestamp().AsTime().UnixMilli()) + require.Equal(t, expectedTimestamp.UnixMilli(), actualLogRecord.Timestamp().AsTime().UnixMilli()) + + actualRawAttrs := actualLogRecord.Attributes().AsRaw() + for key, val := range logRecord.Attributes().AsRaw() { + actualVal, ok := actualRawAttrs[key] + require.True(t, ok) + require.Equal(t, val, actualVal) + } + + // Ensure new attributes were added + actualLogCount, ok := actualRawAttrs[defaultLogCountAttribute] + require.True(t, ok) + require.Equal(t, int64(1), actualLogCount) + + actualFirstObserved, ok := actualRawAttrs[firstObservedTSAttr] + require.True(t, ok) + require.Equal(t, expectedTimestampStr, actualFirstObserved) + + actualLastObserved, ok := actualRawAttrs[lastObservedTSAttr] + require.True(t, ok) + require.Equal(t, expectedTimestampStr, actualLastObserved) +} + +func Test_newResourceAggregator(t *testing.T) { + attributes := pcommon.NewMap() + attributes.PutStr("one", "two") + aggregator := newResourceAggregator(attributes) + require.NotNil(t, aggregator.logCounters) + require.Equal(t, attributes, aggregator.attributes) +} + +func Test_newLogCounter(t *testing.T) { + logRecord := plog.NewLogRecord() + lc := newLogCounter(logRecord) + require.Equal(t, logRecord, lc.logRecord) + require.Equal(t, int64(0), lc.count) +} + +func Test_getLogKey(t *testing.T) { + testCases := []struct { + desc string + testFunc func(*testing.T) + }{ + { + desc: "getLogKey returns the same key for logs that should match", + testFunc: func(t *testing.T) { + logRecord1 := generateTestLogRecord(t, "Body of the log") + + // Differ by timestamp + 
logRecord1.SetTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(time.Minute))) + + logRecord2 := generateTestLogRecord(t, "Body of the log") + + key1 := getLogKey(logRecord1) + key2 := getLogKey(logRecord2) + + require.Equal(t, key1, key2) + }, + }, + { + desc: "getLogKey returns different keys for logs that shouldn't match", + testFunc: func(t *testing.T) { + logRecord1 := generateTestLogRecord(t, "Body of the log") + + logRecord2 := generateTestLogRecord(t, "A different Body of the log") + + key1 := getLogKey(logRecord1) + key2 := getLogKey(logRecord2) + + require.NotEqual(t, key1, key2) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, tc.testFunc) + } +} + +func generateTestLogRecord(t *testing.T, body string) plog.LogRecord { + t.Helper() + logRecord := plog.NewLogRecord() + logRecord.Body().SetStr(body) + logRecord.SetSeverityText("info") + logRecord.SetSeverityNumber(0) + logRecord.Attributes().PutBool("bool", true) + logRecord.Attributes().PutStr("str", "attr str") + logRecord.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) + return logRecord +} diff --git a/processor/logdeduplicationprocessor/factory.go b/processor/logdeduplicationprocessor/factory.go new file mode 100644 index 000000000000..b3f0a49426f4 --- /dev/null +++ b/processor/logdeduplicationprocessor/factory.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logdeduplicationprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdeduplicationprocessor" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdeduplicationprocessor/internal/metadata" +) + +// NewFactory creates a new factory for the processor. +func NewFactory() processor.Factory { + return processor.NewFactory( + metadata.Type, + createDefaultConfig, + processor.WithLogs(createLogsProcessor, metadata.LogsStability), + ) +} + +// createLogsProcessor creates a log processor.
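+// +// A wiring sketch using the nop helpers that also appear in the tests below (assumed to run in a test context): +// +// f := NewFactory() +// p, err := f.CreateLogsProcessor(context.Background(), processortest.NewNopSettings(), f.CreateDefaultConfig(), consumertest.NewNop())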
+func createLogsProcessor(_ context.Context, params processor.Settings, cfg component.Config, consumer consumer.Logs) (processor.Logs, error) { + processorCfg, ok := cfg.(*Config) + if !ok { + return nil, fmt.Errorf("invalid config type: %+v", cfg) + } + + return newProcessor(processorCfg, consumer, params.Logger) +} diff --git a/processor/logdeduplicationprocessor/factory_test.go b/processor/logdeduplicationprocessor/factory_test.go new file mode 100644 index 000000000000..9775818c280e --- /dev/null +++ b/processor/logdeduplicationprocessor/factory_test.go @@ -0,0 +1,55 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logdeduplicationprocessor + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/processor" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdeduplicationprocessor/internal/metadata" +) + +func TestNewProcessorFactory(t *testing.T) { + f := NewFactory() + require.Equal(t, metadata.Type, f.Type()) + require.Equal(t, metadata.LogsStability, f.LogsProcessorStability()) + require.NotNil(t, f.CreateDefaultConfig()) + require.NotNil(t, f.CreateLogsProcessor) +} + +func TestCreateLogsProcessor(t *testing.T) { + var testCases = []struct { + name string + cfg component.Config + expectedErr string + }{ + { + name: "valid config", + cfg: createDefaultConfig().(*Config), + }, + { + name: "invalid config type", + cfg: nil, + expectedErr: "invalid config type", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + f := NewFactory() + p, err := f.CreateLogsProcessor(context.Background(), processor.Settings{}, tc.cfg, nil) + if tc.expectedErr == "" { + require.NoError(t, err) + require.IsType(t, &logDedupProcessor{}, p) + } else { + require.ErrorContains(t, err, tc.expectedErr) + require.Nil(t, p) + } + }) + } +} diff --git a/processor/logdeduplicationprocessor/field_remover.go b/processor/logdeduplicationprocessor/field_remover.go new file mode 100644 index 000000000000..7fa5af761c9c --- /dev/null +++ b/processor/logdeduplicationprocessor/field_remover.go @@ -0,0 +1,111 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logdeduplicationprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdeduplicationprocessor" + +import ( + "fmt" + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" +) + +const ( + // fieldDelimiter is the delimiter used to split a field key into its parts. + fieldDelimiter = "." + + // fieldEscapeKeyReplacement is the string used to temporarily replace escaped delimiters while splitting a field key.
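+ // Without a placeholder, splitting `attributes.host\.name` on `.` would produce `host\` and `name` instead of the intended `host.name`.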
+ fieldEscapeKeyReplacement = "{TEMP_REPLACE}" +) + +// fieldRemover handles removing excluded fields from log records +type fieldRemover struct { + fields []*field +} + +// field represents a field and its compound key to match on +type field struct { + keyParts []string +} + +// newFieldRemover creates a new field remover based on the passed-in field keys +func newFieldRemover(fieldKeys []string) *fieldRemover { + fe := &fieldRemover{ + fields: make([]*field, 0, len(fieldKeys)), + } + + for _, f := range fieldKeys { + fe.fields = append(fe.fields, &field{ + keyParts: splitField(f), + }) + } + + return fe +} + +// RemoveFields removes any body or attribute fields that match in the log record +func (fe *fieldRemover) RemoveFields(logRecord plog.LogRecord) { + for _, field := range fe.fields { + field.removeField(logRecord) + } +} + +// removeField removes the field from the log record if it exists +func (f *field) removeField(logRecord plog.LogRecord) { + firstPart, remainingParts := f.keyParts[0], f.keyParts[1:] + + switch firstPart { + case bodyField: + // If body is a map then recurse through to remove the field + if logRecord.Body().Type() == pcommon.ValueTypeMap { + removeFieldFromMap(logRecord.Body().Map(), remainingParts) + } + case attributeField: + // Remove all attributes + if len(remainingParts) == 0 { + logRecord.Attributes().Clear() + return + } + + // Recurse through map and remove fields + removeFieldFromMap(logRecord.Attributes(), remainingParts) + } +} + +// removeFieldFromMap recurses through the map and removes the field if it's found. +func removeFieldFromMap(valueMap pcommon.Map, keyParts []string) { + nextKeyPart, remainingParts := keyParts[0], keyParts[1:] + + // Look for the value associated with the next key part. + // If we don't find it then return + value, ok := valueMap.Get(nextKeyPart) + if !ok { + return + } + + // No more key parts that means we have found the value and remove it + if len(remainingParts) == 0 { + valueMap.Remove(nextKeyPart) + return + } + + // If the value is a map then recurse through with the remaining parts + if value.Type() == pcommon.ValueTypeMap { + removeFieldFromMap(value.Map(), remainingParts) + } +} + +// splitField splits a field key into its parts. +// It replaces escaped delimiters with the full delimiter after splitting. +func splitField(fieldKey string) []string { + escapedKey := strings.ReplaceAll(fieldKey, fmt.Sprintf("\\%s", fieldDelimiter), fieldEscapeKeyReplacement) + keyParts := strings.Split(escapedKey, fieldDelimiter) + + // Replace the temporarily escaped delimiters with the actual delimiter.
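+ // For example, `attributes.host\.name` was rewritten above to `attributes.host{TEMP_REPLACE}name` and split into ["attributes", "host{TEMP_REPLACE}name"]; the loop below restores the parts to ["attributes", "host.name"].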
+ for i := range keyParts { + keyParts[i] = strings.ReplaceAll(keyParts[i], fieldEscapeKeyReplacement, fieldDelimiter) + } + + return keyParts +} diff --git a/processor/logdeduplicationprocessor/field_remover_test.go b/processor/logdeduplicationprocessor/field_remover_test.go new file mode 100644 index 000000000000..8b584e94be2a --- /dev/null +++ b/processor/logdeduplicationprocessor/field_remover_test.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logdeduplicationprocessor + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil" +) + +func Test_newFieldRemover(t *testing.T) { + fieldKeys := []string{ + "single_field", + "compound.field.one", + "escaped\\.field", + "escaped\\.compound.field", + } + + expected := &fieldRemover{ + fields: []*field{ + { + keyParts: []string{"single_field"}, + }, + { + keyParts: []string{"compound", "field", "one"}, + }, + { + keyParts: []string{"escaped.field"}, + }, + { + keyParts: []string{"escaped.compound", "field"}, + }, + }, + } + + actual := newFieldRemover(fieldKeys) + require.Equal(t, expected, actual) +} + +// TestRemoveFieldsAttributes tests when a remove field is attributes +func TestRemoveFieldsAttributes(t *testing.T) { + fields := []string{attributeField} + remover := newFieldRemover(fields) + + expectedBody := "test body" + logRecord := generateTestLogRecord(t, expectedBody) + + remover.RemoveFields(logRecord) + require.Equal(t, expectedBody, logRecord.Body().AsString()) + require.Equal(t, 0, logRecord.Attributes().Len()) +} + +func TestRemoveFields(t *testing.T) { + fields := []string{ + fmt.Sprintf("%s.nested\\.map.bool", bodyField), + fmt.Sprintf("%s.bool", attributeField), + fmt.Sprintf("%s.nested", attributeField), + fmt.Sprintf("%s.not_present", bodyField), + } + remover := newFieldRemover(fields) + + logRecord := plog.NewLogRecord() + + // Fill attribute map + logRecord.Attributes().PutBool("bool", true) + logRecord.Attributes().PutStr("str", "attr str") + nestedAttrMap := logRecord.Attributes().PutEmptyMap("nested") + nestedAttrMap.PutInt("int", 2) + + // Expected attribute map + expectedAttrsMap := pcommon.NewMap() + expectedAttrsMap.PutStr("str", "attr str") + expectedAttrHash := pdatautil.MapHash(expectedAttrsMap) + + // Fill body map + bodyMap := logRecord.Body().SetEmptyMap() + bodyMap.PutInt("safe", 10) + nestedBodyMap := bodyMap.PutEmptyMap("nested.map") + nestedBodyMap.PutBool("bool", true) + + // Expected body map + expectedBodyMap := pcommon.NewMap() + expectedBodyMap.PutEmptyMap("nested.map") + expectedBodyMap.PutInt("safe", 10) + expectedBodyHash := pdatautil.MapHash(expectedBodyMap) + + remover.RemoveFields(logRecord) + + actualAttrHash := pdatautil.MapHash(logRecord.Attributes()) + actualBodyHash := pdatautil.MapHash(logRecord.Body().Map()) + + require.Equal(t, expectedAttrHash, actualAttrHash) + require.Equal(t, expectedBodyHash, actualBodyHash) +} diff --git a/processor/logdeduplicationprocessor/generated_component_test.go b/processor/logdeduplicationprocessor/generated_component_test.go new file mode 100644 index 000000000000..ad8c0ffbb770 --- /dev/null +++ b/processor/logdeduplicationprocessor/generated_component_test.go @@ -0,0 +1,135 @@ +// Code generated by mdatagen. DO NOT EDIT.
+ +package logdeduplicationprocessor + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/processortest" +) + +func TestComponentFactoryType(t *testing.T) { + require.Equal(t, "logdedup", NewFactory().Type().String()) +} + +func TestComponentConfigStruct(t *testing.T) { + require.NoError(t, componenttest.CheckConfigStruct(NewFactory().CreateDefaultConfig())) +} + +func TestComponentLifecycle(t *testing.T) { + factory := NewFactory() + + tests := []struct { + name string + createFn func(ctx context.Context, set processor.Settings, cfg component.Config) (component.Component, error) + }{ + + { + name: "logs", + createFn: func(ctx context.Context, set processor.Settings, cfg component.Config) (component.Component, error) { + return factory.CreateLogsProcessor(ctx, set, cfg, consumertest.NewNop()) + }, + }, + } + + cm, err := confmaptest.LoadConf("metadata.yaml") + require.NoError(t, err) + cfg := factory.CreateDefaultConfig() + sub, err := cm.Sub("tests::config") + require.NoError(t, err) + require.NoError(t, sub.Unmarshal(&cfg)) + + for _, test := range tests { + t.Run(test.name+"-shutdown", func(t *testing.T) { + c, err := test.createFn(context.Background(), processortest.NewNopSettings(), cfg) + require.NoError(t, err) + err = c.Shutdown(context.Background()) + require.NoError(t, err) + }) + t.Run(test.name+"-lifecycle", func(t *testing.T) { + c, err := test.createFn(context.Background(), processortest.NewNopSettings(), cfg) + require.NoError(t, err) + host := componenttest.NewNopHost() + err = c.Start(context.Background(), host) + require.NoError(t, err) + require.NotPanics(t, func() { + switch test.name { + case "logs": + e, ok := c.(processor.Logs) + require.True(t, ok) + logs := generateLifecycleTestLogs() + if !e.Capabilities().MutatesData { + logs.MarkReadOnly() + } + err = e.ConsumeLogs(context.Background(), logs) + case "metrics": + e, ok := c.(processor.Metrics) + require.True(t, ok) + metrics := generateLifecycleTestMetrics() + if !e.Capabilities().MutatesData { + metrics.MarkReadOnly() + } + err = e.ConsumeMetrics(context.Background(), metrics) + case "traces": + e, ok := c.(processor.Traces) + require.True(t, ok) + traces := generateLifecycleTestTraces() + if !e.Capabilities().MutatesData { + traces.MarkReadOnly() + } + err = e.ConsumeTraces(context.Background(), traces) + } + }) + require.NoError(t, err) + err = c.Shutdown(context.Background()) + require.NoError(t, err) + }) + } +} + +func generateLifecycleTestLogs() plog.Logs { + logs := plog.NewLogs() + rl := logs.ResourceLogs().AppendEmpty() + rl.Resource().Attributes().PutStr("resource", "R1") + l := rl.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() + l.Body().SetStr("test log message") + l.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) + return logs +} + +func generateLifecycleTestMetrics() pmetric.Metrics { + metrics := pmetric.NewMetrics() + rm := metrics.ResourceMetrics().AppendEmpty() + rm.Resource().Attributes().PutStr("resource", "R1") + m := 
rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() + m.SetName("test_metric") + dp := m.SetEmptyGauge().DataPoints().AppendEmpty() + dp.Attributes().PutStr("test_attr", "value_1") + dp.SetIntValue(123) + dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) + return metrics +} + +func generateLifecycleTestTraces() ptrace.Traces { + traces := ptrace.NewTraces() + rs := traces.ResourceSpans().AppendEmpty() + rs.Resource().Attributes().PutStr("resource", "R1") + span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() + span.Attributes().PutStr("test_attr", "value_1") + span.SetName("test_span") + span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(-1 * time.Second))) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Now())) + return traces +} diff --git a/processor/logdeduplicationprocessor/generated_package_test.go b/processor/logdeduplicationprocessor/generated_package_test.go new file mode 100644 index 000000000000..810bd7465961 --- /dev/null +++ b/processor/logdeduplicationprocessor/generated_package_test.go @@ -0,0 +1,13 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package logdeduplicationprocessor + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/processor/logdeduplicationprocessor/go.mod b/processor/logdeduplicationprocessor/go.mod new file mode 100644 index 000000000000..df0c6fb4366c --- /dev/null +++ b/processor/logdeduplicationprocessor/go.mod @@ -0,0 +1,67 @@ +module github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdeduplicationprocessor + +go 1.21.0 + +require ( + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.106.1 + github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/collector/component v0.106.1 + go.opentelemetry.io/collector/confmap v0.106.1 + go.opentelemetry.io/collector/consumer v0.106.1 + go.opentelemetry.io/collector/consumer/consumertest v0.106.1 + go.opentelemetry.io/collector/pdata v1.12.0 + go.opentelemetry.io/collector/processor v0.106.1 + go.uber.org/goleak v1.3.0 + go.uber.org/zap v1.27.0 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.0.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/knadh/koanf/maps v0.1.1 // indirect + github.com/knadh/koanf/providers/confmap v0.1.0 // indirect + github.com/knadh/koanf/v2 v2.1.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.106.1 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles 
v0.106.1 // indirect + go.opentelemetry.io/collector/featuregate v1.12.0 // indirect + go.opentelemetry.io/collector/internal/globalgates v0.106.1 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.106.1 // indirect + go.opentelemetry.io/collector/pdata/testdata v0.106.1 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect +) + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../pkg/pdatautil diff --git a/processor/logdeduplicationprocessor/go.sum b/processor/logdeduplicationprocessor/go.sum new file mode 100644 index 000000000000..9303f81e2cac --- /dev/null +++ b/processor/logdeduplicationprocessor/go.sum @@ -0,0 +1,150 @@ +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc= +github.com/go-viper/mapstructure/v2 v2.0.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= +github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= +github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= +github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= +github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify 
v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/collector/component v0.106.1 h1:6Xp4tKqnd/JkJDG/C4p1hto+Y5zvk5FwqZIdMCPzZlA= +go.opentelemetry.io/collector/component v0.106.1/go.mod h1:KiVE/5ZayuLlDJTe7mHqHRCn/5LrmF99C7/mKe54mWA= +go.opentelemetry.io/collector/config/configtelemetry v0.106.1 h1:A8nwYnI6brfur5KPFC8GtVX/49pByvVoKSgO4qPXBqg= +go.opentelemetry.io/collector/config/configtelemetry v0.106.1/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= +go.opentelemetry.io/collector/confmap v0.106.1 h1:R7HQIPDRPOEwauBeJUlkT8Elc5f0KQr/s/kQfZi05t0= +go.opentelemetry.io/collector/confmap v0.106.1/go.mod h1:iWdWgvxRYSHdAt5ySgPJq/i6fQMKGNnP5Pt7jOfTXno= +go.opentelemetry.io/collector/consumer v0.106.1 h1:+AQ/Kmoc/g0WP8thwymNkXk1jeWsHDK6XyYfdezcxcc= +go.opentelemetry.io/collector/consumer v0.106.1/go.mod h1:oy6pR/v5o/N9cxsICskyt//bU8k8EG0JeOO1MTDfs5A= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.106.1 h1:uxQjWm2XE7d1OncQDM9tL1ha+otGt1HjoRYIcQRMOfQ= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.106.1/go.mod h1:xQScBf9/PORFaYM6JVPOr7/TcRVEuKcW5XbAXfJByRs= +go.opentelemetry.io/collector/consumer/consumertest v0.106.1 h1:hDdFeVjCLIJ6iLfbiYcV9s+4iboFXbkJ/k3h09qusPw= +go.opentelemetry.io/collector/consumer/consumertest v0.106.1/go.mod h1:WRTYnQ8bYHQrEN6eJZ80oC4pNI7VeDRdsTZI6xs9o5M= +go.opentelemetry.io/collector/featuregate v1.12.0 h1:l5WbV2vMQd2bL8ubfGrbKNtZaeJRckE12CTHvRe47Tw= +go.opentelemetry.io/collector/featuregate v1.12.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= +go.opentelemetry.io/collector/internal/globalgates v0.106.1 h1:0NQHTcykmYNDsNKObJ2XocGCv3WUAQZppfP3o6hZUIA= +go.opentelemetry.io/collector/internal/globalgates v0.106.1/go.mod h1:Z5US6O2xkZAtxVSSBnHAPFZwPhFoxlyKLUvS67Vx4gc= +go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA= +go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= +go.opentelemetry.io/collector/pdata/pprofile v0.106.1 h1:nOLo25YnluNi+zAbU7G24RN86cJ1/EZJc6VEayBlOPo= +go.opentelemetry.io/collector/pdata/pprofile v0.106.1/go.mod h1:chr7lMJIzyXkccnPRkIPhyXtqLZLSReZYhwsggOGEfg= +go.opentelemetry.io/collector/pdata/testdata v0.106.1 h1:JUyLAwKD8o/9jgkBi16zOClxOyY028A7XIXHPV4mNmM= +go.opentelemetry.io/collector/pdata/testdata v0.106.1/go.mod h1:ghdz2RDEzsfigW0J+9oqA4fGmQJ/DJYUhE3vYU6JfhM= +go.opentelemetry.io/collector/processor v0.106.1 h1:W/SmNRkGLf6dOWjdqU5WlDnPLDQJRyHZxI6X8IQwFec= +go.opentelemetry.io/collector/processor v0.106.1/go.mod h1:D4Ni5zbK/QtkIxSbDEZanUcLN9zM3JnlU9hc3Qm/o6I= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng= +go.opentelemetry.io/otel/exporters/prometheus v0.50.0/go.mod h1:pMm5PkUo5YwbLiuEf7t2xg4wbP0/eSJrMxIMxKosynY= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= 
+go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= +go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/processor/logdeduplicationprocessor/internal/metadata/generated_status.go b/processor/logdeduplicationprocessor/internal/metadata/generated_status.go new file mode 100644 index 000000000000..be0363c0b3dc --- /dev/null +++ b/processor/logdeduplicationprocessor/internal/metadata/generated_status.go @@ -0,0 +1,15 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" +) + +var ( + Type = component.MustNewType("logdedup") +) + +const ( + LogsStability = component.StabilityLevelDevelopment +) diff --git a/processor/logdeduplicationprocessor/metadata.yaml b/processor/logdeduplicationprocessor/metadata.yaml new file mode 100644 index 000000000000..1b944254a487 --- /dev/null +++ b/processor/logdeduplicationprocessor/metadata.yaml @@ -0,0 +1,13 @@ +type: logdedup + +status: + class: processor + stability: + development: [logs] + distributions: [] + warnings: [] + codeowners: + active: [BinaryFissionGames, MikeGoldsmith, djaglowski] + +tests: + config: diff --git a/processor/logdeduplicationprocessor/processor.go b/processor/logdeduplicationprocessor/processor.go new file mode 100644 index 000000000000..18f47c37962e --- /dev/null +++ b/processor/logdeduplicationprocessor/processor.go @@ -0,0 +1,136 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logdeduplicationprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdeduplicationprocessor" + +import ( + "context" + "fmt" + "sync" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/plog" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil" +) + +// logDedupProcessor is a processor that counts duplicate instances of logs.
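+// Incoming log records are grouped by resource and aggregated in memory; identical records are counted rather than forwarded until the next export interval.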
+type logDedupProcessor struct { + emitInterval time.Duration + aggregator *logAggregator + remover *fieldRemover + consumer consumer.Logs + logger *zap.Logger + cancel context.CancelFunc + wg sync.WaitGroup + mux sync.Mutex +} + +func newProcessor(cfg *Config, consumer consumer.Logs, logger *zap.Logger) (*logDedupProcessor, error) { + // This should not happen due to config validation, but we check anyway. + timezone, err := time.LoadLocation(cfg.Timezone) + if err != nil { + return nil, fmt.Errorf("invalid timezone: %w", err) + } + + return &logDedupProcessor{ + emitInterval: cfg.Interval, + aggregator: newLogAggregator(cfg.LogCountAttribute, timezone), + remover: newFieldRemover(cfg.ExcludeFields), + consumer: consumer, + logger: logger, + }, nil +} + +// Start starts the processor. +func (p *logDedupProcessor) Start(ctx context.Context, _ component.Host) error { + ctx, cancel := context.WithCancel(ctx) + p.cancel = cancel + + p.wg.Add(1) + go p.handleExportInterval(ctx) + + return nil +} + +// Capabilities returns the consumer's capabilities. +func (p *logDedupProcessor) Capabilities() consumer.Capabilities { + return consumer.Capabilities{MutatesData: true} +} + +// Shutdown stops the processor. +func (p *logDedupProcessor) Shutdown(ctx context.Context) error { + if p.cancel != nil { + p.cancel() + } + + doneChan := make(chan struct{}) + go func() { + defer close(doneChan) + p.wg.Wait() + }() + + select { + case <-ctx.Done(): + return ctx.Err() + case <-doneChan: + return nil + } +} + +// ConsumeLogs processes the logs. +func (p *logDedupProcessor) ConsumeLogs(_ context.Context, pl plog.Logs) error { + p.mux.Lock() + defer p.mux.Unlock() + + for i := 0; i < pl.ResourceLogs().Len(); i++ { + resourceLogs := pl.ResourceLogs().At(i) + resourceAttrs := resourceLogs.Resource().Attributes() + resourceKey := pdatautil.MapHash(resourceAttrs) + for j := 0; j < resourceLogs.ScopeLogs().Len(); j++ { + scope := resourceLogs.ScopeLogs().At(j) + logs := scope.LogRecords() + for k := 0; k < logs.Len(); k++ { + logRecord := logs.At(k) + // Remove excluded fields if any + p.remover.RemoveFields(logRecord) + + // Add the log to the aggregator + p.aggregator.Add(resourceKey, resourceAttrs, logRecord) + } + } + } + + return nil +} + +// handleExportInterval sends logs at the configured interval.
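+// It runs on its own goroutine (started in Start) until the context is canceled, flushing and resetting the aggregator on every tick.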
+func (p *logDedupProcessor) handleExportInterval(ctx context.Context) { + defer p.wg.Done() + + ticker := time.NewTicker(p.emitInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + p.mux.Lock() + + logs := p.aggregator.Export() + // Only send logs if we have some + if logs.LogRecordCount() > 0 { + err := p.consumer.ConsumeLogs(ctx, logs) + if err != nil { + p.logger.Error("failed to consume logs", zap.Error(err)) + } + } + p.aggregator.Reset() + p.mux.Unlock() + } + } +} diff --git a/processor/logdeduplicationprocessor/processor_test.go b/processor/logdeduplicationprocessor/processor_test.go new file mode 100644 index 000000000000..fde7f889bfbc --- /dev/null +++ b/processor/logdeduplicationprocessor/processor_test.go @@ -0,0 +1,200 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logdeduplicationprocessor + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.uber.org/zap" +) + +func Test_newProcessor(t *testing.T) { + testCases := []struct { + desc string + cfg *Config + expected *logDedupProcessor + expectedErr error + }{ + { + desc: "Timezone error", + cfg: &Config{ + LogCountAttribute: defaultLogCountAttribute, + Interval: defaultInterval, + Timezone: "bad timezone", + }, + expected: nil, + expectedErr: errors.New("invalid timezone"), + }, + { + desc: "valid config", + cfg: &Config{ + LogCountAttribute: defaultLogCountAttribute, + Interval: defaultInterval, + Timezone: defaultTimezone, + }, + expected: &logDedupProcessor{ + emitInterval: defaultInterval, + }, + expectedErr: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + logsSink := &consumertest.LogsSink{} + logger := zap.NewNop() + + if tc.expected != nil { + tc.expected.consumer = logsSink + tc.expected.logger = logger + } + + actual, err := newProcessor(tc.cfg, logsSink, logger) + if tc.expectedErr != nil { + require.ErrorContains(t, err, tc.expectedErr.Error()) + require.Nil(t, actual) + } else { + require.NoError(t, err) + require.Equal(t, tc.expected.emitInterval, actual.emitInterval) + require.NotNil(t, actual.aggregator) + require.NotNil(t, actual.remover) + require.Equal(t, tc.expected.consumer, actual.consumer) + require.Equal(t, tc.expected.logger, actual.logger) + } + }) + } +} + +func TestProcessorShutdownCtxError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + logsSink := &consumertest.LogsSink{} + logger := zap.NewNop() + cfg := &Config{ + LogCountAttribute: defaultLogCountAttribute, + Interval: 1 * time.Second, + Timezone: defaultTimezone, + } + + // Create a processor + p, err := newProcessor(cfg, logsSink, logger) + require.NoError(t, err) + + // We don't call p.Start as it can create a non-deterministic situation in Shutdown where we may not exit due to ctx error + + // Create empty cancel func as this is called during shutdown + p.cancel = func() {} + + // Add one to wait group to ensure shutdown blocks and the ctx error will trigger + p.wg.Add(1) + + err = p.Shutdown(ctx) + require.ErrorIs(t, err, context.Canceled) + + // Call done to ensure goroutine spawned in Shutdown doesn't leak + p.wg.Done() +} + +func TestProcessorCapabilities(t 
*testing.T) { + p := &logDedupProcessor{} + require.Equal(t, consumer.Capabilities{MutatesData: true}, p.Capabilities()) +} + +func TestShutdownBeforeStart(t *testing.T) { + logsSink := &consumertest.LogsSink{} + logger := zap.NewNop() + cfg := &Config{ + LogCountAttribute: defaultLogCountAttribute, + Interval: 1 * time.Second, + Timezone: defaultTimezone, + ExcludeFields: []string{ + fmt.Sprintf("%s.remove_me", attributeField), + }, + } + + // Create a processor + p, err := newProcessor(cfg, logsSink, logger) + require.NoError(t, err) + require.NotPanics(t, func() { + err := p.Shutdown(context.Background()) + require.NoError(t, err) + }) +} + +func TestProcessorConsume(t *testing.T) { + logsSink := &consumertest.LogsSink{} + logger := zap.NewNop() + cfg := &Config{ + LogCountAttribute: defaultLogCountAttribute, + Interval: 1 * time.Second, + Timezone: defaultTimezone, + ExcludeFields: []string{ + fmt.Sprintf("%s.remove_me", attributeField), + }, + } + + // Create a processor + p, err := newProcessor(cfg, logsSink, logger) + require.NoError(t, err) + + err = p.Start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err) + + // Create plog payload + logRecord1 := generateTestLogRecord(t, "Body of the log") + logRecord2 := generateTestLogRecord(t, "Body of the log") + + // Differ by timestamp and attribute to be removed + logRecord1.SetTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(time.Minute))) + logRecord2.Attributes().PutBool("remove_me", false) + + logs := plog.NewLogs() + rl := logs.ResourceLogs().AppendEmpty() + rl.Resource().Attributes().PutInt("one", 1) + + sl := rl.ScopeLogs().AppendEmpty() + logRecord1.CopyTo(sl.LogRecords().AppendEmpty()) + logRecord2.CopyTo(sl.LogRecords().AppendEmpty()) + + // Consume the payload + err = p.ConsumeLogs(context.Background(), logs) + require.NoError(t, err) + + // Wait for the logs to be emitted + require.Eventually(t, func() bool { + return logsSink.LogRecordCount() > 0 + }, 3*time.Second, 200*time.Millisecond) + + allSinkLogs := logsSink.AllLogs() + require.Len(t, allSinkLogs, 1) + + consumedLogs := allSinkLogs[0] + require.Equal(t, 1, consumedLogs.LogRecordCount()) + + require.Equal(t, 1, consumedLogs.ResourceLogs().Len()) + consumedRl := consumedLogs.ResourceLogs().At(0) + require.Equal(t, 1, consumedRl.ScopeLogs().Len()) + consumedSl := consumedRl.ScopeLogs().At(0) + require.Equal(t, 1, consumedSl.LogRecords().Len()) + consumedLogRecord := consumedSl.LogRecords().At(0) + + countVal, ok := consumedLogRecord.Attributes().Get(cfg.LogCountAttribute) + require.True(t, ok) + require.Equal(t, int64(2), countVal.Int()) + + // Cleanup + err = p.Shutdown(context.Background()) + require.NoError(t, err) +} diff --git a/processor/logdeduplicationprocessor/timezone.md b/processor/logdeduplicationprocessor/timezone.md new file mode 100644 index 000000000000..370b6662012c --- /dev/null +++ b/processor/logdeduplicationprocessor/timezone.md @@ -0,0 +1,430 @@ +# Timezone + +Below is a list of acceptable values for `timezone` parameters: + +- "UTC" +- "Africa/Abidjan" +- "Africa/Accra" +- "Africa/Addis_Ababa" +- "Africa/Algiers" +- "Africa/Asmara" +- "Africa/Bamako" +- "Africa/Bangui" +- "Africa/Banjul" +- "Africa/Bissau" +- "Africa/Blantyre" +- "Africa/Brazzaville" +- "Africa/Bujumbura" +- "Africa/Cairo" +- "Africa/Casablanca" +- "Africa/Ceuta" +- "Africa/Conakry" +- "Africa/Dakar" +- "Africa/Dar_es_Salaam" +- "Africa/Djibouti" +- "Africa/Douala" +- "Africa/El_Aaiun" +- "Africa/Freetown" +- "Africa/Gaborone" +- "Africa/Harare" +- 
"Africa/Johannesburg" +- "Africa/Juba" +- "Africa/Kampala" +- "Africa/Khartoum" +- "Africa/Kigali" +- "Africa/Kinshasa" +- "Africa/Lagos" +- "Africa/Libreville" +- "Africa/Lome" +- "Africa/Luanda" +- "Africa/Lubumbashi" +- "Africa/Lusaka" +- "Africa/Malabo" +- "Africa/Maputo" +- "Africa/Maseru" +- "Africa/Mbabane" +- "Africa/Mogadishu" +- "Africa/Monrovia" +- "Africa/Nairobi" +- "Africa/Ndjamena" +- "Africa/Niamey" +- "Africa/Nouakchott" +- "Africa/Ouagadougou" +- "Africa/Porto-Novo" +- "Africa/Sao_Tome" +- "Africa/Tripoli" +- "Africa/Tunis" +- "Africa/Windhoek" +- "America/Adak" +- "America/Anchorage" +- "America/Anguilla" +- "America/Antigua" +- "America/Araguaina" +- "America/Argentina/Buenos_Aires" +- "America/Argentina/Catamarca" +- "America/Argentina/Cordoba" +- "America/Argentina/Jujuy" +- "America/Argentina/La_Rioja" +- "America/Argentina/Mendoza" +- "America/Argentina/Rio_Gallegos" +- "America/Argentina/Salta" +- "America/Argentina/San_Juan" +- "America/Argentina/San_Luis" +- "America/Argentina/Tucuman" +- "America/Argentina/Ushuaia" +- "America/Aruba" +- "America/Asuncion" +- "America/Atikokan" +- "America/Bahia" +- "America/Bahia_Banderas" +- "America/Barbados" +- "America/Belem" +- "America/Belize" +- "America/Blanc-Sablon" +- "America/Boa_Vista" +- "America/Bogota" +- "America/Boise" +- "America/Cambridge_Bay" +- "America/Campo_Grande" +- "America/Cancun" +- "America/Caracas" +- "America/Cayenne" +- "America/Cayman" +- "America/Chicago" +- "America/Chihuahua" +- "America/Costa_Rica" +- "America/Creston" +- "America/Cuiaba" +- "America/Curacao" +- "America/Danmarkshavn" +- "America/Dawson" +- "America/Dawson_Creek" +- "America/Denver" +- "America/Detroit" +- "America/Dominica" +- "America/Edmonton" +- "America/Eirunepe" +- "America/El_Salvador" +- "America/Fort_Nelson" +- "America/Fortaleza" +- "America/Glace_Bay" +- "America/Goose_Bay" +- "America/Grand_Turk" +- "America/Grenada" +- "America/Guadeloupe" +- "America/Guatemala" +- "America/Guayaquil" +- "America/Guyana" +- "America/Halifax" +- "America/Havana" +- "America/Hermosillo" +- "America/Indiana/Indianapolis" +- "America/Indiana/Knox" +- "America/Indiana/Marengo" +- "America/Indiana/Petersburg" +- "America/Indiana/Tell_City" +- "America/Indiana/Vevay" +- "America/Indiana/Vincennes" +- "America/Indiana/Winamac" +- "America/Inuvik" +- "America/Iqaluit" +- "America/Jamaica" +- "America/Juneau" +- "America/Kentucky/Louisville" +- "America/Kentucky/Monticello" +- "America/Kralendijk" +- "America/La_Paz" +- "America/Lima" +- "America/Los_Angeles" +- "America/Lower_Princes" +- "America/Maceio" +- "America/Managua" +- "America/Manaus" +- "America/Marigot" +- "America/Martinique" +- "America/Matamoros" +- "America/Mazatlan" +- "America/Menominee" +- "America/Merida" +- "America/Metlakatla" +- "America/Mexico_City" +- "America/Miquelon" +- "America/Moncton" +- "America/Monterrey" +- "America/Montevideo" +- "America/Montserrat" +- "America/Nassau" +- "America/New_York" +- "America/Nipigon" +- "America/Nome" +- "America/Noronha" +- "America/North_Dakota/Beulah" +- "America/North_Dakota/Center" +- "America/North_Dakota/New_Salem" +- "America/Nuuk" +- "America/Ojinaga" +- "America/Panama" +- "America/Pangnirtung" +- "America/Paramaribo" +- "America/Phoenix" +- "America/Port-au-Prince" +- "America/Port_of_Spain" +- "America/Porto_Velho" +- "America/Puerto_Rico" +- "America/Punta_Arenas" +- "America/Rainy_River" +- "America/Rankin_Inlet" +- "America/Recife" +- "America/Regina" +- "America/Resolute" +- "America/Rio_Branco" +- 
"America/Santarem" +- "America/Santiago" +- "America/Santo_Domingo" +- "America/Sao_Paulo" +- "America/Scoresbysund" +- "America/Sitka" +- "America/St_Barthelemy" +- "America/St_Johns" +- "America/St_Kitts" +- "America/St_Lucia" +- "America/St_Thomas" +- "America/St_Vincent" +- "America/Swift_Current" +- "America/Tegucigalpa" +- "America/Thule" +- "America/Thunder_Bay" +- "America/Tijuana" +- "America/Toronto" +- "America/Tortola" +- "America/Vancouver" +- "America/Whitehorse" +- "America/Winnipeg" +- "America/Yakutat" +- "America/Yellowknife" +- "Antarctica/Casey" +- "Antarctica/Davis" +- "Antarctica/DumontDUrville" +- "Antarctica/Macquarie" +- "Antarctica/Mawson" +- "Antarctica/McMurdo" +- "Antarctica/Palmer" +- "Antarctica/Rothera" +- "Antarctica/Syowa" +- "Antarctica/Troll" +- "Antarctica/Vostok" +- "Arctic/Longyearbyen" +- "Asia/Aden" +- "Asia/Almaty" +- "Asia/Amman" +- "Asia/Anadyr" +- "Asia/Aqtau" +- "Asia/Aqtobe" +- "Asia/Ashgabat" +- "Asia/Atyrau" +- "Asia/Baghdad" +- "Asia/Bahrain" +- "Asia/Baku" +- "Asia/Bangkok" +- "Asia/Barnaul" +- "Asia/Beirut" +- "Asia/Bishkek" +- "Asia/Brunei" +- "Asia/Chita" +- "Asia/Choibalsan" +- "Asia/Colombo" +- "Asia/Damascus" +- "Asia/Dhaka" +- "Asia/Dili" +- "Asia/Dubai" +- "Asia/Dushanbe" +- "Asia/Famagusta" +- "Asia/Gaza" +- "Asia/Hebron" +- "Asia/Ho_Chi_Minh" +- "Asia/Hong_Kong" +- "Asia/Hovd" +- "Asia/Irkutsk" +- "Asia/Jakarta" +- "Asia/Jayapura" +- "Asia/Jerusalem" +- "Asia/Kabul" +- "Asia/Kamchatka" +- "Asia/Karachi" +- "Asia/Kathmandu" +- "Asia/Khandyga" +- "Asia/Kolkata" +- "Asia/Krasnoyarsk" +- "Asia/Kuala_Lumpur" +- "Asia/Kuching" +- "Asia/Kuwait" +- "Asia/Macau" +- "Asia/Magadan" +- "Asia/Makassar" +- "Asia/Manila" +- "Asia/Muscat" +- "Asia/Nicosia" +- "Asia/Novokuznetsk" +- "Asia/Novosibirsk" +- "Asia/Omsk" +- "Asia/Oral" +- "Asia/Phnom_Penh" +- "Asia/Pontianak" +- "Asia/Pyongyang" +- "Asia/Qatar" +- "Asia/Qostanay" +- "Asia/Qyzylorda" +- "Asia/Riyadh" +- "Asia/Sakhalin" +- "Asia/Samarkand" +- "Asia/Seoul" +- "Asia/Shanghai" +- "Asia/Singapore" +- "Asia/Srednekolymsk" +- "Asia/Taipei" +- "Asia/Tashkent" +- "Asia/Tbilisi" +- "Asia/Tehran" +- "Asia/Thimphu" +- "Asia/Tokyo" +- "Asia/Tomsk" +- "Asia/Ulaanbaatar" +- "Asia/Urumqi" +- "Asia/Ust-Nera" +- "Asia/Vientiane" +- "Asia/Vladivostok" +- "Asia/Yakutsk" +- "Asia/Yangon" +- "Asia/Yekaterinburg" +- "Asia/Yerevan" +- "Atlantic/Azores" +- "Atlantic/Bermuda" +- "Atlantic/Canary" +- "Atlantic/Cape_Verde" +- "Atlantic/Faroe" +- "Atlantic/Madeira" +- "Atlantic/Reykjavik" +- "Atlantic/South_Georgia" +- "Atlantic/St_Helena" +- "Atlantic/Stanley" +- "Australia/Adelaide" +- "Australia/Brisbane" +- "Australia/Broken_Hill" +- "Australia/Currie" +- "Australia/Darwin" +- "Australia/Eucla" +- "Australia/Hobart" +- "Australia/Lindeman" +- "Australia/Lord_Howe" +- "Australia/Melbourne" +- "Australia/Perth" +- "Australia/Sydney" +- "Europe/Amsterdam" +- "Europe/Andorra" +- "Europe/Astrakhan" +- "Europe/Athens" +- "Europe/Belgrade" +- "Europe/Berlin" +- "Europe/Bratislava" +- "Europe/Brussels" +- "Europe/Bucharest" +- "Europe/Budapest" +- "Europe/Busingen" +- "Europe/Chisinau" +- "Europe/Copenhagen" +- "Europe/Dublin" +- "Europe/Gibraltar" +- "Europe/Guernsey" +- "Europe/Helsinki" +- "Europe/Isle_of_Man" +- "Europe/Istanbul" +- "Europe/Jersey" +- "Europe/Kaliningrad" +- "Europe/Kiev" +- "Europe/Kirov" +- "Europe/Lisbon" +- "Europe/Ljubljana" +- "Europe/London" +- "Europe/Luxembourg" +- "Europe/Madrid" +- "Europe/Malta" +- "Europe/Mariehamn" +- "Europe/Minsk" +- "Europe/Monaco" +- "Europe/Moscow" +- 
"Europe/Oslo" +- "Europe/Paris" +- "Europe/Podgorica" +- "Europe/Prague" +- "Europe/Riga" +- "Europe/Rome" +- "Europe/Samara" +- "Europe/San_Marino" +- "Europe/Sarajevo" +- "Europe/Saratov" +- "Europe/Simferopol" +- "Europe/Skopje" +- "Europe/Sofia" +- "Europe/Stockholm" +- "Europe/Tallinn" +- "Europe/Tirane" +- "Europe/Ulyanovsk" +- "Europe/Uzhgorod" +- "Europe/Vaduz" +- "Europe/Vatican" +- "Europe/Vienna" +- "Europe/Vilnius" +- "Europe/Volgograd" +- "Europe/Warsaw" +- "Europe/Zagreb" +- "Europe/Zaporozhye" +- "Europe/Zurich" +- "Indian/Antananarivo" +- "Indian/Chagos" +- "Indian/Christmas" +- "Indian/Cocos" +- "Indian/Comoro" +- "Indian/Kerguelen" +- "Indian/Mahe" +- "Indian/Maldives" +- "Indian/Mauritius" +- "Indian/Mayotte" +- "Indian/Reunion" +- "Pacific/Apia" +- "Pacific/Auckland" +- "Pacific/Bougainville" +- "Pacific/Chatham" +- "Pacific/Chuuk" +- "Pacific/Easter" +- "Pacific/Efate" +- "Pacific/Enderbury" +- "Pacific/Fakaofo" +- "Pacific/Fiji" +- "Pacific/Funafuti" +- "Pacific/Galapagos" +- "Pacific/Gambier" +- "Pacific/Guadalcanal" +- "Pacific/Guam" +- "Pacific/Honolulu" +- "Pacific/Kiritimati" +- "Pacific/Kosrae" +- "Pacific/Kwajalein" +- "Pacific/Majuro" +- "Pacific/Marquesas" +- "Pacific/Midway" +- "Pacific/Nauru" +- "Pacific/Niue" +- "Pacific/Norfolk" +- "Pacific/Noumea" +- "Pacific/Pago_Pago" +- "Pacific/Palau" +- "Pacific/Pitcairn" +- "Pacific/Pohnpei" +- "Pacific/Port_Moresby" +- "Pacific/Rarotonga" +- "Pacific/Saipan" +- "Pacific/Tahiti" +- "Pacific/Tarawa" +- "Pacific/Tongatapu" +- "Pacific/Wake" +- "Pacific/Wallis" \ No newline at end of file diff --git a/versions.yaml b/versions.yaml index eb7bb5a380ac..2ce962920448 100644 --- a/versions.yaml +++ b/versions.yaml @@ -166,6 +166,7 @@ module-sets: - github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/processor/intervalprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor + - github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdeduplicationprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor - github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor