From 59da8d0f74267d59286857f39aaa4201bdc398b3 Mon Sep 17 00:00:00 2001 From: Adam Boguszewski Date: Tue, 11 Oct 2022 10:52:58 +0200 Subject: [PATCH] feat: add metrics for merge operations with total shard aggregation --- .chloggen/elasticsearch-total-merge.yaml | 5 + .../elasticsearchreceiver/documentation.md | 2 + .../internal/metadata/generated_metrics.go | 130 +++++++++++++++++ .../internal/model/nodestats.go | 8 +- receiver/elasticsearchreceiver/metadata.yaml | 18 +++ receiver/elasticsearchreceiver/scraper.go | 12 +- .../elasticsearchreceiver/scraper_test.go | 6 +- .../expected_metrics/clusterSkip.json | 38 +++++ .../testdata/expected_metrics/full.json | 134 ++++++++++++++++++ .../testdata/expected_metrics/noNodes.json | 38 +++++ .../testdata/integration/expected.7_16_3.json | 38 +++++ .../testdata/integration/expected.7_9_3.json | 38 +++++ .../testdata/sample_payloads/indices.json | 24 ++-- 13 files changed, 476 insertions(+), 15 deletions(-) create mode 100644 .chloggen/elasticsearch-total-merge.yaml diff --git a/.chloggen/elasticsearch-total-merge.yaml b/.chloggen/elasticsearch-total-merge.yaml new file mode 100644 index 000000000000..cf4d6c186c4d --- /dev/null +++ b/.chloggen/elasticsearch-total-merge.yaml @@ -0,0 +1,5 @@ +change_type: enhancement +component: elasticsearchreceiver +note: Add metrics related to merge operations, aggregated for all shards +issues: [14635] + diff --git a/receiver/elasticsearchreceiver/documentation.md b/receiver/elasticsearchreceiver/documentation.md index dc78c0eb8787..193b8fb23d89 100644 --- a/receiver/elasticsearchreceiver/documentation.md +++ b/receiver/elasticsearchreceiver/documentation.md @@ -23,6 +23,8 @@ These are the metrics available for this scraper. | **elasticsearch.cluster.state_update.count** | The number of cluster state update attempts that changed the cluster state since the node started. 
| 1 | Sum(Int) | | | **elasticsearch.cluster.state_update.time** | The cumulative amount of time updating the cluster state since the node started. | ms | Sum(Int) | | | **elasticsearch.index.operations.completed** | The number of operations completed for an index. | {operations} | Sum(Int) | | +| elasticsearch.index.operations.merge.docs_count | The total number of documents in merge operations for an index. | {documents} | Sum(Int) | | +| elasticsearch.index.operations.merge.size | The total size of merged segments for an index. | By | Sum(Int) | | | **elasticsearch.index.operations.time** | Time spent on operations for an index. | ms | Sum(Int) | | | **elasticsearch.index.shards.size** | The size of the shards assigned to this index. | By | Sum(Int) | | | **elasticsearch.indexing_pressure.memory.limit** | Configured memory limit, in bytes, for the indexing requests. | By | Gauge(Int) | | diff --git a/receiver/elasticsearchreceiver/internal/metadata/generated_metrics.go b/receiver/elasticsearchreceiver/internal/metadata/generated_metrics.go index 59aab7a56d79..51fd29418fca 100644 --- a/receiver/elasticsearchreceiver/internal/metadata/generated_metrics.go +++ b/receiver/elasticsearchreceiver/internal/metadata/generated_metrics.go @@ -52,6 +52,8 @@ type MetricsSettings struct { ElasticsearchClusterStateUpdateCount MetricSettings `mapstructure:"elasticsearch.cluster.state_update.count"` ElasticsearchClusterStateUpdateTime MetricSettings `mapstructure:"elasticsearch.cluster.state_update.time"` ElasticsearchIndexOperationsCompleted MetricSettings `mapstructure:"elasticsearch.index.operations.completed"` + ElasticsearchIndexOperationsMergeDocsCount MetricSettings `mapstructure:"elasticsearch.index.operations.merge.docs_count"` + ElasticsearchIndexOperationsMergeSize MetricSettings `mapstructure:"elasticsearch.index.operations.merge.size"` ElasticsearchIndexOperationsTime MetricSettings `mapstructure:"elasticsearch.index.operations.time"` ElasticsearchIndexShardsSize 
MetricSettings `mapstructure:"elasticsearch.index.shards.size"` ElasticsearchIndexingPressureMemoryLimit MetricSettings `mapstructure:"elasticsearch.indexing_pressure.memory.limit"` @@ -156,6 +158,12 @@ func DefaultMetricsSettings() MetricsSettings { ElasticsearchIndexOperationsCompleted: MetricSettings{ Enabled: true, }, + ElasticsearchIndexOperationsMergeDocsCount: MetricSettings{ + Enabled: false, + }, + ElasticsearchIndexOperationsMergeSize: MetricSettings{ + Enabled: false, + }, ElasticsearchIndexOperationsTime: MetricSettings{ Enabled: true, }, @@ -1619,6 +1627,112 @@ func newMetricElasticsearchIndexOperationsCompleted(settings MetricSettings) met return m } +type metricElasticsearchIndexOperationsMergeDocsCount struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills elasticsearch.index.operations.merge.docs_count metric with initial data. 
+func (m *metricElasticsearchIndexOperationsMergeDocsCount) init() { + m.data.SetName("elasticsearch.index.operations.merge.docs_count") + m.data.SetDescription("The total number of documents in merge operations for an index.") + m.data.SetUnit("{documents}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricElasticsearchIndexOperationsMergeDocsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricElasticsearchIndexOperationsMergeDocsCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricElasticsearchIndexOperationsMergeDocsCount) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricElasticsearchIndexOperationsMergeDocsCount(settings MetricSettings) metricElasticsearchIndexOperationsMergeDocsCount { + m := metricElasticsearchIndexOperationsMergeDocsCount{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricElasticsearchIndexOperationsMergeSize struct { + data pmetric.Metric // data buffer for generated metric. 
+ settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills elasticsearch.index.operations.merge.size metric with initial data. +func (m *metricElasticsearchIndexOperationsMergeSize) init() { + m.data.SetName("elasticsearch.index.operations.merge.size") + m.data.SetDescription("The total size of merged segments for an index.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricElasticsearchIndexOperationsMergeSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("aggregation", indexAggregationTypeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricElasticsearchIndexOperationsMergeSize) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricElasticsearchIndexOperationsMergeSize) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricElasticsearchIndexOperationsMergeSize(settings MetricSettings) metricElasticsearchIndexOperationsMergeSize { + m := metricElasticsearchIndexOperationsMergeSize{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricElasticsearchIndexOperationsTime struct { data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. @@ -4462,6 +4576,8 @@ type MetricsBuilder struct { metricElasticsearchClusterStateUpdateCount metricElasticsearchClusterStateUpdateCount metricElasticsearchClusterStateUpdateTime metricElasticsearchClusterStateUpdateTime metricElasticsearchIndexOperationsCompleted metricElasticsearchIndexOperationsCompleted + metricElasticsearchIndexOperationsMergeDocsCount metricElasticsearchIndexOperationsMergeDocsCount + metricElasticsearchIndexOperationsMergeSize metricElasticsearchIndexOperationsMergeSize metricElasticsearchIndexOperationsTime metricElasticsearchIndexOperationsTime metricElasticsearchIndexShardsSize metricElasticsearchIndexShardsSize metricElasticsearchIndexingPressureMemoryLimit metricElasticsearchIndexingPressureMemoryLimit @@ -4549,6 +4665,8 @@ func NewMetricsBuilder(settings MetricsSettings, buildInfo component.BuildInfo, metricElasticsearchClusterStateUpdateCount: newMetricElasticsearchClusterStateUpdateCount(settings.ElasticsearchClusterStateUpdateCount), metricElasticsearchClusterStateUpdateTime: newMetricElasticsearchClusterStateUpdateTime(settings.ElasticsearchClusterStateUpdateTime), metricElasticsearchIndexOperationsCompleted: newMetricElasticsearchIndexOperationsCompleted(settings.ElasticsearchIndexOperationsCompleted), + metricElasticsearchIndexOperationsMergeDocsCount: 
newMetricElasticsearchIndexOperationsMergeDocsCount(settings.ElasticsearchIndexOperationsMergeDocsCount), + metricElasticsearchIndexOperationsMergeSize: newMetricElasticsearchIndexOperationsMergeSize(settings.ElasticsearchIndexOperationsMergeSize), metricElasticsearchIndexOperationsTime: newMetricElasticsearchIndexOperationsTime(settings.ElasticsearchIndexOperationsTime), metricElasticsearchIndexShardsSize: newMetricElasticsearchIndexShardsSize(settings.ElasticsearchIndexShardsSize), metricElasticsearchIndexingPressureMemoryLimit: newMetricElasticsearchIndexingPressureMemoryLimit(settings.ElasticsearchIndexingPressureMemoryLimit), @@ -4692,6 +4810,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricElasticsearchClusterStateUpdateCount.emit(ils.Metrics()) mb.metricElasticsearchClusterStateUpdateTime.emit(ils.Metrics()) mb.metricElasticsearchIndexOperationsCompleted.emit(ils.Metrics()) + mb.metricElasticsearchIndexOperationsMergeDocsCount.emit(ils.Metrics()) + mb.metricElasticsearchIndexOperationsMergeSize.emit(ils.Metrics()) mb.metricElasticsearchIndexOperationsTime.emit(ils.Metrics()) mb.metricElasticsearchIndexShardsSize.emit(ils.Metrics()) mb.metricElasticsearchIndexingPressureMemoryLimit.emit(ils.Metrics()) @@ -4841,6 +4961,16 @@ func (mb *MetricsBuilder) RecordElasticsearchIndexOperationsCompletedDataPoint(t mb.metricElasticsearchIndexOperationsCompleted.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String(), indexAggregationTypeAttributeValue.String()) } +// RecordElasticsearchIndexOperationsMergeDocsCountDataPoint adds a data point to elasticsearch.index.operations.merge.docs_count metric. 
+func (mb *MetricsBuilder) RecordElasticsearchIndexOperationsMergeDocsCountDataPoint(ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { + mb.metricElasticsearchIndexOperationsMergeDocsCount.recordDataPoint(mb.startTime, ts, val, indexAggregationTypeAttributeValue.String()) +} + +// RecordElasticsearchIndexOperationsMergeSizeDataPoint adds a data point to elasticsearch.index.operations.merge.size metric. +func (mb *MetricsBuilder) RecordElasticsearchIndexOperationsMergeSizeDataPoint(ts pcommon.Timestamp, val int64, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { + mb.metricElasticsearchIndexOperationsMergeSize.recordDataPoint(mb.startTime, ts, val, indexAggregationTypeAttributeValue.String()) +} + // RecordElasticsearchIndexOperationsTimeDataPoint adds a data point to elasticsearch.index.operations.time metric. func (mb *MetricsBuilder) RecordElasticsearchIndexOperationsTimeDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation, indexAggregationTypeAttributeValue AttributeIndexAggregationType) { mb.metricElasticsearchIndexOperationsTime.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String(), indexAggregationTypeAttributeValue.String()) diff --git a/receiver/elasticsearchreceiver/internal/model/nodestats.go b/receiver/elasticsearchreceiver/internal/model/nodestats.go index fb0a3bb7f719..59502425a220 100644 --- a/receiver/elasticsearchreceiver/internal/model/nodestats.go +++ b/receiver/elasticsearchreceiver/internal/model/nodestats.go @@ -169,7 +169,7 @@ type NodeStatsNodesInfoIndices struct { IndexingOperations IndexingOperations `json:"indexing"` GetOperation GetOperation `json:"get"` SearchOperations SearchOperations `json:"search"` - MergeOperations BasicIndexOperation `json:"merges"` + MergeOperations MergeOperations `json:"merges"` RefreshOperations BasicIndexOperation `json:"refresh"` FlushOperations BasicIndexOperation `json:"flush"` 
WarmerOperations BasicIndexOperation `json:"warmer"` @@ -195,6 +195,12 @@ type BasicIndexOperation struct { TotalTimeInMs int64 `json:"total_time_in_millis"` } +type MergeOperations struct { + BasicIndexOperation + TotalSizeInBytes int64 `json:"total_size_in_bytes"` + TotalDocs int64 `json:"total_docs"` +} + type IndexingOperations struct { IndexTotal int64 `json:"index_total"` IndexTimeInMs int64 `json:"index_time_in_millis"` diff --git a/receiver/elasticsearchreceiver/metadata.yaml b/receiver/elasticsearchreceiver/metadata.yaml index ee0cb3ab3595..d842183caee6 100644 --- a/receiver/elasticsearchreceiver/metadata.yaml +++ b/receiver/elasticsearchreceiver/metadata.yaml @@ -767,3 +767,21 @@ metrics: value_type: int attributes: [index_aggregation_type] enabled: true + elasticsearch.index.operations.merge.size: + description: The total size of merged segments for an index. + unit: By + sum: + monotonic: true + aggregation: cumulative + value_type: int + attributes: [index_aggregation_type] + enabled: false + elasticsearch.index.operations.merge.docs_count: + description: The total number of documents in merge operations for an index. 
+ unit: "{documents}" + sum: + monotonic: true + aggregation: cumulative + value_type: int + attributes: [index_aggregation_type] + enabled: false diff --git a/receiver/elasticsearchreceiver/scraper.go b/receiver/elasticsearchreceiver/scraper.go index 0f9c99e9e133..71c46ca2f824 100644 --- a/receiver/elasticsearchreceiver/scraper.go +++ b/receiver/elasticsearchreceiver/scraper.go @@ -333,7 +333,7 @@ func (r *elasticsearchScraper) scrapeIndicesMetrics(ctx context.Context, now pco indexStats, err := r.client.IndexStats(ctx, r.cfg.Indices) if err != nil { - errs.AddPartial(4, err) + errs.AddPartial(8, err) return } @@ -359,6 +359,16 @@ func (r *elasticsearchScraper) scrapeOneIndexMetrics(now pcommon.Timestamp, name r.mb.RecordElasticsearchIndexOperationsTimeDataPoint( now, stats.Total.SearchOperations.QueryTimeInMs, metadata.AttributeOperationQuery, metadata.AttributeIndexAggregationTypeTotal, ) + r.mb.RecordElasticsearchIndexOperationsTimeDataPoint( + now, stats.Total.MergeOperations.TotalTimeInMs, metadata.AttributeOperationMerge, metadata.AttributeIndexAggregationTypeTotal, + ) + + r.mb.RecordElasticsearchIndexOperationsMergeSizeDataPoint( + now, stats.Total.MergeOperations.TotalSizeInBytes, metadata.AttributeIndexAggregationTypeTotal, + ) + r.mb.RecordElasticsearchIndexOperationsMergeDocsCountDataPoint( + now, stats.Total.MergeOperations.TotalDocs, metadata.AttributeIndexAggregationTypeTotal, + ) r.mb.RecordElasticsearchIndexShardsSizeDataPoint( now, stats.Total.StoreInfo.SizeInBy, metadata.AttributeIndexAggregationTypeTotal, diff --git a/receiver/elasticsearchreceiver/scraper_test.go b/receiver/elasticsearchreceiver/scraper_test.go index 6310d6596de7..e749847a7ade 100644 --- a/receiver/elasticsearchreceiver/scraper_test.go +++ b/receiver/elasticsearchreceiver/scraper_test.go @@ -41,7 +41,11 @@ const noNodesExpectedMetricsPath = "./testdata/expected_metrics/noNodes.json" func TestScraper(t *testing.T) { t.Parallel() - sc := 
newElasticSearchScraper(componenttest.NewNopReceiverCreateSettings(), createDefaultConfig().(*Config)) + config := createDefaultConfig().(*Config) + config.Metrics.ElasticsearchIndexOperationsMergeSize.Enabled = true + config.Metrics.ElasticsearchIndexOperationsMergeDocsCount.Enabled = true + + sc := newElasticSearchScraper(componenttest.NewNopReceiverCreateSettings(), config) err := sc.start(context.Background(), componenttest.NewNopHost()) require.NoError(t, err) diff --git a/receiver/elasticsearchreceiver/testdata/expected_metrics/clusterSkip.json b/receiver/elasticsearchreceiver/testdata/expected_metrics/clusterSkip.json index 822faa67e16c..430fa9c82e46 100644 --- a/receiver/elasticsearchreceiver/testdata/expected_metrics/clusterSkip.json +++ b/receiver/elasticsearchreceiver/testdata/expected_metrics/clusterSkip.json @@ -2297,6 +2297,25 @@ ], "startTimeUnixNano": "1661811689941624000", "timeUnixNano": "1661811689943245000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "merge" + } + }, + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": "1661811689943245000" } ] }, @@ -2447,6 +2466,25 @@ ], "startTimeUnixNano": "1661811689941624000", "timeUnixNano": "1661811689943245000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "merge" + } + }, + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": "1661811689943245000" } ] }, diff --git a/receiver/elasticsearchreceiver/testdata/expected_metrics/full.json b/receiver/elasticsearchreceiver/testdata/expected_metrics/full.json index be5a6330805e..777be54ae69c 100644 --- a/receiver/elasticsearchreceiver/testdata/expected_metrics/full.json +++ b/receiver/elasticsearchreceiver/testdata/expected_metrics/full.json @@ -2490,6 +2490,25 @@ ], 
"startTimeUnixNano": "1661811689941624000", "timeUnixNano": "1661811689943245000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "merge" + } + }, + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": "1661811689943245000" } ] }, @@ -2518,6 +2537,54 @@ ] }, "unit": "By" + }, + { + "description": "The total size of merged segments for an index.", + "name": "elasticsearch.index.operations.merge.size", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "isMonotonic": true, + "dataPoints": [ + { + "asInt": "64", + "attributes": [ + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": "1661811689943245000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of documents in merge operations for an index.", + "name": "elasticsearch.index.operations.merge.docs_count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "isMonotonic": true, + "dataPoints": [ + { + "asInt": "5", + "attributes": [ + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": "1661811689943245000" + } + ] + }, + "unit": "{documents}" } ], "scope": { @@ -2640,6 +2707,25 @@ ], "startTimeUnixNano": "1661811689941624000", "timeUnixNano": "1661811689943245000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "merge" + } + }, + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": "1661811689943245000" } ] }, @@ -2668,6 +2754,54 @@ ] }, "unit": "By" + }, + { + "description": "The total size of merged segments for an index.", + "name": "elasticsearch.index.operations.merge.size", + 
"sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "isMonotonic": true, + "dataPoints": [ + { + "asInt": "64", + "attributes": [ + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": "1661811689943245000" + } + ] + }, + "unit": "By" + }, + { + "description": "The total number of documents in merge operations for an index.", + "name": "elasticsearch.index.operations.merge.docs_count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "isMonotonic": true, + "dataPoints": [ + { + "asInt": "5", + "attributes": [ + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": "1661811689943245000" + } + ] + }, + "unit": "{documents}" } ], "scope": { diff --git a/receiver/elasticsearchreceiver/testdata/expected_metrics/noNodes.json b/receiver/elasticsearchreceiver/testdata/expected_metrics/noNodes.json index 83f3927d67d2..7efecefdfa2e 100644 --- a/receiver/elasticsearchreceiver/testdata/expected_metrics/noNodes.json +++ b/receiver/elasticsearchreceiver/testdata/expected_metrics/noNodes.json @@ -306,6 +306,25 @@ ], "startTimeUnixNano": "1661811689941624000", "timeUnixNano": "1661811689943245000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "merge" + } + }, + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": "1661811689943245000" } ] }, @@ -456,6 +475,25 @@ ], "startTimeUnixNano": "1661811689941624000", "timeUnixNano": "1661811689943245000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "merge" + } + }, + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": 
"1661811689943245000" } ] }, diff --git a/receiver/elasticsearchreceiver/testdata/integration/expected.7_16_3.json b/receiver/elasticsearchreceiver/testdata/integration/expected.7_16_3.json index 142b4bf9d790..7ffa85407959 100644 --- a/receiver/elasticsearchreceiver/testdata/integration/expected.7_16_3.json +++ b/receiver/elasticsearchreceiver/testdata/integration/expected.7_16_3.json @@ -5400,6 +5400,25 @@ ], "startTimeUnixNano": "1661811689941624000", "timeUnixNano": "1661811689943245000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "merge" + } + }, + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": "1661811689943245000" } ] }, @@ -5550,6 +5569,25 @@ ], "startTimeUnixNano": "1661811689941624000", "timeUnixNano": "1661811689943245000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "merge" + } + }, + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": "1661811689943245000" } ] }, diff --git a/receiver/elasticsearchreceiver/testdata/integration/expected.7_9_3.json b/receiver/elasticsearchreceiver/testdata/integration/expected.7_9_3.json index 1035c0906445..a35a22ee1de9 100644 --- a/receiver/elasticsearchreceiver/testdata/integration/expected.7_9_3.json +++ b/receiver/elasticsearchreceiver/testdata/integration/expected.7_9_3.json @@ -4181,6 +4181,25 @@ ], "startTimeUnixNano": "1661811689941624000", "timeUnixNano": "1661811689943245000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "merge" + } + }, + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": "1661811689943245000" } ] }, @@ -4331,6 +4350,25 @@ ], "startTimeUnixNano": 
"1661811689941624000", "timeUnixNano": "1661811689943245000" + }, + { + "asInt": "12", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "merge" + } + }, + { + "key": "aggregation", + "value": { + "stringValue": "total" + } + } + ], + "startTimeUnixNano": "1661811689941624000", + "timeUnixNano": "1661811689943245000" } ] }, diff --git a/receiver/elasticsearchreceiver/testdata/sample_payloads/indices.json b/receiver/elasticsearchreceiver/testdata/sample_payloads/indices.json index e913fb4b8971..196c40585c0d 100644 --- a/receiver/elasticsearchreceiver/testdata/sample_payloads/indices.json +++ b/receiver/elasticsearchreceiver/testdata/sample_payloads/indices.json @@ -59,9 +59,9 @@ "current_docs" : 0, "current_size_in_bytes" : 0, "total" : 0, - "total_time_in_millis" : 0, - "total_docs" : 0, - "total_size_in_bytes" : 0, + "total_time_in_millis" : 12, + "total_docs" : 5, + "total_size_in_bytes" : 64, "total_stopped_time_in_millis" : 0, "total_throttled_time_in_millis" : 0, "total_auto_throttle_in_bytes" : 20971520 @@ -187,9 +187,9 @@ "current_docs" : 0, "current_size_in_bytes" : 0, "total" : 0, - "total_time_in_millis" : 0, - "total_docs" : 0, - "total_size_in_bytes" : 0, + "total_time_in_millis" : 12, + "total_docs" : 5, + "total_size_in_bytes" : 64, "total_stopped_time_in_millis" : 0, "total_throttled_time_in_millis" : 0, "total_auto_throttle_in_bytes" : 20971520 @@ -319,9 +319,9 @@ "current_docs" : 0, "current_size_in_bytes" : 0, "total" : 0, - "total_time_in_millis" : 0, - "total_docs" : 0, - "total_size_in_bytes" : 0, + "total_time_in_millis" : 12, + "total_docs" : 5, + "total_size_in_bytes" : 64, "total_stopped_time_in_millis" : 0, "total_throttled_time_in_millis" : 0, "total_auto_throttle_in_bytes" : 20971520 @@ -447,9 +447,9 @@ "current_docs" : 0, "current_size_in_bytes" : 0, "total" : 0, - "total_time_in_millis" : 0, - "total_docs" : 0, - "total_size_in_bytes" : 0, + "total_time_in_millis" : 12, + "total_docs" : 5, + 
"total_size_in_bytes" : 64, "total_stopped_time_in_millis" : 0, "total_throttled_time_in_millis" : 0, "total_auto_throttle_in_bytes" : 20971520