From 7eeb1bc9d30b90e091991d31fab35ce8f03e2efc Mon Sep 17 00:00:00 2001
From: SuperQ
Date: Tue, 17 Jan 2023 12:04:45 +0100
Subject: [PATCH] Replace group with resolution in compact metrics.

Compaction metrics have too high a cardinality, causing metric bloat on
large installations. The group information is better suited to logs.

* Replace the `group` label with a `resolution` label on the compaction
  counters.

Fixes: https://github.com/thanos-io/thanos/issues/5841

Signed-off-by: SuperQ
---
 CHANGELOG.md                       |   1 +
 examples/dashboards/compact.json   |  12 +--
 examples/dashboards/overview.json  |   2 +-
 mixin/dashboards/compact.libsonnet |  14 ++--
 pkg/block/metadata/meta.go         |   7 +-
 pkg/compact/compact.go             |  67 ++++++++--------
 pkg/compact/compact_e2e_test.go    |  32 +++-----
 pkg/compact/compact_test.go        | 119 +++++++----------------------
 8 files changed, 93 insertions(+), 161 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1b84e3f867..d55592284c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -39,6 +39,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
 - [#6399](https://github.com/thanos-io/thanos/pull/6399) *: Fix double-counting bug in http_request_duration metric

 ### Changed
+- [#6049](https://github.com/thanos-io/thanos/pull/6049) Compact: *breaking :warning:* Replace the `group` label with `resolution` in compact metrics to avoid cardinality explosion on installations with large numbers of compaction groups.
 - [#6168](https://github.com/thanos-io/thanos/pull/6168) Receiver: Make ketama hashring fail early when configured with number of nodes lower than the replication factor.
 - [#6201](https://github.com/thanos-io/thanos/pull/6201) Query-Frontend: Disable absent and absent_over_time for vertical sharding.
 - [#6212](https://github.com/thanos-io/thanos/pull/6212) Query-Frontend: Disable scalar for vertical sharding.
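To illustrate the cardinality change, here is a minimal, self-contained sketch, not the actual Thanos wiring: with a `group` label the counter grows one series per compaction group (resolution plus external-labels hash), while a `resolution` label caps the series count at the handful of known downsampling levels (raw, 5m, 1h).

    // Illustrative only; prometheus/promauto APIs are from
    // github.com/prometheus/client_golang.
    package main

    import (
    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promauto"
    )

    func main() {
    	reg := prometheus.NewRegistry()
    	compactions := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
    		Name: "thanos_compact_group_compactions_total",
    		Help: "Total number of group compaction attempts that resulted in a new block.",
    	}, []string{"resolution"}) // was []string{"group"}

    	// One series per downsampling resolution, regardless of group count.
    	compactions.WithLabelValues("0").Inc()       // raw blocks
    	compactions.WithLabelValues("300000").Inc()  // 5m resolution
    	compactions.WithLabelValues("3600000").Inc() // 1h resolution
    }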
diff --git a/examples/dashboards/compact.json b/examples/dashboards/compact.json
index 8fecc5a2c2..b0f53f004a 100644
--- a/examples/dashboards/compact.json
+++ b/examples/dashboards/compact.json
@@ -19,7 +19,7 @@
       "dashLength": 10,
       "dashes": false,
       "datasource": "$datasource",
-      "description": "Shows rate of execution for compactions against blocks that are stored in the bucket by compaction group.",
+      "description": "Shows rate of execution for compactions against blocks that are stored in the bucket by compaction resolution.",
       "fill": 10,
       "id": 1,
       "legend": {
@@ -46,10 +46,10 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "sum by (job, group) (rate(thanos_compact_group_compactions_total{job=~\"$job\"}[$__rate_interval]))",
+          "expr": "sum by (job, resolution) (rate(thanos_compact_group_compactions_total{job=~\"$job\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 2,
-          "legendFormat": "compaction {{job}} {{group}}",
+          "legendFormat": "compaction {{job}} {{resolution}}",
           "legendLink": null,
           "step": 10
         }
@@ -186,7 +186,7 @@
       "dashLength": 10,
       "dashes": false,
       "datasource": "$datasource",
-      "description": "Shows rate of execution for downsampling against blocks that are stored in the bucket by compaction group.",
+      "description": "Shows rate of execution for downsampling against blocks that are stored in the bucket by compaction resolution.",
       "fill": 10,
       "id": 3,
       "legend": {
@@ -213,10 +213,10 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "sum by (job, group) (rate(thanos_compact_downsample_total{job=~\"$job\"}[$__rate_interval]))",
+          "expr": "sum by (job, resolution) (rate(thanos_compact_downsample_total{job=~\"$job\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 2,
-          "legendFormat": "downsample {{job}} {{group}}",
+          "legendFormat": "downsample {{job}} {{resolution}}",
           "legendLink": null,
           "step": 10
         }

diff --git a/examples/dashboards/overview.json b/examples/dashboards/overview.json
index 29bd665535..d22b570ec3 100644
--- a/examples/dashboards/overview.json
+++ b/examples/dashboards/overview.json
@@ -1928,7 +1928,7 @@
       "dashLength": 10,
       "dashes": false,
       "datasource": "$datasource",
-      "description": "Shows rate of execution for compactions against blocks that are stored in the bucket by compaction group.",
+      "description": "Shows rate of execution for compactions against blocks that are stored in the bucket.",
       "fill": 10,
       "id": 19,
       "legend": {
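For anyone consuming these queries outside Grafana, a hedged sketch of the updated aggregation using the stock Prometheus Go client (the address is a placeholder, and this client is not part of this patch):

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"github.com/prometheus/client_golang/api"
    	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
    )

    func main() {
    	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"}) // placeholder
    	if err != nil {
    		panic(err)
    	}
    	// The aggregation now keys on the bounded "resolution" label instead of
    	// the per-group "group" label used by the old dashboards.
    	const q = `sum by (job, resolution) (rate(thanos_compact_group_compactions_total[5m]))`
    	result, warnings, err := v1.NewAPI(client).Query(context.Background(), q, time.Now())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(result, warnings)
    }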
diff --git a/mixin/dashboards/compact.libsonnet b/mixin/dashboards/compact.libsonnet
index 031633d467..9018e360d1 100644
--- a/mixin/dashboards/compact.libsonnet
+++ b/mixin/dashboards/compact.libsonnet
@@ -18,11 +18,11 @@ local g = import '../lib/thanos-grafana-builder/builder.libsonnet';
     .addPanel(
       g.panel(
         'Rate',
-        'Shows rate of execution for compactions against blocks that are stored in the bucket by compaction group.'
+        'Shows rate of execution for compactions against blocks that are stored in the bucket by compaction resolution.'
       ) +
       g.queryPanel(
-        'sum by (%(dimensions)s, group) (rate(thanos_compact_group_compactions_total{%(selector)s}[$__rate_interval]))' % thanos.compact.dashboard,
-        'compaction {{job}} {{group}}'
+        'sum by (%(dimensions)s, resolution) (rate(thanos_compact_group_compactions_total{%(selector)s}[$__rate_interval]))' % thanos.compact.dashboard,
+        'compaction {{job}} {{resolution}}'
       ) +
       g.stack
     )
@@ -43,11 +43,11 @@ local g = import '../lib/thanos-grafana-builder/builder.libsonnet';
     .addPanel(
       g.panel(
         'Rate',
-        'Shows rate of execution for downsampling against blocks that are stored in the bucket by compaction group.'
+        'Shows rate of execution for downsampling against blocks that are stored in the bucket by compaction resolution.'
       ) +
       g.queryPanel(
-        'sum by (%(dimensions)s, group) (rate(thanos_compact_downsample_total{%(selector)s}[$__rate_interval]))' % thanos.compact.dashboard,
-        'downsample {{job}} {{group}}'
+        'sum by (%(dimensions)s, resolution) (rate(thanos_compact_downsample_total{%(selector)s}[$__rate_interval]))' % thanos.compact.dashboard,
+        'downsample {{job}} {{resolution}}'
       ) +
       g.stack
     )
@@ -178,7 +178,7 @@ local g = import '../lib/thanos-grafana-builder/builder.libsonnet';
     .addPanel(
       g.panel(
         'Compaction Rate',
-        'Shows rate of execution for compactions against blocks that are stored in the bucket by compaction group.'
+        'Shows rate of execution for compactions against blocks that are stored in the bucket.'
       ) +
       g.queryPanel(
         'sum by (%(dimensions)s) (rate(thanos_compact_group_compactions_total{%(selector)s}[$__rate_interval]))' % thanos.dashboard.overview,

diff --git a/pkg/block/metadata/meta.go b/pkg/block/metadata/meta.go
index 787a03c241..ae3ba188e6 100644
--- a/pkg/block/metadata/meta.go
+++ b/pkg/block/metadata/meta.go
@@ -151,12 +151,17 @@ func InjectThanos(logger log.Logger, bdir string, meta Thanos, downsampledMeta *
 	return newMeta, nil
 }

-// Returns a unique identifier for the compaction group the block belongs to.
+// GroupKey returns a unique identifier for the compaction group the block belongs to.
 // It considers the downsampling resolution and the block's labels.
 func (m *Thanos) GroupKey() string {
 	return fmt.Sprintf("%d@%v", m.Downsample.Resolution, labels.FromMap(m.Labels).Hash())
 }

+// ResolutionString returns the block's resolution as a string.
+func (m *Thanos) ResolutionString() string {
+	return fmt.Sprintf("%d", m.Downsample.Resolution)
+}
+
 // WriteToDir writes the encoded meta into <dir>/meta.json.
 func (m Meta) WriteToDir(logger log.Logger, dir string) error {
 	// Make any changes to the file appear atomic.
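A simplified, self-contained sketch of the two identifiers (the real methods live on metadata.Thanos, and the label hash here stands in for labels.FromMap(m.Labels).Hash()): GroupKey is unique per resolution-plus-labels pair and therefore unbounded, while ResolutionString takes only as many values as there are downsampling levels.

    package main

    import "fmt"

    type block struct {
    	resolution int64  // downsampling resolution in milliseconds
    	labelsHash uint64 // stand-in for the external-labels hash
    }

    // groupKey mirrors Thanos.GroupKey: one value per compaction group.
    func (b block) groupKey() string {
    	return fmt.Sprintf("%d@%v", b.resolution, b.labelsHash)
    }

    // resolutionString mirrors Thanos.ResolutionString: one value per resolution.
    func (b block) resolutionString() string {
    	return fmt.Sprintf("%d", b.resolution)
    }

    func main() {
    	b := block{resolution: 300000, labelsHash: 3735928559}
    	fmt.Println(b.groupKey())         // "300000@3735928559" - grows with groups
    	fmt.Println(b.resolutionString()) // "300000" - bounded
    }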
diff --git a/pkg/compact/compact.go b/pkg/compact/compact.go
index 585d5d6b4d..84e078728e 100644
--- a/pkg/compact/compact.go
+++ b/pkg/compact/compact.go
@@ -256,23 +256,23 @@ func NewDefaultGrouper(
 		compactions: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "thanos_compact_group_compactions_total",
 			Help: "Total number of group compaction attempts that resulted in a new block.",
-		}, []string{"group"}),
+		}, []string{"resolution"}),
 		compactionRunsStarted: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "thanos_compact_group_compaction_runs_started_total",
 			Help: "Total number of group compaction attempts.",
-		}, []string{"group"}),
+		}, []string{"resolution"}),
 		compactionRunsCompleted: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "thanos_compact_group_compaction_runs_completed_total",
 			Help: "Total number of group completed compaction runs. This also includes compactor group runs that resulted with no compaction.",
-		}, []string{"group"}),
+		}, []string{"resolution"}),
 		compactionFailures: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "thanos_compact_group_compactions_failures_total",
 			Help: "Total number of failed group compactions.",
-		}, []string{"group"}),
+		}, []string{"resolution"}),
 		verticalCompactions: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "thanos_compact_group_vertical_compactions_total",
 			Help: "Total number of group compaction attempts that resulted in a new block based on overlapping blocks.",
-		}, []string{"group"}),
+		}, []string{"resolution"}),
 		blocksMarkedForNoCompact: blocksMarkedForNoCompact,
 		garbageCollectedBlocks:   garbageCollectedBlocks,
 		blocksMarkedForDeletion:  blocksMarkedForDeletion,
@@ -291,19 +291,20 @@ func (g *DefaultGrouper) Groups(blocks map[ulid.ULID]*metadata.Meta) (res []*Gro
 		group, ok := groups[groupKey]
 		if !ok {
 			lbls := labels.FromMap(m.Thanos.Labels)
+			resolutionLabel := m.Thanos.ResolutionString()
 			group, err = NewGroup(
-				log.With(g.logger, "group", fmt.Sprintf("%d@%v", m.Thanos.Downsample.Resolution, lbls.String()), "groupKey", groupKey),
+				log.With(g.logger, "group", fmt.Sprintf("%s@%v", resolutionLabel, lbls.String()), "groupKey", groupKey),
 				g.bkt,
 				groupKey,
 				lbls,
 				m.Thanos.Downsample.Resolution,
 				g.acceptMalformedIndex,
 				g.enableVerticalCompaction,
-				g.compactions.WithLabelValues(groupKey),
-				g.compactionRunsStarted.WithLabelValues(groupKey),
-				g.compactionRunsCompleted.WithLabelValues(groupKey),
-				g.compactionFailures.WithLabelValues(groupKey),
-				g.verticalCompactions.WithLabelValues(groupKey),
+				g.compactions.WithLabelValues(resolutionLabel),
+				g.compactionRunsStarted.WithLabelValues(resolutionLabel),
+				g.compactionRunsCompleted.WithLabelValues(resolutionLabel),
+				g.compactionFailures.WithLabelValues(resolutionLabel),
+				g.verticalCompactions.WithLabelValues(resolutionLabel),
 				g.garbageCollectedBlocks,
 				g.blocksMarkedForDeletion,
 				g.blocksMarkedForNoCompact,
@@ -492,8 +493,8 @@ func (cg *Group) Resolution() int64 {

 // CompactProgressMetrics contains Prometheus metrics related to compaction progress.
 type CompactProgressMetrics struct {
-	NumberOfCompactionRuns   *prometheus.GaugeVec
-	NumberOfCompactionBlocks *prometheus.GaugeVec
+	NumberOfCompactionRuns   prometheus.Gauge
+	NumberOfCompactionBlocks prometheus.Gauge
 }

 // ProgressCalculator calculates the progress of the compaction process for a given slice of Groups.
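The progress metrics drop their vector dimension entirely. A sketch of that difference under the same client_golang APIs, with simplified names: a GaugeVec needs WithLabelValues and Reset per group key, while a plain Gauge is set directly.

    package main

    import (
    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promauto"
    )

    func main() {
    	reg := prometheus.NewRegistry()

    	// Before: one child gauge per group key (unbounded series).
    	perGroup := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
    		Name: "todo_compactions_by_group", Help: "illustrative only",
    	}, []string{"group"})
    	perGroup.WithLabelValues("0@123456").Set(4)

    	// After: a single gauge aggregated over all groups.
    	total := promauto.With(reg).NewGauge(prometheus.GaugeOpts{
    		Name: "thanos_compact_todo_compactions", Help: "number of compactions to be done",
    	})
    	total.Set(4)
    }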
@@ -512,14 +513,14 @@ func NewCompactionProgressCalculator(reg prometheus.Registerer, planner *tsdbBas
 	return &CompactionProgressCalculator{
 		planner: planner,
 		CompactProgressMetrics: &CompactProgressMetrics{
-			NumberOfCompactionRuns: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
+			NumberOfCompactionRuns: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
 				Name: "thanos_compact_todo_compactions",
 				Help: "number of compactions to be done",
-			}, []string{"group"}),
-			NumberOfCompactionBlocks: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
+			}),
+			NumberOfCompactionBlocks: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
 				Name: "thanos_compact_todo_compaction_blocks",
 				Help: "number of blocks planned to be compacted",
-			}, []string{"group"}),
+			}),
 		},
 	}
 }
@@ -568,12 +569,12 @@ func (ps *CompactionProgressCalculator) ProgressCalculate(ctx context.Context, g
 		groups = tmpGroups
 	}

-	ps.CompactProgressMetrics.NumberOfCompactionRuns.Reset()
-	ps.CompactProgressMetrics.NumberOfCompactionBlocks.Reset()
+	ps.CompactProgressMetrics.NumberOfCompactionRuns.Set(0)
+	ps.CompactProgressMetrics.NumberOfCompactionBlocks.Set(0)

 	for key, iters := range groupCompactions {
-		ps.CompactProgressMetrics.NumberOfCompactionRuns.WithLabelValues(key).Add(float64(iters))
-		ps.CompactProgressMetrics.NumberOfCompactionBlocks.WithLabelValues(key).Add(float64(groupBlocks[key]))
+		ps.CompactProgressMetrics.NumberOfCompactionRuns.Add(float64(iters))
+		ps.CompactProgressMetrics.NumberOfCompactionBlocks.Add(float64(groupBlocks[key]))
 	}

 	return nil
@@ -581,7 +582,7 @@ func (ps *CompactionProgressCalculator) ProgressCalculate(ctx context.Context, g

 // DownsampleProgressMetrics contains Prometheus metrics related to downsampling progress.
 type DownsampleProgressMetrics struct {
-	NumberOfBlocksDownsampled *prometheus.GaugeVec
+	NumberOfBlocksDownsampled prometheus.Gauge
 }

 // DownsampleProgressCalculator contains DownsampleMetrics, which are updated during the downsampling simulation process.
@@ -593,10 +594,10 @@ type DownsampleProgressCalculator struct {
 func NewDownsampleProgressCalculator(reg prometheus.Registerer) *DownsampleProgressCalculator {
 	return &DownsampleProgressCalculator{
 		DownsampleProgressMetrics: &DownsampleProgressMetrics{
-			NumberOfBlocksDownsampled: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
+			NumberOfBlocksDownsampled: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
 				Name: "thanos_compact_todo_downsample_blocks",
 				Help: "number of blocks to be downsampled",
-			}, []string{"group"}),
+			}),
 		},
 	}
 }
@@ -666,9 +667,9 @@ func (ds *DownsampleProgressCalculator) ProgressCalculate(ctx context.Context, g
 		}
 	}

-	ds.DownsampleProgressMetrics.NumberOfBlocksDownsampled.Reset()
-	for key, blocks := range groupBlocks {
-		ds.DownsampleProgressMetrics.NumberOfBlocksDownsampled.WithLabelValues(key).Add(float64(blocks))
+	ds.DownsampleProgressMetrics.NumberOfBlocksDownsampled.Set(0)
+	for _, blocks := range groupBlocks {
+		ds.DownsampleProgressMetrics.NumberOfBlocksDownsampled.Add(float64(blocks))
 	}

 	return nil
@@ -676,7 +677,7 @@ func (ds *DownsampleProgressCalculator) ProgressCalculate(ctx context.Context, g

 // RetentionProgressMetrics contains Prometheus metrics related to retention progress.
 type RetentionProgressMetrics struct {
-	NumberOfBlocksToDelete *prometheus.GaugeVec
+	NumberOfBlocksToDelete prometheus.Gauge
 }

 // RetentionProgressCalculator contains RetentionProgressMetrics, which are updated during the retention simulation process.
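The recompute pattern shared by all three calculators, extracted into a sketch (groupBlocks stands in for each calculator's internal per-group tally): the single gauge is zeroed once per simulation pass, then every group's count is folded into it, replacing the old Reset()/WithLabelValues(key).Add(...) cycle.

    package main

    import "github.com/prometheus/client_golang/prometheus"

    // recompute zeroes the gauge and accumulates all per-group counts into it.
    func recompute(todo prometheus.Gauge, groupBlocks map[string]float64) {
    	todo.Set(0)
    	for _, blocks := range groupBlocks {
    		todo.Add(blocks)
    	}
    }

    func main() {
    	g := prometheus.NewGauge(prometheus.GaugeOpts{Name: "todo", Help: "sketch"})
    	recompute(g, map[string]float64{"g1": 2, "g2": 3}) // gauge now reads 5
    }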
@@ -690,10 +691,10 @@ func NewRetentionProgressCalculator(reg prometheus.Registerer, retentionByResolu
 	return &RetentionProgressCalculator{
 		retentionByResolution: retentionByResolution,
 		RetentionProgressMetrics: &RetentionProgressMetrics{
-			NumberOfBlocksToDelete: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
+			NumberOfBlocksToDelete: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
 				Name: "thanos_compact_todo_deletion_blocks",
 				Help: "number of blocks that have crossed their retention period",
-			}, []string{"group"}),
+			}),
 		},
 	}
 }
@@ -715,9 +716,9 @@ func (rs *RetentionProgressCalculator) ProgressCalculate(ctx context.Context, gr
 		}
 	}

-	rs.RetentionProgressMetrics.NumberOfBlocksToDelete.Reset()
-	for key, blocks := range groupBlocks {
-		rs.RetentionProgressMetrics.NumberOfBlocksToDelete.WithLabelValues(key).Add(float64(blocks))
+	rs.RetentionProgressMetrics.NumberOfBlocksToDelete.Set(0)
+	for _, blocks := range groupBlocks {
+		rs.RetentionProgressMetrics.NumberOfBlocksToDelete.Add(float64(blocks))
 	}

 	return nil
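The test assertions below read metric values with promtest.ToFloat64 and count exposed series with Thanos's MetricCount helper. The standard client_golang testutil package offers equivalent checks; a hedged, standalone sketch of what the new expectations verify, namely one series per resolution seen rather than one per group:

    package compact_sketch_test

    import (
    	"testing"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promauto"
    	"github.com/prometheus/client_golang/prometheus/testutil"
    )

    func TestResolutionLabelCardinality(t *testing.T) {
    	reg := prometheus.NewRegistry()
    	compactions := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
    		Name: "thanos_compact_group_compactions_total",
    		Help: "Total number of group compaction attempts that resulted in a new block.",
    	}, []string{"resolution"})

    	// Many groups may share these two resolutions: still only two series.
    	compactions.WithLabelValues("0").Add(2)
    	compactions.WithLabelValues("300000").Add(0)

    	if got := testutil.ToFloat64(compactions.WithLabelValues("0")); got != 2 {
    		t.Fatalf("want 2, got %v", got)
    	}
    	if got := testutil.CollectAndCount(compactions); got != 2 {
    		t.Fatalf("want 2 series, got %d", got)
    	}
    }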
diff --git a/pkg/compact/compact_e2e_test.go b/pkg/compact/compact_e2e_test.go
index 9b40da16dc..1551552ade 100644
--- a/pkg/compact/compact_e2e_test.go
+++ b/pkg/compact/compact_e2e_test.go
@@ -324,26 +324,18 @@ func testGroupCompactE2e(t *testing.T, mergeFunc storage.VerticalChunkSeriesMerg
 	testutil.Equals(t, 5.0, promtest.ToFloat64(sy.metrics.blocksMarkedForDeletion))
 	testutil.Equals(t, 1.0, promtest.ToFloat64(grouper.blocksMarkedForNoCompact))
 	testutil.Equals(t, 0.0, promtest.ToFloat64(sy.metrics.garbageCollectionFailures))
-	testutil.Equals(t, 4, MetricCount(grouper.compactions))
-	testutil.Equals(t, 1.0, promtest.ToFloat64(grouper.compactions.WithLabelValues(metas[0].Thanos.GroupKey())))
-	testutil.Equals(t, 1.0, promtest.ToFloat64(grouper.compactions.WithLabelValues(metas[7].Thanos.GroupKey())))
-	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactions.WithLabelValues(metas[4].Thanos.GroupKey())))
-	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactions.WithLabelValues(metas[5].Thanos.GroupKey())))
-	testutil.Equals(t, 4, MetricCount(grouper.compactionRunsStarted))
-	testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(metas[0].Thanos.GroupKey())))
-	testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(metas[7].Thanos.GroupKey())))
-	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(metas[4].Thanos.GroupKey())))
-	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(metas[5].Thanos.GroupKey())))
-	testutil.Equals(t, 4, MetricCount(grouper.compactionRunsCompleted))
-	testutil.Equals(t, 2.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(metas[0].Thanos.GroupKey())))
-	testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(metas[7].Thanos.GroupKey())))
-	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(metas[4].Thanos.GroupKey())))
-	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(metas[5].Thanos.GroupKey())))
-	testutil.Equals(t, 4, MetricCount(grouper.compactionFailures))
-	testutil.Equals(t, 1.0, promtest.ToFloat64(grouper.compactionFailures.WithLabelValues(metas[0].Thanos.GroupKey())))
-	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionFailures.WithLabelValues(metas[7].Thanos.GroupKey())))
-	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionFailures.WithLabelValues(metas[4].Thanos.GroupKey())))
-	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionFailures.WithLabelValues(metas[5].Thanos.GroupKey())))
+	testutil.Equals(t, 2, MetricCount(grouper.compactions))
+	testutil.Equals(t, 2.0, promtest.ToFloat64(grouper.compactions.WithLabelValues(metas[0].Thanos.ResolutionString())))
+	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactions.WithLabelValues(metas[5].Thanos.ResolutionString())))
+	testutil.Equals(t, 2, MetricCount(grouper.compactionRunsStarted))
+	testutil.Equals(t, 6.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(metas[0].Thanos.ResolutionString())))
+	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(metas[5].Thanos.ResolutionString())))
+	testutil.Equals(t, 2, MetricCount(grouper.compactionRunsCompleted))
+	testutil.Equals(t, 5.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(metas[0].Thanos.ResolutionString())))
+	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(metas[5].Thanos.ResolutionString())))
+	testutil.Equals(t, 2, MetricCount(grouper.compactionFailures))
+	testutil.Equals(t, 1.0, promtest.ToFloat64(grouper.compactionFailures.WithLabelValues(metas[0].Thanos.ResolutionString())))
+	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionFailures.WithLabelValues(metas[5].Thanos.ResolutionString())))

 	_, err = os.Stat(dir)
 	testutil.Assert(t, os.IsNotExist(err), "dir %s should be remove after compaction.", dir)

diff --git a/pkg/compact/compact_test.go b/pkg/compact/compact_test.go
index 0112c0a8e2..1d3764907c 100644
--- a/pkg/compact/compact_test.go
+++ b/pkg/compact/compact_test.go
@@ -214,8 +214,6 @@ func TestRetentionProgressCalculate(t *testing.T) {
 	temp := promauto.With(reg).NewCounter(prometheus.CounterOpts{Name: "test_metric_for_group", Help: "this is a test metric for compact progress tests"})
 	grouper := NewDefaultGrouper(logger, bkt, false, false, reg, temp, temp, temp, "", 1, 1)

-	type groupedResult map[string]float64
-
 	type retInput struct {
 		meta   []*metadata.Meta
 		resMap map[ResolutionLevel]time.Duration
 	}
@@ -238,7 +236,7 @@ func TestRetentionProgressCalculate(t *testing.T) {
 	for _, tcase := range []struct {
 		testName string
 		input    retInput
-		expected groupedResult
+		expected float64
 	}{
 		{
 			// In this test case, blocks belonging to multiple groups are tested. All blocks in the first group and the first block in the second group are beyond their retention period. In the second group, the second block still has some time before its retention period and hence, is not marked to be deleted.
@@ -257,11 +255,7 @@ func TestRetentionProgressCalculate(t *testing.T) {
 					ResolutionLevel(downsample.ResLevel2): 6 * 30 * 24 * time.Hour, // 6 months retention.
 				},
 			},
-			expected: groupedResult{
-				keys[0]: 2.0,
-				keys[1]: 1.0,
-				keys[2]: 0.0,
-			},
+			expected: 3.0,
 		},
 		{
 			// In this test case, all the blocks are retained since they have not yet crossed their retention period.
 			testName: "retain_test",
@@ -277,11 +271,7 @@ func TestRetentionProgressCalculate(t *testing.T) {
 					ResolutionLevel(downsample.ResLevel2): 16 * 30 * 24 * time.Hour, // 6 months retention.
 				},
 			},
-			expected: groupedResult{
-				keys[0]: 0,
-				keys[1]: 0,
-				keys[2]: 0,
-			},
+			expected: 0.0,
 		},
 		{
 			// In this test case, all the blocks are deleted since they are past their retention period.
@@ -298,11 +288,7 @@ func TestRetentionProgressCalculate(t *testing.T) {
 					ResolutionLevel(downsample.ResLevel2): 6 * 30 * 24 * time.Hour, // 6 months retention.
 				},
 			},
-			expected: groupedResult{
-				keys[0]: 1,
-				keys[1]: 1,
-				keys[2]: 1,
-			},
+			expected: 3.0,
 		},
 		{
 			// In this test case, none of the blocks are marked for deletion since the retention period is 0d i.e. indefinitely long retention.
@@ -319,11 +305,7 @@ func TestRetentionProgressCalculate(t *testing.T) {
 					ResolutionLevel(downsample.ResLevel2): 0,
 				},
 			},
-			expected: groupedResult{
-				keys[0]: 0,
-				keys[1]: 0,
-				keys[2]: 0,
-			},
+			expected: 0.0,
 		},
 	} {
 		if ok := t.Run(tcase.testName, func(t *testing.T) {
@@ -338,11 +320,7 @@ func TestRetentionProgressCalculate(t *testing.T) {
 			testutil.Ok(t, err)
 			metrics := ps.RetentionProgressMetrics
 			testutil.Ok(t, err)
-			for key := range tcase.expected {
-				a, err := metrics.NumberOfBlocksToDelete.GetMetricWithLabelValues(key)
-				testutil.Ok(t, err)
-				testutil.Equals(t, tcase.expected[key], promtestutil.ToFloat64(a))
-			}
+			testutil.Equals(t, tcase.expected, promtestutil.ToFloat64(metrics.NumberOfBlocksToDelete))
 		}); !ok {
 			return
 		}
@@ -353,7 +331,6 @@ func TestCompactProgressCalculate(t *testing.T) {
 	type planResult struct {
 		compactionBlocks, compactionRuns float64
 	}
-	type groupedResult map[string]planResult

 	logger := log.NewNopLogger()
 	reg := prometheus.NewRegistry()
@@ -383,7 +360,7 @@ func TestCompactProgressCalculate(t *testing.T) {
 	for _, tcase := range []struct {
 		testName string
 		input    []*metadata.Meta
-		expected groupedResult
+		expected planResult
 	}{
 		{
 			// This test has a single compaction run with two blocks from the second group compacted.
@@ -398,19 +375,9 @@ func TestCompactProgressCalculate(t *testing.T) {
 				createBlockMeta(6, int64(time.Duration(12)*time.Hour/time.Millisecond), int64(time.Duration(20)*time.Hour/time.Millisecond), map[string]string{"a": "1", "b": "2"}, 1, []uint64{}),
 				createBlockMeta(7, int64(time.Duration(20)*time.Hour/time.Millisecond), int64(time.Duration(28)*time.Hour/time.Millisecond), map[string]string{"a": "1", "b": "2"}, 1, []uint64{}),
 			},
-			expected: map[string]planResult{
-				keys[0]: {
-					compactionRuns:   0.0,
-					compactionBlocks: 0.0,
-				},
-				keys[1]: {
-					compactionRuns:   1.0,
-					compactionBlocks: 2.0,
-				},
-				keys[2]: {
-					compactionRuns:   0.0,
-					compactionBlocks: 0.0,
-				},
+			expected: planResult{
+				compactionRuns:   1.0,
+				compactionBlocks: 2.0,
 			},
 		},
 		{
@@ -425,15 +392,9 @@ func TestCompactProgressCalculate(t *testing.T) {
 				createBlockMeta(1, int64(time.Duration(2)*time.Hour/time.Millisecond), int64(time.Duration(4)*time.Hour/time.Millisecond), map[string]string{"b": "2"}, 0, []uint64{}),
 				createBlockMeta(2, int64(time.Duration(4)*time.Hour/time.Millisecond), int64(time.Duration(6)*time.Hour/time.Millisecond), map[string]string{"b": "2"}, 0, []uint64{}),
 			},
-			expected: map[string]planResult{
-				keys[0]: {
-					compactionRuns:   3.0,
-					compactionBlocks: 6.0,
-				},
-				keys[1]: {
-					compactionRuns:   0.0,
-					compactionBlocks: 0.0,
-				},
+			expected: planResult{
+				compactionRuns:   3.0,
+				compactionBlocks: 6.0,
 			},
 		},
 		{
@@ -446,11 +407,9 @@ func TestCompactProgressCalculate(t *testing.T) {
 				createBlockMeta(3, int64(time.Duration(6)*time.Hour/time.Millisecond), int64(time.Duration(8)*time.Hour/time.Millisecond), map[string]string{"a": "1", "b": "2"}, 1, []uint64{}),
 				createBlockMeta(4, int64(time.Duration(10)*time.Hour/time.Millisecond), int64(time.Duration(12)*time.Hour/time.Millisecond), map[string]string{"a": "1", "b": "2"}, 1, []uint64{}),
 			},
-			expected: map[string]planResult{
-				keys[2]: {
-					compactionRuns:   1.0,
-					compactionBlocks: 2.0,
-				},
+			expected: planResult{
+				compactionRuns:   1.0,
+				compactionBlocks: 2.0,
 			},
 		},
 	} {
@@ -465,14 +424,8 @@ func TestCompactProgressCalculate(t *testing.T) {
 			testutil.Ok(t, err)
 			metrics := ps.CompactProgressMetrics
 			testutil.Ok(t, err)
-			for key := range tcase.expected {
-				a, err := metrics.NumberOfCompactionBlocks.GetMetricWithLabelValues(key)
-				testutil.Ok(t, err)
-				b, err := metrics.NumberOfCompactionRuns.GetMetricWithLabelValues(key)
-				testutil.Ok(t, err)
-				testutil.Equals(t, tcase.expected[key].compactionBlocks, promtestutil.ToFloat64(a))
-				testutil.Equals(t, tcase.expected[key].compactionRuns, promtestutil.ToFloat64(b))
-			}
+			testutil.Equals(t, tcase.expected.compactionBlocks, promtestutil.ToFloat64(metrics.NumberOfCompactionBlocks))
+			testutil.Equals(t, tcase.expected.compactionRuns, promtestutil.ToFloat64(metrics.NumberOfCompactionRuns))
 		}); !ok {
 			return
 		}
@@ -482,7 +435,6 @@ func TestCompactProgressCalculate(t *testing.T) {
 func TestDownsampleProgressCalculate(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	logger := log.NewNopLogger()
-	type groupedResult map[string]float64

 	keys := make([]string, 3)
 	m := make([]metadata.Meta, 3)
@@ -505,7 +457,7 @@ func TestDownsampleProgressCalculate(t *testing.T) {
 	for _, tcase := range []struct {
 		testName string
 		input    []*metadata.Meta
-		expected groupedResult
+		expected float64
 	}{
 		{
 			// This test case has blocks from multiple groups and resolution levels. Only the blocks in the second group should be downsampled since the others either have time differences not in the range for their resolution, or a resolution which should not be downsampled.
@@ -516,11 +468,7 @@ func TestDownsampleProgressCalculate(t *testing.T) {
 				createBlockMeta(9, 0, downsample.ResLevel2DownsampleRange, map[string]string{"b": "2"}, downsample.ResLevel1, []uint64{8, 11}),
 				createBlockMeta(8, 0, downsample.ResLevel2DownsampleRange, map[string]string{"a": "1", "b": "2"}, downsample.ResLevel2, []uint64{9, 10}),
 			},
-			expected: map[string]float64{
-				keys[0]: 0.0,
-				keys[1]: 2.0,
-				keys[2]: 0.0,
-			},
+			expected: 2.0,
 		},
 		{
 			// This is a test case for resLevel0, with the correct time difference threshold.
 			// This block should be downsampled.
 			input: []*metadata.Meta{
 				createBlockMeta(9, 0, downsample.ResLevel1DownsampleRange, map[string]string{"a": "1"}, downsample.ResLevel0, []uint64{10, 11}),
 			},
-			expected: map[string]float64{
-				keys[0]: 1.0,
-			},
+			expected: 1.0,
 		},
 		{
 			// This is a test case for resLevel1, with the correct time difference threshold.
 			// This block should be downsampled.
 			input: []*metadata.Meta{
 				createBlockMeta(9, 0, downsample.ResLevel2DownsampleRange, map[string]string{"b": "2"}, downsample.ResLevel1, []uint64{10, 11}),
 			},
-			expected: map[string]float64{
-				keys[1]: 1.0,
-			},
+			expected: 1.0,
 		},
 		{
 			// This is a test case for resLevel2.
 			// This block should not be downsampled.
 			input: []*metadata.Meta{
 				createBlockMeta(10, 0, downsample.ResLevel2DownsampleRange, map[string]string{"a": "1", "b": "2"}, downsample.ResLevel2, []uint64{11, 12}),
 			},
-			expected: map[string]float64{
-				keys[2]: 0.0,
-			},
+			expected: 0.0,
 		},
 		{
 			// This is a test case for resLevel0, with incorrect time difference, below the threshold.
 			// This block should not be downsampled.
@@ -559,9 +501,7 @@ func TestDownsampleProgressCalculate(t *testing.T) {
 			input: []*metadata.Meta{
 				createBlockMeta(9, 1, downsample.ResLevel1DownsampleRange, map[string]string{"a": "1"}, downsample.ResLevel0, []uint64{10, 11}),
 			},
-			expected: map[string]float64{
-				keys[0]: 0.0,
-			},
+			expected: 0.0,
 		},
 		{
 			// This is a test case for resLevel1, with incorrect time difference, below the threshold.
 			// This block should not be downsampled.
@@ -570,9 +510,7 @@ func TestDownsampleProgressCalculate(t *testing.T) {
 			input: []*metadata.Meta{
 				createBlockMeta(9, 1, downsample.ResLevel2DownsampleRange, map[string]string{"b": "2"}, downsample.ResLevel1, []uint64{10, 11}),
 			},
-			expected: map[string]float64{
-				keys[1]: 0.0,
-			},
+			expected: 0.0,
 		},
 	} {
 		if ok := t.Run(tcase.testName, func(t *testing.T) {
@@ -586,12 +524,7 @@ func TestDownsampleProgressCalculate(t *testing.T) {
 			err = ds.ProgressCalculate(context.Background(), groups)
 			testutil.Ok(t, err)
 			metrics := ds.DownsampleProgressMetrics
-			for key := range tcase.expected {
-				a, err := metrics.NumberOfBlocksDownsampled.GetMetricWithLabelValues(key)
-
-				testutil.Ok(t, err)
-				testutil.Equals(t, tcase.expected[key], promtestutil.ToFloat64(a))
-			}
+			testutil.Equals(t, tcase.expected, promtestutil.ToFloat64(metrics.NumberOfBlocksDownsampled))
 		}); !ok {
 			return
 		}