Skip to content
This repository was archived by the owner on Aug 23, 2023. It is now read-only.

Commit 9d5fbb2

Browse files
committed
calculate future tolerance based on raw ttl
1 parent 98d0142 commit 9d5fbb2

File tree

15 files changed

+124
-61
lines changed

15 files changed

+124
-61
lines changed

conf/retention.go

-4
Original file line number · Diff line number · Diff line change
@@ -31,10 +31,6 @@ func (r Retentions) Sub(pos int) Retentions {
3131
}
3232
}
3333

34-
func (r Retentions) MaxRetention() int {
35-
return r.Rets[len(r.Rets)-1].MaxRetention()
36-
}
37-
3834
func BuildFromRetentions(rets ...Retention) Retentions {
3935
return Retentions{
4036
Orig: buildOrigFromRetentions(rets),

docker/docker-chaos/metrictank.ini

+3-1
Original file line number · Diff line number · Diff line change
@@ -180,7 +180,9 @@ create-cf = true
180180
schemas-file = /etc/metrictank/storage-schemas.conf
181181
# path to storage-aggregation.conf file
182182
aggregations-file = /etc/metrictank/storage-aggregation.conf
183-
# defines until how far in the future we accept datapoints. defined as a percentage fraction of the maxTTL of the matching retention storage schema
183+
# enables/disables the enforcement of the future tolerance limitation
184+
enforce-future-tolerance = true
185+
# defines until how far in the future we accept datapoints. defined as a percentage fraction of the raw ttl of the matching retention storage schema
184186
future-tolerance-ratio = 10
185187

186188
## instrumentation stats ##

docker/docker-cluster-query/metrictank.ini

+3-1
Original file line number · Diff line number · Diff line change
@@ -180,7 +180,9 @@ create-cf = true
180180
schemas-file = /etc/metrictank/storage-schemas.conf
181181
# path to storage-aggregation.conf file
182182
aggregations-file = /etc/metrictank/storage-aggregation.conf
183-
# defines until how far in the future we accept datapoints. defined as a percentage fraction of the maxTTL of the matching retention storage schema
183+
# enables/disables the enforcement of the future tolerance limitation
184+
enforce-future-tolerance = true
185+
# defines until how far in the future we accept datapoints. defined as a percentage fraction of the raw ttl of the matching retention storage schema
184186
future-tolerance-ratio = 10
185187

186188
## instrumentation stats ##

docker/docker-cluster/metrictank.ini

+3-1
Original file line number · Diff line number · Diff line change
@@ -180,7 +180,9 @@ create-cf = true
180180
schemas-file = /etc/metrictank/storage-schemas.conf
181181
# path to storage-aggregation.conf file
182182
aggregations-file = /etc/metrictank/storage-aggregation.conf
183-
# defines until how far in the future we accept datapoints. defined as a percentage fraction of the maxTTL of the matching retention storage schema
183+
# enables/disables the enforcement of the future tolerance limitation
184+
enforce-future-tolerance = true
185+
# defines until how far in the future we accept datapoints. defined as a percentage fraction of the raw ttl of the matching retention storage schema
184186
future-tolerance-ratio = 10
185187

186188
## instrumentation stats ##

docker/docker-dev-custom-cfg-kafka/metrictank.ini

+3-1
Original file line number · Diff line number · Diff line change
@@ -180,7 +180,9 @@ create-cf = true
180180
schemas-file = /etc/metrictank/storage-schemas.conf
181181
# path to storage-aggregation.conf file
182182
aggregations-file = /etc/metrictank/storage-aggregation.conf
183-
# defines until how far in the future we accept datapoints. defined as a percentage fraction of the maxTTL of the matching retention storage schema
183+
# enables/disables the enforcement of the future tolerance limitation
184+
enforce-future-tolerance = true
185+
# defines until how far in the future we accept datapoints. defined as a percentage fraction of the raw ttl of the matching retention storage schema
184186
future-tolerance-ratio = 10
185187

186188
## instrumentation stats ##

docs/config.md

+3-1
Original file line number · Diff line number · Diff line change
@@ -220,7 +220,9 @@ create-cf = true
220220
schemas-file = /etc/metrictank/storage-schemas.conf
221221
# path to storage-aggregation.conf file
222222
aggregations-file = /etc/metrictank/storage-aggregation.conf
223-
# defines until how far in the future we accept datapoints. defined as a percentage fraction of the maxTTL of the matching retention storage schema
223+
# enables/disables the enforcement of the future tolerance limitation
224+
enforce-future-tolerance = true
225+
# defines until how far in the future we accept datapoints. defined as a percentage fraction of the raw ttl of the matching retention storage schema
224226
future-tolerance-ratio = 10
225227
```
226228

mdata/aggmetric.go

+10-8
Original file line number · Diff line number · Diff line change
@@ -55,7 +55,7 @@ type AggMetric struct {
5555
// it's the callers responsibility to make sure agg is not nil in that case!
5656
// If reorderWindow is greater than 0, a reorder buffer is enabled. In that case data points with duplicate timestamps
5757
// the behavior is defined by reorderAllowUpdate
58-
func NewAggMetric(store Store, cachePusher cache.CachePusher, key schema.AMKey, retentions conf.Retentions, reorderWindow, interval uint32, agg *conf.Aggregation, reorderAllowUpdate, dropFirstChunk bool, ingestFrom int64, futureTolerance uint32) *AggMetric {
58+
func NewAggMetric(store Store, cachePusher cache.CachePusher, key schema.AMKey, retentions conf.Retentions, reorderWindow, interval uint32, agg *conf.Aggregation, reorderAllowUpdate, dropFirstChunk bool, ingestFrom int64) *AggMetric {
5959

6060
// note: during parsing of retentions, we assure there's at least 1.
6161
ret := retentions.Rets[0]
@@ -68,7 +68,7 @@ func NewAggMetric(store Store, cachePusher cache.CachePusher, key schema.AMKey,
6868
numChunks: ret.NumChunks,
6969
chunks: make([]*chunk.Chunk, 0, ret.NumChunks),
7070
dropFirstChunk: dropFirstChunk,
71-
futureTolerance: futureTolerance,
71+
futureTolerance: uint32(ret.MaxRetention()) * uint32(futureToleranceRatio) / 100,
7272
ttl: uint32(ret.MaxRetention()),
7373
// we set LastWrite here to make sure a new Chunk doesn't get immediately
7474
// garbage collected right after creating it, before we can push to it.
@@ -450,13 +450,15 @@ func (a *AggMetric) Add(ts uint32, val float64) {
450450

451451
// need to check if ts > futureTolerance to prevent that we reject a datapoint
452452
// because the ts value has wrapped around the uint32 boundary
453-
if a.futureTolerance > 0 && ts > a.futureTolerance && int64(ts-a.futureTolerance) > time.Now().Unix() {
454-
if log.IsLevelEnabled(log.DebugLevel) {
455-
log.Debugf("AM: discarding metric <%d,%f>: timestamp is too far in the future, accepting timestamps up to %d seconds into the future", ts, val, a.futureTolerance)
456-
}
457-
453+
if ts > a.futureTolerance && int64(ts-a.futureTolerance) > time.Now().Unix() {
458454
discardedSampleTooFarAhead.Inc()
459-
return
455+
456+
if enforceFutureTolerance {
457+
if log.IsLevelEnabled(log.DebugLevel) {
458+
log.Debugf("AM: discarding metric <%d,%f>: timestamp is too far in the future, accepting timestamps up to %d seconds into the future", ts, val, a.futureTolerance)
459+
}
460+
return
461+
}
460462
}
461463

462464
a.Lock()

mdata/aggmetric_test.go

+76-25
Original file line number · Diff line number · Diff line change
@@ -132,7 +132,7 @@ func testMetricPersistOptionalPrimary(t *testing.T, primary bool) {
132132

133133
chunkAddCount, chunkSpan := uint32(10), uint32(300)
134134
rets := conf.MustParseRetentions("1s:1s:5min:5:true")
135-
agg := NewAggMetric(mockstore, &mockCache, test.GetAMKey(42), rets, 0, chunkSpan, nil, false, false, 0, 0)
135+
agg := NewAggMetric(mockstore, &mockCache, test.GetAMKey(42), rets, 0, chunkSpan, nil, false, false, 0)
136136

137137
for ts := chunkSpan; ts <= chunkSpan*chunkAddCount; ts += chunkSpan {
138138
agg.Add(ts, 1)
@@ -168,7 +168,7 @@ func TestAggMetric(t *testing.T) {
168168
cluster.Init("default", "test", time.Now(), "http", 6060)
169169

170170
ret := conf.MustParseRetentions("1s:1s:2min:5:true")
171-
c := NewChecker(t, NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, nil, false, false, 0, 0))
171+
c := NewChecker(t, NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, nil, false, false, 0))
172172

173173
// chunk t0's: 120, 240, 360, 480, 600, 720, 840, 960
174174

@@ -246,7 +246,7 @@ func TestAggMetricWithReorderBuffer(t *testing.T) {
246246
AggregationMethod: []conf.Method{conf.Avg},
247247
}
248248
ret := conf.MustParseRetentions("1s:1s:2min:5:true")
249-
c := NewChecker(t, NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 10, 1, &agg, false, false, 0, 0))
249+
c := NewChecker(t, NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 10, 1, &agg, false, false, 0))
250250

251251
// basic adds and verifies with test data
252252
c.Add(121, 121)
@@ -284,7 +284,7 @@ func TestAggMetricDropFirstChunk(t *testing.T) {
284284
cluster.Manager.SetPrimary(true)
285285
mockstore.Reset()
286286
rets := conf.MustParseRetentions("1s:1s:10s:5:true")
287-
m := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), rets, 0, 1, nil, false, true, 0, 0)
287+
m := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), rets, 0, 1, nil, false, true, 0)
288288
m.Add(10, 10)
289289
m.Add(11, 11)
290290
m.Add(12, 12)
@@ -312,7 +312,7 @@ func TestAggMetricIngestFrom(t *testing.T) {
312312
mockstore.Reset()
313313
ingestFrom := int64(25)
314314
ret := conf.MustParseRetentions("1s:1s:10s:5:true")
315-
m := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, nil, false, false, ingestFrom, 0)
315+
m := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, nil, false, false, ingestFrom)
316316
m.Add(10, 10)
317317
m.Add(11, 11)
318318
m.Add(12, 12)
@@ -342,27 +342,78 @@ func TestAggMetricFutureTolerance(t *testing.T) {
342342
cluster.Init("default", "test", time.Now(), "http", 6060)
343343
cluster.Manager.SetPrimary(true)
344344
mockstore.Reset()
345-
ret := conf.MustParseRetentions("1s:1s:10s:5:true")
346-
aggMetricLimited := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, nil, false, false, 0, 60)
347-
aggMetricUnlimited := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, nil, false, false, 0, 0)
345+
ret := conf.MustParseRetentions("1s:10m:6h:5:true")
346+
347+
_futureToleranceRatio := futureToleranceRatio
348+
_enforceFutureTolerance := enforceFutureTolerance
349+
defer func() {
350+
futureToleranceRatio = _futureToleranceRatio
351+
enforceFutureTolerance = _enforceFutureTolerance
352+
discardedSampleTooFarAhead.SetUint32(0)
353+
}()
354+
355+
// with a raw retention of 600s, this will result in a future tolerance of 60s
356+
futureToleranceRatio = 10
357+
aggMetricTolerate60 := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, nil, false, false, 0)
358+
359+
// will not tolerate future datapoints at all
360+
futureToleranceRatio = 0
361+
aggMetricTolerate0 := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, nil, false, false, 0)
362+
363+
// add datapoint which is 30 seconds in the future to both aggmetrics, they should both accept it
364+
// because enforcement of future tolerance is disabled, but the one with tolerance 0 should increase
365+
// the counter of data points that would have been rejected
366+
discardedSampleTooFarAhead.SetUint32(0)
367+
enforceFutureTolerance = false
368+
aggMetricTolerate60.Add(uint32(time.Now().Unix()+30), 10)
369+
if len(aggMetricTolerate60.chunks) != 1 {
370+
t.Fatalf("expected to have 1 chunk in aggmetric, but there were %d", len(aggMetricTolerate60.chunks))
371+
}
372+
if discardedSampleTooFarAhead.Peek() != 0 {
373+
t.Fatalf("expected the discardedSampleTooFarAhead count to be 0, but it was %d", discardedSampleTooFarAhead.Peek())
374+
}
348375

349-
// add datapoint which is 90 seconds in the future
350-
// the limited aggmetric should not accept it because it is more than 60 seconds in the future
351-
// the unlimited aggmetric should accept it because there is no limit
352-
aggMetricLimited.Add(uint32(time.Now().Unix()+90), 10)
353-
aggMetricUnlimited.Add(uint32(time.Now().Unix()+90), 10)
354-
if len(aggMetricLimited.chunks) != 0 {
355-
t.Fatalf("expected to have no chunks in limited aggmetric, but there were %d", len(aggMetricLimited.chunks))
376+
aggMetricTolerate0.Add(uint32(time.Now().Unix()+30), 10)
377+
if len(aggMetricTolerate0.chunks) != 1 {
378+
t.Fatalf("expected to have 1 chunk in aggmetric, but there were %d", len(aggMetricTolerate0.chunks))
356379
}
357-
if len(aggMetricUnlimited.chunks) != 1 {
358-
t.Fatalf("expected to have 1 chunk in the unlimited aggmetric, but there were %d", len(aggMetricUnlimited.chunks))
380+
if discardedSampleTooFarAhead.Peek() != 1 {
381+
t.Fatalf("expected the discardedSampleTooFarAhead count to be 1, but it was %d", discardedSampleTooFarAhead.Peek())
359382
}
360383

361-
// add datapoint where the timestamp is now
362-
// the limited aggmetric should accept this one, because it only rejects datapoints from at least 60 seconds in the future
363-
aggMetricLimited.Add(uint32(time.Now().Unix()), 10)
364-
if len(aggMetricLimited.chunks) != 1 {
365-
t.Fatalf("expected to have 1 chunk in the limited aggmetric, but there were %d", len(aggMetricLimited.chunks))
384+
// enable the enforcement of the future tolerance limit and re-initialize the two agg metrics
385+
// then add a data point with time stamp 30 sec in the future to both aggmetrics again.
386+
// this time only the one that tolerates up to 60 secs should accept the datapoint.
387+
discardedSampleTooFarAhead.SetUint32(0)
388+
enforceFutureTolerance = true
389+
futureToleranceRatio = 10
390+
aggMetricTolerate60 = NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, nil, false, false, 0)
391+
futureToleranceRatio = 0
392+
aggMetricTolerate0 = NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, nil, false, false, 0)
393+
394+
aggMetricTolerate60.Add(uint32(time.Now().Unix()+30), 10)
395+
if len(aggMetricTolerate60.chunks) != 1 {
396+
t.Fatalf("expected to have 1 chunk in aggmetric, but there were %d", len(aggMetricTolerate60.chunks))
397+
}
398+
if discardedSampleTooFarAhead.Peek() != 0 {
399+
t.Fatalf("expected the discardedSampleTooFarAhead count to be 0, but it was %d", discardedSampleTooFarAhead.Peek())
400+
}
401+
402+
aggMetricTolerate0.Add(uint32(time.Now().Unix()+30), 10)
403+
if len(aggMetricTolerate0.chunks) != 0 {
404+
t.Fatalf("expected to have 0 chunks in aggmetric, but there were %d", len(aggMetricTolerate0.chunks))
405+
}
406+
if discardedSampleTooFarAhead.Peek() != 1 {
407+
t.Fatalf("expected the discardedSampleTooFarAhead count to be 1, but it was %d", discardedSampleTooFarAhead.Peek())
408+
}
409+
410+
// add another datapoint with timestamp of now() to the aggmetric tolerating 0, should be accepted
411+
aggMetricTolerate0.Add(uint32(time.Now().Unix()), 10)
412+
if len(aggMetricTolerate0.chunks) != 1 {
413+
t.Fatalf("expected to have 1 chunk in aggmetric, but there were %d", len(aggMetricTolerate0.chunks))
414+
}
415+
if discardedSampleTooFarAhead.Peek() != 1 {
416+
t.Fatalf("expected the discardedSampleTooFarAhead count to be 1, but it was %d", discardedSampleTooFarAhead.Peek())
366417
}
367418
}
368419

@@ -405,7 +456,7 @@ func TestGetAggregated(t *testing.T) {
405456
AggregationMethod: []conf.Method{conf.Sum},
406457
}
407458

408-
m := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, &agg, false, false, 0, 0)
459+
m := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, &agg, false, false, 0)
409460
m.Add(10, 10)
410461
m.Add(11, 11)
411462
m.Add(12, 12)
@@ -451,7 +502,7 @@ func TestGetAggregatedIngestFrom(t *testing.T) {
451502
AggregationMethod: []conf.Method{conf.Sum},
452503
}
453504

454-
m := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, &agg, false, false, ingestFrom, 0)
505+
m := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(42), ret, 0, 1, &agg, false, false, ingestFrom)
455506
m.Add(10, 10)
456507
m.Add(11, 11)
457508
m.Add(12, 12)
@@ -491,7 +542,7 @@ func BenchmarkAggMetricAdd(b *testing.B) {
491542

492543
// each chunk contains 180 points
493544
rets := conf.MustParseRetentions("10s:1000000000s,30min:1")
494-
metric := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(0), rets, 0, 10, nil, false, false, 0, 0)
545+
metric := NewAggMetric(mockstore, &cache.MockCache{}, test.GetAMKey(0), rets, 0, 10, nil, false, false, 0)
495546

496547
max := uint32(b.N*10 + 1)
497548
for t := uint32(1); t < max; t += 10 {

mdata/aggmetrics.go

+1-5
Original file line number · Diff line number · Diff line change
@@ -151,10 +151,6 @@ func (ms *AggMetrics) GetOrCreate(key schema.MKey, schemaId, aggId uint16, inter
151151

152152
agg := Aggregations.Get(aggId)
153153
confSchema := Schemas.Get(schemaId)
154-
var futureTolerance uint32
155-
if futureToleranceRatio > 0 {
156-
futureTolerance = uint32(confSchema.Retentions.MaxRetention()) * uint32(futureToleranceRatio) / 100
157-
}
158154

159155
// if it wasn't there, get the write lock and prepare to add it
160156
// but first we need to check again if someone has added it in
@@ -169,7 +165,7 @@ func (ms *AggMetrics) GetOrCreate(key schema.MKey, schemaId, aggId uint16, inter
169165
return m
170166
}
171167
ingestFrom := ms.ingestFrom[key.Org]
172-
m = NewAggMetric(ms.store, ms.cachePusher, k, confSchema.Retentions, confSchema.ReorderWindow, interval, &agg, confSchema.ReorderAllowUpdate, ms.dropFirstChunk, ingestFrom, futureTolerance)
168+
m = NewAggMetric(ms.store, ms.cachePusher, k, confSchema.Retentions, confSchema.ReorderWindow, interval, &agg, confSchema.ReorderAllowUpdate, ms.dropFirstChunk, ingestFrom)
173169
ms.Metrics[key.Org][key.Key] = m
174170
active := len(ms.Metrics[key.Org])
175171
ms.Unlock()

mdata/aggmetrics_test.go

-1
Original file line number · Diff line number · Diff line change
@@ -98,7 +98,6 @@ func TestAggMetricsGetOrCreate(t *testing.T) {
9898
t.Fatalf("Expected GetOrCreate to return twice the same metric for the same key")
9999
}
100100

101-
// verify that disabling future tolerance limitation works
102101
futureToleranceRatio = 0
103102
testKey2, _ := schema.AMKeyFromString("1.12345678901234567890123456789013")
104103
metric3 := aggMetrics.GetOrCreate(testKey2.MKey, 1, 0, 10).(*AggMetric)

mdata/aggregator.go

+6-6
Original file line number · Diff line number · Diff line change
@@ -47,31 +47,31 @@ func NewAggregator(store Store, cachePusher cache.CachePusher, key schema.AMKey,
4747
case conf.Avg:
4848
if aggregator.sumMetric == nil {
4949
key.Archive = schema.NewArchive(schema.Sum, span)
50-
aggregator.sumMetric = NewAggMetric(store, cachePusher, key, retentions, 0, span, nil, false, dropFirstChunk, ingestFrom, 0)
50+
aggregator.sumMetric = NewAggMetric(store, cachePusher, key, retentions, 0, span, nil, false, dropFirstChunk, ingestFrom)
5151
}
5252
if aggregator.cntMetric == nil {
5353
key.Archive = schema.NewArchive(schema.Cnt, span)
54-
aggregator.cntMetric = NewAggMetric(store, cachePusher, key, retentions, 0, span, nil, false, dropFirstChunk, ingestFrom, 0)
54+
aggregator.cntMetric = NewAggMetric(store, cachePusher, key, retentions, 0, span, nil, false, dropFirstChunk, ingestFrom)
5555
}
5656
case conf.Sum:
5757
if aggregator.sumMetric == nil {
5858
key.Archive = schema.NewArchive(schema.Sum, span)
59-
aggregator.sumMetric = NewAggMetric(store, cachePusher, key, retentions, 0, span, nil, false, dropFirstChunk, ingestFrom, 0)
59+
aggregator.sumMetric = NewAggMetric(store, cachePusher, key, retentions, 0, span, nil, false, dropFirstChunk, ingestFrom)
6060
}
6161
case conf.Lst:
6262
if aggregator.lstMetric == nil {
6363
key.Archive = schema.NewArchive(schema.Lst, span)
64-
aggregator.lstMetric = NewAggMetric(store, cachePusher, key, retentions, 0, span, nil, false, dropFirstChunk, ingestFrom, 0)
64+
aggregator.lstMetric = NewAggMetric(store, cachePusher, key, retentions, 0, span, nil, false, dropFirstChunk, ingestFrom)
6565
}
6666
case conf.Max:
6767
if aggregator.maxMetric == nil {
6868
key.Archive = schema.NewArchive(schema.Max, span)
69-
aggregator.maxMetric = NewAggMetric(store, cachePusher, key, retentions, 0, span, nil, false, dropFirstChunk, ingestFrom, 0)
69+
aggregator.maxMetric = NewAggMetric(store, cachePusher, key, retentions, 0, span, nil, false, dropFirstChunk, ingestFrom)
7070
}
7171
case conf.Min:
7272
if aggregator.minMetric == nil {
7373
key.Archive = schema.NewArchive(schema.Min, span)
74-
aggregator.minMetric = NewAggMetric(store, cachePusher, key, retentions, 0, span, nil, false, dropFirstChunk, ingestFrom, 0)
74+
aggregator.minMetric = NewAggMetric(store, cachePusher, key, retentions, 0, span, nil, false, dropFirstChunk, ingestFrom)
7575
}
7676
}
7777
}

mdata/init.go

+6-4
Original file line numberDiff line numberDiff line change
@@ -88,9 +88,10 @@ var (
8888
Aggregations conf.Aggregations
8989
Schemas conf.Schemas
9090

91-
schemasFile = "/etc/metrictank/storage-schemas.conf"
92-
aggFile = "/etc/metrictank/storage-aggregation.conf"
93-
futureToleranceRatio = uint(10)
91+
schemasFile = "/etc/metrictank/storage-schemas.conf"
92+
aggFile = "/etc/metrictank/storage-aggregation.conf"
93+
futureToleranceRatio = uint(10)
94+
enforceFutureTolerance = true
9495

9596
promActiveMetrics = promauto.NewGaugeVec(prometheus.GaugeOpts{
9697
Namespace: "metrictank",
@@ -109,7 +110,8 @@ func ConfigSetup() {
109110
retentionConf := flag.NewFlagSet("retention", flag.ExitOnError)
110111
retentionConf.StringVar(&schemasFile, "schemas-file", "/etc/metrictank/storage-schemas.conf", "path to storage-schemas.conf file")
111112
retentionConf.StringVar(&aggFile, "aggregations-file", "/etc/metrictank/storage-aggregation.conf", "path to storage-aggregation.conf file")
112-
retentionConf.UintVar(&futureToleranceRatio, "future-tolerance-ratio", 10, "defines until how far in the future we accept datapoints. defined as a percentage fraction of the maxTTL of the matching retention storage schema")
113+
retentionConf.UintVar(&futureToleranceRatio, "future-tolerance-ratio", 10, "defines until how far in the future we accept datapoints. defined as a percentage fraction of the raw ttl of the matching retention storage schema")
114+
retentionConf.BoolVar(&enforceFutureTolerance, "enforce-future-tolerance", true, "enables/disables the enforcement of the future tolerance limitation")
113115
globalconf.Register("retention", retentionConf, flag.ExitOnError)
114116
}
115117

0 commit comments

Comments (0)