@@ -27,6 +27,7 @@ type Aggregator struct {
 	sumMetric  *AggMetric
 	cntMetric  *AggMetric
 	lstMetric  *AggMetric
+	ingestFrom uint32
 }
 
 func NewAggregator(store Store, cachePusher cache.CachePusher, key schema.AMKey, ret conf.Retention, agg conf.Aggregation, dropFirstChunk bool, ingestFrom int64) *Aggregator {
@@ -38,36 +39,39 @@ func NewAggregator(store Store, cachePusher cache.CachePusher, key schema.AMKey,
 		span: span,
 		agg:  NewAggregation(),
 	}
+	if ingestFrom > 0 {
+		aggregator.ingestFrom = AggBoundary(uint32(ingestFrom), ret.ChunkSpan) - span
+	}
 	for _, agg := range agg.AggregationMethod {
 		switch agg {
 		case conf.Avg:
 			if aggregator.sumMetric == nil {
 				key.Archive = schema.NewArchive(schema.Sum, span)
-				aggregator.sumMetric = NewAggMetric(store, cachePusher, key, conf.Retentions{ret}, 0, span, nil, dropFirstChunk, ingestFrom)
+				aggregator.sumMetric = NewAggMetric(store, cachePusher, key, conf.Retentions{ret}, 0, span, nil, dropFirstChunk, 0)
 			}
 			if aggregator.cntMetric == nil {
 				key.Archive = schema.NewArchive(schema.Cnt, span)
-				aggregator.cntMetric = NewAggMetric(store, cachePusher, key, conf.Retentions{ret}, 0, span, nil, dropFirstChunk, ingestFrom)
+				aggregator.cntMetric = NewAggMetric(store, cachePusher, key, conf.Retentions{ret}, 0, span, nil, dropFirstChunk, 0)
 			}
 		case conf.Sum:
 			if aggregator.sumMetric == nil {
 				key.Archive = schema.NewArchive(schema.Sum, span)
-				aggregator.sumMetric = NewAggMetric(store, cachePusher, key, conf.Retentions{ret}, 0, span, nil, dropFirstChunk, ingestFrom)
+				aggregator.sumMetric = NewAggMetric(store, cachePusher, key, conf.Retentions{ret}, 0, span, nil, dropFirstChunk, 0)
 			}
 		case conf.Lst:
 			if aggregator.lstMetric == nil {
 				key.Archive = schema.NewArchive(schema.Lst, span)
-				aggregator.lstMetric = NewAggMetric(store, cachePusher, key, conf.Retentions{ret}, 0, span, nil, dropFirstChunk, ingestFrom)
+				aggregator.lstMetric = NewAggMetric(store, cachePusher, key, conf.Retentions{ret}, 0, span, nil, dropFirstChunk, 0)
 			}
 		case conf.Max:
 			if aggregator.maxMetric == nil {
 				key.Archive = schema.NewArchive(schema.Max, span)
-				aggregator.maxMetric = NewAggMetric(store, cachePusher, key, conf.Retentions{ret}, 0, span, nil, dropFirstChunk, ingestFrom)
+				aggregator.maxMetric = NewAggMetric(store, cachePusher, key, conf.Retentions{ret}, 0, span, nil, dropFirstChunk, 0)
 			}
 		case conf.Min:
 			if aggregator.minMetric == nil {
 				key.Archive = schema.NewArchive(schema.Min, span)
-				aggregator.minMetric = NewAggMetric(store, cachePusher, key, conf.Retentions{ret}, 0, span, nil, dropFirstChunk, ingestFrom)
+				aggregator.minMetric = NewAggMetric(store, cachePusher, key, conf.Retentions{ret}, 0, span, nil, dropFirstChunk, 0)
 			}
 		}
 	}
@@ -98,6 +102,9 @@ func (agg *Aggregator) flush() {
 // Add adds the point to the in-progress aggregation, and flushes it if we reached the boundary
 // points going back in time are accepted, unless they go into a previous bucket, in which case they are ignored
 func (agg *Aggregator) Add(ts uint32, val float64) {
+	if ts <= agg.ingestFrom {
+		return
+	}
 	boundary := AggBoundary(ts, agg.span)
 
 	if boundary == agg.currentBoundary {
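
For context on the cutoff arithmetic, here is a minimal, self-contained sketch (not part of the diff). It assumes AggBoundary rounds a timestamp up to the next multiple of span, returning ts unchanged when ts already sits on a boundary; the span, chunkSpan, and ingestFrom values below are made up for illustration. The constructor aligns the cutoff to one aggregation span before the first chunk boundary at or after ingestFrom, so the first aggregated point landing on that chunk boundary is built from a complete bucket; the new guard in Add then drops anything at or before the cutoff.

package main

import "fmt"

// AggBoundary rounds ts up to the next multiple of span
// (returning ts itself when it already sits on a boundary).
// Assumed behavior, mirroring the calls in the diff above.
func AggBoundary(ts uint32, span uint32) uint32 {
	return ts + span - ((ts-1)%span + 1)
}

func main() {
	// Hypothetical settings for illustration only.
	span := uint32(10)       // aggregation span (seconds)
	chunkSpan := uint32(600) // chunk span (seconds)
	ingestFrom := uint32(1234567)

	// As in NewAggregator above: first chunk boundary at or after
	// ingestFrom, minus one aggregation span.
	cutoff := AggBoundary(ingestFrom, chunkSpan) - span

	// Mirrors the new guard in Add: points at or before the cutoff are dropped.
	for _, ts := range []uint32{cutoff - span, cutoff, cutoff + 1, cutoff + span} {
		fmt.Printf("ts=%d kept=%v\n", ts, ts > cutoff)
	}
	// Output:
	// ts=1234780 kept=false
	// ts=1234790 kept=false
	// ts=1234791 kept=true
	// ts=1234800 kept=true
}

One reading of the other half of this diff: the per-rollup NewAggMetric calls now receive 0 instead of ingestFrom, presumably because the Aggregator filters at its own boundary-aligned cutoff, so the child metrics no longer need a cutoff of their own.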