@@ -4,6 +4,8 @@ package input

 import (
 	"fmt"
+	"sort"
+	"sync"
 	"time"

 	"github.com/grafana/metrictank/idx"
@@ -29,10 +31,21 @@ type DefaultHandler struct {

 	metrics     mdata.Metrics
 	metricIndex idx.MetricIndex
+
+	buffer sync.Map
+}
+
+type IntervalLookupRecord struct {
+	sync.Mutex
+
+	Interval   int
+	DataPoints []*schema.MetricData
+	Last       time.Time
+	pos        int
 }

-func NewDefaultHandler(metrics mdata.Metrics, metricIndex idx.MetricIndex, input string) DefaultHandler {
-	return DefaultHandler{
+func NewDefaultHandler(metrics mdata.Metrics, metricIndex idx.MetricIndex, input string) *DefaultHandler {
+	return &DefaultHandler{
 		metricsReceived: stats.NewCounter32(fmt.Sprintf("input.%s.metrics_received", input)),
 		MetricInvalid:   stats.NewCounter32(fmt.Sprintf("input.%s.metric_invalid", input)),
 		MsgsAge:         stats.NewMeter32(fmt.Sprintf("input.%s.message_age", input), false),
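As a side note on the new `buffer sync.Map` field above: below is a minimal, dependency-free sketch of the per-metric lookup pattern it enables. The `record` type, the `add` helper and the ids are illustrative only, not part of this change. It uses `LoadOrStore` rather than `Load` followed by `Store`, which also closes the small window in which two goroutines could both miss the `Load` for the same id and one of them overwrite the other's first datapoint.

```go
// Illustrative sketch only: a sync.Map keyed by metric id, holding a
// mutex-protected record per metric, roughly mirroring the buffer/
// IntervalLookupRecord pairing introduced in this change.
package main

import (
	"fmt"
	"sync"
)

type record struct {
	sync.Mutex
	points []int64 // timestamps only, to keep the sketch dependency-free
}

func main() {
	var buffer sync.Map

	add := func(id string, ts int64) {
		// LoadOrStore returns the existing record for this id,
		// or stores the fresh one and returns it.
		v, _ := buffer.LoadOrStore(id, &record{})
		rec := v.(*record)
		rec.Lock()
		rec.points = append(rec.points, ts)
		rec.Unlock()
	}

	add("1.abc", 100)
	add("1.abc", 110)
	v, _ := buffer.Load("1.abc")
	fmt.Println(v.(*record).points) // [100 110]
}
```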
@@ -46,10 +59,17 @@ func NewDefaultHandler(metrics mdata.Metrics, metricIndex idx.MetricIndex, input

 // process makes sure the data is stored and the metadata is in the index
 // concurrency-safe.
-func (in DefaultHandler) Process(metric *schema.MetricData, partition int32) {
+func (in *DefaultHandler) Process(metric *schema.MetricData, partition int32) {
 	if metric == nil {
 		return
 	}
+
+	if metric.Interval <= 0 {
+		// we need to buffer enough of this metric to be able to work out its interval.
+		// once that is done, we can update the Id and call in.Process again.
+		in.SetInterval(metric, partition)
+		return
+	}
 	in.metricsReceived.Inc()
 	err := metric.Validate()
 	if err != nil {
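The "update the Id" step in the comment above matters because the metric id is derived from the metric definition, including its interval, so it has to be recomputed once the interval is filled in. A short illustrative fragment follows (not part of this change; it assumes the `schema` package already imported by this file, and the field names are assumptions based on that package):

```go
// Illustration only: once Interval changes, SetId() produces a different id,
// which is why SetInterval (below) calls SetId() on the buffered points
// before handing them back to Process.
md := &schema.MetricData{
	OrgId:    1,
	Name:     "some.metric",
	Metric:   "some.metric",
	Unit:     "ms",
	Mtype:    "gauge",
	Interval: 0,
	Time:     1500000000,
	Value:    1.5,
}
md.SetId()
before := md.Id

md.Interval = 10
md.SetId()
// before != md.Id
```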
@@ -72,3 +92,99 @@ func (in DefaultHandler) Process(metric *schema.MetricData, partition int32) {
 	m.Add(uint32(metric.Time), metric.Value)
 	in.pressureTank.Add(int(time.Since(pre).Nanoseconds()))
 }
+
+func (in *DefaultHandler) SetInterval(metric *schema.MetricData, partition int32) {
+	b, ok := in.buffer.Load(metric.Id)
+	if !ok {
+		dp := make([]*schema.MetricData, 1, 3)
+		dp[0] = metric
+		in.buffer.Store(metric.Id, &IntervalLookupRecord{
+			DataPoints: dp,
+		})
+		return
+	}
+	ilr := b.(*IntervalLookupRecord)
+	ilr.Lock()
+	// check for duplicate TS
+	if ilr.DataPoints[ilr.pos].Time == metric.Time {
+		// drop the metric as it is a duplicate.
+		ilr.Unlock()
+		return
+	}
+
+	// add this datapoint to our circular buffer
+	if len(ilr.DataPoints) < 3 {
+		ilr.DataPoints = append(ilr.DataPoints, metric)
+		ilr.pos++
+	} else {
+		if ilr.pos == 2 {
+			ilr.pos = 0
+		} else {
+			ilr.pos++
+		}
+		ilr.DataPoints[ilr.pos] = metric
+	}
+
+	// if the interval is already known and was updated in the last 24 hours, use it.
+	if ilr.Interval != 0 && time.Since(ilr.Last) < 24*time.Hour {
+		metric.Interval = ilr.Interval
+		metric.SetId()
+		in.Process(metric, partition)
+		ilr.Unlock()
+		return
+	}
+
+	if len(ilr.DataPoints) < 3 {
+		// we don't have 3 points yet.
+		ilr.Unlock()
+		return
+	}
+	log.Debug("input: calculating interval of %s", metric.Id)
+
+	delta1 := ilr.DataPoints[1].Time - ilr.DataPoints[0].Time
+	// take the absolute value in case the points are out of order
+	if delta1 < 0 {
+		delta1 = -1 * delta1
+	}
+	delta2 := ilr.DataPoints[2].Time - ilr.DataPoints[1].Time
+	// take the absolute value in case the points are out of order
+	if delta2 < 0 {
+		delta2 = -1 * delta2
+	}
+	interval := 0
+
+	// To protect against dropped and out-of-order metrics, use the smallest delta as the interval.
+	// Because we have 3 datapoints, it doesn't matter what order they are in: the smallest delta is
+	// always going to be correct (unless points are both out of order and dropped).
+	if delta1 <= delta2 {
+		interval = int(delta1)
+	} else {
+		interval = int(delta2)
+	}
+
+	// TODO: align the interval to the likely value. e.g. if the value is 27, make it 30; if it is 68, use 60; etc.
+
+	if ilr.Last.IsZero() {
+		// sort the datapoints, then update their interval and process them properly.
+		sort.Sort(SortedMetricData(ilr.DataPoints))
+		ilr.pos = 2 // as the metrics are sorted ascending by time, the newest point is at the end of the slice
+		for _, md := range ilr.DataPoints {
+			md.Interval = interval
+			md.SetId()
+			in.Process(md, partition)
+		}
+	} else {
+		metric.Interval = interval
+		metric.SetId()
+		in.Process(metric, partition)
+	}
+	ilr.Interval = interval
+	ilr.Last = time.Now()
+	ilr.Unlock()
+}
+
+type SortedMetricData []*schema.MetricData
+
+func (a SortedMetricData) Len() int           { return len(a) }
+func (a SortedMetricData) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a SortedMetricData) Less(i, j int) bool { return a[i].Time < a[j].Time }
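For reference, here is a self-contained sketch of the two ideas in SetInterval's calculation: take the smaller of the two deltas between the three buffered timestamps, and (per the TODO above) optionally snap the result to a "likely" interval. The `inferInterval` and `snapInterval` helpers and the candidate list are illustrative assumptions, not part of this change.

```go
// Illustrative only: mirrors the smallest-delta inference from SetInterval and
// sketches one possible implementation of the TODO about snapping the result
// to a likely interval (e.g. 27 -> 30, 68 -> 60).
package main

import "fmt"

// inferInterval takes three timestamps (in seconds, possibly out of order)
// and returns the smaller of the two consecutive deltas, as SetInterval does.
func inferInterval(t0, t1, t2 int64) int {
	d1 := t1 - t0
	if d1 < 0 {
		d1 = -d1
	}
	d2 := t2 - t1
	if d2 < 0 {
		d2 = -d2
	}
	if d1 <= d2 {
		return int(d1)
	}
	return int(d2)
}

// snapInterval rounds an observed interval to the nearest "likely" value.
// The candidate list is a guess; a real implementation would need to agree
// with the intervals the deployment actually uses.
func snapInterval(observed int) int {
	candidates := []int{1, 5, 10, 15, 30, 60, 120, 300, 600, 1800, 3600}
	best := candidates[0]
	for _, c := range candidates {
		if abs(observed-c) < abs(observed-best) {
			best = c
		}
	}
	return best
}

func abs(x int) int {
	if x < 0 {
		return -x
	}
	return x
}

func main() {
	fmt.Println(inferInterval(100, 110, 120)) // 10: the normal case
	fmt.Println(inferInterval(100, 120, 110)) // 10: out of order, smallest delta still wins
	fmt.Println(inferInterval(100, 130, 140)) // 10: one dropped point, smallest delta still wins
	fmt.Println(snapInterval(27))             // 30
	fmt.Println(snapInterval(68))             // 60
}
```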