@@ -2,6 +2,7 @@ package api
 
 import (
 	"errors"
+	"math"
 	"net/http"
 	"sort"
 	"strings"
@@ -14,9 +15,11 @@ import (
 	"github.com/raintank/metrictank/api/response"
 	"github.com/raintank/metrictank/cluster"
 	"github.com/raintank/metrictank/consolidation"
+	"github.com/raintank/metrictank/expr"
 	"github.com/raintank/metrictank/idx"
 	"github.com/raintank/metrictank/mdata"
 	"github.com/raintank/metrictank/stats"
+	"github.com/raintank/metrictank/util"
 	"github.com/raintank/worldping-api/pkg/log"
 )
 
@@ -40,34 +43,6 @@ type Series struct {
 	Node cluster.Node
 }
 
-func parseTarget(target string) (string, string, error) {
-	var consolidateBy string
-	// yes, i am aware of the arguably grossness of the below.
-	// however, it is solid based on the documented allowed input format.
-	// once we need to support several functions, we can implement
-	// a proper expression parser
-	if strings.HasPrefix(target, "consolidateBy(") {
-		var q1, q2 int
-		t := target
-		if t[len(t)-2:] == "')" && (strings.Contains(t, ",'") || strings.Contains(t, ", '")) && strings.Count(t, "'") == 2 {
-			q1 = strings.Index(t, "'")
-			q2 = strings.LastIndex(t, "'")
-		} else if t[len(t)-2:] == "\")" && (strings.Contains(t, ",\"") || strings.Contains(t, ", \"")) && strings.Count(t, "\"") == 2 {
-			q1 = strings.Index(t, "\"")
-			q2 = strings.LastIndex(t, "\"")
-		} else {
-			return "", "", response.NewError(http.StatusBadRequest, "target parse error")
-		}
-		consolidateBy = t[q1+1 : q2]
-		err := consolidation.Validate(consolidateBy)
-		if err != nil {
-			return "", "", err
-		}
-		target = t[strings.Index(t, "(")+1 : strings.LastIndex(t, ",")]
-	}
-	return target, consolidateBy, nil
-}
-
 func (s *Server) findSeries(orgId int, patterns []string, seenAfter int64) ([]Series, error) {
 	peers := cluster.MembersForQuery()
 	log.Debug("HTTP findSeries for %v across %d instances", patterns, len(peers))
@@ -189,112 +164,37 @@ func (s *Server) renderMetrics(ctx *middleware.Context, request models.GraphiteR
 		return
 	}
 
-	reqs := make([]models.Req, 0)
-
-	patterns := make([]string, 0)
-	type locatedDef struct {
-		def  idx.Archive
-		node cluster.Node
-	}
-
-	//locatedDefs[<pattern>][<def.id>]locatedDef
-	locatedDefs := make(map[string]map[string]locatedDef)
-	//targetForPattern[<pattern>]<target>
-	targetForPattern := make(map[string]string)
-	for _, target := range targets {
-		pattern, _, err := parseTarget(target)
-		if err != nil {
-			ctx.Error(http.StatusBadRequest, err.Error())
-			return
-		}
-		patterns = append(patterns, pattern)
-		targetForPattern[pattern] = target
-		locatedDefs[pattern] = make(map[string]locatedDef)
-
-	}
-
-	series, err := s.findSeries(ctx.OrgId, patterns, int64(fromUnix))
+	exprs, err := expr.ParseMany(targets)
 	if err != nil {
-		response.Write(ctx, response.WrapError(err))
-	}
-
-	for _, s := range series {
-		for _, metric := range s.Series {
-			if !metric.Leaf {
-				continue
-			}
-			for _, def := range metric.Defs {
-				locatedDefs[s.Pattern][def.Id] = locatedDef{def, s.Node}
-			}
-		}
-	}
-
-	for pattern, ldefs := range locatedDefs {
-		for _, locdef := range ldefs {
-			archive := locdef.def
-			// set consolidator that will be used to normalize raw data before feeding into processing functions
-			// not to be confused with runtime consolidation which happens in the graphite api, after all processing.
-			fn := mdata.Aggregations.Get(archive.AggId).AggregationMethod[0]
-			consolidator := consolidation.Consolidator(fn) // we use the same number assignments so we can cast them
-			// target is like foo.bar or foo.* or consolidateBy(foo.*,'sum')
-			// pattern is like foo.bar or foo.*
-			// def.Name is like foo.concretebar
-			// so we want target to contain the concrete graphite name, potentially wrapped with consolidateBy().
-			target := strings.Replace(targetForPattern[pattern], pattern, archive.Name, -1)
-			reqs = append(reqs, models.NewReq(archive.Id, target, fromUnix, toUnix, request.MaxDataPoints, uint32(archive.Interval), consolidator, locdef.node, archive.SchemaId, archive.AggId))
-		}
-	}
-
-	reqRenderSeriesCount.Value(len(reqs))
-	reqRenderTargetCount.Value(len(request.Targets))
-
-	if len(reqs) == 0 {
-		if request.Format == "msgp" {
-			var series models.SeriesByTarget
-			response.Write(ctx, response.NewMsgp(200, series))
-		} else {
-			response.Write(ctx, response.NewJson(200, []string{}, ""))
-		}
+		ctx.Error(http.StatusBadRequest, err.Error())
 		return
 	}
 
-	if (toUnix - fromUnix) >= logMinDur {
-		log.Info("HTTP Render: INCOMING REQ %q from: %q, to: %q target cnt: %d, maxDataPoints: %d",
-			ctx.Req.Method, from, to, len(request.Targets), request.MaxDataPoints)
-	}
+	reqRenderTargetCount.Value(len(targets))
 
-	reqs, err = alignRequests(uint32(time.Now().Unix()), reqs)
+	plan, err := expr.NewPlan(exprs, fromUnix, toUnix, request.MaxDataPoints, nil)
 	if err != nil {
-		log.Error(3, "HTTP Render alignReq error: %s", err)
-		response.Write(ctx, response.WrapError(err))
+		ctx.Error(http.StatusBadRequest, err.Error())
 		return
 	}
 
-	if LogLevel < 2 {
-		for _, req := range reqs {
-			log.Debug("HTTP Render %s - arch:%d archI:%d outI:%d aggN: %d from %s", req, req.Archive, req.ArchInterval, req.OutInterval, req.AggNum, req.Node.Name)
-		}
-	}
-
-	out, err := s.getTargets(reqs)
+	out, err := s.executePlan(ctx.OrgId, plan)
 	if err != nil {
-		log.Error(3, "HTTP Render %s", err.Error())
-		response.Write(ctx, response.WrapError(err))
+		ctx.Error(http.StatusBadRequest, err.Error())
 		return
 	}
+	sort.Sort(models.SeriesByTarget(out))
 
-	merged := mergeSeries(out)
-	sort.Sort(models.SeriesByTarget(merged))
 	defer func() {
 		for _, serie := range out {
 			pointSlicePool.Put(serie.Datapoints[:0])
 		}
 	}()
 
 	if request.Format == "msgp" {
-		response.Write(ctx, response.NewMsgp(200, models.SeriesByTarget(merged)))
+		response.Write(ctx, response.NewMsgp(200, models.SeriesByTarget(out)))
 	} else {
-		response.Write(ctx, response.NewFastJson(200, models.SeriesByTarget(merged)))
+		response.Write(ctx, response.NewFastJson(200, models.SeriesByTarget(out)))
 	}
 }
 
@@ -557,3 +457,81 @@ func (s *Server) metricsDeleteRemote(orgId int, query string, peer cluster.Node)
 
 	return resp.DeletedDefs, nil
 }
+
+// note if you do something like sum(foo.*) and all of those metrics happen to be on another node,
+// we will collect all the individual series from the peer, and then sum here. that could be optimized
+func (s *Server) executePlan(orgId int, plan expr.Plan) ([]models.Series, error) {
+
+	type locatedDef struct {
+		def  idx.Archive
+		node cluster.Node
+	}
+	minFrom := uint32(math.MaxUint32)
+	var maxTo uint32
+	//locatedDefs[request][<def.id>]locatedDef
+	locatedDefs := make(map[expr.Req]map[string]locatedDef)
+
+	// note that different patterns to query can have different from / to, so they require different index lookups
+	// e.g. target=movingAvg(foo.*, "1h")&target=foo.*
+	// note that in this case we fetch foo.* twice. can be optimized later
+	for _, r := range plan.Reqs {
+		series, err := s.findSeries(orgId, []string{r.Query}, int64(r.From))
+		if err != nil {
+			return nil, err
+		}
+
+		locatedDefs[r] = make(map[string]locatedDef)
+
+		minFrom = util.Min(minFrom, r.From)
+		maxTo = util.Max(maxTo, r.To)
+
+		for _, s := range series {
+			for _, metric := range s.Series {
+				if !metric.Leaf {
+					continue
+				}
+				for _, def := range metric.Defs {
+					locatedDefs[r][def.Id] = locatedDef{def, s.Node}
+				}
+			}
+		}
+	}
+
+	var reqs []models.Req
+	for r, ldefs := range locatedDefs {
+		for _, locdef := range ldefs {
+			archive := locdef.def
+			// set consolidator that will be used to normalize raw data before feeding into processing functions
+			// not to be confused with runtime consolidation which happens after all processing.
+			fn := mdata.Aggregations.Get(archive.AggId).AggregationMethod[0]
+			consolidator := consolidation.Consolidator(fn) // we use the same number assignments so we can cast them
+			reqs = append(reqs, models.NewReq(
+				archive.Id, archive.Name, r.From, r.To, plan.MaxDataPoints, uint32(archive.Interval), consolidator, locdef.node, archive.SchemaId, archive.AggId))
+		}
+	}
+	reqRenderSeriesCount.Value(len(reqs))
+	if len(reqs) == 0 {
+		return nil, nil
+	}
+	// note: if 1 series has a movingAvg that requires a long time range extension, it may push other reqs into another archive. can be optimized later
+	reqs, err := alignRequests(uint32(time.Now().Unix()), minFrom, maxTo, reqs)
+	if err != nil {
+		log.Error(3, "HTTP Render alignReq error: %s", err)
+		return nil, err
+	}
+
+	if LogLevel < 2 {
+		for _, req := range reqs {
+			log.Debug("HTTP Render %s - arch:%d archI:%d outI:%d aggN: %d from %s", req, req.Archive, req.ArchInterval, req.OutInterval, req.AggNum, req.Node.Name)
+		}
+	}
+
+	out, err := s.getTargets(reqs)
+	if err != nil {
+		log.Error(3, "HTTP Render %s", err.Error())
+		return nil, err
+	}
+
+	merged := mergeSeries(out)
+	return merged, nil
+}
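For context, this diff replaces the ad-hoc consolidateBy() string parsing in renderMetrics with a parse/plan/execute pipeline: targets are parsed into expressions (expr.ParseMany), turned into a plan carrying the time range and max data points (expr.NewPlan), and the plan's index lookups, fetches and merging are handled in executePlan. The sketch below only illustrates that control flow; the types and helpers in it are hypothetical stand-ins, not the actual metrictank expr API.

// flow_sketch.go — minimal, self-contained illustration of the parse -> plan -> execute shape.
package main

import "fmt"

// req and plan are hypothetical stand-ins for expr.Req / expr.Plan.
type req struct {
	Query    string
	From, To uint32
}

type plan struct {
	Reqs          []req
	MaxDataPoints uint32
}

// parseMany stands in for expr.ParseMany: one parsed expression per target.
func parseMany(targets []string) ([]string, error) {
	for _, t := range targets {
		if t == "" {
			return nil, fmt.Errorf("empty target")
		}
	}
	return targets, nil
}

// newPlan stands in for expr.NewPlan: each expression contributes the data
// request (query pattern + time range) it needs.
func newPlan(exprs []string, from, to, mdp uint32) plan {
	p := plan{MaxDataPoints: mdp}
	for _, e := range exprs {
		p.Reqs = append(p.Reqs, req{Query: e, From: from, To: to})
	}
	return p
}

// executePlan stands in for Server.executePlan: resolve each request
// (index lookup, fetch, possibly from peers), then merge and return.
func executePlan(p plan) []string {
	var out []string
	for _, r := range p.Reqs {
		out = append(out, fmt.Sprintf("fetch %q [%d..%d] mdp=%d", r.Query, r.From, r.To, p.MaxDataPoints))
	}
	return out
}

func main() {
	targets := []string{"foo.*", "consolidateBy(bar.*,'sum')"}
	exprs, err := parseMany(targets)
	if err != nil {
		panic(err)
	}
	for _, s := range executePlan(newPlan(exprs, 1000, 2000, 800)) {
		fmt.Println(s)
	}
}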