|
9 | 9 | "sync"
|
10 | 10 | "time"
|
11 | 11 |
|
| 12 | + "github.com/grafana/metrictank/expr" |
| 13 | + |
12 | 14 | "github.com/grafana/metrictank/api/models"
|
13 | 15 | "github.com/grafana/metrictank/consolidation"
|
14 | 16 | "github.com/grafana/metrictank/mdata"
|
@@ -619,11 +621,12 @@ func (s *Server) getSeriesCachedStore(ctx *requestContext, ss *models.StorageSta
|
619 | 621 | return iters, nil
|
620 | 622 | }
|
621 | 623 |
|
622 |
| -// check for duplicate series names for the same query target. If found merge the results. |
| 624 | +// mergeSeries merges series together if applicable. It does this by categorizing |
| 625 | +// series into groups based on their target, query, consolidator etc. If they collide, they get merged. |
623 | 626 | // Each first uniquely-identified series's backing datapoints slice is reused;

624 | 627 | // any subsequent non-uniquely-identified series is merged into the former and has its

625 | 628 | // datapoints slice returned to the pool. Input series must be canonical.
|
626 |
| -func mergeSeries(in []models.Series) []models.Series { |
| 629 | +func mergeSeries(in []models.Series, dataMap expr.DataMap) []models.Series { |
627 | 630 | type segment struct {
|
628 | 631 | target string
|
629 | 632 | query string
|
@@ -655,6 +658,7 @@ func mergeSeries(in []models.Series) []models.Series {
|
655 | 658 | // we use the first series in the list as our result. We check over every
|
656 | 659 | // point and if it is null, we then check the other series for a non null
|
657 | 660 | // value to use instead.
|
| 661 | + series = expr.Normalize(dataMap, series) |
658 | 662 | log.Debugf("DP mergeSeries: %s has multiple series.", series[0].Target)
|
659 | 663 | for i := range series[0].Datapoints {
|
660 | 664 | for j := 0; j < len(series); j++ {
|
|
0 commit comments