-
Notifications
You must be signed in to change notification settings - Fork 2.4k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Fix badger merge-join algorithm to correctly filter indexes #1721
Changes from 1 commit
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -105,13 +105,12 @@ func (r *TraceReader) getTraces(traceIDs []model.TraceID) ([]*model.Trace, error | |
|
||
err := r.store.View(func(txn *badger.Txn) error { | ||
opts := badger.DefaultIteratorOptions | ||
opts.PrefetchSize = 10 // TraceIDs are not sorted, pointless to prefetch large amount of values | ||
it := txn.NewIterator(opts) | ||
defer it.Close() | ||
|
||
val := []byte{} | ||
for _, prefix := range prefixes { | ||
spans := make([]*model.Span, 0, 4) // reduce reallocation requirements by defining some initial length | ||
spans := make([]*model.Span, 0, 32) // reduce reallocation requirements by defining some initial length | ||
|
||
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { | ||
// Add value to the span store (decode from JSON / defined encoding first) | ||
|
@@ -346,53 +345,60 @@ func (r *TraceReader) durationQueries(query *spanstore.TraceQueryParameters, ids | |
return ids | ||
} | ||
|
||
func mergeJoinIds(left, right [][]byte) [][]byte { | ||
Review thread:
- Reviewer: rename to a clearer name?
- Reviewer: Are the ids sorted? Maybe that should be documented somewhere.
- Author: It's mentioned at the beginning of the package. Everything is sorted (it's a sorted K/V store).
- Author: As for the name, it's because the algorithm is called "sort-merge join" and is used in relational databases. Here the sorting phase happens in the DB and the merge phase in this code. It's pretty descriptive, in my opinion, since if someone wants to improve this method — such as running it in parallel or using sharding across multiple badger instances — there are known algorithms for those variations too (which would use this underneath in any case).
- Reviewer: Thanks for the explanation. Now it rings a bell. |
||
merged := make([][]byte, 0, len(left)) // len(left) or len(right) is the maximum, whichever is smallest | ||
burmanm marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
||
lMax := len(left) - 1 | ||
rMax := len(right) - 1 | ||
for r, l := 0, 0; r <= rMax && l <= lMax; { | ||
switch bytes.Compare(left[l], right[r]) { | ||
case 0: | ||
// Left matches right - merge | ||
merged = append(merged, left[l]) | ||
// Advance both | ||
l++ | ||
r++ | ||
case 1: | ||
// left > right, increase right one | ||
r++ | ||
case -1: | ||
// left < right, increase left one | ||
l++ | ||
} | ||
} | ||
return merged | ||
} | ||
|
||
// sortMergeIds does a sort-merge join operation to the list of TraceIDs to remove duplicates | ||
func sortMergeIds(query *spanstore.TraceQueryParameters, ids [][][]byte) []model.TraceID { | ||
// Key only scan is a lot faster in the badger - use sort-merge join algorithm instead of hash join since we have the keys in sorted order already | ||
intersected := ids[0] | ||
mergeIntersected := make([][]byte, 0, len(intersected)) // intersected is the maximum size | ||
|
||
var merged [][]byte | ||
|
||
if len(ids) > 1 { | ||
for i := 1; i < len(ids); i++ { | ||
mergeIntersected = make([][]byte, 0, len(intersected)) // intersected is the maximum size | ||
k := len(intersected) - 1 | ||
for j := len(ids[i]) - 1; j >= 0 && k >= 0; { | ||
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b. | ||
switch bytes.Compare(intersected[k], ids[i][j]) { | ||
case 1: | ||
k-- // Move on to the next item in the intersected list | ||
// a > b | ||
case -1: | ||
j-- | ||
// a < b | ||
// Move on to next iteration of j | ||
case 0: | ||
mergeIntersected = append(mergeIntersected, intersected[k]) | ||
k-- // Move on to next item | ||
// Match | ||
} | ||
} | ||
intersected = mergeIntersected | ||
merged = mergeJoinIds(ids[0], ids[1]) | ||
for i := 2; i < len(ids); i++ { | ||
merged = mergeJoinIds(merged, ids[i]) | ||
} | ||
|
||
} else { | ||
// mergeIntersected should be reversed intersected | ||
for i, j := 0, len(intersected)-1; j >= 0; i, j = i+1, j-1 { | ||
mergeIntersected = append(mergeIntersected, intersected[j]) | ||
} | ||
intersected = mergeIntersected | ||
merged = ids[0] | ||
|
||
burmanm marked this conversation as resolved.
Show resolved
Hide resolved
|
||
} | ||
|
||
// Get top query.NumTraces results (order in DESC) | ||
if query.NumTraces < len(merged) { | ||
merged = merged[len(merged)-query.NumTraces:] | ||
} | ||
|
||
// Get top query.NumTraces results (note, the slice is now in descending timestamp order) | ||
if query.NumTraces < len(intersected) { | ||
intersected = intersected[:query.NumTraces] | ||
// Results are in ASC (badger's default order), but Jaeger uses DESC, thus we need to reverse the array | ||
for left, right := 0, len(merged)-1; left < right; left, right = left+1, right-1 { | ||
merged[left], merged[right] = merged[right], merged[left] | ||
} | ||
|
||
// Enrich the traceIds to model.Trace | ||
// result := make([]*model.Trace, 0, len(intersected)) | ||
keys := make([]model.TraceID, 0, len(intersected)) | ||
// Create the structs from [][]byte to TraceID | ||
keys := make([]model.TraceID, 0, len(merged)) | ||
|
||
for _, key := range intersected { | ||
for _, key := range merged { | ||
keys = append(keys, model.TraceID{ | ||
High: binary.BigEndian.Uint64(key[:8]), | ||
Low: binary.BigEndian.Uint64(key[8:]), | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Curious why this was added, as it doesn't seem related to the PR?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The devil is in the details. That single line exercises the bug (the test fails with the older version), since it adds another index query against the tags.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
As for the id list, it is basically the list of matches for the search query — a form of posting list (of traceIDs), if thinking in Elasticsearch terms.
In terms of a relational database, one id list is equivalent to the result of something like:
SELECT id FROM dbo.spans WHERE service = 'invoices'
That is, imagine each id list as the result of one such query, touching a single index and a single value. It doesn't matter whether the index is the same or not (so one query could be against the service index, another against the tags index, etc.).