diff --git a/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java b/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java
index 0d7a8866f7788..b5c0e84a10308 100644
--- a/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java
+++ b/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java
@@ -40,6 +40,7 @@
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
 import org.opensearch.common.lucene.Lucene;
+import org.opensearch.search.profile.query.ProfileWeight;
 
 import java.io.IOException;
 
@@ -64,6 +65,9 @@ public Collector getCollector() {
 
     @Override
     public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
+        if (filter instanceof ProfileWeight) {
+            ((ProfileWeight) filter).associateCollectorToLeaves(context, collector);
+        }
         final ScorerSupplier filterScorerSupplier = filter.scorerSupplier(context);
         final LeafCollector in = collector.getLeafCollector(context);
         final Bits bits = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorerSupplier);
diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
index 59ef01f9f947a..e1d41227a22f7 100644
--- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
+++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
@@ -83,7 +83,10 @@ public Map<String, Long> toBreakdownMap() {
         // creates a new weight and breakdown map for each rewritten query. This new breakdown map captures the timing information for
         // the new rewritten query. The sliceCollectorsToLeaves is empty because this breakdown for rewritten query gets created later
         // in search leaf path which doesn't have collector. Also, this is not needed since this breakdown is per leaf and there is no
-        // concurrency involved. An empty sliceCollectorsToLeaves could also happen in the case of early termination.
+        // concurrency involved.
+        assert contexts.size() == 1 : "Unexpected size: "
+            + contexts.size()
+            + " of leaves breakdown in ConcurrentQueryProfileBreakdown of rewritten query for a leaf.";
         AbstractProfileBreakdown<QueryTimingType> breakdown = contexts.values().iterator().next();
         queryNodeTime = breakdown.toNodeTime() + createWeightTime;
         maxSliceNodeTime = 0L;
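
For context, a minimal self-contained sketch of the pattern the first hunk introduces: before FilteredCollector builds its leaf collector, it checks whether the filter Weight is a ProfileWeight and, if so, lets it record which collector is collecting which leaf; the concurrent profile breakdown later uses that mapping to attribute per-leaf timings. Only ProfileWeight and associateCollectorToLeaves come from the patch itself; every other name below is an illustrative stand-in, not the real Lucene/OpenSearch class.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Stand-ins for the Lucene/OpenSearch types referenced by the diff.
interface Collector {}

class LeafReaderContext {}

class Weight {}

class ProfileWeight extends Weight {
    // Records which collector was asked to collect which leaves, so the
    // profiler can later attribute per-leaf timings to the right slice.
    private final Map<Collector, List<LeafReaderContext>> collectorToLeaves = new HashMap<>();

    void associateCollectorToLeaves(LeafReaderContext leaf, Collector collector) {
        collectorToLeaves.computeIfAbsent(collector, c -> new ArrayList<>()).add(leaf);
    }

    Map<Collector, List<LeafReaderContext>> collectorToLeaves() {
        return collectorToLeaves;
    }
}

class FilteredLeafDispatch {
    // Mirrors the guard added to FilteredCollector#getLeafCollector: profiling
    // weights get the association callback, plain weights are left untouched.
    static void onNewLeaf(Weight filter, LeafReaderContext leaf, Collector collector) {
        if (filter instanceof ProfileWeight) {
            ((ProfileWeight) filter).associateCollectorToLeaves(leaf, collector);
        }
        // ... the real method then builds the filtered LeafCollector as before ...
    }
}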