70 | 70 | import org.opensearch.search.internal.SearchContext; |
71 | 71 | import org.opensearch.search.lookup.SearchLookup; |
72 | 72 | import org.opensearch.search.lookup.SourceLookup; |
| 73 | +import org.opensearch.search.profile.Timer; |
| 74 | +import org.opensearch.search.profile.fetch.FetchTimingType; |
73 | 75 |
74 | 76 | import java.io.IOException; |
75 | 77 | import java.util.ArrayList; |
@@ -103,92 +105,103 @@ public FetchPhase(List<FetchSubPhase> fetchSubPhases) { |
103 | 105 | } |
104 | 106 |
105 | 107 | public void execute(SearchContext context) { |
106 | | - if (LOGGER.isTraceEnabled()) { |
107 | | - LOGGER.trace("{}", new SearchContextSourcePrinter(context)); |
| 108 | + Timer timer = null; |
| 109 | + if (context.getProfilers() != null) { |
| 110 | + timer = context.getProfilers().getFetchProfiler().getQueryBreakdown("fetch").getTimer(FetchTimingType.EXECUTE_FETCH_PHASE); |
| 111 | + timer.start(); |
108 | 112 | } |
| 113 | + try { |
| 114 | + if (LOGGER.isTraceEnabled()) { |
| 115 | + LOGGER.trace("{}", new SearchContextSourcePrinter(context)); |
| 116 | + } |
109 | 117 |
110 | | - if (context.isCancelled()) { |
111 | | - throw new TaskCancelledException("cancelled task with reason: " + context.getTask().getReasonCancelled()); |
112 | | - } |
| 118 | + if (context.isCancelled()) { |
| 119 | + throw new TaskCancelledException("cancelled task with reason: " + context.getTask().getReasonCancelled()); |
| 120 | + } |
113 | 121 |
114 | | - if (context.docIdsToLoadSize() == 0) { |
115 | | - // no individual hits to process, so we shortcut |
116 | | - context.fetchResult() |
117 | | - .hits(new SearchHits(new SearchHit[0], context.queryResult().getTotalHits(), context.queryResult().getMaxScore())); |
118 | | - return; |
119 | | - } |
| 122 | + if (context.docIdsToLoadSize() == 0) { |
| 123 | + // no individual hits to process, so we shortcut |
| 124 | + context.fetchResult() |
| 125 | + .hits(new SearchHits(new SearchHit[0], context.queryResult().getTotalHits(), context.queryResult().getMaxScore())); |
| 126 | + return; |
| 127 | + } |
120 | 128 |
121 | | - DocIdToIndex[] docs = new DocIdToIndex[context.docIdsToLoadSize()]; |
122 | | - for (int index = 0; index < context.docIdsToLoadSize(); index++) { |
123 | | - docs[index] = new DocIdToIndex(context.docIdsToLoad()[context.docIdsToLoadFrom() + index], index); |
124 | | - } |
125 | | - // make sure that we iterate in doc id order |
126 | | - Arrays.sort(docs); |
| 129 | + DocIdToIndex[] docs = new DocIdToIndex[context.docIdsToLoadSize()]; |
| 130 | + for (int index = 0; index < context.docIdsToLoadSize(); index++) { |
| 131 | + docs[index] = new DocIdToIndex(context.docIdsToLoad()[context.docIdsToLoadFrom() + index], index); |
| 132 | + } |
| 133 | + // make sure that we iterate in doc id order |
| 134 | + Arrays.sort(docs); |
127 | 135 |
128 | | - Map<String, Set<String>> storedToRequestedFields = new HashMap<>(); |
129 | | - FieldsVisitor fieldsVisitor = createStoredFieldsVisitor(context, storedToRequestedFields); |
| 136 | + Map<String, Set<String>> storedToRequestedFields = new HashMap<>(); |
| 137 | + FieldsVisitor fieldsVisitor = createStoredFieldsVisitor(context, storedToRequestedFields); |
130 | 138 |
131 | | - FetchContext fetchContext = new FetchContext(context); |
| 139 | + FetchContext fetchContext = new FetchContext(context); |
132 | 140 |
133 | | - SearchHit[] hits = new SearchHit[context.docIdsToLoadSize()]; |
| 141 | + SearchHit[] hits = new SearchHit[context.docIdsToLoadSize()]; |
134 | 142 |
135 | | - List<FetchSubPhaseProcessor> processors = getProcessors(context.shardTarget(), fetchContext); |
| 143 | + List<FetchSubPhaseProcessor> processors = getProcessors(context.shardTarget(), fetchContext); |
136 | 144 |
137 | | - int currentReaderIndex = -1; |
138 | | - LeafReaderContext currentReaderContext = null; |
139 | | - CheckedBiConsumer<Integer, FieldsVisitor, IOException> fieldReader = null; |
140 | | - boolean hasSequentialDocs = hasSequentialDocs(docs); |
141 | | - for (int index = 0; index < context.docIdsToLoadSize(); index++) { |
142 | | - if (context.isCancelled()) { |
143 | | - throw new TaskCancelledException("cancelled task with reason: " + context.getTask().getReasonCancelled()); |
144 | | - } |
145 | | - int docId = docs[index].docId; |
146 | | - try { |
147 | | - int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves()); |
148 | | - if (currentReaderIndex != readerIndex) { |
149 | | - currentReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex); |
150 | | - currentReaderIndex = readerIndex; |
151 | | - if (currentReaderContext.reader() instanceof SequentialStoredFieldsLeafReader |
152 | | - && hasSequentialDocs |
153 | | - && docs.length >= 10) { |
154 | | - // All the docs to fetch are adjacent but Lucene stored fields are optimized |
155 | | - // for random access and don't optimize for sequential access - except for merging. |
156 | | - // So we do a little hack here and pretend we're going to do merges in order to |
157 | | - // get better sequential access. |
158 | | - SequentialStoredFieldsLeafReader lf = (SequentialStoredFieldsLeafReader) currentReaderContext.reader(); |
159 | | - fieldReader = lf.getSequentialStoredFieldsReader()::document; |
160 | | - } else { |
161 | | - fieldReader = currentReaderContext.reader().storedFields()::document; |
| 145 | + int currentReaderIndex = -1; |
| 146 | + LeafReaderContext currentReaderContext = null; |
| 147 | + CheckedBiConsumer<Integer, FieldsVisitor, IOException> fieldReader = null; |
| 148 | + boolean hasSequentialDocs = hasSequentialDocs(docs); |
| 149 | + for (int index = 0; index < context.docIdsToLoadSize(); index++) { |
| 150 | + if (context.isCancelled()) { |
| 151 | + throw new TaskCancelledException("cancelled task with reason: " + context.getTask().getReasonCancelled()); |
| 152 | + } |
| 153 | + int docId = docs[index].docId; |
| 154 | + try { |
| 155 | + int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves()); |
| 156 | + if (currentReaderIndex != readerIndex) { |
| 157 | + currentReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex); |
| 158 | + currentReaderIndex = readerIndex; |
| 159 | + if (currentReaderContext.reader() instanceof SequentialStoredFieldsLeafReader |
| 160 | + && hasSequentialDocs |
| 161 | + && docs.length >= 10) { |
| 162 | + // All the docs to fetch are adjacent but Lucene stored fields are optimized |
| 163 | + // for random access and don't optimize for sequential access - except for merging. |
| 164 | + // So we do a little hack here and pretend we're going to do merges in order to |
| 165 | + // get better sequential access. |
| 166 | + SequentialStoredFieldsLeafReader lf = (SequentialStoredFieldsLeafReader) currentReaderContext.reader(); |
| 167 | + fieldReader = lf.getSequentialStoredFieldsReader()::document; |
| 168 | + } else { |
| 169 | + fieldReader = currentReaderContext.reader().storedFields()::document; |
| 170 | + } |
| 171 | + for (FetchSubPhaseProcessor processor : processors) { |
| 172 | + processor.setNextReader(currentReaderContext); |
| 173 | + } |
162 | 174 | } |
| 175 | + assert currentReaderContext != null; |
| 176 | + HitContext hit = prepareHitContext( |
| 177 | + context, |
| 178 | + fetchContext.searchLookup(), |
| 179 | + fieldsVisitor, |
| 180 | + docId, |
| 181 | + storedToRequestedFields, |
| 182 | + currentReaderContext, |
| 183 | + fieldReader |
| 184 | + ); |
163 | 185 | for (FetchSubPhaseProcessor processor : processors) { |
164 | | - processor.setNextReader(currentReaderContext); |
| 186 | + processor.process(hit); |
165 | 187 | } |
| 188 | + hits[docs[index].index] = hit.hit(); |
| 189 | + } catch (Exception e) { |
| 190 | + throw new FetchPhaseExecutionException(context.shardTarget(), "Error running fetch phase for doc [" + docId + "]", e); |
166 | 191 | } |
167 | | - assert currentReaderContext != null; |
168 | | - HitContext hit = prepareHitContext( |
169 | | - context, |
170 | | - fetchContext.searchLookup(), |
171 | | - fieldsVisitor, |
172 | | - docId, |
173 | | - storedToRequestedFields, |
174 | | - currentReaderContext, |
175 | | - fieldReader |
176 | | - ); |
177 | | - for (FetchSubPhaseProcessor processor : processors) { |
178 | | - processor.process(hit); |
179 | | - } |
180 | | - hits[docs[index].index] = hit.hit(); |
181 | | - } catch (Exception e) { |
182 | | - throw new FetchPhaseExecutionException(context.shardTarget(), "Error running fetch phase for doc [" + docId + "]", e); |
183 | 192 | } |
184 | | - } |
185 | | - if (context.isCancelled()) { |
186 | | - throw new TaskCancelledException("cancelled task with reason: " + context.getTask().getReasonCancelled()); |
187 | | - } |
188 | | - |
189 | | - TotalHits totalHits = context.queryResult().getTotalHits(); |
190 | | - context.fetchResult().hits(new SearchHits(hits, totalHits, context.queryResult().getMaxScore())); |
| 193 | + if (context.isCancelled()) { |
| 194 | + throw new TaskCancelledException("cancelled task with reason: " + context.getTask().getReasonCancelled()); |
| 195 | + } |
191 | 196 |
| 197 | + TotalHits totalHits = context.queryResult().getTotalHits(); |
| 198 | + context.fetchResult().hits(new SearchHits(hits, totalHits, context.queryResult().getMaxScore())); |
| 199 | + } finally { |
| 200 | + if (timer != null) { |
| 201 | + timer.stop(); |
| 202 | + context.getProfilers().getFetchProfiler().pollLastElement(); |
| 203 | + } |
| 204 | + } |
192 | 205 | } |
193 | 206 |
194 | 207 | List<FetchSubPhaseProcessor> getProcessors(SearchShardTarget target, FetchContext context) { |
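
For readers skimming the diff: the change wraps the whole `execute` body in a start/try/finally timing pattern, so the `EXECUTE_FETCH_PHASE` timer is stopped even when the fetch throws. Below is a minimal, standalone sketch of that pattern; `PhaseTimer` is a hypothetical stand-in, not the `org.opensearch.search.profile.Timer` API.

```java
// Minimal sketch of the start/try/finally timing pattern used in the diff above.
// PhaseTimer is a hypothetical stand-in and is NOT the OpenSearch
// org.opensearch.search.profile.Timer class.
public final class PhaseTimer {
    private long startNanos;
    private long elapsedNanos;

    public void start() {
        startNanos = System.nanoTime();
    }

    public void stop() {
        elapsedNanos += System.nanoTime() - startNanos;
    }

    public long elapsedNanos() {
        return elapsedNanos;
    }

    public static void main(String[] args) throws Exception {
        PhaseTimer timer = new PhaseTimer();
        timer.start();
        try {
            // Phase body goes here; any exception still propagates to the caller.
            Thread.sleep(5);
        } finally {
            // Runs on success and failure alike, mirroring the finally block in the diff.
            timer.stop();
        }
        System.out.println("phase took " + timer.elapsedNanos() + " ns");
    }
}
```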
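The diff also keys the sequential-stored-fields optimization off `hasSequentialDocs(docs)` together with a minimum batch size of 10 (presumably so the merge-style reader is not set up for tiny batches). The helper itself is not shown in the hunk; a plausible adjacency check, assuming `docs` is already sorted ascending by doc id, is sketched below. This is illustrative, not necessarily the exact OpenSearch implementation.

```java
// Illustrative adjacency check: with docs sorted ascending by docId, the ids are
// consecutive exactly when the span of ids equals the number of docs minus one.
// This mirrors what hasSequentialDocs(docs) in the diff is expected to decide,
// but it is a sketch, not the verbatim OpenSearch helper.
final class DocIdToIndex {
    final int docId;
    final int index;

    DocIdToIndex(int docId, int index) {
        this.docId = docId;
        this.index = index;
    }
}

final class SequentialDocsCheck {
    static boolean hasSequentialDocs(DocIdToIndex[] docs) {
        return docs.length > 0 && docs[docs.length - 1].docId - docs[0].docId == docs.length - 1;
    }

    public static void main(String[] args) {
        DocIdToIndex[] adjacent = { new DocIdToIndex(7, 0), new DocIdToIndex(8, 1), new DocIdToIndex(9, 2) };
        DocIdToIndex[] scattered = { new DocIdToIndex(7, 0), new DocIdToIndex(42, 1) };
        System.out.println(hasSequentialDocs(adjacent));  // true
        System.out.println(hasSequentialDocs(scattered)); // false
    }
}
```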