Merge branch 'opensearch-project:main' into main
gashutos authored Sep 3, 2023
2 parents eb0a9d5 + e98ded6 commit 38103fb
Showing 16 changed files with 695 additions and 179 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -99,6 +99,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Expose DelimitedTermFrequencyTokenFilter to allow providing term frequencies along with terms ([#9479](https://github.com/opensearch-project/OpenSearch/pull/9479))
- APIs for performing async blob reads and async downloads from the repository using multiple streams ([#9592](https://github.com/opensearch-project/OpenSearch/issues/9592))
- Introduce cluster default remote translog buffer interval setting ([#9584](https://github.com/opensearch-project/OpenSearch/pull/9584))
+- Add average concurrency metric for concurrent segment search ([#9670](https://github.com/opensearch-project/OpenSearch/issues/9670))

### Dependencies
- Bump `org.apache.logging.log4j:log4j-core` from 2.17.1 to 2.20.0 ([#8307](https://github.com/opensearch-project/OpenSearch/pull/8307))
@@ -349,17 +349,13 @@ private static DeleteObjectsRequest bulkDelete(String bucket, List<String> blobs
}

@Override
-public void listBlobsByPrefixInSortedOrder(
-String blobNamePrefix,
-int limit,
-BlobNameSortOrder blobNameSortOrder,
-ActionListener<List<BlobMetadata>> listener
-) {
+public List<BlobMetadata> listBlobsByPrefixInSortedOrder(String blobNamePrefix, int limit, BlobNameSortOrder blobNameSortOrder)
+throws IOException {
// As AWS S3 returns list of keys in Lexicographic order, we don't have to fetch all the keys in order to sort them
// We fetch only keys as per the given limit to optimize the fetch. If provided sort order is not Lexicographic,
// we fall-back to default implementation of fetching all the keys and sorting them.
if (blobNameSortOrder != BlobNameSortOrder.LEXICOGRAPHIC) {
-super.listBlobsByPrefixInSortedOrder(blobNamePrefix, limit, blobNameSortOrder, listener);
+return super.listBlobsByPrefixInSortedOrder(blobNamePrefix, limit, blobNameSortOrder);
} else {
if (limit < 0) {
throw new IllegalArgumentException("limit should not be a negative value");
@@ -370,9 +366,9 @@ public void listBlobsByPrefixInSortedOrder(
.flatMap(listing -> listing.contents().stream())
.map(s3Object -> new PlainBlobMetadata(s3Object.key().substring(keyPath.length()), s3Object.size()))
.collect(Collectors.toList());
-listener.onResponse(blobs.subList(0, Math.min(limit, blobs.size())));
+return blobs.subList(0, Math.min(limit, blobs.size()));
} catch (final Exception e) {
-listener.onFailure(new IOException("Exception when listing blobs by prefix [" + prefix + "]", e));
+throw new IOException("Exception when listing blobs by prefix [" + prefix + "]", e);
}
}
}
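
For orientation, here is a rough sketch of how a caller might use the now-synchronous listing API shown above. It is not part of this commit: the import paths, the `BlobContainer` receiver type, and the nesting of `BlobNameSortOrder` are assumptions based on the identifiers visible in the diff.

// Hypothetical caller of the synchronous listing API; not from this commit.
// Package paths and the nesting of BlobNameSortOrder are assumptions.
import java.io.IOException;
import java.util.List;

import org.opensearch.common.blobstore.BlobContainer;
import org.opensearch.common.blobstore.BlobContainer.BlobNameSortOrder;
import org.opensearch.common.blobstore.BlobMetadata;

public final class SortedBlobListingExample {

    /** Returns at most {@code limit} blobs under {@code prefix}, smallest key first. */
    static List<BlobMetadata> firstBlobs(BlobContainer container, String prefix, int limit) throws IOException {
        // With LEXICOGRAPHIC order the S3 implementation above can stop after
        // `limit` keys, since S3 already returns keys in lexicographic order;
        // any other order falls back to listing everything and sorting it.
        return container.listBlobsByPrefixInSortedOrder(prefix, limit, BlobNameSortOrder.LEXICOGRAPHIC);
    }

    private SortedBlobListingExample() {}
}

Dropping the ActionListener callback also lets the S3-specific failure surface as a checked IOException, which is what the new catch block above now throws.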
@@ -1,105 +1,13 @@
"Help":
- skip:
version: " - 2.99.99"
reason: concurrent search stats were added in 3.0.0
features: node_selector
- do:
cat.shards:
help: true
node_selector:
version: "3.0.0 - "

- match:
$body: |
/^ index .+ \n
shard .+ \n
prirep .+ \n
state .+ \n
docs .+ \n
store .+ \n
ip .+ \n
id .+ \n
node .+ \n
sync_id .+ \n
unassigned.reason .+ \n
unassigned.at .+ \n
unassigned.for .+ \n
unassigned.details .+ \n
recoverysource.type .+ \n
completion.size .+ \n
fielddata.memory_size .+ \n
fielddata.evictions .+ \n
query_cache.memory_size .+ \n
query_cache.evictions .+ \n
flush.total .+ \n
flush.total_time .+ \n
get.current .+ \n
get.time .+ \n
get.total .+ \n
get.exists_time .+ \n
get.exists_total .+ \n
get.missing_time .+ \n
get.missing_total .+ \n
indexing.delete_current .+ \n
indexing.delete_time .+ \n
indexing.delete_total .+ \n
indexing.index_current .+ \n
indexing.index_time .+ \n
indexing.index_total .+ \n
indexing.index_failed .+ \n
merges.current .+ \n
merges.current_docs .+ \n
merges.current_size .+ \n
merges.total .+ \n
merges.total_docs .+ \n
merges.total_size .+ \n
merges.total_time .+ \n
refresh.total .+ \n
refresh.time .+ \n
refresh.external_total .+ \n
refresh.external_time .+ \n
refresh.listeners .+ \n
search.fetch_current .+ \n
search.fetch_time .+ \n
search.fetch_total .+ \n
search.open_contexts .+ \n
search.query_current .+ \n
search.query_time .+ \n
search.query_total .+ \n
search.concurrent_query_current .+ \n
search.concurrent_query_time .+ \n
search.concurrent_query_total .+ \n
search.scroll_current .+ \n
search.scroll_time .+ \n
search.scroll_total .+ \n
search.point_in_time_current .+ \n
search.point_in_time_time .+ \n
search.point_in_time_total .+ \n
segments.count .+ \n
segments.memory .+ \n
segments.index_writer_memory .+ \n
segments.version_map_memory .+ \n
segments.fixed_bitset_memory .+ \n
seq_no.max .+ \n
seq_no.local_checkpoint .+ \n
seq_no.global_checkpoint .+ \n
warmer.current .+ \n
warmer.total .+ \n
warmer.total_time .+ \n
path.data .+ \n
path.state .+ \n
$/
---
"Help between 2.4.0 - 2.99.99":
- skip:
version: " - 2.3.99 , 3.0.0 - "
version: " - 2.3.99"
reason: point in time stats were added in 2.4.0
features: node_selector
- do:
cat.shards:
help: true
node_selector:
version: "2.4.0 - 2.99.99"
version: "2.4.0 - "

- match:
$body: |
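
As an aside, the help listing that these YAML tests match against can also be fetched directly with the low-level Java REST client. This is a minimal sketch, not part of this commit; the localhost:9200 endpoint and the Apache HttpClient 4 based `opensearch-rest-client` dependency are assumptions.

// Fetches the _cat/shards help text (the column list the tests above match on).
// Sketch only: host, port, and client dependency are assumptions.
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.opensearch.client.Request;
import org.opensearch.client.Response;
import org.opensearch.client.RestClient;

public final class CatShardsHelpExample {

    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Equivalent to the YAML step `cat.shards: help: true`.
            Request request = new Request("GET", "/_cat/shards");
            request.addParameter("help", "true");
            Response response = client.performRequest(request);
            String help = EntityUtils.toString(response.getEntity());
            // One of the concurrent search columns the help test above matches on.
            System.out.println("lists concurrent query stats: " + help.contains("search.concurrent_query_total"));
        }
    }

    private CatShardsHelpExample() {}
}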
@@ -171,4 +171,10 @@ public void testDisconnectsDuringRecovery() {
public void testReplicaRecovery() {

}
+
+@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9580")
+public void testRerouteRecovery() {
+
+}
+
}