2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -10,8 +10,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Add a mapper for context aware segments grouping criteria ([#19233](https://github.com/opensearch-project/OpenSearch/pull/19233))
- Return full error for gRPC error response ([#19568](https://github.com/opensearch-project/OpenSearch/pull/19568))
- Add pluggable gRPC interceptors with explicit ordering ([#19005](https://github.com/opensearch-project/OpenSearch/pull/19005))

- Add metrics for the merged segment warmer feature ([#18929](https://github.com/opensearch-project/OpenSearch/pull/18929))
- Add an `item_count` metric for field data cache API ([#19174](https://github.com/opensearch-project/OpenSearch/pull/19174))

### Changed
- Faster `terms` query creation for `keyword` field with index and docValues enabled ([#19350](https://github.com/opensearch-project/OpenSearch/pull/19350))
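For context on the new `item_count` entry ([#19174]): the metric counts field data cache entries alongside the existing memory size. A minimal sketch of reading it from the node stats, assuming the test-style `client()` helper and the `FieldDataStats#getItemCount` accessor exercised by the integration test below; everything else here is illustrative, not part of this change.

NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).get();
long totalEntries = 0;
for (NodeStats node : nodesStats.getNodes()) {
    // item_count is one cache entry per searched segment per field, reported next to memory_size_in_bytes
    totalEntries += node.getIndices().getFieldData().getItemCount();
}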
@@ -36,6 +36,8 @@

import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs;
import org.opensearch.action.DocWriteResponse;
import org.opensearch.action.admin.cluster.node.stats.NodeStats;
import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.opensearch.action.admin.indices.create.CreateIndexRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
@@ -70,6 +72,7 @@
import org.opensearch.index.VersionType;
import org.opensearch.index.cache.query.QueryCacheStats;
import org.opensearch.index.engine.VersionConflictEngineException;
import org.opensearch.index.fielddata.FieldDataStats;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.index.remote.RemoteSegmentStats;
import org.opensearch.index.shard.IndexShard;
@@ -116,7 +119,6 @@
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

@@ -175,131 +177,88 @@ public void testFieldDataStats() throws Exception {
.get()
);
ensureGreen();
client().prepareIndex("test").setId("1").setSource("field", "value1", "field2", "value1").execute().actionGet();
client().prepareIndex("test").setId("2").setSource("field", "value2", "field2", "value2").execute().actionGet();
// Index enough docs to be sure neither primary shard is empty
for (int i = 0; i < 100; i++) {
client().prepareIndex("test")
.setId(Integer.toString(i))
.setSource("field", "value" + i, "field2", "value" + i)
.execute()
.actionGet();
}
refreshAndWaitForReplication();
indexRandomForConcurrentSearch("test");
// Force merge to 1 segment so we can predict counts
client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet();
refreshAndWaitForReplication();

NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet();
assertThat(
nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getMemorySizeInBytes(),
equalTo(0L)
);
IndicesStatsResponse indicesStats = client().admin()
.indices()
.prepareStats("test")
.clear()
.setFieldData(true)
.execute()
.actionGet();
assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L));
for (FieldDataStats totalStats : List.of(getTotalFieldDataStats(false), getIndicesFieldDataStats(false))) {
assertEquals(0, totalStats.getMemorySizeInBytes());
assertEquals(0, totalStats.getItemCount());
}

// sort to load it to field data...
client().prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet();
client().prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet();

nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet();
assertThat(
nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getMemorySizeInBytes(),
greaterThan(0L)
);
indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).execute().actionGet();
assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0L));
for (FieldDataStats totalStats : List.of(getTotalFieldDataStats(false), getIndicesFieldDataStats(false))) {
assertTrue(totalStats.getMemorySizeInBytes() > 0);
// The search should have hit 2 of the 4 total shards, each of which has 1 segment, so we expect 2 entries.
assertEquals(2, totalStats.getItemCount());
}

// sort to load it to field data...
client().prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet();
client().prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet();

// now check the per field stats
nodesStats = client().admin()
.cluster()
.prepareNodesStats("data:true")
.setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.FieldData, true).fieldDataFields("*"))
.execute()
.actionGet();
assertThat(
nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getMemorySizeInBytes(),
greaterThan(0L)
);
assertThat(
nodesStats.getNodes().get(0).getIndices().getFieldData().getFields().get("field") + nodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getFields()
.get("field"),
greaterThan(0L)
);
assertThat(
nodesStats.getNodes().get(0).getIndices().getFieldData().getFields().get("field") + nodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getFields()
.get("field"),
lessThan(
nodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getMemorySizeInBytes()
)
);
// Now we expect 4 total entries, one per searched segment per field
assertEquals(4, getTotalFieldDataStats(false).getItemCount());

indicesStats = client().admin()
.indices()
.prepareStats("test")
.clear()
.setFieldData(true)
.setFieldDataFields("*")
.execute()
.actionGet();
assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0L));
assertThat(indicesStats.getTotal().getFieldData().getFields().get("field"), greaterThan(0L));
assertThat(
indicesStats.getTotal().getFieldData().getFields().get("field"),
lessThan(indicesStats.getTotal().getFieldData().getMemorySizeInBytes())
);
// now check the per field stats
for (FieldDataStats totalStats : List.of(getTotalFieldDataStats(true), getIndicesFieldDataStats(true))) {
assertTrue(totalStats.getMemorySizeInBytes() > 0);
for (String fieldName : List.of("field", "field2")) {
assertTrue(totalStats.getFields().get(fieldName) > 0);
assertEquals(2, totalStats.getFieldItemCounts().get(fieldName));
assertTrue(totalStats.getFields().get(fieldName) < totalStats.getMemorySizeInBytes());
}
}

client().admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
assertBusy(() -> {
NodesStatsResponse postClearNodesStats = client().admin()
.cluster()
.prepareNodesStats("data:true")
.setIndices(true)
.execute()
.actionGet();
assertThat(
postClearNodesStats.getNodes().get(0).getIndices().getFieldData().getMemorySizeInBytes() + postClearNodesStats.getNodes()
.get(1)
.getIndices()
.getFieldData()
.getMemorySizeInBytes(),
equalTo(0L)
);
IndicesStatsResponse postClearIndicesStats = client().admin()
.indices()
.prepareStats("test")
.clear()
.setFieldData(true)
.execute()
.actionGet();
assertThat(postClearIndicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L));
for (FieldDataStats postClearStats : List.of(getTotalFieldDataStats(true), getIndicesFieldDataStats(true))) {
assertEquals(0, postClearStats.getMemorySizeInBytes());
assertEquals(0, postClearStats.getItemCount());
for (long fieldMemorySize : postClearStats.getFields().getStats().values()) {
assertEquals(0, fieldMemorySize);
}
for (long fieldItemCount : postClearStats.getFieldItemCounts().getStats().values()) {
assertEquals(0, fieldItemCount);
}
}
});
}

private FieldDataStats getTotalFieldDataStats(boolean setFieldDataFields) {
NodesStatsRequestBuilder builder = client().admin().cluster().prepareNodesStats("data:true");
if (setFieldDataFields) {
builder.setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.FieldData, true).fieldDataFields("*"));
} else {
builder.setIndices(true);
}
NodesStatsResponse nodesStats = builder.execute().actionGet();
FieldDataStats total = new FieldDataStats();
for (NodeStats node : nodesStats.getNodes()) {
total.add(node.getIndices().getFieldData());
}
return total;
}

private FieldDataStats getIndicesFieldDataStats(boolean setFieldDataFields) {
IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats("test").clear().setFieldData(true);
if (setFieldDataFields) {
builder.setFieldDataFields("*");
}
return builder.execute().actionGet().getTotal().getFieldData();
}

public void testClearAllCaches() throws Exception {
assertAcked(
client().admin()
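The per-field assertions above read two parallel maps off `FieldDataStats`: `getFields()` (per-field memory, the existing `FieldMemoryStats`) and the new `getFieldItemCounts()` (per-field entry counts, backed by the `FieldCountStats` class added below). A minimal sketch of that pattern, under the same test-client assumptions as the integration test:

FieldDataStats fieldData = client().admin()
    .indices()
    .prepareStats("test")
    .clear()
    .setFieldData(true)
    .setFieldDataFields("*")
    .get()
    .getTotal()
    .getFieldData();
for (String field : List.of("field", "field2")) {
    long bytes = fieldData.getFields().get(field); // per-field memory (FieldMemoryStats)
    long entries = fieldData.getFieldItemCounts().get(field); // per-field cache entries (FieldCountStats)
    assertTrue(bytes > 0 && entries > 0);
}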
58 changes: 58 additions & 0 deletions server/src/main/java/org/opensearch/common/FieldCountStats.java
@@ -0,0 +1,58 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/

package org.opensearch.common;

import org.opensearch.common.annotation.PublicApi;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

/**
* A reusable class to encode {@code field -&gt; count} mappings.
* Identical to {@link FieldMemoryStats} except for its {@code toXContent} logic.
*/
@PublicApi(since = "3.4.0")
public class FieldCountStats extends FieldStats {

public FieldCountStats(Map<String, Long> stats) {
super(stats);
}

public FieldCountStats(StreamInput input) throws IOException {
super(input);
}

@Override
public FieldCountStats copy() {
return new FieldCountStats(new HashMap<>(stats));
}

@Override
public void toXContent(XContentBuilder builder, String key, String rawKey, String readableKey) throws IOException {
// Note the readableKey is not used here, as there is no such concept for non-memory stats.
builder.startObject(key);
for (final var entry : stats.entrySet()) {
builder.startObject(entry.getKey());
builder.field(rawKey, entry.getValue());
builder.endObject();
}
builder.endObject();
}

@Override
public void toXContentField(XContentBuilder builder, String field, String rawKey, String readableKey) throws IOException {
Objects.requireNonNull(rawKey);
Objects.requireNonNull(field);
builder.field(rawKey, get(field));
}
}
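To make the `toXContent` difference concrete, a hedged sketch of serializing a `FieldCountStats`; the map values are invented for illustration, and `XContentFactory`/`XContentBuilder` are the standard OpenSearch x-content utilities:

FieldCountStats counts = new FieldCountStats(new HashMap<>(Map.of("field", 2L, "field2", 2L)));
XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
counts.toXContent(builder, "fields", "item_count", null); // readableKey is ignored by this class
builder.endObject();
// Produces (entry order not guaranteed by HashMap):
// {"fields":{"field":{"item_count":2},"field2":{"item_count":2}}}
// FieldMemoryStats, by contrast, also emits a human-readable size via its readableKey.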