Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Display all file cache metrics in NodeStats and use the correct human-readable field name #13232

Open
wants to merge 9 commits into
base: main
Choose a base branch
from
1 change: 1 addition & 0 deletions CHANGELOG-3.0.md
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827))
- Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944))
- Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993))
- Display all file cache metrics in NodeStats and use the correct human-readable field name ([#13232](https://github.com/opensearch-project/OpenSearch/pull/13232))

### Security

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# As of now only nodes with the "search" role can utilize file caching.
# Moreover, until the remote store is used in the test and some searches are executed (which would force
# downloading of files from the remote store into the node file cache) we should always expect a 0b size.
# But at the very least the file cache fields must be present.
---
"File Cache stats":
  - skip:
      version: " - 2.15.99"
      reason: "file cache statistics fields were added in 2.7 (#6485) but the fix (#13232) was merged in 2.16.0"
      features: [arbitrary_key, node_selector]

  - do:
      node_selector:
        # Keep in sync with the skip condition above: the fix (#13232) is available since 2.16.0.
        # A narrower selector (e.g. "3.0.0 - ") would select no nodes on a 2.16.x cluster and fail the test.
        version: "2.16.0 - "
      nodes.info: {}
  - set:
      nodes._arbitrary_key_: node_id

  - do:
      node_selector:
        version: "2.16.0 - "
      nodes.stats:
        metric: [ fs ]

  # In the future we shall test that the node has a "search" role,
  # otherwise the file cache will always be 0.
  - is_true: nodes.$node_id.roles

  - is_true: nodes.$node_id.fs.total
  - gte: { nodes.$node_id.fs.total.cache_reserved_in_bytes: 0 }
  - gte: { nodes.$node_id.fs.total.cache_utilized_in_bytes: 0 }

  - is_true: nodes.$node_id.fs.data
  - gte: { nodes.$node_id.fs.data.0.cache_reserved_in_bytes: 0 }
  - gte: { nodes.$node_id.fs.data.0.cache_utilized_in_bytes: 0 }
Original file line number Diff line number Diff line change
Expand Up @@ -563,7 +563,7 @@ private long createReasonableSizedShards(final String indexName) throws Interrup
}

/**
 * Builds a copy of {@code original} with the given total/free figures; free and
 * available are intentionally set to the same value for these disk-usage tests.
 * The file cache metrics (reserved, utilized) are irrelevant here, so both are 0.
 */
private static FsInfo.Path setDiskUsage(FsInfo.Path original, long totalBytes, long freeBytes) {
    final String diskPath = original.getPath();
    final String diskMount = original.getMount();
    return new FsInfo.Path(diskPath, diskMount, totalBytes, freeBytes, freeBytes, 0, 0);
}

private void refreshDiskUsage() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ public Settings indexSettings() {
}

/**
 * Returns a variant of {@code original} whose total is {@code totalBytes} and whose
 * free and available space are both {@code freeBytes}. File cache reserved/utilized
 * do not matter for watermark tests and are passed as 0.
 */
private static FsInfo.Path setDiskUsage(FsInfo.Path original, long totalBytes, long freeBytes) {
    final String mountPoint = original.getMount();
    return new FsInfo.Path(original.getPath(), mountPoint, totalBytes, freeBytes, freeBytes, 0, 0);
}

public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,7 @@
import org.opensearch.index.IndexSettings;
import org.opensearch.index.shard.ShardPath;
import org.opensearch.index.store.FsDirectoryFactory;
import org.opensearch.index.store.remote.filecache.FileCache;
import org.opensearch.monitor.fs.FsInfo;
import org.opensearch.monitor.fs.FsProbe;
import org.opensearch.monitor.jvm.JvmInfo;
Expand Down Expand Up @@ -126,6 +127,7 @@ public static class NodePath {
public final Path indicesPath;
/** Cached FileStore from path */
public final FileStore fileStore;
/* ${data.paths}/nodes/{node.id}/cache */
public final Path fileCachePath;
/*
Cache reserved size can default to a different value depending on configuration
Expand Down Expand Up @@ -178,6 +180,8 @@ public String toString() {
+ indicesPath
+ ", fileStore="
+ fileStore
+ ", fileCachePath="
+ fileCachePath
+ ", majorDeviceNumber="
+ majorDeviceNumber
+ ", minorDeviceNumber="
Expand Down Expand Up @@ -421,7 +425,7 @@ private void maybeLogPathDetails() throws IOException {
for (NodePath nodePath : nodePaths) {
sb.append('\n').append(" -> ").append(nodePath.path.toAbsolutePath());

FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath);
FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath, FileCache.NOOP_FILE_CACHE);
sb.append(", free_space [")
.append(fsPath.getFree())
.append("], usable_space [")
Expand All @@ -440,7 +444,7 @@ private void maybeLogPathDetails() throws IOException {
Set<String> allTypes = new HashSet<>();
Set<String> allMounts = new HashSet<>();
for (NodePath nodePath : nodePaths) {
FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath);
FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath, FileCache.NOOP_FILE_CACHE);
String mount = fsPath.getMount();
if (allMounts.contains(mount) == false) {
allMounts.add(mount);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@
import org.opensearch.common.annotation.PublicApi;
import org.opensearch.core.common.breaker.CircuitBreaker;
import org.opensearch.core.common.breaker.CircuitBreakingException;
import org.opensearch.core.common.breaker.NoopCircuitBreaker;
import org.opensearch.env.NodeEnvironment;
import org.opensearch.index.store.remote.utils.cache.CacheUsage;
import org.opensearch.index.store.remote.utils.cache.RefCountedCache;
import org.opensearch.index.store.remote.utils.cache.SegmentedCache;
Expand Down Expand Up @@ -49,6 +51,29 @@
*/
@PublicApi(since = "2.7.0")
public class FileCache implements RefCountedCache<Path, CachedIndexInput> {

/**
* A minimalistic instance of the File Cache (FC) which is not useful for any real caching task
* but is useful as a placeholder in API calls that require FC instance, for example
* {@link org.opensearch.monitor.fs.FsProbe#getFSInfo(NodeEnvironment.NodePath, FileCache)}.
* <br>
* REMEMBER to use a valid FC instance if the output of such API call needs to reflect its state.
*
* @opensearch.internal
*/
public static FileCache NOOP_FILE_CACHE = null;

// Perhaps we shall create a new class with static instance like NoopFileCache.INSTANCE ???
static {
int CONCURRENCY_LEVEL = 1;
int CAPACITY = 0;
NOOP_FILE_CACHE = FileCacheFactory.createConcurrentLRUFileCache(
CAPACITY,
CONCURRENCY_LEVEL,
new NoopCircuitBreaker(CircuitBreaker.REQUEST)
);
}

private static final Logger logger = LogManager.getLogger(FileCache.class);
private final SegmentedCache<Path, CachedIndexInput> theCache;

Expand Down
39 changes: 36 additions & 3 deletions server/src/main/java/org/opensearch/monitor/fs/FsInfo.java
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@

import org.opensearch.Version;
import org.opensearch.common.Nullable;
import org.opensearch.common.annotation.InternalApi;
import org.opensearch.common.annotation.PublicApi;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;
Expand Down Expand Up @@ -75,16 +76,48 @@
long free = -1;
long available = -1;
long fileCacheReserved = -1;
long fileCacheUtilized = 0;
long fileCacheUtilized = -1;

public Path() {}

/**
 * Creates a path with the given space figures and with both
 * <code>fileCacheReserved</code> and <code>fileCacheUtilized</code> set to <code>0</code>.
 *
 * See {@link #getFileCacheReserved()}, {@link #getFileCacheUtilized()}
 */
public Path(String path, @Nullable String mount, long total, long free, long available) {
    // BUG FIX: the previous body was "new Path(path, mount, total, free, available, 0, 0);"
    // which constructed and discarded a separate object, leaving every field of THIS
    // instance at its default (-1). Constructor delegation must use this(...).
    this(path, mount, total, free, available, 0, 0);
}

Check warning on line 91 in server/src/main/java/org/opensearch/monitor/fs/FsInfo.java

View check run for this annotation

Codecov / codecov/patch

server/src/main/java/org/opensearch/monitor/fs/FsInfo.java#L90-L91

Added lines #L90 - L91 were not covered by tests

/**
 * Do not assign negative values to variables <code>total</code>, <code>free</code>, <code>available</code>,
 * <code>fileCacheReserved</code> and <code>fileCacheUtilized</code>. The only exception is value <code>-1</code>
 * which is interpreted as an uninitialized value.
 */
@InternalApi
public Path(
    String path,
    @Nullable String mount,
    long total,
    long free,
    long available,
    long fileCacheReserved,
    long fileCacheUtilized
) {
    this.path = path;
    this.mount = mount;
    this.total = total;
    this.free = free;
    this.available = available;
    this.fileCacheReserved = fileCacheReserved;
    this.fileCacheUtilized = fileCacheUtilized;

    // Each value must be either -1 (uninitialized) or non-negative, i.e. >= -1.
    assert this.total >= -1 : "Total value can not be negative";
    assert this.free >= -1 : "Free value can not be negative";
    assert this.available >= -1 : "Available value can not be negative";
    assert this.fileCacheReserved >= -1 : "File cache reserved value can not be negative";
    assert this.fileCacheUtilized >= -1 : "File cache utilization value can not be negative";
}

/**
Expand Down Expand Up @@ -211,8 +244,8 @@
if (fileCacheReserved != -1) {
builder.humanReadableField(Fields.CACHE_RESERVED_IN_BYTES, Fields.CACHE_RESERVED, getFileCacheReserved());
}
if (fileCacheReserved != 0) {
builder.humanReadableField(Fields.CACHE_UTILIZED, Fields.CACHE_UTILIZED_IN_BYTES, getFileCacheUtilized());
if (fileCacheUtilized != -1) {
builder.humanReadableField(Fields.CACHE_UTILIZED_IN_BYTES, Fields.CACHE_UTILIZED, getFileCacheUtilized());

Check warning on line 248 in server/src/main/java/org/opensearch/monitor/fs/FsInfo.java

View check run for this annotation

Codecov / codecov/patch

server/src/main/java/org/opensearch/monitor/fs/FsInfo.java#L248

Added line #L248 was not covered by tests
}

builder.endObject();
Expand Down
31 changes: 29 additions & 2 deletions server/src/main/java/org/opensearch/monitor/fs/FsProbe.java
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.util.Constants;
import org.opensearch.common.SuppressForbidden;
import org.opensearch.common.annotation.DeprecatedApi;
import org.opensearch.common.collect.Tuple;
import org.opensearch.common.io.PathUtils;
import org.opensearch.core.common.unit.ByteSizeValue;
Expand Down Expand Up @@ -77,8 +78,8 @@ public FsInfo stats(FsInfo previous) throws IOException {
NodePath[] dataLocations = nodeEnv.nodePaths();
FsInfo.Path[] paths = new FsInfo.Path[dataLocations.length];
for (int i = 0; i < dataLocations.length; i++) {
paths[i] = getFSInfo(dataLocations[i]);
if (fileCache != null && dataLocations[i].fileCacheReservedSize != ByteSizeValue.ZERO) {
paths[i] = getFSInfo(dataLocations[i], fileCache == null ? FileCache.NOOP_FILE_CACHE : fileCache);
if (fileCache != null && dataLocations[i].fileCacheReservedSize.compareTo(ByteSizeValue.ZERO) >= 0) {
paths[i].fileCacheReserved = adjustForHugeFilesystems(dataLocations[i].fileCacheReservedSize.getBytes());
paths[i].fileCacheUtilized = adjustForHugeFilesystems(fileCache.usage().usage());
// fileCacheFree will be less than zero if the cache being over-subscribed
Expand Down Expand Up @@ -203,6 +204,14 @@ static long adjustForHugeFilesystems(long bytes) {
return bytes;
}

/**
* Retrieves information about the path associated with the given node path.
*
* @deprecated This method has been deprecated since version 2.16.0 and will be removed in version 3.0.0, use {@link #getFSInfo(NodePath, FileCache)} instead.
* @see #getFSInfo(NodePath, FileCache)
*/
@Deprecated
@DeprecatedApi(since = "2.16.0", forRemoval = "3.0.0")
public static FsInfo.Path getFSInfo(NodePath nodePath) throws IOException {
FsInfo.Path fsPath = new FsInfo.Path();
fsPath.path = nodePath.path.toString();
Expand All @@ -214,11 +223,29 @@ public static FsInfo.Path getFSInfo(NodePath nodePath) throws IOException {
fsPath.free = adjustForHugeFilesystems(nodePath.fileStore.getUnallocatedSpace());
fsPath.available = adjustForHugeFilesystems(nodePath.fileStore.getUsableSpace());
fsPath.fileCacheReserved = adjustForHugeFilesystems(nodePath.fileCacheReservedSize.getBytes());
// The fsPath.fileCacheUtilized cannot be calculated because the information about file cache utilization
// is hold by FileCache which is not available in this context.
fsPath.type = nodePath.fileStore.type();
fsPath.mount = nodePath.fileStore.toString();
return fsPath;
}

/**
 * Retrieves information about the path associated with the given node path,
 * including the file cache utilization taken from the supplied {@link FileCache}.
 *
 * @param nodePath The node path to retrieve the path information for.
 * @param fileCache The FileCache object used for adjusting the file cache utilization;
 *                  pass {@code FileCache.NOOP_FILE_CACHE} when no real cache is configured.
 * @return The path information for the given node path.
 * @throws IOException If an I/O error occurs while retrieving the path information.
 * @throws NullPointerException If {@code fileCache} is null.
 */
public static FsInfo.Path getFSInfo(NodePath nodePath, FileCache fileCache) throws IOException {
    // Explicit check instead of assert: assertions are disabled in production JVMs,
    // and a null cache would otherwise fail later with a less informative NPE.
    if (fileCache == null) {
        throw new NullPointerException("fileCache must not be null; use FileCache.NOOP_FILE_CACHE instead");
    }
    FsInfo.Path fsPath = getFSInfo(nodePath);
    fsPath.fileCacheUtilized = adjustForHugeFilesystems(fileCache.usage().usage());
    // Reserved-but-unused cache space is not usable for regular data, so subtract it.
    // If the cache is over-subscribed (utilized > reserved) the difference is added back.
    fsPath.available -= (fsPath.fileCacheReserved - fsPath.fileCacheUtilized);
    return fsPath;
}

/** Returns the total size of the file store backing {@code nodePath}, capped for huge filesystems. */
public static long getTotalSize(NodePath nodePath) throws IOException {
    final long totalBytes = nodePath.fileStore.getTotalSpace();
    return adjustForHugeFilesystems(totalBytes);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,8 @@
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.stream.Collectors;
Expand Down Expand Up @@ -291,12 +293,23 @@ public void testSerialization() throws IOException {
assertNull(deserializedFs);
} else {
assertEquals(fs.getTimestamp(), deserializedFs.getTimestamp());
assertEquals(fs.getTotal().getAvailable(), deserializedFs.getTotal().getAvailable());
assertEquals(fs.getTotal().getTotal(), deserializedFs.getTotal().getTotal());
assertEquals(fs.getTotal().getFree(), deserializedFs.getTotal().getFree());
assertEquals(fs.getTotal().getMount(), deserializedFs.getTotal().getMount());
assertEquals(fs.getTotal().getPath(), deserializedFs.getTotal().getPath());
assertEquals(fs.getTotal().getType(), deserializedFs.getTotal().getType());
compareFsInfo(fs.getTotal(), deserializedFs.getTotal());

FsInfo.Path[] fsPaths = StreamSupport.stream(
Spliterators.spliteratorUnknownSize(fs.iterator(), Spliterator.ORDERED),
false
).toArray(FsInfo.Path[]::new);

FsInfo.Path[] deserializedFsPaths = StreamSupport.stream(
Spliterators.spliteratorUnknownSize(deserializedFs.iterator(), Spliterator.ORDERED),
false
).toArray(FsInfo.Path[]::new);

assertEquals(fsPaths.length, deserializedFsPaths.length);
for (int i = 0; i < fsPaths.length; i++) {
compareFsInfo(fsPaths[i], deserializedFsPaths[i]);
}

FsInfo.IoStats ioStats = fs.getIoStats();
FsInfo.IoStats deserializedIoStats = deserializedFs.getIoStats();
assertEquals(ioStats.getTotalOperations(), deserializedIoStats.getTotalOperations());
Expand Down Expand Up @@ -595,11 +608,22 @@ public void testSerialization() throws IOException {
}
}

// Field-by-field equality check for two FsInfo.Path instances, covering the
// file cache metrics (reserved/utilized) alongside the pre-existing fields.
private void compareFsInfo(FsInfo.Path expected, FsInfo.Path actual) {
    assertEquals(expected.getAvailable(), actual.getAvailable());
    assertEquals(expected.getTotal(), actual.getTotal());
    assertEquals(expected.getFree(), actual.getFree());
    assertEquals(expected.getFileCacheReserved(), actual.getFileCacheReserved());
    assertEquals(expected.getFileCacheUtilized(), actual.getFileCacheUtilized());
    assertEquals(expected.getMount(), actual.getMount());
    assertEquals(expected.getPath(), actual.getPath());
    assertEquals(expected.getType(), actual.getType());
}

// Convenience overload: builds node stats without remote store statistics.
public static NodeStats createNodeStats() throws IOException {
    return createNodeStats(false);
}

public static NodeStats createNodeStats(boolean remoteStoreStats) throws IOException {
private static NodeStats createNodeStats(boolean remoteStoreStats) throws IOException {
DiscoveryNode node = new DiscoveryNode(
"test_node",
buildNewFakeTransportAddress(),
Expand Down Expand Up @@ -764,6 +788,8 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) throws IOExcep
randomBoolean() ? randomAlphaOfLengthBetween(3, 10) : null,
randomNonNegativeLong(),
randomNonNegativeLong(),
randomNonNegativeLong(),
randomNonNegativeLong(),
randomNonNegativeLong()
);
}
Expand Down
24 changes: 12 additions & 12 deletions server/src/test/java/org/opensearch/cluster/DiskUsageTests.java
Original file line number Diff line number Diff line change
Expand Up @@ -157,14 +157,14 @@ public void testFillDiskUsage() {
final Map<String, DiskUsage> newLeastAvaiableUsages = new HashMap<>();
final Map<String, DiskUsage> newMostAvaiableUsages = new HashMap<>();
FsInfo.Path[] node1FSInfo = new FsInfo.Path[] {
new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80),
new FsInfo.Path("/least", "/dev/sdb", 200, 190, 70),
new FsInfo.Path("/most", "/dev/sdc", 300, 290, 280), };
FsInfo.Path[] node2FSInfo = new FsInfo.Path[] { new FsInfo.Path("/least_most", "/dev/sda", 100, 90, 80), };
new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80, 0, 0),
new FsInfo.Path("/least", "/dev/sdb", 200, 190, 70, 0, 0),
new FsInfo.Path("/most", "/dev/sdc", 300, 290, 280, 0, 0), };
FsInfo.Path[] node2FSInfo = new FsInfo.Path[] { new FsInfo.Path("/least_most", "/dev/sda", 100, 90, 80, 0, 0), };

FsInfo.Path[] node3FSInfo = new FsInfo.Path[] {
new FsInfo.Path("/least", "/dev/sda", 100, 90, 70),
new FsInfo.Path("/most", "/dev/sda", 100, 90, 80), };
new FsInfo.Path("/least", "/dev/sda", 100, 90, 70, 0, 0),
new FsInfo.Path("/most", "/dev/sda", 100, 90, 80, 0, 0), };
List<NodeStats> nodeStats = Arrays.asList(
new NodeStats(
new DiscoveryNode("node_1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
Expand Down Expand Up @@ -281,14 +281,14 @@ public void testFillDiskUsageSomeInvalidValues() {
final Map<String, DiskUsage> newLeastAvailableUsages = new HashMap<>();
final Map<String, DiskUsage> newMostAvailableUsages = new HashMap<>();
FsInfo.Path[] node1FSInfo = new FsInfo.Path[] {
new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80),
new FsInfo.Path("/least", "/dev/sdb", -1, -1, -1),
new FsInfo.Path("/most", "/dev/sdc", 300, 290, 280), };
FsInfo.Path[] node2FSInfo = new FsInfo.Path[] { new FsInfo.Path("/least_most", "/dev/sda", -1, -1, -1), };
new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80, 0, 0),
new FsInfo.Path("/least", "/dev/sdb", -1, -1, -1, 0, 0),
new FsInfo.Path("/most", "/dev/sdc", 300, 290, 280, 0, 0), };
FsInfo.Path[] node2FSInfo = new FsInfo.Path[] { new FsInfo.Path("/least_most", "/dev/sda", -1, -1, -1, 0, 0), };

FsInfo.Path[] node3FSInfo = new FsInfo.Path[] {
new FsInfo.Path("/most", "/dev/sda", 100, 90, 70),
new FsInfo.Path("/least", "/dev/sda", 10, -1, 0), };
new FsInfo.Path("/most", "/dev/sda", 100, 90, 70, 0, 0),
new FsInfo.Path("/least", "/dev/sda", 10, -1, 0, 0, 0), };
List<NodeStats> nodeStats = Arrays.asList(
new NodeStats(
new DiscoveryNode("node_1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
Expand Down
Loading
Loading