Skip to content

Commit

Permalink
OAK-10771 - Add disk cache size stats and issue warning if evicted segment has zero length (apache#1427)
Browse files Browse the repository at this point in the history

* OAK-10771 - Log if evicted segment has length zero.

* OAK-10771 - Add diskCacheSize stats

* OAK-10771 - Record cache size and change

---------

Co-authored-by: Axel Hanikel <ahanikel@adobe.com>
  • Loading branch information
2 people authored and shodaaan committed Jun 13, 2024
1 parent a3d5a17 commit 44321c4
Show file tree
Hide file tree
Showing 5 changed files with 42 additions and 10 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@
import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence;
import org.apache.jackrabbit.oak.segment.remote.persistentcache.DiskCacheIOMonitor;
import org.apache.jackrabbit.oak.segment.remote.persistentcache.PersistentDiskCache;
import org.apache.jackrabbit.oak.segment.spi.monitor.FileStoreMonitorAdapter;
import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter;
Expand All @@ -63,6 +64,7 @@
import com.microsoft.azure.storage.StorageCredentialsSharedAccessSignature;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlobDirectory;
import org.apache.jackrabbit.oak.stats.StatisticsProvider;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Expand Down Expand Up @@ -130,7 +132,7 @@ public static SegmentNodeStorePersistence newSegmentNodeStorePersistence(Segment
SegmentNodeStorePersistence basePersistence = new AzurePersistence(cloudBlobDirectory);

PersistentCache persistentCache = new PersistentDiskCache(new File(persistentCachePath),
persistentCacheSize * 1024, new IOMonitorAdapter());
persistentCacheSize * 1024, new DiskCacheIOMonitor(StatisticsProvider.NOOP));
persistence = new CachingPersistence(persistentCache, basePersistence);
break;
default:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
package org.apache.jackrabbit.oak.segment.remote.persistentcache;

import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter;
import org.apache.jackrabbit.oak.stats.HistogramStats;
import org.apache.jackrabbit.oak.stats.MeterStats;
import org.apache.jackrabbit.oak.stats.StatisticsProvider;
import org.apache.jackrabbit.oak.stats.StatsOptions;
Expand All @@ -41,18 +42,26 @@
* a timer metrics for the time spent reading from segment disk cache</li>
* <li>{@link #OAK_SEGMENT_CACHE_DISk_SEGMENT_WRITE_TIME}:
* a timer metrics for the time spent writing to segment disk cache</li>
* <li>{@link #OAK_SEGMENT_CACHE_DISK_CACHE_SIZE_CALCULATED}:
* a histogram for the calculated segment disk cache size</li>
* <li>{@link #OAK_SEGMENT_CACHE_DISK_CACHE_SIZE_CHANGE}:
* a histogram for the segment disk cache size change</li>
* </ul>
*/
public class DiskCacheIOMonitor extends IOMonitorAdapter {
public static final String OAK_SEGMENT_CACHE_DISK_SEGMENT_READ_BYTES = "oak.segment.cache.disk.segment-read-bytes";
public static final String OAK_SEGMENT_CACHE_DISK_SEGMENT_WRITE_BYTES = "oak.segment.cache.disk.segment-write-bytes";
public static final String OAK_SEGMENT_CACHE_DISK_SEGMENT_READ_TIME = "oak.segment.cache.disk.segment-read-time";
public static final String OAK_SEGMENT_CACHE_DISk_SEGMENT_WRITE_TIME = "oak.segment.cache.disk.segment-write-time";
public static final String OAK_SEGMENT_CACHE_DISK_CACHE_SIZE_CALCULATED = "oak.segment.cache.disk.cache-size-calculated";
public static final String OAK_SEGMENT_CACHE_DISK_CACHE_SIZE_CHANGE = "oak.segment.cache.disk.cache-size-change";

private final MeterStats segmentReadBytes;
private final MeterStats segmentWriteBytes;
private final TimerStats segmentReadTime;
private final TimerStats segmentWriteTime;
private final HistogramStats cacheSizeCalculated;
private final HistogramStats cacheSizeOnDisk;

public DiskCacheIOMonitor(@NotNull StatisticsProvider statisticsProvider) {
segmentReadBytes = statisticsProvider.getMeter(
Expand All @@ -63,6 +72,10 @@ public DiskCacheIOMonitor(@NotNull StatisticsProvider statisticsProvider) {
OAK_SEGMENT_CACHE_DISK_SEGMENT_READ_TIME, StatsOptions.METRICS_ONLY);
segmentWriteTime = statisticsProvider.getTimer(
OAK_SEGMENT_CACHE_DISk_SEGMENT_WRITE_TIME, StatsOptions.METRICS_ONLY);
cacheSizeCalculated = statisticsProvider.getHistogram(
OAK_SEGMENT_CACHE_DISK_CACHE_SIZE_CALCULATED, StatsOptions.METRICS_ONLY);
cacheSizeOnDisk = statisticsProvider.getHistogram(
OAK_SEGMENT_CACHE_DISK_CACHE_SIZE_CHANGE, StatsOptions.METRICS_ONLY);
}

@Override
Expand All @@ -76,4 +89,9 @@ public void afterSegmentWrite(File file, long msb, long lsb, int length, long el
segmentWriteBytes.mark(length);
segmentWriteTime.update(elapsed, NANOSECONDS);
}

/**
 * Records a change in the segment disk cache size into the two histograms
 * registered by this monitor.
 *
 * @param calculated the current (post-change) cache size as tracked by the caller, in bytes
 * @param change     the delta that was just applied (positive on write, negative on eviction)
 */
// NOTE(review): the field receiving {@code change} is named cacheSizeOnDisk but is
// registered under OAK_SEGMENT_CACHE_DISK_CACHE_SIZE_CHANGE — consider renaming for clarity.
public void updateCacheSize(long calculated, long change) {
cacheSizeCalculated.update(calculated);
cacheSizeOnDisk.update(change);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ public class PersistentDiskCache extends AbstractPersistentCache {

private final File directory;
private final long maxCacheSizeBytes;
private final IOMonitor diskCacheIOMonitor;
private final DiskCacheIOMonitor diskCacheIOMonitor;
/**
* Wait time before attempting to clean up orphaned temp files
*/
Expand All @@ -71,11 +71,11 @@ public class PersistentDiskCache extends AbstractPersistentCache {

final AtomicLong evictionCount = new AtomicLong();

public PersistentDiskCache(File directory, int cacheMaxSizeMB, IOMonitor diskCacheIOMonitor) {
/**
 * Creates a persistent disk cache using the default wait time before orphaned
 * temp files are cleaned up.
 *
 * @param directory          directory backing the cache
 * @param cacheMaxSizeMB     maximum cache size in megabytes
 * @param diskCacheIOMonitor monitor receiving read/write and cache-size statistics
 */
public PersistentDiskCache(File directory, int cacheMaxSizeMB, DiskCacheIOMonitor diskCacheIOMonitor) {
this(directory, cacheMaxSizeMB, diskCacheIOMonitor, DEFAULT_TEMP_FILES_CLEANUP_WAIT_TIME_MS);
}

public PersistentDiskCache(File directory, int cacheMaxSizeMB, IOMonitor diskCacheIOMonitor, long tempFilesCleanupWaitTimeMs) {
public PersistentDiskCache(File directory, int cacheMaxSizeMB, DiskCacheIOMonitor diskCacheIOMonitor, long tempFilesCleanupWaitTimeMs) {
this.directory = directory;
this.maxCacheSizeBytes = cacheMaxSizeMB * 1024L * 1024L;
this.diskCacheIOMonitor = diskCacheIOMonitor;
Expand Down Expand Up @@ -158,7 +158,8 @@ public void writeSegment(long msb, long lsb, Buffer buffer) {
} catch (AtomicMoveNotSupportedException e) {
Files.move(tempSegmentFile.toPath(), segmentFile.toPath());
}
cacheSize.addAndGet(fileSize);
long cacheSizeAfter = cacheSize.addAndGet(fileSize);
diskCacheIOMonitor.updateCacheSize(cacheSizeAfter, fileSize);
} catch (Exception e) {
logger.error("Error writing segment {} to cache", segmentId, e);
try {
Expand Down Expand Up @@ -204,7 +205,17 @@ private void cleanUpInternal() {
}
if (cacheSize.get() > maxCacheSizeBytes * 0.66) {
File segment = segmentCacheEntry.getPath().toFile();
cacheSize.addAndGet(-segment.length());
long length = segment.length();
if (length == 0) {
if (logger.isDebugEnabled()) {
logger.debug("Avoiding removal of zero-sized file {}", segmentCacheEntry.getPath());
} else {
logger.warn("Avoiding removal of zero-sized file.");
}
return;
}
long cacheSizeAfter = cacheSize.addAndGet(-length);
diskCacheIOMonitor.updateCacheSize(cacheSizeAfter, -length);
segment.delete();
evictionCount.incrementAndGet();
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@Version("1.0.0")
@Version("2.0.0")
package org.apache.jackrabbit.oak.segment.remote.persistentcache;

import org.osgi.annotation.versioning.Version;
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@

import org.apache.jackrabbit.oak.commons.Buffer;
import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitorAdapter;
import org.apache.jackrabbit.oak.stats.StatisticsProvider;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
Expand Down Expand Up @@ -49,13 +50,13 @@ public class PersistentDiskCacheTest extends AbstractPersistentCacheTest {

@Before
public void setUp() throws Exception {
persistentCache = new PersistentDiskCache(temporaryFolder.newFolder(), 10 * 1024, new IOMonitorAdapter());
persistentCache = new PersistentDiskCache(temporaryFolder.newFolder(), 10 * 1024, new DiskCacheIOMonitor(StatisticsProvider.NOOP));
}

@Test
public void cleanupTest() throws Exception {
persistentCache.close();
persistentCache = new PersistentDiskCache(temporaryFolder.newFolder(), 0, new IOMonitorAdapter(), 500);
persistentCache = new PersistentDiskCache(temporaryFolder.newFolder(), 0, new DiskCacheIOMonitor(StatisticsProvider.NOOP), 500);
final List<TestSegment> testSegments = new ArrayList<>(SEGMENTS);
final List<Map<String, Buffer>> segmentsRead = new ArrayList<>(THREADS);

Expand Down Expand Up @@ -127,7 +128,7 @@ public void cleanupTest() throws Exception {

@Test
public void testIOMonitor() throws IOException {
IOMonitorAdapter ioMonitorAdapter = Mockito.mock(IOMonitorAdapter.class);
DiskCacheIOMonitor ioMonitorAdapter = Mockito.mock(DiskCacheIOMonitor.class);

persistentCache.close();
File cacheFolder = temporaryFolder.newFolder();
Expand Down

0 comments on commit 44321c4

Please sign in to comment.