Implement microbenchmark for FileCache (#6610) (#6886)
The current implementation of the FileCache uses some hand-rolled data
structures that would ideally be replaced with library implementations.
This benchmark will be useful for comparing the performance of any future
replacements.

Sample results (AWS EC2 Ubuntu machine with 32 vCPUs and 64GB of RAM):

```
Benchmark                   (concurrencyLevel)  (maximumNumberOfEntries)   Mode  Cnt      Score   Error   Units
FileCacheBenchmark.get                       1                     65536  thrpt        2243.092          ops/ms
FileCacheBenchmark.get                       1                   1048576  thrpt         950.818          ops/ms
FileCacheBenchmark.get                       8                     65536  thrpt        5651.150          ops/ms
FileCacheBenchmark.get                       8                   1048576  thrpt        2831.012          ops/ms
FileCacheBenchmark.put                       1                     65536  thrpt        2206.027          ops/ms
FileCacheBenchmark.put                       1                   1048576  thrpt         921.248          ops/ms
FileCacheBenchmark.put                       8                     65536  thrpt        4421.122          ops/ms
FileCacheBenchmark.put                       8                   1048576  thrpt        2624.550          ops/ms
FileCacheBenchmark.remove                    1                     65536  thrpt       12387.999          ops/ms
FileCacheBenchmark.remove                    1                   1048576  thrpt        6324.643          ops/ms
FileCacheBenchmark.remove                    8                     65536  thrpt       22161.031          ops/ms
FileCacheBenchmark.remove                    8                   1048576  thrpt       14826.586          ops/ms
FileCacheBenchmark.replace                   1                     65536  thrpt        2146.572          ops/ms
FileCacheBenchmark.replace                   1                   1048576  thrpt         947.612          ops/ms
FileCacheBenchmark.replace                   8                     65536  thrpt        4405.339          ops/ms
FileCacheBenchmark.replace                   8                   1048576  thrpt        2707.204          ops/ms
```
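These numbers were collected with JMH. Assuming the benchmark lives in the repository's standard JMH benchmarks module, an invocation along these lines should reproduce them (the exact Gradle task and arguments may differ depending on the build setup):

```
./gradlew -p benchmarks run --args 'FileCacheBenchmark'
```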


(cherry picked from commit 5eed61e)

Signed-off-by: Andrew Ross <andrross@amazon.com>
Signed-off-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
/*
 * SPDX-License-Identifier: Apache-2.0
 *
 * The OpenSearch Contributors require contributions made to
 * this file be licensed under the Apache-2.0 license or a
 * compatible open source license.
 */

package org.opensearch.benchmark.store.remote.filecache;

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

import org.apache.lucene.store.IndexInput;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Threads;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import org.opensearch.index.store.remote.filecache.CachedIndexInput;
import org.opensearch.index.store.remote.filecache.FileCache;
import org.opensearch.index.store.remote.filecache.FileCacheFactory;

/**
 * Simple benchmark test of {@link FileCache}. It uses a uniform random distribution
 * of keys, which is very simple but unlikely to be representative of any real life
 * workload.
 */
@Warmup(iterations = 1)
@Measurement(iterations = 1)
@Fork(1)
@BenchmarkMode(Mode.Throughput)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@State(Scope.Thread)
@Threads(8)
@SuppressWarnings("unused") // invoked by benchmarking framework
public class FileCacheBenchmark {
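    // Shared stub entry used as the value for every key; it reports a fixed 8 MiB length and holds no real data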
    private static final CachedIndexInput INDEX_INPUT = new FixedSizeStubIndexInput();

    @Benchmark
    public void get(CacheParameters parameters, Blackhole blackhole) {
        blackhole.consume(parameters.fileCache.get(randomKeyInCache(parameters)));
    }

    @Benchmark
    public void replace(CacheParameters parameters, Blackhole blackhole) {
        blackhole.consume(parameters.fileCache.put(randomKeyInCache(parameters), INDEX_INPUT));
    }

    @Benchmark
    public void put(CacheParameters parameters, Blackhole blackhole) {
        blackhole.consume(parameters.fileCache.put(randomKeyNotInCache(parameters), INDEX_INPUT));
    }

    @Benchmark
    public void remove(CacheParameters parameters) {
        parameters.fileCache.remove(randomKeyInCache(parameters));
    }

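    // Setup inserts keys [0, maximumNumberOfEntries); randomKeyNotInCache draws from the next block of keys, so put
    // benchmarks insertion of (mostly) new entries while replace overwrites keys that were pre-loaded.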
    private static Path randomKeyInCache(CacheParameters parameters) {
        int i = ThreadLocalRandom.current().nextInt(parameters.maximumNumberOfEntries);
        return Paths.get(Integer.toString(i));
    }

    private static Path randomKeyNotInCache(CacheParameters parameters) {
        int i = ThreadLocalRandom.current().nextInt(parameters.maximumNumberOfEntries, parameters.maximumNumberOfEntries * 2);
        return Paths.get(Integer.toString(i));
    }

    @State(Scope.Benchmark)
    public static class CacheParameters {
        @Param({ "65536", "1048576" })
        int maximumNumberOfEntries;

        @Param({ "1", "8" })
        int concurrencyLevel;

        FileCache fileCache;

        @Setup
        public void setup() {
            fileCache = FileCacheFactory.createConcurrentLRUFileCache(
                (long) maximumNumberOfEntries * INDEX_INPUT.length(),
                concurrencyLevel
            );
            // Fill the cache to capacity; decRef releases the reference taken by put() so entries are evictable
            for (long i = 0; i < maximumNumberOfEntries; i++) {
                final Path key = Paths.get(Long.toString(i));
                fileCache.put(key, INDEX_INPUT);
                fileCache.decRef(key);
            }
        }
    }

    /**
     * Stubbed out IndexInput that does nothing but report a fixed size
     */
    private static class FixedSizeStubIndexInput extends CachedIndexInput {
        private FixedSizeStubIndexInput() {
            super(FixedSizeStubIndexInput.class.getSimpleName());
        }

        @Override
        public boolean isClosed() {
            return false;
        }

        @Override
        public void close() {}

        @Override
        public long getFilePointer() {
            throw new UnsupportedOperationException();
        }

        @Override
        public void seek(long pos) {
            throw new UnsupportedOperationException();
        }

        @Override
        public long length() {
            return 1024 * 1024 * 8; // 8MiB
        }

        @Override
        public IndexInput slice(String sliceDescription, long offset, long length) {
            throw new UnsupportedOperationException();
        }

        @Override
        public byte readByte() {
            throw new UnsupportedOperationException();
        }

        @Override
        public void readBytes(byte[] b, int offset, int len) {
            throw new UnsupportedOperationException();
        }
    }
}
