[Java] Fixed compile error due to the removal of ReadOptions.prefix_seek, minor improvement on DbBenchmark.java.
yhchiang committed Apr 28, 2014
1 parent 7ca06a3 commit 9895465
Showing 6 changed files with 56 additions and 69 deletions.
2 changes: 1 addition & 1 deletion java/jdb_bench.sh
@@ -1 +1 @@
java -Djava.library.path=.:../ -cp "rocksdbjni.jar:.:./*" org.rocksdb.benchmark.DbBenchmark $@
java -server -d64 -XX:NewSize=4m -XX:+AggressiveOpts -Djava.library.path=.:../ -cp "rocksdbjni.jar:.:./*" org.rocksdb.benchmark.DbBenchmark $@
28 changes: 0 additions & 28 deletions java/org/rocksdb/ReadOptions.java
@@ -93,34 +93,6 @@ public ReadOptions setFillCache(boolean fillCache) {
private native void setFillCache(
long handle, boolean fillCache);

/**
* If this option is set and memtable implementation allows, Seek
* might only return keys with the same prefix as the seek-key
* Default: false
*
* @return true if prefix-seek is enabled.
*/
public boolean prefixSeek() {
assert(isInitialized());
return prefixSeek(nativeHandle_);
}
private native boolean prefixSeek(long handle);

/**
* If this option is set and memtable implementation allows, Seek
* might only return keys with the same prefix as the seek-key
*
* @param prefixSeek if true, then prefix-seek will be enabled.
* @return the reference to the current ReadOptions.
*/
public ReadOptions setPrefixSeek(boolean prefixSeek) {
assert(isInitialized());
setPrefixSeek(nativeHandle_, prefixSeek);
return this;
}
private native void setPrefixSeek(
long handle, boolean prefixSeek);

/**
* Specify to create a tailing iterator -- a special iterator that has a
* view of the complete database (i.e. it can also be used to read newly
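With prefixSeek()/setPrefixSeek() removed, ReadOptions is now configured only through the accessors that remain in this class. A minimal sketch, assuming nothing beyond what this commit shows (setFillCache/fillCache, setTailing/tailing as exercised in ReadOptionsTest below, and the rocksdbjni library load used by DbBenchmark); the class name is illustrative:

    import org.rocksdb.ReadOptions;

    public class ReadOptionsExample {
      static {
        // Same native library DbBenchmark loads in its static initializer.
        System.loadLibrary("rocksdbjni");
      }

      public static void main(String[] args) {
        ReadOptions readOptions = new ReadOptions();
        // Cache the data blocks read under these options in the block cache.
        readOptions.setFillCache(true);
        // A tailing iterator keeps a view of the complete database,
        // including newly written keys (see the Javadoc above).
        readOptions.setTailing(false);
        assert(readOptions.fillCache());
        assert(!readOptions.tailing());
      }
    }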
66 changes: 54 additions & 12 deletions java/org/rocksdb/benchmark/DbBenchmark.java
@@ -162,6 +162,15 @@ enum DBState {
EXISTING
}

enum CompressionType {
NONE,
SNAPPY,
ZLIB,
BZIP2,
LZ4,
LZ4HC
}

static {
System.loadLibrary("rocksdbjni");
}
@@ -435,7 +444,6 @@ public DbBenchmark(Map<Flag, Object> flags) throws Exception {
databaseDir_ = (String) flags.get(Flag.db);
writesPerSeconds_ = (Integer) flags.get(Flag.writes_per_second);
cacheSize_ = (Long) flags.get(Flag.cache_size);
gen_ = new RandomGenerator(randSeed_, compressionRatio_);
memtable_ = (String) flags.get(Flag.memtablerep);
maxWriteBufferNumber_ = (Integer) flags.get(Flag.max_write_buffer_number);
prefixSize_ = (Integer) flags.get(Flag.prefix_size);
@@ -446,6 +454,28 @@ public DbBenchmark(Map<Flag, Object> flags) throws Exception {
finishLock_ = new Object();
// options.setPrefixSize((Integer)flags_.get(Flag.prefix_size));
// options.setKeysPerPrefix((Long)flags_.get(Flag.keys_per_prefix));
compressionType_ = (String) flags.get(Flag.compression_type);
compression_ = CompressionType.NONE;
try {
if (compressionType_.equals("snappy")) {
System.loadLibrary("snappy");
} else if (compressionType_.equals("zlib")) {
System.loadLibrary("zlib");
} else if (compressionType_.equals("bzip2")) {
System.loadLibrary("bzip2");
} else if (compressionType_.equals("lz4")) {
System.loadLibrary("lz4");
} else if (compressionType_.equals("lz4hc")) {
System.loadLibrary("lz4hc");
}
} catch (UnsatisfiedLinkError e) {
System.err.format("Unable to load %s library:%s%n" +
"No compression is used.%n",
compressionType_, e.toString());
compressionType_ = "none";
compressionRatio_ = 1.0;
}
gen_ = new RandomGenerator(randSeed_, compressionRatio_);
}

private void prepareReadOptions(ReadOptions options) {
@@ -462,6 +492,8 @@ private void prepareOptions(Options options) {
options.setCacheSize(cacheSize_);
if (!useExisting_) {
options.setCreateIfMissing(true);
} else {
options.setCreateIfMissing(false);
}
if (memtable_.equals("skip_list")) {
options.setMemTableConfig(new SkipListMemTableConfig());
@@ -488,6 +520,8 @@ private void prepareOptions(Options options) {
options.setTableFormatConfig(
new PlainTableConfig().setKeySize(keySize_));
}
options.setWriteBufferSize(
(Long)flags_.get(Flag.write_buffer_size));
options.setMaxWriteBufferNumber(
(Integer)flags_.get(Flag.max_write_buffer_number));
options.setMaxBackgroundCompactions(
@@ -513,7 +547,7 @@ private void prepareOptions(Options options) {
options.setDisableSeekCompaction(
(Boolean)flags_.get(Flag.disable_seek_compaction));
options.setDeleteObsoleteFilesPeriodMicros(
(Long)flags_.get(Flag.delete_obsolete_files_period_micros));
(Integer)flags_.get(Flag.delete_obsolete_files_period_micros));
options.setTableCacheNumshardbits(
(Integer)flags_.get(Flag.table_cache_numshardbits));
options.setAllowMmapReads(
@@ -640,12 +674,12 @@ private void run() throws RocksDBException {
} else if (benchmark.equals("readseq")) {
for (int t = 0; t < threadNum_; ++t) {
tasks.add(new ReadSequentialTask(
currentTaskId++, randSeed_, reads_, num_));
currentTaskId++, randSeed_, reads_ / threadNum_, num_));
}
} else if (benchmark.equals("readrandom")) {
for (int t = 0; t < threadNum_; ++t) {
tasks.add(new ReadRandomTask(
currentTaskId++, randSeed_, reads_, num_));
currentTaskId++, randSeed_, reads_ / threadNum_, num_));
}
} else if (benchmark.equals("readwhilewriting")) {
WriteTask writeTask = new WriteRandomTask(
@@ -717,12 +751,12 @@ private void printHeader(Options options) {
(int) (valueSize_ * compressionRatio_ + 0.5));
System.out.printf("Entries: %d\n", num_);
System.out.printf("RawSize: %.1f MB (estimated)\n",
((kKeySize + valueSize_) * num_) / 1048576.0);
((double)(kKeySize + valueSize_) * num_) / SizeUnit.MB);
System.out.printf("FileSize: %.1f MB (estimated)\n",
(((kKeySize + valueSize_ * compressionRatio_) * num_)
/ 1048576.0));
(((kKeySize + valueSize_ * compressionRatio_) * num_) / SizeUnit.MB));
System.out.format("Memtable Factory: %s%n", options.memTableFactoryName());
System.out.format("Prefix: %d bytes%n", prefixSize_);
System.out.format("Compression: %s%n", compressionType_);
printWarnings();
System.out.printf("------------------------------------------------\n");
}
@@ -769,7 +803,7 @@ private void stop(

System.out.printf(
"%-16s : %11.5f micros/op; %6.1f MB/s; %d / %d task(s) finished.\n",
benchmark, elapsedSeconds * 1e6 / stats.done_,
benchmark, (double) elapsedSeconds / stats.done_ * 1e6,
(stats.bytes_ / 1048576.0) / elapsedSeconds,
taskFinishedCount, concurrentThreads);
}
@@ -932,7 +966,7 @@ private enum Flag {
return Integer.parseInt(value);
}
},
write_buffer_size(4 << 20,
write_buffer_size(4 * SizeUnit.MB,
"Number of bytes to buffer in memtable before compacting\n" +
"\t(initialized to default value by 'main'.)") {
@Override public Object parseValue(String value) {
@@ -1275,11 +1309,17 @@ private enum Flag {
return Boolean.parseBoolean(value);
}
},
delete_obsolete_files_period_micros(0L,"Option to delete\n" +
delete_obsolete_files_period_micros(0,"Option to delete\n" +
"\tobsolete files periodically. 0 means that obsolete files are\n" +
"\tdeleted after every compaction run.") {
@Override public Object parseValue(String value) {
return Long.parseLong(value);
return Integer.parseInt(value);
}
},
compression_type("snappy",
"Algorithm used to compress the database.") {
@Override public Object parseValue(String value) {
return value;
}
},
compression_level(-1,
@@ -1512,7 +1552,7 @@ void setFinished(boolean flag) {
final long cacheSize_;
final boolean useExisting_;
final String databaseDir_;
final double compressionRatio_;
double compressionRatio_;
RandomGenerator gen_;
long startTime_;

@@ -1532,4 +1572,6 @@ void setFinished(boolean flag) {
// as the scope of a static member equals to the scope of the problem,
// we let its c++ pointer to be disposed in its finalizer.
static Options defaultOptions_ = new Options();
String compressionType_;
CompressionType compression_;
}
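The try/catch added above is also why gen_ is now constructed at the end of the constructor: if the native library named by compression_type cannot be loaded, the benchmark falls back to no compression and resets compressionRatio_ to 1.0 before the RandomGenerator is built. A condensed sketch of that fallback pattern, with an illustrative class and helper name that are not part of the commit:

    public class CompressionFallback {
      // Mirrors the DbBenchmark constructor logic above: the native library
      // name matches the flag value ("snappy", "zlib", "bzip2", "lz4", "lz4hc").
      static String resolveCompression(String compressionType) {
        try {
          if (!compressionType.equals("none")) {
            System.loadLibrary(compressionType);
          }
          return compressionType;
        } catch (UnsatisfiedLinkError e) {
          System.err.format("Unable to load %s library: %s%n" +
              "No compression is used.%n", compressionType, e);
          // DbBenchmark also resets compressionRatio_ to 1.0 at this point,
          // before gen_ is constructed.
          return "none";
        }
      }

      public static void main(String[] args) {
        System.out.println(resolveCompression(args.length > 0 ? args[0] : "snappy"));
      }
    }

The new compression_type flag defaults to "snappy"; presumably it is passed through jdb_bench.sh in the same way as the other Flag entries.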
6 changes: 0 additions & 6 deletions java/org/rocksdb/test/ReadOptionsTest.java
@@ -27,12 +27,6 @@ public static void main(String[] args) {
assert(opt.fillCache() == boolValue);
}

{ // PrefixSeek test
boolean boolValue = rand.nextBoolean();
opt.setPrefixSeek(boolValue);
assert(opt.prefixSeek() == boolValue);
}

{ // Tailing test
boolean boolValue = rand.nextBoolean();
opt.setTailing(boolValue);
21 changes: 0 additions & 21 deletions java/rocksjni/options.cc
@@ -1785,27 +1785,6 @@ void Java_org_rocksdb_ReadOptions_setFillCache(
static_cast<bool>(jfill_cache);
}

/*
* Class: org_rocksdb_ReadOptions
* Method: prefixSeek
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_ReadOptions_prefixSeek(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->prefix_seek;
}

/*
* Class: org_rocksdb_ReadOptions
* Method: setPrefixSeek
* Signature: (JZ)V
*/
void Java_org_rocksdb_ReadOptions_setPrefixSeek(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean jprefix_seek) {
reinterpret_cast<rocksdb::ReadOptions*>(jhandle)->prefix_seek =
static_cast<bool>(jprefix_seek);
}

/*
* Class: org_rocksdb_ReadOptions
* Method: tailing
2 changes: 1 addition & 1 deletion java/rocksjni/write_batch.cc
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(
rocksdb::Status s =
rocksdb::WriteBatchInternal::InsertInto(b, &cf_mems_default);
int count = 0;
rocksdb::Iterator* iter = mem->NewIterator();
rocksdb::Iterator* iter = mem->NewIterator(rocksdb::ReadOptions());
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
rocksdb::ParsedInternalKey ikey;
memset(reinterpret_cast<void*>(&ikey), 0, sizeof(ikey));
