Commit 151f9e1: Merge branch 'master' into columnfamilies

igorcanadi committed Jan 13, 2014
2 parents d076cef + dd6ecdf

Showing 22 changed files with 679 additions and 150 deletions.
60 changes: 51 additions & 9 deletions build_tools/regression_build_test.sh
@@ -50,7 +50,7 @@ make release
--num=$NUM \
--writes=$NUM \
--cache_size=6442450944 \
- --cache_numshardbits=4 \
+ --cache_numshardbits=6 \
--table_cache_numshardbits=4 \
--open_files=55000 \
--statistics=1 \
@@ -68,7 +68,7 @@ make release
--num=$NUM \
--writes=$((NUM / 10)) \
--cache_size=6442450944 \
- --cache_numshardbits=4 \
+ --cache_numshardbits=6 \
--table_cache_numshardbits=4 \
--open_files=55000 \
--statistics=1 \
@@ -87,7 +87,7 @@ make release
--num=$NUM \
--writes=$NUM \
--cache_size=6442450944 \
- --cache_numshardbits=4 \
+ --cache_numshardbits=6 \
--table_cache_numshardbits=4 \
--open_files=55000 \
--statistics=1 \
@@ -106,7 +106,7 @@ make release
--num=$NUM \
--reads=$((NUM / 5)) \
--cache_size=6442450944 \
- --cache_numshardbits=4 \
+ --cache_numshardbits=6 \
--table_cache_numshardbits=4 \
--open_files=55000 \
--disable_seek_compaction=1 \
@@ -126,7 +126,7 @@ make release
--num=$NUM \
--reads=$((NUM / 5)) \
--cache_size=104857600 \
- --cache_numshardbits=4 \
+ --cache_numshardbits=6 \
--table_cache_numshardbits=4 \
--open_files=55000 \
--disable_seek_compaction=1 \
@@ -147,7 +147,7 @@ make release
--reads=$((NUM / 5)) \
--writes=512 \
--cache_size=6442450944 \
- --cache_numshardbits=4 \
+ --cache_numshardbits=6 \
--table_cache_numshardbits=4 \
--write_buffer_size=1000000000 \
--open_files=55000 \
@@ -169,7 +169,7 @@ make release
--num=$((NUM / 4)) \
--writes=$((NUM / 4)) \
--cache_size=6442450944 \
- --cache_numshardbits=4 \
+ --cache_numshardbits=6 \
--table_cache_numshardbits=4 \
--open_files=55000 \
--statistics=1 \
@@ -179,6 +179,25 @@ make release
--sync=0 \
--threads=1 > /dev/null

+ # dummy test just to compact the data
+ ./db_bench \
+ --benchmarks=readrandom \
+ --db=$DATA_DIR \
+ --use_existing_db=1 \
+ --bloom_bits=10 \
+ --num=$((NUM / 1000)) \
+ --reads=$((NUM / 1000)) \
+ --cache_size=6442450944 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --open_files=55000 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_data_sync=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --threads=16 > /dev/null

# measure readrandom after load with filluniquerandom with 6GB block cache
./db_bench \
--benchmarks=readrandom \
@@ -188,7 +207,7 @@ make release
--num=$((NUM / 4)) \
--reads=$((NUM / 4)) \
--cache_size=6442450944 \
- --cache_numshardbits=4 \
+ --cache_numshardbits=6 \
--table_cache_numshardbits=4 \
--open_files=55000 \
--disable_seek_compaction=1 \
@@ -200,6 +219,28 @@ make release
--sync=0 \
--threads=16 > ${STAT_FILE}.readrandom_filluniquerandom

+ # measure readwhilewriting after load with filluniquerandom with 6GB block cache
+ ./db_bench \
+ --benchmarks=readwhilewriting \
+ --db=$DATA_DIR \
+ --use_existing_db=1 \
+ --bloom_bits=10 \
+ --num=$((NUM / 4)) \
+ --reads=$((NUM / 4)) \
+ --writes_per_second=1000 \
+ --write_buffer_size=100000000 \
+ --cache_size=6442450944 \
+ --cache_numshardbits=6 \
+ --table_cache_numshardbits=4 \
+ --open_files=55000 \
+ --disable_seek_compaction=1 \
+ --statistics=1 \
+ --histogram=1 \
+ --disable_data_sync=1 \
+ --disable_wal=1 \
+ --sync=0 \
+ --threads=16 > ${STAT_FILE}.readwhilewriting

# measure memtable performance -- none of the data gets flushed to disk
./db_bench \
--benchmarks=fillrandom,readrandom, \
@@ -208,7 +249,7 @@ make release
--num=$((NUM / 10)) \
--reads=$NUM \
--cache_size=6442450944 \
- --cache_numshardbits=4 \
+ --cache_numshardbits=6 \
--table_cache_numshardbits=4 \
--write_buffer_size=1000000000 \
--open_files=55000 \
@@ -264,3 +305,4 @@ send_benchmark_to_ods readrandom readrandom_memtable_sst $STAT_FILE.readrandom_m
send_benchmark_to_ods readrandom readrandom_fillunique_random $STAT_FILE.readrandom_filluniquerandom
send_benchmark_to_ods fillrandom memtablefillrandom $STAT_FILE.memtablefillreadrandom
send_benchmark_to_ods readrandom memtablereadrandom $STAT_FILE.memtablefillreadrandom
+ send_benchmark_to_ods readwhilewriting readwhilewriting $STAT_FILE.readwhilewriting
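
The recurring change in this script bumps --cache_numshardbits from 4 to 6, i.e. from 2^4 = 16 to 2^6 = 64 block-cache shards, cutting mutex contention in the 16-thread read benchmarks. A minimal C++ sketch of what the flag maps to, assuming db_bench's usual wiring of --cache_size and --cache_numshardbits into NewLRUCache (illustrative, not part of this commit):

    #include "rocksdb/cache.h"
    #include "rocksdb/options.h"

    int main() {
      rocksdb::Options options;
      // 6442450944 bytes = 6 GB of capacity split across 2^6 = 64 shards;
      // each shard is guarded by its own mutex, so more shards mean less
      // lock contention under concurrent reads.
      options.block_cache = rocksdb::NewLRUCache(6442450944, 6);
      return 0;
    }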
4 changes: 2 additions & 2 deletions db/builder.cc
@@ -42,7 +42,7 @@ Status BuildTable(const std::string& dbname,
const Comparator* user_comparator,
const SequenceNumber newest_snapshot,
const SequenceNumber earliest_seqno_in_memtable,
- const bool enable_compression) {
+ const CompressionType compression) {
Status s;
meta->file_size = 0;
meta->smallest_seqno = meta->largest_seqno = 0;
@@ -65,7 +65,7 @@ Status BuildTable(const std::string& dbname,
}

TableBuilder* builder = GetTableBuilder(options, file.get(),
- options.compression);
+ compression);

// the first key is the smallest key
Slice key = iter->key();
2 changes: 1 addition & 1 deletion db/builder.h
@@ -43,6 +43,6 @@ extern Status BuildTable(const std::string& dbname,
const Comparator* user_comparator,
const SequenceNumber newest_snapshot,
const SequenceNumber earliest_seqno_in_memtable,
- const bool enable_compression);
+ const CompressionType compression);

} // namespace rocksdb
4 changes: 4 additions & 0 deletions db/c.cc
@@ -788,6 +788,10 @@ void rocksdb_env_set_background_threads(rocksdb_env_t* env, int n) {
env->rep->SetBackgroundThreads(n);
}

+ void rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n) {
+ env->rep->SetBackgroundThreads(n, Env::HIGH);
+ }

void rocksdb_env_destroy(rocksdb_env_t* env) {
if (!env->is_default) delete env->rep;
delete env;
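
The new entry point simply exposes Env::SetBackgroundThreads(n, Env::HIGH) through the C API. A minimal usage sketch (pool sizes and path are illustrative; conventionally the HIGH pool serves flushes and the LOW pool compactions):

    #include "rocksdb/c.h"

    int main() {
      rocksdb_env_t* env = rocksdb_create_default_env();
      rocksdb_env_set_background_threads(env, 4);                // LOW-priority pool
      rocksdb_env_set_high_priority_background_threads(env, 2);  // HIGH-priority pool

      rocksdb_options_t* options = rocksdb_options_create();
      rocksdb_options_set_env(options, env);
      // ... rocksdb_open(options, "/tmp/db", &errptr) would pick up this env ...
      rocksdb_options_destroy(options);
      rocksdb_env_destroy(env);
      return 0;
    }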
97 changes: 94 additions & 3 deletions db/db_bench.cc
@@ -94,6 +94,8 @@ DEFINE_string(benchmarks,
"\tmergerandom -- same as updaterandom/appendrandom using merge"
" operator. "
"Must be used with merge_operator\n"
"\treadrandommergerandom -- perform N random read-or-merge "
"operations. Must be used with merge_operator\n"
"\tseekrandom -- N random seeks\n"
"\tcrc32c -- repeated crc32c of 4K of data\n"
"\tacquireload -- load N*1000 times\n"
@@ -112,6 +114,11 @@ DEFINE_int64(numdistinct, 1000,
"read/write on fewer keys so that gets are more likely to find the"
" key and puts are more likely to update the same key");

+ DEFINE_int64(merge_keys, -1,
+ "Number of distinct keys to use for MergeRandom and "
+ "ReadRandomMergeRandom. "
+ "If negative, there will be FLAGS_num keys.");

DEFINE_int64(reads, -1, "Number of read operations to do. "
"If negative, do FLAGS_num reads.");

@@ -297,6 +304,11 @@ DEFINE_int32(readwritepercent, 90, "Ratio of reads to reads/writes (expressed"
"default value 90 means 90% operations out of all reads and writes"
" operations are reads. In other words, 9 gets for every 1 put.");

+ DEFINE_int32(mergereadpercent, 70, "Ratio of merges to merges&reads (expressed"
+ " as percentage) for the ReadRandomMergeRandom workload. The"
+ " default value 70 means 70% out of all read and merge operations"
+ " are merges. In other words, 7 merges for every 3 gets.");

DEFINE_int32(deletepercent, 2, "Percentage of deletes out of reads/writes/"
"deletes (used in RandomWithVerify only). RandomWithVerify "
"calculates writepercent as (100 - FLAGS_readwritepercent - "
@@ -446,6 +458,9 @@ DEFINE_uint64(bytes_per_sync, rocksdb::Options().bytes_per_sync,
DEFINE_bool(filter_deletes, false, " On true, deletes use bloom-filter and drop"
" the delete if key not present");

+ DEFINE_int32(max_successive_merges, 0, "Maximum number of successive merge"
+ " operations on a key in the memtable");

static bool ValidatePrefixSize(const char* flagname, int32_t value) {
if (value < 0 || value>=2000000000) {
fprintf(stderr, "Invalid value for --%s: %d. 0<= PrefixSize <=2000000000\n",
@@ -784,6 +799,7 @@ class Benchmark {
long long reads_;
long long writes_;
long long readwrites_;
+ long long merge_keys_;
int heap_counter_;
char keyFormat_[100]; // will contain the format of key. e.g "%016d"
void PrintHeader() {
@@ -958,6 +974,7 @@ class Benchmark {
readwrites_((FLAGS_writes < 0 && FLAGS_reads < 0)? FLAGS_num :
((FLAGS_writes > FLAGS_reads) ? FLAGS_writes : FLAGS_reads)
),
+ merge_keys_(FLAGS_merge_keys < 0 ? FLAGS_num : FLAGS_merge_keys),
heap_counter_(0) {
std::vector<std::string> files;
FLAGS_env->GetChildren(FLAGS_db, &files);
@@ -985,8 +1002,8 @@ class Benchmark {
}

unique_ptr<char []> GenerateKeyFromInt(long long v, const char* suffix = "") {
- unique_ptr<char []> keyInStr(new char[kMaxKeySize]);
- snprintf(keyInStr.get(), kMaxKeySize, keyFormat_, v, suffix);
+ unique_ptr<char []> keyInStr(new char[kMaxKeySize + 1]);
+ snprintf(keyInStr.get(), kMaxKeySize + 1, keyFormat_, v, suffix);
return keyInStr;
}
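
The +1 matters because snprintf's size argument includes the terminating NUL, so a buffer of exactly kMaxKeySize characters would silently truncate the final character of a maximum-length key. A standalone illustration (not from this commit):

    #include <cstdio>

    int main() {
      char buf[4];
      // snprintf writes at most sizeof(buf) bytes *including* the NUL,
      // so the four-character payload is truncated to three.
      std::snprintf(buf, sizeof(buf), "%d", 1234);
      std::printf("%s\n", buf);  // prints "123"
      return 0;
    }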

@@ -1087,6 +1104,14 @@ class Benchmark {
method = &Benchmark::ReadWhileWriting;
} else if (name == Slice("readrandomwriterandom")) {
method = &Benchmark::ReadRandomWriteRandom;
} else if (name == Slice("readrandommergerandom")) {
if (FLAGS_merge_operator.empty()) {
fprintf(stdout, "%-12s : skipped (--merge_operator is unknown)\n",
name.ToString().c_str());
method = nullptr;
} else {
method = &Benchmark::ReadRandomMergeRandom;
}
} else if (name == Slice("updaterandom")) {
method = &Benchmark::UpdateRandom;
} else if (name == Slice("appendrandom")) {
@@ -1421,6 +1446,7 @@ class Benchmark {
FLAGS_merge_operator.c_str());
exit(1);
}
+ options.max_successive_merges = FLAGS_max_successive_merges;

// set universal style compaction configurations, if applicable
if (FLAGS_universal_size_ratio != 0) {
@@ -2375,13 +2401,16 @@ class Benchmark {
//
// For example, use FLAGS_merge_operator="uint64add" and FLAGS_value_size=8
// to simulate random additions over 64-bit integers using merge.
+ //
+ // The number of merges on the same key can be controlled by adjusting
+ // FLAGS_merge_keys.
void MergeRandom(ThreadState* thread) {
RandomGenerator gen;

// The number of iterations is the larger of read_ or write_
Duration duration(FLAGS_duration, readwrites_);
while (!duration.Done(1)) {
- const long long k = thread->rand.Next() % FLAGS_num;
+ const long long k = thread->rand.Next() % merge_keys_;
unique_ptr<char []> key = GenerateKeyFromInt(k);

Status s = db_->Merge(write_options_, key.get(),
Expand All @@ -2400,6 +2429,68 @@ class Benchmark {
thread->stats.AddMessage(msg);
}

+ // Read and merge random keys. The amount of reads and merges are controlled
+ // by adjusting FLAGS_num and FLAGS_mergereadpercent. The number of distinct
+ // keys (and thus also the number of reads and merges on the same key) can be
+ // adjusted with FLAGS_merge_keys.
+ //
+ // As with MergeRandom, the merge operator to use should be defined by
+ // FLAGS_merge_operator.
+ void ReadRandomMergeRandom(ThreadState* thread) {
+ ReadOptions options(FLAGS_verify_checksum, true);
+ RandomGenerator gen;
+ std::string value;
+ long long num_hits = 0;
+ long long num_gets = 0;
+ long long num_merges = 0;
+ size_t max_length = 0;

+ // the number of iterations is the larger of read_ or write_
+ Duration duration(FLAGS_duration, readwrites_);

+ while (!duration.Done(1)) {
+ const long long k = thread->rand.Next() % merge_keys_;
+ unique_ptr<char []> key = GenerateKeyFromInt(k);

+ bool do_merge = int(thread->rand.Next() % 100) < FLAGS_mergereadpercent;

+ if (do_merge) {
+ Status s = db_->Merge(write_options_, key.get(),
+ gen.Generate(value_size_));
+ if (!s.ok()) {
+ fprintf(stderr, "merge error: %s\n", s.ToString().c_str());
+ exit(1);
+ }

+ num_merges++;

+ } else {
+ Status s = db_->Get(options, key.get(), &value);
+ if (value.length() > max_length)
+ max_length = value.length();

+ if (!s.ok() && !s.IsNotFound()) {
+ fprintf(stderr, "get error: %s\n", s.ToString().c_str());
+ // we continue after error rather than exiting so that we can
+ // find more errors if any
+ } else if (!s.IsNotFound()) {
+ num_hits++;
+ }

+ num_gets++;

+ }

+ thread->stats.FinishedSingleOp(db_);
+ }
+ char msg[100];
+ snprintf(msg, sizeof(msg),
+ "(reads:%lld merges:%lld total:%lld hits:%lld maxlength:%zu)",
+ num_gets, num_merges, readwrites_, num_hits, max_length);
+ thread->stats.AddMessage(msg);
+ }


void Compact(ThreadState* thread) {
db_->CompactRange(nullptr, nullptr);
}
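
Putting the new pieces together, the benchmark can be driven roughly like this (flag values are illustrative, not from this commit; uint64add is the merge operator suggested in the MergeRandom comment above):

    ./db_bench \
        --benchmarks=readrandommergerandom \
        --merge_operator=uint64add \
        --value_size=8 \
        --num=10000000 \
        --merge_keys=1000 \
        --mergereadpercent=70 \
        --max_successive_merges=64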
(Diffs for the remaining 17 changed files are not shown.)