diff --git a/db/c_test.c b/db/c_test.c index b2e263a618f..594199cba9f 100644 --- a/db/c_test.c +++ b/db/c_test.c @@ -535,10 +535,6 @@ int main(int argc, char** argv) { StartPhase("checkpoint"); { - rocksdb_put(db, woptions, "foo", 3, "hello", 5, &err); // PEGASUS: avoid memtable to be empty. - // empty memtable will skip create checkpoint in pegasus - CheckNoError(err); - rocksdb_destroy_db(options, dbcheckpointname, &err); CheckNoError(err); @@ -719,9 +715,8 @@ int main(int argc, char** argv) { rocksdb_writebatch_destroy(wb); } - StartPhase("writebatch_savepoint"); // PEGASUS: not support empty batch + StartPhase("writebatch_savepoint"); { - /* rocksdb_writebatch_t* wb = rocksdb_writebatch_create(); rocksdb_writebatch_set_save_point(wb); rocksdb_writebatch_set_save_point(wb); @@ -738,7 +733,6 @@ int main(int argc, char** argv) { CheckNoError(err); CheckGet(db, roptions, "zap", NULL); rocksdb_writebatch_destroy(wb); - */ } StartPhase("writebatch_rep"); @@ -826,9 +820,8 @@ int main(int argc, char** argv) { rocksdb_writebatch_wi_destroy(wb); } - StartPhase("writebatch_wi_savepoint"); // PEGASUS: not support empty batch + StartPhase("writebatch_wi_savepoint"); { - /* rocksdb_writebatch_wi_t* wb = rocksdb_writebatch_wi_create(0, 1); rocksdb_writebatch_wi_set_save_point(wb); const char* k_list[2] = {"z", "ap"}; @@ -842,7 +835,6 @@ int main(int argc, char** argv) { CheckNoError(err); CheckGet(db, roptions, "zap", NULL); rocksdb_writebatch_wi_destroy(wb); - */ } StartPhase("iter"); diff --git a/db/column_family_test.cc b/db/column_family_test.cc index 6696509aacc..62cc53b1fa0 100644 --- a/db/column_family_test.cc +++ b/db/column_family_test.cc @@ -740,7 +740,7 @@ TEST_F(ColumnFamilyTest, BulkAddDrop) { ASSERT_TRUE(families == std::vector<std::string>({"default"})); } -TEST_F(ColumnFamilyTest, DISABLED_DropTest) { +TEST_F(ColumnFamilyTest, DropTest) { // first iteration - dont reopen DB before dropping // second iteration - reopen DB before dropping for (int iter = 0; iter < 2; ++iter) { @@ -764,7 +764,7 @@ TEST_F(ColumnFamilyTest, DISABLED_DropTest) { } } -TEST_F(ColumnFamilyTest, DISABLED_WriteBatchFailure) { +TEST_F(ColumnFamilyTest, WriteBatchFailure) { Open(); CreateColumnFamiliesAndReopen({"one", "two"}); WriteBatch batch; @@ -782,7 +782,7 @@ TEST_F(ColumnFamilyTest, DISABLED_WriteBatchFailure) { Close(); } -TEST_F(ColumnFamilyTest, DISABLED_ReadWrite) { +TEST_F(ColumnFamilyTest, ReadWrite) { Open(); CreateColumnFamiliesAndReopen({"one", "two"}); ASSERT_OK(Put(0, "foo", "v1")); @@ -806,7 +806,7 @@ TEST_F(ColumnFamilyTest, DISABLED_ReadWrite) { Close(); } -TEST_F(ColumnFamilyTest, DISABLED_IgnoreRecoveredLog) { +TEST_F(ColumnFamilyTest, IgnoreRecoveredLog) { std::string backup_logs = dbname_ + "/backup_logs"; // delete old files in backup_logs directory @@ -884,12 +884,12 @@ TEST_F(ColumnFamilyTest, DISABLED_IgnoreRecoveredLog) { #ifndef ROCKSDB_LITE // TEST functions used are not supported TEST_F(ColumnFamilyTest, FlushTest) { Open(); -// CreateColumnFamiliesAndReopen({"one", "two"}); + CreateColumnFamiliesAndReopen({"one", "two"}); ASSERT_OK(Put(0, "foo", "v1")); ASSERT_OK(Put(0, "bar", "v2")); -// ASSERT_OK(Put(1, "mirko", "v3")); + ASSERT_OK(Put(1, "mirko", "v3")); ASSERT_OK(Put(0, "foo", "v2")); -// ASSERT_OK(Put(2, "fodor", "v5")); + ASSERT_OK(Put(2, "fodor", "v5")); for (int j = 0; j < 2; j++) { ReadOptions ro; @@ -899,13 +899,13 @@ TEST_F(ColumnFamilyTest, FlushTest) { ASSERT_OK(db_->NewIterators(ro, handles_, &iterators)); } - for (int i = 0; i < 1; ++i) { + for (int i = 0; i < 3; ++i) { uint64_t
max_total_in_memory_state = MaxTotalInMemoryState(); Flush(i); AssertMaxTotalInMemoryState(max_total_in_memory_state); } -// ASSERT_OK(Put(1, "foofoo", "bar")); + ASSERT_OK(Put(1, "foofoo", "bar")); ASSERT_OK(Put(0, "foofoo", "bar")); for (auto* it : iterators) { @@ -917,11 +917,11 @@ TEST_F(ColumnFamilyTest, FlushTest) { for (int iter = 0; iter <= 2; ++iter) { ASSERT_EQ("v2", Get(0, "foo")); ASSERT_EQ("v2", Get(0, "bar")); -// ASSERT_EQ("v3", Get(1, "mirko")); -// ASSERT_EQ("v5", Get(2, "fodor")); + ASSERT_EQ("v3", Get(1, "mirko")); + ASSERT_EQ("v5", Get(2, "fodor")); ASSERT_EQ("NOT_FOUND", Get(0, "fodor")); -// ASSERT_EQ("NOT_FOUND", Get(1, "fodor")); -// ASSERT_EQ("NOT_FOUND", Get(2, "foo")); + ASSERT_EQ("NOT_FOUND", Get(1, "fodor")); + ASSERT_EQ("NOT_FOUND", Get(2, "foo")); if (iter <= 1) { Reopen(); } @@ -930,7 +930,7 @@ TEST_F(ColumnFamilyTest, FlushTest) { } // Makes sure that obsolete log files get deleted -TEST_F(ColumnFamilyTest, DISABLED_LogDeletionTest) { +TEST_F(ColumnFamilyTest, LogDeletionTest) { db_options_.max_total_wal_size = std::numeric_limits<uint64_t>::max(); column_family_options_.arena_block_size = 4 * 1024; column_family_options_.write_buffer_size = 128000; // 128KB @@ -998,7 +998,7 @@ TEST_F(ColumnFamilyTest, DISABLED_LogDeletionTest) { } #endif // !ROCKSDB_LITE -TEST_F(ColumnFamilyTest, DISABLED_CrashAfterFlush) { +TEST_F(ColumnFamilyTest, CrashAfterFlush) { std::unique_ptr<FaultInjectionTestEnv> fault_env( new FaultInjectionTestEnv(env_)); db_options_.env = fault_env.get(); @@ -1038,7 +1038,7 @@ TEST_F(ColumnFamilyTest, OpenNonexistentColumnFamily) { #ifndef ROCKSDB_LITE // WaitForFlush() is not supported // Makes sure that obsolete log files get deleted -TEST_F(ColumnFamilyTest, DISABLED_DifferentWriteBufferSizes) { +TEST_F(ColumnFamilyTest, DifferentWriteBufferSizes) { // disable flushing stale column families db_options_.max_total_wal_size = std::numeric_limits<uint64_t>::max(); Open(); @@ -1200,7 +1200,7 @@ TEST_F(ColumnFamilyTest, GetComparator) { Close(); } -TEST_F(ColumnFamilyTest, DISABLED_DifferentMergeOperators) { +TEST_F(ColumnFamilyTest, DifferentMergeOperators) { Open(); CreateColumnFamilies({"first", "second"}); ColumnFamilyOptions default_cf, first, second; @@ -1231,7 +1231,7 @@ TEST_F(ColumnFamilyTest, DISABLED_DifferentMergeOperators) { } #ifndef ROCKSDB_LITE // WaitForFlush() is not supported -TEST_F(ColumnFamilyTest, DISABLED_DifferentCompactionStyles) { +TEST_F(ColumnFamilyTest, DifferentCompactionStyles) { Open(); CreateColumnFamilies({"one", "two"}); ColumnFamilyOptions default_cf, one, two; @@ -1303,7 +1303,7 @@ TEST_F(ColumnFamilyTest, DISABLED_DifferentCompactionStyles) { #ifndef ROCKSDB_LITE // Sync points not supported in RocksDB Lite -TEST_F(ColumnFamilyTest, DISABLED_MultipleManualCompactions) { +TEST_F(ColumnFamilyTest, MultipleManualCompactions) { Open(); CreateColumnFamilies({"one", "two"}); ColumnFamilyOptions default_cf, one, two; @@ -1400,7 +1400,7 @@ TEST_F(ColumnFamilyTest, DISABLED_MultipleManualCompactions) { Close(); } -TEST_F(ColumnFamilyTest, DISABLED_AutomaticAndManualCompactions) { +TEST_F(ColumnFamilyTest, AutomaticAndManualCompactions) { Open(); CreateColumnFamilies({"one", "two"}); ColumnFamilyOptions default_cf, one, two; @@ -1493,7 +1493,7 @@ TEST_F(ColumnFamilyTest, DISABLED_AutomaticAndManualCompactions) { } } -TEST_F(ColumnFamilyTest, DISABLED_ManualAndAutomaticCompactions) { +TEST_F(ColumnFamilyTest, ManualAndAutomaticCompactions) { Open(); CreateColumnFamilies({"one", "two"}); ColumnFamilyOptions default_cf, one, two; @@ -1589,7 +1589,7 @@
TEST_F(ColumnFamilyTest, DISABLED_ManualAndAutomaticCompactions) { } } -TEST_F(ColumnFamilyTest, DISABLED_SameCFManualManualCompactions) { +TEST_F(ColumnFamilyTest, SameCFManualManualCompactions) { Open(); CreateColumnFamilies({"one"}); ColumnFamilyOptions default_cf, one; @@ -1688,7 +1688,7 @@ TEST_F(ColumnFamilyTest, DISABLED_SameCFManualManualCompactions) { } } -TEST_F(ColumnFamilyTest, DISABLED_SameCFManualAutomaticCompactions) { +TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactions) { Open(); CreateColumnFamilies({"one"}); ColumnFamilyOptions default_cf, one; @@ -1778,7 +1778,7 @@ TEST_F(ColumnFamilyTest, DISABLED_SameCFManualAutomaticCompactions) { } } -TEST_F(ColumnFamilyTest, DISABLED_SameCFManualAutomaticCompactionsLevel) { +TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactionsLevel) { Open(); CreateColumnFamilies({"one"}); ColumnFamilyOptions default_cf, one; @@ -1875,7 +1875,7 @@ TEST_F(ColumnFamilyTest, DISABLED_SameCFManualAutomaticCompactionsLevel) { // This will wait because there is an unscheduled manual compaction. // Once the conflict is hit, the manual compaction starts and ends // Then another automatic will start and end. -TEST_F(ColumnFamilyTest, DISABLED_SameCFManualAutomaticConflict) { +TEST_F(ColumnFamilyTest, SameCFManualAutomaticConflict) { Open(); CreateColumnFamilies({"one"}); ColumnFamilyOptions default_cf, one; @@ -1994,7 +1994,7 @@ TEST_F(ColumnFamilyTest, DISABLED_SameCFManualAutomaticConflict) { // This will wait because the automatic compaction has files it needs. // Once the conflict is hit, the automatic compaction starts and ends // Then the manual will run and end. -TEST_F(ColumnFamilyTest, DISABLED_SameCFAutomaticManualCompactions) { +TEST_F(ColumnFamilyTest, SameCFAutomaticManualCompactions) { Open(); CreateColumnFamilies({"one"}); ColumnFamilyOptions default_cf, one; @@ -2091,7 +2091,7 @@ std::string IterStatus(Iterator* iter) { } } // anonymous namespace -TEST_F(ColumnFamilyTest, DISABLED_NewIteratorsTest) { +TEST_F(ColumnFamilyTest, NewIteratorsTest) { // iter == 0 -- no tailing // iter == 2 -- tailing for (int iter = 0; iter < 2; ++iter) { @@ -2138,7 +2138,7 @@ TEST_F(ColumnFamilyTest, DISABLED_NewIteratorsTest) { #endif // !ROCKSDB_LITE #ifndef ROCKSDB_LITE // ReadOnlyDB is not supported -TEST_F(ColumnFamilyTest, DISABLED_ReadOnlyDBTest) { +TEST_F(ColumnFamilyTest, ReadOnlyDBTest) { Open(); CreateColumnFamiliesAndReopen({"one", "two", "three", "four"}); ASSERT_OK(Put(0, "a", "b")); @@ -2190,7 +2190,7 @@ TEST_F(ColumnFamilyTest, DISABLED_ReadOnlyDBTest) { #endif // !ROCKSDB_LITE #ifndef ROCKSDB_LITE // WaitForFlush() is not supported in lite -TEST_F(ColumnFamilyTest, DISABLED_DontRollEmptyLogs) { +TEST_F(ColumnFamilyTest, DontRollEmptyLogs) { Open(); CreateColumnFamiliesAndReopen({"one", "two", "three", "four"}); @@ -2214,7 +2214,7 @@ TEST_F(ColumnFamilyTest, DISABLED_DontRollEmptyLogs) { #endif // !ROCKSDB_LITE #ifndef ROCKSDB_LITE // WaitForCompaction() is not supported in lite -TEST_F(ColumnFamilyTest, DISABLED_FlushStaleColumnFamilies) { +TEST_F(ColumnFamilyTest, FlushStaleColumnFamilies) { Open(); CreateColumnFamilies({"one", "two"}); ColumnFamilyOptions default_cf, one, two; @@ -2249,7 +2249,7 @@ TEST_F(ColumnFamilyTest, DISABLED_FlushStaleColumnFamilies) { } #endif // !ROCKSDB_LITE -TEST_F(ColumnFamilyTest, DISABLED_CreateMissingColumnFamilies) { +TEST_F(ColumnFamilyTest, CreateMissingColumnFamilies) { Status s = TryOpen({"one", "two"}); ASSERT_TRUE(!s.ok()); db_options_.create_missing_column_families = true; @@ -2307,7 +2307,7 
@@ TEST_F(ColumnFamilyTest, SanitizeOptions) { } } -TEST_F(ColumnFamilyTest, DISABLED_ReadDroppedColumnFamily) { +TEST_F(ColumnFamilyTest, ReadDroppedColumnFamily) { // iter 0 -- drop CF, don't reopen // iter 1 -- delete CF, reopen for (int iter = 0; iter < 2; ++iter) { @@ -2379,7 +2379,7 @@ TEST_F(ColumnFamilyTest, DISABLED_ReadDroppedColumnFamily) { } } -TEST_F(ColumnFamilyTest, DISABLED_FlushAndDropRaceCondition) { +TEST_F(ColumnFamilyTest, FlushAndDropRaceCondition) { db_options_.create_missing_column_families = true; Open({"default", "one"}); ColumnFamilyOptions options; @@ -2911,7 +2911,7 @@ TEST_F(ColumnFamilyTest, CompactionSpeedupTwoColumnFamilies) { } #ifndef ROCKSDB_LITE -TEST_F(ColumnFamilyTest, DISABLED_FlushCloseWALFiles) { +TEST_F(ColumnFamilyTest, FlushCloseWALFiles) { SpecialEnv env(Env::Default()); db_options_.env = &env; db_options_.max_background_flushes = 1; @@ -2953,7 +2953,7 @@ TEST_F(ColumnFamilyTest, DISABLED_FlushCloseWALFiles) { #endif // !ROCKSDB_LITE #ifndef ROCKSDB_LITE // WaitForFlush() is not supported -TEST_F(ColumnFamilyTest, DISABLED_IteratorCloseWALFile1) { +TEST_F(ColumnFamilyTest, IteratorCloseWALFile1) { SpecialEnv env(Env::Default()); db_options_.env = &env; db_options_.max_background_flushes = 1; @@ -2998,7 +2998,7 @@ TEST_F(ColumnFamilyTest, DISABLED_IteratorCloseWALFile1) { Close(); } -TEST_F(ColumnFamilyTest, DISABLED_IteratorCloseWALFile2) { +TEST_F(ColumnFamilyTest, IteratorCloseWALFile2) { SpecialEnv env(Env::Default()); // Allow both of flush and purge job to schedule. env.SetBackgroundThreads(2, Env::HIGH); @@ -3055,7 +3055,7 @@ TEST_F(ColumnFamilyTest, DISABLED_IteratorCloseWALFile2) { #endif // !ROCKSDB_LITE #ifndef ROCKSDB_LITE // TEST functions are not supported in lite -TEST_F(ColumnFamilyTest, DISABLED_ForwardIteratorCloseWALFile) { +TEST_F(ColumnFamilyTest, ForwardIteratorCloseWALFile) { SpecialEnv env(Env::Default()); // Allow both of flush and purge job to schedule. env.SetBackgroundThreads(2, Env::HIGH); @@ -3132,7 +3132,7 @@ TEST_F(ColumnFamilyTest, DISABLED_ForwardIteratorCloseWALFile) { // Disable on windows because SyncWAL requires env->IsSyncThreadSafe() // to return true which is not so in unbuffered mode. #ifndef OS_WIN -TEST_F(ColumnFamilyTest, DISABLED_LogSyncConflictFlush) { +TEST_F(ColumnFamilyTest, LogSyncConflictFlush) { Open(); CreateColumnFamiliesAndReopen({"one", "two"}); diff --git a/db/compaction_job_stats_test.cc b/db/compaction_job_stats_test.cc index a4e69213619..9a8372f5785 100644 --- a/db/compaction_job_stats_test.cc +++ b/db/compaction_job_stats_test.cc @@ -140,9 +140,7 @@ class CompactionJobStatsTest : public testing::Test, size_t cfi = handles_.size(); handles_.resize(cfi + cfs.size()); for (auto cf : cfs) { - if (cf != "default") { - ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++])); - } + ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++])); } } @@ -666,7 +664,7 @@ TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) { options.report_bg_io_stats = true; for (int test = 0; test < 2; ++test) { DestroyAndReopen(options); - CreateAndReopenWithCF({"default"}, options); + CreateAndReopenWithCF({"pikachu"}, options); // 1st Phase: generate "num_L0_files" L0 files. 
int num_L0_files = 0; @@ -884,7 +882,7 @@ TEST_P(CompactionJobStatsTest, DeletionStatsTest) { options.max_subcompactions = max_subcompactions_; DestroyAndReopen(options); - CreateAndReopenWithCF({"default"}, options); + CreateAndReopenWithCF({"pikachu"}, options); // Stage 1: Generate several L0 files and then send them to L2 by // using CompactRangeOptions and CompactRange(). These files will @@ -976,7 +974,7 @@ TEST_P(CompactionJobStatsTest, UniversalCompactionTest) { options.max_subcompactions = max_subcompactions_; DestroyAndReopen(options); - CreateAndReopenWithCF({"default"}, options); + CreateAndReopenWithCF({"pikachu"}, options); // Generates the expected CompactionJobStats for each compaction for (uint32_t num_flushes = 2; num_flushes <= kTestScale; num_flushes++) { diff --git a/db/corruption_test.cc b/db/corruption_test.cc index b24a3785e10..56e157832c2 100644 --- a/db/corruption_test.cc +++ b/db/corruption_test.cc @@ -105,9 +105,7 @@ class CorruptionTest : public testing::Test { Slice key = Key(i, &key_space); batch.Clear(); batch.Put(key, Value(i, &value_space)); - WriteOptions wop; - wop.disableWAL = true; - ASSERT_OK(db_->Write(wop, &batch)); + ASSERT_OK(db_->Write(WriteOptions(), &batch)); } } @@ -257,7 +255,7 @@ class CorruptionTest : public testing::Test { } }; -TEST_F(CorruptionTest, DISABLED_Recovery) { // PEGASUS: pegasus does not use wal, ignore this case +TEST_F(CorruptionTest, Recovery) { Build(100); Check(100, 100); #ifdef OS_WIN @@ -341,14 +339,14 @@ TEST_F(CorruptionTest, TableFileIndexData) { ASSERT_NOK(dbi->VerifyChecksum()); } -TEST_F(CorruptionTest, DISABLED_MissingDescriptor) { // PEGASUS: last_flush_sequence is lost when repair +TEST_F(CorruptionTest, MissingDescriptor) { Build(1000); RepairDB(); Reopen(); Check(1000, 1000); } -TEST_F(CorruptionTest, DISABLED_SequenceNumberRecovery) { // PEGASUS: last_flush_sequence is lost when repair +TEST_F(CorruptionTest, SequenceNumberRecovery) { ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1")); ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2")); ASSERT_OK(db_->Put(WriteOptions(), "foo", "v3")); @@ -369,7 +367,7 @@ TEST_F(CorruptionTest, DISABLED_SequenceNumberRecovery) { // PEGASUS: last_f ASSERT_EQ("v6", v); } -TEST_F(CorruptionTest, DISABLED_CorruptedDescriptor) { // PEGASUS: last_flush_sequence is lost when repair +TEST_F(CorruptionTest, CorruptedDescriptor) { ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello")); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); dbi->TEST_FlushMemTable(); diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc index 9c5ad405171..6fe33a193a3 100644 --- a/db/db_basic_test.cc +++ b/db/db_basic_test.cc @@ -169,11 +169,12 @@ TEST_F(DBBasicTest, CompactedDB) { TEST_F(DBBasicTest, LevelLimitReopen) { Options options = CurrentOptions(); + CreateAndReopenWithCF({"pikachu"}, options); const std::string value(1024 * 1024, ' '); int i = 0; - while (NumTableFilesAtLevel(2, 0) == 0) { - ASSERT_OK(Put(Key(i++), value)); + while (NumTableFilesAtLevel(2, 1) == 0) { + ASSERT_OK(Put(1, Key(i++), value)); dbfull()->TEST_WaitForFlushMemTable(); dbfull()->TEST_WaitForCompact(); } @@ -187,36 +188,36 @@ TEST_F(DBBasicTest, LevelLimitReopen) { options.num_levels = 10; options.max_bytes_for_level_multiplier_additional.resize(10, 1); - ASSERT_OK(TryReopenWithColumnFamilies({"default"}, options)); + ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options)); } #endif // ROCKSDB_LITE TEST_F(DBBasicTest, PutDeleteGet) { do { - ASSERT_OK(Put("foo", "v1")); - ASSERT_EQ("v1", Get("foo")); - ASSERT_OK(Put("foo", "v2"));
- ASSERT_EQ("v2", Get("foo")); - ASSERT_OK(Delete("foo")); - ASSERT_EQ("NOT_FOUND", Get("foo")); - } while (ChangeOptions(kSkipPipelinedWrite)); + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ASSERT_OK(Put(1, "foo", "v1")); + ASSERT_EQ("v1", Get(1, "foo")); + ASSERT_OK(Put(1, "foo", "v2")); + ASSERT_EQ("v2", Get(1, "foo")); + ASSERT_OK(Delete(1, "foo")); + ASSERT_EQ("NOT_FOUND", Get(1, "foo")); + } while (ChangeOptions()); } TEST_F(DBBasicTest, PutSingleDeleteGet) { do { - //CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); - ASSERT_OK(Put("foo", "v1")); - ASSERT_EQ("v1", Get("foo")); - ASSERT_OK(Put("foo2", "v2")); - ASSERT_EQ("v2", Get("foo2")); - ASSERT_OK(SingleDelete("foo")); - ASSERT_EQ("NOT_FOUND", Get("foo")); + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ASSERT_OK(Put(1, "foo", "v1")); + ASSERT_EQ("v1", Get(1, "foo")); + ASSERT_OK(Put(1, "foo2", "v2")); + ASSERT_EQ("v2", Get(1, "foo2")); + ASSERT_OK(SingleDelete(1, "foo")); + ASSERT_EQ("NOT_FOUND", Get(1, "foo")); // Skip HashCuckooRep as it does not support single delete. FIFO and // universal compaction do not apply to the test case. Skip MergePut // because single delete does not get removed when it encounters a merge. } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction | - kSkipUniversalCompaction | kSkipMergePut | - kSkipPipelinedWrite)); + kSkipUniversalCompaction | kSkipMergePut)); } TEST_F(DBBasicTest, EmptyFlush) { @@ -227,27 +228,28 @@ TEST_F(DBBasicTest, EmptyFlush) { Options options = CurrentOptions(); options.disable_auto_compactions = true; + CreateAndReopenWithCF({"pikachu"}, options); - Put("a", Slice()); - SingleDelete("a"); - ASSERT_OK(Flush(0)); + Put(1, "a", Slice()); + SingleDelete(1, "a"); + ASSERT_OK(Flush(1)); - ASSERT_EQ("[ ]", AllEntriesFor("a", 0)); + ASSERT_EQ("[ ]", AllEntriesFor("a", 1)); // Skip HashCuckooRep as it does not support single delete. FIFO and // universal compaction do not apply to the test case. Skip MergePut // because merges cannot be combined with single deletions. } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction | - kSkipUniversalCompaction | kSkipMergePut| kSkipPipelinedWrite)); + kSkipUniversalCompaction | kSkipMergePut)); } TEST_F(DBBasicTest, GetFromVersions) { do { - //CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); - ASSERT_OK(Put("foo", "v1")); - ASSERT_OK(Flush(0)); - ASSERT_EQ("v1", Get("foo")); -// ASSERT_EQ("NOT_FOUND", Get(0, "foo")); - } while (ChangeOptions(kSkipPipelinedWrite)); + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ASSERT_OK(Put(1, "foo", "v1")); + ASSERT_OK(Flush(1)); + ASSERT_EQ("v1", Get(1, "foo")); + ASSERT_EQ("NOT_FOUND", Get(0, "foo")); + } while (ChangeOptions()); } #ifndef ROCKSDB_LITE @@ -255,26 +257,26 @@ TEST_F(DBBasicTest, GetSnapshot) { anon::OptionsOverride options_override; options_override.skip_policy = kSkipNoSnapshot; do { -// CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override)); + CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override)); // Try with both a short key and a long key for (int i = 0; i < 2; i++) { std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x'); - ASSERT_OK(Put(key, "v1")); + ASSERT_OK(Put(1, key, "v1")); const Snapshot* s1 = db_->GetSnapshot(); if (option_config_ == kHashCuckoo) { // Unsupported case. 
ASSERT_TRUE(s1 == nullptr); break; } - ASSERT_OK(Put(key, "v2")); - ASSERT_EQ("v2", Get(key)); - ASSERT_EQ("v1", Get(key, s1)); - ASSERT_OK(Flush(0)); - ASSERT_EQ("v2", Get(key)); - ASSERT_EQ("v1", Get(key, s1)); + ASSERT_OK(Put(1, key, "v2")); + ASSERT_EQ("v2", Get(1, key)); + ASSERT_EQ("v1", Get(1, key, s1)); + ASSERT_OK(Flush(1)); + ASSERT_EQ("v2", Get(1, key)); + ASSERT_EQ("v1", Get(1, key, s1)); db_->ReleaseSnapshot(s1); } - } while (ChangeOptions(kSkipPipelinedWrite)); + } while (ChangeOptions()); } #endif // ROCKSDB_LITE @@ -297,18 +299,18 @@ TEST_F(DBBasicTest, FlushMultipleMemtable) { options.max_write_buffer_number = 4; options.min_write_buffer_number_to_merge = 3; options.max_write_buffer_number_to_maintain = -1; - //CreateAndReopenWithCF({"pikachu"}, options); - ASSERT_OK(dbfull()->Put(writeOpt,"foo", "v1")); - ASSERT_OK(Flush(0)); - ASSERT_OK(dbfull()->Put(writeOpt,"bar", "v1")); - - ASSERT_EQ("v1", Get("foo")); - ASSERT_EQ("v1", Get("bar")); - ASSERT_OK(Flush(0)); + CreateAndReopenWithCF({"pikachu"}, options); + ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v1")); + ASSERT_OK(Flush(1)); + ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v1")); + + ASSERT_EQ("v1", Get(1, "foo")); + ASSERT_EQ("v1", Get(1, "bar")); + ASSERT_OK(Flush(1)); } while (ChangeCompactOptions()); } -TEST_F(DBBasicTest, DISABLED_FlushEmptyColumnFamily) { +TEST_F(DBBasicTest, FlushEmptyColumnFamily) { // Block flush thread and disable compaction thread env_->SetBackgroundThreads(1, Env::HIGH); env_->SetBackgroundThreads(1, Env::LOW); @@ -329,15 +331,10 @@ TEST_F(DBBasicTest, DISABLED_FlushEmptyColumnFamily) { options.max_write_buffer_number_to_maintain = 1; CreateAndReopenWithCF({"pikachu"}, options); - auto cfh = dbfull()->DefaultColumnFamily(); - auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh)->cfd(); - ASSERT_EQ(0, cfd->imm()->NumNotFlushed()); - // Compaction can still go through even if no thread can flush the // mem table. ASSERT_OK(Flush(0)); ASSERT_OK(Flush(1)); - ASSERT_EQ(0, cfd->imm()->NumNotFlushed()); // Insert can go through ASSERT_OK(dbfull()->Put(writeOpt, handles_[0], "foo", "v1")); @@ -346,34 +343,12 @@ TEST_F(DBBasicTest, DISABLED_FlushEmptyColumnFamily) { ASSERT_EQ("v1", Get(0, "foo")); ASSERT_EQ("v1", Get(1, "bar")); - // Compaction without waiting will go through even if no thread can - // flush the mem table, which will switch the mem table. - FlushOptions flush_options; - flush_options.wait = false; - ASSERT_OK(Flush(0, flush_options)); - ASSERT_OK(Flush(1, flush_options)); - ASSERT_EQ(1, cfd->imm()->NumNotFlushed()); - - // Compaction without waiting will go through even if no thread can - // flush the mem table, and no new compaction will be started. - ASSERT_OK(Flush(0, flush_options)); - ASSERT_OK(Flush(1, flush_options)); - ASSERT_EQ(1, cfd->imm()->NumNotFlushed()); - - // Insert can go through - ASSERT_OK(dbfull()->Put(writeOpt, handles_[0], "k1", "v1")); - ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", "v2")); - - ASSERT_EQ("v1", Get(0, "k1")); - ASSERT_EQ("v2", Get(1, "k2")); - sleeping_task_high.WakeUp(); sleeping_task_high.WaitUntilDone(); // Flush can still go through.
ASSERT_OK(Flush(0)); ASSERT_OK(Flush(1)); - ASSERT_EQ(0, cfd->imm()->NumNotFlushed()); sleeping_task_low.WakeUp(); sleeping_task_low.WaitUntilDone(); @@ -381,45 +356,45 @@ TEST_F(DBBasicTest, DISABLED_FlushEmptyColumnFamily) { TEST_F(DBBasicTest, FLUSH) { do { -// CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); WriteOptions writeOpt = WriteOptions(); writeOpt.disableWAL = true; SetPerfLevel(kEnableTime); - ASSERT_OK(dbfull()->Put(writeOpt,"foo", "v1")); + ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v1")); // this will now also flush the last 2 writes - ASSERT_OK(Flush(0)); - ASSERT_OK(dbfull()->Put(writeOpt,"bar", "v1")); + ASSERT_OK(Flush(1)); + ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v1")); get_perf_context()->Reset(); - Get("foo"); + Get(1, "foo"); ASSERT_TRUE((int)get_perf_context()->get_from_output_files_time > 0); ASSERT_EQ(2, (int)get_perf_context()->get_read_bytes); - ReopenWithColumnFamilies({"default"}, CurrentOptions()); - ASSERT_EQ("v1", Get("foo")); - ASSERT_EQ("v1", Get("bar")); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + ASSERT_EQ("v1", Get(1, "foo")); + ASSERT_EQ("v1", Get(1, "bar")); writeOpt.disableWAL = true; - ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v2")); - ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v2")); - ASSERT_OK(Flush(0)); + ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v2")); + ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v2")); + ASSERT_OK(Flush(1)); - ReopenWithColumnFamilies({"default"}, CurrentOptions()); - ASSERT_EQ("v2", Get("bar")); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + ASSERT_EQ("v2", Get(1, "bar")); get_perf_context()->Reset(); - ASSERT_EQ("v2", Get("foo")); + ASSERT_EQ("v2", Get(1, "foo")); ASSERT_TRUE((int)get_perf_context()->get_from_output_files_time > 0); writeOpt.disableWAL = false; - ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v3")); - ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v3")); - ASSERT_OK(Flush(0)); + ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v3")); + ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v3")); + ASSERT_OK(Flush(1)); - ReopenWithColumnFamilies({"default"}, CurrentOptions()); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); // 'foo' should be there because its put // has WAL enabled. - ASSERT_EQ("v3", Get("foo")); - ASSERT_EQ("v3", Get("bar")); + ASSERT_EQ("v3", Get(1, "foo")); + ASSERT_EQ("v3", Get(1, "bar")); SetPerfLevel(kDisable); } while (ChangeCompactOptions()); @@ -430,20 +405,21 @@ TEST_F(DBBasicTest, ManifestRollOver) { Options options; options.max_manifest_file_size = 10; // 10 bytes options = CurrentOptions(options); + CreateAndReopenWithCF({"pikachu"}, options); { - ASSERT_OK(Put("manifest_key1", std::string(1000, '1'))); - ASSERT_OK(Put("manifest_key2", std::string(1000, '2'))); - ASSERT_OK(Put("manifest_key3", std::string(1000, '3'))); + ASSERT_OK(Put(1, "manifest_key1", std::string(1000, '1'))); + ASSERT_OK(Put(1, "manifest_key2", std::string(1000, '2'))); + ASSERT_OK(Put(1, "manifest_key3", std::string(1000, '3'))); uint64_t manifest_before_flush = dbfull()->TEST_Current_Manifest_FileNo(); - ASSERT_OK(Flush(0)); // This should trigger LogAndApply. + ASSERT_OK(Flush(1)); // This should trigger LogAndApply. 
uint64_t manifest_after_flush = dbfull()->TEST_Current_Manifest_FileNo(); ASSERT_GT(manifest_after_flush, manifest_before_flush); - ReopenWithColumnFamilies({"default"}, options); + ReopenWithColumnFamilies({"default", "pikachu"}, options); ASSERT_GT(dbfull()->TEST_Current_Manifest_FileNo(), manifest_after_flush); // check if a new manifest file got inserted or not. - ASSERT_EQ(std::string(1000, '1'), Get("manifest_key1")); - ASSERT_EQ(std::string(1000, '2'), Get("manifest_key2")); - ASSERT_EQ(std::string(1000, '3'), Get("manifest_key3")); + ASSERT_EQ(std::string(1000, '1'), Get(1, "manifest_key1")); + ASSERT_EQ(std::string(1000, '2'), Get(1, "manifest_key2")); + ASSERT_EQ(std::string(1000, '3'), Get(1, "manifest_key3")); } } while (ChangeCompactOptions()); } @@ -475,64 +451,64 @@ TEST_F(DBBasicTest, Snapshot) { anon::OptionsOverride options_override; options_override.skip_policy = kSkipNoSnapshot; do { - //CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override)); - Put(/*0, */"foo", "0v1"); - //Put(1, "foo", "1v1"); + CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override)); + Put(0, "foo", "0v1"); + Put(1, "foo", "1v1"); const Snapshot* s1 = db_->GetSnapshot(); ASSERT_EQ(1U, GetNumSnapshots()); uint64_t time_snap1 = GetTimeOldestSnapshots(); ASSERT_GT(time_snap1, 0U); - Put(/*0, */"foo", "0v2"); - //Put(1, "foo", "1v2"); + Put(0, "foo", "0v2"); + Put(1, "foo", "1v2"); env_->addon_time_.fetch_add(1); const Snapshot* s2 = db_->GetSnapshot(); ASSERT_EQ(2U, GetNumSnapshots()); ASSERT_EQ(time_snap1, GetTimeOldestSnapshots()); - Put(/*0, */"foo", "0v3"); - //Put(1, "foo", "1v3"); + Put(0, "foo", "0v3"); + Put(1, "foo", "1v3"); { ManagedSnapshot s3(db_); ASSERT_EQ(3U, GetNumSnapshots()); ASSERT_EQ(time_snap1, GetTimeOldestSnapshots()); - Put(/*0, */"foo", "0v4"); - //Put(1, "foo", "1v4"); - ASSERT_EQ("0v1", Get(/*0, */"foo", s1)); - //ASSERT_EQ("1v1", Get(1, "foo", s1)); - ASSERT_EQ("0v2", Get(/*0, */"foo", s2)); - //ASSERT_EQ("1v2", Get(1, "foo", s2)); - ASSERT_EQ("0v3", Get(/*0, */"foo", s3.snapshot())); - //ASSERT_EQ("1v3", Get(1, "foo", s3.snapshot())); - ASSERT_EQ("0v4", Get(/*0, */"foo")); - //ASSERT_EQ("1v4", Get(1, "foo")); + Put(0, "foo", "0v4"); + Put(1, "foo", "1v4"); + ASSERT_EQ("0v1", Get(0, "foo", s1)); + ASSERT_EQ("1v1", Get(1, "foo", s1)); + ASSERT_EQ("0v2", Get(0, "foo", s2)); + ASSERT_EQ("1v2", Get(1, "foo", s2)); + ASSERT_EQ("0v3", Get(0, "foo", s3.snapshot())); + ASSERT_EQ("1v3", Get(1, "foo", s3.snapshot())); + ASSERT_EQ("0v4", Get(0, "foo")); + ASSERT_EQ("1v4", Get(1, "foo")); } ASSERT_EQ(2U, GetNumSnapshots()); ASSERT_EQ(time_snap1, GetTimeOldestSnapshots()); - ASSERT_EQ("0v1", Get(/*0, */"foo", s1)); - //ASSERT_EQ("1v1", Get(1, "foo", s1)); - ASSERT_EQ("0v2", Get(/*0, */"foo", s2)); - //ASSERT_EQ("1v2", Get(1, "foo", s2)); - ASSERT_EQ("0v4", Get(/*0, */"foo")); - //ASSERT_EQ("1v4", Get(1, "foo")); + ASSERT_EQ("0v1", Get(0, "foo", s1)); + ASSERT_EQ("1v1", Get(1, "foo", s1)); + ASSERT_EQ("0v2", Get(0, "foo", s2)); + ASSERT_EQ("1v2", Get(1, "foo", s2)); + ASSERT_EQ("0v4", Get(0, "foo")); + ASSERT_EQ("1v4", Get(1, "foo")); db_->ReleaseSnapshot(s1); - ASSERT_EQ("0v2", Get(/*0, */"foo", s2)); - //ASSERT_EQ("1v2", Get(1, "foo", s2)); - ASSERT_EQ("0v4", Get(/*0, */"foo")); - //ASSERT_EQ("1v4", Get(1, "foo")); + ASSERT_EQ("0v2", Get(0, "foo", s2)); + ASSERT_EQ("1v2", Get(1, "foo", s2)); + ASSERT_EQ("0v4", Get(0, "foo")); + ASSERT_EQ("1v4", Get(1, "foo")); ASSERT_EQ(1U, GetNumSnapshots()); ASSERT_LT(time_snap1, GetTimeOldestSnapshots()); 
db_->ReleaseSnapshot(s2); ASSERT_EQ(0U, GetNumSnapshots()); - ASSERT_EQ("0v4", Get(/*0, */"foo")); - //ASSERT_EQ("1v4", Get(1, "foo")); - } while (ChangeOptions(kSkipHashCuckoo | kSkipPipelinedWrite)); + ASSERT_EQ("0v4", Get(0, "foo")); + ASSERT_EQ("1v4", Get(1, "foo")); + } while (ChangeOptions(kSkipHashCuckoo)); } #endif // ROCKSDB_LITE @@ -543,54 +519,53 @@ TEST_F(DBBasicTest, CompactBetweenSnapshots) { do { Options options = CurrentOptions(options_override); options.disable_auto_compactions = true; - Reopen(options); + CreateAndReopenWithCF({"pikachu"}, options); Random rnd(301); - FillLevels("a", "z", 0); + FillLevels("a", "z", 1); - Put("foo", "first"); + Put(1, "foo", "first"); const Snapshot* snapshot1 = db_->GetSnapshot(); - Put("foo", "second"); - Put("foo", "third"); - Put("foo", "fourth"); + Put(1, "foo", "second"); + Put(1, "foo", "third"); + Put(1, "foo", "fourth"); const Snapshot* snapshot2 = db_->GetSnapshot(); - Put("foo", "fifth"); - Put("foo", "sixth"); + Put(1, "foo", "fifth"); + Put(1, "foo", "sixth"); // All entries (including duplicates) exist // before any compaction or flush is triggered. - ASSERT_EQ(AllEntriesFor("foo", 0), + ASSERT_EQ(AllEntriesFor("foo", 1), "[ sixth, fifth, fourth, third, second, first ]"); - ASSERT_EQ("sixth", Get("foo")); - ASSERT_EQ("fourth", Get("foo", snapshot2)); - ASSERT_EQ("first", Get("foo", snapshot1)); + ASSERT_EQ("sixth", Get(1, "foo")); + ASSERT_EQ("fourth", Get(1, "foo", snapshot2)); + ASSERT_EQ("first", Get(1, "foo", snapshot1)); // After a flush, "second", "third" and "fifth" should // be removed - ASSERT_OK(Flush(0)); - ASSERT_EQ(AllEntriesFor("foo"), "[ sixth, fourth, first ]"); + ASSERT_OK(Flush(1)); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ sixth, fourth, first ]"); // after we release the snapshot1, only two values left db_->ReleaseSnapshot(snapshot1); - FillLevels("a", "z", 0); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, + FillLevels("a", "z", 1); + dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr); // We have only one valid snapshot snapshot2. Since snapshot1 is // not valid anymore, "first" should be removed by a compaction. 
- ASSERT_EQ("sixth", Get("foo")); - ASSERT_EQ("fourth", Get("foo", snapshot2)); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ sixth, fourth ]"); + ASSERT_EQ("sixth", Get(1, "foo")); + ASSERT_EQ("fourth", Get(1, "foo", snapshot2)); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ sixth, fourth ]"); // after we release the snapshot2, only one value should be left db_->ReleaseSnapshot(snapshot2); - FillLevels("a", "z", 0); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, + FillLevels("a", "z", 1); + dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr); - ASSERT_EQ("sixth", Get("foo")); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ sixth ]"); + ASSERT_EQ("sixth", Get(1, "foo")); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ sixth ]"); // skip HashCuckooRep as it does not support snapshot - } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction - | kSkipPipelinedWrite)); + } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction)); } TEST_F(DBBasicTest, DBOpen_Options) { @@ -638,98 +613,94 @@ TEST_F(DBBasicTest, CompactOnFlush) { do { Options options = CurrentOptions(options_override); options.disable_auto_compactions = true; - //CreateAndReopenWithCF({"pikachu"}, options); + CreateAndReopenWithCF({"pikachu"}, options); - Put("foo", "v1"); - ASSERT_OK(Flush(0)); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ v1 ]"); + Put(1, "foo", "v1"); + ASSERT_OK(Flush(1)); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ v1 ]"); // Write two new keys - Put("a", "begin"); - Put("z", "end"); - Flush(0); + Put(1, "a", "begin"); + Put(1, "z", "end"); + Flush(1); // Case1: Delete followed by a put - Delete("foo"); - Put("foo", "v2"); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ v2, DEL, v1 ]"); + Delete(1, "foo"); + Put(1, "foo", "v2"); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, DEL, v1 ]"); // After the current memtable is flushed, the DEL should // have been removed - ASSERT_OK(Flush(0)); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ v2, v1 ]"); + ASSERT_OK(Flush(1)); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]"); - Put("foo", "v2"); // add data to memtable to ensure compact will be executed - dbfull()->CompactRange(CompactRangeOptions(), nullptr, + dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ v2 ]"); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2 ]"); // Case 2: Delete followed by another delete - Delete("foo"); - Delete("foo"); - ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, DEL, v2 ]"); - ASSERT_OK(Flush(0)); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ DEL, v2 ]"); - Delete("foo"); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, + Delete(1, "foo"); + Delete(1, "foo"); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, DEL, v2 ]"); + ASSERT_OK(Flush(1)); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v2 ]"); + dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ ]"); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]"); // Case 3: Put followed by a delete - Put("foo", "v3"); - Delete("foo"); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ DEL, v3 ]"); - ASSERT_OK(Flush(0)); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ DEL ]"); - Delete("foo"); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, + Put(1, "foo", "v3"); + Delete(1, "foo"); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v3 ]"); + ASSERT_OK(Flush(1)); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL ]"); + dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ ]"); + 
ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]"); // Case 4: Put followed by another Put - Put("foo", "v4"); - Put("foo", "v5"); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ v5, v4 ]"); - ASSERT_OK(Flush(0)); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ v5 ]"); - Put("foo", "v5"); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, + Put(1, "foo", "v4"); + Put(1, "foo", "v5"); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ v5, v4 ]"); + ASSERT_OK(Flush(1)); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ v5 ]"); + dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ v5 ]"); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ v5 ]"); // clear database - Delete("foo"); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, + Delete(1, "foo"); + dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ ]"); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]"); // Case 5: Put followed by snapshot followed by another Put // Both puts should remain. - Put("foo", "v6"); + Put(1, "foo", "v6"); const Snapshot* snapshot = db_->GetSnapshot(); - Put("foo", "v7"); - ASSERT_OK(Flush(0)); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ v7, v6 ]"); + Put(1, "foo", "v7"); + ASSERT_OK(Flush(1)); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ v7, v6 ]"); db_->ReleaseSnapshot(snapshot); // clear database - Delete("foo"); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, + Delete(1, "foo"); + dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ ]"); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]"); // Case 5: snapshot followed by a put followed by another Put // Only the last put should remain. const Snapshot* snapshot1 = db_->GetSnapshot(); - Put("foo", "v8"); - Put("foo", "v9"); - ASSERT_OK(Flush(0)); - ASSERT_EQ(AllEntriesFor("foo", 0), "[ v9 ]"); + Put(1, "foo", "v8"); + Put(1, "foo", "v9"); + ASSERT_OK(Flush(1)); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ v9 ]"); db_->ReleaseSnapshot(snapshot1); } while (ChangeCompactOptions()); } -TEST_F(DBBasicTest, DISABLED_FlushOneColumnFamily) { +TEST_F(DBBasicTest, FlushOneColumnFamily) { Options options = CurrentOptions(); CreateAndReopenWithCF({"pikachu", "ilya", "muromec", "dobrynia", "nikitich", "alyosha", "popovich"}, @@ -753,21 +724,23 @@ TEST_F(DBBasicTest, DISABLED_FlushOneColumnFamily) { TEST_F(DBBasicTest, MultiGetSimple) { do { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); SetPerfLevel(kEnableCount); - ASSERT_OK(Put("k1", "v1")); - ASSERT_OK(Put("k2", "v2")); - ASSERT_OK(Put("k3", "v3")); - ASSERT_OK(Put("k4", "v4")); - ASSERT_OK(Delete("k4")); - ASSERT_OK(Put("k5", "v5")); - ASSERT_OK(Delete("no_key")); + ASSERT_OK(Put(1, "k1", "v1")); + ASSERT_OK(Put(1, "k2", "v2")); + ASSERT_OK(Put(1, "k3", "v3")); + ASSERT_OK(Put(1, "k4", "v4")); + ASSERT_OK(Delete(1, "k4")); + ASSERT_OK(Put(1, "k5", "v5")); + ASSERT_OK(Delete(1, "no_key")); std::vector<std::string> keys({"k1", "k2", "k3", "k4", "k5", "no_key"}); std::vector<std::string> values(20, "Temporary data to be overwritten"); + std::vector<ColumnFamilyHandle*> cfs(keys.size(), handles_[1]); get_perf_context()->Reset(); - std::vector<Status> s = db_->MultiGet(ReadOptions(), keys, &values); + std::vector<Status> s = db_->MultiGet(ReadOptions(), cfs, keys, &values); ASSERT_EQ(values.size(), keys.size()); ASSERT_EQ(values[0], "v1"); ASSERT_EQ(values[1], "v2"); diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc index 635360bdc0f..169cadc85c3 100644 --- a/db/db_block_cache_test.cc +++ b/db/db_block_cache_test.cc @@ -50,7
+50,7 @@ class DBBlockCacheTest : public DBTestBase { void InitTable(const Options& options) { std::string value(kValueSize, 'a'); for (size_t i = 0; i < kNumBlocks; i++) { - ASSERT_OK(Put(ToString(i), value.c_str(), WriteOptions(), false)); + ASSERT_OK(Put(ToString(i), value.c_str())); } } diff --git a/db/db_bloom_filter_test.cc b/db/db_bloom_filter_test.cc index b698d5dc583..e6248a04014 100644 --- a/db/db_bloom_filter_test.cc +++ b/db/db_bloom_filter_test.cc @@ -61,61 +61,60 @@ TEST_P(DBBloomFilterTestWithParam, KeyMayExist) { continue; } options.statistics = rocksdb::CreateDBStatistics(); - Reopen(options); + CreateAndReopenWithCF({"pikachu"}, options); - ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value)); + ASSERT_TRUE(!db_->KeyMayExist(ropts, handles_[1], "a", &value)); - ASSERT_OK(Put("a", "b")); + ASSERT_OK(Put(1, "a", "b")); bool value_found = false; ASSERT_TRUE( - db_->KeyMayExist(ropts, "a", &value, &value_found)); + db_->KeyMayExist(ropts, handles_[1], "a", &value, &value_found)); ASSERT_TRUE(value_found); ASSERT_EQ("b", value); - ASSERT_OK(Flush(0)); + ASSERT_OK(Flush(1)); value.clear(); uint64_t numopen = TestGetTickerCount(options, NO_FILE_OPENS); uint64_t cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD); ASSERT_TRUE( - db_->KeyMayExist(ropts, "a", &value, &value_found)); + db_->KeyMayExist(ropts, handles_[1], "a", &value, &value_found)); ASSERT_TRUE(!value_found); // assert that no new files were opened and no new blocks were // read into block cache. ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS)); ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD)); - ASSERT_OK(Delete("a")); + ASSERT_OK(Delete(1, "a")); numopen = TestGetTickerCount(options, NO_FILE_OPENS); cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD); - ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value)); + ASSERT_TRUE(!db_->KeyMayExist(ropts, handles_[1], "a", &value)); ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS)); ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD)); - ASSERT_OK(Flush(0)); - dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr, + ASSERT_OK(Flush(1)); + dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1], true /* disallow trivial move */); numopen = TestGetTickerCount(options, NO_FILE_OPENS); cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD); - ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value)); + ASSERT_TRUE(!db_->KeyMayExist(ropts, handles_[1], "a", &value)); ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS)); ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD)); - ASSERT_OK(Delete("c")); + ASSERT_OK(Delete(1, "c")); numopen = TestGetTickerCount(options, NO_FILE_OPENS); cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD); - ASSERT_TRUE(!db_->KeyMayExist(ropts, "c", &value)); + ASSERT_TRUE(!db_->KeyMayExist(ropts, handles_[1], "c", &value)); ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS)); ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD)); // KeyMayExist function only checks data in block caches, which is not used // by plain table format. 
} while ( - ChangeOptions(kSkipPlainTable | kSkipHashIndex | kSkipFIFOCompaction | - kSkipPipelinedWrite)); + ChangeOptions(kSkipPlainTable | kSkipHashIndex | kSkipFIFOCompaction)); } TEST_F(DBBloomFilterTest, GetFilterByPrefixBloom) { @@ -338,18 +337,18 @@ TEST_P(DBBloomFilterTestWithParam, BloomFilter) { table_options.metadata_block_size = 32; options.table_factory.reset(NewBlockBasedTableFactory(table_options)); - Reopen(options); + CreateAndReopenWithCF({"pikachu"}, options); // Populate multiple layers const int N = 10000; for (int i = 0; i < N; i++) { - ASSERT_OK(Put(Key(i), Key(i))); + ASSERT_OK(Put(1, Key(i), Key(i))); } - Compact("a", "z"); + Compact(1, "a", "z"); for (int i = 0; i < N; i += 100) { - ASSERT_OK(Put(Key(i), Key(i))); + ASSERT_OK(Put(1, Key(i), Key(i))); } - Flush(0); + Flush(1); // Prevent auto compactions triggered by seeks env_->delay_sstable_sync_.store(true, std::memory_order_release); @@ -357,7 +356,7 @@ TEST_P(DBBloomFilterTestWithParam, BloomFilter) { // Lookup present keys. Should rarely read from small sstable. env_->random_read_counter_.Reset(); for (int i = 0; i < N; i++) { - ASSERT_EQ(Key(i), Get(Key(i))); + ASSERT_EQ(Key(i), Get(1, Key(i))); } int reads = env_->random_read_counter_.Read(); fprintf(stderr, "%d present => %d reads\n", N, reads); @@ -373,7 +372,7 @@ TEST_P(DBBloomFilterTestWithParam, BloomFilter) { // Lookup present keys. Should rarely read from either sstable. env_->random_read_counter_.Reset(); for (int i = 0; i < N; i++) { - ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing")); + ASSERT_EQ("NOT_FOUND", Get(1, Key(i) + ".missing")); } reads = env_->random_read_counter_.Read(); fprintf(stderr, "%d missing => %d reads\n", N, reads); @@ -399,25 +398,25 @@ TEST_F(DBBloomFilterTest, BloomFilterRate) { while (ChangeFilterOptions()) { Options options = CurrentOptions(); options.statistics = rocksdb::CreateDBStatistics(); - Reopen(options); + CreateAndReopenWithCF({"pikachu"}, options); const int maxKey = 10000; for (int i = 0; i < maxKey; i++) { - ASSERT_OK(Put(Key(i), Key(i))); + ASSERT_OK(Put(1, Key(i), Key(i))); } // Add a large key to make the file contain wide range - ASSERT_OK(Put(Key(maxKey + 55555), Key(maxKey + 55555))); - Flush(0); + ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555))); + Flush(1); // Check if they can be found for (int i = 0; i < maxKey; i++) { - ASSERT_EQ(Key(i), Get(Key(i))); + ASSERT_EQ(Key(i), Get(1, Key(i))); } ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); // Check if filter is useful for (int i = 0; i < maxKey; i++) { - ASSERT_EQ("NOT_FOUND", Get(Key(i + 33333))); + ASSERT_EQ("NOT_FOUND", Get(1, Key(i + 33333))); } ASSERT_GE(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), maxKey * 0.98); } @@ -431,23 +430,23 @@ TEST_F(DBBloomFilterTest, BloomFilterCompatibility) { options.table_factory.reset(NewBlockBasedTableFactory(table_options)); // Create with block based filter - Reopen(options); + CreateAndReopenWithCF({"pikachu"}, options); const int maxKey = 10000; for (int i = 0; i < maxKey; i++) { - ASSERT_OK(Put(Key(i), Key(i))); + ASSERT_OK(Put(1, Key(i), Key(i))); } - ASSERT_OK(Put(Key(maxKey + 55555), Key(maxKey + 55555))); - Flush(0); + ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555))); + Flush(1); // Check db with full filter table_options.filter_policy.reset(NewBloomFilterPolicy(10, false)); options.table_factory.reset(NewBlockBasedTableFactory(table_options)); - Reopen(options); + ReopenWithColumnFamilies({"default", "pikachu"}, options); // Check if they can be found for (int 
i = 0; i < maxKey; i++) { - ASSERT_EQ(Key(i), Get(Key(i))); + ASSERT_EQ(Key(i), Get(1, Key(i))); } ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); @@ -457,11 +456,11 @@ TEST_F(DBBloomFilterTest, BloomFilterCompatibility) { BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch; table_options.filter_policy.reset(NewBloomFilterPolicy(10, false)); options.table_factory.reset(NewBlockBasedTableFactory(table_options)); - Reopen(options); + ReopenWithColumnFamilies({"default", "pikachu"}, options); // Check if they can be found for (int i = 0; i < maxKey; i++) { - ASSERT_EQ(Key(i), Get(Key(i))); + ASSERT_EQ(Key(i), Get(1, Key(i))); } ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); } @@ -481,23 +480,23 @@ TEST_F(DBBloomFilterTest, BloomFilterReverseCompatibility) { DestroyAndReopen(options); // Create with full filter - Reopen(options); + CreateAndReopenWithCF({"pikachu"}, options); const int maxKey = 10000; for (int i = 0; i < maxKey; i++) { - ASSERT_OK(Put(Key(i), Key(i))); + ASSERT_OK(Put(1, Key(i), Key(i))); } - ASSERT_OK(Put(Key(maxKey + 55555), Key(maxKey + 55555))); - Flush(0); + ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555))); + Flush(1); // Check db with block_based filter table_options.filter_policy.reset(NewBloomFilterPolicy(10, true)); options.table_factory.reset(NewBlockBasedTableFactory(table_options)); - Reopen(options); + ReopenWithColumnFamilies({"default", "pikachu"}, options); // Check if they can be found for (int i = 0; i < maxKey; i++) { - ASSERT_EQ(Key(i), Get(Key(i))); + ASSERT_EQ(Key(i), Get(1, Key(i))); } ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); } @@ -548,27 +547,27 @@ TEST_F(DBBloomFilterTest, BloomFilterWrapper) { table_options.filter_policy.reset(policy); options.table_factory.reset(NewBlockBasedTableFactory(table_options)); - Reopen(options); + CreateAndReopenWithCF({"pikachu"}, options); const int maxKey = 10000; for (int i = 0; i < maxKey; i++) { - ASSERT_OK(Put(Key(i), Key(i))); + ASSERT_OK(Put(1, Key(i), Key(i))); } // Add a large key to make the file contain wide range - ASSERT_OK(Put(Key(maxKey + 55555), Key(maxKey + 55555))); + ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555))); ASSERT_EQ(0U, policy->GetCounter()); - Flush(0); + Flush(1); // Check if they can be found for (int i = 0; i < maxKey; i++) { - ASSERT_EQ(Key(i), Get(Key(i))); + ASSERT_EQ(Key(i), Get(1, Key(i))); } ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); ASSERT_EQ(1U * maxKey, policy->GetCounter()); // Check if filter is useful for (int i = 0; i < maxKey; i++) { - ASSERT_EQ("NOT_FOUND", Get(Key(i + 33333))); + ASSERT_EQ("NOT_FOUND", Get(1, Key(i + 33333))); } ASSERT_GE(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), maxKey * 0.98); ASSERT_EQ(2U * maxKey, policy->GetCounter()); @@ -941,7 +940,7 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) { options.table_factory.reset(NewBlockBasedTableFactory(bbto)); options.optimize_filters_for_hits = true; options.statistics = rocksdb::CreateDBStatistics(); - Reopen(options); + CreateAndReopenWithCF({"mypikachu"}, options); int numkeys = 200000; @@ -956,27 +955,27 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) { int num_inserted = 0; for (int key : keys) { - ASSERT_OK(Put(Key(key), "val")); + ASSERT_OK(Put(1, Key(key), "val")); if (++num_inserted % 1000 == 0) { dbfull()->TEST_WaitForFlushMemTable(); dbfull()->TEST_WaitForCompact(); } } - ASSERT_OK(Put(Key(0), "val")); - ASSERT_OK(Put(Key(numkeys), "val")); - ASSERT_OK(Flush(0)); + ASSERT_OK(Put(1, 
Key(0), "val")); + ASSERT_OK(Put(1, Key(numkeys), "val")); + ASSERT_OK(Flush(1)); dbfull()->TEST_WaitForCompact(); - if (NumTableFilesAtLevel(0, 0) == 0) { + if (NumTableFilesAtLevel(0, 1) == 0) { // No Level 0 file. Create one. - ASSERT_OK(Put(Key(0), "val")); - ASSERT_OK(Put(Key(numkeys), "val")); - ASSERT_OK(Flush(0)); + ASSERT_OK(Put(1, Key(0), "val")); + ASSERT_OK(Put(1, Key(numkeys), "val")); + ASSERT_OK(Flush(1)); dbfull()->TEST_WaitForCompact(); } for (int i = 1; i < numkeys; i += 2) { - ASSERT_EQ(Get(Key(i)), "NOT_FOUND"); + ASSERT_EQ(Get(1, Key(i)), "NOT_FOUND"); } ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L0)); @@ -989,7 +988,7 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) { ASSERT_LT(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 120000 * 2); for (int i = 0; i < numkeys; i += 2) { - ASSERT_EQ(Get(Key(i)), "val"); + ASSERT_EQ(Get(1, Key(i)), "val"); } // Part 2 (read path): rewrite last level with blooms, then verify they get @@ -1001,13 +1000,13 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) { bbto.block_cache.reset(); options.table_factory.reset(NewBlockBasedTableFactory(bbto)); - Reopen(options); - MoveFilesToLevel(7 /* level */, 0 /* column family index */); + ReopenWithColumnFamilies({"default", "mypikachu"}, options); + MoveFilesToLevel(7 /* level */, 1 /* column family index */); - std::string value = Get(Key(0)); + std::string value = Get(1, Key(0)); uint64_t prev_cache_filter_hits = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT); - value = Get(Key(0)); + value = Get(1, Key(0)); ASSERT_EQ(prev_cache_filter_hits + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); @@ -1018,9 +1017,9 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) { bbto.block_cache.reset(); options.table_factory.reset(NewBlockBasedTableFactory(bbto)); - Reopen(options); + ReopenWithColumnFamilies({"default", "mypikachu"}, options); - value = Get(Key(0)); + value = Get(1, Key(0)); ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS)); ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); ASSERT_EQ(2 /* index and data block */, @@ -1032,12 +1031,12 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) { bbto.block_cache.reset(); options.table_factory.reset(NewBlockBasedTableFactory(bbto)); - Reopen(options); + ReopenWithColumnFamilies({"default", "mypikachu"}, options); uint64_t prev_cache_filter_misses = TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS); prev_cache_filter_hits = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT); - Get(Key(0)); + Get(1, Key(0)); ASSERT_EQ(prev_cache_filter_misses, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS)); ASSERT_EQ(prev_cache_filter_hits, @@ -1049,10 +1048,10 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) { options.statistics = CreateDBStatistics(); options.table_factory.reset(NewBlockBasedTableFactory(bbto)); - Reopen(options); + ReopenWithColumnFamilies({"default", "mypikachu"}, options); - ASSERT_OK(Put(Key(numkeys + 1), "val")); - ASSERT_OK(Flush(0)); + ASSERT_OK(Put(1, Key(numkeys + 1), "val")); + ASSERT_OK(Flush(1)); int32_t trivial_move = 0; int32_t non_trivial_move = 0; @@ -1069,7 +1068,7 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) { BottommostLevelCompaction::kSkip; compact_options.change_level = true; compact_options.target_level = 7; - db_->CompactRange(compact_options, nullptr, nullptr); + db_->CompactRange(compact_options, handles_[1], nullptr, nullptr); ASSERT_EQ(trivial_move, 1); ASSERT_EQ(non_trivial_move, 0); @@ -1077,7 +1076,7 @@ TEST_F(DBBloomFilterTest, 
OptimizeFiltersForHits) { prev_cache_filter_hits = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT); prev_cache_filter_misses = TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS); - value = Get(Key(numkeys + 1)); + value = Get(1, Key(numkeys + 1)); ASSERT_EQ(prev_cache_filter_hits, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); ASSERT_EQ(prev_cache_filter_misses, @@ -1088,9 +1087,9 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) { options.statistics = CreateDBStatistics(); options.table_factory.reset(NewBlockBasedTableFactory(bbto)); - Reopen(options); + ReopenWithColumnFamilies({"default", "mypikachu"}, options); - std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions())); + std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions(), handles_[1])); iter->SeekToFirst(); ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS)); ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc index eef4a7a3b33..6458519d086 100644 --- a/db/db_compaction_test.cc +++ b/db/db_compaction_test.cc @@ -499,7 +499,7 @@ TEST_F(DBCompactionTest, DisableStatsUpdateReopen) { std::vector<std::string> values; for (int k = 0; k < kTestSize; ++k) { values.push_back(RandomString(&rnd, kCDTValueSize)); - ASSERT_OK(Put(Key(k), values[k], WriteOptions(), false)); + ASSERT_OK(Put(Key(k), values[k])); } dbfull()->TEST_WaitForFlushMemTable(); dbfull()->TEST_WaitForCompact(); @@ -716,21 +716,21 @@ TEST_F(DBCompactionTest, MinorCompactionsHappen) { const int N = 500; - int starting_num_tables = TotalTableFiles(0); + int starting_num_tables = TotalTableFiles(1); for (int i = 0; i < N; i++) { - ASSERT_OK(Put(Key(i), Key(i) + std::string(1000, 'v'))); + ASSERT_OK(Put(1, Key(i), Key(i) + std::string(1000, 'v'))); } - int ending_num_tables = TotalTableFiles(0); + int ending_num_tables = TotalTableFiles(1); ASSERT_GT(ending_num_tables, starting_num_tables); for (int i = 0; i < N; i++) { - ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i))); + ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(1, Key(i))); } ReopenWithColumnFamilies({"default", "pikachu"}, options); for (int i = 0; i < N; i++) { - ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i))); + ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(1, Key(i))); } } while (ChangeCompactOptions()); } @@ -909,7 +909,7 @@ TEST_F(DBCompactionTest, RecoverDuringMemtableCompaction) { ASSERT_EQ("v2", Get(1, "bar")); ASSERT_EQ(std::string(10000000, 'x'), Get(1, "big1")); ASSERT_EQ(std::string(1000, 'y'), Get(1, "big2")); - } while (ChangeOptions(kSkipPipelinedWrite)); + } while (ChangeOptions()); } TEST_P(DBCompactionTestWithParam, TrivialMoveOneFile) { @@ -1887,15 +1887,15 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) { for (int i = 0; i <= max_key_level_insert; i++) { // each value is 10K - ASSERT_OK(Put(Key(i), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000))); } - ASSERT_OK(Flush()); + ASSERT_OK(Flush(1)); dbfull()->TEST_WaitForCompact(); - ASSERT_GT(TotalTableFiles(0, 4), 1); + ASSERT_GT(TotalTableFiles(1, 4), 1); int non_level0_num_files = 0; for (int i = 1; i < options.num_levels; i++) { - non_level0_num_files += NumTableFilesAtLevel(i, 0); + non_level0_num_files += NumTableFilesAtLevel(i, 1); } ASSERT_GT(non_level0_num_files, 0); @@ -1924,10 +1924,10 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) { compact_options.bottommost_level_compaction = BottommostLevelCompaction::kForce; compact_options.exclusive_manual_compaction =
diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc
index eef4a7a3b33..6458519d086 100644
--- a/db/db_compaction_test.cc
+++ b/db/db_compaction_test.cc
@@ -499,7 +499,7 @@ TEST_F(DBCompactionTest, DisableStatsUpdateReopen) {
     std::vector<std::string> values;
     for (int k = 0; k < kTestSize; ++k) {
       values.push_back(RandomString(&rnd, kCDTValueSize));
-      ASSERT_OK(Put(Key(k), values[k], WriteOptions(), false));
+      ASSERT_OK(Put(Key(k), values[k]));
     }
     dbfull()->TEST_WaitForFlushMemTable();
     dbfull()->TEST_WaitForCompact();
@@ -716,21 +716,21 @@ TEST_F(DBCompactionTest, MinorCompactionsHappen) {
     const int N = 500;
-    int starting_num_tables = TotalTableFiles(0);
+    int starting_num_tables = TotalTableFiles(1);
     for (int i = 0; i < N; i++) {
-      ASSERT_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
+      ASSERT_OK(Put(1, Key(i), Key(i) + std::string(1000, 'v')));
     }
-    int ending_num_tables = TotalTableFiles(0);
+    int ending_num_tables = TotalTableFiles(1);
     ASSERT_GT(ending_num_tables, starting_num_tables);
     for (int i = 0; i < N; i++) {
-      ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
+      ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(1, Key(i)));
     }
     ReopenWithColumnFamilies({"default", "pikachu"}, options);
     for (int i = 0; i < N; i++) {
-      ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
+      ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(1, Key(i)));
     }
   } while (ChangeCompactOptions());
 }
@@ -909,7 +909,7 @@ TEST_F(DBCompactionTest, RecoverDuringMemtableCompaction) {
     ASSERT_EQ("v2", Get(1, "bar"));
     ASSERT_EQ(std::string(10000000, 'x'), Get(1, "big1"));
     ASSERT_EQ(std::string(1000, 'y'), Get(1, "big2"));
-  } while (ChangeOptions(kSkipPipelinedWrite));
+  } while (ChangeOptions());
 }
 TEST_P(DBCompactionTestWithParam, TrivialMoveOneFile) {
@@ -1887,15 +1887,15 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) {
   for (int i = 0; i <= max_key_level_insert; i++) {
     // each value is 10K
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 10000)));
+    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
   }
-  ASSERT_OK(Flush());
+  ASSERT_OK(Flush(1));
   dbfull()->TEST_WaitForCompact();
-  ASSERT_GT(TotalTableFiles(0, 4), 1);
+  ASSERT_GT(TotalTableFiles(1, 4), 1);
   int non_level0_num_files = 0;
   for (int i = 1; i < options.num_levels; i++) {
-    non_level0_num_files += NumTableFilesAtLevel(i, 0);
+    non_level0_num_files += NumTableFilesAtLevel(i, 1);
   }
   ASSERT_GT(non_level0_num_files, 0);
@@ -1924,10 +1924,10 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) {
   compact_options.bottommost_level_compaction =
       BottommostLevelCompaction::kForce;
   compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  dbfull()->CompactRange(compact_options, handles_[0], nullptr, nullptr);
+  dbfull()->CompactRange(compact_options, handles_[1], nullptr, nullptr);
   // Only 1 file in L0
-  ASSERT_EQ("1", FilesPerLevel(0));
+  ASSERT_EQ("1", FilesPerLevel(1));
   // Stage 4: re-open in universal compaction style and do some db operations
   options = CurrentOptions();
@@ -1943,20 +1943,20 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) {
   ReopenWithColumnFamilies({"default", "pikachu"}, options);
   for (int i = max_key_level_insert / 2; i <= max_key_universal_insert; i++) {
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 10000)));
+    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
   }
   dbfull()->Flush(FlushOptions());
   ASSERT_OK(Flush(1));
   dbfull()->TEST_WaitForCompact();
   for (int i = 1; i < options.num_levels; i++) {
-    ASSERT_EQ(NumTableFilesAtLevel(i, 0), 0);
+    ASSERT_EQ(NumTableFilesAtLevel(i, 1), 0);
   }
   // verify keys inserted in both level compaction style and universal
   // compaction style
   std::string keys_in_db;
-  Iterator* iter = dbfull()->NewIterator(ReadOptions(), handles_[0]);
+  Iterator* iter = dbfull()->NewIterator(ReadOptions(), handles_[1]);
   for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
     keys_in_db.append(iter->key().ToString());
     keys_in_db.push_back(',');
@@ -1975,19 +1975,19 @@ TEST_F(DBCompactionTest, L0_CompactionBug_Issue44_a) {
   do {
     CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put("b", "v"));
+    ASSERT_OK(Put(1, "b", "v"));
     ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_OK(Delete("b"));
-    ASSERT_OK(Delete("a"));
+    ASSERT_OK(Delete(1, "b"));
+    ASSERT_OK(Delete(1, "a"));
     ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_OK(Delete("a"));
+    ASSERT_OK(Delete(1, "a"));
     ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_OK(Put("a", "v"));
+    ASSERT_OK(Put(1, "a", "v"));
     ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
     ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
-    ASSERT_EQ("(a->v)", Contents());
+    ASSERT_EQ("(a->v)", Contents(1));
     env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
-    ASSERT_EQ("(a->v)", Contents());
+    ASSERT_EQ("(a->v)", Contents(1));
   } while (ChangeCompactOptions());
 }
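The CompactRange calls restored above pass an explicit column-family handle plus change_level/target_level. A minimal sketch of that manual-compaction pattern (the wrapper function is mine, not from the patch):

    #include "rocksdb/db.h"

    // Compacts the whole key range of one column family and moves the result
    // to `target_level` (e.g. 7, as in OptimizeFiltersForHits above).
    rocksdb::Status CompactAllToLevel(rocksdb::DB* db,
                                      rocksdb::ColumnFamilyHandle* cf,
                                      int target_level) {
      rocksdb::CompactRangeOptions opts;
      opts.change_level = true;
      opts.target_level = target_level;
      // nullptr begin/end selects the entire key range.
      return db->CompactRange(opts, cf, nullptr, nullptr);
    }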
diff --git a/db/db_flush_test.cc b/db/db_flush_test.cc
index cdf8f108202..107e82467cb 100644
--- a/db/db_flush_test.cc
+++ b/db/db_flush_test.cc
@@ -68,7 +68,7 @@ TEST_F(DBFlushTest, SyncFail) {
   SyncPoint::GetInstance()->EnableProcessing();
   Reopen(options);
-  Put("key", "value", WriteOptions(), false);
+  Put("key", "value");
   auto* cfd =
       reinterpret_cast<ColumnFamilyHandleImpl*>(db_->DefaultColumnFamily())
           ->cfd();
diff --git a/db/db_inplace_update_test.cc b/db/db_inplace_update_test.cc
index e91e30222e9..c1f1b51e301 100644
--- a/db/db_inplace_update_test.cc
+++ b/db/db_inplace_update_test.cc
@@ -25,18 +25,18 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdate) {
     options.write_buffer_size = 100000;
     options.allow_concurrent_memtable_write = false;
     Reopen(options);
-    //CreateAndReopenWithCF({"pikachu"}, options);
+    CreateAndReopenWithCF({"pikachu"}, options);
     // Update key with values of smaller size
     int numValues = 10;
     for (int i = numValues; i > 0; i--) {
       std::string value = DummyString(i, 'a');
-      ASSERT_OK(Put("key", value));
-      ASSERT_EQ(value, Get("key"));
+      ASSERT_OK(Put(1, "key", value));
+      ASSERT_EQ(value, Get(1, "key"));
     }
     // Only 1 instance for that key.
-    validateNumberOfEntries(1, 0);
+    validateNumberOfEntries(1, 1);
   } while (ChangeCompactOptions());
 }
@@ -49,18 +49,18 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdateLargeNewValue) {
     options.write_buffer_size = 100000;
     options.allow_concurrent_memtable_write = false;
     Reopen(options);
-    //CreateAndReopenWithCF({"pikachu"}, options);
+    CreateAndReopenWithCF({"pikachu"}, options);
     // Update key with values of larger size
     int numValues = 10;
     for (int i = 0; i < numValues; i++) {
       std::string value = DummyString(i, 'a');
-      ASSERT_OK(Put("key", value));
-      ASSERT_EQ(value, Get("key"));
+      ASSERT_OK(Put(1, "key", value));
+      ASSERT_EQ(value, Get(1, "key"));
     }
     // All 10 updates exist in the internal iterator
-    validateNumberOfEntries(numValues, 0);
+    validateNumberOfEntries(numValues, 1);
   } while (ChangeCompactOptions());
 }
@@ -76,20 +76,20 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackSmallerSize) {
         rocksdb::DBTestInPlaceUpdate::updateInPlaceSmallerSize;
     options.allow_concurrent_memtable_write = false;
     Reopen(options);
-    //CreateAndReopenWithCF({"pikachu"}, options);
+    CreateAndReopenWithCF({"pikachu"}, options);
     // Update key with values of smaller size
     int numValues = 10;
-    ASSERT_OK(Put("key", DummyString(numValues, 'a')));
-    ASSERT_EQ(DummyString(numValues, 'c'), Get("key"));
+    ASSERT_OK(Put(1, "key", DummyString(numValues, 'a')));
+    ASSERT_EQ(DummyString(numValues, 'c'), Get(1, "key"));
     for (int i = numValues; i > 0; i--) {
-      ASSERT_OK(Put("key", DummyString(i, 'a')));
-      ASSERT_EQ(DummyString(i - 1, 'b'), Get("key"));
+      ASSERT_OK(Put(1, "key", DummyString(i, 'a')));
+      ASSERT_EQ(DummyString(i - 1, 'b'), Get(1, "key"));
     }
     // Only 1 instance for that key.
-    validateNumberOfEntries(1, 0);
+    validateNumberOfEntries(1, 1);
   } while (ChangeCompactOptions());
 }
@@ -105,20 +105,20 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackSmallerVarintSize) {
        rocksdb::DBTestInPlaceUpdate::updateInPlaceSmallerVarintSize;
     options.allow_concurrent_memtable_write = false;
     Reopen(options);
-    //CreateAndReopenWithCF({"pikachu"}, options);
+    CreateAndReopenWithCF({"pikachu"}, options);
     // Update key with values of smaller varint size
     int numValues = 265;
-    ASSERT_OK(Put("key", DummyString(numValues, 'a')));
-    ASSERT_EQ(DummyString(numValues, 'c'), Get("key"));
+    ASSERT_OK(Put(1, "key", DummyString(numValues, 'a')));
+    ASSERT_EQ(DummyString(numValues, 'c'), Get(1, "key"));
     for (int i = numValues; i > 0; i--) {
-      ASSERT_OK(Put("key", DummyString(i, 'a')));
-      ASSERT_EQ(DummyString(1, 'b'), Get("key"));
+      ASSERT_OK(Put(1, "key", DummyString(i, 'a')));
+      ASSERT_EQ(DummyString(1, 'b'), Get(1, "key"));
     }
     // Only 1 instance for that key.
-    validateNumberOfEntries(1, 0);
+    validateNumberOfEntries(1, 1);
   } while (ChangeCompactOptions());
 }
@@ -134,18 +134,18 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackLargeNewValue) {
         rocksdb::DBTestInPlaceUpdate::updateInPlaceLargerSize;
     options.allow_concurrent_memtable_write = false;
     Reopen(options);
-    //CreateAndReopenWithCF({"pikachu"}, options);
+    CreateAndReopenWithCF({"pikachu"}, options);
     // Update key with values of larger size
     int numValues = 10;
     for (int i = 0; i < numValues; i++) {
-      ASSERT_OK(Put("key", DummyString(i, 'a')));
-      ASSERT_EQ(DummyString(i, 'c'), Get("key"));
+      ASSERT_OK(Put(1, "key", DummyString(i, 'a')));
+      ASSERT_EQ(DummyString(i, 'c'), Get(1, "key"));
     }
     // No inplace updates. All updates are puts with new seq number
     // All 10 updates exist in the internal iterator
-    validateNumberOfEntries(numValues, 0);
+    validateNumberOfEntries(numValues, 1);
   } while (ChangeCompactOptions());
 }
@@ -161,11 +161,11 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackNoAction) {
         rocksdb::DBTestInPlaceUpdate::updateInPlaceNoAction;
     options.allow_concurrent_memtable_write = false;
     Reopen(options);
-    //CreateAndReopenWithCF({"pikachu"}, options);
+    CreateAndReopenWithCF({"pikachu"}, options);
     // Callback function requests no actions from db
-    ASSERT_OK(Put("key", DummyString(1, 'a')));
-    ASSERT_EQ(Get("key"), "NOT_FOUND");
+    ASSERT_OK(Put(1, "key", DummyString(1, 'a')));
+    ASSERT_EQ(Get(1, "key"), "NOT_FOUND");
   } while (ChangeCompactOptions());
 }
 }  // namespace rocksdb
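For context, the tests above rely on RocksDB's in-place memtable update mode. A hedged sketch of the relevant option wiring (values illustrative):

    #include "rocksdb/options.h"

    rocksdb::Options MakeInPlaceUpdateOptions() {
      rocksdb::Options options;
      options.create_if_missing = true;
      // Overwrite an existing memtable entry instead of appending a new
      // sequence-numbered entry when the new value fits; these tests pair
      // this with allow_concurrent_memtable_write = false.
      options.inplace_update_support = true;
      options.inplace_update_num_locks = 10000;  // striped locks (default size)
      options.allow_concurrent_memtable_write = false;
      return options;
    }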
- ASSERT_NOK(Put("foo2", "bar3")); - ASSERT_EQ("bar", Get("foo")); + ASSERT_NOK(Put(1, "foo2", "bar3")); + ASSERT_EQ("bar", Get(1, "foo")); rocksdb::SyncPoint::GetInstance()->DisableProcessing(); ASSERT_GE(1, range_sync_called.load()); ReopenWithColumnFamilies({"default", "pikachu"}, options); - ASSERT_EQ("bar", Get("foo")); + ASSERT_EQ("bar", Get(1, "foo")); } TEST_F(DBIOFailureTest, FlushSstCloseError) { @@ -429,15 +429,15 @@ TEST_F(DBIOFailureTest, CompactionSstCloseError) { CreateAndReopenWithCF({"pikachu"}, options); Status s; - ASSERT_OK(Put("foo", "bar")); - ASSERT_OK(Put("foo2", "bar")); - Flush(0); - ASSERT_OK(Put("foo", "bar2")); - ASSERT_OK(Put("foo2", "bar")); - Flush(0); - ASSERT_OK(Put("foo", "bar3")); - ASSERT_OK(Put("foo2", "bar")); - Flush(0); + ASSERT_OK(Put(1, "foo", "bar")); + ASSERT_OK(Put(1, "foo2", "bar")); + Flush(1); + ASSERT_OK(Put(1, "foo", "bar2")); + ASSERT_OK(Put(1, "foo2", "bar")); + Flush(1); + ASSERT_OK(Put(1, "foo", "bar3")); + ASSERT_OK(Put(1, "foo2", "bar")); + Flush(1); dbfull()->TEST_WaitForCompact(); std::atomic close_called(0); @@ -450,20 +450,20 @@ TEST_F(DBIOFailureTest, CompactionSstCloseError) { }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); - ASSERT_OK(dbfull()->SetOptions(handles_[0], + ASSERT_OK(dbfull()->SetOptions(handles_[1], { {"disable_auto_compactions", "false"}, })); dbfull()->TEST_WaitForCompact(); // Following writes should fail as compaction failed. - ASSERT_NOK(Put("foo2", "bar3")); - ASSERT_EQ("bar3", Get("foo")); + ASSERT_NOK(Put(1, "foo2", "bar3")); + ASSERT_EQ("bar3", Get(1, "foo")); rocksdb::SyncPoint::GetInstance()->DisableProcessing(); ReopenWithColumnFamilies({"default", "pikachu"}, options); - ASSERT_EQ("bar3", Get("foo")); + ASSERT_EQ("bar3", Get(1, "foo")); } TEST_F(DBIOFailureTest, FlushSstSyncError) { @@ -521,15 +521,15 @@ TEST_F(DBIOFailureTest, CompactionSstSyncError) { CreateAndReopenWithCF({"pikachu"}, options); Status s; - ASSERT_OK(Put("foo", "bar")); - ASSERT_OK(Put("foo2", "bar")); - Flush(); - ASSERT_OK(Put("foo", "bar2")); - ASSERT_OK(Put("foo2", "bar")); - Flush(); - ASSERT_OK(Put("foo", "bar3")); - ASSERT_OK(Put("foo2", "bar")); - Flush(); + ASSERT_OK(Put(1, "foo", "bar")); + ASSERT_OK(Put(1, "foo2", "bar")); + Flush(1); + ASSERT_OK(Put(1, "foo", "bar2")); + ASSERT_OK(Put(1, "foo2", "bar")); + Flush(1); + ASSERT_OK(Put(1, "foo", "bar3")); + ASSERT_OK(Put(1, "foo2", "bar")); + Flush(1); dbfull()->TEST_WaitForCompact(); std::atomic sync_called(0); @@ -542,20 +542,20 @@ TEST_F(DBIOFailureTest, CompactionSstSyncError) { }); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); - ASSERT_OK(dbfull()->SetOptions(handles_[0], + ASSERT_OK(dbfull()->SetOptions(handles_[1], { {"disable_auto_compactions", "false"}, })); dbfull()->TEST_WaitForCompact(); // Following writes should fail as compaction failed. 
- ASSERT_NOK(Put("foo2", "bar3")); - ASSERT_EQ("bar3", Get("foo")); + ASSERT_NOK(Put(1, "foo2", "bar3")); + ASSERT_EQ("bar3", Get(1, "foo")); rocksdb::SyncPoint::GetInstance()->DisableProcessing(); ReopenWithColumnFamilies({"default", "pikachu"}, options); - ASSERT_EQ("bar3", Get("foo")); + ASSERT_EQ("bar3", Get(1, "foo")); } #endif // !(defined NDEBUG) || !defined(OS_WIN) #endif // ROCKSDB_LITE diff --git a/db/db_iterator_test.cc b/db/db_iterator_test.cc index f0dbdf1c495..d62862a953e 100644 --- a/db/db_iterator_test.cc +++ b/db/db_iterator_test.cc @@ -53,13 +53,12 @@ class FlushBlockEveryKeyPolicyFactory : public FlushBlockPolicyFactory { TEST_F(DBIteratorTest, IteratorProperty) { // The test needs to be changed if kPersistedTier is supported in iterator. Options options = CurrentOptions(); - Reopen(options); - //CreateAndReopenWithCF({"pikachu"}, options); - Put("1", "2"); + CreateAndReopenWithCF({"pikachu"}, options); + Put(1, "1", "2"); ReadOptions ropt; ropt.pin_data = false; { - unique_ptr iter(db_->NewIterator(ropt)); + unique_ptr iter(db_->NewIterator(ropt, handles_[1])); iter->SeekToFirst(); std::string prop_value; ASSERT_NOK(iter->GetProperty("non_existing.value", &prop_value)); @@ -94,14 +93,13 @@ TEST_F(DBIteratorTest, NonBlockingIteration) { Options options = CurrentOptions(); options.statistics = rocksdb::CreateDBStatistics(); non_blocking_opts.read_tier = kBlockCacheTier; - Reopen(options); - //CreateAndReopenWithCF({"pikachu"}, options); + CreateAndReopenWithCF({"pikachu"}, options); // write one kv to the database. - ASSERT_OK(Put("a", "b")); + ASSERT_OK(Put(1, "a", "b")); // scan using non-blocking iterator. We should find it because // it is in memtable. - Iterator* iter = db_->NewIterator(non_blocking_opts); + Iterator* iter = db_->NewIterator(non_blocking_opts, handles_[1]); int count = 0; for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ASSERT_OK(iter->status()); @@ -112,13 +110,13 @@ TEST_F(DBIteratorTest, NonBlockingIteration) { // flush memtable to storage. Now, the key should not be in the // memtable neither in the block cache. - ASSERT_OK(Flush()); + ASSERT_OK(Flush(1)); // verify that a non-blocking iterator does not find any // kvs. Neither does it do any IOs to storage. uint64_t numopen = TestGetTickerCount(options, NO_FILE_OPENS); uint64_t cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD); - iter = db_->NewIterator(non_blocking_opts); + iter = db_->NewIterator(non_blocking_opts, handles_[1]); count = 0; for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { count++; @@ -130,12 +128,12 @@ TEST_F(DBIteratorTest, NonBlockingIteration) { delete iter; // read in the specified block via a regular get - ASSERT_EQ(Get("a"), "b"); + ASSERT_EQ(Get(1, "a"), "b"); // verify that we can find it via a non-blocking scan numopen = TestGetTickerCount(options, NO_FILE_OPENS); cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD); - iter = db_->NewIterator(non_blocking_opts); + iter = db_->NewIterator(non_blocking_opts, handles_[1]); count = 0; for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ASSERT_OK(iter->status()); @@ -150,7 +148,7 @@ TEST_F(DBIteratorTest, NonBlockingIteration) { // table format. 
diff --git a/db/db_iterator_test.cc b/db/db_iterator_test.cc
index f0dbdf1c495..d62862a953e 100644
--- a/db/db_iterator_test.cc
+++ b/db/db_iterator_test.cc
@@ -53,13 +53,12 @@ class FlushBlockEveryKeyPolicyFactory : public FlushBlockPolicyFactory {
 TEST_F(DBIteratorTest, IteratorProperty) {
   // The test needs to be changed if kPersistedTier is supported in iterator.
   Options options = CurrentOptions();
-  Reopen(options);
-  //CreateAndReopenWithCF({"pikachu"}, options);
-  Put("1", "2");
+  CreateAndReopenWithCF({"pikachu"}, options);
+  Put(1, "1", "2");
   ReadOptions ropt;
   ropt.pin_data = false;
   {
-    unique_ptr<Iterator> iter(db_->NewIterator(ropt));
+    unique_ptr<Iterator> iter(db_->NewIterator(ropt, handles_[1]));
     iter->SeekToFirst();
     std::string prop_value;
     ASSERT_NOK(iter->GetProperty("non_existing.value", &prop_value));
@@ -94,14 +93,13 @@ TEST_F(DBIteratorTest, NonBlockingIteration) {
     Options options = CurrentOptions();
     options.statistics = rocksdb::CreateDBStatistics();
     non_blocking_opts.read_tier = kBlockCacheTier;
-    Reopen(options);
-    //CreateAndReopenWithCF({"pikachu"}, options);
+    CreateAndReopenWithCF({"pikachu"}, options);
     // write one kv to the database.
-    ASSERT_OK(Put("a", "b"));
+    ASSERT_OK(Put(1, "a", "b"));
     // scan using non-blocking iterator. We should find it because
     // it is in memtable.
-    Iterator* iter = db_->NewIterator(non_blocking_opts);
+    Iterator* iter = db_->NewIterator(non_blocking_opts, handles_[1]);
     int count = 0;
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       ASSERT_OK(iter->status());
@@ -112,13 +110,13 @@ TEST_F(DBIteratorTest, NonBlockingIteration) {
     // flush memtable to storage. Now, the key should not be in the
     // memtable neither in the block cache.
-    ASSERT_OK(Flush());
+    ASSERT_OK(Flush(1));
     // verify that a non-blocking iterator does not find any
     // kvs. Neither does it do any IOs to storage.
     uint64_t numopen = TestGetTickerCount(options, NO_FILE_OPENS);
     uint64_t cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    iter = db_->NewIterator(non_blocking_opts);
+    iter = db_->NewIterator(non_blocking_opts, handles_[1]);
     count = 0;
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       count++;
@@ -130,12 +128,12 @@ TEST_F(DBIteratorTest, NonBlockingIteration) {
     delete iter;
     // read in the specified block via a regular get
-    ASSERT_EQ(Get("a"), "b");
+    ASSERT_EQ(Get(1, "a"), "b");
     // verify that we can find it via a non-blocking scan
     numopen = TestGetTickerCount(options, NO_FILE_OPENS);
     cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    iter = db_->NewIterator(non_blocking_opts);
+    iter = db_->NewIterator(non_blocking_opts, handles_[1]);
     count = 0;
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       ASSERT_OK(iter->status());
@@ -150,7 +148,7 @@ TEST_F(DBIteratorTest, NonBlockingIteration) {
     // table format.
     // Exclude kHashCuckoo as it does not support iteration currently
   } while (ChangeOptions(kSkipPlainTable | kSkipNoSeekToLast | kSkipHashCuckoo |
-                         kSkipMmapReads | kSkipPipelinedWrite));
+                         kSkipMmapReads));
 }
 #ifndef ROCKSDB_LITE
@@ -161,14 +159,13 @@ TEST_F(DBIteratorTest, ManagedNonBlockingIteration) {
     options.statistics = rocksdb::CreateDBStatistics();
     non_blocking_opts.read_tier = kBlockCacheTier;
     non_blocking_opts.managed = true;
-    Reopen(options);
-    //CreateAndReopenWithCF({"pikachu"}, options);
+    CreateAndReopenWithCF({"pikachu"}, options);
     // write one kv to the database.
-    ASSERT_OK(Put("a", "b"));
+    ASSERT_OK(Put(1, "a", "b"));
     // scan using non-blocking iterator. We should find it because
     // it is in memtable.
-    Iterator* iter = db_->NewIterator(non_blocking_opts);
+    Iterator* iter = db_->NewIterator(non_blocking_opts, handles_[1]);
     int count = 0;
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       ASSERT_OK(iter->status());
@@ -179,13 +176,13 @@ TEST_F(DBIteratorTest, ManagedNonBlockingIteration) {
     // flush memtable to storage. Now, the key should not be in the
     // memtable neither in the block cache.
-    ASSERT_OK(Flush());
+    ASSERT_OK(Flush(1));
     // verify that a non-blocking iterator does not find any
     // kvs. Neither does it do any IOs to storage.
     int64_t numopen = TestGetTickerCount(options, NO_FILE_OPENS);
     int64_t cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    iter = db_->NewIterator(non_blocking_opts);
+    iter = db_->NewIterator(non_blocking_opts, handles_[1]);
     count = 0;
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       count++;
@@ -197,12 +194,12 @@ TEST_F(DBIteratorTest, ManagedNonBlockingIteration) {
     delete iter;
     // read in the specified block via a regular get
-    ASSERT_EQ(Get("a"), "b");
+    ASSERT_EQ(Get(1, "a"), "b");
     // verify that we can find it via a non-blocking scan
     numopen = TestGetTickerCount(options, NO_FILE_OPENS);
     cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
-    iter = db_->NewIterator(non_blocking_opts);
+    iter = db_->NewIterator(non_blocking_opts, handles_[1]);
     count = 0;
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       ASSERT_OK(iter->status());
@@ -217,7 +214,7 @@ TEST_F(DBIteratorTest, ManagedNonBlockingIteration) {
     // table format.
     // Exclude kHashCuckoo as it does not support iteration currently
   } while (ChangeOptions(kSkipPlainTable | kSkipNoSeekToLast | kSkipHashCuckoo |
-                         kSkipMmapReads | kSkipPipelinedWrite));
+                         kSkipMmapReads));
 }
 #endif  // ROCKSDB_LITE
@@ -408,10 +405,9 @@ TEST_F(DBIteratorTest, IterEmpty) {
 TEST_F(DBIteratorTest, IterSingle) {
   do {
-    Reopen(CurrentOptions());
-    //CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put("a", "va"));
-    Iterator* iter = db_->NewIterator(ReadOptions());
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    ASSERT_OK(Put(1, "a", "va"));
+    Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
     iter->SeekToFirst();
     ASSERT_EQ(IterStatus(iter), "a->va");
@@ -460,12 +456,11 @@ TEST_F(DBIteratorTest, IterSingle) {
 TEST_F(DBIteratorTest, IterMulti) {
   do {
-    Reopen(CurrentOptions());
-    //CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
-    ASSERT_OK(Put("a", "va"));
-    ASSERT_OK(Put("b", "vb"));
-    ASSERT_OK(Put("c", "vc"));
-    Iterator* iter = db_->NewIterator(ReadOptions());
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    ASSERT_OK(Put(1, "a", "va"));
+    ASSERT_OK(Put(1, "b", "vb"));
+    ASSERT_OK(Put(1, "c", "vc"));
+    Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
     iter->SeekToFirst();
     ASSERT_EQ(IterStatus(iter), "a->va");
@@ -530,11 +525,11 @@ TEST_F(DBIteratorTest, IterMulti) {
     ASSERT_EQ(IterStatus(iter), "b->vb");
     // Make sure iter stays at snapshot
-    ASSERT_OK(Put("a", "va2"));
-    ASSERT_OK(Put("a2", "va3"));
-    ASSERT_OK(Put("b", "vb2"));
-    ASSERT_OK(Put("c", "vc2"));
-    ASSERT_OK(Delete("b"));
+    ASSERT_OK(Put(1, "a", "va2"));
+    ASSERT_OK(Put(1, "a2", "va3"));
+    ASSERT_OK(Put(1, "b", "vb2"));
+    ASSERT_OK(Put(1, "c", "vc2"));
+    ASSERT_OK(Delete(1, "b"));
     iter->SeekToFirst();
     ASSERT_EQ(IterStatus(iter), "a->va");
     iter->Next();
@@ -566,17 +561,16 @@ TEST_F(DBIteratorTest, IterReseek) {
   options.create_if_missing = true;
   options.statistics = rocksdb::CreateDBStatistics();
   DestroyAndReopen(options);
-  Reopen(options);
-//  CreateAndReopenWithCF({"pikachu"}, options);
+  CreateAndReopenWithCF({"pikachu"}, options);
   // insert three keys with same userkey and verify that
   // reseek is not invoked. For each of these test cases,
   // verify that we can find the next key "b".
-  ASSERT_OK(Put("a", "zero"));
-  ASSERT_OK(Put("a", "one"));
-  ASSERT_OK(Put("a", "two"));
-  ASSERT_OK(Put("b", "bone"));
-  Iterator* iter = db_->NewIterator(ReadOptions());
+  ASSERT_OK(Put(1, "a", "zero"));
+  ASSERT_OK(Put(1, "a", "one"));
+  ASSERT_OK(Put(1, "a", "two"));
+  ASSERT_OK(Put(1, "b", "bone"));
+  Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
   iter->SeekToFirst();
   ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0);
   ASSERT_EQ(IterStatus(iter), "a->two");
@@ -587,8 +581,8 @@ TEST_F(DBIteratorTest, IterReseek) {
   // insert a total of three keys with same userkey and verify
   // that reseek is still not invoked.
-  ASSERT_OK(Put("a", "three"));
-  iter = db_->NewIterator(ReadOptions());
+  ASSERT_OK(Put(1, "a", "three"));
+  iter = db_->NewIterator(ReadOptions(), handles_[1]);
   iter->SeekToFirst();
   ASSERT_EQ(IterStatus(iter), "a->three");
   iter->Next();
@@ -598,8 +592,8 @@ TEST_F(DBIteratorTest, IterReseek) {
   // insert a total of four keys with same userkey and verify
   // that reseek is invoked.
- ASSERT_OK(Put("a", "four")); - iter = db_->NewIterator(ReadOptions()); + ASSERT_OK(Put(1, "a", "four")); + iter = db_->NewIterator(ReadOptions(), handles_[1]); iter->SeekToFirst(); ASSERT_EQ(IterStatus(iter), "a->four"); ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0); @@ -615,8 +609,8 @@ TEST_F(DBIteratorTest, IterReseek) { TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION)); // Insert another version of b and assert that reseek is not invoked - ASSERT_OK(Put("b", "btwo")); - iter = db_->NewIterator(ReadOptions()); + ASSERT_OK(Put(1, "b", "btwo")); + iter = db_->NewIterator(ReadOptions(), handles_[1]); iter->SeekToLast(); ASSERT_EQ(IterStatus(iter), "b->btwo"); ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), @@ -629,9 +623,9 @@ TEST_F(DBIteratorTest, IterReseek) { // insert two more versions of b. This makes a total of 4 versions // of b and 4 versions of a. - ASSERT_OK(Put("b", "bthree")); - ASSERT_OK(Put("b", "bfour")); - iter = db_->NewIterator(ReadOptions()); + ASSERT_OK(Put(1, "b", "bthree")); + ASSERT_OK(Put(1, "b", "bfour")); + iter = db_->NewIterator(ReadOptions(), handles_[1]); iter->SeekToLast(); ASSERT_EQ(IterStatus(iter), "b->bfour"); ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), @@ -647,15 +641,14 @@ TEST_F(DBIteratorTest, IterReseek) { TEST_F(DBIteratorTest, IterSmallAndLargeMix) { do { - Reopen(CurrentOptions()); -// CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); - ASSERT_OK(Put("a", "va")); - ASSERT_OK(Put("b", std::string(100000, 'b'))); - ASSERT_OK(Put("c", "vc")); - ASSERT_OK(Put("d", std::string(100000, 'd'))); - ASSERT_OK(Put("e", std::string(100000, 'e'))); + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ASSERT_OK(Put(1, "a", "va")); + ASSERT_OK(Put(1, "b", std::string(100000, 'b'))); + ASSERT_OK(Put(1, "c", "vc")); + ASSERT_OK(Put(1, "d", std::string(100000, 'd'))); + ASSERT_OK(Put(1, "e", std::string(100000, 'e'))); - Iterator* iter = db_->NewIterator(ReadOptions()); + Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]); iter->SeekToFirst(); ASSERT_EQ(IterStatus(iter), "a->va"); @@ -689,15 +682,14 @@ TEST_F(DBIteratorTest, IterSmallAndLargeMix) { TEST_F(DBIteratorTest, IterMultiWithDelete) { do { - Reopen(CurrentOptions()); - //CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); - ASSERT_OK(Put("ka", "va")); - ASSERT_OK(Put("kb", "vb")); - ASSERT_OK(Put("kc", "vc")); - ASSERT_OK(Delete("kb")); - ASSERT_EQ("NOT_FOUND", Get("kb")); + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ASSERT_OK(Put(1, "ka", "va")); + ASSERT_OK(Put(1, "kb", "vb")); + ASSERT_OK(Put(1, "kc", "vc")); + ASSERT_OK(Delete(1, "kb")); + ASSERT_EQ("NOT_FOUND", Get(1, "kb")); - Iterator* iter = db_->NewIterator(ReadOptions()); + Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]); iter->Seek("kc"); ASSERT_EQ(IterStatus(iter), "kc->vc"); if (!CurrentOptions().merge_operator) { @@ -711,61 +703,59 @@ TEST_F(DBIteratorTest, IterMultiWithDelete) { } } delete iter; - } while (ChangeOptions(kSkipPipelinedWrite)); + } while (ChangeOptions()); } TEST_F(DBIteratorTest, IterPrevMaxSkip) { do { - Reopen(CurrentOptions()); - //CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); for (int i = 0; i < 2; i++) { - ASSERT_OK(Put("key1", "v1")); - ASSERT_OK(Put("key2", "v2")); - ASSERT_OK(Put("key3", "v3")); - ASSERT_OK(Put("key4", "v4")); - ASSERT_OK(Put("key5", "v5")); + ASSERT_OK(Put(1, "key1", "v1")); + ASSERT_OK(Put(1, "key2", 
"v2")); + ASSERT_OK(Put(1, "key3", "v3")); + ASSERT_OK(Put(1, "key4", "v4")); + ASSERT_OK(Put(1, "key5", "v5")); } - VerifyIterLast("key5->v5"); + VerifyIterLast("key5->v5", 1); - ASSERT_OK(Delete("key5")); - VerifyIterLast("key4->v4"); + ASSERT_OK(Delete(1, "key5")); + VerifyIterLast("key4->v4", 1); - ASSERT_OK(Delete("key4")); - VerifyIterLast("key3->v3"); + ASSERT_OK(Delete(1, "key4")); + VerifyIterLast("key3->v3", 1); - ASSERT_OK(Delete("key3")); - VerifyIterLast("key2->v2"); + ASSERT_OK(Delete(1, "key3")); + VerifyIterLast("key2->v2", 1); - ASSERT_OK(Delete("key2")); - VerifyIterLast("key1->v1"); + ASSERT_OK(Delete(1, "key2")); + VerifyIterLast("key1->v1", 1); - ASSERT_OK(Delete("key1")); - VerifyIterLast("(invalid)"); - } while (ChangeOptions(kSkipMergePut | kSkipNoSeekToLast | kSkipPipelinedWrite)); + ASSERT_OK(Delete(1, "key1")); + VerifyIterLast("(invalid)", 1); + } while (ChangeOptions(kSkipMergePut | kSkipNoSeekToLast)); } TEST_F(DBIteratorTest, IterWithSnapshot) { anon::OptionsOverride options_override; options_override.skip_policy = kSkipNoSnapshot; do { - Reopen(CurrentOptions()); - //CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override)); - ASSERT_OK(Put("key1", "val1")); - ASSERT_OK(Put("key2", "val2")); - ASSERT_OK(Put("key3", "val3")); - ASSERT_OK(Put("key4", "val4")); - ASSERT_OK(Put("key5", "val5")); + CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override)); + ASSERT_OK(Put(1, "key1", "val1")); + ASSERT_OK(Put(1, "key2", "val2")); + ASSERT_OK(Put(1, "key3", "val3")); + ASSERT_OK(Put(1, "key4", "val4")); + ASSERT_OK(Put(1, "key5", "val5")); const Snapshot* snapshot = db_->GetSnapshot(); ReadOptions options; options.snapshot = snapshot; - Iterator* iter = db_->NewIterator(options); + Iterator* iter = db_->NewIterator(options, handles_[1]); - ASSERT_OK(Put("key0", "val0")); + ASSERT_OK(Put(1, "key0", "val0")); // Put more values after the snapshot - ASSERT_OK(Put("key100", "val100")); - ASSERT_OK(Put("key101", "val101")); + ASSERT_OK(Put(1, "key100", "val100")); + ASSERT_OK(Put(1, "key101", "val101")); iter->Seek("key5"); ASSERT_EQ(IterStatus(iter), "key5->val5"); @@ -810,25 +800,24 @@ TEST_F(DBIteratorTest, IterWithSnapshot) { db_->ReleaseSnapshot(snapshot); delete iter; // skip as HashCuckooRep does not support snapshot - } while (ChangeOptions(kSkipHashCuckoo | kSkipPipelinedWrite)); + } while (ChangeOptions(kSkipHashCuckoo)); } TEST_F(DBIteratorTest, IteratorPinsRef) { do { - Reopen(CurrentOptions()); - //CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); - Put("foo", "hello"); + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + Put(1, "foo", "hello"); // Get iterator that will yield the current contents of the DB. - Iterator* iter = db_->NewIterator(ReadOptions()); + Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]); // Write to force compactions - Put("foo", "newvalue1"); + Put(1, "foo", "newvalue1"); for (int i = 0; i < 100; i++) { // 100K values - ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); + ASSERT_OK(Put(1, Key(i), Key(i) + std::string(100000, 'v'))); } - Put("foo", "newvalue2"); + Put(1, "foo", "newvalue2"); iter->SeekToFirst(); ASSERT_TRUE(iter->Valid()); diff --git a/db/db_log_iter_test.cc b/db/db_log_iter_test.cc index b6e99a8547a..e7f94c4c423 100644 --- a/db/db_log_iter_test.cc +++ b/db/db_log_iter_test.cc @@ -17,10 +17,9 @@ namespace rocksdb { -// NOTE: WAL is disable in pegasus, so ignore these test cases. 
diff --git a/db/db_log_iter_test.cc b/db/db_log_iter_test.cc
index b6e99a8547a..e7f94c4c423 100644
--- a/db/db_log_iter_test.cc
+++ b/db/db_log_iter_test.cc
@@ -17,10 +17,9 @@
 namespace rocksdb {
-// NOTE: WAL is disable in pegasus, so ignore these test cases.
-class DISABLED_DBTestXactLogIterator : public DBTestBase {
+class DBTestXactLogIterator : public DBTestBase {
  public:
-  DISABLED_DBTestXactLogIterator() : DBTestBase("/db_log_iter_test") {}
+  DBTestXactLogIterator() : DBTestBase("/db_log_iter_test") {}
   std::unique_ptr<TransactionLogIterator> OpenTransactionLogIter(
       const SequenceNumber seq) {
@@ -59,7 +58,7 @@ void ExpectRecords(
   }
 }  // namespace
-TEST_F(DISABLED_DBTestXactLogIterator, TransactionLogIterator) {
+TEST_F(DBTestXactLogIterator, TransactionLogIterator) {
   do {
     Options options = OptionsForLogIterTest();
     DestroyAndReopen(options);
@@ -87,7 +86,7 @@ TEST_F(DISABLED_DBTestXactLogIterator, TransactionLogIterator) {
 }
 #ifndef NDEBUG  // sync point is not included with DNDEBUG build
-TEST_F(DISABLED_DBTestXactLogIterator, TransactionLogIteratorRace) {
+TEST_F(DBTestXactLogIterator, TransactionLogIteratorRace) {
   static const int LOG_ITERATOR_RACE_TEST_COUNT = 2;
   static const char* sync_points[LOG_ITERATOR_RACE_TEST_COUNT][4] = {
       {"WalManager::GetSortedWalFiles:1", "WalManager::PurgeObsoleteFiles:1",
@@ -145,7 +144,7 @@ TEST_F(DISABLED_DBTestXactLogIterator, TransactionLogIteratorRace) {
 }
 #endif
-TEST_F(DISABLED_DBTestXactLogIterator, TransactionLogIteratorStallAtLastRecord) {
+TEST_F(DBTestXactLogIterator, TransactionLogIteratorStallAtLastRecord) {
   do {
     Options options = OptionsForLogIterTest();
     DestroyAndReopen(options);
@@ -163,7 +162,7 @@ TEST_F(DISABLED_DBTestXactLogIterator, TransactionLogIteratorStallAtLastRecord)
   } while (ChangeCompactOptions());
 }
-TEST_F(DISABLED_DBTestXactLogIterator, TransactionLogIteratorCheckAfterRestart) {
+TEST_F(DBTestXactLogIterator, TransactionLogIteratorCheckAfterRestart) {
   do {
     Options options = OptionsForLogIterTest();
     DestroyAndReopen(options);
@@ -176,7 +175,7 @@ TEST_F(DISABLED_DBTestXactLogIterator, TransactionLogIteratorCheckAfterRestart)
   } while (ChangeCompactOptions());
 }
-TEST_F(DISABLED_DBTestXactLogIterator, TransactionLogIteratorCorruptedLog) {
+TEST_F(DBTestXactLogIterator, TransactionLogIteratorCorruptedLog) {
   do {
     Options options = OptionsForLogIterTest();
     DestroyAndReopen(options);
@@ -211,7 +210,7 @@ TEST_F(DISABLED_DBTestXactLogIterator, TransactionLogIteratorCorruptedLog) {
   } while (ChangeCompactOptions());
 }
-TEST_F(DISABLED_DBTestXactLogIterator, TransactionLogIteratorBatchOperations) {
+TEST_F(DBTestXactLogIterator, TransactionLogIteratorBatchOperations) {
   do {
     Options options = OptionsForLogIterTest();
     DestroyAndReopen(options);
@@ -231,7 +230,7 @@ TEST_F(DISABLED_DBTestXactLogIterator, TransactionLogIteratorBatchOperations) {
   } while (ChangeCompactOptions());
 }
-TEST_F(DISABLED_DBTestXactLogIterator, TransactionLogIteratorBlobs) {
+TEST_F(DBTestXactLogIterator, TransactionLogIteratorBlobs) {
   Options options = OptionsForLogIterTest();
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
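These re-enabled tests all sit on the WAL tailing API, which Pegasus had switched off along with the WAL itself. A minimal sketch of the public entry point they exercise:

    #include <memory>
    #include "rocksdb/db.h"

    // Iterates over write batches recorded in the WAL at or after `since`.
    void TailWal(rocksdb::DB* db, rocksdb::SequenceNumber since) {
      std::unique_ptr<rocksdb::TransactionLogIterator> it;
      if (!db->GetUpdatesSince(since, &it).ok()) return;
      for (; it->Valid(); it->Next()) {
        rocksdb::BatchResult batch = it->GetBatch();
        // batch.sequence is the first seqno in batch.writeBatchPtr
      }
    }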
diff --git a/db/db_memtable_test.cc b/db/db_memtable_test.cc
index 92c20df55bc..63d274f6ab5 100644
--- a/db/db_memtable_test.cc
+++ b/db/db_memtable_test.cc
@@ -175,9 +175,9 @@ TEST_F(DBMemTableTest, ColumnFamilyId) {
   options.create_if_missing = true;
   options.memtable_factory.reset(new MockMemTableRepFactory());
   DestroyAndReopen(options);
-  //CreateAndReopenWithCF({"pikachu"}, options);
+  CreateAndReopenWithCF({"pikachu"}, options);
-  for (int cf = 0; cf < 1; ++cf) {
+  for (int cf = 0; cf < 2; ++cf) {
     ASSERT_OK(Put(cf, "key", "val"));
     ASSERT_OK(Flush(cf));
     ASSERT_EQ(
diff --git a/db/db_merge_operator_test.cc b/db/db_merge_operator_test.cc
index 4a2d5a2556f..49a2cf6afa8 100644
--- a/db/db_merge_operator_test.cc
+++ b/db/db_merge_operator_test.cc
@@ -282,7 +282,7 @@ TEST_P(MergeOperatorPinningTest, Randomized) {
     VerifyDBFromMap(true_data);
     // Skip HashCuckoo since it does not support merge operators
-  } while (ChangeOptions(kSkipMergePut | kSkipHashCuckoo | kSkipPipelinedWrite));
+  } while (ChangeOptions(kSkipMergePut | kSkipHashCuckoo));
 }
 class MergeOperatorHook : public MergeOperator {
diff --git a/db/db_options_test.cc b/db/db_options_test.cc
index e4f02a62c00..bafa8e55a0a 100644
--- a/db/db_options_test.cc
+++ b/db/db_options_test.cc
@@ -190,7 +190,7 @@ TEST_F(DBOptionsTest, SetWalBytesPerSync) {
   const std::string kValue(kValueSize, 'v');
   int i = 0;
   for (; i < 10; i++) {
-    Put(Key(i), kValue, WriteOptions(), false);
+    Put(Key(i), kValue);
   }
   // Do not flush. If we flush here, SwitchWAL will reuse old WAL file since its
   // empty and will not get the new wal_bytes_per_sync value.
@@ -201,7 +201,7 @@ TEST_F(DBOptionsTest, SetWalBytesPerSync) {
   counter = 0;
   i = 0;
   for (; i < 10; i++) {
-    Put(Key(i), kValue, WriteOptions(), false);
+    Put(Key(i), kValue);
   }
   ASSERT_GT(counter, 0);
   ASSERT_GT(low_bytes_per_sync, 0);
@@ -234,8 +234,8 @@ TEST_F(DBOptionsTest, WritableFileMaxBufferSize) {
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
   int i = 0;
   for (; i < 3; i++) {
-    ASSERT_OK(Put("foo", ToString(i), WriteOptions(), false));
-    ASSERT_OK(Put("bar", ToString(i), WriteOptions(), false));
+    ASSERT_OK(Put("foo", ToString(i)));
+    ASSERT_OK(Put("bar", ToString(i)));
     Flush();
   }
   dbfull()->TEST_WaitForCompact();
@@ -251,8 +251,8 @@ TEST_F(DBOptionsTest, WritableFileMaxBufferSize) {
             dbfull()->GetDBOptions().writable_file_max_buffer_size);
   i = 0;
   for (; i < 3; i++) {
-    ASSERT_OK(Put("foo", ToString(i), WriteOptions(), false));
-    ASSERT_OK(Put("bar", ToString(i), WriteOptions(), false));
+    ASSERT_OK(Put("foo", ToString(i)));
+    ASSERT_OK(Put("bar", ToString(i)));
     Flush();
   }
   dbfull()->TEST_WaitForCompact();
@@ -478,8 +478,8 @@ TEST_F(DBOptionsTest, MaxTotalWalSizeChange) {
   Options options;
   options.create_if_missing = true;
   options.env = env_;
-  //CreateColumnFamilies({"1", "2", "3"}, options);
-  ReopenWithColumnFamilies({"default"/*, "1", "2", "3"*/}, options);
+  CreateColumnFamilies({"1", "2", "3"}, options);
+  ReopenWithColumnFamilies({"default", "1", "2", "3"}, options);
   WriteOptions write_options;
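SetWalBytesPerSync above depends on changing a DB-wide option on a live instance. If memory serves, the call is DB::SetDBOptions with string key/values; treat this sketch as an assumption-labeled illustration:

    #include "rocksdb/db.h"

    // Assumed usage: raise wal_bytes_per_sync to 1MB on a running DB.
    rocksdb::Status RaiseWalSyncChunk(rocksdb::DB* db) {
      return db->SetDBOptions({{"wal_bytes_per_sync", "1048576"}});
    }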
diff --git a/db/db_properties_test.cc b/db/db_properties_test.cc
index 272af398b78..0da64b13656 100644
--- a/db/db_properties_test.cc
+++ b/db/db_properties_test.cc
@@ -36,34 +36,32 @@ TEST_F(DBPropertiesTest, Empty) {
     options.write_buffer_size = 100000;  // Small write buffer
     options.allow_concurrent_memtable_write = false;
     options = CurrentOptions(options);
-    //CreateAndReopenWithCF({"pikachu"}, options);
-    Reopen(options);
+    CreateAndReopenWithCF({"pikachu"}, options);
-    ColumnFamilyHandle* dcfh = db_->DefaultColumnFamily();
     std::string num;
     ASSERT_TRUE(dbfull()->GetProperty(
-        dcfh, "rocksdb.num-entries-active-mem-table", &num));
+        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
     ASSERT_EQ("0", num);
-    ASSERT_OK(Put("foo", "v1"));
-    ASSERT_EQ("v1", Get("foo"));
+    ASSERT_OK(Put(1, "foo", "v1"));
+    ASSERT_EQ("v1", Get(1, "foo"));
     ASSERT_TRUE(dbfull()->GetProperty(
-        dcfh, "rocksdb.num-entries-active-mem-table", &num));
+        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
     ASSERT_EQ("1", num);
     // Block sync calls
     env_->delay_sstable_sync_.store(true, std::memory_order_release);
-    Put("k1", std::string(100000, 'x'));  // Fill memtable
+    Put(1, "k1", std::string(100000, 'x'));  // Fill memtable
     ASSERT_TRUE(dbfull()->GetProperty(
-        dcfh, "rocksdb.num-entries-active-mem-table", &num));
+        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
     ASSERT_EQ("2", num);
-    Put("k2", std::string(100000, 'y'));  // Trigger compaction
+    Put(1, "k2", std::string(100000, 'y'));  // Trigger compaction
     ASSERT_TRUE(dbfull()->GetProperty(
-        dcfh, "rocksdb.num-entries-active-mem-table", &num));
+        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
     ASSERT_EQ("1", num);
-    ASSERT_EQ("v1", Get("foo"));
+    ASSERT_EQ("v1", Get(1, "foo"));
     // Release sync calls
     env_->delay_sstable_sync_.store(false, std::memory_order_release);
@@ -91,7 +89,7 @@ TEST_F(DBPropertiesTest, Empty) {
     ASSERT_TRUE(
         dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
     ASSERT_EQ("0", num);
-  } while (ChangeOptions(kSkipPipelinedWrite));
+  } while (ChangeOptions());
 }
 TEST_F(DBPropertiesTest, CurrentVersionNumber) {
@@ -122,26 +120,24 @@ TEST_F(DBPropertiesTest, GetAggregatedIntPropertyTest) {
   options.min_write_buffer_number_to_merge = 1000;
   options.max_write_buffer_number = 1000;
   options = CurrentOptions(options);
-  //CreateAndReopenWithCF({"one", "two", "three", "four"}, options);
-  Reopen(options);
+  CreateAndReopenWithCF({"one", "two", "three", "four"}, options);
-  ColumnFamilyHandle* dcfh = db_->DefaultColumnFamily();
   Random rnd(301);
-  //for (auto* handle : handles_) {
+  for (auto* handle : handles_) {
     for (int i = 0; i < kKeyNum; ++i) {
-      db_->Put(WriteOptions(), dcfh, RandomString(&rnd, kKeySize),
+      db_->Put(WriteOptions(), handle, RandomString(&rnd, kKeySize),
               RandomString(&rnd, kValueSize));
    }
-  //}
+  }
   uint64_t manual_sum = 0;
   uint64_t api_sum = 0;
   uint64_t value = 0;
-  //for (auto* handle : handles_) {
+  for (auto* handle : handles_) {
     ASSERT_TRUE(
-        db_->GetIntProperty(dcfh, DB::Properties::kSizeAllMemTables, &value));
+        db_->GetIntProperty(handle, DB::Properties::kSizeAllMemTables, &value));
     manual_sum += value;
-  //}
+  }
   ASSERT_TRUE(db_->GetAggregatedIntProperty(DB::Properties::kSizeAllMemTables,
                                             &api_sum));
   ASSERT_GT(manual_sum, 0);
@@ -151,17 +147,17 @@ TEST_F(DBPropertiesTest, GetAggregatedIntPropertyTest) {
   uint64_t before_flush_trm;
   uint64_t after_flush_trm;
-  //for (auto* handle : handles_) {
+  for (auto* handle : handles_) {
     ASSERT_TRUE(db_->GetAggregatedIntProperty(
         DB::Properties::kEstimateTableReadersMem, &before_flush_trm));
     // Issue flush and expect larger memory usage of table readers.
-    db_->Flush(FlushOptions(), dcfh);
+    db_->Flush(FlushOptions(), handle);
     ASSERT_TRUE(db_->GetAggregatedIntProperty(
         DB::Properties::kEstimateTableReadersMem, &after_flush_trm));
     ASSERT_GT(after_flush_trm, before_flush_trm);
-  //}
+  }
 }
 namespace {
@@ -346,9 +342,7 @@ TEST_F(DBPropertiesTest, ReadLatencyHistogramByLevel) {
   BlockBasedTableOptions table_options;
   table_options.no_block_cache = true;
-  //CreateAndReopenWithCF({"pikachu"}, options);
-  Reopen(options);
-
+  CreateAndReopenWithCF({"pikachu"}, options);
   int key_index = 0;
   Random rnd(301);
   for (int num = 0; num < 8; num++) {
@@ -371,8 +365,7 @@ TEST_F(DBPropertiesTest, ReadLatencyHistogramByLevel) {
   ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
   // Reopen and issue Get(). See thee latency tracked
-  //ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  Reopen(options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);
   dbfull()->TEST_WaitForCompact();
   for (int key = 0; key < key_index; key++) {
     Get(Key(key));
@@ -384,8 +377,7 @@ TEST_F(DBPropertiesTest, ReadLatencyHistogramByLevel) {
   ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
   // Reopen and issue iterating. See thee latency tracked
-  //ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  Reopen(options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);
   ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cf-file-histogram", &prop));
   ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
   ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
@@ -401,28 +393,26 @@ TEST_F(DBPropertiesTest, ReadLatencyHistogramByLevel) {
   ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
   // CF 1 should show no histogram.
-  // TODO: add a new test case
-/*  ASSERT_TRUE(
+  ASSERT_TRUE(
       dbfull()->GetProperty(handles_[1], "rocksdb.cf-file-histogram", &prop));
   ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
   ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
   ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
   // put something and read it back , CF 1 should show histogram.
-  Put("foo", "bar");
-  Flush();
+  Put(1, "foo", "bar");
+  Flush(1);
   dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ("bar", Get("foo"));
+  ASSERT_EQ("bar", Get(1, "foo"));
   ASSERT_TRUE(
       dbfull()->GetProperty(handles_[1], "rocksdb.cf-file-histogram", &prop));
   ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
   ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
-  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));*/
+  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
   // options.max_open_files preloads table readers.
   options.max_open_files = -1;
-  //ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  Reopen(options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);
   ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
                                     "rocksdb.cf-file-histogram", &prop));
   ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
"k1"); ASSERT_EQ(3, static_cast(get_perf_context()->get_from_memtable_count)); - ASSERT_OK(Flush()); - ASSERT_TRUE(dbfull()->GetProperty( + ASSERT_OK(Flush(1)); + ASSERT_TRUE(dbfull()->GetProperty(handles_[1], "rocksdb.num-immutable-mem-table", &num)); ASSERT_EQ(num, "0"); ASSERT_TRUE(dbfull()->GetProperty( - DB::Properties::kNumImmutableMemTableFlushed, &num)); + handles_[1], DB::Properties::kNumImmutableMemTableFlushed, &num)); ASSERT_EQ(num, "3"); ASSERT_TRUE(dbfull()->GetIntProperty( - "rocksdb.cur-size-active-mem-table", &value)); + handles_[1], "rocksdb.cur-size-active-mem-table", &value)); // "192" is the size of the metadata of two empty skiplists, this would // break if we change the default skiplist implementation ASSERT_GE(value, 192); @@ -604,29 +593,29 @@ TEST_F(DBPropertiesTest, NumImmutableMemTable) { uint64_t int_num; uint64_t base_total_size; ASSERT_TRUE(dbfull()->GetIntProperty( - "rocksdb.estimate-num-keys", &base_total_size)); + handles_[1], "rocksdb.estimate-num-keys", &base_total_size)); - ASSERT_OK(dbfull()->Delete(writeOpt, "k2")); - ASSERT_OK(dbfull()->Put(writeOpt, "k3", "")); - ASSERT_OK(dbfull()->Delete(writeOpt, "k3")); + ASSERT_OK(dbfull()->Delete(writeOpt, handles_[1], "k2")); + ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k3", "")); + ASSERT_OK(dbfull()->Delete(writeOpt, handles_[1], "k3")); ASSERT_TRUE(dbfull()->GetIntProperty( - "rocksdb.num-deletes-active-mem-table", &int_num)); + handles_[1], "rocksdb.num-deletes-active-mem-table", &int_num)); ASSERT_EQ(int_num, 2U); ASSERT_TRUE(dbfull()->GetIntProperty( - "rocksdb.num-entries-active-mem-table", &int_num)); + handles_[1], "rocksdb.num-entries-active-mem-table", &int_num)); ASSERT_EQ(int_num, 3U); - ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value)); - ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value)); + ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value)); + ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value)); ASSERT_TRUE(dbfull()->GetIntProperty( - "rocksdb.num-entries-imm-mem-tables", &int_num)); + handles_[1], "rocksdb.num-entries-imm-mem-tables", &int_num)); ASSERT_EQ(int_num, 4U); ASSERT_TRUE(dbfull()->GetIntProperty( - "rocksdb.num-deletes-imm-mem-tables", &int_num)); + handles_[1], "rocksdb.num-deletes-imm-mem-tables", &int_num)); ASSERT_EQ(int_num, 2U); ASSERT_TRUE(dbfull()->GetIntProperty( - "rocksdb.estimate-num-keys", &int_num)); + handles_[1], "rocksdb.estimate-num-keys", &int_num)); ASSERT_EQ(int_num, base_total_size + 1); SetPerfLevel(kDisable); @@ -1120,16 +1109,15 @@ TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) { options.level0_file_num_compaction_trigger = 3; options.table_properties_collector_factories.resize(1); std::shared_ptr collector_factory = - std::make_shared(0); + std::make_shared(1); options.table_properties_collector_factories[0] = collector_factory, - Reopen(options); - //CreateAndReopenWithCF({"pikachu"}, options); + CreateAndReopenWithCF({"pikachu"}, options); // Create 2 files for (int table = 0; table < 2; ++table) { for (int i = 0; i < 10 + table; ++i) { - Put(ToString(table * 100 + i), "val"); + Put(1, ToString(table * 100 + i), "val"); } - Flush(); + Flush(1); } ASSERT_GT(collector_factory->num_created_, 0U); @@ -1137,15 +1125,15 @@ TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) { // Trigger automatic compactions. 
@@ -1120,16 +1109,15 @@ TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) {
   options.level0_file_num_compaction_trigger = 3;
   options.table_properties_collector_factories.resize(1);
   std::shared_ptr<CountingUserTblPropCollectorFactory> collector_factory =
-      std::make_shared<CountingUserTblPropCollectorFactory>(0);
+      std::make_shared<CountingUserTblPropCollectorFactory>(1);
   options.table_properties_collector_factories[0] = collector_factory,
-  Reopen(options);
-  //CreateAndReopenWithCF({"pikachu"}, options);
+  CreateAndReopenWithCF({"pikachu"}, options);
   // Create 2 files
   for (int table = 0; table < 2; ++table) {
     for (int i = 0; i < 10 + table; ++i) {
-      Put(ToString(table * 100 + i), "val");
+      Put(1, ToString(table * 100 + i), "val");
     }
-    Flush();
+    Flush(1);
   }
   ASSERT_GT(collector_factory->num_created_, 0U);
@@ -1137,15 +1125,15 @@ TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) {
   // Trigger automatic compactions.
   for (int table = 0; table < 3; ++table) {
     for (int i = 0; i < 10 + table; ++i) {
-      Put(ToString(table * 100 + i), "val");
+      Put(1, ToString(table * 100 + i), "val");
    }
-    Flush();
+    Flush(1);
     dbfull()->TEST_WaitForCompact();
   }
   ASSERT_GT(collector_factory->num_created_, 0U);
   collector_factory->num_created_ = 0;
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
   ASSERT_GT(collector_factory->num_created_, 0U);
   // Come back to write to default column family
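The property probes restored throughout db_properties_test.cc come in three flavors; a compact sketch of the trio (the wrapper function name is mine):

    #include <cstdint>
    #include <string>
    #include "rocksdb/db.h"

    void ProbeMemtableCounters(rocksdb::DB* db,
                               rocksdb::ColumnFamilyHandle* cf) {
      std::string num;
      db->GetProperty(cf, "rocksdb.num-immutable-mem-table", &num);  // string
      uint64_t bytes = 0;
      db->GetIntProperty(cf, "rocksdb.cur-size-active-mem-table", &bytes);
      uint64_t total = 0;  // summed across all column families
      db->GetAggregatedIntProperty(rocksdb::DB::Properties::kSizeAllMemTables,
                                   &total);
    }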
diff --git a/db/db_sst_test.cc b/db/db_sst_test.cc
index 4b988bdb7db..59566889124 100644
--- a/db/db_sst_test.cc
+++ b/db/db_sst_test.cc
@@ -158,7 +158,7 @@ TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) {
   for (int i = 0; i < 2; ++i) {
     // Create 1MB sst file
     for (int j = 0; j < 100; ++j) {
-      ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024), WriteOptions(), false));
+      ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024)));
     }
     ASSERT_OK(Flush());
   }
@@ -189,7 +189,7 @@ TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) {
   // write_buffer_size. The flush will be blocked with block_first_time
   // pending_file is protecting all the files created after
   for (int j = 0; j < 256; ++j) {
-    ASSERT_OK(Put(Key(j), RandomString(&rnd, 10 * 1024), WriteOptions(), false));
+    ASSERT_OK(Put(Key(j), RandomString(&rnd, 10 * 1024)));
   }
   blocking_thread.WaitUntilSleeping();
diff --git a/db/db_test.cc b/db/db_test.cc
index ea2b2dfc38b..0d573631ba7 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -196,7 +196,7 @@ TEST_F(DBTest, MemEnvTest) {
 }
 #endif  // ROCKSDB_LITE
-TEST_F(DBTest, DISABLED_WriteEmptyBatch) {  // PEGASUS: empty batch is not allowed in pegasus
+TEST_F(DBTest, WriteEmptyBatch) {
   Options options = CurrentOptions();
   options.env = env_;
   options.write_buffer_size = 100000;
@@ -301,7 +301,7 @@ TEST_F(DBTest, PutSingleDeleteGet) {
     // universal compaction do not apply to the test case. Skip MergePut
     // because single delete does not get removed when it encounters a merge.
   } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
-                         kSkipUniversalCompaction | kSkipMergePut | kSkipPipelinedWrite));
+                         kSkipUniversalCompaction | kSkipMergePut));
 }
 TEST_F(DBTest, ReadFromPersistedTier) {
@@ -414,7 +414,7 @@ TEST_F(DBTest, ReadFromPersistedTier) {
       DestroyAndReopen(options);
     }
   }
-  } while (ChangeOptions(kSkipHashCuckoo | kSkipPipelinedWrite));
+  } while (ChangeOptions(kSkipHashCuckoo));
 }
 TEST_F(DBTest, SingleDeleteFlush) {
@@ -454,7 +454,7 @@ TEST_F(DBTest, SingleDeleteFlush) {
     // universal compaction do not apply to the test case. Skip MergePut
     // because merges cannot be combined with single deletions.
   } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
-                         kSkipUniversalCompaction | kSkipMergePut | kSkipPipelinedWrite));
+                         kSkipUniversalCompaction | kSkipMergePut));
 }
 TEST_F(DBTest, SingleDeletePutFlush) {
@@ -477,7 +477,7 @@ TEST_F(DBTest, SingleDeletePutFlush) {
     // universal compaction do not apply to the test case. Skip MergePut
     // because merges cannot be combined with single deletions.
   } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
-                         kSkipUniversalCompaction | kSkipMergePut | kSkipPipelinedWrite));
+                         kSkipUniversalCompaction | kSkipMergePut));
 }
 // Disable because not all platform can run it.
@@ -551,7 +551,7 @@ TEST_F(DBTest, GetFromImmutableLayer) {
     ASSERT_EQ("NOT_FOUND", Get(0, "foo"));
     // Release sync calls
     env_->delay_sstable_sync_.store(false, std::memory_order_release);
-  } while (ChangeOptions(kSkipPipelinedWrite));
+  } while (ChangeOptions());
 }
@@ -568,7 +568,7 @@ TEST_F(DBTest, GetLevel0Ordering) {
     ASSERT_OK(Put(1, "foo", "v2"));
     ASSERT_OK(Flush(1));
     ASSERT_EQ("v2", Get(1, "foo"));
-  } while (ChangeOptions(kSkipPipelinedWrite));
+  } while (ChangeOptions());
 }
 TEST_F(DBTest, WrongLevel0Config) {
@@ -592,7 +592,7 @@ TEST_F(DBTest, GetOrderedByLevels) {
     ASSERT_EQ("v2", Get(1, "foo"));
     ASSERT_OK(Flush(1));
     ASSERT_EQ("v2", Get(1, "foo"));
-  } while (ChangeOptions(kSkipPipelinedWrite));
+  } while (ChangeOptions());
 }
 TEST_F(DBTest, GetPicksCorrectFile) {
@@ -608,7 +608,7 @@ TEST_F(DBTest, GetPicksCorrectFile) {
     ASSERT_EQ("va", Get(1, "a"));
     ASSERT_EQ("vf", Get(1, "f"));
     ASSERT_EQ("vx", Get(1, "x"));
-  } while (ChangeOptions(kSkipPipelinedWrite));
+  } while (ChangeOptions());
 }
 TEST_F(DBTest, GetEncountersEmptyLevel) {
@@ -650,7 +650,7 @@ TEST_F(DBTest, GetEncountersEmptyLevel) {
     dbfull()->TEST_WaitForCompact();
     ASSERT_EQ(NumTableFilesAtLevel(0, 1), 1);  // XXX
-  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction | kSkipPipelinedWrite));
+  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction));
 }
 #endif  // ROCKSDB_LITE
@@ -683,7 +683,7 @@ TEST_F(DBTest, FlushSchedule) {
   options.max_write_buffer_number = 2;
   options.write_buffer_size = 120 * 1024;
   CreateAndReopenWithCF({"pikachu"}, options);
-  //std::vector<port::Thread> threads;
+  std::vector<port::Thread> threads;
   std::atomic<int> thread_num(0);
   // each column family will have 5 thread, each thread generating 2 memtables.
@@ -699,12 +699,12 @@ TEST_F(DBTest, FlushSchedule) {
   };
   for (int i = 0; i < 10; ++i) {
-    fill_memtable_func();
+    threads.emplace_back(fill_memtable_func);
   }
-//  for (auto& t : threads) {  // PEGASUS: multithread is not supported
-//    t.join();
-//  }
+  for (auto& t : threads) {
+    t.join();
+  }
   auto default_tables = GetNumberOfSstFilesForColumnFamily(db_, "default");
   auto pikachu_tables = GetNumberOfSstFilesForColumnFamily(db_, "pikachu");
@@ -1242,7 +1242,7 @@ TEST_F(DBTest, ApproximateSizes) {
   }
   // ApproximateOffsetOf() is not yet implemented in plain table format.
   } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction |
-                         kSkipPlainTable | kSkipHashIndex | kSkipPipelinedWrite));
+                         kSkipPlainTable | kSkipHashIndex));
 }
 TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
@@ -1281,7 +1281,7 @@ TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
     dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
   }
   // ApproximateOffsetOf() is not yet implemented in plain table format.
-  } while (ChangeOptions(kSkipPlainTable | kSkipPipelinedWrite));
+  } while (ChangeOptions(kSkipPlainTable));
 }
 #endif  // ROCKSDB_LITE
@@ -1347,7 +1347,7 @@ TEST_F(DBTest, Snapshot) {
   ASSERT_EQ(0U, GetNumSnapshots());
   ASSERT_EQ("0v4", Get(0, "foo"));
   ASSERT_EQ("1v4", Get(1, "foo"));
-  } while (ChangeOptions(kSkipHashCuckoo | kSkipPipelinedWrite));
+  } while (ChangeOptions(kSkipHashCuckoo));
 }
 TEST_F(DBTest, HiddenValuesAreRemoved) {
@@ -1386,7 +1386,7 @@ TEST_F(DBTest, HiddenValuesAreRemoved) {
   // which is used by Size().
   // skip HashCuckooRep as it does not support snapshot
   } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction |
-                         kSkipPlainTable | kSkipHashCuckoo | kSkipPipelinedWrite));
+                         kSkipPlainTable | kSkipHashCuckoo));
 }
 #endif  // ROCKSDB_LITE
@@ -1436,7 +1436,7 @@ TEST_F(DBTest, UnremovableSingleDelete) {
   // universal compaction do not apply to the test case. Skip MergePut
   // because single delete does not get removed when it encounters a merge.
   } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
-                         kSkipUniversalCompaction | kSkipMergePut | kSkipPipelinedWrite));
+                         kSkipUniversalCompaction | kSkipMergePut));
 }
 #ifndef ROCKSDB_LITE
@@ -1547,7 +1547,7 @@ TEST_F(DBTest, OverlapInLevel0) {
     Flush(1);
     ASSERT_EQ("3", FilesPerLevel(1));
     ASSERT_EQ("NOT_FOUND", Get(1, "600"));
-  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction | kSkipPipelinedWrite));
+  } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction));
 }
 #endif  // ROCKSDB_LITE
@@ -1748,20 +1748,20 @@ TEST_F(DBTest, SnapshotFiles) {
   do {
     Options options = CurrentOptions();
     options.write_buffer_size = 100000000;  // Large write buffer
-    Reopen(options);
+    CreateAndReopenWithCF({"pikachu"}, options);
     Random rnd(301);
     // Write 8MB (80 values, each 100K)
-    ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0);
+    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
     std::vector<std::string> values;
     for (int i = 0; i < 80; i++) {
       values.push_back(RandomString(&rnd, 100000));
-      ASSERT_OK(Put(0, Key(i), values[i]));
+      ASSERT_OK(Put((i < 40), Key(i), values[i]));
     }
     // assert that nothing makes it to disk yet.
-    ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0);
+    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
     // get a file snapshot
     uint64_t manifest_number = 0;
@@ -1771,7 +1771,7 @@ TEST_F(DBTest, SnapshotFiles) {
     dbfull()->GetLiveFiles(files, &manifest_size);
     // CURRENT, MANIFEST, OPTIONS, *.sst files (one for each CF)
-    ASSERT_EQ(files.size(), 4U);
+    ASSERT_EQ(files.size(), 5U);
     uint64_t number = 0;
     FileType type;
@@ -1816,6 +1816,7 @@ TEST_F(DBTest, SnapshotFiles) {
     // verify that data in the snapshot are correct
     std::vector<ColumnFamilyDescriptor> column_families;
     column_families.emplace_back("default", ColumnFamilyOptions());
+    column_families.emplace_back("pikachu", ColumnFamilyOptions());
     std::vector<ColumnFamilyHandle*> cf_handles;
     DB* snapdb;
     DBOptions opts;
@@ -1828,7 +1829,7 @@ TEST_F(DBTest, SnapshotFiles) {
     ReadOptions roptions;
     std::string val;
     for (unsigned int i = 0; i < 80; i++) {
-      stat = snapdb->Get(roptions, cf_handles[0], Key(i), &val);
+      stat = snapdb->Get(roptions, cf_handles[i < 40], Key(i), &val);
       ASSERT_EQ(values[i].compare(val), 0);
     }
     for (auto cfh : cf_handles) {
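SnapshotFiles is built on DB::GetLiveFiles, which explains the 4U -> 5U count change once a second column family contributes an SST. A minimal sketch of the call (the wrapper is assumed, not from this patch):

    #include <string>
    #include <vector>
    #include "rocksdb/db.h"

    // Lists CURRENT/MANIFEST/OPTIONS/*.sst paths relative to the db dir;
    // flush_memtable=true flushes first so the listed files are complete.
    rocksdb::Status ListLiveFiles(rocksdb::DB* db,
                                  std::vector<std::string>* files) {
      uint64_t manifest_size = 0;
      return db->GetLiveFiles(*files, &manifest_size, /*flush_memtable=*/true);
    }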
::testing::ValuesIn(MultiThreadedDBTest::GenerateOptionConfigs())); #endif // ROCKSDB_LITE // Group commit test: @@ -2116,7 +2117,7 @@ static void GCThreadBody(void* arg) { } // namespace -TEST_F(DBTest, DISABLED_GroupCommitTest) { +TEST_F(DBTest, GroupCommitTest) { do { Options options = CurrentOptions(); options.env = env_; @@ -2125,16 +2126,15 @@ TEST_F(DBTest, DISABLED_GroupCommitTest) { Reopen(options); // Start threads - int thread_num = 1; - GCThread thread[thread_num]; - for (int id = 0; id < thread_num; id++) { + GCThread thread[kGCNumThreads]; + for (int id = 0; id < kGCNumThreads; id++) { thread[id].id = id; thread[id].db = db_; thread[id].done = false; env_->StartThread(GCThreadBody, &thread[id]); } - for (int id = 0; id < thread_num; id++) { + for (int id = 0; id < kGCNumThreads; id++) { while (thread[id].done == false) { env_->SleepForMicroseconds(100000); } @@ -2144,7 +2144,7 @@ TEST_F(DBTest, DISABLED_GroupCommitTest) { ASSERT_GT(TestGetTickerCount(options, WRITE_DONE_BY_OTHER), 0); std::vector expected_db; - for (int i = 0; i < thread_num * kGCNumKeys; ++i) { + for (int i = 0; i < kGCNumThreads * kGCNumKeys; ++i) { expected_db.push_back(ToString(i)); } std::sort(expected_db.begin(), expected_db.end()); @@ -2163,7 +2163,7 @@ TEST_F(DBTest, DISABLED_GroupCommitTest) { HistogramData hist_data; options.statistics->histogramData(DB_WRITE, &hist_data); ASSERT_GT(hist_data.average, 0.0); - } while (ChangeOptions(kSkipNoSeekToLast | kSkipPipelinedWrite)); + } while (ChangeOptions(kSkipNoSeekToLast)); } namespace { @@ -2580,8 +2580,7 @@ class DBTestRandomized : public DBTest, for (int option_config = kDefault; option_config < kEnd; ++option_config) { if (!ShouldSkipOptions(option_config, kSkipDeletesFilterFirst | kSkipNoSeekToLast | - kSkipHashCuckoo | - kSkipPipelinedWrite)) { + kSkipHashCuckoo)) { option_configs.push_back(option_config); } } @@ -2643,9 +2642,9 @@ TEST_P(DBTestRandomized, Randomized) { } else { b.Delete(k); } - ASSERT_OK(model.Write(WriteOptions(), &b)); - ASSERT_OK(db_->Write(WriteOptions(), &b)); } + ASSERT_OK(model.Write(WriteOptions(), &b)); + ASSERT_OK(db_->Write(WriteOptions(), &b)); } if ((step % 100) == 0) { @@ -4407,7 +4406,7 @@ TEST_F(DBTest, FileCreationRandomFailure) { for (int k = 0; k < kTestSize; ++k) { // here we expect some of the Put fails. 
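For readers outside the Pegasus fork: the kSkipPipelinedWrite masks removed throughout db_test.cc fed into the harness's option sweep, so dropping them puts the pipelined-write configuration back into rotation. A minimal sketch of the sweep pattern these tests share (helper names as used in this patch; the body itself is illustrative):

    TEST_F(DBTest, OptionSweepSketch) {
      do {
        // Each pass reopens the DB under the next option_config_
        // (memtable rep, compaction style, pipelined write, ...).
        CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
        ASSERT_OK(Put(1, "foo", "v1"));
        ASSERT_OK(Flush(1));
        ASSERT_EQ("v1", Get(1, "foo"));
      } while (ChangeOptions());  // returns false once every config has run
    }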
diff --git a/db/db_test2.cc b/db/db_test2.cc
index 81b9199fa71..26441044d0f 100644
--- a/db/db_test2.cc
+++ b/db/db_test2.cc
@@ -479,7 +479,7 @@ namespace {
 }  // namespace

-TEST_F(DBTest2, DISABLED_WalFilterTest) {
+TEST_F(DBTest2, WalFilterTest) {
   class TestWalFilter : public WalFilter {
    private:
     // Processing option that is requested to be applied at the given index
@@ -654,7 +654,7 @@ TEST_F(DBTest2, DISABLED_WalFilterTest) {
   }
 }

-TEST_F(DBTest2, DISABLED_WalFilterTestWithChangeBatch) {
+TEST_F(DBTest2, WalFilterTestWithChangeBatch) {
   class ChangeBatchHandler : public WriteBatch::Handler {
    private:
     // Batch to insert keys in
@@ -1316,22 +1316,20 @@ class PinL0IndexAndFilterBlocksTest : public DBTestBase,
     options->table_factory.reset(new BlockBasedTableFactory(table_options));
     CreateAndReopenWithCF({"pikachu"}, *options);

-    WriteOptions wop;
-    wop.disableWAL = false;
-    Put("a", "begin", wop);
-    Put("z", "end", wop);
-    ASSERT_OK(Flush());
+    Put(1, "a", "begin");
+    Put(1, "z", "end");
+    ASSERT_OK(Flush(1));
     // move this table to L1
-    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[0]);
+    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);

     // reset block cache
     table_options.block_cache = NewLRUCache(64 * 1024);
     options->table_factory.reset(NewBlockBasedTableFactory(table_options));
     TryReopenWithColumnFamilies({"default", "pikachu"}, *options);
     // create new table at L0
-    Put("a2", "begin2", wop);
-    Put("z2", "end2", wop);
-    ASSERT_OK(Flush());
+    Put(1, "a2", "begin2");
+    Put(1, "z2", "end2");
+    ASSERT_OK(Flush(1));

     if (close_afterwards) {
       Close();  // This ensures that there is no ref to block cache entries
@@ -1400,7 +1398,7 @@ TEST_P(PinL0IndexAndFilterBlocksTest,
     std::string value;
     // this should be read from L0
     // so cache values don't change
-    value = Get("a2");
+    value = Get(1, "a2");
     ASSERT_EQ(fm, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
     ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
     ASSERT_EQ(im, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
@@ -1412,7 +1410,7 @@ TEST_P(PinL0IndexAndFilterBlocksTest,
     // then the get results in a cache hit for L1
     // When we have infinite max_files, there is still a cache miss because we
     // have reset the block cache
-    value = Get("a");
+    value = Get(1, "a");
     ASSERT_EQ(fm + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
     ASSERT_EQ(im + 1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
   }
@@ -1452,7 +1450,7 @@ TEST_P(PinL0IndexAndFilterBlocksTest, DisablePrefetchingNonL0IndexAndFilter) {
   }

   std::string value;
   // this should be read from L0
-  value = Get("a2");
+  value = Get(1, "a2");
   // If max_open_files is -1, we have pinned index and filter in Rep, so there
   // will not be changes in index and filter misses or hits. If max_open_files
   // is not -1, Get() will open a TableReader and prefetch index and filter.
@@ -1462,7 +1460,7 @@ TEST_P(PinL0IndexAndFilterBlocksTest, DisablePrefetchingNonL0IndexAndFilter) {
   ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));

   // this should be read from L1
-  value = Get("a");
+  value = Get(1, "a");
   if (infinite_max_files_) {
     // In infinite max files case, there's a cache miss in executing Get()
     // because index and filter are not prefetched before.
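The PinL0IndexAndFilterBlocksTest hunks above swap the fork's per-call WriteOptions plumbing for the column-family-indexed helpers, so writes land in "pikachu" (cf index 1) and the WAL stays on by default. Side by side, the two shapes are (condensed from the hunks; illustrative only):

    // Fork variant being removed: default CF, WAL re-enabled on every call.
    WriteOptions wop;
    wop.disableWAL = false;
    Put("a", "begin", wop);
    ASSERT_OK(Flush());

    // Upstream variant being restored: index 1 selects handles_[1], and the
    // default WriteOptions() already keeps the WAL on.
    Put(1, "a", "begin");
    ASSERT_OK(Flush(1));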
diff --git a/db/db_test_util.cc b/db/db_test_util.cc
index d85721128de..db17f0d08e9 100644
--- a/db/db_test_util.cc
+++ b/db/db_test_util.cc
@@ -150,9 +150,6 @@ bool DBTestBase::ShouldSkipOptions(int option_config, int skip_mask) {
   if ((skip_mask & kSkipMmapReads) && option_config == kWalDirAndMmapReads) {
     return true;
   }
-  if ((skip_mask & kSkipPipelinedWrite) && option_config == kPipelinedWrite) {
-    return true;
-  }
   return false;
 }

@@ -621,16 +618,15 @@ bool DBTestBase::IsMemoryMappedAccessSupported() const {
   return (!encrypted_env_);
 }

-Status DBTestBase::Flush(int cf, const FlushOptions& options) {
+Status DBTestBase::Flush(int cf) {
   if (cf == 0) {
-    return db_->Flush(options);
+    return db_->Flush(FlushOptions());
   } else {
-    return db_->Flush(options, handles_[cf]);
+    return db_->Flush(FlushOptions(), handles_[cf]);
   }
 }

-Status DBTestBase::Put(const Slice& k, const Slice& v, WriteOptions wo, bool disableWAL) {
-  wo.disableWAL = disableWAL;
+Status DBTestBase::Put(const Slice& k, const Slice& v, WriteOptions wo) {
   if (kMergePut == option_config_) {
     return db_->Merge(wo, k, v);
   } else {
@@ -641,17 +637,9 @@ Status DBTestBase::Put(const Slice& k, const Slice& v, WriteOptions wo) {
 Status DBTestBase::Put(int cf, const Slice& k, const Slice& v,
                        WriteOptions wo) {
   if (kMergePut == option_config_) {
-    if (cf == 0) {
-      return db_->Merge(wo, k, v);
-    } else {
-      return db_->Merge(wo, handles_[cf], k, v);
-    }
+    return db_->Merge(wo, handles_[cf], k, v);
   } else {
-    if (cf == 0) {
-      return db_->Put(wo, k, v);
-    } else {
-      return db_->Put(wo, handles_[cf], k, v);
-    }
+    return db_->Put(wo, handles_[cf], k, v);
   }
 }

@@ -997,13 +985,7 @@ void DBTestBase::MakeTables(int n, const std::string& small,
 // tables that cover a specified range to all levels.
 void DBTestBase::FillLevels(const std::string& smallest,
                             const std::string& largest, int cf) {
-  int levels = 0;
-  if (cf == 0) {
-    levels = db_->NumberLevels();
-  } else {
-    levels = db_->NumberLevels(handles_[cf]);
-  }
-  MakeTables(levels, smallest, largest, cf);
+  MakeTables(db_->NumberLevels(handles_[cf]), smallest, largest, cf);
 }

 void DBTestBase::MoveFilesToLevel(int level, int cf) {
@@ -1066,9 +1048,9 @@ void DBTestBase::GenerateNewFile(int cf, Random* rnd, int* key_idx,
 }

 // this will generate non-overlapping files since it keeps increasing key_idx
-void DBTestBase::GenerateNewFile(Random* rnd, int* key_idx, bool nowait, bool disableWAL) {
+void DBTestBase::GenerateNewFile(Random* rnd, int* key_idx, bool nowait) {
   for (int i = 0; i < KNumKeysByGenerateNewFile; i++) {
-    ASSERT_OK(Put(Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990), WriteOptions(), disableWAL));
+    ASSERT_OK(Put(Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990)));
     (*key_idx)++;
   }
   if (!nowait) {
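With the fork-added disableWAL parameter gone from DBTestBase::Put, WAL behavior is governed solely by the WriteOptions argument, and the default is WAL on. A test that still wants a WAL-less write has to say so explicitly (sketch; the key names are illustrative):

    WriteOptions wo;
    wo.disableWAL = true;            // per-call opt-out, no hidden default
    ASSERT_OK(Put("k1", "v1", wo));  // skips the WAL
    ASSERT_OK(Put("k2", "v2"));      // default WriteOptions(): WAL on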
diff --git a/db/db_test_util.h b/db/db_test_util.h
index 4f2138e355c..89083b50195 100644
--- a/db/db_test_util.h
+++ b/db/db_test_util.h
@@ -723,7 +723,6 @@ class DBTestBase : public testing::Test {
     kSkipHashCuckoo = 64,
     kSkipFIFOCompaction = 128,
     kSkipMmapReads = 256,
-    kSkipPipelinedWrite = 512,
   };

   explicit DBTestBase(const std::string path);
@@ -809,9 +808,9 @@ class DBTestBase : public testing::Test {

   bool IsMemoryMappedAccessSupported() const;

-  Status Flush(int cf = 0, const FlushOptions& options = FlushOptions());
+  Status Flush(int cf = 0);

-  Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions(), bool disableWAL = true);
+  Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions());

   Status Put(int cf, const Slice& k, const Slice& v,
              WriteOptions wo = WriteOptions());
@@ -902,7 +901,7 @@ class DBTestBase : public testing::Test {
   int GetSstFileCount(std::string path);

   // this will generate non-overlapping files since it keeps increasing key_idx
-  void GenerateNewFile(Random* rnd, int* key_idx, bool nowait = false, bool disableWAL = true);
+  void GenerateNewFile(Random* rnd, int* key_idx, bool nowait = false);

   void GenerateNewFile(int fd, Random* rnd, int* key_idx, bool nowait = false);

diff --git a/db/db_universal_compaction_test.cc b/db/db_universal_compaction_test.cc
index 8705f46c0e5..03e5f12bac7 100644
--- a/db/db_universal_compaction_test.cc
+++ b/db/db_universal_compaction_test.cc
@@ -1171,7 +1171,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionFourPaths) {
 TEST_P(DBTestUniversalCompaction, IncreaseUniversalCompactionNumLevels) {
   std::function<void(int)> verify_func = [&](int num_keys_in_db) {
     std::string keys_in_db;
-    Iterator* iter = dbfull()->NewIterator(ReadOptions());
+    Iterator* iter = dbfull()->NewIterator(ReadOptions(), handles_[1]);
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       keys_in_db.append(iter->key().ToString());
       keys_in_db.push_back(',');
@@ -1201,66 +1201,65 @@ TEST_P(DBTestUniversalCompaction, IncreaseUniversalCompactionNumLevels) {
   options.level0_file_num_compaction_trigger = 3;
   options.memtable_factory.reset(new SpecialSkipListFactory(KNumKeysPerFile));
   options = CurrentOptions(options);
-  //CreateAndReopenWithCF({"pikachu"}, options);
-  Reopen(options);
+  CreateAndReopenWithCF({"pikachu"}, options);

   for (int i = 0; i <= max_key1; i++) {
     // each value is 10K
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 10000)));
-    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
+    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
     dbfull()->TEST_WaitForCompact();
   }
-  ASSERT_OK(Flush());
+  ASSERT_OK(Flush(1));
   dbfull()->TEST_WaitForCompact();

   // Stage 2: reopen with universal compaction, num_levels=4
   options.compaction_style = kCompactionStyleUniversal;
   options.num_levels = 4;
   options = CurrentOptions(options);
-  ReopenWithColumnFamilies({"default"}, options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);

   verify_func(max_key1);

   // Insert more keys
   for (int i = max_key1 + 1; i <= max_key2; i++) {
     // each value is 10K
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 10000)));
-    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
+    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
     dbfull()->TEST_WaitForCompact();
   }
-  ASSERT_OK(Flush());
+  ASSERT_OK(Flush(1));
   dbfull()->TEST_WaitForCompact();

   verify_func(max_key2);

   // Compaction to non-L0 has happened.
-  ASSERT_GT(NumTableFilesAtLevel(options.num_levels - 1, 0), 0);
+  ASSERT_GT(NumTableFilesAtLevel(options.num_levels - 1, 1), 0);

   // Stage 3: Revert it back to one level and revert to num_levels=1.
   options.num_levels = 4;
   options.target_file_size_base = INT_MAX;
-  ReopenWithColumnFamilies({"default"}, options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);

   // Compact all to level 0
   CompactRangeOptions compact_options;
   compact_options.change_level = true;
   compact_options.target_level = 0;
   compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  dbfull()->CompactRange(compact_options, nullptr, nullptr);
+  dbfull()->CompactRange(compact_options, handles_[1], nullptr, nullptr);

   // Need to restart it once to remove higher level records in manifest.
-  ReopenWithColumnFamilies({"default"}, options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);

   // Final reopen
   options.compaction_style = kCompactionStyleUniversal;
   options.num_levels = 1;
   options = CurrentOptions(options);
-  ReopenWithColumnFamilies({"default"}, options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);

   // Insert more keys
   for (int i = max_key2 + 1; i <= max_key3; i++) {
     // each value is 10K
-    ASSERT_OK(Put(Key(i), RandomString(&rnd, 10000)));
-    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
+    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
     dbfull()->TEST_WaitForCompact();
   }
-  ASSERT_OK(Flush());
+  ASSERT_OK(Flush(1));
   dbfull()->TEST_WaitForCompact();

   verify_func(max_key3);
 }
@@ -1524,9 +1523,10 @@ TEST_P(DBTestUniversalManualCompactionOutputPathId,
   options.level0_file_num_compaction_trigger = 10;
   Destroy(options);
   DestroyAndReopen(options);
-  MakeTables(3, "p", "q", 0);
+  CreateAndReopenWithCF({"pikachu"}, options);
+  MakeTables(3, "p", "q", 1);
   dbfull()->TEST_WaitForCompact();
-  ASSERT_EQ(2, TotalLiveFiles(0));
+  ASSERT_EQ(2, TotalLiveFiles(1));
   ASSERT_EQ(2, GetSstFileCount(options.db_paths[0].path));
   ASSERT_EQ(0, GetSstFileCount(options.db_paths[1].path));

@@ -1534,38 +1534,38 @@ TEST_P(DBTestUniversalManualCompactionOutputPathId,
   CompactRangeOptions compact_options;
   compact_options.target_path_id = 1;
   compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  db_->CompactRange(compact_options, nullptr, nullptr);
-  ASSERT_EQ(1, TotalLiveFiles(0));
+  db_->CompactRange(compact_options, handles_[1], nullptr, nullptr);
+  ASSERT_EQ(1, TotalLiveFiles(1));
   ASSERT_EQ(0, GetSstFileCount(options.db_paths[0].path));
   ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));

-  ReopenWithColumnFamilies({kDefaultColumnFamilyName}, options);
-  ASSERT_EQ(1, TotalLiveFiles(0));
+  ReopenWithColumnFamilies({kDefaultColumnFamilyName, "pikachu"}, options);
+  ASSERT_EQ(1, TotalLiveFiles(1));
   ASSERT_EQ(0, GetSstFileCount(options.db_paths[0].path));
   ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));

-  MakeTables(1, "p", "q", 0);
-  ASSERT_EQ(2, TotalLiveFiles(0));
+  MakeTables(1, "p", "q", 1);
+  ASSERT_EQ(2, TotalLiveFiles(1));
   ASSERT_EQ(1, GetSstFileCount(options.db_paths[0].path));
   ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));

-  ReopenWithColumnFamilies({kDefaultColumnFamilyName}, options);
-  ASSERT_EQ(2, TotalLiveFiles(0));
+  ReopenWithColumnFamilies({kDefaultColumnFamilyName, "pikachu"}, options);
+  ASSERT_EQ(2, TotalLiveFiles(1));
   ASSERT_EQ(1, GetSstFileCount(options.db_paths[0].path));
   ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));

   // Full compaction to DB path 0
   compact_options.target_path_id = 0;
   compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  db_->CompactRange(compact_options, nullptr, nullptr);
-  ASSERT_EQ(1, TotalLiveFiles(0));
+  db_->CompactRange(compact_options, handles_[1], nullptr, nullptr);
+  ASSERT_EQ(1, TotalLiveFiles(1));
   ASSERT_EQ(1, GetSstFileCount(options.db_paths[0].path));
   ASSERT_EQ(0, GetSstFileCount(options.db_paths[1].path));

   // Fail when compacting to an invalid path ID
   compact_options.target_path_id = 2;
   compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
-  ASSERT_TRUE(db_->CompactRange(compact_options, nullptr, nullptr)
+  ASSERT_TRUE(db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)
                   .IsInvalidArgument());
 }
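The universal-compaction hunks reroute every operation through handles_[1], so it is the "pikachu" family, not the default one, that gets built up and compacted. For reference, the manual-compaction call shape they restore (arguments exactly as in the hunks above):

    CompactRangeOptions compact_options;
    compact_options.change_level = true;  // allow moving output across levels
    compact_options.target_level = 0;
    // Overload with an explicit ColumnFamilyHandle*: handles_[1] compacts
    // "pikachu"; the fork's two-argument form always hit the default CF.
    dbfull()->CompactRange(compact_options, handles_[1], nullptr, nullptr);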
diff --git a/db/db_wal_test.cc b/db/db_wal_test.cc
index 4186a308a17..507d8ffbe4a 100644
--- a/db/db_wal_test.cc
+++ b/db/db_wal_test.cc
@@ -15,12 +15,12 @@
 #include "util/sync_point.h"

 namespace rocksdb {
-class DISABLED_DBWALTest : public DBTestBase {
+class DBWALTest : public DBTestBase {
  public:
-  DISABLED_DBWALTest() : DBTestBase("/db_wal_test") {}
+  DBWALTest() : DBTestBase("/db_wal_test") {}
 };

-TEST_F(DISABLED_DBWALTest, WAL) {
+TEST_F(DBWALTest, WAL) {
   do {
     CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
     WriteOptions writeOpt = WriteOptions();
@@ -54,7 +54,7 @@ TEST_F(DISABLED_DBWALTest, WAL) {
   } while (ChangeWalOptions());
 }

-TEST_F(DISABLED_DBWALTest, RollLog) {
+TEST_F(DBWALTest, RollLog) {
   do {
     CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
     ASSERT_OK(Put(1, "foo", "v1"));
@@ -71,7 +71,7 @@ TEST_F(DISABLED_DBWALTest, RollLog) {
   } while (ChangeWalOptions());
 }

-TEST_F(DISABLED_DBWALTest, SyncWALNotBlockWrite) {
+TEST_F(DBWALTest, SyncWALNotBlockWrite) {
   Options options = CurrentOptions();
   options.max_write_buffer_number = 4;
   DestroyAndReopen(options);
@@ -81,15 +81,15 @@ TEST_F(DISABLED_DBWALTest, SyncWALNotBlockWrite) {

   rocksdb::SyncPoint::GetInstance()->LoadDependency({
       {"WritableFileWriter::SyncWithoutFlush:1",
-       "DISABLED_DBWALTest::SyncWALNotBlockWrite:1"},
-      {"DISABLED_DBWALTest::SyncWALNotBlockWrite:2",
+       "DBWALTest::SyncWALNotBlockWrite:1"},
+      {"DBWALTest::SyncWALNotBlockWrite:2",
        "WritableFileWriter::SyncWithoutFlush:2"},
   });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();

   rocksdb::port::Thread thread([&]() { ASSERT_OK(db_->SyncWAL()); });

-  TEST_SYNC_POINT("DISABLED_DBWALTest::SyncWALNotBlockWrite:1");
+  TEST_SYNC_POINT("DBWALTest::SyncWALNotBlockWrite:1");
   ASSERT_OK(Put("foo2", "bar2"));
   ASSERT_OK(Put("foo3", "bar3"));
   FlushOptions fo;
@@ -97,7 +97,7 @@ TEST_F(DISABLED_DBWALTest, SyncWALNotBlockWrite) {
   ASSERT_OK(db_->Flush(fo));
   ASSERT_OK(Put("foo4", "bar4"));

-  TEST_SYNC_POINT("DISABLED_DBWALTest::SyncWALNotBlockWrite:2");
+  TEST_SYNC_POINT("DBWALTest::SyncWALNotBlockWrite:2");

   thread.join();

@@ -109,22 +109,22 @@ TEST_F(DISABLED_DBWALTest, SyncWALNotBlockWrite) {
   rocksdb::SyncPoint::GetInstance()->DisableProcessing();
 }

-TEST_F(DISABLED_DBWALTest, SyncWALNotWaitWrite) {
-  ASSERT_OK(Put("foo1", "bar1", WriteOptions(), false));
-  ASSERT_OK(Put("foo3", "bar3", WriteOptions(), false));
+TEST_F(DBWALTest, SyncWALNotWaitWrite) {
+  ASSERT_OK(Put("foo1", "bar1"));
+  ASSERT_OK(Put("foo3", "bar3"));

   rocksdb::SyncPoint::GetInstance()->LoadDependency({
-      {"SpecialEnv::WalFile::Append:1", "DISABLED_DBWALTest::SyncWALNotWaitWrite:1"},
-      {"DISABLED_DBWALTest::SyncWALNotWaitWrite:2", "SpecialEnv::WalFile::Append:2"},
+      {"SpecialEnv::WalFile::Append:1", "DBWALTest::SyncWALNotWaitWrite:1"},
+      {"DBWALTest::SyncWALNotWaitWrite:2", "SpecialEnv::WalFile::Append:2"},
   });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();

-  rocksdb::port::Thread thread([&]() { ASSERT_OK(Put("foo2", "bar2", WriteOptions(), false)); });
+  rocksdb::port::Thread thread([&]() { ASSERT_OK(Put("foo2", "bar2")); });
   // Moving this to SyncWAL before the actual fsync
-  // TEST_SYNC_POINT("DISABLED_DBWALTest::SyncWALNotWaitWrite:1");
+  // TEST_SYNC_POINT("DBWALTest::SyncWALNotWaitWrite:1");
   ASSERT_OK(db_->SyncWAL());
   // Moving this to SyncWAL after actual fsync
-  // TEST_SYNC_POINT("DISABLED_DBWALTest::SyncWALNotWaitWrite:2");
+  // TEST_SYNC_POINT("DBWALTest::SyncWALNotWaitWrite:2");

   thread.join();

@@ -133,7 +133,7 @@ TEST_F(DISABLED_DBWALTest, SyncWALNotWaitWrite) {
   rocksdb::SyncPoint::GetInstance()->DisableProcessing();
 }

-TEST_F(DISABLED_DBWALTest, Recover) {
+TEST_F(DBWALTest, Recover) {
   do {
     CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
     ASSERT_OK(Put(1, "foo", "v1"));
@@ -156,7 +156,7 @@ TEST_F(DISABLED_DBWALTest, Recover) {
   } while (ChangeWalOptions());
 }

-TEST_F(DISABLED_DBWALTest, RecoverWithTableHandle) {
+TEST_F(DBWALTest, RecoverWithTableHandle) {
   do {
     Options options = CurrentOptions();
     options.create_if_missing = true;
@@ -193,7 +193,7 @@ TEST_F(DISABLED_DBWALTest, RecoverWithTableHandle) {
   } while (ChangeWalOptions());
 }

-TEST_F(DISABLED_DBWALTest, IgnoreRecoveredLog) {
+TEST_F(DBWALTest, IgnoreRecoveredLog) {
   std::string backup_logs = dbname_ + "/backup_logs";

   do {
@@ -282,7 +282,7 @@ TEST_F(DISABLED_DBWALTest, IgnoreRecoveredLog) {
   } while (ChangeWalOptions());
 }

-TEST_F(DISABLED_DBWALTest, RecoveryWithEmptyLog) {
+TEST_F(DBWALTest, RecoveryWithEmptyLog) {
   do {
     CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
     ASSERT_OK(Put(1, "foo", "v1"));
@@ -296,7 +296,7 @@ TEST_F(DISABLED_DBWALTest, RecoveryWithEmptyLog) {
 }

 #if !(defined NDEBUG) || !defined(OS_WIN)
-TEST_F(DISABLED_DBWALTest, PreallocateBlock) {
+TEST_F(DBWALTest, PreallocateBlock) {
   Options options = CurrentOptions();
   options.write_buffer_size = 10 * 1000 * 1000;
   options.max_total_wal_size = 0;
@@ -385,7 +385,7 @@ TEST_F(DISABLED_DBWALTest, PreallocateBlock) {
 #endif  // !(defined NDEBUG) || !defined(OS_WIN)

 #ifndef ROCKSDB_LITE
-TEST_F(DISABLED_DBWALTest, FullPurgePreservesRecycledLog) {
+TEST_F(DBWALTest, FullPurgePreservesRecycledLog) {
   // For github issue #1303
   for (int i = 0; i < 2; ++i) {
     Options options = CurrentOptions();
@@ -421,7 +421,7 @@ TEST_F(DISABLED_DBWALTest, FullPurgePreservesRecycledLog) {
   }
 }

-TEST_F(DISABLED_DBWALTest, GetSortedWalFiles) {
+TEST_F(DBWALTest, GetSortedWalFiles) {
   do {
     CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
     VectorLogPtr log_files;
@@ -434,7 +434,7 @@ TEST_F(DISABLED_DBWALTest, GetSortedWalFiles) {
   } while (ChangeWalOptions());
 }

-TEST_F(DISABLED_DBWALTest, RecoveryWithLogDataForSomeCFs) {
+TEST_F(DBWALTest, RecoveryWithLogDataForSomeCFs) {
   // Test for regression of WAL cleanup missing files that don't contain data
   // for every column family.
   do {
@@ -459,7 +459,7 @@ TEST_F(DISABLED_DBWALTest, RecoveryWithLogDataForSomeCFs) {
   } while (ChangeWalOptions());
 }

-TEST_F(DISABLED_DBWALTest, RecoverWithLargeLog) {
+TEST_F(DBWALTest, RecoverWithLargeLog) {
   do {
     {
       Options options = CurrentOptions();
@@ -491,7 +491,7 @@ TEST_F(DISABLED_DBWALTest, RecoverWithLargeLog) {
 // memtable was flushed, even if it was empty. Now it's changed:
 // we try to create the smallest number of table files by merging
 // updates from multiple logs
-TEST_F(DISABLED_DBWALTest, RecoverCheckFileAmountWithSmallWriteBuffer) {
+TEST_F(DBWALTest, RecoverCheckFileAmountWithSmallWriteBuffer) {
   Options options = CurrentOptions();
   options.write_buffer_size = 5000000;
   CreateAndReopenWithCF({"pikachu", "dobrynia", "nikitich"}, options);
@@ -547,7 +547,7 @@ TEST_F(DISABLED_DBWALTest, RecoverCheckFileAmountWithSmallWriteBuffer) {
 // memtable was flushed, even if it wasn't empty. Now it's changed:
 // we try to create the smallest number of table files by merging
 // updates from multiple logs
-TEST_F(DISABLED_DBWALTest, RecoverCheckFileAmount) {
+TEST_F(DBWALTest, RecoverCheckFileAmount) {
   Options options = CurrentOptions();
   options.write_buffer_size = 100000;
   options.arena_block_size = 4 * 1024;
@@ -613,7 +613,7 @@ TEST_F(DISABLED_DBWALTest, RecoverCheckFileAmount) {
   }
 }

-TEST_F(DISABLED_DBWALTest, SyncMultipleLogs) {
+TEST_F(DBWALTest, SyncMultipleLogs) {
   const uint64_t kNumBatches = 2;
   const int kBatchSize = 1000;

@@ -642,7 +642,7 @@ TEST_F(DISABLED_DBWALTest, SyncMultipleLogs) {
 // a local variable, then keep increasing the variable as we replay logs,
 // ignoring actual sequence id of the records. This is incorrect if some writes
 // come with WAL disabled.
-TEST_F(DISABLED_DBWALTest, PartOfWritesWithWALDisabled) {
+TEST_F(DBWALTest, PartOfWritesWithWALDisabled) {
   std::unique_ptr<FaultInjectionTestEnv> fault_env(
       new FaultInjectionTestEnv(env_));
   Options options = CurrentOptions();
@@ -687,7 +687,7 @@ class RecoveryTestHelper {
   static const int kValueSize = 96;

   // Create WAL files with values filled in
-  static void FillData(DISABLED_DBWALTest* test, const Options& options,
+  static void FillData(DBWALTest* test, const Options& options,
                        const size_t wal_count, size_t* count) {
     const ImmutableDBOptions db_options(options);
@@ -737,7 +737,7 @@ class RecoveryTestHelper {
   }

   // Recreate and fill the store with some data
-  static size_t FillData(DISABLED_DBWALTest* test, Options* options) {
+  static size_t FillData(DBWALTest* test, Options* options) {
     options->create_if_missing = true;
     test->DestroyAndReopen(*options);
     test->Close();
@@ -748,7 +748,7 @@ class RecoveryTestHelper {
   }

   // Read back all the keys we wrote and return the number of keys found
-  static size_t GetData(DISABLED_DBWALTest* test) {
+  static size_t GetData(DBWALTest* test) {
     size_t count = 0;
     for (size_t i = 0; i < kWALFilesCount * kKeysPerWALFile; i++) {
       if (test->Get("key" + ToString(i)) != "NOT_FOUND") {
@@ -759,7 +759,7 @@ class RecoveryTestHelper {
   }

   // Manually corrupt the specified WAL
-  static void CorruptWAL(DISABLED_DBWALTest* test, const Options& options,
+  static void CorruptWAL(DBWALTest* test, const Options& options,
                          const double off, const double len,
                          const int wal_file_id, const bool trunc = false) {
     Env* env = options.env;
@@ -808,7 +808,7 @@ class RecoveryTestHelper {
 // - We expect to open the data store when there are incomplete trailing writes
 //   at the end of any of the logs
 // - We do not expect to open the data store for corruption
-TEST_F(DISABLED_DBWALTest, kTolerateCorruptedTailRecords) {
+TEST_F(DBWALTest, kTolerateCorruptedTailRecords) {
   const int jstart = RecoveryTestHelper::kWALFileOffset;
   const int jend = jstart + RecoveryTestHelper::kWALFilesCount;

@@ -843,7 +843,7 @@ TEST_F(DISABLED_DBWALTest, kTolerateCorruptedTailRecords) {
 // Test scope:
 // We don't expect the data store to be opened if there is any corruption
 // (leading, middle or trailing -- incomplete writes or corruption)
-TEST_F(DISABLED_DBWALTest, kAbsoluteConsistency) {
+TEST_F(DBWALTest, kAbsoluteConsistency) {
   const int jstart = RecoveryTestHelper::kWALFileOffset;
   const int jend = jstart + RecoveryTestHelper::kWALFilesCount;

@@ -879,7 +879,7 @@ TEST_F(DISABLED_DBWALTest, kAbsoluteConsistency) {
 // Test scope:
 // We don't expect the data store to be opened if there is any inconsistency
 // between WAL and SST files
-TEST_F(DISABLED_DBWALTest, kPointInTimeRecoveryCFConsistency) {
+TEST_F(DBWALTest, kPointInTimeRecoveryCFConsistency) {
   Options options = CurrentOptions();
   options.avoid_flush_during_recovery = true;

@@ -912,7 +912,7 @@ TEST_F(DISABLED_DBWALTest, kPointInTimeRecoveryCFConsistency) {
 // Test scope:
 // - We expect to open data store under all circumstances
 // - We expect only data up to the point where the first error was encountered
-TEST_F(DISABLED_DBWALTest, kPointInTimeRecovery) {
+TEST_F(DBWALTest, kPointInTimeRecovery) {
   const int jstart = RecoveryTestHelper::kWALFileOffset;
   const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
   const int maxkeys =
@@ -963,7 +963,7 @@ TEST_F(DISABLED_DBWALTest, kPointInTimeRecovery) {
 // Test scope:
 // - We expect to open the data store under all scenarios
 // - We expect to have recovered records past the corruption zone
-TEST_F(DISABLED_DBWALTest, kSkipAnyCorruptedRecords) {
+TEST_F(DBWALTest, kSkipAnyCorruptedRecords) {
   const int jstart = RecoveryTestHelper::kWALFileOffset;
   const int jend = jstart + RecoveryTestHelper::kWALFilesCount;

@@ -995,7 +995,7 @@ TEST_F(DISABLED_DBWALTest, kSkipAnyCorruptedRecords) {
   }
 }

-TEST_F(DISABLED_DBWALTest, AvoidFlushDuringRecovery) {
+TEST_F(DBWALTest, AvoidFlushDuringRecovery) {
   Options options = CurrentOptions();
   options.disable_auto_compactions = true;
   options.avoid_flush_during_recovery = false;
@@ -1043,7 +1043,7 @@ TEST_F(DISABLED_DBWALTest, AvoidFlushDuringRecovery) {
   ASSERT_EQ(3, TotalTableFiles());
 }

-TEST_F(DISABLED_DBWALTest, WalCleanupAfterAvoidFlushDuringRecovery) {
+TEST_F(DBWALTest, WalCleanupAfterAvoidFlushDuringRecovery) {
   // Verifies WAL files that were present during recovery, but not flushed due
   // to avoid_flush_during_recovery, will be considered for deletion at a later
   // stage. We check at least one such file is deleted during Flush().
@@ -1069,7 +1069,7 @@ TEST_F(DISABLED_DBWALTest, WalCleanupAfterAvoidFlushDuringRecovery) {
   }
 }

-TEST_F(DISABLED_DBWALTest, RecoverWithoutFlush) {
+TEST_F(DBWALTest, RecoverWithoutFlush) {
   Options options = CurrentOptions();
   options.avoid_flush_during_recovery = true;
   options.create_if_missing = false;
@@ -1110,7 +1110,7 @@ TEST_F(DISABLED_DBWALTest, RecoverWithoutFlush) {
   ASSERT_EQ(Get("bar"), "bar_v3");
 }

-TEST_F(DISABLED_DBWALTest, RecoverWithoutFlushMultipleCF) {
+TEST_F(DBWALTest, RecoverWithoutFlushMultipleCF) {
   const std::string kSmallValue = "v";
   const std::string kLargeValue = DummyString(1024);
   Options options = CurrentOptions();
@@ -1170,7 +1170,7 @@ TEST_F(DISABLED_DBWALTest, RecoverWithoutFlushMultipleCF) {
 // 2. Open with avoid_flush_during_recovery = true;
 // 3. Append more data without flushing, which creates new WAL log.
 // 4. Open again. See if it can correctly handle previous corruption.
-TEST_F(DISABLED_DBWALTest, RecoverFromCorruptedWALWithoutFlush) {
+TEST_F(DBWALTest, RecoverFromCorruptedWALWithoutFlush) {
   const int jstart = RecoveryTestHelper::kWALFileOffset;
   const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
   const int kAppendKeys = 100;
@@ -1230,7 +1230,7 @@ TEST_F(DISABLED_DBWALTest, RecoverFromCorruptedWALWithoutFlush) {

 #endif  // ROCKSDB_LITE

-TEST_F(DISABLED_DBWALTest, WalTermTest) {
+TEST_F(DBWALTest, WalTermTest) {
   Options options = CurrentOptions();
   options.env = env_;
   CreateAndReopenWithCF({"pikachu"}, options);
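Renaming the fixture also renames every string registered with the sync-point machinery; the strings are only labels, but both endpoints of a dependency edge must agree, which is why the LoadDependency initializers change in lockstep with the TEST_SYNC_POINT call sites. The pairing SyncWALNotBlockWrite relies on, annotated:

    rocksdb::SyncPoint::GetInstance()->LoadDependency({
        // SyncWithoutFlush:1 must be reached before the test's :1 marker fires,
        {"WritableFileWriter::SyncWithoutFlush:1",
         "DBWALTest::SyncWALNotBlockWrite:1"},
        // and the test's :2 marker must fire before SyncWithoutFlush:2 proceeds.
        {"DBWALTest::SyncWALNotBlockWrite:2",
         "WritableFileWriter::SyncWithoutFlush:2"},
    });
    rocksdb::SyncPoint::GetInstance()->EnableProcessing();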
diff --git a/db/db_write_test.cc b/db/db_write_test.cc
index f6130cf3495..d21bfe4738a 100644
--- a/db/db_write_test.cc
+++ b/db/db_write_test.cc
@@ -52,7 +52,7 @@ TEST_P(DBWriteTest, ReturnSeuqneceNumber) {
   }
 }

-TEST_P(DBWriteTest, DISABLED_ReturnSeuqneceNumberMultiThreaded) {
+TEST_P(DBWriteTest, ReturnSeuqneceNumberMultiThreaded) {
   constexpr size_t kThreads = 16;
   constexpr size_t kNumKeys = 1000;
   Open();
@@ -85,7 +85,7 @@ TEST_P(DBWriteTest, DISABLED_ReturnSeuqneceNumberMultiThreaded) {
   }
 }

-TEST_P(DBWriteTest, DISABLED_IOErrorOnWALWritePropagateToWriteThreadFollower) {
+TEST_P(DBWriteTest, IOErrorOnWALWritePropagateToWriteThreadFollower) {
   constexpr int kNumThreads = 5;
   std::unique_ptr<FaultInjectionTestEnv> mock_env(
       new FaultInjectionTestEnv(Env::Default()));
@@ -128,8 +128,8 @@ TEST_P(DBWriteTest, DISABLED_IOErrorOnWALWritePropagateToWriteThreadFollower) {

 INSTANTIATE_TEST_CASE_P(DBWriteTestInstance, DBWriteTest,
                         testing::Values(DBTestBase::kDefault,
-                                        DBTestBase::kConcurrentWALWrites/*,
-                                        DBTestBase::kPipelinedWrite*/));
+                                        DBTestBase::kConcurrentWALWrites,
+                                        DBTestBase::kPipelinedWrite));

 }  // namespace rocksdb
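Restoring kPipelinedWrite to the instantiation means every TEST_P in db_write_test.cc now runs three times, once per write path. A hypothetical sketch of how a parameterized case consumes the value (the param-to-Options mapping lives in the fixture; enable_pipelined_write is the real DBOptions knob involved):

    TEST_P(DBWriteTest, Sketch) {
      // GetParam() is kDefault, kConcurrentWALWrites or kPipelinedWrite; the
      // fixture turns it into Options, e.g. options.enable_pipelined_write,
      // before the DB is opened. (Body shown for illustration only.)
      Open();
      ASSERT_OK(Put("foo", "bar"));
    }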
diff --git a/db/deletefile_test.cc b/db/deletefile_test.cc
index 1f51f941ee3..989c0c4118b 100644
--- a/db/deletefile_test.cc
+++ b/db/deletefile_test.cc
@@ -434,26 +434,23 @@ TEST_F(DeleteFileTest, DeleteNonDefaultColumnFamily) {
   rocksdb::DB* db;
   ASSERT_OK(DB::Open(db_options, dbname_, column_families, &handles, &db));

-  WriteOptions wop;
-  wop.disableWAL = true;
-
   Random rnd(5);
   for (int i = 0; i < 1000; ++i) {
-    ASSERT_OK(db->Put(wop, handles[0], test::RandomKey(&rnd, 10),
+    ASSERT_OK(db->Put(WriteOptions(), handles[1], test::RandomKey(&rnd, 10),
                       test::RandomKey(&rnd, 10)));
   }
-  ASSERT_OK(db->Flush(FlushOptions(), handles[0]));
+  ASSERT_OK(db->Flush(FlushOptions(), handles[1]));

   for (int i = 0; i < 1000; ++i) {
-    ASSERT_OK(db->Put(wop, handles[0], test::RandomKey(&rnd, 10),
+    ASSERT_OK(db->Put(WriteOptions(), handles[1], test::RandomKey(&rnd, 10),
                       test::RandomKey(&rnd, 10)));
   }
-  ASSERT_OK(db->Flush(FlushOptions(), handles[0]));
+  ASSERT_OK(db->Flush(FlushOptions(), handles[1]));

   std::vector<LiveFileMetaData> metadata;
   db->GetLiveFilesMetaData(&metadata);
   ASSERT_EQ(2U, metadata.size());
-  ASSERT_EQ("default", metadata[0].column_family_name);
-  ASSERT_EQ("default", metadata[1].column_family_name);
+  ASSERT_EQ("new_cf", metadata[0].column_family_name);
+  ASSERT_EQ("new_cf", metadata[1].column_family_name);
   auto old_file = metadata[0].smallest_seqno < metadata[1].smallest_seqno
                       ? metadata[0].name
                       : metadata[1].name;
@@ -464,7 +461,7 @@ TEST_F(DeleteFileTest, DeleteNonDefaultColumnFamily) {
   ASSERT_OK(db->DeleteFile(old_file));

   {
-    std::unique_ptr<Iterator> itr(db->NewIterator(ReadOptions(), handles[0]));
+    std::unique_ptr<Iterator> itr(db->NewIterator(ReadOptions(), handles[1]));
     int count = 0;
     for (itr->SeekToFirst(); itr->Valid(); itr->Next()) {
       ASSERT_OK(itr->status());
@@ -479,7 +476,7 @@ TEST_F(DeleteFileTest, DeleteNonDefaultColumnFamily) {
   ASSERT_OK(DB::Open(db_options, dbname_, column_families, &handles, &db));

   {
-    std::unique_ptr<Iterator> itr(db->NewIterator(ReadOptions(), handles[0]));
+    std::unique_ptr<Iterator> itr(db->NewIterator(ReadOptions(), handles[1]));
     int count = 0;
     for (itr->SeekToFirst(); itr->Valid(); itr->Next()) {
       ASSERT_OK(itr->status());

diff --git a/db/external_sst_file_test.cc b/db/external_sst_file_test.cc
index 68ff14b9a05..4a4e82e792d 100644
--- a/db/external_sst_file_test.cc
+++ b/db/external_sst_file_test.cc
@@ -349,7 +349,7 @@ TEST_F(ExternalSSTFileTest, Basic) {
       std::string value = Key(k) + "_val";
       ASSERT_EQ(Get(Key(k)), value);
     }
-    ASSERT_TRUE(Flush().ok());
+    ASSERT_OK(Flush());
     ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   }
@@ -377,7 +377,7 @@ TEST_F(ExternalSSTFileTest, Basic) {
       ASSERT_EQ(Get(Key(k)), value);
     }
     DestroyAndRecreateExternalSSTFilesDir();
-  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction | kSkipPipelinedWrite));
+  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction));
 }

 class SstFileWriterCollector : public TablePropertiesCollector {
  public:
@@ -593,7 +593,7 @@ TEST_F(ExternalSSTFileTest, AddList) {
       std::string value = Key(k) + "_val";
       ASSERT_EQ(Get(Key(k)), value);
     }
-    ASSERT_TRUE(Flush().ok());
+    ASSERT_OK(Flush());
     ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   }
@@ -617,7 +617,7 @@ TEST_F(ExternalSSTFileTest, AddList) {
       ASSERT_EQ(Get(Key(k)), value);
     }
     DestroyAndRecreateExternalSSTFilesDir();
-  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction | kSkipPipelinedWrite));
+  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction));
 }

 TEST_F(ExternalSSTFileTest, AddListAtomicity) {
@@ -659,7 +659,7 @@ TEST_F(ExternalSSTFileTest, AddListAtomicity) {
       ASSERT_EQ(Get(Key(k)), value);
     }
     DestroyAndRecreateExternalSSTFilesDir();
-  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction | kSkipPipelinedWrite));
+  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction));
 }
 // This test reproduces a bug that can happen in some cases if the DB started
 // purging obsolete files when we are adding an external sst file.
@@ -884,13 +884,13 @@ TEST_F(ExternalSSTFileTest, MultiThreaded) {
       std::string value = (k % 100 == 0) ? (key + "_new") : key;
       ASSERT_EQ(Get(key), value);
     }
-    ASSERT_TRUE(Flush().ok());
+    ASSERT_OK(Flush());
     ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   }

   fprintf(stderr, "Verified %d values\n", num_files * keys_per_file);
   DestroyAndRecreateExternalSSTFilesDir();
-  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction | kSkipPipelinedWrite));
+  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction));
 }

 TEST_F(ExternalSSTFileTest, OverlappingRanges) {
@@ -1020,7 +1020,7 @@ TEST_F(ExternalSSTFileTest, OverlappingRanges) {
     }
     printf("keys/values verified\n");
     DestroyAndRecreateExternalSSTFilesDir();
-  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction | kSkipPipelinedWrite));
+  } while (ChangeOptions(kSkipPlainTable | kSkipFIFOCompaction));
 }

 TEST_F(ExternalSSTFileTest, PickedLevel) {

diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index 71eb5bc4465..adfcb4db5a7 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -350,7 +350,7 @@ __attribute__((__no_sanitize_undefined__))
   }
 };

-TEST_P(FaultInjectionTest, DISABLED_FaultTest) {  // PEGASUS: we dont support WAL
+TEST_P(FaultInjectionTest, FaultTest) {
   do {
     Random rnd(301);

@@ -390,7 +390,7 @@ TEST_P(FaultInjectionTest, DISABLED_FaultTest) {
 }

 // Previous log file is not fsynced if sync is forced after log rolling.
-TEST_P(FaultInjectionTest, DISABLED_WriteOptionSyncTest) {  // PEGASUS: we dont support WAL
+TEST_P(FaultInjectionTest, WriteOptionSyncTest) {
   test::SleepingBackgroundTask sleeping_task_low;
   env_->SetBackgroundThreads(1, Env::HIGH);
   // Block the job queue to prevent flush job from running.
@@ -476,7 +476,7 @@ TEST_P(FaultInjectionTest, UninstalledCompaction) {
   rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
 }

-TEST_P(FaultInjectionTest, DISABLED_ManualLogSyncTest) {  // PEGASUS: we dont support WAL
+TEST_P(FaultInjectionTest, ManualLogSyncTest) {
   test::SleepingBackgroundTask sleeping_task_low;
   env_->SetBackgroundThreads(1, Env::HIGH);
   // Block the job queue to prevent flush job from running.
@@ -513,7 +513,7 @@ TEST_P(FaultInjectionTest, DISABLED_ManualLogSyncTest) {
   ASSERT_EQ(value_space, val);
 }

-TEST_P(FaultInjectionTest, DISABLED_WriteBatchWalTerminationTest) {  // PEGASUS: we dont support WAL
+TEST_P(FaultInjectionTest, WriteBatchWalTerminationTest) {
   ReadOptions ro;
   Options options = CurrentOptions();
   options.env = env_;
Put("key", "val"); Flush(); @@ -82,7 +82,7 @@ TEST_F(DISABLED_RepairTest, CorruptManifest) { ASSERT_EQ(Get("key2"), "val2"); } -TEST_F(DISABLED_RepairTest, IncompleteManifest) { +TEST_F(RepairTest, IncompleteManifest) { // In this case, the manifest is valid but does not reference all of the SST // files. Expect a full recovery. Put("key", "val"); @@ -108,7 +108,7 @@ TEST_F(DISABLED_RepairTest, IncompleteManifest) { ASSERT_EQ(Get("key2"), "val2"); } -TEST_F(DISABLED_RepairTest, PostRepairSstFileNumbering) { +TEST_F(RepairTest, PostRepairSstFileNumbering) { // Verify after a DB is repaired, new files will be assigned higher numbers // than old files. Put("key", "val"); @@ -125,7 +125,7 @@ TEST_F(DISABLED_RepairTest, PostRepairSstFileNumbering) { ASSERT_GE(post_repair_file_num, pre_repair_file_num); } -TEST_F(DISABLED_RepairTest, LostSst) { +TEST_F(RepairTest, LostSst) { // Delete one of the SST files but preserve the manifest that refers to it, // then verify the DB is still usable for the intact SST. Put("key", "val"); @@ -144,7 +144,7 @@ TEST_F(DISABLED_RepairTest, LostSst) { ASSERT_TRUE((Get("key") == "val") != (Get("key2") == "val2")); } -TEST_F(DISABLED_RepairTest, CorruptSst) { +TEST_F(RepairTest, CorruptSst) { // Corrupt one of the SST files but preserve the manifest that refers to it, // then verify the DB is still usable for the intact SST. Put("key", "val"); @@ -163,7 +163,7 @@ TEST_F(DISABLED_RepairTest, CorruptSst) { ASSERT_TRUE((Get("key") == "val") != (Get("key2") == "val2")); } -TEST_F(DISABLED_RepairTest, UnflushedSst) { +TEST_F(RepairTest, UnflushedSst) { // This test case invokes repair while some data is unflushed, then verifies // that data is in the db. Put("key", "val"); @@ -191,7 +191,7 @@ TEST_F(DISABLED_RepairTest, UnflushedSst) { ASSERT_EQ(Get("key"), "val"); } -TEST_F(DISABLED_RepairTest, SeparateWalDir) { +TEST_F(RepairTest, SeparateWalDir) { do { Options options = CurrentOptions(); DestroyAndReopen(options); @@ -225,7 +225,7 @@ TEST_F(DISABLED_RepairTest, SeparateWalDir) { } while(ChangeWalOptions()); } -TEST_F(DISABLED_RepairTest, RepairMultipleColumnFamilies) { +TEST_F(RepairTest, RepairMultipleColumnFamilies) { // Verify repair logic associates SST files with their original column // families. const int kNumCfs = 3; @@ -263,7 +263,7 @@ TEST_F(DISABLED_RepairTest, RepairMultipleColumnFamilies) { } } -TEST_F(DISABLED_RepairTest, RepairColumnFamilyOptions) { +TEST_F(RepairTest, RepairColumnFamilyOptions) { // Verify repair logic uses correct ColumnFamilyOptions when repairing a // database with different options for column families. 
const int kNumCfs = 2; @@ -326,12 +326,12 @@ TEST_F(DISABLED_RepairTest, RepairColumnFamilyOptions) { } } -TEST_F(DISABLED_RepairTest, DbNameContainsTrailingSlash) { +TEST_F(RepairTest, DbNameContainsTrailingSlash) { { bool tmp; if (env_->AreFilesSame("", "", &tmp).IsNotSupported()) { fprintf(stderr, - "skipping DISABLED_RepairTest.DbNameContainsTrailingSlash due to " + "skipping RepairTest.DbNameContainsTrailingSlash due to " "unsupported Env::AreFilesSame\n"); return; } diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc index d32292ec00b..4511f015b9d 100644 --- a/db/write_batch_test.cc +++ b/db/write_batch_test.cc @@ -194,7 +194,6 @@ TEST_F(WriteBatchTest, Append) { PrintContents(&b1)); ASSERT_EQ(4, b1.Count()); b2.Clear(); - WriteBatchInternal::SetSequence(&b2, 1); b2.Put("c", "cc"); b2.Put("d", "dd"); b2.MarkWalTerminationPoint(); @@ -210,9 +209,9 @@ TEST_F(WriteBatchTest, Append) { PrintContents(&b1)); ASSERT_EQ(6, b1.Count()); ASSERT_EQ( - "Put(c, cc)@1" - "Put(d, dd)@2" - "Put(e, ee)@3", + "Put(c, cc)@0" + "Put(d, dd)@1" + "Put(e, ee)@2", PrintContents(&b2)); ASSERT_EQ(3, b2.Count()); } @@ -316,10 +315,9 @@ namespace { TEST_F(WriteBatchTest, PutNotImplemented) { WriteBatch batch; - WriteBatchInternal::SetSequence(&batch, 1); batch.Put(Slice("k1"), Slice("v1")); ASSERT_EQ(1, batch.Count()); - ASSERT_EQ("Put(k1, v1)@1", PrintContents(&batch)); + ASSERT_EQ("Put(k1, v1)@0", PrintContents(&batch)); WriteBatch::Handler handler; ASSERT_OK(batch.Iterate(&handler)); @@ -327,10 +325,9 @@ TEST_F(WriteBatchTest, PutNotImplemented) { TEST_F(WriteBatchTest, DeleteNotImplemented) { WriteBatch batch; - WriteBatchInternal::SetSequence(&batch, 1); batch.Delete(Slice("k2")); ASSERT_EQ(1, batch.Count()); - ASSERT_EQ("Delete(k2)@1", PrintContents(&batch)); + ASSERT_EQ("Delete(k2)@0", PrintContents(&batch)); WriteBatch::Handler handler; ASSERT_OK(batch.Iterate(&handler)); @@ -338,10 +335,9 @@ TEST_F(WriteBatchTest, DeleteNotImplemented) { TEST_F(WriteBatchTest, SingleDeleteNotImplemented) { WriteBatch batch; - WriteBatchInternal::SetSequence(&batch, 1); batch.SingleDelete(Slice("k2")); ASSERT_EQ(1, batch.Count()); - ASSERT_EQ("SingleDelete(k2)@1", PrintContents(&batch)); + ASSERT_EQ("SingleDelete(k2)@0", PrintContents(&batch)); WriteBatch::Handler handler; ASSERT_OK(batch.Iterate(&handler)); @@ -349,10 +345,9 @@ TEST_F(WriteBatchTest, SingleDeleteNotImplemented) { TEST_F(WriteBatchTest, MergeNotImplemented) { WriteBatch batch; - WriteBatchInternal::SetSequence(&batch, 1); batch.Merge(Slice("foo"), Slice("bar")); ASSERT_EQ(1, batch.Count()); - ASSERT_EQ("Merge(foo, bar)@1", PrintContents(&batch)); + ASSERT_EQ("Merge(foo, bar)@0", PrintContents(&batch)); WriteBatch::Handler handler; ASSERT_OK(batch.Iterate(&handler)); @@ -360,7 +355,6 @@ TEST_F(WriteBatchTest, MergeNotImplemented) { TEST_F(WriteBatchTest, Blob) { WriteBatch batch; - WriteBatchInternal::SetSequence(&batch, 1); batch.Put(Slice("k1"), Slice("v1")); batch.Put(Slice("k2"), Slice("v2")); batch.Put(Slice("k3"), Slice("v3")); @@ -371,12 +365,12 @@ TEST_F(WriteBatchTest, Blob) { batch.Merge(Slice("foo"), Slice("bar")); ASSERT_EQ(6, batch.Count()); ASSERT_EQ( - "Merge(foo, bar)@6" - "Put(k1, v1)@1" - "Delete(k2)@4" - "Put(k2, v2)@2" - "SingleDelete(k3)@5" - "Put(k3, v3)@3", + "Merge(foo, bar)@5" + "Put(k1, v1)@0" + "Delete(k2)@3" + "Put(k2, v2)@1" + "SingleDelete(k3)@4" + "Put(k3, v3)@2", PrintContents(&batch)); TestHandler handler; @@ -768,7 +762,6 @@ TEST_F(WriteBatchTest, ColumnFamiliesBatchWithIndexTest) { TEST_F(WriteBatchTest, 
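All of the PrintContents expectations shift down by one for the same reason: the fork seeded each batch with WriteBatchInternal::SetSequence(&batch, 1), and with those calls deleted a fresh WriteBatch numbers its entries from sequence 0. In miniature:

    WriteBatch batch;
    batch.Put("k1", "v1");
    batch.Delete("k2");
    // PrintContents(&batch) now yields "Put(k1, v1)@0" "Delete(k2)@1";
    // under the old SetSequence(&batch, 1) seeding it was @1 and @2.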
diff --git a/db/write_callback_test.cc b/db/write_callback_test.cc
index 13eae62fa2a..431ceca16d1 100644
--- a/db/write_callback_test.cc
+++ b/db/write_callback_test.cc
@@ -24,11 +24,11 @@ using std::string;

 namespace rocksdb {

-class DISABLED_WriteCallbackTest : public testing::Test {
+class WriteCallbackTest : public testing::Test {
  public:
   string dbname;

-  DISABLED_WriteCallbackTest() {
+  WriteCallbackTest() {
     dbname = test::TmpDir() + "/write_callback_testdb";
   }
 };
@@ -86,7 +86,7 @@ class MockWriteCallback : public WriteCallback {
   bool AllowWriteBatching() override { return allow_batching_; }
 };

-TEST_F(DISABLED_WriteCallbackTest, WriteWithCallbackTest) {
+TEST_F(WriteCallbackTest, WriteWithCallbackTest) {
   struct WriteOP {
     WriteOP(bool should_fail = false) { callback_.should_fail_ = should_fail; }
@@ -322,7 +322,7 @@ TEST_F(DISABLED_WriteCallbackTest, WriteWithCallbackTest) {
   }
 }

-TEST_F(DISABLED_WriteCallbackTest, WriteCallBackTest) {
+TEST_F(WriteCallbackTest, WriteCallBackTest) {
   Options options;
   WriteOptions write_options;
   ReadOptions read_options;

diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc
index 4f46a70817e..7a30e4ec3f4 100644
--- a/utilities/backupable/backupable_db_test.cc
+++ b/utilities/backupable/backupable_db_test.cc
@@ -472,10 +472,9 @@ static void AssertEmpty(DB* db, int from, int to) {
   }
 }

-// pegasus: we dont support backup now
-class DISABLED_BackupableDBTest : public testing::Test {
+class BackupableDBTest : public testing::Test {
  public:
-  DISABLED_BackupableDBTest() {
+  BackupableDBTest() {
     // set up files
     std::string db_chroot = test::TmpDir() + "/backupable_db";
     std::string backup_chroot = test::TmpDir() + "/backupable_db_backup";
@@ -636,7 +635,7 @@ class DISABLED_BackupableDBTest : public testing::Test {
  protected:
   unique_ptr<BackupableDBOptions> backupable_options_;
-};  // DISABLED_BackupableDBTest
+};  // BackupableDBTest

 void AppendPath(const std::string& path, std::vector<std::string>& v) {
   for (auto& f : v) {
@@ -644,17 +643,17 @@ void AppendPath(const std::string& path, std::vector<std::string>& v) {
   }
 }

-class DISABLED_BackupableDBTestWithParam : public DISABLED_BackupableDBTest,
+class BackupableDBTestWithParam : public BackupableDBTest,
                                   public testing::WithParamInterface<bool> {
  public:
-  DISABLED_BackupableDBTestWithParam() {
+  BackupableDBTestWithParam() {
     backupable_options_->share_files_with_checksum = GetParam();
   }
 };

 // This test verifies that the verifyBackup method correctly identifies
 // invalid backups
-TEST_P(DISABLED_BackupableDBTestWithParam, VerifyBackup) {
+TEST_P(BackupableDBTestWithParam, VerifyBackup) {
   const int keys_iteration = 5000;
   Random rnd(6);
   Status s;
@@ -686,7 +685,7 @@ TEST_P(DISABLED_BackupableDBTestWithParam, VerifyBackup) {
 }

 // open DB, write, close DB, backup, restore, repeat
-TEST_P(DISABLED_BackupableDBTestWithParam, OfflineIntegrationTest) {
+TEST_P(BackupableDBTestWithParam, OfflineIntegrationTest) {
   // has to be a big number, so that it triggers the memtable flush
   const int keys_iteration = 5000;
   const int max_key = keys_iteration * 4 + 10;
@@ -733,7 +732,7 @@ TEST_P(DISABLED_BackupableDBTestWithParam, OfflineIntegrationTest) {
 }

 // open DB, write, backup, write, backup, close, restore
-TEST_P(DISABLED_BackupableDBTestWithParam, OnlineIntegrationTest) {
+TEST_P(BackupableDBTestWithParam, OnlineIntegrationTest) {
   // has to be a big number, so that it triggers the memtable flush
   const int keys_iteration = 5000;
   const int max_key = keys_iteration * 4 + 10;
@@ -794,11 +793,11 @@ TEST_P(DISABLED_BackupableDBTestWithParam, OnlineIntegrationTest) {
   CloseBackupEngine();
 }

-INSTANTIATE_TEST_CASE_P(DISABLED_BackupableDBTestWithParam, DISABLED_BackupableDBTestWithParam,
+INSTANTIATE_TEST_CASE_P(BackupableDBTestWithParam, BackupableDBTestWithParam,
                         ::testing::Bool());

 // this will make sure that backup does not copy the same file twice
-TEST_F(DISABLED_BackupableDBTest, NoDoubleCopy) {
+TEST_F(BackupableDBTest, NoDoubleCopy) {
   OpenDBAndBackupEngine(true, true);

   // should write 5 DB files + one meta file
@@ -861,7 +860,7 @@ TEST_F(DISABLED_BackupableDBTest, NoDoubleCopy) {
 //    fine
 // 3. Corrupted checksum value - if the checksum is not a valid uint32_t,
 //    db open should fail, otherwise, it aborts during the restore process.
-TEST_F(DISABLED_BackupableDBTest, CorruptionsTest) {
+TEST_F(BackupableDBTest, CorruptionsTest) {
   const int keys_iteration = 5000;
   Random rnd(6);
   Status s;
@@ -961,7 +960,7 @@ TEST_F(DISABLED_BackupableDBTest, CorruptionsTest) {
   AssertBackupConsistency(2, 0, keys_iteration * 2, keys_iteration * 5);
 }

-TEST_F(DISABLED_BackupableDBTest, InterruptCreationTest) {
+TEST_F(BackupableDBTest, InterruptCreationTest) {
   // Interrupt backup creation by failing new writes and failing cleanup of the
   // partial state. Then verify a subsequent backup can still succeed.
   const int keys_iteration = 5000;
@@ -997,7 +996,7 @@ inline std::string OptionsPath(std::string ret, int backupID) {

 // Backup the LATEST options file to
 // "<backup_dir>/private/<backup_id>/OPTIONS<number>"
-TEST_F(DISABLED_BackupableDBTest, BackupOptions) {
+TEST_F(BackupableDBTest, BackupOptions) {
   OpenDBAndBackupEngine(true);
   for (int i = 1; i < 5; i++) {
     std::string name;
@@ -1022,7 +1021,7 @@ TEST_F(DISABLED_BackupableDBTest, BackupOptions) {

 // This test verifies we don't delete the latest backup when read-only option is
 // set
-TEST_F(DISABLED_BackupableDBTest, NoDeleteWithReadOnly) {
+TEST_F(BackupableDBTest, NoDeleteWithReadOnly) {
   const int keys_iteration = 5000;
   Random rnd(6);
   Status s;
@@ -1055,7 +1054,7 @@ TEST_F(DISABLED_BackupableDBTest, NoDeleteWithReadOnly) {
   delete read_only_backup_engine;
 }

-TEST_F(DISABLED_BackupableDBTest, FailOverwritingBackups) {
+TEST_F(BackupableDBTest, FailOverwritingBackups) {
   options_.write_buffer_size = 1024 * 1024 * 1024;  // 1GB
   options_.disable_auto_compactions = true;
@@ -1092,7 +1091,7 @@ TEST_F(DISABLED_BackupableDBTest, FailOverwritingBackups) {
   CloseDBAndBackupEngine();
 }

-TEST_F(DISABLED_BackupableDBTest, NoShareTableFiles) {
+TEST_F(BackupableDBTest, NoShareTableFiles) {
   const int keys_iteration = 5000;
   OpenDBAndBackupEngine(true, false, false);
   for (int i = 0; i < 5; ++i) {
@@ -1108,7 +1107,7 @@ TEST_F(DISABLED_BackupableDBTest, NoShareTableFiles) {
 }

 // Verify that you can backup and restore with share_files_with_checksum on
-TEST_F(DISABLED_BackupableDBTest, ShareTableFilesWithChecksums) {
+TEST_F(BackupableDBTest, ShareTableFilesWithChecksums) {
   const int keys_iteration = 5000;
   OpenDBAndBackupEngineShareWithChecksum(true, false, true, true);
   for (int i = 0; i < 5; ++i) {
@@ -1125,7 +1124,7 @@ TEST_F(DISABLED_BackupableDBTest, ShareTableFilesWithChecksums) {

 // Verify that you can backup and restore using share_files_with_checksum set to
 // false and then transition this option to true
-TEST_F(DISABLED_BackupableDBTest, ShareTableFilesWithChecksumsTransition) {
+TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsTransition) {
   const int keys_iteration = 5000;
   // set share_files_with_checksum to false
   OpenDBAndBackupEngineShareWithChecksum(true, false, true, false);
@@ -1154,7 +1153,7 @@ TEST_F(DISABLED_BackupableDBTest, ShareTableFilesWithChecksumsTransition) {
   }
 }

-TEST_F(DISABLED_BackupableDBTest, DeleteTmpFiles) {
+TEST_F(BackupableDBTest, DeleteTmpFiles) {
   for (bool shared_checksum : {false, true}) {
     if (shared_checksum) {
       OpenDBAndBackupEngineShareWithChecksum(
@@ -1193,7 +1192,7 @@ TEST_F(DISABLED_BackupableDBTest, DeleteTmpFiles) {
   }
 }

-TEST_F(DISABLED_BackupableDBTest, KeepLogFiles) {
+TEST_F(BackupableDBTest, KeepLogFiles) {
   backupable_options_->backup_log_files = false;
   // basically infinite
   options_.WAL_ttl_seconds = 24 * 60 * 60;
@@ -1214,7 +1213,7 @@ TEST_F(DISABLED_BackupableDBTest, KeepLogFiles) {
   AssertBackupConsistency(0, 0, 500, 600, true);
 }

-TEST_F(DISABLED_BackupableDBTest, RateLimiting) {
+TEST_F(BackupableDBTest, RateLimiting) {
   size_t const kMicrosPerSec = 1000 * 1000LL;
   uint64_t const MB = 1024 * 1024;
@@ -1271,7 +1270,7 @@ TEST_F(DISABLED_BackupableDBTest, RateLimiting) {
   }
 }

-TEST_F(DISABLED_BackupableDBTest, ReadOnlyBackupEngine) {
+TEST_F(BackupableDBTest, ReadOnlyBackupEngine) {
   DestroyDB(dbname_, options_);
   OpenDBAndBackupEngine(true);
   FillDB(db_.get(), 0, 100);
@@ -1303,7 +1302,7 @@ TEST_F(DISABLED_BackupableDBTest, ReadOnlyBackupEngine) {
   delete db;
 }

-TEST_F(DISABLED_BackupableDBTest, ProgressCallbackDuringBackup) {
+TEST_F(BackupableDBTest, ProgressCallbackDuringBackup) {
   DestroyDB(dbname_, options_);
   OpenDBAndBackupEngine(true);
   FillDB(db_.get(), 0, 100);
@@ -1317,7 +1316,7 @@ TEST_F(DISABLED_BackupableDBTest, ProgressCallbackDuringBackup) {
   DestroyDB(dbname_, options_);
 }

-TEST_F(DISABLED_BackupableDBTest, GarbageCollectionBeforeBackup) {
+TEST_F(BackupableDBTest, GarbageCollectionBeforeBackup) {
   DestroyDB(dbname_, options_);
   OpenDBAndBackupEngine(true);
@@ -1343,7 +1342,7 @@ TEST_F(DISABLED_BackupableDBTest, GarbageCollectionBeforeBackup) {
 }

 // Test that we properly propagate Env failures
-TEST_F(DISABLED_BackupableDBTest, EnvFailures) {
+TEST_F(BackupableDBTest, EnvFailures) {
   BackupEngine* backup_engine;

   // get children failure
@@ -1396,7 +1395,7 @@ TEST_F(DISABLED_BackupableDBTest, EnvFailures) {

 // Verify manifest can roll while a backup is being created with the old
 // manifest.
-TEST_F(DISABLED_BackupableDBTest, ChangeManifestDuringBackupCreation) {
+TEST_F(BackupableDBTest, ChangeManifestDuringBackupCreation) {
   DestroyDB(dbname_, options_);
   options_.max_manifest_file_size = 0;  // always rollover manifest for file add
   OpenDBAndBackupEngine(true);
@@ -1434,7 +1433,7 @@ TEST_F(DISABLED_BackupableDBTest, ChangeManifestDuringBackupCreation) {
 }

 // see https://github.com/facebook/rocksdb/issues/921
-TEST_F(DISABLED_BackupableDBTest, Issue921Test) {
+TEST_F(BackupableDBTest, Issue921Test) {
   BackupEngine* backup_engine;
   backupable_options_->share_table_files = false;
   backup_chroot_env_->CreateDirIfMissing(backupable_options_->backup_dir);
@@ -1445,7 +1444,7 @@ TEST_F(DISABLED_BackupableDBTest, Issue921Test) {
   delete backup_engine;
 }

-TEST_F(DISABLED_BackupableDBTest, BackupWithMetadata) {
+TEST_F(BackupableDBTest, BackupWithMetadata) {
   const int keys_iteration = 5000;
   OpenDBAndBackupEngine(true);
   // create five backups
@@ -1468,7 +1467,7 @@ TEST_F(DISABLED_BackupableDBTest, BackupWithMetadata) {
   DestroyDB(dbname_, options_);
 }

-TEST_F(DISABLED_BackupableDBTest, BinaryMetadata) {
+TEST_F(BackupableDBTest, BinaryMetadata) {
   OpenDBAndBackupEngine(true);
   std::string binaryMetadata = "abc\ndef";
   binaryMetadata.push_back('\0');
@@ -1486,7 +1485,7 @@ TEST_F(DISABLED_BackupableDBTest, BinaryMetadata) {
   DestroyDB(dbname_, options_);
 }

-TEST_F(DISABLED_BackupableDBTest, MetadataTooLarge) {
+TEST_F(BackupableDBTest, MetadataTooLarge) {
   OpenDBAndBackupEngine(true);
   std::string largeMetadata(1024 * 1024 + 1, 0);
   ASSERT_NOK(
@@ -1495,7 +1494,7 @@ TEST_F(DISABLED_BackupableDBTest, MetadataTooLarge) {
   DestroyDB(dbname_, options_);
 }

-TEST_F(DISABLED_BackupableDBTest, LimitBackupsOpened) {
+TEST_F(BackupableDBTest, LimitBackupsOpened) {
   // Verify the specified max backups are opened, including skipping over
   // corrupted backups.
   //
@@ -1528,7 +1527,7 @@ TEST_F(DISABLED_BackupableDBTest, LimitBackupsOpened) {
   DestroyDB(dbname_, options_);
 }

-TEST_F(DISABLED_BackupableDBTest, CreateWhenLatestBackupCorrupted) {
+TEST_F(BackupableDBTest, CreateWhenLatestBackupCorrupted) {
   // we should pick an ID greater than corrupted backups' IDs so creation can
   // succeed even when latest backup is corrupted.
   const int kNumKeys = 5000;
@@ -1549,7 +1548,7 @@ TEST_F(DISABLED_BackupableDBTest, CreateWhenLatestBackupCorrupted) {
   ASSERT_EQ(2, backup_infos[0].backup_id);
 }

-TEST_F(DISABLED_BackupableDBTest, WriteOnlyEngine) {
+TEST_F(BackupableDBTest, WriteOnlyEngine) {
   // Verify we can open a backup engine and create new ones even if reading old
   // backups would fail with IOError. IOError is a more serious condition than
   // corruption and would cause the engine to fail opening. So the only way to
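Every BackupableDBTest above exercises the same engine surface, so the renames are mechanical. For orientation, the core calls the suite revolves around (BackupEngine::Open, CreateNewBackup and VerifyBackup are the real API; the env and option values are whatever the fixture set up):

    BackupEngine* backup_engine;
    ASSERT_OK(BackupEngine::Open(Env::Default(), *backupable_options_,
                                 &backup_engine));
    ASSERT_OK(backup_engine->CreateNewBackup(db_.get(),
                                             true /* flush_before_backup */));
    ASSERT_OK(backup_engine->VerifyBackup(1 /* backup_id */));
    delete backup_engine;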
   //
@@ -1528,7 +1527,7 @@ TEST_F(DISABLED_BackupableDBTest, LimitBackupsOpened) {
   DestroyDB(dbname_, options_);
 }
 
-TEST_F(DISABLED_BackupableDBTest, CreateWhenLatestBackupCorrupted) {
+TEST_F(BackupableDBTest, CreateWhenLatestBackupCorrupted) {
   // we should pick an ID greater than corrupted backups' IDs so creation can
   // succeed even when latest backup is corrupted.
   const int kNumKeys = 5000;
@@ -1549,7 +1548,7 @@ TEST_F(DISABLED_BackupableDBTest, CreateWhenLatestBackupCorrupted) {
   ASSERT_EQ(2, backup_infos[0].backup_id);
 }
 
-TEST_F(DISABLED_BackupableDBTest, WriteOnlyEngine) {
+TEST_F(BackupableDBTest, WriteOnlyEngine) {
   // Verify we can open a backup engine and create new ones even if reading old
   // backups would fail with IOError. IOError is a more serious condition than
   // corruption and would cause the engine to fail opening. So the only way to
diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc
index ca2453757cd..7a63aca66ef 100644
--- a/utilities/blob_db/blob_db_test.cc
+++ b/utilities/blob_db/blob_db_test.cc
@@ -618,7 +618,7 @@ TEST_F(BlobDBTest, MultipleWriters) {
 
   std::vector<port::Thread> workers;
   std::vector<std::map<std::string, std::string>> data_set(10);
-  for (uint32_t i = 0; i < 1; i++)
+  for (uint32_t i = 0; i < 10; i++)
    workers.push_back(port::Thread(
        [&](uint32_t id) {
          Random rnd(301 + id);
@@ -635,7 +635,7 @@ TEST_F(BlobDBTest, MultipleWriters) {
        },
        i));
   std::map<std::string, std::string> data;
-  for (size_t i = 0; i < 1; i++) {
+  for (size_t i = 0; i < 10; i++) {
     workers[i].join();
     data.insert(data_set[i].begin(), data_set[i].end());
   }
@@ -680,7 +680,7 @@ TEST_F(BlobDBTest, GCAfterOverwriteKeys) {
   VerifyDB(data);
 }
 
-TEST_F(BlobDBTest, DISABLED_GCRelocateKeyWhileOverwriting) {
+TEST_F(BlobDBTest, GCRelocateKeyWhileOverwriting) {
   Random rnd(301);
   BlobDBOptions bdb_options;
   bdb_options.min_blob_size = 0;
@@ -711,7 +711,7 @@ TEST_F(BlobDBTest, DISABLED_GCRelocateKeyWhileOverwriting) {
   VerifyDB({{"foo", "v2"}});
 }
 
-TEST_F(BlobDBTest, DISABLED_GCExpiredKeyWhileOverwriting) {
+TEST_F(BlobDBTest, GCExpiredKeyWhileOverwriting) {
   Random rnd(301);
   Options options;
   options.env = mock_env_.get();
diff --git a/utilities/checkpoint/checkpoint_test.cc b/utilities/checkpoint/checkpoint_test.cc
index 2b56cb03b9a..dc22a3b4920 100644
--- a/utilities/checkpoint/checkpoint_test.cc
+++ b/utilities/checkpoint/checkpoint_test.cc
@@ -218,7 +218,7 @@ class CheckpointTest : public testing::Test {
 };
 
 TEST_F(CheckpointTest, GetSnapshotLink) {
-  for (uint64_t log_size_for_flush : {0}) {  // PEGASUS: log_size_for_flush should always be zero
+  for (uint64_t log_size_for_flush : {0, 1000000}) {
     Options options;
     const std::string snapshot_name = test::TmpDir(env_) + "/snapshot";
     DB* snapshotDB;
@@ -348,7 +348,7 @@ TEST_F(CheckpointTest, CheckpointCF) {
   ASSERT_OK(DestroyDB(snapshot_name, options));
 }
 
-TEST_F(CheckpointTest, DISABLED_CheckpointCFNoFlush) {
+TEST_F(CheckpointTest, CheckpointCFNoFlush) {
   Options options = CurrentOptions();
   CreateAndReopenWithCF({"one", "two", "three", "four", "five"}, options);
 
@@ -458,7 +458,7 @@ TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing) {
   snapshotDB = nullptr;
 }
 
-TEST_F(CheckpointTest, DISABLED_CurrentFileModifiedWhileCheckpointing2PC) {
+TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing2PC) {
   Close();
   const std::string kSnapshotName = test::TmpDir(env_) + "/snapshot";
   const std::string dbname = test::TmpDir() + "/transaction_testdb";
diff --git a/utilities/document/document_db_test.cc b/utilities/document/document_db_test.cc
index 6a4c9682b13..e8f4138c0bc 100644
--- a/utilities/document/document_db_test.cc
+++ b/utilities/document/document_db_test.cc
@@ -15,14 +15,13 @@
 namespace rocksdb {
 
-// NOTE: we will not use DocumentDB in pegasus
-class DISABLED_DocumentDBTest : public testing::Test {
+class DocumentDBTest : public testing::Test {
  public:
-  DISABLED_DocumentDBTest() {
+  DocumentDBTest() {
     dbname_ = test::TmpDir() + "/document_db_test";
     DestroyDB(dbname_, Options());
   }
-  ~DISABLED_DocumentDBTest() {
+  ~DocumentDBTest() {
     delete db_;
     DestroyDB(dbname_, Options());
   }
@@ -67,7 +66,7 @@ class DISABLED_DocumentDBTest : public testing::Test {
   DocumentDB* db_;
 };
 
-TEST_F(DISABLED_DocumentDBTest, SimpleQueryTest) {
+TEST_F(DocumentDBTest, SimpleQueryTest) {
   DocumentDBOptions options;
   DocumentDB::IndexDescriptor index;
   index.description = Parse("{\"name\": 1}");
@@ -139,7 +138,7 @@ TEST_F(DISABLED_DocumentDBTest, SimpleQueryTest) {
   }
 }
 
-TEST_F(DISABLED_DocumentDBTest, ComplexQueryTest) {
+TEST_F(DocumentDBTest, ComplexQueryTest) {
   DocumentDBOptions options;
   DocumentDB::IndexDescriptor priority_index;
   priority_index.description = Parse("{'priority': 1}");
diff --git a/utilities/simulator_cache/sim_cache_test.cc b/utilities/simulator_cache/sim_cache_test.cc
index ee8cb041365..4c175c94775 100644
--- a/utilities/simulator_cache/sim_cache_test.cc
+++ b/utilities/simulator_cache/sim_cache_test.cc
@@ -42,7 +42,7 @@ class SimCacheTest : public DBTestBase {
   void InitTable(const Options& options) {
     std::string value(kValueSize, 'a');
     for (size_t i = 0; i < kNumBlocks * 2; i++) {
-      ASSERT_OK(Put(ToString(i), value.c_str(), WriteOptions(), false));
+      ASSERT_OK(Put(ToString(i), value.c_str()));
     }
   }
 
diff --git a/utilities/transactions/optimistic_transaction_test.cc b/utilities/transactions/optimistic_transaction_test.cc
index 337fda34d06..f627f0e0955 100644
--- a/utilities/transactions/optimistic_transaction_test.cc
+++ b/utilities/transactions/optimistic_transaction_test.cc
@@ -23,15 +23,14 @@
 using std::string;
 
 namespace rocksdb {
-// PEGASUS: write count of commit may be 0 in DISABLED_OptimisticTransactionTest, which is not supported in pegasus
-class DISABLED_OptimisticTransactionTest : public testing::Test {
+class OptimisticTransactionTest : public testing::Test {
  public:
   OptimisticTransactionDB* txn_db;
   DB* db;
   string dbname;
   Options options;
 
-  DISABLED_OptimisticTransactionTest() {
+  OptimisticTransactionTest() {
     options.create_if_missing = true;
     options.max_write_buffer_number = 2;
     dbname = test::TmpDir() + "/optimistic_transaction_testdb";
@@ -39,7 +38,7 @@ class DISABLED_OptimisticTransactionTest : public testing::Test {
     DestroyDB(dbname, options);
     Open();
   }
-  ~DISABLED_OptimisticTransactionTest() {
+  ~OptimisticTransactionTest() {
    delete txn_db;
    DestroyDB(dbname, options);
  }
@@ -59,7 +58,7 @@ class DISABLED_OptimisticTransactionTest : public testing::Test {
   }
 };
 
-TEST_F(DISABLED_OptimisticTransactionTest, SuccessTest) {
+TEST_F(OptimisticTransactionTest, SuccessTest) {
   WriteOptions write_options;
   ReadOptions read_options;
   string value;
@@ -88,7 +87,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, SuccessTest) {
   delete txn;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, WriteConflictTest) {
+TEST_F(OptimisticTransactionTest, WriteConflictTest) {
   WriteOptions write_options;
   ReadOptions read_options;
   string value;
@@ -122,7 +121,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, WriteConflictTest) {
   delete txn;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, WriteConflictTest2) {
+TEST_F(OptimisticTransactionTest, WriteConflictTest2) {
   WriteOptions write_options;
   ReadOptions read_options;
   OptimisticTransactionOptions txn_options;
@@ -157,7 +156,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, WriteConflictTest2) {
   delete txn;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, ReadConflictTest) {
+TEST_F(OptimisticTransactionTest, ReadConflictTest) {
   WriteOptions write_options;
   ReadOptions read_options, snapshot_read_options;
   OptimisticTransactionOptions txn_options;
@@ -196,7 +195,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, ReadConflictTest) {
   delete txn;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, TxnOnlyTest) {
+TEST_F(OptimisticTransactionTest, TxnOnlyTest) {
   // Test to make sure transactions work when there are no other writes in an
   // empty db.
 
@@ -216,7 +215,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, TxnOnlyTest) {
   delete txn;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, FlushTest) {
+TEST_F(OptimisticTransactionTest, FlushTest) {
   WriteOptions write_options;
   ReadOptions read_options, snapshot_read_options;
   string value;
@@ -256,7 +255,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, FlushTest) {
   delete txn;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, FlushTest2) {
+TEST_F(OptimisticTransactionTest, FlushTest2) {
   WriteOptions write_options;
   ReadOptions read_options, snapshot_read_options;
   string value;
@@ -311,7 +310,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, FlushTest2) {
   delete txn;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, NoSnapshotTest) {
+TEST_F(OptimisticTransactionTest, NoSnapshotTest) {
   WriteOptions write_options;
   ReadOptions read_options;
   string value;
@@ -340,7 +339,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, NoSnapshotTest) {
   delete txn;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, MultipleSnapshotTest) {
+TEST_F(OptimisticTransactionTest, MultipleSnapshotTest) {
   WriteOptions write_options;
   ReadOptions read_options, snapshot_read_options;
   string value;
@@ -447,7 +446,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, MultipleSnapshotTest) {
   delete txn2;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, ColumnFamiliesTest) {
+TEST_F(OptimisticTransactionTest, ColumnFamiliesTest) {
   WriteOptions write_options;
   ReadOptions read_options, snapshot_read_options;
   OptimisticTransactionOptions txn_options;
@@ -606,7 +605,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, ColumnFamiliesTest) {
   }
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, EmptyTest) {
+TEST_F(OptimisticTransactionTest, EmptyTest) {
   WriteOptions write_options;
   ReadOptions read_options;
   string value;
@@ -643,7 +642,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, EmptyTest) {
   delete txn;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, PredicateManyPreceders) {
+TEST_F(OptimisticTransactionTest, PredicateManyPreceders) {
   WriteOptions write_options;
   ReadOptions read_options1, read_options2;
   OptimisticTransactionOptions txn_options;
@@ -707,7 +706,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, PredicateManyPreceders) {
   delete txn2;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, LostUpdate) {
+TEST_F(OptimisticTransactionTest, LostUpdate) {
   WriteOptions write_options;
   ReadOptions read_options, read_options1, read_options2;
   OptimisticTransactionOptions txn_options;
@@ -805,7 +804,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, LostUpdate) {
   ASSERT_EQ(value, "8");
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, UntrackedWrites) {
+TEST_F(OptimisticTransactionTest, UntrackedWrites) {
   WriteOptions write_options;
   ReadOptions read_options;
   string value;
@@ -856,7 +855,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, UntrackedWrites) {
   delete txn;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, IteratorTest) {
+TEST_F(OptimisticTransactionTest, IteratorTest) {
   WriteOptions write_options;
   ReadOptions read_options, snapshot_read_options;
   OptimisticTransactionOptions txn_options;
@@ -971,7 +970,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, IteratorTest) {
   delete txn;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, SavepointTest) {
+TEST_F(OptimisticTransactionTest, SavepointTest) {
   WriteOptions write_options;
   ReadOptions read_options, snapshot_read_options;
   OptimisticTransactionOptions txn_options;
@@ -1135,7 +1134,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, SavepointTest) {
   delete txn;
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, UndoGetForUpdateTest) {
+TEST_F(OptimisticTransactionTest, UndoGetForUpdateTest) {
   WriteOptions write_options;
   ReadOptions read_options, snapshot_read_options;
   OptimisticTransactionOptions txn_options;
@@ -1324,7 +1323,7 @@ Status OptimisticTransactionStressTestInserter(OptimisticTransactionDB* db,
   }
 }  // namespace
 
-TEST_F(DISABLED_OptimisticTransactionTest, OptimisticTransactionStressTest) {
+TEST_F(OptimisticTransactionTest, OptimisticTransactionStressTest) {
   const size_t num_threads = 4;
   const size_t num_transactions_per_thread = 10000;
   const size_t num_sets = 3;
@@ -1355,7 +1354,7 @@ TEST_F(DISABLED_OptimisticTransactionTest, OptimisticTransactionStressTest) {
   ASSERT_OK(s);
 }
 
-TEST_F(DISABLED_OptimisticTransactionTest, SequenceNumberAfterRecoverTest) {
+TEST_F(OptimisticTransactionTest, SequenceNumberAfterRecoverTest) {
   WriteOptions write_options;
   OptimisticTransactionOptions transaction_options;
 
diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc
index 6b4ec01ce59..80aabb94342 100644
--- a/utilities/transactions/transaction_test.cc
+++ b/utilities/transactions/transaction_test.cc
@@ -4,7 +4,7 @@
 // (found in the LICENSE.Apache file in the root directory).
 
 #ifndef ROCKSDB_LITE
-#ifndef PEGASUS
+
 #ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
 #endif
@@ -4949,5 +4949,5 @@ int main(int argc, char** argv) {
           "SKIPPED as Transactions are not supported in ROCKSDB_LITE\n");
   return 0;
 }
-#endif  // PEGASUS
+
 #endif  // ROCKSDB_LITE
diff --git a/utilities/transactions/transaction_test.h b/utilities/transactions/transaction_test.h
index db17d7d3a5b..1be26897952 100644
--- a/utilities/transactions/transaction_test.h
+++ b/utilities/transactions/transaction_test.h
@@ -4,7 +4,7 @@
 // (found in the LICENSE.Apache file in the root directory).
 
 #pragma once
-#ifndef PEGASUS
+
 #ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
 #endif
@@ -243,5 +243,3 @@ class TransactionTest : public ::testing::TestWithParam<
 class MySQLStyleTransactionTest : public TransactionTest {};
 
 }  // namespace rocksdb
-
-#endif  // PEGASUS
\ No newline at end of file
diff --git a/utilities/transactions/write_prepared_transaction_test.cc b/utilities/transactions/write_prepared_transaction_test.cc
index 0d2e375424b..84ea6cdf31c 100644
--- a/utilities/transactions/write_prepared_transaction_test.cc
+++ b/utilities/transactions/write_prepared_transaction_test.cc
@@ -155,7 +155,6 @@ TEST(CommitEntry64b, BasicTest) {
   }
 }
 
-#ifndef PEGASUS
 class WritePreparedTxnDBMock : public WritePreparedTxnDB {
  public:
   WritePreparedTxnDBMock(DBImpl* db_impl, TransactionDBOptions& opt)
@@ -1630,7 +1629,6 @@ TEST_P(WritePreparedTransactionTest, Iterate) {
   delete transaction;
 }
 
-#endif  // PEGASUS
 }  // namespace rocksdb
 
 int main(int argc, char** argv) {