Add missing status check when compiling with ASSERT_STATUS_CHECKED=1 #11686

Closed · 2 commits
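Context for the diffs below: when RocksDB is built with ASSERT_STATUS_CHECKED=1, a rocksdb::Status that goes out of scope without ever being inspected fails an assertion at runtime. Every status-returning call therefore has to either assert on the result (ASSERT_OK, ASSERT_NOK, IsNotFound(), ...) or dismiss it explicitly via PermitUncheckedError(). The following is a minimal self-contained sketch of that mechanism, illustrative only and not RocksDB's actual implementation (the real class lives in include/rocksdb/status.h and tracks checks only in assertion-enabled builds):

#include <cstdio>
#include <cstdlib>

// Toy must-check status. Destroying an instance that was never inspected
// aborts, which is how the unchecked calls fixed in this PR get caught.
class Status {
 public:
  static Status OK() { return Status(0); }
  static Status NotFound() { return Status(1); }

  Status(const Status&) = delete;
  Status& operator=(const Status&) = delete;

  // Moving transfers the obligation to check to the destination.
  Status(Status&& other) noexcept : code_(other.code_) {
    other.checked_ = true;
  }

  ~Status() {
    if (!checked_) {
      std::fprintf(stderr, "Status was never checked\n");
      std::abort();
    }
  }

  // Any inspection counts as a check.
  bool ok() const { checked_ = true; return code_ == 0; }
  bool IsNotFound() const { checked_ = true; return code_ == 1; }

  // Explicitly discard a result, e.g. when another error takes priority.
  void PermitUncheckedError() const { checked_ = true; }

 private:
  explicit Status(int code) : code_(code) {}
  int code_;
  mutable bool checked_ = false;
};

int main() {
  Status s = Status::NotFound();
  // Commenting out the next line would make the destructor abort,
  // mimicking an ASSERT_STATUS_CHECKED failure.
  if (!s.IsNotFound()) return 1;
  return 0;
}

Most of the fixes below follow directly from this: each call gets wrapped in the assertion that matches its expected outcome.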
2 changes: 1 addition & 1 deletion db/compact_files_test.cc
@@ -345,7 +345,7 @@ TEST_F(CompactFilesTest, CompactionFilterWithGetSv) {
       return true;
     }
     std::string res;
-    db_->Get(ReadOptions(), "", &res);
+    EXPECT_TRUE(db_->Get(ReadOptions(), "", &res).IsNotFound());
     return true;
   }
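Note the shape of this first fix: rather than blindly wrapping the call in ASSERT_OK(), it asserts the outcome the test actually expects. Get() for a key that was never written returns Status::NotFound(), which is a non-OK status but still counts as checked once IsNotFound() inspects it.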
3 changes: 2 additions & 1 deletion db/comparator_db_test.cc
@@ -77,7 +77,7 @@ void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,

   for (int i = 0; i < num_writes; i++) {
     if (num_trigger_flush > 0 && i != 0 && i % num_trigger_flush == 0) {
-      db->Flush(FlushOptions());
+      ASSERT_OK(db->Flush(FlushOptions()));
     }

     int type = rnd->Uniform(2);
@@ -156,6 +156,7 @@ void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,
       if (map.find(key) == map.end()) {
         ASSERT_TRUE(status.IsNotFound());
       } else {
+        ASSERT_OK(status);
         ASSERT_EQ(map[key], result);
       }
       break;
28 changes: 14 additions & 14 deletions db/db_bloom_filter_test.cc
@@ -654,7 +654,7 @@ TEST_P(DBBloomFilterTestWithParam, SkipFilterOnEssentiallyZeroBpk) {
     for (i = 0; i < maxKey; i++) {
      ASSERT_OK(Put(Key(i), Key(i)));
     }
-    Flush();
+    ASSERT_OK(Flush());
   };
   auto GetFn = [&]() {
     int i;
@@ -792,7 +792,7 @@ TEST_F(DBBloomFilterTest, BloomFilterRate) {
     }
     // Add a large key to make the file contain wide range
     ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555)));
-    Flush(1);
+    ASSERT_OK(Flush(1));

     // Check if they can be found
     for (int i = 0; i < maxKey; i++) {
@@ -1696,7 +1696,7 @@ TEST_F(DBBloomFilterTest, ContextCustomFilterPolicy) {
     table_options.format_version = 5;
     options.table_factory.reset(NewBlockBasedTableFactory(table_options));

-    TryReopen(options);
+    ASSERT_OK(TryReopen(options));
     CreateAndReopenWithCF({fifo ? "abe" : "bob"}, options);

     const int maxKey = 10000;
@@ -1705,15 +1705,15 @@
     }
     // Add a large key to make the file contain wide range
     ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555)));
-    Flush(1);
+    ASSERT_OK(Flush(1));
    EXPECT_EQ(policy->DumpTestReport(),
              fifo ? "cf=abe,s=kCompactionStyleFIFO,n=7,l=0,b=0,r=kFlush\n"
                   : "cf=bob,s=kCompactionStyleLevel,n=7,l=0,b=0,r=kFlush\n");

     for (int i = maxKey / 2; i < maxKey; i++) {
       ASSERT_OK(Put(1, Key(i), Key(i)));
     }
-    Flush(1);
+    ASSERT_OK(Flush(1));
    EXPECT_EQ(policy->DumpTestReport(),
              fifo ? "cf=abe,s=kCompactionStyleFIFO,n=7,l=0,b=0,r=kFlush\n"
                   : "cf=bob,s=kCompactionStyleLevel,n=7,l=0,b=0,r=kFlush\n");
@@ -2261,7 +2261,7 @@ TEST_P(BloomStatsTestWithParam, BloomStatsTest) {
   ASSERT_EQ(0, get_perf_context()->bloom_sst_hit_count);
   ASSERT_EQ(0, get_perf_context()->bloom_sst_miss_count);

-  Flush();
+  ASSERT_OK(Flush());

   // sanity checks
   ASSERT_EQ(0, get_perf_context()->bloom_sst_hit_count);
@@ -2311,7 +2311,7 @@ TEST_P(BloomStatsTestWithParam, BloomStatsTestWithIter) {
   ASSERT_EQ(1, get_perf_context()->bloom_memtable_miss_count);
   ASSERT_EQ(2, get_perf_context()->bloom_memtable_hit_count);

-  Flush();
+  ASSERT_OK(Flush());

   iter.reset(dbfull()->NewIterator(ReadOptions()));

@@ -2379,7 +2379,7 @@ void PrefixScanInit(DBBloomFilterTest* dbtest) {
     snprintf(buf, sizeof(buf), "%02d______:end", i + 1);
     keystr = std::string(buf);
     ASSERT_OK(dbtest->Put(keystr, keystr));
-    dbtest->Flush();
+    ASSERT_OK(dbtest->Flush());
   }

   // GROUP 2
@@ -2390,7 +2390,7 @@
     snprintf(buf, sizeof(buf), "%02d______:end", small_range_sstfiles + i + 1);
     keystr = std::string(buf);
     ASSERT_OK(dbtest->Put(keystr, keystr));
-    dbtest->Flush();
+    ASSERT_OK(dbtest->Flush());
   }
 }
 }  // anonymous namespace
@@ -2853,7 +2853,7 @@ TEST_F(DBBloomFilterTest, DynamicBloomFilterMultipleSST) {
   ASSERT_OK(Put("foo", "bar"));
   ASSERT_OK(Put("foq1", "bar1"));
   ASSERT_OK(Put("fpa", "0"));
-  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(dbfull()->Flush(FlushOptions()));
   std::unique_ptr<Iterator> iter_old(db_->NewIterator(read_options));
   ASSERT_EQ(CountIter(iter_old, "foo"), 4);
   EXPECT_EQ(PopTicker(options, NON_LAST_LEVEL_SEEK_FILTERED), 0);
@@ -2981,7 +2981,7 @@ TEST_F(DBBloomFilterTest, DynamicBloomFilterNewColumnFamily) {
   ASSERT_OK(Put(2, "foo5", "bar5"));
   ASSERT_OK(Put(2, "foq6", "bar6"));
   ASSERT_OK(Put(2, "fpq7", "bar7"));
-  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(dbfull()->Flush(FlushOptions()));
   {
     std::unique_ptr<Iterator> iter(
         db_->NewIterator(read_options, handles_[2]));
@@ -3031,17 +3031,17 @@ TEST_F(DBBloomFilterTest, DynamicBloomFilterOptions) {
   ASSERT_OK(Put("foo", "bar"));
   ASSERT_OK(Put("foo1", "bar1"));
   ASSERT_OK(Put("fpa", "0"));
-  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(dbfull()->Flush(FlushOptions()));
   ASSERT_OK(Put("foo3", "bar3"));
   ASSERT_OK(Put("foo4", "bar4"));
   ASSERT_OK(Put("foo5", "bar5"));
   ASSERT_OK(Put("fpb", "1"));
-  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(dbfull()->Flush(FlushOptions()));
   ASSERT_OK(Put("foo6", "bar6"));
   ASSERT_OK(Put("foo7", "bar7"));
   ASSERT_OK(Put("foo8", "bar8"));
   ASSERT_OK(Put("fpc", "2"));
-  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(dbfull()->Flush(FlushOptions()));

   ReadOptions read_options;
   read_options.prefix_same_as_start = true;
2 changes: 1 addition & 1 deletion db/db_compaction_test.cc
@@ -9792,7 +9792,7 @@ TEST_F(DBCompactionTest, NumberOfSubcompactions) {
   SubCompactionEventListener* listener = new SubCompactionEventListener();
   options.listeners.clear();
   options.listeners.emplace_back(listener);
-  TryReopen(options);
+  ASSERT_OK(TryReopen(options));

   for (int file = 0; file < kLevel0CompactTrigger; ++file) {
     for (int key = file; key < 2 * kNumKeyPerFile; key += 2) {
2 changes: 1 addition & 1 deletion db/db_flush_test.cc
@@ -222,7 +222,7 @@ TEST_F(DBFlushTest, CloseDBWhenFlushInLowPri) {
   sleeping_task_low.WaitUntilDone();
   ASSERT_EQ(0, num_flushes);

-  TryReopenWithColumnFamilies({"default", "cf1", "cf2"}, options);
+  ASSERT_OK(TryReopenWithColumnFamilies({"default", "cf1", "cf2"}, options));
   ASSERT_OK(Put(0, "key3", DummyString(8192)));
   ASSERT_OK(Flush(0));
   ASSERT_EQ(1, num_flushes);
2 changes: 2 additions & 0 deletions db/db_impl/db_impl_secondary.cc
@@ -986,6 +986,8 @@ Status DB::OpenAndCompact(
   delete db;
   if (s.ok()) {
     return serialization_status;
+  } else {
+    serialization_status.PermitUncheckedError();
   }
   return s;
 }
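This is the PR's one non-test change. OpenAndCompact() holds two statuses but can return only one: when s is already an error, serialization_status is deliberately discarded, and under ASSERT_STATUS_CHECKED=1 a discarded status must be dismissed explicitly. A sketch of the resulting control flow, reusing the toy Status class from the top of this page (OpenAndCompactSketch is a made-up name; the real logic is inline in DB::OpenAndCompact):

// Exactly one of the two statuses reaches the caller; the other must be
// explicitly dismissed, or its destructor would fire the unchecked-status
// assertion.
Status OpenAndCompactSketch(Status s, Status serialization_status) {
  if (s.ok()) {
    // s has been inspected by ok(); propagate the serialization result.
    return serialization_status;
  } else {
    // s is the primary error and is returned below, so the serialization
    // result is consciously dropped rather than silently leaked.
    serialization_status.PermitUncheckedError();
  }
  return s;
}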
2 changes: 1 addition & 1 deletion db/db_options_test.cc
@@ -1308,7 +1308,7 @@ TEST_F(DBOptionsTest, TempOptionsFailTest) {
       [&](void* /*arg*/) { fs->SetFilesystemActive(true); });

   SyncPoint::GetInstance()->EnableProcessing();
-  TryReopen(options);
+  ASSERT_NOK(TryReopen(options));
   SyncPoint::GetInstance()->DisableProcessing();

   std::vector<std::string> filenames;
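The direction of the check matters here: in TempOptionsFailTest a sync point injects a filesystem error, so the reopen is supposed to fail and the right assertion is ASSERT_NOK, not ASSERT_OK. Roughly, in gtest terms (the exact definitions live in RocksDB's test harness, test_util/testharness.h):

// Approximate expansions; the real macros also print the status message.
// ASSERT_OK(s)  ~ ASSERT_TRUE((s).ok())
// ASSERT_NOK(s) ~ ASSERT_FALSE((s).ok())
// Either form inspects the Status, which also marks it as checked.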
2 changes: 1 addition & 1 deletion db/db_statistics_test.cc
@@ -219,7 +219,7 @@ TEST_F(DBStatisticsTest, VerifyChecksumReadStat) {
   ASSERT_OK(Flush());
   std::unordered_map<std::string, uint64_t> table_files;
   uint64_t table_files_size = 0;
-  GetAllDataFiles(kTableFile, &table_files, &table_files_size);
+  ASSERT_OK(GetAllDataFiles(kTableFile, &table_files, &table_files_size));

   {
     // Scenario 1: Table verified in `VerifyFileChecksums()`. This should read
26 changes: 13 additions & 13 deletions db/db_test.cc
@@ -697,7 +697,7 @@ TEST_F(DBTest, ReadFromPersistedTier) {

   // 3rd round: delete and flush
   ASSERT_OK(db_->Delete(wopt, handles_[1], "foo"));
-  Flush(1);
+  ASSERT_OK(Flush(1));
   ASSERT_OK(db_->Delete(wopt, handles_[1], "bar"));

   ASSERT_TRUE(db_->Get(ropt, handles_[1], "foo", &value).IsNotFound());
@@ -860,7 +860,7 @@ TEST_F(DBTest, DISABLED_VeryLargeValue) {
   ASSERT_EQ('w', value[0]);

   // Compact all files.
-  Flush();
+  ASSERT_OK(Flush());
   db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);

   // Check DB is not in read-only state.
@@ -1300,7 +1300,7 @@ TEST_F(DBTest, MetaDataTest) {
   options.disable_auto_compactions = true;

   int64_t temp_time = 0;
-  options.env->GetCurrentTime(&temp_time);
+  ASSERT_OK(options.env->GetCurrentTime(&temp_time));
   uint64_t start_time = static_cast<uint64_t>(temp_time);

   DestroyAndReopen(options);
@@ -1329,7 +1329,7 @@ TEST_F(DBTest, MetaDataTest) {
   std::vector<std::vector<FileMetaData>> files_by_level;
   dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files_by_level);

-  options.env->GetCurrentTime(&temp_time);
+  ASSERT_OK(options.env->GetCurrentTime(&temp_time));
   uint64_t end_time = static_cast<uint64_t>(temp_time);

   ColumnFamilyMetaData cf_meta;
@@ -3648,7 +3648,7 @@ TEST_F(DBTest, BlockBasedTablePrefixHashIndexTest) {
   ASSERT_OK(Put("kk2", "v2"));
   ASSERT_OK(Put("kk", "v3"));
   ASSERT_OK(Put("k", "v4"));
-  Flush();
+  ASSERT_OK(Flush());

   ASSERT_EQ("v1", Get("kk1"));
   ASSERT_EQ("v2", Get("kk2"));
@@ -4280,8 +4280,8 @@ TEST_F(DBTest, ConcurrentMemtableNotSupported) {
   options.soft_pending_compaction_bytes_limit = 0;
   options.hard_pending_compaction_bytes_limit = 100;
   options.create_if_missing = true;
-
-  DestroyDB(dbname_, options);
+  Close();
+  ASSERT_OK(DestroyDB(dbname_, options));
   options.memtable_factory.reset(NewHashLinkListRepFactory(4, 0, 3, true, 4));
   ASSERT_NOK(TryReopen(options));

@@ -4622,7 +4622,7 @@ TEST_F(DBTest, GetThreadStatus) {
   Options options;
   options.env = env_;
   options.enable_thread_tracking = true;
-  TryReopen(options);
+  ASSERT_OK(TryReopen(options));

   std::vector<ThreadStatus> thread_list;
   Status s = env_->GetThreadList(&thread_list);
@@ -4693,7 +4693,7 @@ TEST_F(DBTest, DisableThreadStatus) {
   Options options;
   options.env = env_;
   options.enable_thread_tracking = false;
-  TryReopen(options);
+  ASSERT_OK(TryReopen(options));
   CreateAndReopenWithCF({"pikachu", "about-to-remove"}, options);
   // Verify non of the column family info exists
   env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
@@ -4902,7 +4902,7 @@ TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) {
   options.level0_slowdown_writes_trigger = 1 << 10;
   options.max_subcompactions = max_subcompactions_;

-  TryReopen(options);
+  ASSERT_OK(TryReopen(options));
   Random rnd(301);

   std::vector<ThreadStatus> thread_list;
@@ -4991,7 +4991,7 @@ TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) {
   options.level0_slowdown_writes_trigger = 1 << 10;
   options.max_subcompactions = max_subcompactions_;

-  TryReopen(options);
+  ASSERT_OK(TryReopen(options));
   Random rnd(301);

   std::vector<ThreadStatus> thread_list;
@@ -7206,14 +7206,14 @@ TEST_F(DBTest, CreationTimeOfOldestFile) {
   int idx = 0;

   int64_t time_1 = 0;
-  env_->GetCurrentTime(&time_1);
+  ASSERT_OK(env_->GetCurrentTime(&time_1));
   const uint64_t uint_time_1 = static_cast<uint64_t>(time_1);

   // Add 50 hours
   env_->MockSleepForSeconds(50 * 60 * 60);

   int64_t time_2 = 0;
-  env_->GetCurrentTime(&time_2);
+  ASSERT_OK(env_->GetCurrentTime(&time_2));
   const uint64_t uint_time_2 = static_cast<uint64_t>(time_2);

   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
14 changes: 7 additions & 7 deletions db/db_test2.cc
@@ -266,7 +266,7 @@ TEST_F(DBTest2, CacheIndexAndFilterWithDBRestart) {
   ASSERT_OK(Put(1, "a", "begin"));
   ASSERT_OK(Put(1, "z", "end"));
   ASSERT_OK(Flush(1));
-  TryReopenWithColumnFamilies({"default", "pikachu"}, options);
+  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));

   std::string value;
   value = Get(1, "a");
@@ -357,10 +357,10 @@ TEST_P(DBTestSharedWriteBufferAcrossCFs, SharedWriteBufferAcrossCFs) {
   // are newer CFs created.
   flush_listener->expected_flush_reason = FlushReason::kManualFlush;
   ASSERT_OK(Put(3, Key(1), DummyString(1), wo));
-  Flush(3);
+  ASSERT_OK(Flush(3));
   ASSERT_OK(Put(3, Key(1), DummyString(1), wo));
   ASSERT_OK(Put(0, Key(1), DummyString(1), wo));
-  Flush(0);
+  ASSERT_OK(Flush(0));
   ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
             static_cast<uint64_t>(1));
   ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
@@ -2068,7 +2068,7 @@ class PinL0IndexAndFilterBlocksTest
   // reset block cache
   table_options.block_cache = NewLRUCache(64 * 1024);
   options->table_factory.reset(NewBlockBasedTableFactory(table_options));
-  TryReopenWithColumnFamilies({"default", "pikachu"}, *options);
+  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, *options));
   // create new table at L0
   ASSERT_OK(Put(1, "a2", "begin2"));
   ASSERT_OK(Put(1, "z2", "end2"));
@@ -2188,7 +2188,7 @@ TEST_P(PinL0IndexAndFilterBlocksTest, DisablePrefetchingNonL0IndexAndFilter) {
   // Reopen database. If max_open_files is set as -1, table readers will be
   // preloaded. This will trigger a BlockBasedTable::Open() and prefetch
   // L0 index and filter. Level 1's prefetching is disabled in DB::Open()
-  TryReopenWithColumnFamilies({"default", "pikachu"}, options);
+  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));

   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();

@@ -3804,7 +3804,7 @@ TEST_F(DBTest2, MemtableOnlyIterator) {
   ASSERT_EQ(2, count);
   delete it;

-  Flush(1);
+  ASSERT_OK(Flush(1));

   // After flushing
   // point lookups
@@ -4112,7 +4112,7 @@ TEST_F(DBTest2, LiveFilesOmitObsoleteFiles) {
   ASSERT_OK(Put("key", "val"));
   FlushOptions flush_opts;
   flush_opts.wait = false;
-  db_->Flush(flush_opts);
+  ASSERT_OK(db_->Flush(flush_opts));
   TEST_SYNC_POINT("DBTest2::LiveFilesOmitObsoleteFiles:FlushTriggered");

   ASSERT_OK(db_->DisableFileDeletions());