diff --git a/dbms/src/Debug/dbgFuncMisc.cpp b/dbms/src/Debug/dbgFuncMisc.cpp
index 374e6cd4007..105c974716c 100644
--- a/dbms/src/Debug/dbgFuncMisc.cpp
+++ b/dbms/src/Debug/dbgFuncMisc.cpp
@@ -8,28 +8,73 @@
 namespace DB
 {
+inline size_t getThreadIdForLog(const String & line)
+{
+    auto sub_line = line.substr(line.find("thread_id="));
+    std::regex rx(R"((0|[1-9][0-9]*))");
+    std::smatch m;
+    if (regex_search(sub_line, m, rx))
+        return std::stoi(m[1]);
+    else
+        return 0;
+}
+
+// Usage example:
+// The first argument is the key you want to search.
+// For example, we want to search the key 'RSFilter exclude rate' in the log file, and get the value following it.
+// So we can use it as the first argument.
+// But many kinds of threads can print this keyword,
+// so we can use the second argument to specify a keyword that may just be printed by a specific kind of thread.
+// Here we use 'Rough set filter' to specify that we just want to search the read thread.
+// And the complete command is the following:
+// DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
+// TODO: this is still a hacky way to test, but we cannot think of a better way for now.
 void dbgFuncSearchLogForKey(Context & context, const ASTs & args, DBGInvoker::Printer output)
 {
-    if (args.size() < 1)
-        throw Exception("Args not matched, should be: key", ErrorCodes::BAD_ARGUMENTS);
+    if (args.size() < 2)
+        throw Exception("Args not matched, should be: key, thread_hint", ErrorCodes::BAD_ARGUMENTS);
 
     String key = safeGet<String>(typeid_cast<const ASTLiteral &>(*args[0]).value);
+    // the candidate line must be printed by a thread which also prints a line containing `thread_hint`
+    String thread_hint = safeGet<String>(typeid_cast<const ASTLiteral &>(*args[1]).value);
     auto log_path = context.getConfigRef().getString("logger.log");
 
     std::ifstream file(log_path);
-    std::vector<String> line_candidates;
-    String line;
-    while (std::getline(file, line))
+    // get the lines containing `thread_hint` and `key`
+    std::vector<String> thread_hint_line_candidates;
+    std::vector<String> key_line_candidates;
     {
-        if ((line.find(key) != String::npos) && (line.find("DBGInvoke") == String::npos))
-            line_candidates.emplace_back(line);
+        String line;
+        while (std::getline(file, line))
+        {
+            if ((line.find(thread_hint) != String::npos) && (line.find("DBGInvoke") == String::npos))
+                thread_hint_line_candidates.emplace_back(line);
+            else if ((line.find(key) != String::npos) && (line.find("DBGInvoke") == String::npos))
+                key_line_candidates.emplace_back(line);
+        }
     }
-    if (line_candidates.empty())
+    // get the target thread id
+    if (thread_hint_line_candidates.empty() || key_line_candidates.empty())
     {
         output("Invalid");
         return;
     }
-    auto & target_line = line_candidates.back();
+    size_t target_thread_id = getThreadIdForLog(thread_hint_line_candidates.back());
+    if (target_thread_id == 0)
+    {
+        output("Invalid");
+        return;
+    }
+    String target_line;
+    for (auto iter = key_line_candidates.rbegin(); iter != key_line_candidates.rend(); iter++)
+    {
+        if (getThreadIdForLog(*iter) == target_thread_id)
+        {
+            target_line = *iter;
+            break;
+        }
+    }
+    // try to parse the first number following the key
     auto sub_line = target_line.substr(target_line.find(key));
     std::regex rx(R"([+-]?([0-9]+([.][0-9]*)?|[.][0-9]+))");
     std::smatch m;
diff --git a/dbms/src/Storages/DeltaMerge/Delta/DeltaPackFile.cpp b/dbms/src/Storages/DeltaMerge/Delta/DeltaPackFile.cpp
index 740dd71c08e..69bb30c238f 100644
--- a/dbms/src/Storages/DeltaMerge/Delta/DeltaPackFile.cpp
+++ b/dbms/src/Storages/DeltaMerge/Delta/DeltaPackFile.cpp
@@ -22,7 +22,7 @@ void DeltaPackFile::calculateStat(const DMContext & context)
     auto hash_salt = context.hash_salt;
     auto pack_filter
-        = DMFilePackFilter::loadFrom(file, index_cache, hash_salt, {segment_range}, EMPTY_FILTER, {}, context.db_context.getFileProvider(), context.getReadLimiter());
+        = DMFilePackFilter::loadFrom(file, index_cache, hash_salt, /*set_cache_if_miss*/ false, {segment_range}, EMPTY_FILTER, {}, context.db_context.getFileProvider(), context.getReadLimiter());
     std::tie(valid_rows, valid_bytes) = pack_filter.validRowsAndBytes();
 }
diff --git a/dbms/src/Storages/DeltaMerge/DeltaMergeStore.cpp b/dbms/src/Storages/DeltaMerge/DeltaMergeStore.cpp
index ae920ac91a6..231191beb55 100644
--- a/dbms/src/Storages/DeltaMerge/DeltaMergeStore.cpp
+++ b/dbms/src/Storages/DeltaMerge/DeltaMergeStore.cpp
@@ -1553,7 +1553,7 @@ UInt64 DeltaMergeStore::onSyncGc(Int64 limit)
         }
         assert(segment != nullptr);
 
-        if (segment->hasAbandoned() || segment->getLastCheckGCSafePoint() >= gc_safe_point || segment_snap == nullptr)
+        if (segment->hasAbandoned() || segment_snap == nullptr)
             continue;
 
         const auto segment_id = segment->segmentId();
@@ -1562,43 +1562,52 @@ UInt64 DeltaMergeStore::onSyncGc(Int64 limit)
         // meet empty segment, try merge it
         if (segment_snap->getRows() == 0)
         {
+            // release segment_snap before checkSegmentUpdate, otherwise this segment is still in update status.
+            segment_snap = {};
             checkSegmentUpdate(dm_context, segment, ThreadType::BG_GC);
             continue;
         }
 
-        // Avoid recheck this segment when gc_safe_point doesn't change regardless whether we trigger this segment's DeltaMerge or not.
-        // Because after we calculate StableProperty and compare it with this gc_safe_point,
-        // there is no need to recheck it again using the same gc_safe_point.
-        // On the other hand, if it should do DeltaMerge using this gc_safe_point, and the DeltaMerge is interruptted by other process,
-        // it's still worth to wait another gc_safe_point to check this segment again.
-        segment->setLastCheckGCSafePoint(gc_safe_point);
-        dm_context->min_version = gc_safe_point;
-
-        // calculate StableProperty if needed
-        if (!segment->getStable()->isStablePropertyCached())
-            segment->getStable()->calculateStableProperty(*dm_context, segment_range, isCommonHandle());
-
         try
         {
             // Check whether we should apply gc on this segment
-            const bool should_compact
-                = GC::shouldCompactStable(
-                      segment,
-                      gc_safe_point,
-                      global_context.getSettingsRef().dt_bg_gc_ratio_threhold_to_trigger_gc,
-                      log)
-                || GC::shouldCompactDeltaWithStable(
-                    *dm_context,
-                    segment_snap,
-                    segment_range,
-                    global_context.getSettingsRef().dt_bg_gc_delta_delete_ratio_to_trigger_gc,
-                    log);
+            bool should_compact = false;
+            if (GC::shouldCompactDeltaWithStable(
+                    *dm_context,
+                    segment_snap,
+                    segment_range,
+                    global_context.getSettingsRef().dt_bg_gc_delta_delete_ratio_to_trigger_gc,
+                    log))
+            {
+                should_compact = true;
+            }
+            else if (segment->getLastCheckGCSafePoint() < gc_safe_point)
+            {
+                // Avoid recheck this segment when gc_safe_point doesn't change regardless whether we trigger this segment's DeltaMerge or not.
+                // Because after we calculate StableProperty and compare it with this gc_safe_point,
+                // there is no need to recheck it again using the same gc_safe_point.
+                // On the other hand, if it should do DeltaMerge using this gc_safe_point, and the DeltaMerge is interruptted by other process,
+                // it's still worth to wait another gc_safe_point to check this segment again.
+                segment->setLastCheckGCSafePoint(gc_safe_point);
+                dm_context->min_version = gc_safe_point;
+
+                // calculate StableProperty if needed
+                if (!segment->getStable()->isStablePropertyCached())
+                    segment->getStable()->calculateStableProperty(*dm_context, segment_range, isCommonHandle());
+
+                should_compact = GC::shouldCompactStable(
+                    segment,
+                    gc_safe_point,
+                    global_context.getSettingsRef().dt_bg_gc_ratio_threhold_to_trigger_gc,
+                    log);
+            }
             bool finish_gc_on_segment = false;
             if (should_compact)
             {
                 if (segment = segmentMergeDelta(*dm_context, segment, TaskRunThread::BackgroundGCThread, segment_snap); segment)
                 {
                     // Continue to check whether we need to apply more tasks on this segment
+                    segment_snap = {};
                     checkSegmentUpdate(dm_context, segment, ThreadType::BG_GC);
                     gc_segments_num++;
                     finish_gc_on_segment = true;
diff --git a/dbms/src/Storages/DeltaMerge/File/DMFilePackFilter.h b/dbms/src/Storages/DeltaMerge/File/DMFilePackFilter.h
index 4839c5a1268..11565281b0e 100644
--- a/dbms/src/Storages/DeltaMerge/File/DMFilePackFilter.h
+++ b/dbms/src/Storages/DeltaMerge/File/DMFilePackFilter.h
@@ -28,13 +28,14 @@ class DMFilePackFilter
     static DMFilePackFilter loadFrom(const DMFilePtr & dmfile,
                                      const MinMaxIndexCachePtr & index_cache,
                                      UInt64 hash_salt,
+                                     bool set_cache_if_miss,
                                      const RowKeyRanges & rowkey_ranges,
                                      const RSOperatorPtr & filter,
                                      const IdSetPtr & read_packs,
                                      const FileProviderPtr & file_provider,
                                      const ReadLimiterPtr & read_limiter)
     {
-        auto pack_filter = DMFilePackFilter(dmfile, index_cache, hash_salt, rowkey_ranges, filter, read_packs, file_provider, read_limiter);
+        auto pack_filter = DMFilePackFilter(dmfile, index_cache, hash_salt, set_cache_if_miss, rowkey_ranges, filter, read_packs, file_provider, read_limiter);
         pack_filter.init();
         return pack_filter;
     }
@@ -87,6 +88,7 @@ class DMFilePackFilter
     DMFilePackFilter(const DMFilePtr & dmfile_,
                      const MinMaxIndexCachePtr & index_cache_,
                      UInt64 hash_salt_,
+                     bool set_cache_if_miss_,
                      const RowKeyRanges & rowkey_ranges_, // filter by handle range
                      const RSOperatorPtr & filter_, // filter by push down where clause
                      const IdSetPtr & read_packs_, // filter by pack index
@@ -95,6 +97,7 @@ class DMFilePackFilter
         : dmfile(dmfile_)
         , index_cache(index_cache_)
         , hash_salt(hash_salt_)
+        , set_cache_if_miss(set_cache_if_miss_)
         , rowkey_ranges(rowkey_ranges_)
         , filter(filter_)
         , read_packs(read_packs_)
@@ -200,6 +203,7 @@ class DMFilePackFilter
         const DMFilePtr & dmfile,
         const FileProviderPtr & file_provider,
         const MinMaxIndexCachePtr & index_cache,
+        bool set_cache_if_miss,
         ColId col_id,
         const ReadLimiterPtr & read_limiter)
     {
@@ -207,13 +211,16 @@ class DMFilePackFilter
         const auto file_name_base = DMFile::getFileNameBase(col_id);
 
         auto load = [&]() {
+            auto index_file_size = dmfile->colIndexSize(file_name_base);
+            if (index_file_size == 0)
+                return std::make_shared<MinMaxIndex>(*type);
             if (!dmfile->configuration)
             {
                 auto index_buf = ReadBufferFromFileProvider(
                     file_provider,
                     dmfile->colIndexPath(file_name_base),
                     dmfile->encryptionIndexPath(file_name_base),
-                    std::min(static_cast<size_t>(DBMS_DEFAULT_BUFFER_SIZE), dmfile->colIndexSize(file_name_base)),
+                    std::min(static_cast<size_t>(DBMS_DEFAULT_BUFFER_SIZE), index_file_size),
                     read_limiter);
                 index_buf.seek(dmfile->colIndexOffset(file_name_base));
                 return MinMaxIndex::read(*type, index_buf, dmfile->colIndexSize(file_name_base));
@@ -228,21 +235,24 @@ class DMFilePackFilter
                     dmfile->configuration->getChecksumAlgorithm(),
                     dmfile->configuration->getChecksumFrameLength());
                 index_buf->seek(dmfile->colIndexOffset(file_name_base));
-                auto file_size = dmfile->colIndexSize(file_name_base);
                 auto header_size = dmfile->configuration->getChecksumHeaderLength();
                 auto frame_total_size = dmfile->configuration->getChecksumFrameLength();
-                auto frame_count = file_size / frame_total_size + (file_size % frame_total_size != 0);
-                return MinMaxIndex::read(*type, *index_buf, file_size - header_size * frame_count);
+                auto frame_count = index_file_size / frame_total_size + (index_file_size % frame_total_size != 0);
+                return MinMaxIndex::read(*type, *index_buf, index_file_size - header_size * frame_count);
             }
         };
         MinMaxIndexPtr minmax_index;
-        if (index_cache)
+        if (index_cache && set_cache_if_miss)
         {
             minmax_index = index_cache->getOrSet(dmfile->colIndexCacheKey(file_name_base), load);
         }
         else
        {
-            minmax_index = load();
+            // try to load from the cache first
+            if (index_cache)
+                minmax_index = index_cache->get(dmfile->colIndexCacheKey(file_name_base));
+            if (!minmax_index)
+                minmax_index = load();
         }
         indexes.emplace(col_id, RSIndex(type, minmax_index));
     }
@@ -255,13 +265,14 @@ class DMFilePackFilter
         if (!dmfile->isColIndexExist(col_id))
             return;
 
-        loadIndex(param.indexes, dmfile, file_provider, index_cache, col_id, read_limiter);
+        loadIndex(param.indexes, dmfile, file_provider, index_cache, set_cache_if_miss, col_id, read_limiter);
     }
 
 private:
     DMFilePtr dmfile;
     MinMaxIndexCachePtr index_cache;
     UInt64 hash_salt;
+    bool set_cache_if_miss;
     RowKeyRanges rowkey_ranges;
     RSOperatorPtr filter;
     IdSetPtr read_packs;
diff --git a/dbms/src/Storages/DeltaMerge/File/DMFileReader.cpp b/dbms/src/Storages/DeltaMerge/File/DMFileReader.cpp
index 430b4877bee..84fef4e7827 100644
--- a/dbms/src/Storages/DeltaMerge/File/DMFileReader.cpp
+++ b/dbms/src/Storages/DeltaMerge/File/DMFileReader.cpp
@@ -208,7 +208,7 @@ DMFileReader::DMFileReader(
     , read_columns(read_columns_)
    , enable_clean_read(enable_clean_read_)
     , max_read_version(max_read_version_)
-    , pack_filter(dmfile_, index_cache_, hash_salt_, rowkey_ranges_, filter_, read_packs_, file_provider_, read_limiter)
+    , pack_filter(dmfile_, index_cache_, hash_salt_, /*set_cache_if_miss*/ true, rowkey_ranges_, filter_, read_packs_, file_provider_, read_limiter)
     , handle_res(pack_filter.getHandleRes())
     , use_packs(pack_filter.getUsePacks())
     , skip_packs_by_column(read_columns.size(), 0)
diff --git a/dbms/src/Storages/DeltaMerge/File/DMFileWriter.cpp b/dbms/src/Storages/DeltaMerge/File/DMFileWriter.cpp
index f1a8b64d3af..e3bda0d0137 100644
--- a/dbms/src/Storages/DeltaMerge/File/DMFileWriter.cpp
+++ b/dbms/src/Storages/DeltaMerge/File/DMFileWriter.cpp
@@ -107,6 +107,7 @@ void DMFileWriter::addStreams(ColId col_id, DataTypePtr type, bool do_index)
 
 void DMFileWriter::write(const Block & block, const BlockProperty & block_property)
 {
+    is_empty_file = false;
     DMFile::PackStat stat;
     stat.rows = block.rows();
     stat.not_clean = block_property.not_clean_rows;
@@ -114,17 +115,17 @@ void DMFileWriter::write(const Block & block, const BlockProperty & block_property)
 
     auto del_mark_column = tryGetByColumnId(block, TAG_COLUMN_ID).column;
 
-    const ColumnVector<UInt8> * del_mark = !del_mark_column ? nullptr : (const ColumnVector<UInt8> *)del_mark_column.get();
+    const ColumnVector<UInt8> * del_mark = !del_mark_column ? nullptr : static_cast<const ColumnVector<UInt8> *>(del_mark_column.get());
 
     for (auto & cd : write_columns)
     {
-        auto & col = getByColumnId(block, cd.id).column;
+        const auto & col = getByColumnId(block, cd.id).column;
         writeColumn(cd.id, *cd.type, *col, del_mark);
 
         if (cd.id == VERSION_COLUMN_ID)
             stat.first_version = col->get64(0);
         else if (cd.id == TAG_COLUMN_ID)
-            stat.first_tag = (UInt8)(col->get64(0));
+            stat.first_tag = static_cast<UInt8>(col->get64(0));
     }
 
     if (!options.flags.isSingleFile())
@@ -345,7 +346,8 @@ void DMFileWriter::finalizeColumn(ColId col_id, DataTypePtr type)
                 dmfile->encryptionIndexPath(stream_name),
                 false,
                 write_limiter);
-            stream->minmaxes->write(*type, buf);
+            if (!is_empty_file)
+                stream->minmaxes->write(*type, buf);
             buf.sync();
             bytes_written += buf.getMaterializedBytes();
         }
@@ -358,7 +360,8 @@ void DMFileWriter::finalizeColumn(ColId col_id, DataTypePtr type)
                 write_limiter,
                 dmfile->configuration->getChecksumAlgorithm(),
                 dmfile->configuration->getChecksumFrameLength());
-            stream->minmaxes->write(*type, *buf);
+            if (!is_empty_file)
+                stream->minmaxes->write(*type, *buf);
             buf->sync();
             bytes_written += buf->getMaterializedBytes();
 #ifndef NDEBUG
diff --git a/dbms/src/Storages/DeltaMerge/File/DMFileWriter.h b/dbms/src/Storages/DeltaMerge/File/DMFileWriter.h
index a4a5c481eb5..22f80e5ea0a 100644
--- a/dbms/src/Storages/DeltaMerge/File/DMFileWriter.h
+++ b/dbms/src/Storages/DeltaMerge/File/DMFileWriter.h
@@ -235,6 +235,9 @@ class DMFileWriter
 
     FileProviderPtr file_provider;
     WriteLimiterPtr write_limiter;
+
+    // used to avoid writing index data for an empty file
+    bool is_empty_file = true;
 };
 
 } // namespace DM
diff --git a/dbms/src/Storages/DeltaMerge/StableValueSpace.cpp b/dbms/src/Storages/DeltaMerge/StableValueSpace.cpp
index a2aab1208f9..c2aa945eda1 100644
--- a/dbms/src/Storages/DeltaMerge/StableValueSpace.cpp
+++ b/dbms/src/Storages/DeltaMerge/StableValueSpace.cpp
@@ -39,6 +39,7 @@ void StableValueSpace::setFiles(const DMFiles & files_, const RowKeyRange & rang
         auto pack_filter = DMFilePackFilter::loadFrom(file,
                                                       index_cache,
                                                       hash_salt,
+                                                      /*set_cache_if_miss*/ true,
                                                       {range},
                                                       EMPTY_FILTER,
                                                       {},
@@ -228,6 +229,7 @@ void StableValueSpace::calculateStableProperty(const DMContext & context, const
         auto pack_filter = DMFilePackFilter::loadFrom(file,
                                                       context.db_context.getGlobalContext().getMinMaxIndexCache(),
                                                       context.hash_salt,
+                                                      /*set_cache_if_miss*/ false,
                                                       {rowkey_range},
                                                       EMPTY_FILTER,
                                                       {},
@@ -346,16 +348,15 @@ RowsAndBytes StableValueSpace::Snapshot::getApproxRowsAndBytes(const DMContext &
     size_t match_packs = 0;
     size_t total_match_rows = 0;
     size_t total_match_bytes = 0;
-    // Usually, this method will be called for some "cold" key ranges. Loading the index
-    // into cache may pollute the cache and make the hot index cache invalid. Set the
-    // index cache to nullptr so that the cache won't be polluted.
-    // TODO: We can use the cache if the index happens to exist in the cache, but
-    // don't refill the cache if the index does not exist.
+    // Usually, this method will be called for some "cold" key ranges.
+    // Loading the index into cache may pollute the cache and make the hot index cache invalid.
+    // So don't refill the cache if the index does not exist.
     for (auto & f : stable->files)
     {
         auto filter = DMFilePackFilter::loadFrom(f, //
-                                                 nullptr,
+                                                 context.db_context.getGlobalContext().getMinMaxIndexCache(),
                                                  context.hash_salt,
+                                                 /*set_cache_if_miss*/ false,
                                                  {range},
                                                  RSOperatorPtr{},
                                                  IdSetPtr{},
diff --git a/tests/delta-merge-test/query/misc/timestamp_rough_set_filter.test b/tests/delta-merge-test/query/misc/timestamp_rough_set_filter.test
index 188b73ae65e..c7df348a58c 100644
--- a/tests/delta-merge-test/query/misc/timestamp_rough_set_filter.test
+++ b/tests/delta-merge-test/query/misc/timestamp_rough_set_filter.test
@@ -40,7 +40,7 @@
 
 => DBGInvoke dag('select * from default.test where col_2 < cast_string_datetime(\'2019-06-10 09:00:00.00000\')')
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                      100.00 │
 └─────────────────────────────────────────────┘
@@ -50,7 +50,7 @@
 │         50 │ 2019-06-10 09:00:00.00000 │
 └────────────┴───────────────────────────┘
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                        0.00 │
 └─────────────────────────────────────────────┘
@@ -60,7 +60,7 @@
 # so '2019-06-10 17:00:00.00000'(tz_offset:28800) below is equal to '2019-06-10 09:00:00.00000' in UTC
 => DBGInvoke dag('select * from default.test where col_2 < cast_string_datetime(\'2019-06-10 17:00:00.00000\')',4,'encode_type:default,tz_offset:28800')
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                      100.00 │
 └─────────────────────────────────────────────┘
@@ -70,14 +70,14 @@
 │         50 │ 2019-06-10 09:00:00.00000 │
 └────────────┴───────────────────────────┘
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                        0.00 │
 └─────────────────────────────────────────────┘
 
 => DBGInvoke dag('select * from default.test where col_2 < cast_string_datetime(\'2019-06-10 04:00:00.00000\')',4,'encode_type:default,tz_name:America/Chicago')
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                      100.00 │
 └─────────────────────────────────────────────┘
@@ -87,14 +87,14 @@
 │         50 │ 2019-06-10 09:00:00.00000 │
 └────────────┴───────────────────────────┘
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                        0.00 │
 └─────────────────────────────────────────────┘
 
 => DBGInvoke dag('select * from default.test where col_2 > cast_string_datetime(\'2019-06-13 12:00:01.00000\')')
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                      100.00 │
 └─────────────────────────────────────────────┘
@@ -104,14 +104,14 @@
 │         55 │ 2019-06-13 12:00:01.00000 │
 └────────────┴───────────────────────────┘
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                        0.00 │
 └─────────────────────────────────────────────┘
 
 => DBGInvoke dag('select * from default.test where col_2 > cast_string_datetime(\'2019-06-13 20:00:01.00000\')',4,'encode_type:default,tz_offset:28800')
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                      100.00 │
 └─────────────────────────────────────────────┘
@@ -121,14 +121,14 @@
 │         55 │ 2019-06-13 12:00:01.00000 │
 └────────────┴───────────────────────────┘
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                        0.00 │
 └─────────────────────────────────────────────┘
 
 => DBGInvoke dag('select * from default.test where col_2 > cast_string_datetime(\'2019-06-13 07:00:01.00000\')',4,'encode_type:default,tz_name:America/Chicago')
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                      100.00 │
 └─────────────────────────────────────────────┘
@@ -138,7 +138,7 @@
 │         55 │ 2019-06-13 12:00:01.00000 │
 └────────────┴───────────────────────────┘
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                        0.00 │
 └─────────────────────────────────────────────┘
diff --git a/tests/fullstack-test-dt/expr/timestamp_filter.test b/tests/fullstack-test-dt/expr/timestamp_filter.test
index 1488cef1dbf..e1597014e66 100644
--- a/tests/fullstack-test-dt/expr/timestamp_filter.test
+++ b/tests/fullstack-test-dt/expr/timestamp_filter.test
@@ -30,7 +30,7 @@ mysql> SET time_zone = '+0:00'; set session tidb_isolation_read_engines='tiflash
 |  1 | 2000-01-01 10:00:00 |
 +----+---------------------+
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                        0.00 │
 └─────────────────────────────────────────────┘
@@ -43,26 +43,26 @@ mysql> SET time_zone = '+0:00'; set session tidb_isolation_read_engines='tiflash
 |  1 | 2000-01-01 10:00:00 |
 +----+---------------------+
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                        0.00 │
 └─────────────────────────────────────────────┘
 
 mysql> SET time_zone = '+0:00'; set session tidb_isolation_read_engines='tiflash'; select * from test.t where ts != '2000-01-01 10:00:00';
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                      100.00 │
 └─────────────────────────────────────────────┘
 
 ## Tests the direction between column and literal
 mysql> SET time_zone = '+0:00'; set session tidb_isolation_read_engines='tiflash'; select * from test.t where ts > '2000-01-01 10:00:01';
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                      100.00 │
 └─────────────────────────────────────────────┘
 
 mysql> SET time_zone = '+0:00'; set session tidb_isolation_read_engines='tiflash'; select * from test.t where '2000-01-01 10:00:01' < ts;
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                      100.00 │
 └─────────────────────────────────────────────┘
@@ -72,7 +72,7 @@ mysql> SET time_zone = '+0:00'; set session tidb_isolation_read_engines='tiflash
 +----+---------------------+
 |  1 | 2000-01-01 10:00:00 |
 +----+---------------------+
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                        0.00 │
 └─────────────────────────────────────────────┘
@@ -82,7 +82,7 @@ mysql> SET time_zone = '+0:00'; set session tidb_isolation_read_engines='tiflash
 +----+---------------------+
 |  1 | 2000-01-01 10:00:00 |
 +----+---------------------+
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                        0.00 │
 └─────────────────────────────────────────────┘
@@ -95,14 +95,14 @@ mysql> SET time_zone = '+8:00'; set session tidb_isolation_read_engines='tiflash
 |  1 | 2000-01-01 18:00:00 |
 +----+---------------------+
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                        0.00 │
 └─────────────────────────────────────────────┘
 
 mysql> SET time_zone = '+8:00'; set session tidb_isolation_read_engines='tiflash'; select * from test.t where ts != '2000-01-01 18:00:00';
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                      100.00 │
 └─────────────────────────────────────────────┘
@@ -141,7 +141,7 @@ mysql> SET time_zone = '+0:00'; set session tidb_isolation_read_engines='tiflash
 |  1 | 2000-01-01 10:00:00 |
 +----+---------------------+
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                        0.00 │
 └─────────────────────────────────────────────┘
@@ -154,7 +154,7 @@ mysql> SET time_zone = '+8:00'; set session tidb_isolation_read_engines='tiflash
 |  1 | 2000-01-01 18:00:00 |
 +----+---------------------+
 
-=> DBGInvoke search_log_for_key('RSFilter exclude rate')
+=> DBGInvoke search_log_for_key('RSFilter exclude rate', 'Rough set filter')
 ┌─search_log_for_key("RSFilter exclude rate")─┐
 │                                        0.00 │
 └─────────────────────────────────────────────┘