DDL: Use Column Name Instead of Offset to Find the common handle cluster index #5166

Merged · 28 commits · Jun 22, 2022
Changes from 26 commits

Commits (28)
b871522
fix issue 5154: use name instead of offset to find the pk id
hongyunyan Jun 17, 2022
cd92fc3
Merge branch 'master' into hongyunyan_fix_5154
hongyunyan Jun 17, 2022
6eea5ee
add tests about clustered index
hongyunyan Jun 17, 2022
8561b9a
Merge branch 'hongyunyan_fix_5154' of https://github.com/hongyunyan/t…
hongyunyan Jun 17, 2022
110f2fb
fix clang tidy
hongyunyan Jun 18, 2022
d4c55a7
Merge branch 'master' into hongyunyan_fix_5154
JaySon-Huang Jun 18, 2022
e68d45d
remove useless code
hongyunyan Jun 20, 2022
4ffb496
Merge branch 'hongyunyan_fix_5154' of https://github.com/hongyunyan/t…
hongyunyan Jun 20, 2022
0128545
fix the comments
hongyunyan Jun 20, 2022
6e2bbb1
change session statement
hongyunyan Jun 20, 2022
08b122a
add unit test
hongyunyan Jun 20, 2022
8d74059
fix format
hongyunyan Jun 20, 2022
3139593
add license
hongyunyan Jun 20, 2022
ffbd63e
add one more case
JaySon-Huang Jun 21, 2022
fcf09ab
Fix error message
JaySon-Huang Jun 21, 2022
51e894c
Merge pull request #1 from JaySon-Huang/pr/hongyunyan/5166
hongyunyan Jun 21, 2022
6decafe
add benchmark
hongyunyan Jun 21, 2022
ecb26ff
Merge branch 'hongyunyan_fix_5154' of https://github.com/hongyunyan/t…
hongyunyan Jun 21, 2022
d89b308
remove useless code
hongyunyan Jun 21, 2022
44b6798
fix format
hongyunyan Jun 21, 2022
2162411
add benchmark for different num of rows
JaySon-Huang Jun 21, 2022
590d4dd
Merge branch 'hongyunyan_fix_5154' into pr/hongyunyan/5166
hongyunyan Jun 21, 2022
238bb7b
Merge pull request #2 from JaySon-Huang/pr/hongyunyan/5166
hongyunyan Jun 21, 2022
6dfd5dd
Merge branch 'master' into hongyunyan_fix_5154
hongyunyan Jun 21, 2022
4275d2a
fix format
hongyunyan Jun 22, 2022
d864d99
Merge branch 'hongyunyan_fix_5154' of https://github.com/hongyunyan/t…
hongyunyan Jun 22, 2022
72c75bc
remove with_check variable
hongyunyan Jun 22, 2022
3ec80b2
Merge branch 'master' into hongyunyan_fix_5154
flowbehappy Jun 22, 2022
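The change below is the same across the debug helpers and the decoding path: primary-key ("common handle") columns are now resolved by column name instead of by the offset recorded in the index metadata, because ADD COLUMN / DROP COLUMN can shift offsets while the IndexInfo is not rewritten. A minimal, self-contained sketch of the pattern and of why offset lookup goes stale (hypothetical ColumnInfo/IndexColumn types, not TiFlash code):

#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

// Hypothetical stand-ins for the table column info and the primary index columns.
struct ColumnInfo
{
    std::string name;
    int64_t id;
};
struct IndexColumn
{
    std::string name;
    size_t offset; // recorded when the table was created; may go stale
};

// Resolve a primary-key column by name: build a name -> position map once,
// then look the index column up by its name.
const ColumnInfo & findPkColumn(const std::vector<ColumnInfo> & columns, const IndexColumn & idx_col)
{
    std::unordered_map<std::string, size_t> name_to_pos;
    for (size_t i = 0; i < columns.size(); ++i)
        name_to_pos.emplace(columns[i].name, i);
    return columns[name_to_pos.at(idx_col.name)];
}

int main()
{
    // The table was created as (a, pk), so the index recorded pk at offset 1.
    IndexColumn pk_col{"pk", /*offset=*/1};
    // After `ALTER TABLE ... DROP COLUMN a`, pk is the only column left (position 0).
    std::vector<ColumnInfo> columns = {{"pk", 2}};
    // columns[pk_col.offset] would now be out of range / the wrong column;
    // lookup by name still finds the right one.
    assert(findPkColumn(columns, pk_col).name == "pk");
    return 0;
}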
14 changes: 11 additions & 3 deletions dbms/src/Core/Block.cpp
@@ -238,10 +238,18 @@ void Block::checkNumberOfRows() const
if (rows == -1)
rows = size;
else if (rows != size)
throw Exception("Sizes of columns doesn't match: "
+ data.front().name + ": " + toString(rows)
+ ", " + elem.name + ": " + toString(size),
{
auto first_col = data.front();
throw Exception(fmt::format(
"Sizes of columns doesn't match: {}(id={}): {}, {}(id={}): {}",
first_col.name,
first_col.column_id,
rows,
elem.name,
elem.column_id,
size),
ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);
}
}
}

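The Block.cpp change above enriches the size-mismatch exception with each column's id next to its name, using fmt::format instead of string concatenation. A rough, standalone sketch of the resulting message (illustrative names, assuming the fmt library):

#include <fmt/format.h>
#include <cstddef>
#include <string>

// Rough sketch of the message built above; not the real Block code.
std::string sizesMismatchMessage(
    const std::string & first_name, long first_id, size_t first_rows,
    const std::string & other_name, long other_id, size_t other_rows)
{
    return fmt::format(
        "Sizes of columns doesn't match: {}(id={}): {}, {}(id={}): {}",
        first_name, first_id, first_rows,
        other_name, other_id, other_rows);
}

int main()
{
    // e.g. "Sizes of columns doesn't match: a(id=1): 3, b(id=2): 5"
    fmt::print("{}\n", sizesMismatchMessage("a", 1, 3, "b", 2, 5));
    return 0;
}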
5 changes: 2 additions & 3 deletions dbms/src/Debug/MockTiDB.cpp
@@ -221,7 +221,6 @@ TiDB::TableInfoPtr MockTiDB::parseColumns(
{
String & name = string_tokens[index];
index_info.idx_cols[index].name = name;
index_info.idx_cols[index].offset = pk_column_pos_map[name];
index_info.idx_cols[index].length = -1;
}
}
@@ -302,7 +301,7 @@ int MockTiDB::newTables(
tables_by_id.emplace(table->table_info.id, table);
tables_by_name.emplace(qualified_name, table);

AffectedOption opt;
AffectedOption opt{};
opt.schema_id = table->database_id;
opt.table_id = table->id();
opt.old_schema_id = table->database_id;
@@ -571,7 +570,7 @@ void MockTiDB::renameTables(const std::vector<std::tuple<std::string, std::strin
tables_by_name.erase(qualified_name);
tables_by_name.emplace(new_qualified_name, new_table);

AffectedOption opt;
AffectedOption opt{};
opt.schema_id = table->database_id;
opt.table_id = new_table->id();
opt.old_schema_id = table->database_id;
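The `AffectedOption opt{};` edits above switch to value-initialization, so any members that are not explicitly assigned afterwards start at zero rather than holding indeterminate values. A small standalone illustration with a hypothetical aggregate:

#include <cstdint>

// Hypothetical analogue of AffectedOption: a plain aggregate of ids.
struct AffectedOptionLike
{
    int64_t schema_id;
    int64_t table_id;
    int64_t old_schema_id;
    int64_t old_table_id;
};

int main()
{
    AffectedOptionLike a;   // default-initialized: members hold indeterminate values
    AffectedOptionLike b{}; // value-initialized: every member starts at zero
    (void)a;
    return static_cast<int>(b.old_table_id); // guaranteed to be 0
}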
42 changes: 25 additions & 17 deletions dbms/src/Debug/dbgFuncMockRaftCommand.cpp
@@ -40,7 +40,7 @@ void MockRaftCommand::dbgFuncRegionBatchSplit(Context & context, const ASTs & ar
auto & tmt = context.getTMTContext();
auto & kvstore = tmt.getKVStore();

RegionID region_id = (RegionID)safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[0]).value);
auto region_id = static_cast<RegionID>(safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[0]).value));
const String & database_name = typeid_cast<const ASTIdentifier &>(*args[1]).name;
const String & table_name = typeid_cast<const ASTIdentifier &>(*args[2]).name;
auto table = MockTiDB::instance().getTableByName(database_name, table_name);
@@ -49,7 +49,7 @@ void MockRaftCommand::dbgFuncRegionBatchSplit(Context & context, const ASTs & ar
if (4 + handle_column_size * 4 != args.size())
throw Exception("Args not matched, should be: region-id1, database-name, table-name, start1, end1, start2, end2, region-id2",
ErrorCodes::BAD_ARGUMENTS);
RegionID region_id2 = (RegionID)safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[args.size() - 1]).value);
auto region_id2 = static_cast<RegionID>(safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[args.size() - 1]).value));

auto table_id = table->id();
TiKVKey start_key1, start_key2, end_key1, end_key2;
@@ -59,9 +59,17 @@ void MockRaftCommand::dbgFuncRegionBatchSplit(Context & context, const ASTs & ar
std::vector<Field> start_keys2;
std::vector<Field> end_keys1;
std::vector<Field> end_keys2;

std::unordered_map<String, size_t> column_name_columns_index_map;
for (size_t i = 0; i < table_info.columns.size(); i++)
{
column_name_columns_index_map.emplace(table_info.columns[i].name, i);
}

for (size_t i = 0; i < handle_column_size; i++)
{
auto & column_info = table_info.columns[table_info.getPrimaryIndexInfo().idx_cols[i].offset];
auto idx = column_name_columns_index_map[table_info.getPrimaryIndexInfo().idx_cols[i].name];
auto & column_info = table_info.columns[idx];

auto start_field1 = RegionBench::convertField(column_info, typeid_cast<const ASTLiteral &>(*args[3 + i]).value);
TiDB::DatumBumpy start_datum1 = TiDB::DatumBumpy(start_field1, column_info.tp);
@@ -88,10 +96,10 @@ void MockRaftCommand::dbgFuncRegionBatchSplit(Context & context, const ASTs & ar
}
else
{
HandleID start1 = (HandleID)safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[3]).value);
HandleID end1 = (HandleID)safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[4]).value);
HandleID start2 = (HandleID)safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[5]).value);
HandleID end2 = (HandleID)safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[6]).value);
auto start1 = static_cast<HandleID>(safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[3]).value));
auto end1 = static_cast<HandleID>(safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[4]).value));
auto start2 = static_cast<HandleID>(safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[5]).value));
auto end2 = static_cast<HandleID>(safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[6]).value));
start_key1 = RecordKVFormat::genKey(table_id, start1);
start_key2 = RecordKVFormat::genKey(table_id, start2);
end_key1 = RecordKVFormat::genKey(table_id, end1);
@@ -110,15 +118,15 @@ void MockRaftCommand::dbgFuncRegionBatchSplit(Context & context, const ASTs & ar
request.set_cmd_type(raft_cmdpb::AdminCmdType::BatchSplit);
raft_cmdpb::BatchSplitResponse * splits = response.mutable_splits();
{
auto region = splits->add_regions();
auto * region = splits->add_regions();
region->set_id(region_id);
region->set_start_key(start_key1);
region->set_end_key(end_key1);
region->add_peers();
*region->mutable_region_epoch() = new_epoch;
}
{
auto region = splits->add_regions();
auto * region = splits->add_regions();
region->set_id(region_id2);
region->set_start_key(start_key2);
region->set_end_key(end_key2);
@@ -144,8 +152,8 @@ void MockRaftCommand::dbgFuncPrepareMerge(Context & context, const ASTs & args,
throw Exception("Args not matched, should be: source-id1, target-id2", ErrorCodes::BAD_ARGUMENTS);
}

RegionID region_id = (RegionID)safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[0]).value);
RegionID target_id = (RegionID)safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[1]).value);
auto region_id = static_cast<RegionID>(safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[0]).value));
auto target_id = static_cast<RegionID>(safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[1]).value));

auto & tmt = context.getTMTContext();
auto & kvstore = tmt.getKVStore();
@@ -157,7 +165,7 @@
{
request.set_cmd_type(raft_cmdpb::AdminCmdType::PrepareMerge);

auto prepare_merge = request.mutable_prepare_merge();
auto * prepare_merge = request.mutable_prepare_merge();
{
auto min_index = region->appliedIndex();
prepare_merge->set_min_index(min_index);
@@ -184,8 +192,8 @@ void MockRaftCommand::dbgFuncCommitMerge(Context & context, const ASTs & args, D
throw Exception("Args not matched, should be: source-id1, current-id2", ErrorCodes::BAD_ARGUMENTS);
}

RegionID source_id = (RegionID)safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[0]).value);
RegionID current_id = (RegionID)safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[1]).value);
auto source_id = static_cast<RegionID>(safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[0]).value));
auto current_id = static_cast<RegionID>(safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[1]).value));

auto & tmt = context.getTMTContext();
auto & kvstore = tmt.getKVStore();
@@ -196,7 +204,7 @@

{
request.set_cmd_type(raft_cmdpb::AdminCmdType::CommitMerge);
auto commit_merge = request.mutable_commit_merge();
auto * commit_merge = request.mutable_commit_merge();
{
commit_merge->set_commit(source_region->appliedIndex());
*commit_merge->mutable_source() = source_region->getMetaRegion();
@@ -220,7 +228,7 @@ void MockRaftCommand::dbgFuncRollbackMerge(Context & context, const ASTs & args,
throw Exception("Args not matched, should be: region-id", ErrorCodes::BAD_ARGUMENTS);
}

RegionID region_id = (RegionID)safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[0]).value);
auto region_id = static_cast<RegionID>(safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[0]).value));

auto & tmt = context.getTMTContext();
auto & kvstore = tmt.getKVStore();
@@ -231,7 +239,7 @@ void MockRaftCommand::dbgFuncRollbackMerge(Context & context, const ASTs & args,
{
request.set_cmd_type(raft_cmdpb::AdminCmdType::RollbackMerge);

auto rollback_merge = request.mutable_rollback_merge();
auto * rollback_merge = request.mutable_rollback_merge();
{
auto merge_state = region->getMergeState();
rollback_merge->set_commit(merge_state.commit());
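Besides the name-based lookup, this file replaces C-style casts on region and handle ids with `static_cast`, keeping the conversions explicit and visible to clang-tidy. Roughly, as a standalone sketch:

#include <cstdint>

using RegionID = uint64_t;
using HandleID = int64_t;

int main()
{
    uint64_t raw = 42; // value parsed from an AST literal in the code above
    // Before: RegionID region_id = (RegionID)raw;  (C-style cast)
    auto region_id = static_cast<RegionID>(raw);
    // Handle ids are signed, so the conversion stays explicit as well.
    auto handle = static_cast<HandleID>(raw);
    return (region_id == 42 && handle == 42) ? 0 : 1;
}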
24 changes: 19 additions & 5 deletions dbms/src/Debug/dbgFuncMockRaftSnapshot.cpp
@@ -68,6 +68,12 @@ RegionPtr GenDbgRegionSnapshotWithData(Context & context, const ASTs & args)
size_t handle_column_size = is_common_handle ? table_info.getPrimaryIndexInfo().idx_cols.size() : 1;
RegionPtr region;

std::unordered_map<String, size_t> column_name_columns_index_map;
for (size_t i = 0; i < table_info.columns.size(); i++)
{
column_name_columns_index_map.emplace(table_info.columns[i].name, i);
}

if (!is_common_handle)
{
auto start = static_cast<HandleID>(safeGet<UInt64>(typeid_cast<const ASTLiteral &>(*args[3]).value));
@@ -81,7 +87,8 @@ RegionPtr GenDbgRegionSnapshotWithData(Context & context, const ASTs & args)
std::vector<Field> end_keys;
for (size_t i = 0; i < handle_column_size; i++)
{
auto & column_info = table_info.columns[table_info.getPrimaryIndexInfo().idx_cols[i].offset];
auto idx = column_name_columns_index_map[table_info.getPrimaryIndexInfo().idx_cols[i].name];
auto & column_info = table_info.columns[idx];
auto start_field = RegionBench::convertField(column_info, typeid_cast<const ASTLiteral &>(*args[3 + i]).value);
TiDB::DatumBumpy start_datum = TiDB::DatumBumpy(start_field, column_info.tp);
start_keys.emplace_back(start_datum.field());
@@ -122,9 +129,9 @@ RegionPtr GenDbgRegionSnapshotWithData(Context & context, const ASTs & args)
std::vector<Field> keys; // handle key
for (size_t i = 0; i < table_info.getPrimaryIndexInfo().idx_cols.size(); i++)
{
auto & idx_col = table_info.getPrimaryIndexInfo().idx_cols[i];
auto & column_info = table_info.columns[idx_col.offset];
auto start_field = RegionBench::convertField(column_info, fields[idx_col.offset]);
auto idx = column_name_columns_index_map[table_info.getPrimaryIndexInfo().idx_cols[i].name];
auto & column_info = table_info.columns[idx];
auto start_field = RegionBench::convertField(column_info, fields[idx]);
TiDB::DatumBumpy start_datum = TiDB::DatumBumpy(start_field, column_info.tp);
keys.emplace_back(start_datum.field());
}
@@ -198,9 +205,16 @@ void MockRaftCommand::dbgFuncRegionSnapshot(Context & context, const ASTs & args
// Get start key and end key from multiple columns if it is clustered_index.
std::vector<Field> start_keys;
std::vector<Field> end_keys;

std::unordered_map<String, size_t> column_name_columns_index_map;
for (size_t i = 0; i < table_info.columns.size(); i++)
{
column_name_columns_index_map.emplace(table_info.columns[i].name, i);
}
for (size_t i = 0; i < handle_column_size; i++)
{
const auto & column_info = table_info.columns[table_info.getPrimaryIndexInfo().idx_cols[i].offset];
auto idx = column_name_columns_index_map[table_info.getPrimaryIndexInfo().idx_cols[i].name];
const auto & column_info = table_info.columns[idx];
auto start_field = RegionBench::convertField(column_info, typeid_cast<const ASTLiteral &>(*args[1 + i]).value);
TiDB::DatumBumpy start_datum = TiDB::DatumBumpy(start_field, column_info.tp);
start_keys.emplace_back(start_datum.field());
8 changes: 7 additions & 1 deletion dbms/src/Debug/dbgFuncRegion.cpp
@@ -61,9 +61,15 @@ void dbgFuncPutRegion(Context & context, const ASTs & args, DBGInvoker::Printer
{
std::vector<Field> start_keys;
std::vector<Field> end_keys;
std::unordered_map<String, size_t> column_name_columns_index_map;
for (size_t i = 0; i < table_info.columns.size(); i++)
{
column_name_columns_index_map.emplace(table_info.columns[i].name, i);
}
for (size_t i = 0; i < handle_column_size; i++)
{
const auto & column_info = table_info.columns[table_info.getPrimaryIndexInfo().idx_cols[i].offset];
auto idx = column_name_columns_index_map[table_info.getPrimaryIndexInfo().idx_cols[i].name];
const auto & column_info = table_info.columns[idx];
auto start_field = RegionBench::convertField(column_info, typeid_cast<const ASTLiteral &>(*args[1 + i]).value);
TiDB::DatumBumpy start_datum = TiDB::DatumBumpy(start_field, column_info.tp);
start_keys.emplace_back(start_datum.field());
15 changes: 11 additions & 4 deletions dbms/src/Debug/dbgTools.cpp
@@ -310,7 +310,7 @@ void insert( //
// Parse the fields in the inserted row
std::vector<Field> fields;
{
for (ASTs::const_iterator it = values_begin; it != values_end; ++it)
for (auto it = values_begin; it != values_end; ++it)
{
auto field = typeid_cast<const ASTLiteral *>((*it).get())->value;
fields.emplace_back(field);
@@ -330,11 +330,18 @@
if (table_info.is_common_handle)
{
std::vector<Field> keys;

std::unordered_map<String, size_t> column_name_columns_index_map;
for (size_t i = 0; i < table_info.columns.size(); i++)
{
column_name_columns_index_map.emplace(table_info.columns[i].name, i);
}

for (size_t i = 0; i < table_info.getPrimaryIndexInfo().idx_cols.size(); i++)
{
const auto & idx_col = table_info.getPrimaryIndexInfo().idx_cols[i];
const auto & column_info = table_info.columns[idx_col.offset];
auto start_field = RegionBench::convertField(column_info, fields[idx_col.offset]);
const auto & col_idx = column_name_columns_index_map[table_info.getPrimaryIndexInfo().idx_cols[i].name];
const auto & column_info = table_info.columns[col_idx];
auto start_field = RegionBench::convertField(column_info, fields[col_idx]);
TiDB::DatumBumpy start_datum = TiDB::DatumBumpy(start_field, column_info.tp);
keys.emplace_back(start_datum.field());
}
14 changes: 10 additions & 4 deletions dbms/src/Storages/Transaction/DecodingStorageSchemaSnapshot.h
@@ -77,18 +77,20 @@ struct DecodingStorageSchemaSnapshot
, decoding_schema_version{decoding_schema_version_}
{
std::unordered_map<ColumnID, size_t> column_lut;
std::unordered_map<String, ColumnID> column_name_id_map;
for (size_t i = 0; i < table_info_.columns.size(); i++)
{
const auto & ci = table_info_.columns[i];
column_lut.emplace(ci.id, i);
column_name_id_map.emplace(ci.name, ci.id);
}
for (size_t i = 0; i < column_defines->size(); i++)
{
auto & cd = (*column_defines)[i];
sorted_column_id_with_pos.insert({cd.id, i});
if (cd.id != TiDBPkColumnID && cd.id != VersionColumnID && cd.id != DelMarkColumnID)
{
auto & columns = table_info_.columns;
const auto & columns = table_info_.columns;
column_infos.push_back(columns[column_lut.at(cd.id)]);
}
else
@@ -100,10 +102,14 @@ struct DecodingStorageSchemaSnapshot
// create pk related metadata if needed
if (is_common_handle)
{
const auto & primary_index_info = table_info_.getPrimaryIndexInfo();
for (size_t i = 0; i < primary_index_info.idx_cols.size(); i++)
/// We do not update the IndexInfo except for Rename DDL.
/// When an add column / drop column action happens, the offset of each column may change.
/// Thus, we should not use the offset to get the column we want,
/// but instead compare the column name to get the column id.
const auto & primary_index_cols = table_info_.getPrimaryIndexInfo().idx_cols;
for (const auto & col : primary_index_cols)
{
auto pk_column_id = table_info_.columns[primary_index_info.idx_cols[i].offset].id;
auto pk_column_id = column_name_id_map[col.name];
pk_column_ids.emplace_back(pk_column_id);
pk_pos_map.emplace(pk_column_id, reinterpret_cast<size_t>(std::numeric_limits<size_t>::max()));
}
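The comment above is the core of the fix: index metadata is only rewritten on rename, so after ADD/DROP COLUMN the stored offsets may point at the wrong column, while names stay stable. A hedged, standalone sketch of resolving the primary-key column ids by name (illustrative types, not the real TableInfo):

#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

// Illustrative stand-ins for the types used in the constructor above.
using ColumnID = int64_t;
struct ColumnInfo
{
    ColumnID id;
    std::string name;
};
struct IndexColumn
{
    std::string name;
};

// Mirror of the new logic: map column name -> id once, then resolve each
// primary-index column by name, never by its recorded offset.
std::vector<ColumnID> resolvePkColumnIds(
    const std::vector<ColumnInfo> & columns,
    const std::vector<IndexColumn> & idx_cols)
{
    std::unordered_map<std::string, ColumnID> name_to_id;
    for (const auto & ci : columns)
        name_to_id.emplace(ci.name, ci.id);

    std::vector<ColumnID> pk_ids;
    pk_ids.reserve(idx_cols.size());
    for (const auto & col : idx_cols)
        pk_ids.push_back(name_to_id.at(col.name));
    return pk_ids;
}

int main()
{
    std::vector<ColumnInfo> columns = {{5, "k1"}, {7, "k2"}, {9, "v"}};
    std::vector<IndexColumn> idx_cols = {{"k1"}, {"k2"}};
    auto ids = resolvePkColumnIds(columns, idx_cols);
    return (ids.size() == 2 && ids[0] == 5 && ids[1] == 7) ? 0 : 1;
}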
17 changes: 11 additions & 6 deletions dbms/src/Storages/Transaction/RegionBlockReader.cpp
@@ -33,23 +33,23 @@ RegionBlockReader::RegionBlockReader(DecodingStorageSchemaSnapshotConstPtr schem
: schema_snapshot{std::move(schema_snapshot_)}
{}

bool RegionBlockReader::read(Block & block, const RegionDataReadInfoList & data_list, bool force_decode)
bool RegionBlockReader::read(Block & block, const RegionDataReadInfoList & data_list, bool force_decode, bool with_check)
{
switch (schema_snapshot->pk_type)
{
case TMTPKType::INT64:
return readImpl<TMTPKType::INT64>(block, data_list, force_decode);
return readImpl<TMTPKType::INT64>(block, data_list, force_decode, with_check);
case TMTPKType::UINT64:
return readImpl<TMTPKType::UINT64>(block, data_list, force_decode);
return readImpl<TMTPKType::UINT64>(block, data_list, force_decode, with_check);
case TMTPKType::STRING:
return readImpl<TMTPKType::STRING>(block, data_list, force_decode);
return readImpl<TMTPKType::STRING>(block, data_list, force_decode, with_check);
default:
return readImpl<TMTPKType::UNSPECIFIED>(block, data_list, force_decode);
return readImpl<TMTPKType::UNSPECIFIED>(block, data_list, force_decode, with_check);
}
}

template <TMTPKType pk_type>
bool RegionBlockReader::readImpl(Block & block, const RegionDataReadInfoList & data_list, bool force_decode)
bool RegionBlockReader::readImpl(Block & block, const RegionDataReadInfoList & data_list, bool force_decode, bool with_check)
{
if (unlikely(block.columns() != schema_snapshot->column_defines->size()))
throw Exception("block structure doesn't match schema_snapshot.", ErrorCodes::LOGICAL_ERROR);
@@ -208,6 +208,11 @@ bool RegionBlockReader::readImpl(Block & block, const RegionDataReadInfoList & d
}
index++;
}
if (with_check)
{
block.checkNumberOfRows();
}

return true;
}

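The new `with_check` flag lets callers ask the reader to verify, after decoding, that every column in the block ended up with the same number of rows (via `Block::checkNumberOfRows()`), so a schema/data mismatch surfaces immediately. A simplified standalone sketch of that check (hypothetical MiniColumn type, not the real Block):

#include <cstddef>
#include <stdexcept>
#include <string>
#include <vector>

// Stripped-down analogue of Block::checkNumberOfRows(): after decoding,
// every column must carry the same number of rows.
struct MiniColumn
{
    std::string name;
    size_t rows;
};

void checkNumberOfRows(const std::vector<MiniColumn> & cols)
{
    if (cols.empty())
        return;
    const size_t expected = cols.front().rows;
    for (const auto & c : cols)
        if (c.rows != expected)
            throw std::runtime_error("Sizes of columns doesn't match: " + cols.front().name + " vs " + c.name);
}

// Mirrors the optional verification added to readImpl above: the caller decides
// whether the extra consistency check is worth running.
bool decodeAndMaybeCheck(std::vector<MiniColumn> & block, bool with_check)
{
    // ... rows would be decoded into `block` here ...
    if (with_check)
        checkNumberOfRows(block);
    return true;
}

int main()
{
    std::vector<MiniColumn> block = {{"pk", 3}, {"value", 3}};
    return decodeAndMaybeCheck(block, /*with_check=*/true) ? 0 : 1;
}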