Only update tiflash replica info when applying replica SchemaDiff
JaySon-Huang committed Nov 23, 2023
1 parent 742c0b9 commit 1c189c7
Showing 4 changed files with 57 additions and 23 deletions.
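
The core of this commit is in the first SchemaBuilder.cpp hunk below: when applying a SetTiFlashReplica SchemaDiff, instead of persisting the freshly fetched TableInfo wholesale, the builder now copies the locally stored TableInfo and overwrites only its replica_info before calling alterSchemaChange. The sketch below is a minimal, self-contained illustration of that pattern; the TableInfo, TiFlashReplicaInfo, and Storage types here are simplified stand-ins invented for this example, not the real TiFlash interfaces.

#include <cstdint>
#include <iostream>
#include <string>

// Simplified stand-ins for TiDB::TableInfo / TiFlashReplicaInfo.
struct TiFlashReplicaInfo
{
    uint64_t count = 0;
    bool available = false;
};

struct TableInfo
{
    int64_t id = 0;
    std::string name;
    int columns = 0; // placeholder for the rest of the schema
    TiFlashReplicaInfo replica_info;
};

// Hypothetical storage holding the locally known schema.
struct Storage
{
    TableInfo local;
    // Stand-in for alterSchemaChange: persist the new table info.
    void alterSchemaChange(const TableInfo & new_info) { local = new_info; }
};

// The pattern this commit applies: copy the *local* table info and overwrite only
// replica_info from the SchemaDiff, so a replica-count change cannot clobber the
// locally known column schema.
void applySetTiFlashReplica(Storage & storage, const TableInfo & diff_table_info)
{
    const auto & local_replica = storage.local.replica_info;
    if (local_replica.count == diff_table_info.replica_info.count
        && local_replica.available == diff_table_info.replica_info.available)
        return; // nothing changed

    auto local_table_info = storage.local; // copy
    const auto old_count = local_table_info.replica_info.count;
    local_table_info.replica_info = diff_table_info.replica_info; // only update replica info
    storage.alterSchemaChange(local_table_info);
    std::cout << "Updating replica info, old=" << old_count
              << " new=" << diff_table_info.replica_info.count << "\n";
}

int main()
{
    Storage storage{TableInfo{45, "t1", /*columns=*/3, {1, false}}};
    TableInfo from_diff{45, "t1", /*columns=*/2 /* stale column view */, {2, true}};
    applySetTiFlashReplica(storage, from_diff);
    // Locally known columns are preserved; only the replica info changed.
    std::cout << "columns=" << storage.local.columns
              << " replicas=" << storage.local.replica_info.count << "\n";
}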
70 changes: 51 additions & 19 deletions dbms/src/TiDB/Schema/SchemaBuilder.cpp
@@ -509,47 +509,78 @@ void SchemaBuilder<Getter, NameMapper>::applySetTiFlashReplica(DatabaseID databa
         return;
     }
 
-    if (storage->getTombstone() != 0)
+    if (storage->isTombstone())
     {
         applyRecoverTable(db_info->id, table_id);
         return;
     }
 
-    auto managed_storage = std::dynamic_pointer_cast<IManageableStorage>(storage);
-    auto storage_replica_info = managed_storage->getTableInfo().replica_info;
-    if (storage_replica_info.count == table_info->replica_info.count
-        && storage_replica_info.available == table_info->replica_info.available)
+    auto local_logical_storage_table_info = storage->getTableInfo(); // copy
+    // nothing changed
+    if (const auto & local_logical_storage_replica_info = local_logical_storage_table_info.replica_info;
+        local_logical_storage_replica_info.count == table_info->replica_info.count
+        && local_logical_storage_replica_info.available == table_info->replica_info.available)
     {
         return;
     }
 
+    size_t old_replica_count = 0;
+    size_t new_replica_count = 0;
     if (table_info->isLogicalPartitionTable())
     {
         for (const auto & part_def : table_info->partition.definitions)
         {
             auto new_part_table_info = table_info->producePartitionTableInfo(part_def.id, name_mapper);
             auto part_storage = tmt_context.getStorages().get(keyspace_id, new_part_table_info->id);
-            if (part_storage != nullptr)
+            if (part_storage == nullptr)
             {
+                table_id_map.emplacePartitionTableID(part_def.id, table_id);
+                continue;
+            }
+            {
                 auto alter_lock = part_storage->lockForAlter(getThreadNameAndID());
+                // Only update the replica info
+                auto local_table_info = part_storage->getTableInfo(); // copy
+                old_replica_count = local_table_info.replica_info.count;
+                new_replica_count = new_part_table_info->replica_info.count;
+                local_table_info.replica_info = new_part_table_info->replica_info;
                 part_storage->alterSchemaChange(
                     alter_lock,
-                    *new_part_table_info,
+                    local_table_info,
                     name_mapper.mapDatabaseName(db_info->id, keyspace_id),
-                    name_mapper.mapTableName(*new_part_table_info),
+                    name_mapper.mapTableName(local_table_info),
                     context);
             }
-            else
-                table_id_map.emplacePartitionTableID(part_def.id, table_id);
+            LOG_INFO(
+                log,
+                "Updating replica info, replica count old={} new={} physical_table_id={} logical_table_id={}",
+                old_replica_count,
+                new_replica_count,
+                part_def.id,
+                table_id);
         }
     }
-    auto alter_lock = storage->lockForAlter(getThreadNameAndID());
-    storage->alterSchemaChange(
-        alter_lock,
-        *table_info,
-        name_mapper.mapDatabaseName(db_info->id, keyspace_id),
-        name_mapper.mapTableName(*table_info),
-        context);
+
+    {
+        auto alter_lock = storage->lockForAlter(getThreadNameAndID());
+        // Only update the replica info
+        old_replica_count = local_logical_storage_table_info.replica_info.count;
+        new_replica_count = table_info->replica_info.count;
+        local_logical_storage_table_info.replica_info = table_info->replica_info;
+        storage->alterSchemaChange(
+            alter_lock,
+            local_logical_storage_table_info,
+            name_mapper.mapDatabaseName(db_info->id, keyspace_id),
+            name_mapper.mapTableName(local_logical_storage_table_info),
+            context);
+    }
+    LOG_INFO(
+        log,
+        "Updating replica info, replica count old={} new={} physical_table_id={} logical_table_id={}",
+        old_replica_count,
+        new_replica_count,
+        table_id,
+        table_id);
 }
 
 template <typename Getter, typename NameMapper>
@@ -1357,7 +1388,8 @@ template <typename Getter, typename NameMapper>
 bool SchemaBuilder<Getter, NameMapper>::applyTable(
     DatabaseID database_id,
     TableID logical_table_id,
-    TableID physical_table_id)
+    TableID physical_table_id,
+    bool force)
 {
     // Here we get table info without mvcc. So we can detect that whether the table
     // has been renamed to another database or dropped.
@@ -1366,7 +1398,7 @@ bool SchemaBuilder<Getter, NameMapper>::applyTable(
     // added to the new table.
     // For the reason above, if we can not get table info without mvcc, this function
     // will return false and the caller should update the table_id_map then retry.
-    auto table_info = getter.getTableInfo(database_id, logical_table_id, /*try_mvcc*/ false);
+    auto table_info = getter.getTableInfo(database_id, logical_table_id, /*try_mvcc*/ force);
     if (table_info == nullptr)
     {
         LOG_WARNING(
2 changes: 1 addition & 1 deletion dbms/src/TiDB/Schema/SchemaBuilder.h
@@ -173,7 +173,7 @@ struct SchemaBuilder
 
     void dropAllSchema();
 
-    bool applyTable(DatabaseID database_id, TableID logical_table_id, TableID physical_table_id);
+    bool applyTable(DatabaseID database_id, TableID logical_table_id, TableID physical_table_id, bool force);
 
 private:
     void applyDropSchema(DatabaseID schema_id);
7 changes: 4 additions & 3 deletions dbms/src/TiDB/Schema/TiDBSchemaSyncer.cpp
@@ -157,6 +157,7 @@ std::tuple<bool, String> TiDBSchemaSyncer<mock_getter, mock_mapper>::trySyncTabl
     Context & context,
     TableID physical_table_id,
     Getter & getter,
+    bool force,
     const char * next_action)
 {
     // Get logical_table_id and database_id by physical_table_id.
@@ -176,7 +177,7 @@ std::tuple<bool, String> TiDBSchemaSyncer<mock_getter, mock_mapper>::trySyncTabl
     // If the table schema apply is failed, then we need to update the table-id-mapping
     // and retry.
     SchemaBuilder<Getter, NameMapper> builder(getter, context, databases, table_id_map, shared_mutex_for_databases);
-    if (!builder.applyTable(database_id, logical_table_id, physical_table_id))
+    if (!builder.applyTable(database_id, logical_table_id, physical_table_id, force))
     {
         String message = fmt::format(
             "Can not apply table schema because the table_id_map is not up-to-date, {}."
@@ -206,7 +207,7 @@ bool TiDBSchemaSyncer<mock_getter, mock_mapper>::syncTableSchema(Context & conte
     /// Note that we don't need a lock at the beginning of syncTableSchema.
     /// The AlterLock for storage will be acquired in `SchemaBuilder::applyTable`.
     auto [need_update_id_mapping, message]
-        = trySyncTableSchema(context, physical_table_id, getter, "try to syncSchemas");
+        = trySyncTableSchema(context, physical_table_id, getter, false, "try to syncSchemas");
     if (!need_update_id_mapping)
     {
         LOG_INFO(log, "Sync table schema end, table_id={}", physical_table_id);
@@ -218,7 +219,7 @@ bool TiDBSchemaSyncer<mock_getter, mock_mapper>::syncTableSchema(Context & conte
     // Notice: must use the same getter
     syncSchemasByGetter(context, getter);
     std::tie(need_update_id_mapping, message)
-        = trySyncTableSchema(context, physical_table_id, getter, "sync table schema fail");
+        = trySyncTableSchema(context, physical_table_id, getter, true, "sync table schema fail");
     if (likely(!need_update_id_mapping))
     {
         LOG_INFO(log, "Sync table schema end after syncSchemas, table_id={}", physical_table_id);
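
The syncTableSchema hunks above wire the new `force` flag into a two-pass flow: a first attempt with force=false (applyTable reads table info with try_mvcc=false), and, if that fails because the table-id mapping is stale, a full schema sync followed by a retry with force=true (try_mvcc=true). The sketch below is a toy rendering of that control flow only; MockGetter, the stubbed syncSchemasByGetter, and the return types are invented placeholders and do not mirror the real TiDBSchemaSyncer APIs.

#include <iostream>
#include <string>
#include <tuple>

struct MockGetter
{
    // try_mvcc=true lets the getter also see a snapshot that may include
    // renamed or dropped tables; here it is faked with a trivial rule.
    bool getTableInfo(long table_id, bool try_mvcc) const { return try_mvcc || table_id % 2 == 0; }
};

std::tuple<bool, std::string> trySyncTableSchema(const MockGetter & getter, long table_id, bool force)
{
    // `force` is forwarded as the /*try_mvcc*/ argument, mirroring the diff above.
    if (!getter.getTableInfo(table_id, /*try_mvcc*/ force))
        return {true, "table_id_map is not up-to-date"};
    return {false, ""};
}

void syncSchemasByGetter(const MockGetter &)
{
    // A full schema sync would refresh the table-id mapping here.
}

bool syncTableSchema(const MockGetter & getter, long table_id)
{
    // First pass: cheap attempt without MVCC.
    auto [need_update_id_mapping, message] = trySyncTableSchema(getter, table_id, /*force=*/false);
    if (!need_update_id_mapping)
        return true;

    // Second pass: refresh the id mapping, then retry with force=true.
    std::cout << message << ", retrying after syncSchemas\n";
    syncSchemasByGetter(getter);
    std::tie(need_update_id_mapping, message) = trySyncTableSchema(getter, table_id, /*force=*/true);
    return !need_update_id_mapping;
}

int main()
{
    // An odd table id fails the first pass and falls back to the forced retry.
    std::cout << syncTableSchema(MockGetter{}, 101) << "\n";
}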
1 change: 1 addition & 0 deletions dbms/src/TiDB/Schema/TiDBSchemaSyncer.h
@@ -107,6 +107,7 @@ class TiDBSchemaSyncer : public SchemaSyncer
         Context & context,
         TableID physical_table_id,
         Getter & getter,
+        bool force,
         const char * next_action);
 
     TiDB::DBInfoPtr getDBInfoByName(const String & database_name) override
