From e58a7ea2f35a5db69445a6a643b8a5a582c37e67 Mon Sep 17 00:00:00 2001 From: lidezhu Date: Fri, 27 Sep 2024 10:38:28 +0800 Subject: [PATCH 1/3] add some log --- logservice/schemastore/multi_version.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/logservice/schemastore/multi_version.go b/logservice/schemastore/multi_version.go index 8b0dc682e..2bf41b801 100644 --- a/logservice/schemastore/multi_version.go +++ b/logservice/schemastore/multi_version.go @@ -15,6 +15,7 @@ package schemastore import ( "errors" + "fmt" "math" "sort" "sync" @@ -103,7 +104,12 @@ func (v *versionedTableInfoStore) getTableInfo(ts uint64) (*common.TableInfo, er } if ts >= v.deleteVersion { - return nil, errors.New("table info deleted") + log.Error("table info deleted", + zap.Any("ts", ts), + zap.Any("tableID", v.tableID), + zap.Any("infos", v.infos), + zap.Any("deleteVersion", v.deleteVersion)) + return nil, fmt.Errorf("table info deleted %d", v.tableID) } target := sort.Search(len(v.infos), func(i int) bool { From 42ac47ee0fd7abec66d98866dc82d01beffe576b Mon Sep 17 00:00:00 2001 From: lidezhu Date: Fri, 27 Sep 2024 14:54:17 +0800 Subject: [PATCH 2/3] add truncate table test --- logservice/schemastore/multi_version.go | 43 +- logservice/schemastore/multi_version_test.go | 217 ++- logservice/schemastore/persist_storage.go | 253 +-- .../schemastore/persist_storage_test.go | 1707 +++++++++-------- logservice/schemastore/schema_store.go | 8 +- logservice/schemastore/types.go | 27 +- logservice/schemastore/types_gen.go | 106 +- 7 files changed, 1216 insertions(+), 1145 deletions(-) diff --git a/logservice/schemastore/multi_version.go b/logservice/schemastore/multi_version.go index 2bf41b801..e25bf0664 100644 --- a/logservice/schemastore/multi_version.go +++ b/logservice/schemastore/multi_version.go @@ -154,7 +154,7 @@ func assertEmpty(infos []*tableInfoItem, event PersistedDDLEvent) { zap.Any("lastVersion", infos[len(infos)-1].version), zap.Any("lastTableInfoVersion", 
infos[len(infos)-1].info.Version), zap.String("query", event.Query), - zap.Int64("tableID", event.TableID), + zap.Int64("tableID", event.CurrentTableID), zap.Uint64("finishedTs", event.FinishedTs), zap.Int64("schemaVersion", event.SchemaVersion)) } @@ -198,22 +198,16 @@ func (v *versionedTableInfoStore) applyDDL(event PersistedDDLEvent) { } // lock must be hold by the caller -// TODO: filter old ddl: there may be some pending ddls which is also written to disk and applied to table info store already func (v *versionedTableInfoStore) doApplyDDL(event PersistedDDLEvent) { - if len(v.infos) != 0 && uint64(event.FinishedTs) <= v.infos[len(v.infos)-1].version { - log.Panic("ddl job finished ts should be monotonically increasing") - } - if len(v.infos) > 0 { - // TODO: FinishedTS is not enough, need schema version. But currently there should be no duplicate ddl, - // so the following check is useless - if uint64(event.FinishedTs) <= v.infos[len(v.infos)-1].version { - log.Info("ignore job", - zap.Int64("tableID", int64(v.tableID)), - zap.String("query", event.Query), - zap.Uint64("finishedTS", event.FinishedTs), - zap.Any("infosLen", len(v.infos))) - return - } + // TODO: add a unit test + // TODO: whether need add schema version check + if len(v.infos) != 0 && event.FinishedTs <= v.infos[len(v.infos)-1].version { + log.Warn("already applied ddl, ignore it.", + zap.Int64("tableID", v.tableID), + zap.String("query", event.Query), + zap.Uint64("finishedTS", event.FinishedTs), + zap.Int("infosLen", len(v.infos))) + return } switch model.ActionType(event.Type) { @@ -227,18 +221,29 @@ func (v *versionedTableInfoStore) doApplyDDL(event PersistedDDLEvent) { break } assertEmpty(v.infos, event) - info := common.WrapTableInfo(event.SchemaID, event.SchemaName, event.FinishedTs, event.TableInfo) + info := common.WrapTableInfo(event.CurrentSchemaID, event.CurrentSchemaName, event.FinishedTs, event.TableInfo) info.InitPreSQLs() v.infos = append(v.infos, &tableInfoItem{version: 
uint64(event.FinishedTs), info: info}) case model.ActionRenameTable, model.ActionAddColumn, model.ActionDropColumn: assertNonEmpty(v.infos, event) - info := common.WrapTableInfo(event.SchemaID, event.SchemaName, event.FinishedTs, event.TableInfo) + info := common.WrapTableInfo(event.CurrentSchemaID, event.CurrentSchemaName, event.FinishedTs, event.TableInfo) info.InitPreSQLs() v.infos = append(v.infos, &tableInfoItem{version: uint64(event.FinishedTs), info: info}) - case model.ActionDropTable, model.ActionTruncateTable: + case model.ActionDropTable: v.deleteVersion = uint64(event.FinishedTs) + case model.ActionTruncateTable: + if v.tableID == event.CurrentTableID { + info := common.WrapTableInfo(event.CurrentSchemaID, event.CurrentSchemaName, event.FinishedTs, event.TableInfo) + info.InitPreSQLs() + v.infos = append(v.infos, &tableInfoItem{version: uint64(event.FinishedTs), info: info}) + } else { + if v.tableID != event.PrevTableID { + log.Panic("should not happen") + } + v.deleteVersion = uint64(event.FinishedTs) + } default: // TODO: idenitify unexpected ddl or specify all expected ddl } diff --git a/logservice/schemastore/multi_version_test.go b/logservice/schemastore/multi_version_test.go index 1b702c671..eddf20416 100644 --- a/logservice/schemastore/multi_version_test.go +++ b/logservice/schemastore/multi_version_test.go @@ -20,107 +20,140 @@ import ( "github.com/stretchr/testify/require" ) -func TestCreateTable(t *testing.T) { - version := uint64(100) - store := newEmptyVersionedTableInfoStore(100) - store.setTableInfoInitialized() - createDDLJob := PersistedDDLEvent{ - Type: byte(model.ActionCreateTable), - SchemaID: 10, - SchemaName: "test", - TableInfo: &model.TableInfo{ - ID: 100, - Name: model.NewCIStr("t"), - }, - FinishedTs: version, +func TestCreateTruncateAndDropTable(t *testing.T) { + tableID1 := int64(100) + store1 := newEmptyVersionedTableInfoStore(tableID1) + store1.setTableInfoInitialized() + createVersion := uint64(300) + { + createDDLJob := 
PersistedDDLEvent{ + Type: byte(model.ActionCreateTable), + CurrentSchemaID: 10, + CurrentTableID: tableID1, + CurrentSchemaName: "test", + CurrentTableName: "t", + TableInfo: &model.TableInfo{ + ID: tableID1, + Name: model.NewCIStr("t"), + }, + FinishedTs: createVersion, + } + store1.applyDDL(createDDLJob) } - store.applyDDL(createDDLJob) - require.Equal(t, len(store.infos), 1) -} -func TestRenameTable(t *testing.T) { - version := uint64(100) - store := newEmptyVersionedTableInfoStore(100) - store.setTableInfoInitialized() - createDDLJob := PersistedDDLEvent{ - Type: byte(model.ActionCreateTable), - SchemaID: 10, - SchemaName: "test", - TableInfo: &model.TableInfo{ - ID: 100, - Name: model.NewCIStr("t"), - }, - FinishedTs: version, + tableID2 := tableID1 + 1 + store2 := newEmptyVersionedTableInfoStore(tableID2) + store2.setTableInfoInitialized() + truncateVersion := createVersion + 10 + { + truncateDDLJob := PersistedDDLEvent{ + Type: byte(model.ActionTruncateTable), + CurrentSchemaID: 10, + CurrentTableID: tableID2, + CurrentSchemaName: "test", + CurrentTableName: "t", + PrevTableID: tableID1, + TableInfo: &model.TableInfo{ + ID: tableID1, + Name: model.NewCIStr("t"), + }, + FinishedTs: truncateVersion, + } + store1.applyDDL(truncateDDLJob) + store2.applyDDL(truncateDDLJob) } - renameDDLJob := PersistedDDLEvent{ - Type: byte(model.ActionRenameTable), - SchemaID: 10, - SchemaName: "test", - TableInfo: &model.TableInfo{ - ID: 100, - Name: model.NewCIStr("t2"), - }, - FinishedTs: version + 1, + + dropVersion := truncateVersion + 10 + { + dropDDLJob := PersistedDDLEvent{ + Type: byte(model.ActionDropTable), + CurrentSchemaID: 10, + CurrentTableID: tableID2, + CurrentSchemaName: "test", + CurrentTableName: "t", + TableInfo: &model.TableInfo{ + ID: tableID2, + Name: model.NewCIStr("t"), + }, + FinishedTs: dropVersion, + } + store2.applyDDL(dropDDLJob) + } + + { + require.Equal(t, len(store1.infos), 1) + tableInfo, err := store1.getTableInfo(createVersion) + 
require.Nil(t, err) + require.Equal(t, "t", tableInfo.Name.O) + require.Equal(t, truncateVersion, store1.deleteVersion) } - renameDDLJob2 := PersistedDDLEvent{ - Type: byte(model.ActionRenameTable), - SchemaID: 10, - SchemaName: "test", - TableInfo: &model.TableInfo{ - ID: 100, - Name: model.NewCIStr("t3"), - }, - FinishedTs: version + 10, + + { + require.Equal(t, len(store2.infos), 1) + tableInfo, err := store2.getTableInfo(truncateVersion) + require.Nil(t, err) + require.Equal(t, "t", tableInfo.Name.O) + require.Equal(t, dropVersion, store2.deleteVersion) } - store.applyDDL(createDDLJob) - store.applyDDL(renameDDLJob) - store.applyDDL(renameDDLJob2) - require.Equal(t, len(store.infos), 3) - tableInfo, err := store.getTableInfo(uint64(version)) - require.Nil(t, err) - require.Equal(t, tableInfo.Name.O, "t") - tableInfo, err = store.getTableInfo(uint64(version + 1)) - require.Nil(t, err) - require.Equal(t, tableInfo.Name.O, "t2") - tableInfo, err = store.getTableInfo(uint64(version + 2)) - require.Nil(t, err) - require.Equal(t, tableInfo.Name.O, "t2") - tableInfo, err = store.getTableInfo(uint64(version + 10)) - require.Nil(t, err) - require.Equal(t, tableInfo.Name.O, "t3") } -func TestDropTable(t *testing.T) { - version := uint64(100) - store := newEmptyVersionedTableInfoStore(100) +func TestRenameTable(t *testing.T) { + tableID := int64(100) + store := newEmptyVersionedTableInfoStore(tableID) store.setTableInfoInitialized() - createDDLJob := PersistedDDLEvent{ - Type: byte(model.ActionCreateTable), - SchemaID: 10, - SchemaName: "test", - TableInfo: &model.TableInfo{ - ID: 100, - Name: model.NewCIStr("t"), - }, - FinishedTs: version, + + createVersion := uint64(100) + schemaID1 := int64(10) + { + createDDLJob := PersistedDDLEvent{ + Type: byte(model.ActionCreateTable), + CurrentSchemaID: 10, + CurrentTableID: tableID, + CurrentSchemaName: "test", + CurrentTableName: "t", + TableInfo: &model.TableInfo{ + ID: tableID, + Name: model.NewCIStr("t"), + }, + FinishedTs: 
createVersion, + } + store.applyDDL(createDDLJob) } - dropDDLJob := PersistedDDLEvent{ - Type: byte(model.ActionDropTable), - SchemaID: 10, - SchemaName: "test", - TableInfo: &model.TableInfo{ - ID: 100, - Name: model.NewCIStr("t"), - }, - FinishedTs: version + 10, + + renameVersion := createVersion + 10 + schemaID2 := schemaID1 + 100 + { + renameDDLJob := PersistedDDLEvent{ + Type: byte(model.ActionRenameTable), + CurrentSchemaID: schemaID2, + CurrentTableID: tableID, + CurrentSchemaName: "test2", + CurrentTableName: "t2", + PrevSchemaID: schemaID1, + PrevSchemaName: "test", + PrevTableName: "t", + TableInfo: &model.TableInfo{ + ID: tableID, + Name: model.NewCIStr("t2"), + }, + FinishedTs: renameVersion, + } + store.applyDDL(renameDDLJob) + } + + { + require.Equal(t, len(store.infos), 2) + tableInfo, err := store.getTableInfo(createVersion) + require.Nil(t, err) + require.Equal(t, tableInfo.Name.O, "t") + require.Equal(t, tableInfo.SchemaID, schemaID1) + tableInfo, err = store.getTableInfo(renameVersion) + require.Nil(t, err) + require.Equal(t, tableInfo.Name.O, "t2") + require.Equal(t, tableInfo.SchemaID, schemaID2) } - store.applyDDL(createDDLJob) - store.applyDDL(dropDDLJob) - require.Equal(t, len(store.infos), 1) - tableInfo, err := store.getTableInfo(uint64(version)) - require.Nil(t, err) - require.Equal(t, tableInfo.Name.O, "t") } -// TODO: test gc -// TODO: test register dispatcher and unregister dispatcher +func TestGC(t *testing.T) { + +} diff --git a/logservice/schemastore/persist_storage.go b/logservice/schemastore/persist_storage.go index b23eb28d2..a8a59e8ac 100644 --- a/logservice/schemastore/persist_storage.go +++ b/logservice/schemastore/persist_storage.go @@ -322,8 +322,9 @@ func (p *persistentStorage) fetchTableDDLEvents(tableID int64, tableFilter filte events := make([]common.DDLEvent, 0, len(allTargetTs)) for _, ts := range allTargetTs { rawEvent := readPersistedDDLEvent(storageSnap, ts) + // TODO: if ExtraSchemaName and other fields are empty, 
does it cause any problem? if tableFilter != nil && - tableFilter.ShouldDiscardDDL(model.ActionType(rawEvent.Type), rawEvent.SchemaName, rawEvent.TableName) && + tableFilter.ShouldDiscardDDL(model.ActionType(rawEvent.Type), rawEvent.CurrentSchemaName, rawEvent.CurrentTableName) && tableFilter.ShouldDiscardDDL(model.ActionType(rawEvent.Type), rawEvent.PrevSchemaName, rawEvent.PrevTableName) { continue } @@ -387,7 +388,7 @@ func (p *persistentStorage) fetchTableTriggerDDLEvents(tableFilter filter.Filter for _, ts := range allTargetTs { rawEvent := readPersistedDDLEvent(storageSnap, ts) if tableFilter != nil && - tableFilter.ShouldDiscardDDL(model.ActionType(rawEvent.Type), rawEvent.SchemaName, rawEvent.TableName) && + tableFilter.ShouldDiscardDDL(model.ActionType(rawEvent.Type), rawEvent.CurrentSchemaName, rawEvent.CurrentTableName) && tableFilter.ShouldDiscardDDL(model.ActionType(rawEvent.Type), rawEvent.PrevSchemaName, rawEvent.PrevTableName) { continue } @@ -569,57 +570,53 @@ func (p *persistentStorage) persistUpperBoundPeriodically(ctx context.Context) e } } -func (p *persistentStorage) handleSortedDDLEvents(ddlEvents ...PersistedDDLEvent) error { - // TODO: ignore some ddl event - // TODO: check ddl events are sorted - - for i := range ddlEvents { - p.mu.Lock() - log.Info("handle resolved ddl event", - zap.Int64("schemaID", ddlEvents[i].SchemaID), - zap.Int64("tableID", ddlEvents[i].TableID), - zap.Uint64("finishedTs", ddlEvents[i].FinishedTs), - zap.String("query", ddlEvents[i].Query)) - if shouldSkipDDL(&ddlEvents[i], p.databaseMap, p.tableMap) { - p.mu.Unlock() - continue - } +func (p *persistentStorage) handleDDLJob(job *model.Job) error { + p.mu.Lock() - completePersistedDDLEvent(&ddlEvents[i], p.databaseMap, p.tableMap) + ddlEvent := buildPersistedDDLEventFromJob(job, p.databaseMap, p.tableMap) + // TODO: and some comment to explain why we need skik ddl here and why it is real rare + if shouldSkipDDL(&ddlEvent, p.databaseMap, p.tableMap) { p.mu.Unlock() + 
return nil + } - writePersistedDDLEvent(p.db, &ddlEvents[i]) + p.mu.Unlock() + log.Info("handle resolved ddl event", + zap.Int64("schemaID", ddlEvent.CurrentSchemaID), + zap.Int64("tableID", ddlEvent.CurrentTableID), + zap.Uint64("finishedTs", ddlEvent.FinishedTs), + zap.String("query", ddlEvent.Query)) - p.mu.Lock() - var err error - if p.tableTriggerDDLHistory, err = updateDDLHistory( - &ddlEvents[i], - p.databaseMap, - p.tableMap, - p.tablesDDLHistory, - p.tableTriggerDDLHistory); err != nil { - p.mu.Unlock() - return err - } - if err := updateDatabaseInfoAndTableInfo(&ddlEvents[i], p.databaseMap, p.tableMap); err != nil { - p.mu.Unlock() - return err - } - if err := updateRegisteredTableInfoStore(ddlEvents[i], p.tableInfoStoreMap); err != nil { - p.mu.Unlock() - return err - } + writePersistedDDLEvent(p.db, &ddlEvent) + + p.mu.Lock() + var err error + if p.tableTriggerDDLHistory, err = updateDDLHistory( + &ddlEvent, + p.databaseMap, + p.tableMap, + p.tablesDDLHistory, + p.tableTriggerDDLHistory); err != nil { p.mu.Unlock() + return err } - + if err := updateDatabaseInfoAndTableInfo(&ddlEvent, p.databaseMap, p.tableMap); err != nil { + p.mu.Unlock() + return err + } + if err := updateRegisteredTableInfoStore(ddlEvent, p.tableInfoStoreMap); err != nil { + p.mu.Unlock() + return err + } + p.mu.Unlock() return nil } -func completePersistedDDLEvent( - event *PersistedDDLEvent, +func buildPersistedDDLEventFromJob( + job *model.Job, databaseMap map[int64]*BasicDatabaseInfo, tableMap map[int64]*BasicTableInfo, -) { +) PersistedDDLEvent { getSchemaName := func(schemaID int64) string { databaseInfo, ok := databaseMap[schemaID] if !ok { @@ -645,17 +642,31 @@ func completePersistedDDLEvent( return tableInfo.SchemaID } + event := PersistedDDLEvent{ + ID: job.ID, + Type: byte(job.Type), + CurrentSchemaID: job.SchemaID, + CurrentTableID: job.TableID, + Query: job.Query, + SchemaVersion: job.BinlogInfo.SchemaVersion, + DBInfo: job.BinlogInfo.DBInfo, + TableInfo: 
job.BinlogInfo.TableInfo, + FinishedTs: job.BinlogInfo.FinishedTS, + BDRRole: job.BDRRole, + CDCWriteSource: job.CDCWriteSource, + } + switch model.ActionType(event.Type) { case model.ActionCreateSchema, model.ActionDropSchema: log.Info("completePersistedDDLEvent for create/drop schema", zap.Any("type", event.Type), - zap.Int64("schemaID", event.SchemaID), + zap.Int64("schemaID", event.CurrentSchemaID), zap.String("schemaName", event.DBInfo.Name.O)) - event.SchemaName = event.DBInfo.Name.O + event.CurrentSchemaName = event.DBInfo.Name.O case model.ActionCreateTable: - event.SchemaName = getSchemaName(event.SchemaID) - event.TableName = event.TableInfo.Name.O + event.CurrentSchemaName = getSchemaName(event.CurrentSchemaID) + event.CurrentTableName = event.TableInfo.Name.O case model.ActionDropTable, model.ActionAddColumn, model.ActionDropColumn, @@ -669,31 +680,32 @@ func completePersistedDDLEvent( model.ActionShardRowID, model.ActionModifyTableComment, model.ActionRenameIndex: - event.SchemaName = getSchemaName(event.SchemaID) - event.TableName = getTableName(event.TableID) + event.CurrentSchemaName = getSchemaName(event.CurrentSchemaID) + event.CurrentTableName = getTableName(event.CurrentTableID) case model.ActionTruncateTable: - event.SchemaName = getSchemaName(event.SchemaID) - event.TableName = getTableName(event.TableID) - // TODO: different with tidb, will it be confusing? 
- event.PrevTableID = event.TableID - event.TableID = event.TableInfo.ID + // only table id change + event.PrevTableID = event.CurrentTableID + event.CurrentTableID = event.TableInfo.ID + event.CurrentSchemaName = getSchemaName(event.CurrentSchemaID) + event.CurrentTableName = getTableName(event.PrevTableID) case model.ActionRenameTable: - event.PrevSchemaID = getSchemaID(event.TableID) + // TODO: check the following fields is set correctly + // schema id/schema name/table name may be changed + event.PrevSchemaID = getSchemaID(event.CurrentTableID) event.PrevSchemaName = getSchemaName(event.PrevSchemaID) - event.PrevTableName = getTableName(event.TableID) - // TODO: is the following SchemaName and TableName correct? - event.SchemaName = getSchemaName(event.SchemaID) - event.TableName = event.TableInfo.Name.O - case model.ActionCreateView, - // FIXME: support create tables - model.ActionCreateTables: - + event.PrevTableName = getTableName(event.CurrentTableID) + event.CurrentSchemaName = getSchemaName(event.CurrentSchemaID) + event.CurrentTableName = event.TableInfo.Name.O + case model.ActionCreateView: // ignore + case model.ActionCreateTables: + // FIXME: support create tables default: log.Panic("unknown ddl type", zap.Any("ddlType", event.Type), zap.String("DDL", event.Query)) } + return event } // TODO: add some comment to explain why we should skip some ddl @@ -704,22 +716,22 @@ func shouldSkipDDL( ) bool { switch model.ActionType(event.Type) { case model.ActionCreateSchema: - if _, ok := databaseMap[event.SchemaID]; ok { + if _, ok := databaseMap[event.CurrentSchemaID]; ok { log.Warn("database already exists. 
ignore DDL ", zap.String("DDL", event.Query), zap.Int64("jobID", event.ID), - zap.Int64("schemaID", event.SchemaID), + zap.Int64("schemaID", event.CurrentSchemaID), zap.Uint64("finishTs", event.FinishedTs), zap.Int64("jobSchemaVersion", event.SchemaVersion)) return true } case model.ActionCreateTable: - if _, ok := tableMap[event.TableID]; ok { + if _, ok := tableMap[event.CurrentTableID]; ok { log.Warn("table already exists. ignore DDL ", zap.String("DDL", event.Query), zap.Int64("jobID", event.ID), - zap.Int64("schemaID", event.SchemaID), - zap.Int64("tableID", event.TableID), + zap.Int64("schemaID", event.CurrentSchemaID), + zap.Int64("tableID", event.CurrentTableID), zap.Uint64("finishTs", event.FinishedTs), zap.Int64("jobSchemaVersion", event.SchemaVersion)) return true @@ -748,13 +760,13 @@ func updateDDLHistory( } case model.ActionDropSchema: tableTriggerDDLHistory = append(tableTriggerDDLHistory, ddlEvent.FinishedTs) - for tableID := range databaseMap[ddlEvent.SchemaID].Tables { + for tableID := range databaseMap[ddlEvent.CurrentSchemaID].Tables { addTableHistory(tableID) } case model.ActionCreateTable, model.ActionDropTable: tableTriggerDDLHistory = append(tableTriggerDDLHistory, ddlEvent.FinishedTs) - addTableHistory(ddlEvent.TableID) + addTableHistory(ddlEvent.CurrentTableID) case model.ActionAddColumn, model.ActionDropColumn, model.ActionAddIndex, @@ -767,13 +779,13 @@ func updateDDLHistory( model.ActionShardRowID, model.ActionModifyTableComment, model.ActionRenameIndex: - addTableHistory(ddlEvent.TableID) + addTableHistory(ddlEvent.CurrentTableID) case model.ActionTruncateTable: - addTableHistory(ddlEvent.TableID) + addTableHistory(ddlEvent.CurrentTableID) addTableHistory(ddlEvent.PrevTableID) case model.ActionRenameTable: tableTriggerDDLHistory = append(tableTriggerDDLHistory, ddlEvent.FinishedTs) - addTableHistory(ddlEvent.TableID) + addTableHistory(ddlEvent.CurrentTableID) default: log.Panic("unknown ddl type", zap.Any("ddlType", ddlEvent.Type), @@ 
-831,19 +843,19 @@ func updateDatabaseInfoAndTableInfo( switch model.ActionType(event.Type) { case model.ActionCreateSchema: - databaseMap[event.SchemaID] = &BasicDatabaseInfo{ - Name: event.SchemaName, + databaseMap[event.CurrentSchemaID] = &BasicDatabaseInfo{ + Name: event.CurrentSchemaName, Tables: make(map[int64]bool), } case model.ActionDropSchema: - for tableID := range databaseMap[event.SchemaID].Tables { + for tableID := range databaseMap[event.CurrentSchemaID].Tables { delete(tableMap, tableID) } - delete(databaseMap, event.SchemaID) + delete(databaseMap, event.CurrentSchemaID) case model.ActionCreateTable: - createTable(event.SchemaID, event.TableID) + createTable(event.CurrentSchemaID, event.CurrentTableID) case model.ActionDropTable: - dropTable(event.SchemaID, event.TableID) + dropTable(event.CurrentSchemaID, event.CurrentTableID) case model.ActionAddColumn, model.ActionDropColumn, model.ActionAddIndex, @@ -854,16 +866,15 @@ func updateDatabaseInfoAndTableInfo( model.ActionRebaseAutoID: // ignore case model.ActionTruncateTable: - dropTable(event.SchemaID, event.PrevTableID) - createTable(event.SchemaID, event.TableID) + dropTable(event.CurrentSchemaID, event.PrevTableID) + createTable(event.CurrentSchemaID, event.CurrentTableID) case model.ActionRenameTable: - oldSchemaID := tableMap[event.TableID].SchemaID - if oldSchemaID != event.SchemaID { - tableMap[event.TableID].SchemaID = event.SchemaID - removeTableFromDB(oldSchemaID, event.TableID) - addTableToDB(event.SchemaID, event.TableID) + if event.PrevSchemaID != event.CurrentSchemaID { + tableMap[event.CurrentTableID].SchemaID = event.CurrentSchemaID + removeTableFromDB(event.PrevSchemaID, event.CurrentTableID) + addTableToDB(event.CurrentSchemaID, event.CurrentTableID) } - tableMap[event.TableID].Name = event.TableInfo.Name.O + tableMap[event.CurrentTableID].Name = event.CurrentTableName case model.ActionSetDefaultValue, model.ActionShardRowID, model.ActionModifyTableComment, @@ -900,15 +911,20 @@ 
func updateRegisteredTableInfoStore( case model.ActionDropTable, model.ActionAddColumn, model.ActionDropColumn, - model.ActionTruncateTable, model.ActionModifyColumn, model.ActionRebaseAutoID, model.ActionSetDefaultValue, model.ActionShardRowID, model.ActionModifyTableComment, model.ActionRenameIndex: - store, ok := tableInfoStoreMap[event.TableID] - if ok { + if store, ok := tableInfoStoreMap[event.CurrentTableID]; ok { + store.applyDDL(event) + } + case model.ActionTruncateTable: + if store, ok := tableInfoStoreMap[event.PrevTableID]; ok { + store.applyDDL(event) + } + if store, ok := tableInfoStoreMap[event.CurrentTableID]; ok { store.applyDDL(event) } default: @@ -921,11 +937,12 @@ func updateRegisteredTableInfoStore( func buildDDLEvent(rawEvent *PersistedDDLEvent, tableFilter filter.Filter) common.DDLEvent { ddlEvent := common.DDLEvent{ - Type: rawEvent.Type, - SchemaID: rawEvent.SchemaID, - TableID: rawEvent.TableID, - SchemaName: rawEvent.SchemaName, - TableName: rawEvent.TableName, + Type: rawEvent.Type, + // TODO: whether the following fields are needed + SchemaID: rawEvent.CurrentSchemaID, + TableID: rawEvent.CurrentTableID, + SchemaName: rawEvent.CurrentSchemaName, + TableName: rawEvent.CurrentTableName, Query: rawEvent.Query, TableInfo: rawEvent.TableInfo, FinishedTs: rawEvent.FinishedTs, @@ -951,43 +968,43 @@ func buildDDLEvent(rawEvent *PersistedDDLEvent, tableFilter filter.Filter) commo case model.ActionDropSchema: ddlEvent.NeedDroppedTables = &common.InfluencedTables{ InfluenceType: common.InfluenceTypeDB, - SchemaID: rawEvent.SchemaID, + SchemaID: rawEvent.CurrentSchemaID, } ddlEvent.TableNameChange = &common.TableNameChange{ - DropDatabaseName: rawEvent.SchemaName, + DropDatabaseName: rawEvent.CurrentSchemaName, } case model.ActionCreateTable: // TODO: support create partition table ddlEvent.NeedAddedTables = []common.Table{ { - SchemaID: rawEvent.SchemaID, - TableID: rawEvent.TableID, + SchemaID: rawEvent.CurrentSchemaID, + TableID: 
rawEvent.CurrentTableID, }, } ddlEvent.TableNameChange = &common.TableNameChange{ AddName: []common.SchemaTableName{ { - SchemaName: rawEvent.SchemaName, - TableName: rawEvent.TableName, + SchemaName: rawEvent.CurrentSchemaName, + TableName: rawEvent.CurrentTableName, }, }, } case model.ActionDropTable: ddlEvent.BlockedTables = &common.InfluencedTables{ InfluenceType: common.InfluenceTypeNormal, - TableIDs: []int64{rawEvent.TableID, heartbeatpb.DDLSpan.TableID}, - SchemaID: rawEvent.SchemaID, + TableIDs: []int64{rawEvent.CurrentTableID, heartbeatpb.DDLSpan.TableID}, + SchemaID: rawEvent.CurrentSchemaID, } ddlEvent.NeedDroppedTables = &common.InfluencedTables{ InfluenceType: common.InfluenceTypeNormal, - TableIDs: []int64{rawEvent.TableID}, - SchemaID: rawEvent.SchemaID, + TableIDs: []int64{rawEvent.CurrentTableID}, + SchemaID: rawEvent.CurrentSchemaID, } ddlEvent.TableNameChange = &common.TableNameChange{ DropName: []common.SchemaTableName{ { - SchemaName: rawEvent.SchemaName, - TableName: rawEvent.TableName, + SchemaName: rawEvent.CurrentSchemaName, + TableName: rawEvent.CurrentTableName, }, }, } @@ -995,44 +1012,44 @@ func buildDDLEvent(rawEvent *PersistedDDLEvent, tableFilter filter.Filter) commo ddlEvent.NeedDroppedTables = &common.InfluencedTables{ InfluenceType: common.InfluenceTypeNormal, TableIDs: []int64{rawEvent.PrevTableID}, - SchemaID: rawEvent.SchemaID, + SchemaID: rawEvent.PrevSchemaID, } ddlEvent.NeedAddedTables = []common.Table{ { - SchemaID: rawEvent.SchemaID, - TableID: rawEvent.TableID, + SchemaID: rawEvent.CurrentSchemaID, + TableID: rawEvent.CurrentTableID, }, } case model.ActionRenameTable: ignorePrevTable := tableFilter != nil && tableFilter.ShouldIgnoreTable(rawEvent.PrevSchemaName, rawEvent.PrevTableName) - ignoreCurrentTable := tableFilter != nil && tableFilter.ShouldIgnoreTable(rawEvent.SchemaName, rawEvent.TableName) + ignoreCurrentTable := tableFilter != nil && tableFilter.ShouldIgnoreTable(rawEvent.CurrentSchemaName, 
rawEvent.CurrentTableName) var addName, dropName []common.SchemaTableName if !ignorePrevTable { ddlEvent.BlockedTables = &common.InfluencedTables{ InfluenceType: common.InfluenceTypeNormal, - TableIDs: []int64{rawEvent.TableID, heartbeatpb.DDLSpan.TableID}, + TableIDs: []int64{rawEvent.PrevTableID, heartbeatpb.DDLSpan.TableID}, SchemaID: rawEvent.PrevSchemaID, } ddlEvent.NeedDroppedTables = &common.InfluencedTables{ InfluenceType: common.InfluenceTypeNormal, - TableIDs: []int64{rawEvent.TableID}, + TableIDs: []int64{rawEvent.PrevTableID}, SchemaID: rawEvent.PrevSchemaID, } dropName = append(dropName, common.SchemaTableName{ - SchemaName: rawEvent.PrevSchemaName, - TableName: rawEvent.PrevTableName, + SchemaName: rawEvent.CurrentSchemaName, + TableName: rawEvent.CurrentTableName, }) } if !ignoreCurrentTable { ddlEvent.NeedAddedTables = []common.Table{ { - SchemaID: rawEvent.SchemaID, - TableID: rawEvent.TableID, + SchemaID: rawEvent.CurrentSchemaID, + TableID: rawEvent.CurrentTableID, }, } addName = append(addName, common.SchemaTableName{ - SchemaName: rawEvent.SchemaName, - TableName: rawEvent.TableName, + SchemaName: rawEvent.CurrentSchemaName, + TableName: rawEvent.CurrentTableName, }) } ddlEvent.TableNameChange = &common.TableNameChange{ diff --git a/logservice/schemastore/persist_storage_test.go b/logservice/schemastore/persist_storage_test.go index 11ad1dceb..a3fc9d021 100644 --- a/logservice/schemastore/persist_storage_test.go +++ b/logservice/schemastore/persist_storage_test.go @@ -20,12 +20,8 @@ import ( "testing" "github.com/cockroachdb/pebble" - "github.com/flowbehappy/tigate/heartbeatpb" - "github.com/flowbehappy/tigate/pkg/common" - "github.com/flowbehappy/tigate/pkg/filter" "github.com/pingcap/log" "github.com/pingcap/tidb/pkg/parser/model" - "github.com/pingcap/tiflow/pkg/config" "github.com/stretchr/testify/require" "go.uber.org/zap" ) @@ -159,836 +155,873 @@ func TestReadWriteMeta(t *testing.T) { } } -func TestBuildVersionedTableInfoStore(t 
*testing.T) { - dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) - err := os.RemoveAll(dbPath) - require.Nil(t, err) - - gcTs := uint64(1000) - schemaID := int64(50) - tableID := int64(99) - databaseInfo := make(map[int64]*model.DBInfo) - databaseInfo[schemaID] = &model.DBInfo{ - ID: schemaID, - Name: model.NewCIStr("test"), - Tables: []*model.TableInfo{ - { - ID: tableID, - Name: model.NewCIStr("t1"), - }, - }, - } - pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) - - require.Equal(t, 1, len(pStorage.databaseMap)) - require.Equal(t, "test", pStorage.databaseMap[schemaID].Name) - - { - store := newEmptyVersionedTableInfoStore(tableID) - pStorage.buildVersionedTableInfoStore(store) - tableInfo, err := store.getTableInfo(gcTs) - require.Nil(t, err) - require.Equal(t, "t1", tableInfo.Name.O) - require.Equal(t, tableID, tableInfo.ID) - } - - // rename table - renameVersion := uint64(1500) - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionRenameTable), - SchemaID: schemaID, - TableID: tableID, - SchemaVersion: 3000, - TableInfo: &model.TableInfo{ - ID: tableID, - Name: model.NewCIStr("t2"), - }, - FinishedTs: renameVersion, - } - err = pStorage.handleSortedDDLEvents(ddlEvent) - require.Nil(t, err) - } - - // create another table - tableID2 := tableID + 1 - createVersion := renameVersion + 200 - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionCreateTable), - SchemaID: schemaID, - TableID: tableID2, - SchemaVersion: 3500, - TableInfo: &model.TableInfo{ - ID: tableID2, - Name: model.NewCIStr("t3"), - }, - FinishedTs: createVersion, - } - err = pStorage.handleSortedDDLEvents(ddlEvent) - require.Nil(t, err) - } - - upperBound := UpperBoundMeta{ - FinishedDDLTs: 3000, - SchemaVersion: 4000, - ResolvedTs: 2000, - } - pStorage = loadPersistentStorageForTest(pStorage.db, gcTs, upperBound) - { - store := newEmptyVersionedTableInfoStore(tableID) - pStorage.buildVersionedTableInfoStore(store) - require.Equal(t, 2, len(store.infos)) - 
tableInfo, err := store.getTableInfo(gcTs) - require.Nil(t, err) - require.Equal(t, "t1", tableInfo.Name.O) - require.Equal(t, tableID, tableInfo.ID) - tableInfo2, err := store.getTableInfo(renameVersion) - require.Nil(t, err) - require.Equal(t, "t2", tableInfo2.Name.O) - - renameVersion2 := uint64(3000) - store.applyDDL(PersistedDDLEvent{ - Type: byte(model.ActionRenameTable), - SchemaID: schemaID, - TableID: tableID, - SchemaVersion: 3000, - TableInfo: &model.TableInfo{ - ID: tableID, - Name: model.NewCIStr("t3"), - }, - FinishedTs: renameVersion2, - }) - tableInfo3, err := store.getTableInfo(renameVersion2) - require.Nil(t, err) - require.Equal(t, "t3", tableInfo3.Name.O) - } - - { - store := newEmptyVersionedTableInfoStore(tableID2) - pStorage.buildVersionedTableInfoStore(store) - require.Equal(t, 1, len(store.infos)) - tableInfo, err := store.getTableInfo(createVersion) - require.Nil(t, err) - require.Equal(t, "t3", tableInfo.Name.O) - require.Equal(t, tableID2, tableInfo.ID) - } -} - -func TestHandleCreateDropSchemaTableDDL(t *testing.T) { - dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) - err := os.RemoveAll(dbPath) - require.Nil(t, err) - pStorage := newEmptyPersistentStorageForTest(dbPath) - - // create db - schemaID := int64(300) - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionCreateSchema), - SchemaID: schemaID, - SchemaVersion: 100, - DBInfo: &model.DBInfo{ - ID: schemaID, - Name: model.NewCIStr("test"), - }, - TableInfo: nil, - FinishedTs: 200, - } - pStorage.handleSortedDDLEvents(ddlEvent) - - require.Equal(t, 1, len(pStorage.databaseMap)) - require.Equal(t, "test", pStorage.databaseMap[schemaID].Name) - require.Equal(t, 1, len(pStorage.tableTriggerDDLHistory)) - require.Equal(t, uint64(200), pStorage.tableTriggerDDLHistory[0]) - } - - // create a table - tableID := int64(100) - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionCreateTable), - SchemaID: schemaID, - TableID: tableID, - SchemaVersion: 101, - TableInfo: 
&model.TableInfo{ - Name: model.NewCIStr("t1"), - }, - FinishedTs: 201, - } - pStorage.handleSortedDDLEvents(ddlEvent) - - require.Equal(t, 1, len(pStorage.databaseMap[schemaID].Tables)) - require.Equal(t, 1, len(pStorage.tableMap)) - require.Equal(t, 2, len(pStorage.tableTriggerDDLHistory)) - require.Equal(t, uint64(201), pStorage.tableTriggerDDLHistory[1]) - require.Equal(t, 1, len(pStorage.tablesDDLHistory)) - require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID])) - } - - // create another table - tableID2 := int64(105) - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionCreateTable), - SchemaID: schemaID, - TableID: tableID2, - SchemaVersion: 103, - TableInfo: &model.TableInfo{ - Name: model.NewCIStr("t2"), - }, - FinishedTs: 203, - } - pStorage.handleSortedDDLEvents(ddlEvent) - - require.Equal(t, 2, len(pStorage.databaseMap[schemaID].Tables)) - require.Equal(t, 2, len(pStorage.tableMap)) - require.Equal(t, 3, len(pStorage.tableTriggerDDLHistory)) - require.Equal(t, uint64(203), pStorage.tableTriggerDDLHistory[2]) - require.Equal(t, 2, len(pStorage.tablesDDLHistory)) - require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID2])) - require.Equal(t, uint64(203), pStorage.tablesDDLHistory[tableID2][0]) - } - - // drop a table - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionDropTable), - SchemaID: schemaID, - TableID: tableID2, - SchemaVersion: 105, - TableInfo: nil, - FinishedTs: 205, - } - pStorage.handleSortedDDLEvents(ddlEvent) - - require.Equal(t, 1, len(pStorage.databaseMap[schemaID].Tables)) - require.Equal(t, 1, len(pStorage.tableMap)) - require.Equal(t, 4, len(pStorage.tableTriggerDDLHistory)) - require.Equal(t, uint64(205), pStorage.tableTriggerDDLHistory[3]) - require.Equal(t, 2, len(pStorage.tablesDDLHistory)) - require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID])) - require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID2])) - require.Equal(t, uint64(205), pStorage.tablesDDLHistory[tableID2][1]) - } - - // 
truncate a table - tableID3 := int64(112) - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionTruncateTable), - SchemaID: schemaID, - TableID: tableID, - SchemaVersion: 107, - TableInfo: &model.TableInfo{ - ID: tableID3, - }, - FinishedTs: 207, - } - pStorage.handleSortedDDLEvents(ddlEvent) - - require.Equal(t, 1, len(pStorage.databaseMap[schemaID].Tables)) - require.Equal(t, 1, len(pStorage.tableMap)) - require.Equal(t, 4, len(pStorage.tableTriggerDDLHistory)) - require.Equal(t, 3, len(pStorage.tablesDDLHistory)) - require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID])) - require.Equal(t, uint64(207), pStorage.tablesDDLHistory[tableID][1]) - require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID3])) - require.Equal(t, uint64(207), pStorage.tablesDDLHistory[tableID3][0]) - } - - // drop db - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionDropSchema), - SchemaID: schemaID, - SchemaVersion: 200, - DBInfo: &model.DBInfo{ - ID: schemaID, - Name: model.NewCIStr("test"), - }, - TableInfo: nil, - FinishedTs: 300, - } - - pStorage.handleSortedDDLEvents(ddlEvent) - - require.Equal(t, 0, len(pStorage.databaseMap)) - require.Equal(t, 0, len(pStorage.tableMap)) - require.Equal(t, 5, len(pStorage.tableTriggerDDLHistory)) - require.Equal(t, uint64(300), pStorage.tableTriggerDDLHistory[4]) - require.Equal(t, 3, len(pStorage.tablesDDLHistory)) - require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID])) - require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID2])) - require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID3])) - require.Equal(t, uint64(300), pStorage.tablesDDLHistory[tableID3][1]) - } -} - -func TestHandleRenameTable(t *testing.T) { - dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) - err := os.RemoveAll(dbPath) - require.Nil(t, err) - - gcTs := uint64(500) - schemaID1 := int64(300) - schemaID2 := int64(305) - - databaseInfo := make(map[int64]*model.DBInfo) - databaseInfo[schemaID1] = &model.DBInfo{ - ID: schemaID1, - Name: 
model.NewCIStr("test"), - } - databaseInfo[schemaID2] = &model.DBInfo{ - ID: schemaID2, - Name: model.NewCIStr("test2"), - } - pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) - - // create a table - tableID := int64(100) - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionCreateTable), - SchemaID: schemaID1, - TableID: tableID, - SchemaVersion: 501, - TableInfo: &model.TableInfo{ - ID: tableID, - Name: model.NewCIStr("t1"), - }, - FinishedTs: 601, - } - pStorage.handleSortedDDLEvents(ddlEvent) - require.Equal(t, 2, len(pStorage.databaseMap)) - require.Equal(t, 1, len(pStorage.databaseMap[schemaID1].Tables)) - require.Equal(t, 0, len(pStorage.databaseMap[schemaID2].Tables)) - require.Equal(t, schemaID1, pStorage.tableMap[tableID].SchemaID) - require.Equal(t, "t1", pStorage.tableMap[tableID].Name) - } - - // rename table to a different db - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionRenameTable), - SchemaID: schemaID2, - TableID: tableID, - SchemaVersion: 505, - TableInfo: &model.TableInfo{ - ID: tableID, - Name: model.NewCIStr("t2"), - }, - FinishedTs: 605, - } - pStorage.handleSortedDDLEvents(ddlEvent) - require.Equal(t, 2, len(pStorage.databaseMap)) - require.Equal(t, 0, len(pStorage.databaseMap[schemaID1].Tables)) - require.Equal(t, 1, len(pStorage.databaseMap[schemaID2].Tables)) - require.Equal(t, schemaID2, pStorage.tableMap[tableID].SchemaID) - require.Equal(t, "t2", pStorage.tableMap[tableID].Name) - } - - { - ddlEvents, err := pStorage.fetchTableDDLEvents(tableID, nil, 601, 700) - require.Nil(t, err) - require.Equal(t, 1, len(ddlEvents)) - // rename table event - require.Equal(t, uint64(605), ddlEvents[0].FinishedTs) - require.Equal(t, "test2", ddlEvents[0].SchemaName) - require.Equal(t, "t2", ddlEvents[0].TableName) - require.Equal(t, common.InfluenceTypeNormal, ddlEvents[0].BlockedTables.InfluenceType) - require.Equal(t, schemaID1, ddlEvents[0].BlockedTables.SchemaID) - require.Equal(t, tableID, 
ddlEvents[0].BlockedTables.TableIDs[0]) - // TODO: don't count on the order - require.Equal(t, heartbeatpb.DDLSpan.TableID, ddlEvents[0].BlockedTables.TableIDs[1]) - require.Equal(t, tableID, ddlEvents[0].NeedDroppedTables.TableIDs[0]) - - require.Equal(t, tableID, ddlEvents[0].NeedAddedTables[0].TableID) - - require.Equal(t, "test2", ddlEvents[0].TableNameChange.AddName[0].SchemaName) - require.Equal(t, "t2", ddlEvents[0].TableNameChange.AddName[0].TableName) - require.Equal(t, "test", ddlEvents[0].TableNameChange.DropName[0].SchemaName) - require.Equal(t, "t1", ddlEvents[0].TableNameChange.DropName[0].TableName) - } - - // test filter: after rename, the table is filtered out - { - filterConfig := &config.FilterConfig{ - Rules: []string{"test.*"}, - } - tableFilter, err := filter.NewFilter(filterConfig, "", false) - require.Nil(t, err) - ddlEvents, err := pStorage.fetchTableDDLEvents(tableID, tableFilter, 601, 700) - require.Nil(t, err) - require.Equal(t, 1, len(ddlEvents)) - require.Equal(t, common.InfluenceTypeNormal, ddlEvents[0].BlockedTables.InfluenceType) - require.Equal(t, schemaID1, ddlEvents[0].BlockedTables.SchemaID) - require.Equal(t, tableID, ddlEvents[0].BlockedTables.TableIDs[0]) - // TODO: don't count on the order - require.Equal(t, heartbeatpb.DDLSpan.TableID, ddlEvents[0].BlockedTables.TableIDs[1]) - require.Equal(t, tableID, ddlEvents[0].NeedDroppedTables.TableIDs[0]) - - require.Nil(t, ddlEvents[0].NeedAddedTables) - - require.Equal(t, 0, len(ddlEvents[0].TableNameChange.AddName)) - require.Equal(t, "test", ddlEvents[0].TableNameChange.DropName[0].SchemaName) - require.Equal(t, "t1", ddlEvents[0].TableNameChange.DropName[0].TableName) - } - - // test filter: before rename, the table is filtered out, so only table trigger can get the event - { - filterConfig := &config.FilterConfig{ - Rules: []string{"test2.*"}, - } - tableFilter, err := filter.NewFilter(filterConfig, "", false) - require.Nil(t, err) - triggerDDLEvents, err := 
pStorage.fetchTableTriggerDDLEvents(tableFilter, 601, 10) - require.Nil(t, err) - require.Equal(t, 1, len(triggerDDLEvents)) - require.Nil(t, triggerDDLEvents[0].BlockedTables) - require.Nil(t, triggerDDLEvents[0].NeedDroppedTables) - - require.Equal(t, tableID, triggerDDLEvents[0].NeedAddedTables[0].TableID) - - require.Equal(t, "test2", triggerDDLEvents[0].TableNameChange.AddName[0].SchemaName) - require.Equal(t, "t2", triggerDDLEvents[0].TableNameChange.AddName[0].TableName) - require.Equal(t, 0, len(triggerDDLEvents[0].TableNameChange.DropName)) - } - - // test filter: the table is always filtered out - { - // check table trigger events cannot get the event - filterConfig := &config.FilterConfig{ - Rules: []string{"test3.*"}, - } - tableFilter, err := filter.NewFilter(filterConfig, "", false) - require.Nil(t, err) - triggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(tableFilter, 601, 10) - require.Nil(t, err) - require.Equal(t, 0, len(triggerDDLEvents)) - } -} - -func TestFetchDDLEventsBasic(t *testing.T) { - dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) - err := os.RemoveAll(dbPath) - require.Nil(t, err) - pStorage := newEmptyPersistentStorageForTest(dbPath) - - // create db - schemaID := int64(300) - schemaName := "test" - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionCreateSchema), - SchemaID: schemaID, - SchemaVersion: 100, - DBInfo: &model.DBInfo{ - ID: schemaID, - Name: model.NewCIStr(schemaName), - }, - TableInfo: nil, - FinishedTs: 200, - } - pStorage.handleSortedDDLEvents(ddlEvent) - } - - // create a table - tableID := int64(100) - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionCreateTable), - SchemaID: schemaID, - TableID: tableID, - SchemaVersion: 501, - TableInfo: &model.TableInfo{ - ID: tableID, - Name: model.NewCIStr("t1"), - }, - FinishedTs: 601, - } - pStorage.handleSortedDDLEvents(ddlEvent) - } - - // rename table - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionRenameTable), - SchemaID: 
schemaID, - TableID: tableID, - SchemaVersion: 505, - TableInfo: &model.TableInfo{ - ID: tableID, - Name: model.NewCIStr("t2"), - }, - FinishedTs: 605, - } - pStorage.handleSortedDDLEvents(ddlEvent) - } - - // truncate table - tableID2 := int64(105) - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionTruncateTable), - SchemaID: schemaID, - TableID: tableID, - SchemaVersion: 507, - TableInfo: &model.TableInfo{ - ID: tableID2, - Name: model.NewCIStr("t2"), - }, - FinishedTs: 607, - } - pStorage.handleSortedDDLEvents(ddlEvent) - } - - // create another table - tableID3 := int64(200) - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionCreateTable), - SchemaID: schemaID, - TableID: tableID3, - SchemaVersion: 509, - TableInfo: &model.TableInfo{ - ID: tableID, - Name: model.NewCIStr("t3"), - }, - FinishedTs: 609, - } - pStorage.handleSortedDDLEvents(ddlEvent) - } - - // drop newly created table - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionDropTable), - SchemaID: schemaID, - TableID: tableID3, - SchemaVersion: 511, - TableInfo: &model.TableInfo{ - ID: tableID, - Name: model.NewCIStr("t3"), - }, - FinishedTs: 611, - } - pStorage.handleSortedDDLEvents(ddlEvent) - } - - // drop db - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionDropSchema), - SchemaID: schemaID, - SchemaVersion: 600, - DBInfo: &model.DBInfo{ - ID: schemaID, - Name: model.NewCIStr(schemaName), - }, - TableInfo: nil, - FinishedTs: 700, - } - pStorage.handleSortedDDLEvents(ddlEvent) - } - - // fetch table ddl events - { - ddlEvents, err := pStorage.fetchTableDDLEvents(tableID, nil, 601, 700) - require.Nil(t, err) - require.Equal(t, 2, len(ddlEvents)) - // rename table event - require.Equal(t, uint64(605), ddlEvents[0].FinishedTs) - // truncate table event - require.Equal(t, uint64(607), ddlEvents[1].FinishedTs) - require.Equal(t, "test", ddlEvents[1].SchemaName) - require.Equal(t, "t2", ddlEvents[1].TableName) - require.Equal(t, common.InfluenceTypeNormal, 
ddlEvents[1].NeedDroppedTables.InfluenceType) - require.Equal(t, schemaID, ddlEvents[1].NeedDroppedTables.SchemaID) - require.Equal(t, 1, len(ddlEvents[1].NeedDroppedTables.TableIDs)) - require.Equal(t, tableID, ddlEvents[1].NeedDroppedTables.TableIDs[0]) - require.Equal(t, 1, len(ddlEvents[1].NeedAddedTables)) - require.Equal(t, schemaID, ddlEvents[1].NeedAddedTables[0].SchemaID) - require.Equal(t, tableID2, ddlEvents[1].NeedAddedTables[0].TableID) - } - - // fetch table ddl events for another table - { - // TODO: test return error if start ts is smaller than 607 - ddlEvents, err := pStorage.fetchTableDDLEvents(tableID2, nil, 607, 700) - require.Nil(t, err) - require.Equal(t, 1, len(ddlEvents)) - // drop db event - require.Equal(t, uint64(700), ddlEvents[0].FinishedTs) - require.Equal(t, common.InfluenceTypeDB, ddlEvents[0].NeedDroppedTables.InfluenceType) - require.Equal(t, schemaID, ddlEvents[0].NeedDroppedTables.SchemaID) - } - - // fetch table ddl events again - { - ddlEvents, err := pStorage.fetchTableDDLEvents(tableID3, nil, 609, 700) - require.Nil(t, err) - require.Equal(t, 1, len(ddlEvents)) - // drop table event - require.Equal(t, uint64(611), ddlEvents[0].FinishedTs) - require.Equal(t, common.InfluenceTypeNormal, ddlEvents[0].NeedDroppedTables.InfluenceType) - require.Equal(t, 1, len(ddlEvents[0].NeedDroppedTables.TableIDs)) - require.Equal(t, tableID3, ddlEvents[0].NeedDroppedTables.TableIDs[0]) - require.Equal(t, schemaID, ddlEvents[0].NeedDroppedTables.SchemaID) - } - - // fetch all table trigger ddl events - { - tableTriggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(nil, 0, 10) - require.Nil(t, err) - require.Equal(t, 6, len(tableTriggerDDLEvents)) - // create db event - require.Equal(t, uint64(200), tableTriggerDDLEvents[0].FinishedTs) - // create table event - require.Equal(t, uint64(601), tableTriggerDDLEvents[1].FinishedTs) - require.Equal(t, 1, len(tableTriggerDDLEvents[1].NeedAddedTables)) - require.Equal(t, schemaID, 
tableTriggerDDLEvents[1].NeedAddedTables[0].SchemaID) - require.Equal(t, tableID, tableTriggerDDLEvents[1].NeedAddedTables[0].TableID) - require.Equal(t, schemaName, tableTriggerDDLEvents[1].TableNameChange.AddName[0].SchemaName) - require.Equal(t, "t1", tableTriggerDDLEvents[1].TableNameChange.AddName[0].TableName) - // rename table event - require.Equal(t, uint64(605), tableTriggerDDLEvents[2].FinishedTs) - // create table event - require.Equal(t, uint64(609), tableTriggerDDLEvents[3].FinishedTs) - // drop table event - require.Equal(t, uint64(611), tableTriggerDDLEvents[4].FinishedTs) - require.Equal(t, common.InfluenceTypeNormal, tableTriggerDDLEvents[4].NeedDroppedTables.InfluenceType) - require.Equal(t, schemaID, tableTriggerDDLEvents[4].NeedDroppedTables.SchemaID) - require.Equal(t, tableID3, tableTriggerDDLEvents[4].NeedDroppedTables.TableIDs[0]) - require.Equal(t, schemaName, tableTriggerDDLEvents[4].TableNameChange.DropName[0].SchemaName) - require.Equal(t, "t3", tableTriggerDDLEvents[4].TableNameChange.DropName[0].TableName) - require.Equal(t, common.InfluenceTypeNormal, tableTriggerDDLEvents[4].BlockedTables.InfluenceType) - require.Equal(t, 2, len(tableTriggerDDLEvents[4].BlockedTables.TableIDs)) - require.Equal(t, tableID3, tableTriggerDDLEvents[4].BlockedTables.TableIDs[0]) - // TODO: don't count on the order - require.Equal(t, heartbeatpb.DDLSpan.TableID, tableTriggerDDLEvents[4].BlockedTables.TableIDs[1]) - // drop db event - require.Equal(t, uint64(700), tableTriggerDDLEvents[5].FinishedTs) - require.Equal(t, schemaName, tableTriggerDDLEvents[5].TableNameChange.DropDatabaseName) - } - - // fetch partial table trigger ddl events - { - tableTriggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(nil, 0, 2) - require.Nil(t, err) - require.Equal(t, 2, len(tableTriggerDDLEvents)) - require.Equal(t, uint64(200), tableTriggerDDLEvents[0].FinishedTs) - require.Equal(t, uint64(601), tableTriggerDDLEvents[1].FinishedTs) - } - - // TODO: test filter -} 
- -func TestGC(t *testing.T) { - dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) - err := os.RemoveAll(dbPath) - require.Nil(t, err) - - schemaID := int64(300) - gcTs := uint64(600) - tableID1 := int64(100) - tableID2 := int64(200) - - databaseInfo := make(map[int64]*model.DBInfo) - databaseInfo[schemaID] = &model.DBInfo{ - ID: schemaID, - Name: model.NewCIStr("test"), - Tables: []*model.TableInfo{ - { - ID: tableID1, - Name: model.NewCIStr("t1"), - }, - { - ID: tableID2, - Name: model.NewCIStr("t2"), - }, - }, - } - pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) - - // create table t3 - tableID3 := int64(500) - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionCreateTable), - SchemaID: schemaID, - TableID: tableID3, - SchemaVersion: 501, - TableInfo: &model.TableInfo{ - ID: tableID3, - Name: model.NewCIStr("t3"), - }, - FinishedTs: 601, - } - pStorage.handleSortedDDLEvents(ddlEvent) - } - - // drop table t2 - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionDropTable), - SchemaID: schemaID, - TableID: tableID2, - SchemaVersion: 503, - TableInfo: nil, - FinishedTs: 603, - } - pStorage.handleSortedDDLEvents(ddlEvent) - } - - // rename table t1 - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionRenameTable), - SchemaID: schemaID, - TableID: tableID1, - SchemaVersion: 505, - TableInfo: &model.TableInfo{ - ID: tableID1, - Name: model.NewCIStr("t1_r"), - }, - FinishedTs: 605, - } - pStorage.handleSortedDDLEvents(ddlEvent) - } - - // write upper bound - newUpperBound := UpperBoundMeta{ - FinishedDDLTs: 700, - SchemaVersion: 509, - ResolvedTs: 705, - } - { - writeUpperBoundMeta(pStorage.db, newUpperBound) - } - - pStorage.registerTable(tableID1, gcTs+1) - - // mock gc - newGcTs := uint64(603) - { - databaseInfo := make(map[int64]*model.DBInfo) - databaseInfo[schemaID] = &model.DBInfo{ - ID: schemaID, - Name: model.NewCIStr("test"), - Tables: []*model.TableInfo{ - { - ID: tableID1, - Name: model.NewCIStr("t1"), - }, - 
{ - ID: tableID3, - Name: model.NewCIStr("t3"), - }, - }, - } - tablesInKVSnap := mockWriteKVSnapOnDisk(pStorage.db, newGcTs, databaseInfo) - - require.Equal(t, 3, len(pStorage.tableTriggerDDLHistory)) - require.Equal(t, 3, len(pStorage.tablesDDLHistory)) - pStorage.cleanObseleteDataInMemory(newGcTs, tablesInKVSnap) - require.Equal(t, 1, len(pStorage.tableTriggerDDLHistory)) - require.Equal(t, uint64(605), pStorage.tableTriggerDDLHistory[0]) - require.Equal(t, 1, len(pStorage.tablesDDLHistory)) - require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID1])) - tableInfoT1, err := pStorage.getTableInfo(tableID1, newGcTs) - require.Nil(t, err) - require.Equal(t, "t1", tableInfoT1.Name.O) - tableInfoT1, err = pStorage.getTableInfo(tableID1, 606) - require.Nil(t, err) - require.Equal(t, "t1_r", tableInfoT1.Name.O) - } - - pStorage = loadPersistentStorageForTest(pStorage.db, newGcTs, newUpperBound) - { - require.Equal(t, newGcTs, pStorage.gcTs) - require.Equal(t, newUpperBound, pStorage.upperBound) - require.Equal(t, 1, len(pStorage.tableTriggerDDLHistory)) - require.Equal(t, uint64(605), pStorage.tableTriggerDDLHistory[0]) - require.Equal(t, 1, len(pStorage.tablesDDLHistory)) - require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID1])) - } - - // TODO: test obsolete data can be removed -} - -func TestGetAllPhysicalTables(t *testing.T) { - dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) - err := os.RemoveAll(dbPath) - require.Nil(t, err) - - schemaID := int64(300) - gcTs := uint64(600) - tableID1 := int64(100) - tableID2 := int64(200) - - databaseInfo := make(map[int64]*model.DBInfo) - databaseInfo[schemaID] = &model.DBInfo{ - ID: schemaID, - Name: model.NewCIStr("test"), - Tables: []*model.TableInfo{ - { - ID: tableID1, - Name: model.NewCIStr("t1"), - }, - { - ID: tableID2, - Name: model.NewCIStr("t2"), - }, - }, - } - pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) - - // create table t3 - tableID3 := int64(500) - { - ddlEvent := 
PersistedDDLEvent{ - Type: byte(model.ActionCreateTable), - SchemaID: schemaID, - TableID: tableID3, - SchemaVersion: 501, - TableInfo: &model.TableInfo{ - ID: tableID3, - Name: model.NewCIStr("t3"), - }, - FinishedTs: 601, - } - pStorage.handleSortedDDLEvents(ddlEvent) - } - - // drop table t2 - { - ddlEvent := PersistedDDLEvent{ - Type: byte(model.ActionDropTable), - SchemaID: schemaID, - TableID: tableID2, - SchemaVersion: 503, - TableInfo: nil, - FinishedTs: 603, - } - pStorage.handleSortedDDLEvents(ddlEvent) - } - - { - allPhysicalTables, err := pStorage.getAllPhysicalTables(600, nil) - require.Nil(t, err) - require.Equal(t, 2, len(allPhysicalTables)) - } - - { - allPhysicalTables, err := pStorage.getAllPhysicalTables(601, nil) - require.Nil(t, err) - require.Equal(t, 3, len(allPhysicalTables)) - } - - { - allPhysicalTables, err := pStorage.getAllPhysicalTables(603, nil) - require.Nil(t, err) - require.Equal(t, 2, len(allPhysicalTables)) - } -} +// func TestBuildVersionedTableInfoStore(t *testing.T) { +// dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) +// err := os.RemoveAll(dbPath) +// require.Nil(t, err) + +// gcTs := uint64(1000) +// schemaID := int64(50) +// tableID := int64(99) +// databaseInfo := make(map[int64]*model.DBInfo) +// databaseInfo[schemaID] = &model.DBInfo{ +// ID: schemaID, +// Name: model.NewCIStr("test"), +// Tables: []*model.TableInfo{ +// { +// ID: tableID, +// Name: model.NewCIStr("t1"), +// }, +// }, +// } +// pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) + +// require.Equal(t, 1, len(pStorage.databaseMap)) +// require.Equal(t, "test", pStorage.databaseMap[schemaID].Name) + +// { +// store := newEmptyVersionedTableInfoStore(tableID) +// pStorage.buildVersionedTableInfoStore(store) +// tableInfo, err := store.getTableInfo(gcTs) +// require.Nil(t, err) +// require.Equal(t, "t1", tableInfo.Name.O) +// require.Equal(t, tableID, tableInfo.ID) +// } + +// // rename table +// renameVersion := uint64(1500) +// { +// 
ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionRenameTable), +// SchemaID: schemaID, +// TableID: tableID, +// SchemaVersion: 3000, +// TableInfo: &model.TableInfo{ +// ID: tableID, +// Name: model.NewCIStr("t2"), +// }, +// FinishedTs: renameVersion, +// } +// err = pStorage.handleSortedDDLEvents(ddlEvent) +// require.Nil(t, err) +// } + +// // create another table +// tableID2 := tableID + 1 +// createVersion := renameVersion + 200 +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionCreateTable), +// SchemaID: schemaID, +// TableID: tableID2, +// SchemaVersion: 3500, +// TableInfo: &model.TableInfo{ +// ID: tableID2, +// Name: model.NewCIStr("t3"), +// }, +// FinishedTs: createVersion, +// } +// err = pStorage.handleSortedDDLEvents(ddlEvent) +// require.Nil(t, err) +// } + +// upperBound := UpperBoundMeta{ +// FinishedDDLTs: 3000, +// SchemaVersion: 4000, +// ResolvedTs: 2000, +// } +// pStorage = loadPersistentStorageForTest(pStorage.db, gcTs, upperBound) +// { +// store := newEmptyVersionedTableInfoStore(tableID) +// pStorage.buildVersionedTableInfoStore(store) +// require.Equal(t, 2, len(store.infos)) +// tableInfo, err := store.getTableInfo(gcTs) +// require.Nil(t, err) +// require.Equal(t, "t1", tableInfo.Name.O) +// require.Equal(t, tableID, tableInfo.ID) +// tableInfo2, err := store.getTableInfo(renameVersion) +// require.Nil(t, err) +// require.Equal(t, "t2", tableInfo2.Name.O) + +// renameVersion2 := uint64(3000) +// store.applyDDL(PersistedDDLEvent{ +// Type: byte(model.ActionRenameTable), +// SchemaID: schemaID, +// TableID: tableID, +// SchemaVersion: 3000, +// TableInfo: &model.TableInfo{ +// ID: tableID, +// Name: model.NewCIStr("t3"), +// }, +// FinishedTs: renameVersion2, +// }) +// tableInfo3, err := store.getTableInfo(renameVersion2) +// require.Nil(t, err) +// require.Equal(t, "t3", tableInfo3.Name.O) +// } + +// { +// store := newEmptyVersionedTableInfoStore(tableID2) +// pStorage.buildVersionedTableInfoStore(store) 
+// require.Equal(t, 1, len(store.infos)) +// tableInfo, err := store.getTableInfo(createVersion) +// require.Nil(t, err) +// require.Equal(t, "t3", tableInfo.Name.O) +// require.Equal(t, tableID2, tableInfo.ID) +// } + +// // truncate table +// tableID3 := tableID2 + 1 +// truncateVersion := createVersion + 200 +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionTruncateTable), +// SchemaID: schemaID, +// TableID: tableID2, +// SchemaVersion: 3600, +// TableInfo: &model.TableInfo{ +// ID: tableID3, +// Name: model.NewCIStr("t4"), +// }, +// FinishedTs: truncateVersion, +// } +// err = pStorage.handleSortedDDLEvents(ddlEvent) +// require.Nil(t, err) +// } + +// { +// store := newEmptyVersionedTableInfoStore(tableID2) +// pStorage.buildVersionedTableInfoStore(store) +// require.Equal(t, 1, len(store.infos)) +// require.Equal(t, truncateVersion, store.deleteVersion) +// } + +// { +// store := newEmptyVersionedTableInfoStore(tableID3) +// pStorage.buildVersionedTableInfoStore(store) +// require.Equal(t, 1, len(store.infos)) +// tableInfo, err := store.getTableInfo(truncateVersion) +// require.Nil(t, err) +// require.Equal(t, "t4", tableInfo.Name.O) +// require.Equal(t, tableID3, tableInfo.ID) +// } + +// } + +// func TestHandleCreateDropSchemaTableDDL(t *testing.T) { +// dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) +// err := os.RemoveAll(dbPath) +// require.Nil(t, err) +// pStorage := newEmptyPersistentStorageForTest(dbPath) + +// // create db +// schemaID := int64(300) +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionCreateSchema), +// SchemaID: schemaID, +// SchemaVersion: 100, +// DBInfo: &model.DBInfo{ +// ID: schemaID, +// Name: model.NewCIStr("test"), +// }, +// TableInfo: nil, +// FinishedTs: 200, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) + +// require.Equal(t, 1, len(pStorage.databaseMap)) +// require.Equal(t, "test", pStorage.databaseMap[schemaID].Name) +// require.Equal(t, 1, 
len(pStorage.tableTriggerDDLHistory)) +// require.Equal(t, uint64(200), pStorage.tableTriggerDDLHistory[0]) +// } + +// // create a table +// tableID := int64(100) +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionCreateTable), +// SchemaID: schemaID, +// TableID: tableID, +// SchemaVersion: 101, +// TableInfo: &model.TableInfo{ +// Name: model.NewCIStr("t1"), +// }, +// FinishedTs: 201, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) + +// require.Equal(t, 1, len(pStorage.databaseMap[schemaID].Tables)) +// require.Equal(t, 1, len(pStorage.tableMap)) +// require.Equal(t, 2, len(pStorage.tableTriggerDDLHistory)) +// require.Equal(t, uint64(201), pStorage.tableTriggerDDLHistory[1]) +// require.Equal(t, 1, len(pStorage.tablesDDLHistory)) +// require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID])) +// } + +// // create another table +// tableID2 := int64(105) +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionCreateTable), +// SchemaID: schemaID, +// TableID: tableID2, +// SchemaVersion: 103, +// TableInfo: &model.TableInfo{ +// Name: model.NewCIStr("t2"), +// }, +// FinishedTs: 203, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) + +// require.Equal(t, 2, len(pStorage.databaseMap[schemaID].Tables)) +// require.Equal(t, 2, len(pStorage.tableMap)) +// require.Equal(t, 3, len(pStorage.tableTriggerDDLHistory)) +// require.Equal(t, uint64(203), pStorage.tableTriggerDDLHistory[2]) +// require.Equal(t, 2, len(pStorage.tablesDDLHistory)) +// require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID2])) +// require.Equal(t, uint64(203), pStorage.tablesDDLHistory[tableID2][0]) +// } + +// // drop a table +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionDropTable), +// SchemaID: schemaID, +// TableID: tableID2, +// SchemaVersion: 105, +// TableInfo: nil, +// FinishedTs: 205, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) + +// require.Equal(t, 1, len(pStorage.databaseMap[schemaID].Tables)) +// require.Equal(t, 
1, len(pStorage.tableMap)) +// require.Equal(t, 4, len(pStorage.tableTriggerDDLHistory)) +// require.Equal(t, uint64(205), pStorage.tableTriggerDDLHistory[3]) +// require.Equal(t, 2, len(pStorage.tablesDDLHistory)) +// require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID])) +// require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID2])) +// require.Equal(t, uint64(205), pStorage.tablesDDLHistory[tableID2][1]) +// } + +// // truncate a table +// tableID3 := int64(112) +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionTruncateTable), +// SchemaID: schemaID, +// TableID: tableID, +// SchemaVersion: 107, +// TableInfo: &model.TableInfo{ +// ID: tableID3, +// }, +// FinishedTs: 207, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) + +// require.Equal(t, 1, len(pStorage.databaseMap[schemaID].Tables)) +// require.Equal(t, 1, len(pStorage.tableMap)) +// require.Equal(t, 4, len(pStorage.tableTriggerDDLHistory)) +// require.Equal(t, 3, len(pStorage.tablesDDLHistory)) +// require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID])) +// require.Equal(t, uint64(207), pStorage.tablesDDLHistory[tableID][1]) +// require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID3])) +// require.Equal(t, uint64(207), pStorage.tablesDDLHistory[tableID3][0]) +// } + +// // drop db +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionDropSchema), +// SchemaID: schemaID, +// SchemaVersion: 200, +// DBInfo: &model.DBInfo{ +// ID: schemaID, +// Name: model.NewCIStr("test"), +// }, +// TableInfo: nil, +// FinishedTs: 300, +// } + +// pStorage.handleSortedDDLEvents(ddlEvent) + +// require.Equal(t, 0, len(pStorage.databaseMap)) +// require.Equal(t, 0, len(pStorage.tableMap)) +// require.Equal(t, 5, len(pStorage.tableTriggerDDLHistory)) +// require.Equal(t, uint64(300), pStorage.tableTriggerDDLHistory[4]) +// require.Equal(t, 3, len(pStorage.tablesDDLHistory)) +// require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID])) +// require.Equal(t, 2, 
len(pStorage.tablesDDLHistory[tableID2])) +// require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID3])) +// require.Equal(t, uint64(300), pStorage.tablesDDLHistory[tableID3][1]) +// } +// } + +// func TestHandleRenameTable(t *testing.T) { +// dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) +// err := os.RemoveAll(dbPath) +// require.Nil(t, err) + +// gcTs := uint64(500) +// schemaID1 := int64(300) +// schemaID2 := int64(305) + +// databaseInfo := make(map[int64]*model.DBInfo) +// databaseInfo[schemaID1] = &model.DBInfo{ +// ID: schemaID1, +// Name: model.NewCIStr("test"), +// } +// databaseInfo[schemaID2] = &model.DBInfo{ +// ID: schemaID2, +// Name: model.NewCIStr("test2"), +// } +// pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) + +// // create a table +// tableID := int64(100) +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionCreateTable), +// SchemaID: schemaID1, +// TableID: tableID, +// SchemaVersion: 501, +// TableInfo: &model.TableInfo{ +// ID: tableID, +// Name: model.NewCIStr("t1"), +// }, +// FinishedTs: 601, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// require.Equal(t, 2, len(pStorage.databaseMap)) +// require.Equal(t, 1, len(pStorage.databaseMap[schemaID1].Tables)) +// require.Equal(t, 0, len(pStorage.databaseMap[schemaID2].Tables)) +// require.Equal(t, schemaID1, pStorage.tableMap[tableID].SchemaID) +// require.Equal(t, "t1", pStorage.tableMap[tableID].Name) +// } + +// // rename table to a different db +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionRenameTable), +// SchemaID: schemaID2, +// TableID: tableID, +// SchemaVersion: 505, +// TableInfo: &model.TableInfo{ +// ID: tableID, +// Name: model.NewCIStr("t2"), +// }, +// FinishedTs: 605, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// require.Equal(t, 2, len(pStorage.databaseMap)) +// require.Equal(t, 0, len(pStorage.databaseMap[schemaID1].Tables)) +// require.Equal(t, 1, len(pStorage.databaseMap[schemaID2].Tables)) 
+// require.Equal(t, schemaID2, pStorage.tableMap[tableID].SchemaID) +// require.Equal(t, "t2", pStorage.tableMap[tableID].Name) +// } + +// { +// ddlEvents, err := pStorage.fetchTableDDLEvents(tableID, nil, 601, 700) +// require.Nil(t, err) +// require.Equal(t, 1, len(ddlEvents)) +// // rename table event +// require.Equal(t, uint64(605), ddlEvents[0].FinishedTs) +// require.Equal(t, "test2", ddlEvents[0].SchemaName) +// require.Equal(t, "t2", ddlEvents[0].TableName) +// require.Equal(t, common.InfluenceTypeNormal, ddlEvents[0].BlockedTables.InfluenceType) +// require.Equal(t, schemaID1, ddlEvents[0].BlockedTables.SchemaID) +// require.Equal(t, tableID, ddlEvents[0].BlockedTables.TableIDs[0]) +// // TODO: don't count on the order +// require.Equal(t, heartbeatpb.DDLSpan.TableID, ddlEvents[0].BlockedTables.TableIDs[1]) +// require.Equal(t, tableID, ddlEvents[0].NeedDroppedTables.TableIDs[0]) + +// require.Equal(t, tableID, ddlEvents[0].NeedAddedTables[0].TableID) + +// require.Equal(t, "test2", ddlEvents[0].TableNameChange.AddName[0].SchemaName) +// require.Equal(t, "t2", ddlEvents[0].TableNameChange.AddName[0].TableName) +// require.Equal(t, "test", ddlEvents[0].TableNameChange.DropName[0].SchemaName) +// require.Equal(t, "t1", ddlEvents[0].TableNameChange.DropName[0].TableName) +// } + +// // test filter: after rename, the table is filtered out +// { +// filterConfig := &config.FilterConfig{ +// Rules: []string{"test.*"}, +// } +// tableFilter, err := filter.NewFilter(filterConfig, "", false) +// require.Nil(t, err) +// ddlEvents, err := pStorage.fetchTableDDLEvents(tableID, tableFilter, 601, 700) +// require.Nil(t, err) +// require.Equal(t, 1, len(ddlEvents)) +// require.Equal(t, common.InfluenceTypeNormal, ddlEvents[0].BlockedTables.InfluenceType) +// require.Equal(t, schemaID1, ddlEvents[0].BlockedTables.SchemaID) +// require.Equal(t, tableID, ddlEvents[0].BlockedTables.TableIDs[0]) +// // TODO: don't count on the order +// require.Equal(t, 
heartbeatpb.DDLSpan.TableID, ddlEvents[0].BlockedTables.TableIDs[1]) +// require.Equal(t, tableID, ddlEvents[0].NeedDroppedTables.TableIDs[0]) + +// require.Nil(t, ddlEvents[0].NeedAddedTables) + +// require.Equal(t, 0, len(ddlEvents[0].TableNameChange.AddName)) +// require.Equal(t, "test", ddlEvents[0].TableNameChange.DropName[0].SchemaName) +// require.Equal(t, "t1", ddlEvents[0].TableNameChange.DropName[0].TableName) +// } + +// // test filter: before rename, the table is filtered out, so only table trigger can get the event +// { +// filterConfig := &config.FilterConfig{ +// Rules: []string{"test2.*"}, +// } +// tableFilter, err := filter.NewFilter(filterConfig, "", false) +// require.Nil(t, err) +// triggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(tableFilter, 601, 10) +// require.Nil(t, err) +// require.Equal(t, 1, len(triggerDDLEvents)) +// require.Nil(t, triggerDDLEvents[0].BlockedTables) +// require.Nil(t, triggerDDLEvents[0].NeedDroppedTables) + +// require.Equal(t, tableID, triggerDDLEvents[0].NeedAddedTables[0].TableID) + +// require.Equal(t, "test2", triggerDDLEvents[0].TableNameChange.AddName[0].SchemaName) +// require.Equal(t, "t2", triggerDDLEvents[0].TableNameChange.AddName[0].TableName) +// require.Equal(t, 0, len(triggerDDLEvents[0].TableNameChange.DropName)) +// } + +// // test filter: the table is always filtered out +// { +// // check table trigger events cannot get the event +// filterConfig := &config.FilterConfig{ +// Rules: []string{"test3.*"}, +// } +// tableFilter, err := filter.NewFilter(filterConfig, "", false) +// require.Nil(t, err) +// triggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(tableFilter, 601, 10) +// require.Nil(t, err) +// require.Equal(t, 0, len(triggerDDLEvents)) +// } +// } + +// func TestFetchDDLEventsBasic(t *testing.T) { +// dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) +// err := os.RemoveAll(dbPath) +// require.Nil(t, err) +// pStorage := newEmptyPersistentStorageForTest(dbPath) + +// // 
create db +// schemaID := int64(300) +// schemaName := "test" +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionCreateSchema), +// SchemaID: schemaID, +// SchemaVersion: 100, +// DBInfo: &model.DBInfo{ +// ID: schemaID, +// Name: model.NewCIStr(schemaName), +// }, +// TableInfo: nil, +// FinishedTs: 200, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// } + +// // create a table +// tableID := int64(100) +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionCreateTable), +// SchemaID: schemaID, +// TableID: tableID, +// SchemaVersion: 501, +// TableInfo: &model.TableInfo{ +// ID: tableID, +// Name: model.NewCIStr("t1"), +// }, +// FinishedTs: 601, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// } + +// // rename table +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionRenameTable), +// SchemaID: schemaID, +// TableID: tableID, +// SchemaVersion: 505, +// TableInfo: &model.TableInfo{ +// ID: tableID, +// Name: model.NewCIStr("t2"), +// }, +// FinishedTs: 605, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// } + +// // truncate table +// tableID2 := int64(105) +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionTruncateTable), +// SchemaID: schemaID, +// TableID: tableID, +// SchemaVersion: 507, +// TableInfo: &model.TableInfo{ +// ID: tableID2, +// Name: model.NewCIStr("t2"), +// }, +// FinishedTs: 607, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// } + +// // create another table +// tableID3 := int64(200) +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionCreateTable), +// SchemaID: schemaID, +// TableID: tableID3, +// SchemaVersion: 509, +// TableInfo: &model.TableInfo{ +// ID: tableID, +// Name: model.NewCIStr("t3"), +// }, +// FinishedTs: 609, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// } + +// // drop newly created table +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionDropTable), +// SchemaID: schemaID, +// TableID: 
tableID3, +// SchemaVersion: 511, +// TableInfo: &model.TableInfo{ +// ID: tableID, +// Name: model.NewCIStr("t3"), +// }, +// FinishedTs: 611, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// } + +// // drop db +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionDropSchema), +// SchemaID: schemaID, +// SchemaVersion: 600, +// DBInfo: &model.DBInfo{ +// ID: schemaID, +// Name: model.NewCIStr(schemaName), +// }, +// TableInfo: nil, +// FinishedTs: 700, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// } + +// // fetch table ddl events +// { +// ddlEvents, err := pStorage.fetchTableDDLEvents(tableID, nil, 601, 700) +// require.Nil(t, err) +// require.Equal(t, 2, len(ddlEvents)) +// // rename table event +// require.Equal(t, uint64(605), ddlEvents[0].FinishedTs) +// // truncate table event +// require.Equal(t, uint64(607), ddlEvents[1].FinishedTs) +// require.Equal(t, "test", ddlEvents[1].SchemaName) +// require.Equal(t, "t2", ddlEvents[1].TableName) +// require.Equal(t, common.InfluenceTypeNormal, ddlEvents[1].NeedDroppedTables.InfluenceType) +// require.Equal(t, schemaID, ddlEvents[1].NeedDroppedTables.SchemaID) +// require.Equal(t, 1, len(ddlEvents[1].NeedDroppedTables.TableIDs)) +// require.Equal(t, tableID, ddlEvents[1].NeedDroppedTables.TableIDs[0]) +// require.Equal(t, 1, len(ddlEvents[1].NeedAddedTables)) +// require.Equal(t, schemaID, ddlEvents[1].NeedAddedTables[0].SchemaID) +// require.Equal(t, tableID2, ddlEvents[1].NeedAddedTables[0].TableID) +// } + +// // fetch table ddl events for another table +// { +// // TODO: test return error if start ts is smaller than 607 +// ddlEvents, err := pStorage.fetchTableDDLEvents(tableID2, nil, 607, 700) +// require.Nil(t, err) +// require.Equal(t, 1, len(ddlEvents)) +// // drop db event +// require.Equal(t, uint64(700), ddlEvents[0].FinishedTs) +// require.Equal(t, common.InfluenceTypeDB, ddlEvents[0].NeedDroppedTables.InfluenceType) +// require.Equal(t, schemaID, 
ddlEvents[0].NeedDroppedTables.SchemaID) +// } + +// // fetch table ddl events again +// { +// ddlEvents, err := pStorage.fetchTableDDLEvents(tableID3, nil, 609, 700) +// require.Nil(t, err) +// require.Equal(t, 1, len(ddlEvents)) +// // drop table event +// require.Equal(t, uint64(611), ddlEvents[0].FinishedTs) +// require.Equal(t, common.InfluenceTypeNormal, ddlEvents[0].NeedDroppedTables.InfluenceType) +// require.Equal(t, 1, len(ddlEvents[0].NeedDroppedTables.TableIDs)) +// require.Equal(t, tableID3, ddlEvents[0].NeedDroppedTables.TableIDs[0]) +// require.Equal(t, schemaID, ddlEvents[0].NeedDroppedTables.SchemaID) +// } + +// // fetch all table trigger ddl events +// { +// tableTriggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(nil, 0, 10) +// require.Nil(t, err) +// require.Equal(t, 6, len(tableTriggerDDLEvents)) +// // create db event +// require.Equal(t, uint64(200), tableTriggerDDLEvents[0].FinishedTs) +// // create table event +// require.Equal(t, uint64(601), tableTriggerDDLEvents[1].FinishedTs) +// require.Equal(t, 1, len(tableTriggerDDLEvents[1].NeedAddedTables)) +// require.Equal(t, schemaID, tableTriggerDDLEvents[1].NeedAddedTables[0].SchemaID) +// require.Equal(t, tableID, tableTriggerDDLEvents[1].NeedAddedTables[0].TableID) +// require.Equal(t, schemaName, tableTriggerDDLEvents[1].TableNameChange.AddName[0].SchemaName) +// require.Equal(t, "t1", tableTriggerDDLEvents[1].TableNameChange.AddName[0].TableName) +// // rename table event +// require.Equal(t, uint64(605), tableTriggerDDLEvents[2].FinishedTs) +// // create table event +// require.Equal(t, uint64(609), tableTriggerDDLEvents[3].FinishedTs) +// // drop table event +// require.Equal(t, uint64(611), tableTriggerDDLEvents[4].FinishedTs) +// require.Equal(t, common.InfluenceTypeNormal, tableTriggerDDLEvents[4].NeedDroppedTables.InfluenceType) +// require.Equal(t, schemaID, tableTriggerDDLEvents[4].NeedDroppedTables.SchemaID) +// require.Equal(t, tableID3, 
tableTriggerDDLEvents[4].NeedDroppedTables.TableIDs[0]) +// require.Equal(t, schemaName, tableTriggerDDLEvents[4].TableNameChange.DropName[0].SchemaName) +// require.Equal(t, "t3", tableTriggerDDLEvents[4].TableNameChange.DropName[0].TableName) +// require.Equal(t, common.InfluenceTypeNormal, tableTriggerDDLEvents[4].BlockedTables.InfluenceType) +// require.Equal(t, 2, len(tableTriggerDDLEvents[4].BlockedTables.TableIDs)) +// require.Equal(t, tableID3, tableTriggerDDLEvents[4].BlockedTables.TableIDs[0]) +// // TODO: don't count on the order +// require.Equal(t, heartbeatpb.DDLSpan.TableID, tableTriggerDDLEvents[4].BlockedTables.TableIDs[1]) +// // drop db event +// require.Equal(t, uint64(700), tableTriggerDDLEvents[5].FinishedTs) +// require.Equal(t, schemaName, tableTriggerDDLEvents[5].TableNameChange.DropDatabaseName) +// } + +// // fetch partial table trigger ddl events +// { +// tableTriggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(nil, 0, 2) +// require.Nil(t, err) +// require.Equal(t, 2, len(tableTriggerDDLEvents)) +// require.Equal(t, uint64(200), tableTriggerDDLEvents[0].FinishedTs) +// require.Equal(t, uint64(601), tableTriggerDDLEvents[1].FinishedTs) +// } + +// // TODO: test filter +// } + +// func TestGC(t *testing.T) { +// dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) +// err := os.RemoveAll(dbPath) +// require.Nil(t, err) + +// schemaID := int64(300) +// gcTs := uint64(600) +// tableID1 := int64(100) +// tableID2 := int64(200) + +// databaseInfo := make(map[int64]*model.DBInfo) +// databaseInfo[schemaID] = &model.DBInfo{ +// ID: schemaID, +// Name: model.NewCIStr("test"), +// Tables: []*model.TableInfo{ +// { +// ID: tableID1, +// Name: model.NewCIStr("t1"), +// }, +// { +// ID: tableID2, +// Name: model.NewCIStr("t2"), +// }, +// }, +// } +// pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) + +// // create table t3 +// tableID3 := int64(500) +// { +// ddlEvent := PersistedDDLEvent{ +// Type: 
byte(model.ActionCreateTable), +// SchemaID: schemaID, +// TableID: tableID3, +// SchemaVersion: 501, +// TableInfo: &model.TableInfo{ +// ID: tableID3, +// Name: model.NewCIStr("t3"), +// }, +// FinishedTs: 601, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// } + +// // drop table t2 +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionDropTable), +// SchemaID: schemaID, +// TableID: tableID2, +// SchemaVersion: 503, +// TableInfo: nil, +// FinishedTs: 603, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// } + +// // rename table t1 +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionRenameTable), +// SchemaID: schemaID, +// TableID: tableID1, +// SchemaVersion: 505, +// TableInfo: &model.TableInfo{ +// ID: tableID1, +// Name: model.NewCIStr("t1_r"), +// }, +// FinishedTs: 605, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// } + +// // write upper bound +// newUpperBound := UpperBoundMeta{ +// FinishedDDLTs: 700, +// SchemaVersion: 509, +// ResolvedTs: 705, +// } +// { +// writeUpperBoundMeta(pStorage.db, newUpperBound) +// } + +// pStorage.registerTable(tableID1, gcTs+1) + +// // mock gc +// newGcTs := uint64(603) +// { +// databaseInfo := make(map[int64]*model.DBInfo) +// databaseInfo[schemaID] = &model.DBInfo{ +// ID: schemaID, +// Name: model.NewCIStr("test"), +// Tables: []*model.TableInfo{ +// { +// ID: tableID1, +// Name: model.NewCIStr("t1"), +// }, +// { +// ID: tableID3, +// Name: model.NewCIStr("t3"), +// }, +// }, +// } +// tablesInKVSnap := mockWriteKVSnapOnDisk(pStorage.db, newGcTs, databaseInfo) + +// require.Equal(t, 3, len(pStorage.tableTriggerDDLHistory)) +// require.Equal(t, 3, len(pStorage.tablesDDLHistory)) +// pStorage.cleanObseleteDataInMemory(newGcTs, tablesInKVSnap) +// require.Equal(t, 1, len(pStorage.tableTriggerDDLHistory)) +// require.Equal(t, uint64(605), pStorage.tableTriggerDDLHistory[0]) +// require.Equal(t, 1, len(pStorage.tablesDDLHistory)) +// require.Equal(t, 1, 
len(pStorage.tablesDDLHistory[tableID1])) +// tableInfoT1, err := pStorage.getTableInfo(tableID1, newGcTs) +// require.Nil(t, err) +// require.Equal(t, "t1", tableInfoT1.Name.O) +// tableInfoT1, err = pStorage.getTableInfo(tableID1, 606) +// require.Nil(t, err) +// require.Equal(t, "t1_r", tableInfoT1.Name.O) +// } + +// pStorage = loadPersistentStorageForTest(pStorage.db, newGcTs, newUpperBound) +// { +// require.Equal(t, newGcTs, pStorage.gcTs) +// require.Equal(t, newUpperBound, pStorage.upperBound) +// require.Equal(t, 1, len(pStorage.tableTriggerDDLHistory)) +// require.Equal(t, uint64(605), pStorage.tableTriggerDDLHistory[0]) +// require.Equal(t, 1, len(pStorage.tablesDDLHistory)) +// require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID1])) +// } + +// // TODO: test obsolete data can be removed +// } + +// func TestGetAllPhysicalTables(t *testing.T) { +// dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) +// err := os.RemoveAll(dbPath) +// require.Nil(t, err) + +// schemaID := int64(300) +// gcTs := uint64(600) +// tableID1 := int64(100) +// tableID2 := int64(200) + +// databaseInfo := make(map[int64]*model.DBInfo) +// databaseInfo[schemaID] = &model.DBInfo{ +// ID: schemaID, +// Name: model.NewCIStr("test"), +// Tables: []*model.TableInfo{ +// { +// ID: tableID1, +// Name: model.NewCIStr("t1"), +// }, +// { +// ID: tableID2, +// Name: model.NewCIStr("t2"), +// }, +// }, +// } +// pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) + +// // create table t3 +// tableID3 := int64(500) +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionCreateTable), +// SchemaID: schemaID, +// TableID: tableID3, +// SchemaVersion: 501, +// TableInfo: &model.TableInfo{ +// ID: tableID3, +// Name: model.NewCIStr("t3"), +// }, +// FinishedTs: 601, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// } + +// // drop table t2 +// { +// ddlEvent := PersistedDDLEvent{ +// Type: byte(model.ActionDropTable), +// SchemaID: schemaID, +// TableID: 
tableID2, +// SchemaVersion: 503, +// TableInfo: nil, +// FinishedTs: 603, +// } +// pStorage.handleSortedDDLEvents(ddlEvent) +// } + +// { +// allPhysicalTables, err := pStorage.getAllPhysicalTables(600, nil) +// require.Nil(t, err) +// require.Equal(t, 2, len(allPhysicalTables)) +// } + +// { +// allPhysicalTables, err := pStorage.getAllPhysicalTables(601, nil) +// require.Nil(t, err) +// require.Equal(t, 3, len(allPhysicalTables)) +// } + +// { +// allPhysicalTables, err := pStorage.getAllPhysicalTables(603, nil) +// require.Nil(t, err) +// require.Equal(t, 2, len(allPhysicalTables)) +// } +// } diff --git a/logservice/schemastore/schema_store.go b/logservice/schemastore/schema_store.go index f7f403133..de771d085 100644 --- a/logservice/schemastore/schema_store.go +++ b/logservice/schemastore/schema_store.go @@ -28,6 +28,7 @@ type SchemaStore interface { UnregisterTable(tableID int64) error + // return table info with largest version <= ts GetTableInfo(tableID int64, ts uint64) (*common.TableInfo, error) // FetchTableDDLEvents returns the next ddl events which finishedTs are within the range (start, end] @@ -136,10 +137,7 @@ func (s *schemaStore) updateResolvedTsPeriodically(ctx context.Context) error { zap.Uint64("resolvedTs", pendingTs), zap.Int("resolvedEventsLen", len(resolvedEvents))) - validEvents := make([]PersistedDDLEvent, 0, len(resolvedEvents)) - for _, event := range resolvedEvents { - // TODO: build persisted ddl event after filter if event.Job.BinlogInfo.SchemaVersion <= s.schemaVersion || event.Job.BinlogInfo.FinishedTS <= s.finishedDDLTs { log.Info("skip already applied ddl job", zap.String("job", event.Job.Query), @@ -149,12 +147,12 @@ func (s *schemaStore) updateResolvedTsPeriodically(ctx context.Context) error { zap.Uint64("finishedDDLTS", s.finishedDDLTs)) continue } - validEvents = append(validEvents, buildPersistedDDLEventFromJob(event.Job)) // need to update the following two members for every event to filter out later duplicate events 
s.schemaVersion = event.Job.BinlogInfo.SchemaVersion s.finishedDDLTs = event.Job.BinlogInfo.FinishedTS + + s.dataStorage.handleDDLJob(event.Job) } - s.dataStorage.handleSortedDDLEvents(validEvents...) } // TODO: resolved ts are updated after ddl events written to disk, do we need to optimize it? s.resolvedTs.Store(pendingTs) diff --git a/logservice/schemastore/types.go b/logservice/schemastore/types.go index 585e1e73e..2056128a9 100644 --- a/logservice/schemastore/types.go +++ b/logservice/schemastore/types.go @@ -11,17 +11,18 @@ type PersistedDDLEvent struct { ID int64 `msg:"id"` Type byte `msg:"type"` - // TODO: add more detailed comments about following fields // SchemaID means different for different job types: // - ExchangeTablePartition: db id of non-partitioned table - SchemaID int64 `msg:"schema_id"` // TableID means different for different job types: // - ExchangeTablePartition: non-partitioned table id // For truncate table, it it the table id of the newly created table - TableID int64 `msg:"table_id"` - SchemaName string `msg:"schema_name"` - TableName string `msg:"table_name"` + CurrentSchemaID int64 `msg:"current_schema_id"` + CurrentTableID int64 `msg:"current_table_id"` + CurrentSchemaName string `msg:"current_schema_name"` + CurrentTableName string `msg:"current_table_name"` + + // The following fields are only set when the ddl job involves a prev table and the corresponding fields change PrevSchemaID int64 `msg:"prev_schema_id"` PrevTableID int64 `msg:"prev_table_id"` PrevSchemaName string `msg:"prev_schema_name"` @@ -39,22 +40,6 @@ type PersistedDDLEvent struct { CDCWriteSource uint64 `msg:"cdc_write_source"` } -func buildPersistedDDLEventFromJob(job *model.Job) PersistedDDLEvent { - return PersistedDDLEvent{ - ID: job.ID, - Type: byte(job.Type), - SchemaID: job.SchemaID, - TableID: job.TableID, - Query: job.Query, - SchemaVersion: job.BinlogInfo.SchemaVersion, - DBInfo: job.BinlogInfo.DBInfo, - TableInfo: job.BinlogInfo.TableInfo, - FinishedTs: 
job.BinlogInfo.FinishedTS, - BDRRole: job.BDRRole, - CDCWriteSource: job.CDCWriteSource, - } -} - // TODO: use msgp.Raw to do version management type PersistedTableInfoEntry struct { SchemaID int64 `msg:"schema_id"` diff --git a/logservice/schemastore/types_gen.go b/logservice/schemastore/types_gen.go index 3da345afd..daef63883 100644 --- a/logservice/schemastore/types_gen.go +++ b/logservice/schemastore/types_gen.go @@ -36,28 +36,28 @@ func (z *PersistedDDLEvent) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "Type") return } - case "schema_id": - z.SchemaID, err = dc.ReadInt64() + case "current_schema_id": + z.CurrentSchemaID, err = dc.ReadInt64() if err != nil { - err = msgp.WrapError(err, "SchemaID") + err = msgp.WrapError(err, "CurrentSchemaID") return } - case "table_id": - z.TableID, err = dc.ReadInt64() + case "current_table_id": + z.CurrentTableID, err = dc.ReadInt64() if err != nil { - err = msgp.WrapError(err, "TableID") + err = msgp.WrapError(err, "CurrentTableID") return } - case "schema_name": - z.SchemaName, err = dc.ReadString() + case "current_schema_name": + z.CurrentSchemaName, err = dc.ReadString() if err != nil { - err = msgp.WrapError(err, "SchemaName") + err = msgp.WrapError(err, "CurrentSchemaName") return } - case "table_name": - z.TableName, err = dc.ReadString() + case "current_table_name": + z.CurrentTableName, err = dc.ReadString() if err != nil { - err = msgp.WrapError(err, "TableName") + err = msgp.WrapError(err, "CurrentTableName") return } case "prev_schema_id": @@ -154,44 +154,44 @@ func (z *PersistedDDLEvent) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "Type") return } - // write "schema_id" - err = en.Append(0xa9, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x69, 0x64) + // write "current_schema_id" + err = en.Append(0xb1, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x69, 0x64) if err != nil { return } - err = en.WriteInt64(z.SchemaID) + err = 
en.WriteInt64(z.CurrentSchemaID) if err != nil { - err = msgp.WrapError(err, "SchemaID") + err = msgp.WrapError(err, "CurrentSchemaID") return } - // write "table_id" - err = en.Append(0xa8, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64) + // write "current_table_id" + err = en.Append(0xb0, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64) if err != nil { return } - err = en.WriteInt64(z.TableID) + err = en.WriteInt64(z.CurrentTableID) if err != nil { - err = msgp.WrapError(err, "TableID") + err = msgp.WrapError(err, "CurrentTableID") return } - // write "schema_name" - err = en.Append(0xab, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65) + // write "current_schema_name" + err = en.Append(0xb3, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65) if err != nil { return } - err = en.WriteString(z.SchemaName) + err = en.WriteString(z.CurrentSchemaName) if err != nil { - err = msgp.WrapError(err, "SchemaName") + err = msgp.WrapError(err, "CurrentSchemaName") return } - // write "table_name" - err = en.Append(0xaa, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65) + // write "current_table_name" + err = en.Append(0xb2, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65) if err != nil { return } - err = en.WriteString(z.TableName) + err = en.WriteString(z.CurrentTableName) if err != nil { - err = msgp.WrapError(err, "TableName") + err = msgp.WrapError(err, "CurrentTableName") return } // write "prev_schema_id" @@ -307,18 +307,18 @@ func (z *PersistedDDLEvent) MarshalMsg(b []byte) (o []byte, err error) { // string "type" o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65) o = msgp.AppendByte(o, z.Type) - // string "schema_id" - o = append(o, 0xa9, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x69, 0x64) - o = msgp.AppendInt64(o, z.SchemaID) - // string "table_id" - o = append(o, 0xa8, 0x74, 
0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64) - o = msgp.AppendInt64(o, z.TableID) - // string "schema_name" - o = append(o, 0xab, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65) - o = msgp.AppendString(o, z.SchemaName) - // string "table_name" - o = append(o, 0xaa, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65) - o = msgp.AppendString(o, z.TableName) + // string "current_schema_id" + o = append(o, 0xb1, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x69, 0x64) + o = msgp.AppendInt64(o, z.CurrentSchemaID) + // string "current_table_id" + o = append(o, 0xb0, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64) + o = msgp.AppendInt64(o, z.CurrentTableID) + // string "current_schema_name" + o = append(o, 0xb3, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.CurrentSchemaName) + // string "current_table_name" + o = append(o, 0xb2, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.CurrentTableName) // string "prev_schema_id" o = append(o, 0xae, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x69, 0x64) o = msgp.AppendInt64(o, z.PrevSchemaID) @@ -382,28 +382,28 @@ func (z *PersistedDDLEvent) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Type") return } - case "schema_id": - z.SchemaID, bts, err = msgp.ReadInt64Bytes(bts) + case "current_schema_id": + z.CurrentSchemaID, bts, err = msgp.ReadInt64Bytes(bts) if err != nil { - err = msgp.WrapError(err, "SchemaID") + err = msgp.WrapError(err, "CurrentSchemaID") return } - case "table_id": - z.TableID, bts, err = msgp.ReadInt64Bytes(bts) + case "current_table_id": + z.CurrentTableID, bts, err = msgp.ReadInt64Bytes(bts) if err != nil { - err = msgp.WrapError(err, "TableID") + err = 
msgp.WrapError(err, "CurrentTableID") return } - case "schema_name": - z.SchemaName, bts, err = msgp.ReadStringBytes(bts) + case "current_schema_name": + z.CurrentSchemaName, bts, err = msgp.ReadStringBytes(bts) if err != nil { - err = msgp.WrapError(err, "SchemaName") + err = msgp.WrapError(err, "CurrentSchemaName") return } - case "table_name": - z.TableName, bts, err = msgp.ReadStringBytes(bts) + case "current_table_name": + z.CurrentTableName, bts, err = msgp.ReadStringBytes(bts) if err != nil { - err = msgp.WrapError(err, "TableName") + err = msgp.WrapError(err, "CurrentTableName") return } case "prev_schema_id": @@ -480,7 +480,7 @@ func (z *PersistedDDLEvent) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *PersistedDDLEvent) Msgsize() (s int) { - s = 3 + 3 + msgp.Int64Size + 5 + msgp.ByteSize + 10 + msgp.Int64Size + 9 + msgp.Int64Size + 12 + msgp.StringPrefixSize + len(z.SchemaName) + 11 + msgp.StringPrefixSize + len(z.TableName) + 15 + msgp.Int64Size + 14 + msgp.Int64Size + 17 + msgp.StringPrefixSize + len(z.PrevSchemaName) + 16 + msgp.StringPrefixSize + len(z.PrevTableName) + 6 + msgp.StringPrefixSize + len(z.Query) + 15 + msgp.Int64Size + 17 + msgp.BytesPrefixSize + len(z.TableInfoValue) + 12 + msgp.Uint64Size + 9 + msgp.StringPrefixSize + len(z.BDRRole) + 17 + msgp.Uint64Size + s = 3 + 3 + msgp.Int64Size + 5 + msgp.ByteSize + 18 + msgp.Int64Size + 17 + msgp.Int64Size + 20 + msgp.StringPrefixSize + len(z.CurrentSchemaName) + 19 + msgp.StringPrefixSize + len(z.CurrentTableName) + 15 + msgp.Int64Size + 14 + msgp.Int64Size + 17 + msgp.StringPrefixSize + len(z.PrevSchemaName) + 16 + msgp.StringPrefixSize + len(z.PrevTableName) + 6 + msgp.StringPrefixSize + len(z.Query) + 15 + msgp.Int64Size + 17 + msgp.BytesPrefixSize + len(z.TableInfoValue) + 12 + msgp.Uint64Size + 9 + msgp.StringPrefixSize + len(z.BDRRole) + 17 + msgp.Uint64Size return } From 
f8cec1c8ddd441c0fa83f2a72d0c538e5295160b Mon Sep 17 00:00:00 2001 From: lidezhu Date: Fri, 27 Sep 2024 16:02:42 +0800 Subject: [PATCH 3/3] fix gc and and tests --- logservice/schemastore/disk_format.go | 16 +- logservice/schemastore/multi_version.go | 16 +- logservice/schemastore/multi_version_test.go | 18 +- logservice/schemastore/persist_storage.go | 43 +- .../schemastore/persist_storage_test.go | 1825 +++++++++-------- logservice/schemastore/schema_store.go | 4 +- 6 files changed, 1013 insertions(+), 909 deletions(-) diff --git a/logservice/schemastore/disk_format.go b/logservice/schemastore/disk_format.go index 5408351ad..ec7de904f 100644 --- a/logservice/schemastore/disk_format.go +++ b/logservice/schemastore/disk_format.go @@ -398,7 +398,7 @@ func writeSchemaSnapshotAndMeta( db *pebble.DB, tiStore kv.Storage, snapTs uint64, - onlyTableID bool, + needTableInfo bool, ) (map[int64]*BasicDatabaseInfo, map[int64]*BasicTableInfo, error) { meta := logpuller.GetSnapshotMeta(tiStore, snapTs) start := time.Now() @@ -407,8 +407,12 @@ func writeSchemaSnapshotAndMeta( log.Fatal("list databases failed", zap.Error(err)) } - databaseMap := make(map[int64]*BasicDatabaseInfo) - tablesInKVSnap := make(map[int64]*BasicTableInfo) + var databaseMap map[int64]*BasicDatabaseInfo + var tablesInKVSnap map[int64]*BasicTableInfo + if needTableInfo { + databaseMap = make(map[int64]*BasicDatabaseInfo) + tablesInKVSnap = make(map[int64]*BasicTableInfo) + } for _, dbInfo := range dbInfos { if filter.IsSysSchema(dbInfo.Name.O) { continue @@ -422,7 +426,7 @@ func writeSchemaSnapshotAndMeta( log.Fatal("get tables failed", zap.Error(err)) } var tables map[int64]bool - if !onlyTableID { + if needTableInfo { tables = make(map[int64]bool) } for _, rawTable := range rawTables { @@ -430,7 +434,7 @@ func writeSchemaSnapshotAndMeta( continue } tableID, tableName := writeTableInfoToBatch(batch, snapTs, dbInfo, rawTable.Value) - if !onlyTableID { + if needTableInfo { tablesInKVSnap[tableID] = 
&BasicTableInfo{ SchemaID: dbInfo.ID, Name: tableName, @@ -447,7 +451,7 @@ func writeSchemaSnapshotAndMeta( batch = db.NewBatch() } } - if !onlyTableID { + if needTableInfo { databaseInfo := &BasicDatabaseInfo{ Name: dbInfo.Name.O, Tables: tables, diff --git a/logservice/schemastore/multi_version.go b/logservice/schemastore/multi_version.go index e25bf0664..10d9ffb72 100644 --- a/logservice/schemastore/multi_version.go +++ b/logservice/schemastore/multi_version.go @@ -126,25 +126,33 @@ func (v *versionedTableInfoStore) getTableInfo(ts uint64) (*common.TableInfo, er return v.infos[target-1].info, nil } -// only keep one item with the largest version <= gcTS -func (v *versionedTableInfoStore) gc(gcTs uint64) { +// only keep one item with the largest version <= gcTS, return whether the store should be totally removed +func (v *versionedTableInfoStore) gc(gcTs uint64) bool { v.mu.Lock() defer v.mu.Unlock() if !v.initialized { - return + return false } if len(v.infos) == 0 { log.Fatal("no table info found", zap.Int64("tableID", v.tableID)) } + if gcTs >= v.deleteVersion { + return true + } + target := sort.Search(len(v.infos), func(i int) bool { return v.infos[i].version > gcTs }) if target == 0 { - return + return false } v.infos = v.infos[target-1:] + if len(v.infos) == 0 { + log.Panic("should not happen") + } + return false } func assertEmpty(infos []*tableInfoItem, event PersistedDDLEvent) { diff --git a/logservice/schemastore/multi_version_test.go b/logservice/schemastore/multi_version_test.go index eddf20416..d93a58ed8 100644 --- a/logservice/schemastore/multi_version_test.go +++ b/logservice/schemastore/multi_version_test.go @@ -16,6 +16,7 @@ package schemastore import ( "testing" + "github.com/flowbehappy/tigate/pkg/common" "github.com/pingcap/tidb/pkg/parser/model" "github.com/stretchr/testify/require" ) @@ -154,6 +155,21 @@ func TestRenameTable(t *testing.T) { } } -func TestGC(t *testing.T) { +func TestGCMultiVersionTableInfo(t *testing.T) { + tableID := 
int64(100) + store := newEmptyVersionedTableInfoStore(tableID) + store.setTableInfoInitialized() + + store.infos = append(store.infos, &tableInfoItem{version: 100, info: &common.TableInfo{}}) + store.infos = append(store.infos, &tableInfoItem{version: 200, info: &common.TableInfo{}}) + store.infos = append(store.infos, &tableInfoItem{version: 300, info: &common.TableInfo{}}) + store.deleteVersion = 1000 + require.False(t, store.gc(200)) + require.Equal(t, 2, len(store.infos)) + require.False(t, store.gc(300)) + require.Equal(t, 1, len(store.infos)) + require.False(t, store.gc(500)) + require.Equal(t, 1, len(store.infos)) + require.True(t, store.gc(1000)) } diff --git a/logservice/schemastore/persist_storage.go b/logservice/schemastore/persist_storage.go index a8a59e8ac..3c6dea78d 100644 --- a/logservice/schemastore/persist_storage.go +++ b/logservice/schemastore/persist_storage.go @@ -172,7 +172,7 @@ func (p *persistentStorage) initializeFromKVStorage(dbPath string, storage kv.St log.Info("schema store initialize from kv storage begin", zap.Uint64("snapTs", gcTs)) - if p.databaseMap, p.tableMap, err = writeSchemaSnapshotAndMeta(p.db, storage, gcTs, false); err != nil { + if p.databaseMap, p.tableMap, err = writeSchemaSnapshotAndMeta(p.db, storage, gcTs, true); err != nil { // TODO: retry log.Fatal("fail to initialize from kv snapshot") } @@ -472,7 +472,7 @@ func (p *persistentStorage) doGc(gcTs uint64) error { p.mu.Unlock() start := time.Now() - _, tablesInKVSnap, err := writeSchemaSnapshotAndMeta(p.db, p.kvStorage, gcTs, true) + _, _, err := writeSchemaSnapshotAndMeta(p.db, p.kvStorage, gcTs, false) if err != nil { log.Warn("fail to write kv snapshot during gc", zap.Uint64("gcTs", gcTs)) @@ -484,7 +484,7 @@ func (p *persistentStorage) doGc(gcTs uint64) error { zap.Any("duration", time.Since(start).Seconds())) // clean data in memeory before clean data on disk - p.cleanObseleteDataInMemory(gcTs, tablesInKVSnap) + p.cleanObseleteDataInMemory(gcTs) log.Info("persist 
storage: gc finish clean in memory data", zap.Uint64("gcTs", gcTs), zap.Any("duration", time.Since(start).Seconds())) @@ -497,27 +497,26 @@ func (p *persistentStorage) doGc(gcTs uint64) error { return nil } -func (p *persistentStorage) cleanObseleteDataInMemory(gcTs uint64, tablesInKVSnap map[int64]*BasicTableInfo) { +func (p *persistentStorage) cleanObseleteDataInMemory(gcTs uint64) { p.mu.Lock() defer p.mu.Unlock() p.gcTs = gcTs // clean tablesDDLHistory + tablesToRemove := make(map[int64]interface{}) for tableID := range p.tablesDDLHistory { - if _, ok := tablesInKVSnap[tableID]; !ok { - delete(p.tablesDDLHistory, tableID) - continue - } - i := sort.Search(len(p.tablesDDLHistory[tableID]), func(i int) bool { return p.tablesDDLHistory[tableID][i] > gcTs }) if i == len(p.tablesDDLHistory[tableID]) { - delete(p.tablesDDLHistory, tableID) + tablesToRemove[tableID] = nil continue } p.tablesDDLHistory[tableID] = p.tablesDDLHistory[tableID][i:] } + for tableID := range tablesToRemove { + delete(p.tablesDDLHistory, tableID) + } // clean tableTriggerDDLHistory i := sort.Search(len(p.tableTriggerDDLHistory), func(i int) bool { @@ -526,12 +525,16 @@ func (p *persistentStorage) cleanObseleteDataInMemory(gcTs uint64, tablesInKVSna p.tableTriggerDDLHistory = p.tableTriggerDDLHistory[i:] // clean tableInfoStoreMap + // Note: tableInfoStoreMap need to keep one version before gcTs, + // so it has different gc logic with tablesDDLHistory + tablesToRemove = make(map[int64]interface{}) for tableID, store := range p.tableInfoStoreMap { - if _, ok := tablesInKVSnap[tableID]; !ok { - delete(p.tableInfoStoreMap, tableID) - continue + if needRemove := store.gc(gcTs); needRemove { + tablesToRemove[tableID] = nil } - store.gc(gcTs) + } + for tableID := range tablesToRemove { + delete(p.tableInfoStoreMap, tableID) } } @@ -659,7 +662,7 @@ func buildPersistedDDLEventFromJob( switch model.ActionType(event.Type) { case model.ActionCreateSchema, model.ActionDropSchema: - 
log.Info("completePersistedDDLEvent for create/drop schema", + log.Info("buildPersistedDDLEvent for create/drop schema", zap.Any("type", event.Type), zap.Int64("schemaID", event.CurrentSchemaID), zap.String("schemaName", event.DBInfo.Name.O)) @@ -1012,7 +1015,7 @@ func buildDDLEvent(rawEvent *PersistedDDLEvent, tableFilter filter.Filter) commo ddlEvent.NeedDroppedTables = &common.InfluencedTables{ InfluenceType: common.InfluenceTypeNormal, TableIDs: []int64{rawEvent.PrevTableID}, - SchemaID: rawEvent.PrevSchemaID, + SchemaID: rawEvent.CurrentSchemaID, } ddlEvent.NeedAddedTables = []common.Table{ { @@ -1027,17 +1030,17 @@ func buildDDLEvent(rawEvent *PersistedDDLEvent, tableFilter filter.Filter) commo if !ignorePrevTable { ddlEvent.BlockedTables = &common.InfluencedTables{ InfluenceType: common.InfluenceTypeNormal, - TableIDs: []int64{rawEvent.PrevTableID, heartbeatpb.DDLSpan.TableID}, + TableIDs: []int64{rawEvent.CurrentTableID, heartbeatpb.DDLSpan.TableID}, SchemaID: rawEvent.PrevSchemaID, } ddlEvent.NeedDroppedTables = &common.InfluencedTables{ InfluenceType: common.InfluenceTypeNormal, - TableIDs: []int64{rawEvent.PrevTableID}, + TableIDs: []int64{rawEvent.CurrentTableID}, SchemaID: rawEvent.PrevSchemaID, } dropName = append(dropName, common.SchemaTableName{ - SchemaName: rawEvent.CurrentSchemaName, - TableName: rawEvent.CurrentTableName, + SchemaName: rawEvent.PrevSchemaName, + TableName: rawEvent.PrevTableName, }) } if !ignoreCurrentTable { diff --git a/logservice/schemastore/persist_storage_test.go b/logservice/schemastore/persist_storage_test.go index a3fc9d021..02a332cd5 100644 --- a/logservice/schemastore/persist_storage_test.go +++ b/logservice/schemastore/persist_storage_test.go @@ -20,8 +20,12 @@ import ( "testing" "github.com/cockroachdb/pebble" + "github.com/flowbehappy/tigate/heartbeatpb" + "github.com/flowbehappy/tigate/pkg/common" + "github.com/flowbehappy/tigate/pkg/filter" "github.com/pingcap/log" "github.com/pingcap/tidb/pkg/parser/model" + 
"github.com/pingcap/tiflow/pkg/config" "github.com/stretchr/testify/require" "go.uber.org/zap" ) @@ -75,17 +79,12 @@ func newPersistentStorageForTest(dbPath string, gcTs uint64, initialDBInfos map[ return loadPersistentStorageForTest(db, gcTs, upperBound) } -func mockWriteKVSnapOnDisk(db *pebble.DB, snapTs uint64, dbInfos map[int64]*model.DBInfo) map[int64]*BasicTableInfo { +func mockWriteKVSnapOnDisk(db *pebble.DB, snapTs uint64, dbInfos map[int64]*model.DBInfo) { batch := db.NewBatch() defer batch.Close() - tablesInKVSnap := make(map[int64]*BasicTableInfo) for _, dbInfo := range dbInfos { writeSchemaInfoToBatch(batch, snapTs, dbInfo) for _, tableInfo := range dbInfo.Tables { - tablesInKVSnap[tableInfo.ID] = &BasicTableInfo{ - SchemaID: dbInfo.ID, - Name: tableInfo.Name.O, - } tableInfoValue, err := json.Marshal(tableInfo) if err != nil { log.Panic("marshal table info fail", zap.Error(err)) @@ -97,7 +96,6 @@ func mockWriteKVSnapOnDisk(db *pebble.DB, snapTs uint64, dbInfos map[int64]*mode log.Panic("commit batch fail", zap.Error(err)) } writeGcTs(db, snapTs) - return tablesInKVSnap } func TestReadWriteMeta(t *testing.T) { @@ -155,873 +153,946 @@ func TestReadWriteMeta(t *testing.T) { } } -// func TestBuildVersionedTableInfoStore(t *testing.T) { -// dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) -// err := os.RemoveAll(dbPath) -// require.Nil(t, err) - -// gcTs := uint64(1000) -// schemaID := int64(50) -// tableID := int64(99) -// databaseInfo := make(map[int64]*model.DBInfo) -// databaseInfo[schemaID] = &model.DBInfo{ -// ID: schemaID, -// Name: model.NewCIStr("test"), -// Tables: []*model.TableInfo{ -// { -// ID: tableID, -// Name: model.NewCIStr("t1"), -// }, -// }, -// } -// pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) - -// require.Equal(t, 1, len(pStorage.databaseMap)) -// require.Equal(t, "test", pStorage.databaseMap[schemaID].Name) - -// { -// store := newEmptyVersionedTableInfoStore(tableID) -// 
pStorage.buildVersionedTableInfoStore(store) -// tableInfo, err := store.getTableInfo(gcTs) -// require.Nil(t, err) -// require.Equal(t, "t1", tableInfo.Name.O) -// require.Equal(t, tableID, tableInfo.ID) -// } - -// // rename table -// renameVersion := uint64(1500) -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionRenameTable), -// SchemaID: schemaID, -// TableID: tableID, -// SchemaVersion: 3000, -// TableInfo: &model.TableInfo{ -// ID: tableID, -// Name: model.NewCIStr("t2"), -// }, -// FinishedTs: renameVersion, -// } -// err = pStorage.handleSortedDDLEvents(ddlEvent) -// require.Nil(t, err) -// } - -// // create another table -// tableID2 := tableID + 1 -// createVersion := renameVersion + 200 -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionCreateTable), -// SchemaID: schemaID, -// TableID: tableID2, -// SchemaVersion: 3500, -// TableInfo: &model.TableInfo{ -// ID: tableID2, -// Name: model.NewCIStr("t3"), -// }, -// FinishedTs: createVersion, -// } -// err = pStorage.handleSortedDDLEvents(ddlEvent) -// require.Nil(t, err) -// } - -// upperBound := UpperBoundMeta{ -// FinishedDDLTs: 3000, -// SchemaVersion: 4000, -// ResolvedTs: 2000, -// } -// pStorage = loadPersistentStorageForTest(pStorage.db, gcTs, upperBound) -// { -// store := newEmptyVersionedTableInfoStore(tableID) -// pStorage.buildVersionedTableInfoStore(store) -// require.Equal(t, 2, len(store.infos)) -// tableInfo, err := store.getTableInfo(gcTs) -// require.Nil(t, err) -// require.Equal(t, "t1", tableInfo.Name.O) -// require.Equal(t, tableID, tableInfo.ID) -// tableInfo2, err := store.getTableInfo(renameVersion) -// require.Nil(t, err) -// require.Equal(t, "t2", tableInfo2.Name.O) - -// renameVersion2 := uint64(3000) -// store.applyDDL(PersistedDDLEvent{ -// Type: byte(model.ActionRenameTable), -// SchemaID: schemaID, -// TableID: tableID, -// SchemaVersion: 3000, -// TableInfo: &model.TableInfo{ -// ID: tableID, -// Name: model.NewCIStr("t3"), -// }, -// 
FinishedTs: renameVersion2, -// }) -// tableInfo3, err := store.getTableInfo(renameVersion2) -// require.Nil(t, err) -// require.Equal(t, "t3", tableInfo3.Name.O) -// } - -// { -// store := newEmptyVersionedTableInfoStore(tableID2) -// pStorage.buildVersionedTableInfoStore(store) -// require.Equal(t, 1, len(store.infos)) -// tableInfo, err := store.getTableInfo(createVersion) -// require.Nil(t, err) -// require.Equal(t, "t3", tableInfo.Name.O) -// require.Equal(t, tableID2, tableInfo.ID) -// } - -// // truncate table -// tableID3 := tableID2 + 1 -// truncateVersion := createVersion + 200 -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionTruncateTable), -// SchemaID: schemaID, -// TableID: tableID2, -// SchemaVersion: 3600, -// TableInfo: &model.TableInfo{ -// ID: tableID3, -// Name: model.NewCIStr("t4"), -// }, -// FinishedTs: truncateVersion, -// } -// err = pStorage.handleSortedDDLEvents(ddlEvent) -// require.Nil(t, err) -// } - -// { -// store := newEmptyVersionedTableInfoStore(tableID2) -// pStorage.buildVersionedTableInfoStore(store) -// require.Equal(t, 1, len(store.infos)) -// require.Equal(t, truncateVersion, store.deleteVersion) -// } - -// { -// store := newEmptyVersionedTableInfoStore(tableID3) -// pStorage.buildVersionedTableInfoStore(store) -// require.Equal(t, 1, len(store.infos)) -// tableInfo, err := store.getTableInfo(truncateVersion) -// require.Nil(t, err) -// require.Equal(t, "t4", tableInfo.Name.O) -// require.Equal(t, tableID3, tableInfo.ID) -// } - -// } - -// func TestHandleCreateDropSchemaTableDDL(t *testing.T) { -// dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) -// err := os.RemoveAll(dbPath) -// require.Nil(t, err) -// pStorage := newEmptyPersistentStorageForTest(dbPath) - -// // create db -// schemaID := int64(300) -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionCreateSchema), -// SchemaID: schemaID, -// SchemaVersion: 100, -// DBInfo: &model.DBInfo{ -// ID: schemaID, -// Name: 
model.NewCIStr("test"), -// }, -// TableInfo: nil, -// FinishedTs: 200, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) - -// require.Equal(t, 1, len(pStorage.databaseMap)) -// require.Equal(t, "test", pStorage.databaseMap[schemaID].Name) -// require.Equal(t, 1, len(pStorage.tableTriggerDDLHistory)) -// require.Equal(t, uint64(200), pStorage.tableTriggerDDLHistory[0]) -// } - -// // create a table -// tableID := int64(100) -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionCreateTable), -// SchemaID: schemaID, -// TableID: tableID, -// SchemaVersion: 101, -// TableInfo: &model.TableInfo{ -// Name: model.NewCIStr("t1"), -// }, -// FinishedTs: 201, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) - -// require.Equal(t, 1, len(pStorage.databaseMap[schemaID].Tables)) -// require.Equal(t, 1, len(pStorage.tableMap)) -// require.Equal(t, 2, len(pStorage.tableTriggerDDLHistory)) -// require.Equal(t, uint64(201), pStorage.tableTriggerDDLHistory[1]) -// require.Equal(t, 1, len(pStorage.tablesDDLHistory)) -// require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID])) -// } - -// // create another table -// tableID2 := int64(105) -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionCreateTable), -// SchemaID: schemaID, -// TableID: tableID2, -// SchemaVersion: 103, -// TableInfo: &model.TableInfo{ -// Name: model.NewCIStr("t2"), -// }, -// FinishedTs: 203, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) - -// require.Equal(t, 2, len(pStorage.databaseMap[schemaID].Tables)) -// require.Equal(t, 2, len(pStorage.tableMap)) -// require.Equal(t, 3, len(pStorage.tableTriggerDDLHistory)) -// require.Equal(t, uint64(203), pStorage.tableTriggerDDLHistory[2]) -// require.Equal(t, 2, len(pStorage.tablesDDLHistory)) -// require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID2])) -// require.Equal(t, uint64(203), pStorage.tablesDDLHistory[tableID2][0]) -// } - -// // drop a table -// { -// ddlEvent := PersistedDDLEvent{ -// Type: 
byte(model.ActionDropTable), -// SchemaID: schemaID, -// TableID: tableID2, -// SchemaVersion: 105, -// TableInfo: nil, -// FinishedTs: 205, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) - -// require.Equal(t, 1, len(pStorage.databaseMap[schemaID].Tables)) -// require.Equal(t, 1, len(pStorage.tableMap)) -// require.Equal(t, 4, len(pStorage.tableTriggerDDLHistory)) -// require.Equal(t, uint64(205), pStorage.tableTriggerDDLHistory[3]) -// require.Equal(t, 2, len(pStorage.tablesDDLHistory)) -// require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID])) -// require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID2])) -// require.Equal(t, uint64(205), pStorage.tablesDDLHistory[tableID2][1]) -// } - -// // truncate a table -// tableID3 := int64(112) -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionTruncateTable), -// SchemaID: schemaID, -// TableID: tableID, -// SchemaVersion: 107, -// TableInfo: &model.TableInfo{ -// ID: tableID3, -// }, -// FinishedTs: 207, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) - -// require.Equal(t, 1, len(pStorage.databaseMap[schemaID].Tables)) -// require.Equal(t, 1, len(pStorage.tableMap)) -// require.Equal(t, 4, len(pStorage.tableTriggerDDLHistory)) -// require.Equal(t, 3, len(pStorage.tablesDDLHistory)) -// require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID])) -// require.Equal(t, uint64(207), pStorage.tablesDDLHistory[tableID][1]) -// require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID3])) -// require.Equal(t, uint64(207), pStorage.tablesDDLHistory[tableID3][0]) -// } - -// // drop db -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionDropSchema), -// SchemaID: schemaID, -// SchemaVersion: 200, -// DBInfo: &model.DBInfo{ -// ID: schemaID, -// Name: model.NewCIStr("test"), -// }, -// TableInfo: nil, -// FinishedTs: 300, -// } - -// pStorage.handleSortedDDLEvents(ddlEvent) - -// require.Equal(t, 0, len(pStorage.databaseMap)) -// require.Equal(t, 0, len(pStorage.tableMap)) -// 
require.Equal(t, 5, len(pStorage.tableTriggerDDLHistory)) -// require.Equal(t, uint64(300), pStorage.tableTriggerDDLHistory[4]) -// require.Equal(t, 3, len(pStorage.tablesDDLHistory)) -// require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID])) -// require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID2])) -// require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID3])) -// require.Equal(t, uint64(300), pStorage.tablesDDLHistory[tableID3][1]) -// } -// } - -// func TestHandleRenameTable(t *testing.T) { -// dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) -// err := os.RemoveAll(dbPath) -// require.Nil(t, err) - -// gcTs := uint64(500) -// schemaID1 := int64(300) -// schemaID2 := int64(305) - -// databaseInfo := make(map[int64]*model.DBInfo) -// databaseInfo[schemaID1] = &model.DBInfo{ -// ID: schemaID1, -// Name: model.NewCIStr("test"), -// } -// databaseInfo[schemaID2] = &model.DBInfo{ -// ID: schemaID2, -// Name: model.NewCIStr("test2"), -// } -// pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) - -// // create a table -// tableID := int64(100) -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionCreateTable), -// SchemaID: schemaID1, -// TableID: tableID, -// SchemaVersion: 501, -// TableInfo: &model.TableInfo{ -// ID: tableID, -// Name: model.NewCIStr("t1"), -// }, -// FinishedTs: 601, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// require.Equal(t, 2, len(pStorage.databaseMap)) -// require.Equal(t, 1, len(pStorage.databaseMap[schemaID1].Tables)) -// require.Equal(t, 0, len(pStorage.databaseMap[schemaID2].Tables)) -// require.Equal(t, schemaID1, pStorage.tableMap[tableID].SchemaID) -// require.Equal(t, "t1", pStorage.tableMap[tableID].Name) -// } - -// // rename table to a different db -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionRenameTable), -// SchemaID: schemaID2, -// TableID: tableID, -// SchemaVersion: 505, -// TableInfo: &model.TableInfo{ -// ID: tableID, -// Name: 
model.NewCIStr("t2"), -// }, -// FinishedTs: 605, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// require.Equal(t, 2, len(pStorage.databaseMap)) -// require.Equal(t, 0, len(pStorage.databaseMap[schemaID1].Tables)) -// require.Equal(t, 1, len(pStorage.databaseMap[schemaID2].Tables)) -// require.Equal(t, schemaID2, pStorage.tableMap[tableID].SchemaID) -// require.Equal(t, "t2", pStorage.tableMap[tableID].Name) -// } - -// { -// ddlEvents, err := pStorage.fetchTableDDLEvents(tableID, nil, 601, 700) -// require.Nil(t, err) -// require.Equal(t, 1, len(ddlEvents)) -// // rename table event -// require.Equal(t, uint64(605), ddlEvents[0].FinishedTs) -// require.Equal(t, "test2", ddlEvents[0].SchemaName) -// require.Equal(t, "t2", ddlEvents[0].TableName) -// require.Equal(t, common.InfluenceTypeNormal, ddlEvents[0].BlockedTables.InfluenceType) -// require.Equal(t, schemaID1, ddlEvents[0].BlockedTables.SchemaID) -// require.Equal(t, tableID, ddlEvents[0].BlockedTables.TableIDs[0]) -// // TODO: don't count on the order -// require.Equal(t, heartbeatpb.DDLSpan.TableID, ddlEvents[0].BlockedTables.TableIDs[1]) -// require.Equal(t, tableID, ddlEvents[0].NeedDroppedTables.TableIDs[0]) - -// require.Equal(t, tableID, ddlEvents[0].NeedAddedTables[0].TableID) - -// require.Equal(t, "test2", ddlEvents[0].TableNameChange.AddName[0].SchemaName) -// require.Equal(t, "t2", ddlEvents[0].TableNameChange.AddName[0].TableName) -// require.Equal(t, "test", ddlEvents[0].TableNameChange.DropName[0].SchemaName) -// require.Equal(t, "t1", ddlEvents[0].TableNameChange.DropName[0].TableName) -// } - -// // test filter: after rename, the table is filtered out -// { -// filterConfig := &config.FilterConfig{ -// Rules: []string{"test.*"}, -// } -// tableFilter, err := filter.NewFilter(filterConfig, "", false) -// require.Nil(t, err) -// ddlEvents, err := pStorage.fetchTableDDLEvents(tableID, tableFilter, 601, 700) -// require.Nil(t, err) -// require.Equal(t, 1, len(ddlEvents)) -// 
require.Equal(t, common.InfluenceTypeNormal, ddlEvents[0].BlockedTables.InfluenceType) -// require.Equal(t, schemaID1, ddlEvents[0].BlockedTables.SchemaID) -// require.Equal(t, tableID, ddlEvents[0].BlockedTables.TableIDs[0]) -// // TODO: don't count on the order -// require.Equal(t, heartbeatpb.DDLSpan.TableID, ddlEvents[0].BlockedTables.TableIDs[1]) -// require.Equal(t, tableID, ddlEvents[0].NeedDroppedTables.TableIDs[0]) - -// require.Nil(t, ddlEvents[0].NeedAddedTables) - -// require.Equal(t, 0, len(ddlEvents[0].TableNameChange.AddName)) -// require.Equal(t, "test", ddlEvents[0].TableNameChange.DropName[0].SchemaName) -// require.Equal(t, "t1", ddlEvents[0].TableNameChange.DropName[0].TableName) -// } - -// // test filter: before rename, the table is filtered out, so only table trigger can get the event -// { -// filterConfig := &config.FilterConfig{ -// Rules: []string{"test2.*"}, -// } -// tableFilter, err := filter.NewFilter(filterConfig, "", false) -// require.Nil(t, err) -// triggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(tableFilter, 601, 10) -// require.Nil(t, err) -// require.Equal(t, 1, len(triggerDDLEvents)) -// require.Nil(t, triggerDDLEvents[0].BlockedTables) -// require.Nil(t, triggerDDLEvents[0].NeedDroppedTables) - -// require.Equal(t, tableID, triggerDDLEvents[0].NeedAddedTables[0].TableID) - -// require.Equal(t, "test2", triggerDDLEvents[0].TableNameChange.AddName[0].SchemaName) -// require.Equal(t, "t2", triggerDDLEvents[0].TableNameChange.AddName[0].TableName) -// require.Equal(t, 0, len(triggerDDLEvents[0].TableNameChange.DropName)) -// } - -// // test filter: the table is always filtered out -// { -// // check table trigger events cannot get the event -// filterConfig := &config.FilterConfig{ -// Rules: []string{"test3.*"}, -// } -// tableFilter, err := filter.NewFilter(filterConfig, "", false) -// require.Nil(t, err) -// triggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(tableFilter, 601, 10) -// require.Nil(t, err) 
-// require.Equal(t, 0, len(triggerDDLEvents)) -// } -// } - -// func TestFetchDDLEventsBasic(t *testing.T) { -// dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) -// err := os.RemoveAll(dbPath) -// require.Nil(t, err) -// pStorage := newEmptyPersistentStorageForTest(dbPath) - -// // create db -// schemaID := int64(300) -// schemaName := "test" -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionCreateSchema), -// SchemaID: schemaID, -// SchemaVersion: 100, -// DBInfo: &model.DBInfo{ -// ID: schemaID, -// Name: model.NewCIStr(schemaName), -// }, -// TableInfo: nil, -// FinishedTs: 200, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// } - -// // create a table -// tableID := int64(100) -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionCreateTable), -// SchemaID: schemaID, -// TableID: tableID, -// SchemaVersion: 501, -// TableInfo: &model.TableInfo{ -// ID: tableID, -// Name: model.NewCIStr("t1"), -// }, -// FinishedTs: 601, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// } - -// // rename table -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionRenameTable), -// SchemaID: schemaID, -// TableID: tableID, -// SchemaVersion: 505, -// TableInfo: &model.TableInfo{ -// ID: tableID, -// Name: model.NewCIStr("t2"), -// }, -// FinishedTs: 605, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// } - -// // truncate table -// tableID2 := int64(105) -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionTruncateTable), -// SchemaID: schemaID, -// TableID: tableID, -// SchemaVersion: 507, -// TableInfo: &model.TableInfo{ -// ID: tableID2, -// Name: model.NewCIStr("t2"), -// }, -// FinishedTs: 607, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// } - -// // create another table -// tableID3 := int64(200) -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionCreateTable), -// SchemaID: schemaID, -// TableID: tableID3, -// SchemaVersion: 509, -// TableInfo: &model.TableInfo{ -// 
ID: tableID, -// Name: model.NewCIStr("t3"), -// }, -// FinishedTs: 609, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// } - -// // drop newly created table -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionDropTable), -// SchemaID: schemaID, -// TableID: tableID3, -// SchemaVersion: 511, -// TableInfo: &model.TableInfo{ -// ID: tableID, -// Name: model.NewCIStr("t3"), -// }, -// FinishedTs: 611, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// } - -// // drop db -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionDropSchema), -// SchemaID: schemaID, -// SchemaVersion: 600, -// DBInfo: &model.DBInfo{ -// ID: schemaID, -// Name: model.NewCIStr(schemaName), -// }, -// TableInfo: nil, -// FinishedTs: 700, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// } - -// // fetch table ddl events -// { -// ddlEvents, err := pStorage.fetchTableDDLEvents(tableID, nil, 601, 700) -// require.Nil(t, err) -// require.Equal(t, 2, len(ddlEvents)) -// // rename table event -// require.Equal(t, uint64(605), ddlEvents[0].FinishedTs) -// // truncate table event -// require.Equal(t, uint64(607), ddlEvents[1].FinishedTs) -// require.Equal(t, "test", ddlEvents[1].SchemaName) -// require.Equal(t, "t2", ddlEvents[1].TableName) -// require.Equal(t, common.InfluenceTypeNormal, ddlEvents[1].NeedDroppedTables.InfluenceType) -// require.Equal(t, schemaID, ddlEvents[1].NeedDroppedTables.SchemaID) -// require.Equal(t, 1, len(ddlEvents[1].NeedDroppedTables.TableIDs)) -// require.Equal(t, tableID, ddlEvents[1].NeedDroppedTables.TableIDs[0]) -// require.Equal(t, 1, len(ddlEvents[1].NeedAddedTables)) -// require.Equal(t, schemaID, ddlEvents[1].NeedAddedTables[0].SchemaID) -// require.Equal(t, tableID2, ddlEvents[1].NeedAddedTables[0].TableID) -// } - -// // fetch table ddl events for another table -// { -// // TODO: test return error if start ts is smaller than 607 -// ddlEvents, err := pStorage.fetchTableDDLEvents(tableID2, nil, 607, 700) -// 
require.Nil(t, err) -// require.Equal(t, 1, len(ddlEvents)) -// // drop db event -// require.Equal(t, uint64(700), ddlEvents[0].FinishedTs) -// require.Equal(t, common.InfluenceTypeDB, ddlEvents[0].NeedDroppedTables.InfluenceType) -// require.Equal(t, schemaID, ddlEvents[0].NeedDroppedTables.SchemaID) -// } - -// // fetch table ddl events again -// { -// ddlEvents, err := pStorage.fetchTableDDLEvents(tableID3, nil, 609, 700) -// require.Nil(t, err) -// require.Equal(t, 1, len(ddlEvents)) -// // drop table event -// require.Equal(t, uint64(611), ddlEvents[0].FinishedTs) -// require.Equal(t, common.InfluenceTypeNormal, ddlEvents[0].NeedDroppedTables.InfluenceType) -// require.Equal(t, 1, len(ddlEvents[0].NeedDroppedTables.TableIDs)) -// require.Equal(t, tableID3, ddlEvents[0].NeedDroppedTables.TableIDs[0]) -// require.Equal(t, schemaID, ddlEvents[0].NeedDroppedTables.SchemaID) -// } - -// // fetch all table trigger ddl events -// { -// tableTriggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(nil, 0, 10) -// require.Nil(t, err) -// require.Equal(t, 6, len(tableTriggerDDLEvents)) -// // create db event -// require.Equal(t, uint64(200), tableTriggerDDLEvents[0].FinishedTs) -// // create table event -// require.Equal(t, uint64(601), tableTriggerDDLEvents[1].FinishedTs) -// require.Equal(t, 1, len(tableTriggerDDLEvents[1].NeedAddedTables)) -// require.Equal(t, schemaID, tableTriggerDDLEvents[1].NeedAddedTables[0].SchemaID) -// require.Equal(t, tableID, tableTriggerDDLEvents[1].NeedAddedTables[0].TableID) -// require.Equal(t, schemaName, tableTriggerDDLEvents[1].TableNameChange.AddName[0].SchemaName) -// require.Equal(t, "t1", tableTriggerDDLEvents[1].TableNameChange.AddName[0].TableName) -// // rename table event -// require.Equal(t, uint64(605), tableTriggerDDLEvents[2].FinishedTs) -// // create table event -// require.Equal(t, uint64(609), tableTriggerDDLEvents[3].FinishedTs) -// // drop table event -// require.Equal(t, uint64(611), 
tableTriggerDDLEvents[4].FinishedTs) -// require.Equal(t, common.InfluenceTypeNormal, tableTriggerDDLEvents[4].NeedDroppedTables.InfluenceType) -// require.Equal(t, schemaID, tableTriggerDDLEvents[4].NeedDroppedTables.SchemaID) -// require.Equal(t, tableID3, tableTriggerDDLEvents[4].NeedDroppedTables.TableIDs[0]) -// require.Equal(t, schemaName, tableTriggerDDLEvents[4].TableNameChange.DropName[0].SchemaName) -// require.Equal(t, "t3", tableTriggerDDLEvents[4].TableNameChange.DropName[0].TableName) -// require.Equal(t, common.InfluenceTypeNormal, tableTriggerDDLEvents[4].BlockedTables.InfluenceType) -// require.Equal(t, 2, len(tableTriggerDDLEvents[4].BlockedTables.TableIDs)) -// require.Equal(t, tableID3, tableTriggerDDLEvents[4].BlockedTables.TableIDs[0]) -// // TODO: don't count on the order -// require.Equal(t, heartbeatpb.DDLSpan.TableID, tableTriggerDDLEvents[4].BlockedTables.TableIDs[1]) -// // drop db event -// require.Equal(t, uint64(700), tableTriggerDDLEvents[5].FinishedTs) -// require.Equal(t, schemaName, tableTriggerDDLEvents[5].TableNameChange.DropDatabaseName) -// } - -// // fetch partial table trigger ddl events -// { -// tableTriggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(nil, 0, 2) -// require.Nil(t, err) -// require.Equal(t, 2, len(tableTriggerDDLEvents)) -// require.Equal(t, uint64(200), tableTriggerDDLEvents[0].FinishedTs) -// require.Equal(t, uint64(601), tableTriggerDDLEvents[1].FinishedTs) -// } - -// // TODO: test filter -// } - -// func TestGC(t *testing.T) { -// dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) -// err := os.RemoveAll(dbPath) -// require.Nil(t, err) - -// schemaID := int64(300) -// gcTs := uint64(600) -// tableID1 := int64(100) -// tableID2 := int64(200) - -// databaseInfo := make(map[int64]*model.DBInfo) -// databaseInfo[schemaID] = &model.DBInfo{ -// ID: schemaID, -// Name: model.NewCIStr("test"), -// Tables: []*model.TableInfo{ -// { -// ID: tableID1, -// Name: model.NewCIStr("t1"), -// }, -// { -// ID: 
tableID2, -// Name: model.NewCIStr("t2"), -// }, -// }, -// } -// pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) - -// // create table t3 -// tableID3 := int64(500) -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionCreateTable), -// SchemaID: schemaID, -// TableID: tableID3, -// SchemaVersion: 501, -// TableInfo: &model.TableInfo{ -// ID: tableID3, -// Name: model.NewCIStr("t3"), -// }, -// FinishedTs: 601, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// } - -// // drop table t2 -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionDropTable), -// SchemaID: schemaID, -// TableID: tableID2, -// SchemaVersion: 503, -// TableInfo: nil, -// FinishedTs: 603, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// } - -// // rename table t1 -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionRenameTable), -// SchemaID: schemaID, -// TableID: tableID1, -// SchemaVersion: 505, -// TableInfo: &model.TableInfo{ -// ID: tableID1, -// Name: model.NewCIStr("t1_r"), -// }, -// FinishedTs: 605, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// } - -// // write upper bound -// newUpperBound := UpperBoundMeta{ -// FinishedDDLTs: 700, -// SchemaVersion: 509, -// ResolvedTs: 705, -// } -// { -// writeUpperBoundMeta(pStorage.db, newUpperBound) -// } - -// pStorage.registerTable(tableID1, gcTs+1) - -// // mock gc -// newGcTs := uint64(603) -// { -// databaseInfo := make(map[int64]*model.DBInfo) -// databaseInfo[schemaID] = &model.DBInfo{ -// ID: schemaID, -// Name: model.NewCIStr("test"), -// Tables: []*model.TableInfo{ -// { -// ID: tableID1, -// Name: model.NewCIStr("t1"), -// }, -// { -// ID: tableID3, -// Name: model.NewCIStr("t3"), -// }, -// }, -// } -// tablesInKVSnap := mockWriteKVSnapOnDisk(pStorage.db, newGcTs, databaseInfo) - -// require.Equal(t, 3, len(pStorage.tableTriggerDDLHistory)) -// require.Equal(t, 3, len(pStorage.tablesDDLHistory)) -// pStorage.cleanObseleteDataInMemory(newGcTs, 
tablesInKVSnap) -// require.Equal(t, 1, len(pStorage.tableTriggerDDLHistory)) -// require.Equal(t, uint64(605), pStorage.tableTriggerDDLHistory[0]) -// require.Equal(t, 1, len(pStorage.tablesDDLHistory)) -// require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID1])) -// tableInfoT1, err := pStorage.getTableInfo(tableID1, newGcTs) -// require.Nil(t, err) -// require.Equal(t, "t1", tableInfoT1.Name.O) -// tableInfoT1, err = pStorage.getTableInfo(tableID1, 606) -// require.Nil(t, err) -// require.Equal(t, "t1_r", tableInfoT1.Name.O) -// } - -// pStorage = loadPersistentStorageForTest(pStorage.db, newGcTs, newUpperBound) -// { -// require.Equal(t, newGcTs, pStorage.gcTs) -// require.Equal(t, newUpperBound, pStorage.upperBound) -// require.Equal(t, 1, len(pStorage.tableTriggerDDLHistory)) -// require.Equal(t, uint64(605), pStorage.tableTriggerDDLHistory[0]) -// require.Equal(t, 1, len(pStorage.tablesDDLHistory)) -// require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID1])) -// } - -// // TODO: test obsolete data can be removed -// } - -// func TestGetAllPhysicalTables(t *testing.T) { -// dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) -// err := os.RemoveAll(dbPath) -// require.Nil(t, err) - -// schemaID := int64(300) -// gcTs := uint64(600) -// tableID1 := int64(100) -// tableID2 := int64(200) - -// databaseInfo := make(map[int64]*model.DBInfo) -// databaseInfo[schemaID] = &model.DBInfo{ -// ID: schemaID, -// Name: model.NewCIStr("test"), -// Tables: []*model.TableInfo{ -// { -// ID: tableID1, -// Name: model.NewCIStr("t1"), -// }, -// { -// ID: tableID2, -// Name: model.NewCIStr("t2"), -// }, -// }, -// } -// pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) - -// // create table t3 -// tableID3 := int64(500) -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionCreateTable), -// SchemaID: schemaID, -// TableID: tableID3, -// SchemaVersion: 501, -// TableInfo: &model.TableInfo{ -// ID: tableID3, -// Name: model.NewCIStr("t3"), 
-// }, -// FinishedTs: 601, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// } - -// // drop table t2 -// { -// ddlEvent := PersistedDDLEvent{ -// Type: byte(model.ActionDropTable), -// SchemaID: schemaID, -// TableID: tableID2, -// SchemaVersion: 503, -// TableInfo: nil, -// FinishedTs: 603, -// } -// pStorage.handleSortedDDLEvents(ddlEvent) -// } - -// { -// allPhysicalTables, err := pStorage.getAllPhysicalTables(600, nil) -// require.Nil(t, err) -// require.Equal(t, 2, len(allPhysicalTables)) -// } - -// { -// allPhysicalTables, err := pStorage.getAllPhysicalTables(601, nil) -// require.Nil(t, err) -// require.Equal(t, 3, len(allPhysicalTables)) -// } - -// { -// allPhysicalTables, err := pStorage.getAllPhysicalTables(603, nil) -// require.Nil(t, err) -// require.Equal(t, 2, len(allPhysicalTables)) -// } -// } +func TestBuildVersionedTableInfoStore(t *testing.T) { + dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) + err := os.RemoveAll(dbPath) + require.Nil(t, err) + + gcTs := uint64(1000) + schemaID := int64(50) + tableID := int64(99) + databaseInfo := make(map[int64]*model.DBInfo) + databaseInfo[schemaID] = &model.DBInfo{ + ID: schemaID, + Name: model.NewCIStr("test"), + Tables: []*model.TableInfo{ + { + ID: tableID, + Name: model.NewCIStr("t1"), + }, + }, + } + pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) + + require.Equal(t, 1, len(pStorage.databaseMap)) + require.Equal(t, "test", pStorage.databaseMap[schemaID].Name) + + { + store := newEmptyVersionedTableInfoStore(tableID) + pStorage.buildVersionedTableInfoStore(store) + tableInfo, err := store.getTableInfo(gcTs) + require.Nil(t, err) + require.Equal(t, "t1", tableInfo.Name.O) + require.Equal(t, tableID, tableInfo.ID) + } + + // rename table + renameVersion := uint64(1500) + { + job := &model.Job{ + Type: model.ActionRenameTable, + SchemaID: schemaID, + TableID: tableID, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 3000, + TableInfo: &model.TableInfo{ + ID: tableID, + 
Name: model.NewCIStr("t2"), + }, + FinishedTS: renameVersion, + }, + } + err = pStorage.handleDDLJob(job) + require.Nil(t, err) + } + + // create another table + tableID2 := tableID + 1 + createVersion := renameVersion + 200 + { + job := &model.Job{ + Type: model.ActionCreateTable, + SchemaID: schemaID, + TableID: tableID2, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 3500, + TableInfo: &model.TableInfo{ + ID: tableID2, + Name: model.NewCIStr("t3"), + }, + FinishedTS: createVersion, + }, + } + err = pStorage.handleDDLJob(job) + require.Nil(t, err) + } + + upperBound := UpperBoundMeta{ + FinishedDDLTs: 3000, + SchemaVersion: 4000, + ResolvedTs: 2000, + } + pStorage = loadPersistentStorageForTest(pStorage.db, gcTs, upperBound) + { + store := newEmptyVersionedTableInfoStore(tableID) + pStorage.buildVersionedTableInfoStore(store) + require.Equal(t, 2, len(store.infos)) + tableInfo, err := store.getTableInfo(gcTs) + require.Nil(t, err) + require.Equal(t, "t1", tableInfo.Name.O) + require.Equal(t, tableID, tableInfo.ID) + tableInfo2, err := store.getTableInfo(renameVersion) + require.Nil(t, err) + require.Equal(t, "t2", tableInfo2.Name.O) + + renameVersion2 := uint64(3000) + store.applyDDL(PersistedDDLEvent{ + Type: byte(model.ActionRenameTable), + CurrentSchemaID: schemaID, + CurrentTableID: tableID, + SchemaVersion: 3000, + TableInfo: &model.TableInfo{ + ID: tableID, + Name: model.NewCIStr("t3"), + }, + FinishedTs: renameVersion2, + }) + tableInfo3, err := store.getTableInfo(renameVersion2) + require.Nil(t, err) + require.Equal(t, "t3", tableInfo3.Name.O) + } + + { + store := newEmptyVersionedTableInfoStore(tableID2) + pStorage.buildVersionedTableInfoStore(store) + require.Equal(t, 1, len(store.infos)) + tableInfo, err := store.getTableInfo(createVersion) + require.Nil(t, err) + require.Equal(t, "t3", tableInfo.Name.O) + require.Equal(t, tableID2, tableInfo.ID) + } + + // truncate table + tableID3 := tableID2 + 1 + truncateVersion := createVersion + 200 + { + job 
:= &model.Job{ + Type: model.ActionTruncateTable, + SchemaID: schemaID, + TableID: tableID2, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 3600, + TableInfo: &model.TableInfo{ + ID: tableID3, + Name: model.NewCIStr("t4"), + }, + FinishedTS: truncateVersion, + }, + } + err = pStorage.handleDDLJob(job) + require.Nil(t, err) + } + + { + store := newEmptyVersionedTableInfoStore(tableID2) + pStorage.buildVersionedTableInfoStore(store) + require.Equal(t, 1, len(store.infos)) + require.Equal(t, truncateVersion, store.deleteVersion) + } + + { + store := newEmptyVersionedTableInfoStore(tableID3) + pStorage.buildVersionedTableInfoStore(store) + require.Equal(t, 1, len(store.infos)) + tableInfo, err := store.getTableInfo(truncateVersion) + require.Nil(t, err) + require.Equal(t, "t4", tableInfo.Name.O) + require.Equal(t, tableID3, tableInfo.ID) + } +} + +func TestHandleCreateDropSchemaTableDDL(t *testing.T) { + dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) + err := os.RemoveAll(dbPath) + require.Nil(t, err) + pStorage := newEmptyPersistentStorageForTest(dbPath) + + // create db + schemaID := int64(300) + { + job := &model.Job{ + Type: model.ActionCreateSchema, + SchemaID: schemaID, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 100, + DBInfo: &model.DBInfo{ + ID: schemaID, + Name: model.NewCIStr("test"), + }, + TableInfo: nil, + FinishedTS: 200, + }, + } + pStorage.handleDDLJob(job) + + require.Equal(t, 1, len(pStorage.databaseMap)) + require.Equal(t, "test", pStorage.databaseMap[schemaID].Name) + require.Equal(t, 1, len(pStorage.tableTriggerDDLHistory)) + require.Equal(t, uint64(200), pStorage.tableTriggerDDLHistory[0]) + } + + // create a table + tableID := int64(100) + { + job := &model.Job{ + Type: model.ActionCreateTable, + SchemaID: schemaID, + TableID: tableID, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 101, + TableInfo: &model.TableInfo{ + Name: model.NewCIStr("t1"), + }, + FinishedTS: 201, + }, + } + pStorage.handleDDLJob(job) + + require.Equal(t, 
1, len(pStorage.databaseMap[schemaID].Tables)) + require.Equal(t, 1, len(pStorage.tableMap)) + require.Equal(t, 2, len(pStorage.tableTriggerDDLHistory)) + require.Equal(t, uint64(201), pStorage.tableTriggerDDLHistory[1]) + require.Equal(t, 1, len(pStorage.tablesDDLHistory)) + require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID])) + } + + // create another table + tableID2 := int64(105) + { + job := &model.Job{ + Type: model.ActionCreateTable, + SchemaID: schemaID, + TableID: tableID2, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 103, + TableInfo: &model.TableInfo{ + Name: model.NewCIStr("t2"), + }, + + FinishedTS: 203, + }, + } + pStorage.handleDDLJob(job) + + require.Equal(t, 2, len(pStorage.databaseMap[schemaID].Tables)) + require.Equal(t, 2, len(pStorage.tableMap)) + require.Equal(t, 3, len(pStorage.tableTriggerDDLHistory)) + require.Equal(t, uint64(203), pStorage.tableTriggerDDLHistory[2]) + require.Equal(t, 2, len(pStorage.tablesDDLHistory)) + require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID2])) + require.Equal(t, uint64(203), pStorage.tablesDDLHistory[tableID2][0]) + } + + // drop a table + { + job := &model.Job{ + Type: model.ActionDropTable, + SchemaID: schemaID, + TableID: tableID2, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 105, + TableInfo: nil, + FinishedTS: 205, + }, + } + pStorage.handleDDLJob(job) + + require.Equal(t, 1, len(pStorage.databaseMap[schemaID].Tables)) + require.Equal(t, 1, len(pStorage.tableMap)) + require.Equal(t, 4, len(pStorage.tableTriggerDDLHistory)) + require.Equal(t, uint64(205), pStorage.tableTriggerDDLHistory[3]) + require.Equal(t, 2, len(pStorage.tablesDDLHistory)) + require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID])) + require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID2])) + require.Equal(t, uint64(205), pStorage.tablesDDLHistory[tableID2][1]) + } + + // truncate a table + tableID3 := int64(112) + { + job := &model.Job{ + Type: model.ActionTruncateTable, + SchemaID: schemaID, + 
TableID: tableID, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 107, + TableInfo: &model.TableInfo{ + ID: tableID3, + }, + FinishedTS: 207, + }, + } + pStorage.handleDDLJob(job) + + require.Equal(t, 1, len(pStorage.databaseMap[schemaID].Tables)) + require.Equal(t, 1, len(pStorage.tableMap)) + require.Equal(t, 4, len(pStorage.tableTriggerDDLHistory)) + require.Equal(t, 3, len(pStorage.tablesDDLHistory)) + require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID])) + require.Equal(t, uint64(207), pStorage.tablesDDLHistory[tableID][1]) + require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID3])) + require.Equal(t, uint64(207), pStorage.tablesDDLHistory[tableID3][0]) + } + + // drop db + { + job := &model.Job{ + Type: model.ActionDropSchema, + SchemaID: schemaID, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 200, + DBInfo: &model.DBInfo{ + ID: schemaID, + Name: model.NewCIStr("test"), + }, + TableInfo: nil, + FinishedTS: 300, + }, + } + + pStorage.handleDDLJob(job) + + require.Equal(t, 0, len(pStorage.databaseMap)) + require.Equal(t, 0, len(pStorage.tableMap)) + require.Equal(t, 5, len(pStorage.tableTriggerDDLHistory)) + require.Equal(t, uint64(300), pStorage.tableTriggerDDLHistory[4]) + require.Equal(t, 3, len(pStorage.tablesDDLHistory)) + require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID])) + require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID2])) + require.Equal(t, 2, len(pStorage.tablesDDLHistory[tableID3])) + require.Equal(t, uint64(300), pStorage.tablesDDLHistory[tableID3][1]) + } +} + +func TestHandleRenameTable(t *testing.T) { + dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) + err := os.RemoveAll(dbPath) + require.Nil(t, err) + + gcTs := uint64(500) + schemaID1 := int64(300) + schemaID2 := int64(305) + + databaseInfo := make(map[int64]*model.DBInfo) + databaseInfo[schemaID1] = &model.DBInfo{ + ID: schemaID1, + Name: model.NewCIStr("test"), + } + databaseInfo[schemaID2] = &model.DBInfo{ + ID: schemaID2, + Name: 
model.NewCIStr("test2"), + } + pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) + + // create a table + tableID := int64(100) + { + job := &model.Job{ + Type: model.ActionCreateTable, + SchemaID: schemaID1, + TableID: tableID, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 501, + TableInfo: &model.TableInfo{ + ID: tableID, + Name: model.NewCIStr("t1"), + }, + FinishedTS: 601, + }, + } + pStorage.handleDDLJob(job) + require.Equal(t, 2, len(pStorage.databaseMap)) + require.Equal(t, 1, len(pStorage.databaseMap[schemaID1].Tables)) + require.Equal(t, 0, len(pStorage.databaseMap[schemaID2].Tables)) + require.Equal(t, schemaID1, pStorage.tableMap[tableID].SchemaID) + require.Equal(t, "t1", pStorage.tableMap[tableID].Name) + } + + // rename table to a different db + { + job := &model.Job{ + Type: model.ActionRenameTable, + SchemaID: schemaID2, + TableID: tableID, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 505, + TableInfo: &model.TableInfo{ + ID: tableID, + Name: model.NewCIStr("t2"), + }, + FinishedTS: 605, + }, + } + pStorage.handleDDLJob(job) + require.Equal(t, 2, len(pStorage.databaseMap)) + require.Equal(t, 0, len(pStorage.databaseMap[schemaID1].Tables)) + require.Equal(t, 1, len(pStorage.databaseMap[schemaID2].Tables)) + require.Equal(t, schemaID2, pStorage.tableMap[tableID].SchemaID) + require.Equal(t, "t2", pStorage.tableMap[tableID].Name) + } + + { + ddlEvents, err := pStorage.fetchTableDDLEvents(tableID, nil, 601, 700) + require.Nil(t, err) + require.Equal(t, 1, len(ddlEvents)) + // rename table event + require.Equal(t, uint64(605), ddlEvents[0].FinishedTs) + require.Equal(t, "test2", ddlEvents[0].SchemaName) + require.Equal(t, "t2", ddlEvents[0].TableName) + require.Equal(t, common.InfluenceTypeNormal, ddlEvents[0].BlockedTables.InfluenceType) + require.Equal(t, schemaID1, ddlEvents[0].BlockedTables.SchemaID) + require.Equal(t, tableID, ddlEvents[0].BlockedTables.TableIDs[0]) + // TODO: don't count on the order + require.Equal(t, 
heartbeatpb.DDLSpan.TableID, ddlEvents[0].BlockedTables.TableIDs[1]) + require.Equal(t, tableID, ddlEvents[0].NeedDroppedTables.TableIDs[0]) + + require.Equal(t, tableID, ddlEvents[0].NeedAddedTables[0].TableID) + + require.Equal(t, "test2", ddlEvents[0].TableNameChange.AddName[0].SchemaName) + require.Equal(t, "t2", ddlEvents[0].TableNameChange.AddName[0].TableName) + require.Equal(t, "test", ddlEvents[0].TableNameChange.DropName[0].SchemaName) + require.Equal(t, "t1", ddlEvents[0].TableNameChange.DropName[0].TableName) + } + + // test filter: after rename, the table is filtered out + { + filterConfig := &config.FilterConfig{ + Rules: []string{"test.*"}, + } + tableFilter, err := filter.NewFilter(filterConfig, "", false) + require.Nil(t, err) + ddlEvents, err := pStorage.fetchTableDDLEvents(tableID, tableFilter, 601, 700) + require.Nil(t, err) + require.Equal(t, 1, len(ddlEvents)) + require.Equal(t, common.InfluenceTypeNormal, ddlEvents[0].BlockedTables.InfluenceType) + require.Equal(t, schemaID1, ddlEvents[0].BlockedTables.SchemaID) + require.Equal(t, tableID, ddlEvents[0].BlockedTables.TableIDs[0]) + // TODO: don't count on the order + require.Equal(t, heartbeatpb.DDLSpan.TableID, ddlEvents[0].BlockedTables.TableIDs[1]) + require.Equal(t, tableID, ddlEvents[0].NeedDroppedTables.TableIDs[0]) + + require.Nil(t, ddlEvents[0].NeedAddedTables) + + require.Equal(t, 0, len(ddlEvents[0].TableNameChange.AddName)) + require.Equal(t, "test", ddlEvents[0].TableNameChange.DropName[0].SchemaName) + require.Equal(t, "t1", ddlEvents[0].TableNameChange.DropName[0].TableName) + } + + // test filter: before rename, the table is filtered out, so only table trigger can get the event + { + filterConfig := &config.FilterConfig{ + Rules: []string{"test2.*"}, + } + tableFilter, err := filter.NewFilter(filterConfig, "", false) + require.Nil(t, err) + triggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(tableFilter, 601, 10) + require.Nil(t, err) + require.Equal(t, 1, 
len(triggerDDLEvents)) + require.Nil(t, triggerDDLEvents[0].BlockedTables) + require.Nil(t, triggerDDLEvents[0].NeedDroppedTables) + + require.Equal(t, tableID, triggerDDLEvents[0].NeedAddedTables[0].TableID) + + require.Equal(t, "test2", triggerDDLEvents[0].TableNameChange.AddName[0].SchemaName) + require.Equal(t, "t2", triggerDDLEvents[0].TableNameChange.AddName[0].TableName) + require.Equal(t, 0, len(triggerDDLEvents[0].TableNameChange.DropName)) + } + + // test filter: the table is always filtered out + { + // check table trigger events cannot get the event + filterConfig := &config.FilterConfig{ + Rules: []string{"test3.*"}, + } + tableFilter, err := filter.NewFilter(filterConfig, "", false) + require.Nil(t, err) + triggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(tableFilter, 601, 10) + require.Nil(t, err) + require.Equal(t, 0, len(triggerDDLEvents)) + } +} + +func TestFetchDDLEventsBasic(t *testing.T) { + dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) + err := os.RemoveAll(dbPath) + require.Nil(t, err) + pStorage := newEmptyPersistentStorageForTest(dbPath) + + // create db + schemaID := int64(300) + schemaName := "test" + { + ddlEvent := &model.Job{ + Type: model.ActionCreateSchema, + SchemaID: schemaID, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 100, + DBInfo: &model.DBInfo{ + ID: schemaID, + Name: model.NewCIStr(schemaName), + }, + TableInfo: nil, + FinishedTS: 200, + }, + } + pStorage.handleDDLJob(ddlEvent) + } + + // create a table + tableID := int64(100) + { + ddlEvent := &model.Job{ + Type: model.ActionCreateTable, + SchemaID: schemaID, + TableID: tableID, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 501, + TableInfo: &model.TableInfo{ + ID: tableID, + Name: model.NewCIStr("t1"), + }, + FinishedTS: 601, + }, + } + pStorage.handleDDLJob(ddlEvent) + } + + // rename table + { + ddlEvent := &model.Job{ + Type: model.ActionRenameTable, + SchemaID: schemaID, + TableID: tableID, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 
505, + TableInfo: &model.TableInfo{ + ID: tableID, + Name: model.NewCIStr("t2"), + }, + FinishedTS: 605, + }, + } + pStorage.handleDDLJob(ddlEvent) + } + + // truncate table + tableID2 := int64(105) + { + ddlEvent := &model.Job{ + Type: model.ActionTruncateTable, + SchemaID: schemaID, + TableID: tableID, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 507, + TableInfo: &model.TableInfo{ + ID: tableID2, + Name: model.NewCIStr("t2"), + }, + FinishedTS: 607, + }, + } + pStorage.handleDDLJob(ddlEvent) + } + + // create another table + tableID3 := int64(200) + { + ddlEvent := &model.Job{ + Type: model.ActionCreateTable, + SchemaID: schemaID, + TableID: tableID3, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 509, + TableInfo: &model.TableInfo{ + ID: tableID, + Name: model.NewCIStr("t3"), + }, + FinishedTS: 609, + }, + } + pStorage.handleDDLJob(ddlEvent) + } + + // drop newly created table + { + ddlEvent := &model.Job{ + Type: model.ActionDropTable, + SchemaID: schemaID, + TableID: tableID3, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 511, + TableInfo: &model.TableInfo{ + ID: tableID, + Name: model.NewCIStr("t3"), + }, + FinishedTS: 611, + }, + } + pStorage.handleDDLJob(ddlEvent) + } + + // drop db + { + ddlEvent := &model.Job{ + Type: model.ActionDropSchema, + SchemaID: schemaID, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 600, + DBInfo: &model.DBInfo{ + ID: schemaID, + Name: model.NewCIStr(schemaName), + }, + TableInfo: nil, + FinishedTS: 700, + }, + } + pStorage.handleDDLJob(ddlEvent) + } + + // fetch table ddl events + { + ddlEvents, err := pStorage.fetchTableDDLEvents(tableID, nil, 601, 700) + require.Nil(t, err) + require.Equal(t, 2, len(ddlEvents)) + // rename table event + require.Equal(t, uint64(605), ddlEvents[0].FinishedTs) + // truncate table event + require.Equal(t, uint64(607), ddlEvents[1].FinishedTs) + require.Equal(t, "test", ddlEvents[1].SchemaName) + require.Equal(t, "t2", ddlEvents[1].TableName) + require.Equal(t, 
common.InfluenceTypeNormal, ddlEvents[1].NeedDroppedTables.InfluenceType) + require.Equal(t, schemaID, ddlEvents[1].NeedDroppedTables.SchemaID) + require.Equal(t, 1, len(ddlEvents[1].NeedDroppedTables.TableIDs)) + require.Equal(t, tableID, ddlEvents[1].NeedDroppedTables.TableIDs[0]) + require.Equal(t, 1, len(ddlEvents[1].NeedAddedTables)) + require.Equal(t, schemaID, ddlEvents[1].NeedAddedTables[0].SchemaID) + require.Equal(t, tableID2, ddlEvents[1].NeedAddedTables[0].TableID) + } + + // fetch table ddl events for another table + { + // TODO: test return error if start ts is smaller than 607 + ddlEvents, err := pStorage.fetchTableDDLEvents(tableID2, nil, 607, 700) + require.Nil(t, err) + require.Equal(t, 1, len(ddlEvents)) + // drop db event + require.Equal(t, uint64(700), ddlEvents[0].FinishedTs) + require.Equal(t, common.InfluenceTypeDB, ddlEvents[0].NeedDroppedTables.InfluenceType) + require.Equal(t, schemaID, ddlEvents[0].NeedDroppedTables.SchemaID) + } + + // fetch table ddl events again + { + ddlEvents, err := pStorage.fetchTableDDLEvents(tableID3, nil, 609, 700) + require.Nil(t, err) + require.Equal(t, 1, len(ddlEvents)) + // drop table event + require.Equal(t, uint64(611), ddlEvents[0].FinishedTs) + require.Equal(t, common.InfluenceTypeNormal, ddlEvents[0].NeedDroppedTables.InfluenceType) + require.Equal(t, 1, len(ddlEvents[0].NeedDroppedTables.TableIDs)) + require.Equal(t, tableID3, ddlEvents[0].NeedDroppedTables.TableIDs[0]) + require.Equal(t, schemaID, ddlEvents[0].NeedDroppedTables.SchemaID) + } + + // fetch all table trigger ddl events + { + tableTriggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(nil, 0, 10) + require.Nil(t, err) + require.Equal(t, 6, len(tableTriggerDDLEvents)) + // create db event + require.Equal(t, uint64(200), tableTriggerDDLEvents[0].FinishedTs) + // create table event + require.Equal(t, uint64(601), tableTriggerDDLEvents[1].FinishedTs) + require.Equal(t, 1, len(tableTriggerDDLEvents[1].NeedAddedTables)) + 
require.Equal(t, schemaID, tableTriggerDDLEvents[1].NeedAddedTables[0].SchemaID) + require.Equal(t, tableID, tableTriggerDDLEvents[1].NeedAddedTables[0].TableID) + require.Equal(t, schemaName, tableTriggerDDLEvents[1].TableNameChange.AddName[0].SchemaName) + require.Equal(t, "t1", tableTriggerDDLEvents[1].TableNameChange.AddName[0].TableName) + // rename table event + require.Equal(t, uint64(605), tableTriggerDDLEvents[2].FinishedTs) + // create table event + require.Equal(t, uint64(609), tableTriggerDDLEvents[3].FinishedTs) + // drop table event + require.Equal(t, uint64(611), tableTriggerDDLEvents[4].FinishedTs) + require.Equal(t, common.InfluenceTypeNormal, tableTriggerDDLEvents[4].NeedDroppedTables.InfluenceType) + require.Equal(t, schemaID, tableTriggerDDLEvents[4].NeedDroppedTables.SchemaID) + require.Equal(t, tableID3, tableTriggerDDLEvents[4].NeedDroppedTables.TableIDs[0]) + require.Equal(t, schemaName, tableTriggerDDLEvents[4].TableNameChange.DropName[0].SchemaName) + require.Equal(t, "t3", tableTriggerDDLEvents[4].TableNameChange.DropName[0].TableName) + require.Equal(t, common.InfluenceTypeNormal, tableTriggerDDLEvents[4].BlockedTables.InfluenceType) + require.Equal(t, 2, len(tableTriggerDDLEvents[4].BlockedTables.TableIDs)) + require.Equal(t, tableID3, tableTriggerDDLEvents[4].BlockedTables.TableIDs[0]) + // TODO: don't count on the order + require.Equal(t, heartbeatpb.DDLSpan.TableID, tableTriggerDDLEvents[4].BlockedTables.TableIDs[1]) + // drop db event + require.Equal(t, uint64(700), tableTriggerDDLEvents[5].FinishedTs) + require.Equal(t, schemaName, tableTriggerDDLEvents[5].TableNameChange.DropDatabaseName) + } + + // fetch partial table trigger ddl events + { + tableTriggerDDLEvents, err := pStorage.fetchTableTriggerDDLEvents(nil, 0, 2) + require.Nil(t, err) + require.Equal(t, 2, len(tableTriggerDDLEvents)) + require.Equal(t, uint64(200), tableTriggerDDLEvents[0].FinishedTs) + require.Equal(t, uint64(601), tableTriggerDDLEvents[1].FinishedTs) + } + 
+ // TODO: test filter +} + +func TestGCPersistStorage(t *testing.T) { + dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) + err := os.RemoveAll(dbPath) + require.Nil(t, err) + + schemaID := int64(300) + gcTs := uint64(600) + tableID1 := int64(100) + tableID2 := int64(200) + + databaseInfo := make(map[int64]*model.DBInfo) + databaseInfo[schemaID] = &model.DBInfo{ + ID: schemaID, + Name: model.NewCIStr("test"), + Tables: []*model.TableInfo{ + { + ID: tableID1, + Name: model.NewCIStr("t1"), + }, + { + ID: tableID2, + Name: model.NewCIStr("t2"), + }, + }, + } + pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) + + // create table t3 + tableID3 := int64(500) + { + ddlEvent := &model.Job{ + Type: model.ActionCreateTable, + SchemaID: schemaID, + TableID: tableID3, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 501, + TableInfo: &model.TableInfo{ + ID: tableID3, + Name: model.NewCIStr("t3"), + }, + FinishedTS: 602, + }, + } + pStorage.handleDDLJob(ddlEvent) + } + + // drop table t2 + { + ddlEvent := &model.Job{ + Type: model.ActionDropTable, + SchemaID: schemaID, + TableID: tableID2, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 503, + TableInfo: nil, + FinishedTS: 603, + }, + } + pStorage.handleDDLJob(ddlEvent) + } + + // rename table t1 + { + ddlEvent := &model.Job{ + Type: model.ActionRenameTable, + SchemaID: schemaID, + TableID: tableID1, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 505, + TableInfo: &model.TableInfo{ + ID: tableID1, + Name: model.NewCIStr("t1_r"), + }, + FinishedTS: 605, + }, + } + pStorage.handleDDLJob(ddlEvent) + } + + // write upper bound + newUpperBound := UpperBoundMeta{ + FinishedDDLTs: 700, + SchemaVersion: 509, + ResolvedTs: 705, + } + { + writeUpperBoundMeta(pStorage.db, newUpperBound) + } + + // mock gc + newGcTs1 := uint64(601) + { + databaseInfo := make(map[int64]*model.DBInfo) + databaseInfo[schemaID] = &model.DBInfo{ + ID: schemaID, + Name: model.NewCIStr("test"), + Tables: []*model.TableInfo{ + { + ID: 
tableID1, + Name: model.NewCIStr("t1"), + }, + { + ID: tableID2, + Name: model.NewCIStr("t2"), + }, + }, + } + mockWriteKVSnapOnDisk(pStorage.db, newGcTs1, databaseInfo) + + require.Equal(t, 3, len(pStorage.tableTriggerDDLHistory)) + require.Equal(t, 3, len(pStorage.tablesDDLHistory)) + pStorage.cleanObseleteDataInMemory(newGcTs1) + require.Equal(t, 3, len(pStorage.tableTriggerDDLHistory)) + require.Equal(t, 3, len(pStorage.tablesDDLHistory)) + } + + // mock gc again with a register table + pStorage.registerTable(tableID1, newGcTs1+1) + newGcTs2 := uint64(603) + { + databaseInfo := make(map[int64]*model.DBInfo) + databaseInfo[schemaID] = &model.DBInfo{ + ID: schemaID, + Name: model.NewCIStr("test"), + Tables: []*model.TableInfo{ + { + ID: tableID1, + Name: model.NewCIStr("t1"), + }, + { + ID: tableID3, + Name: model.NewCIStr("t3"), + }, + }, + } + mockWriteKVSnapOnDisk(pStorage.db, newGcTs2, databaseInfo) + + require.Equal(t, 3, len(pStorage.tableTriggerDDLHistory)) + require.Equal(t, 3, len(pStorage.tablesDDLHistory)) + pStorage.cleanObseleteDataInMemory(newGcTs2) + require.Equal(t, 1, len(pStorage.tableTriggerDDLHistory)) + require.Equal(t, uint64(605), pStorage.tableTriggerDDLHistory[0]) + require.Equal(t, 1, len(pStorage.tablesDDLHistory)) + require.Equal(t, 1, len(pStorage.tablesDDLHistory[tableID1])) + tableInfoT1, err := pStorage.getTableInfo(tableID1, newGcTs2) + require.Nil(t, err) + require.Equal(t, "t1", tableInfoT1.Name.O) + tableInfoT1, err = pStorage.getTableInfo(tableID1, 606) + require.Nil(t, err) + require.Equal(t, "t1_r", tableInfoT1.Name.O) + } + + pStorage = loadPersistentStorageForTest(pStorage.db, newGcTs2, newUpperBound) + { + require.Equal(t, newGcTs2, pStorage.gcTs) + require.Equal(t, newUpperBound, pStorage.upperBound) + require.Equal(t, 1, len(pStorage.tableTriggerDDLHistory)) + require.Equal(t, uint64(605), pStorage.tableTriggerDDLHistory[0]) + require.Equal(t, 1, len(pStorage.tablesDDLHistory)) + require.Equal(t, 1, 
len(pStorage.tablesDDLHistory[tableID1])) + } + + // TODO: test obsolete data can be removed +} + +func TestGetAllPhysicalTables(t *testing.T) { + dbPath := fmt.Sprintf("/tmp/testdb-%s", t.Name()) + err := os.RemoveAll(dbPath) + require.Nil(t, err) + + schemaID := int64(300) + gcTs := uint64(600) + tableID1 := int64(100) + tableID2 := int64(200) + + databaseInfo := make(map[int64]*model.DBInfo) + databaseInfo[schemaID] = &model.DBInfo{ + ID: schemaID, + Name: model.NewCIStr("test"), + Tables: []*model.TableInfo{ + { + ID: tableID1, + Name: model.NewCIStr("t1"), + }, + { + ID: tableID2, + Name: model.NewCIStr("t2"), + }, + }, + } + pStorage := newPersistentStorageForTest(dbPath, gcTs, databaseInfo) + + // create table t3 + tableID3 := int64(500) + { + job := &model.Job{ + Type: model.ActionCreateTable, + SchemaID: schemaID, + TableID: tableID3, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 501, + TableInfo: &model.TableInfo{ + ID: tableID3, + Name: model.NewCIStr("t3"), + }, + + FinishedTS: 601, + }, + } + pStorage.handleDDLJob(job) + } + + // drop table t2 + { + job := &model.Job{ + Type: model.ActionDropTable, + SchemaID: schemaID, + TableID: tableID2, + BinlogInfo: &model.HistoryInfo{ + SchemaVersion: 503, + TableInfo: nil, + FinishedTS: 603, + }, + } + pStorage.handleDDLJob(job) + } + + { + allPhysicalTables, err := pStorage.getAllPhysicalTables(600, nil) + require.Nil(t, err) + require.Equal(t, 2, len(allPhysicalTables)) + } + + { + allPhysicalTables, err := pStorage.getAllPhysicalTables(601, nil) + require.Nil(t, err) + require.Equal(t, 3, len(allPhysicalTables)) + } + + { + allPhysicalTables, err := pStorage.getAllPhysicalTables(603, nil) + require.Nil(t, err) + require.Equal(t, 2, len(allPhysicalTables)) + } +} diff --git a/logservice/schemastore/schema_store.go b/logservice/schemastore/schema_store.go index de771d085..b3b8db135 100644 --- a/logservice/schemastore/schema_store.go +++ b/logservice/schemastore/schema_store.go @@ -154,7 +154,9 @@ func (s 
*schemaStore) updateResolvedTsPeriodically(ctx context.Context) error { s.dataStorage.handleDDLJob(event.Job) } } - // TODO: resolved ts are updated after ddl events written to disk, do we need to optimize it? + // When registering a new table, it will load all ddl jobs from disk for the table, + // so we can only update the resolved ts after all ddl jobs are written to disk. + // Can we optimize it to update the resolved ts more eagerly? s.resolvedTs.Store(pendingTs) currentPhyTs := oracle.GetPhysical(s.pdClock.CurrentTime()) resolvedPhyTs := oracle.ExtractPhysical(pendingTs)