diff --git a/infoschema/builder.go b/infoschema/builder.go
index 7f44095629c19..1fb13c58e361c 100644
--- a/infoschema/builder.go
+++ b/infoschema/builder.go
@@ -14,6 +14,8 @@
 package infoschema
 
 import (
+	"sort"
+
 	"github.com/juju/errors"
 	"github.com/pingcap/tidb/meta"
 	"github.com/pingcap/tidb/meta/autoid"
@@ -37,7 +39,7 @@ func (b *Builder) ApplyDiff(m *meta.Meta, diff *model.SchemaDiff) error {
 		b.applyDropSchema(diff.SchemaID)
 		return nil
 	}
-	roDBInfo, ok := b.is.schemas[diff.SchemaID]
+	roDBInfo, ok := b.is.SchemaByID(diff.SchemaID)
 	if !ok {
 		return ErrDatabaseNotExists
 	}
@@ -54,15 +56,18 @@ func (b *Builder) ApplyDiff(m *meta.Meta, diff *model.SchemaDiff) error {
 		oldTableID = diff.TableID
 		newTableID = diff.TableID
 	}
+	b.copySchemaTables(roDBInfo.Name.L)
+	b.copySortedTables(oldTableID, newTableID)
+
 	// We try to reuse the old allocator, so the cached auto ID can be reused.
 	var alloc autoid.Allocator
-	if oldTableID != 0 {
+	if tableIDIsValid(oldTableID) {
 		if oldTableID == newTableID {
 			alloc, _ = b.is.AllocByID(oldTableID)
 		}
-		b.applyDropTable(roDBInfo.Name.L, oldTableID)
+		b.applyDropTable(roDBInfo, oldTableID)
 	}
-	if newTableID != 0 {
+	if tableIDIsValid(newTableID) {
 		// All types except DropTable.
 		err := b.applyCreateTable(m, roDBInfo, newTableID, alloc)
 		if err != nil {
@@ -74,22 +79,40 @@ func (b *Builder) ApplyDiff(m *meta.Meta, diff *model.SchemaDiff) error {
 	return nil
 }
 
+// copySortedTables copies sortedTables for the old table and the new table for later modification.
+func (b *Builder) copySortedTables(oldTableID, newTableID int64) {
+	buckets := b.is.sortedTablesBuckets
+	if tableIDIsValid(oldTableID) {
+		bucketIdx := tableBucketIdx(oldTableID)
+		oldSortedTables := buckets[bucketIdx]
+		newSortedTables := make(sortedTables, len(oldSortedTables))
+		copy(newSortedTables, oldSortedTables)
+		buckets[bucketIdx] = newSortedTables
+	}
+	if tableIDIsValid(newTableID) && newTableID != oldTableID {
+		oldSortedTables := buckets[tableBucketIdx(newTableID)]
+		newSortedTables := make(sortedTables, len(oldSortedTables), len(oldSortedTables)+1)
+		copy(newSortedTables, oldSortedTables)
+		buckets[tableBucketIdx(newTableID)] = newSortedTables
+	}
+}
+
 // updateDBInfo clones a new DBInfo from old DBInfo, and update on the new one.
 func (b *Builder) updateDBInfo(roDBInfo *model.DBInfo, oldTableID, newTableID int64) {
-	newDbInfo := new(model.DBInfo)
-	*newDbInfo = *roDBInfo
+	newDbInfo := *roDBInfo
 	newDbInfo.Tables = make([]*model.TableInfo, 0, len(roDBInfo.Tables))
-	if newTableID != 0 {
+	if tableIDIsValid(newTableID) {
 		// All types except DropTable.
-		newTblInfo := b.is.tables[newTableID].Meta()
-		newDbInfo.Tables = append(newDbInfo.Tables, newTblInfo)
+		if newTbl, ok := b.is.TableByID(newTableID); ok {
+			newDbInfo.Tables = append(newDbInfo.Tables, newTbl.Meta())
+		}
 	}
 	for _, tblInfo := range roDBInfo.Tables {
 		if tblInfo.ID != oldTableID && tblInfo.ID != newTableID {
 			newDbInfo.Tables = append(newDbInfo.Tables, tblInfo)
 		}
 	}
-	b.is.schemas[newDbInfo.ID] = newDbInfo
+	b.is.schemaMap[roDBInfo.Name.L].dbInfo = &newDbInfo
 }
 
 func (b *Builder) applyCreateSchema(m *meta.Meta, diff *model.SchemaDiff) error {
@@ -102,20 +125,18 @@ func (b *Builder) applyCreateSchema(m *meta.Meta, diff *model.SchemaDiff) error
 		// full load.
 		return ErrDatabaseNotExists
 	}
-	b.is.schemas[di.ID] = di
-	b.is.schemaNameToID[di.Name.L] = di.ID
+	b.is.schemaMap[di.Name.L] = &schemaTables{dbInfo: di, tables: make(map[string]table.Table)}
 	return nil
 }
 
 func (b *Builder) applyDropSchema(schemaID int64) {
-	di, ok := b.is.schemas[schemaID]
+	di, ok := b.is.SchemaByID(schemaID)
 	if !ok {
 		return
 	}
-	delete(b.is.schemas, di.ID)
-	delete(b.is.schemaNameToID, di.Name.L)
+	delete(b.is.schemaMap, di.Name.L)
 	for _, tbl := range di.Tables {
-		b.applyDropTable(di.Name.L, tbl.ID)
+		b.applyDropTable(di, tbl.ID)
 	}
 }
 
@@ -136,58 +157,57 @@ func (b *Builder) applyCreateTable(m *meta.Meta, roDBInfo *model.DBInfo, tableID
 	if err != nil {
 		return errors.Trace(err)
 	}
-	b.is.tables[tblInfo.ID] = tbl
-	tn := makeTableName(roDBInfo.Name.L, tblInfo.Name.L)
-	b.is.tableNameToID[string(tn)] = tblInfo.ID
+	tableNames := b.is.schemaMap[roDBInfo.Name.L]
+	tableNames.tables[tblInfo.Name.L] = tbl
+	bucketIdx := tableBucketIdx(tableID)
+	sortedTables := b.is.sortedTablesBuckets[bucketIdx]
+	sortedTables = append(sortedTables, tbl)
+	sort.Sort(sortedTables)
+	b.is.sortedTablesBuckets[bucketIdx] = sortedTables
 	return nil
 }
 
-func (b *Builder) applyDropTable(schemaName string, tableID int64) {
-	tbl, ok := b.is.tables[tableID]
-	if !ok {
+func (b *Builder) applyDropTable(di *model.DBInfo, tableID int64) {
+	bucketIdx := tableBucketIdx(tableID)
+	sortedTables := b.is.sortedTablesBuckets[bucketIdx]
+	idx := sortedTables.searchTable(tableID)
+	if idx == -1 {
 		return
 	}
-	tblInfo := tbl.Meta()
-	delete(b.is.tables, tblInfo.ID)
-	tn := makeTableName(schemaName, tblInfo.Name.L)
-	delete(b.is.tableNameToID, string(tn))
+	if tableNames, ok := b.is.schemaMap[di.Name.L]; ok {
+		delete(tableNames.tables, sortedTables[idx].Meta().Name.L)
+	}
+	// Remove the table from the sorted table slice.
+	b.is.sortedTablesBuckets[bucketIdx] = append(sortedTables[0:idx], sortedTables[idx+1:]...)
 }
 
 // InitWithOldInfoSchema initializes an empty new InfoSchema by copies all the data from old InfoSchema.
 func (b *Builder) InitWithOldInfoSchema() *Builder {
 	oldIS := b.handle.Get().(*infoSchema)
 	b.is.schemaMetaVersion = oldIS.schemaMetaVersion
-	b.copySchemaNames(oldIS)
-	b.copyTableNames(oldIS)
-	b.copySchemas(oldIS)
-	b.copyTables(oldIS)
+	b.copySchemasMap(oldIS)
+	copy(b.is.sortedTablesBuckets, oldIS.sortedTablesBuckets)
 	return b
 }
 
-func (b *Builder) copySchemaNames(oldIS *infoSchema) {
-	for k, v := range oldIS.schemaNameToID {
-		b.is.schemaNameToID[k] = v
+func (b *Builder) copySchemasMap(oldIS *infoSchema) {
+	for k, v := range oldIS.schemaMap {
+		b.is.schemaMap[k] = v
 	}
 }
 
-func (b *Builder) copyTableNames(oldIS *infoSchema) {
-	b.is.tableNameToID = make(map[string]int64, len(oldIS.tableNameToID))
-	for k, v := range oldIS.tableNameToID {
-		b.is.tableNameToID[k] = v
+// When a table in the database has changed, we should create a new schemaTables instance, then do modifications
+// on the new one, because the old schemaTables must be read-only.
+func (b *Builder) copySchemaTables(dbName string) {
+	oldSchemaTables := b.is.schemaMap[dbName]
+	newSchemaTables := &schemaTables{
+		dbInfo: oldSchemaTables.dbInfo,
+		tables: make(map[string]table.Table, len(oldSchemaTables.tables)),
 	}
-}
-
-func (b *Builder) copySchemas(oldIS *infoSchema) {
-	for k, v := range oldIS.schemas {
-		b.is.schemas[k] = v
-	}
-}
-
-func (b *Builder) copyTables(oldIS *infoSchema) {
-	b.is.tables = make(map[int64]table.Table, len(oldIS.tables))
-	for k, v := range oldIS.tables {
-		b.is.tables[k] = v
+	for k, v := range oldSchemaTables.tables {
+		newSchemaTables.tables[k] = v
 	}
+	b.is.schemaMap[dbName] = newSchemaTables
 }
 
 // InitWithDBInfos initializes an empty new InfoSchema with a slice of DBInfo and schema version.
@@ -199,8 +219,11 @@ func (b *Builder) InitWithDBInfos(dbInfos []*model.DBInfo, schemaVersion int64)
 	info := b.is
 	info.schemaMetaVersion = schemaVersion
 	for _, di := range dbInfos {
-		info.schemas[di.ID] = di
-		info.schemaNameToID[di.Name.L] = di.ID
+		schTbls := &schemaTables{
+			dbInfo: di,
+			tables: make(map[string]table.Table, len(di.Tables)),
+		}
+		info.schemaMap[di.Name.L] = schTbls
 		for _, t := range di.Tables {
 			alloc := autoid.NewAllocator(b.handle.store, di.ID)
 			var tbl table.Table
@@ -208,38 +231,48 @@ func (b *Builder) InitWithDBInfos(dbInfos []*model.DBInfo, schemaVersion int64)
 			if err != nil {
 				return nil, errors.Trace(err)
 			}
-			info.tables[t.ID] = tbl
-			tname := makeTableName(di.Name.L, t.Name.L)
-			info.tableNameToID[string(tname)] = t.ID
+			schTbls.tables[t.Name.L] = tbl
+			sortedTables := info.sortedTablesBuckets[tableBucketIdx(t.ID)]
+			info.sortedTablesBuckets[tableBucketIdx(t.ID)] = append(sortedTables, tbl)
 		}
 	}
+	for _, v := range info.sortedTablesBuckets {
+		sort.Sort(v)
+	}
 	return b, nil
 }
 
 func (b *Builder) initMemorySchemas() error {
 	info := b.is
-	info.schemaNameToID[infoSchemaDB.Name.L] = infoSchemaDB.ID
-	info.schemas[infoSchemaDB.ID] = infoSchemaDB
+	infoSchemaTblNames := &schemaTables{
+		dbInfo: infoSchemaDB,
+		tables: make(map[string]table.Table, len(infoSchemaDB.Tables)),
+	}
+
+	info.schemaMap[infoSchemaDB.Name.L] = infoSchemaTblNames
 	for _, t := range infoSchemaDB.Tables {
 		tbl := b.handle.memSchema.nameToTable[t.Name.L]
-		info.tables[t.ID] = tbl
-		tname := makeTableName(infoSchemaDB.Name.L, t.Name.L)
-		info.tableNameToID[string(tname)] = t.ID
+		infoSchemaTblNames.tables[t.Name.L] = tbl
+		bucketIdx := tableBucketIdx(t.ID)
+		info.sortedTablesBuckets[bucketIdx] = append(info.sortedTablesBuckets[bucketIdx], tbl)
 	}
 
 	perfHandle := b.handle.memSchema.perfHandle
-	psDB := perfHandle.GetDBMeta()
+	perfSchemaDB := perfHandle.GetDBMeta()
+	perfSchemaTblNames := &schemaTables{
+		dbInfo: perfSchemaDB,
+		tables: make(map[string]table.Table, len(perfSchemaDB.Tables)),
+	}
+	info.schemaMap[perfSchemaDB.Name.L] = perfSchemaTblNames
 
-	info.schemaNameToID[psDB.Name.L] = psDB.ID
-	info.schemas[psDB.ID] = psDB
-	for _, t := range psDB.Tables {
+	for _, t := range perfSchemaDB.Tables {
 		tbl, ok := perfHandle.GetTable(t.Name.O)
 		if !ok {
 			return ErrTableNotExists.Gen("table `%s` is missing.", t.Name)
 		}
-		info.tables[t.ID] = tbl
-		tname := makeTableName(psDB.Name.L, t.Name.L)
-		info.tableNameToID[string(tname)] = t.ID
+		perfSchemaTblNames.tables[t.Name.L] = tbl
+		bucketIdx := tableBucketIdx(t.ID)
+		info.sortedTablesBuckets[bucketIdx] = append(info.sortedTablesBuckets[bucketIdx], tbl)
	}
 	return nil
 }
@@ -259,10 +292,16 @@ func NewBuilder(handle *Handle) *Builder {
 	b := new(Builder)
 	b.handle = handle
 	b.is = &infoSchema{
-		schemaNameToID: map[string]int64{},
-		tableNameToID:  map[string]int64{},
-		schemas:        map[int64]*model.DBInfo{},
-		tables:         map[int64]table.Table{},
+		schemaMap:           map[string]*schemaTables{},
+		sortedTablesBuckets: make([]sortedTables, bucketCount),
 	}
 	return b
 }
+
+func tableBucketIdx(tableID int64) int {
+	return int(tableID % bucketCount)
+}
+
+func tableIDIsValid(tableID int64) bool {
+	return tableID != 0
+}
diff --git a/infoschema/infoschema.go b/infoschema/infoschema.go
index ac6ae186ab723..1371288fdcf81 100644
--- a/infoschema/infoschema.go
+++ b/infoschema/infoschema.go
@@ -14,6 +14,7 @@
 package infoschema
 
 import (
+	"sort"
 	"strings"
 	"sync/atomic"
 
@@ -81,34 +82,66 @@ const (
 	Name = "INFORMATION_SCHEMA"
 )
 
+type sortedTables []table.Table
+
+func (s sortedTables) Len() int {
+	return len(s)
+}
+
+func (s sortedTables) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s sortedTables) Less(i, j int) bool {
+	return s[i].Meta().ID < s[j].Meta().ID
+}
+
+func (s sortedTables) searchTable(id int64) int {
+	idx := sort.Search(len(s), func(i int) bool {
+		return s[i].Meta().ID >= id
+	})
+	if idx == len(s) || s[idx].Meta().ID != id {
+		return -1
+	}
+	return idx
+}
+
+type schemaTables struct {
+	dbInfo *model.DBInfo
+	tables map[string]table.Table
+}
+
+const bucketCount = 512
+
 type infoSchema struct {
-	schemaNameToID map[string]int64
-	tableNameToID  map[string]int64
-	schemas        map[int64]*model.DBInfo
-	tables         map[int64]table.Table
+	schemaMap map[string]*schemaTables
+
+	// sortedTablesBuckets is a slice of sortedTables; a table's bucket index is (tableID % bucketCount).
+	sortedTablesBuckets []sortedTables
 
 	// We should check version when change schema.
 	schemaMetaVersion int64
 }
 
-type schemaHandle struct {
-	tableNameToID map[string]int64
-}
-
 // MockInfoSchema only serves for test.
 func MockInfoSchema(tbList []*model.TableInfo) InfoSchema {
 	result := &infoSchema{}
-	result.schemaNameToID = make(map[string]int64)
-	result.tableNameToID = make(map[string]int64)
-	result.schemas = make(map[int64]*model.DBInfo)
-	result.tables = make(map[int64]table.Table)
-
-	result.schemaNameToID["test"] = 0
-	result.schemas[0] = &model.DBInfo{ID: 0, Name: model.NewCIStr("test"), Tables: tbList}
-	for i, tb := range tbList {
-		tn := makeTableName("test", tb.Name.L)
-		result.tableNameToID[string(tn)] = int64(i)
-		result.tables[int64(i)] = table.MockTableFromMeta(tb)
+	result.schemaMap = make(map[string]*schemaTables)
+	result.sortedTablesBuckets = make([]sortedTables, bucketCount)
+	dbInfo := &model.DBInfo{ID: 0, Name: model.NewCIStr("test"), Tables: tbList}
+	tableNames := &schemaTables{
+		dbInfo: dbInfo,
+		tables: make(map[string]table.Table),
+	}
+	result.schemaMap["test"] = tableNames
+	for _, tb := range tbList {
+		tbl := table.MockTableFromMeta(tb)
+		tableNames.tables[tb.Name.L] = tbl
+		bucketIdx := tableBucketIdx(tb.ID)
+		result.sortedTablesBuckets[bucketIdx] = append(result.sortedTablesBuckets[bucketIdx], tbl)
+	}
+	for i := range result.sortedTablesBuckets {
+		sort.Sort(result.sortedTablesBuckets[i])
 	}
 	return result
 }
@@ -116,12 +149,11 @@ func MockInfoSchema(tbList []*model.TableInfo) InfoSchema {
 var _ InfoSchema = (*infoSchema)(nil)
 
 func (is *infoSchema) SchemaByName(schema model.CIStr) (val *model.DBInfo, ok bool) {
-	id, ok := is.schemaNameToID[schema.L]
+	tableNames, ok := is.schemaMap[schema.L]
 	if !ok {
 		return
 	}
-	val, ok = is.schemas[id]
-	return
+	return tableNames.dbInfo, true
 }
 
 func (is *infoSchema) SchemaMetaVersion() int64 {
@@ -129,46 +161,48 @@
 }
 
 func (is *infoSchema) SchemaExists(schema model.CIStr) bool {
-	_, ok := is.schemaNameToID[schema.L]
+	_, ok := is.schemaMap[schema.L]
 	return ok
 }
 
 func (is *infoSchema) TableByName(schema, table model.CIStr) (t table.Table, err error) {
-	name := makeTableName(schema.L, table.L)
-	id, ok := is.tableNameToID[string(name)]
-	if !ok {
-		return nil, ErrTableNotExists.Gen("table %s.%s does not exist", schema, table)
+	if tbNames, ok := is.schemaMap[schema.L]; ok {
+		if t, ok = tbNames.tables[table.L]; ok {
+			return
+		}
 	}
-	t = is.tables[id]
-	return
+	return nil, ErrTableNotExists.Gen("table %s.%s does not exist", schema, table)
 }
 
 func (is *infoSchema) TableExists(schema, table model.CIStr) bool {
-	name := makeTableName(schema.L, table.L)
-	_, ok := is.tableNameToID[string(name)]
-	return ok
-}
-
-func makeTableName(schema, table string) []byte {
-	name := make([]byte, len(schema)+1+len(table))
-	copy(name, schema)
-	name[len(schema)] = '.'
-	copy(name[len(schema)+1:], table)
-	return name
+	if tbNames, ok := is.schemaMap[schema.L]; ok {
+		if _, ok = tbNames.tables[table.L]; ok {
+			return true
+		}
+	}
+	return false
 }
 
 func (is *infoSchema) SchemaByID(id int64) (val *model.DBInfo, ok bool) {
-	val, ok = is.schemas[id]
-	return
+	for _, v := range is.schemaMap {
+		if v.dbInfo.ID == id {
+			return v.dbInfo, true
+		}
+	}
+	return nil, false
 }
 
 func (is *infoSchema) TableByID(id int64) (val table.Table, ok bool) {
-	val, ok = is.tables[id]
-	return
+	slice := is.sortedTablesBuckets[tableBucketIdx(id)]
+	idx := slice.searchTable(id)
+	if idx == -1 {
+		return nil, false
+	}
+	return slice[idx], true
 }
 
 func (is *infoSchema) AllocByID(id int64) (autoid.Allocator, bool) {
-	tbl, ok := is.tables[id]
+	tbl, ok := is.TableByID(id)
 	if !ok {
 		return nil, false
 	}
@@ -176,33 +210,33 @@
 }
 
 func (is *infoSchema) AllSchemaNames() (names []string) {
-	for _, v := range is.schemas {
-		names = append(names, v.Name.O)
+	for _, v := range is.schemaMap {
+		names = append(names, v.dbInfo.Name.O)
 	}
 	return
 }
 
 func (is *infoSchema) AllSchemas() (schemas []*model.DBInfo) {
-	for _, v := range is.schemas {
-		schemas = append(schemas, v)
+	for _, v := range is.schemaMap {
+		schemas = append(schemas, v.dbInfo)
 	}
 	return
 }
 
 func (is *infoSchema) SchemaTables(schema model.CIStr) (tables []table.Table) {
-	di, ok := is.SchemaByName(schema)
+	schemaTables, ok := is.schemaMap[schema.L]
 	if !ok {
 		return
 	}
-	for _, ti := range di.Tables {
-		tables = append(tables, is.tables[ti.ID])
+	for _, tbl := range schemaTables.tables {
+		tables = append(tables, tbl)
 	}
 	return
 }
 
 func (is *infoSchema) Clone() (result []*model.DBInfo) {
-	for _, v := range is.schemas {
-		result = append(result, v.Clone())
+	for _, v := range is.schemaMap {
+		result = append(result, v.dbInfo.Clone())
 	}
 	return
 }
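
For reviewers, a standalone sketch of the lookup scheme this patch moves to: tables are spread over bucketCount buckets by tableID % bucketCount, each bucket is kept sorted by table ID, and TableByID becomes a bucket pick plus a binary search instead of a map lookup. This is an illustration only; mockTable, the sample IDs, and main() are stand-ins invented for the example (the real code works on table.Table values inside the infoschema package), while bucketCount, tableBucketIdx, and searchTable mirror the definitions added above.

package main

import (
	"fmt"
	"sort"
)

const bucketCount = 512

// mockTable is a stand-in for table.Table; only the ID matters for the lookup.
type mockTable struct{ id int64 }

type sortedTables []mockTable

func (s sortedTables) Len() int           { return len(s) }
func (s sortedTables) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s sortedTables) Less(i, j int) bool { return s[i].id < s[j].id }

// searchTable binary-searches one bucket and returns the index of the table
// with the given ID, or -1 if it is not in the bucket.
func (s sortedTables) searchTable(id int64) int {
	idx := sort.Search(len(s), func(i int) bool { return s[i].id >= id })
	if idx == len(s) || s[idx].id != id {
		return -1
	}
	return idx
}

// tableBucketIdx maps a table ID to its bucket, as in the patch.
func tableBucketIdx(tableID int64) int {
	return int(tableID % bucketCount)
}

func main() {
	buckets := make([]sortedTables, bucketCount)
	// IDs 3, 515 and 1027 all land in bucket 3; 7 lands in bucket 7.
	for _, id := range []int64{3, 515, 1027, 7} {
		b := tableBucketIdx(id)
		buckets[b] = append(buckets[b], mockTable{id: id})
	}
	for i := range buckets {
		sort.Sort(buckets[i])
	}
	// TableByID-style lookup: pick the bucket, then binary search inside it.
	bucket := buckets[tableBucketIdx(515)]
	fmt.Println(bucket.searchTable(515)) // 1: found at index 1 of its bucket
	fmt.Println(bucket.searchTable(999)) // -1: no such table in this bucket
}

The point of the layout is that a DDL change only needs to clone the one small bucket it touches (see copySortedTables above) rather than a whole ID-to-table map, while lookups stay cheap because each bucket holds only a fraction of the tables.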