statistics: fix wrong singleflight implementation for stats' syncload (
ti-chi-bot authored Apr 7, 2024
1 parent 2d6302d commit a255101
Showing 5 changed files with 43 additions and 65 deletions.
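The patch drops the hand-rolled `WorkingColMap` de-duplication and instead routes concurrent loads for the same statistics item through `golang.org/x/sync/singleflight`. A minimal standalone sketch of what singleflight provides (the key string and payload are made up, not taken from the patch): callers that ask for the same key concurrently run the loader once and all receive its result.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"golang.org/x/sync/singleflight"
)

func main() {
	var (
		g     singleflight.Group
		calls int32
		wg    sync.WaitGroup
	)
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Every goroutine asks for the same key, so the loader body runs
			// once for the overlapping callers and its result is shared.
			v, err, shared := g.Do("stats#1", func() (any, error) {
				atomic.AddInt32(&calls, 1)
				return "histogram for table 1", nil
			})
			fmt.Println(v, err, shared)
		}()
	}
	wg.Wait()
	fmt.Println("loader executed", atomic.LoadInt32(&calls), "time(s)") // usually 1
}
```

The `shared` flag reports whether a caller received a value produced by another in-flight call, which is what makes the per-item channel bookkeeping in `setWorking`/`finishWorking` (removed further down in handle_hist.go) unnecessary.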
5 changes: 5 additions & 0 deletions pkg/parser/model/model.go
@@ -1766,6 +1766,11 @@ type TableItemID struct {
IsIndex bool
}

// Key is used to generate a unique key for TableItemID to use in the syncload
func (t TableItemID) Key() string {
return fmt.Sprintf("%d#%d#%t", t.ID, t.TableID, t.IsIndex)
}

// PolicyRefInfo is the struct to refer the placement policy.
type PolicyRefInfo struct {
ID int64 `json:"id"`
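`singleflight.Group` keys are strings, so the patch gives `TableItemID` a `Key` method to flatten it into one. A small illustration, using a local stand-in type instead of importing `pkg/parser/model`, of how the `ID#TableID#IsIndex` format keeps a column item and an index item with the same numeric IDs on distinct keys:

```go
package main

import "fmt"

// itemID is a local stand-in for model.TableItemID, for illustration only.
type itemID struct {
	TableID int64
	ID      int64
	IsIndex bool
}

// key mirrors the Key() format added by the patch: ID#TableID#IsIndex.
func (t itemID) key() string {
	return fmt.Sprintf("%d#%d#%t", t.ID, t.TableID, t.IsIndex)
}

func main() {
	col := itemID{TableID: 101, ID: 7, IsIndex: false}
	idx := itemID{TableID: 101, ID: 7, IsIndex: true}
	fmt.Println(col.key()) // 7#101#false
	fmt.Println(idx.key()) // 7#101#true -- same IDs, different singleflight keys
}
```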
1 change: 1 addition & 0 deletions pkg/statistics/handle/BUILD.bazel
@@ -40,6 +40,7 @@ go_library(
"@com_github_pingcap_errors//:errors",
"@com_github_pingcap_failpoint//:failpoint",
"@com_github_tiancaiamao_gp//:gp",
"@org_golang_x_sync//singleflight",
"@org_uber_go_atomic//:atomic",
"@org_uber_go_zap//:zap",
],
2 changes: 0 additions & 2 deletions pkg/statistics/handle/handle.go
@@ -22,7 +22,6 @@ import (
ddlUtil "github.com/pingcap/tidb/pkg/ddl/util"
"github.com/pingcap/tidb/pkg/parser/model"
"github.com/pingcap/tidb/pkg/sessionctx"
"github.com/pingcap/tidb/pkg/sessionctx/stmtctx"
"github.com/pingcap/tidb/pkg/statistics"
"github.com/pingcap/tidb/pkg/statistics/handle/autoanalyze"
"github.com/pingcap/tidb/pkg/statistics/handle/cache"
@@ -139,7 +138,6 @@ func NewHandle(_, initStatsCtx sessionctx.Context, lease time.Duration, pool uti
handle.StatsLoad.SubCtxs = make([]sessionctx.Context, cfg.Performance.StatsLoadConcurrency)
handle.StatsLoad.NeededItemsCh = make(chan *NeededItemTask, cfg.Performance.StatsLoadQueueSize)
handle.StatsLoad.TimeoutItemsCh = make(chan *NeededItemTask, cfg.Performance.StatsLoadQueueSize)
handle.StatsLoad.WorkingColMap = map[model.TableItemID][]chan stmtctx.StatsLoadResult{}
return handle, nil
}

83 changes: 34 additions & 49 deletions pkg/statistics/handle/handle_hist.go
@@ -35,6 +35,7 @@ import (
"github.com/pingcap/tidb/pkg/util"
"github.com/pingcap/tidb/pkg/util/logutil"
"go.uber.org/zap"
"golang.org/x/sync/singleflight"
)

type statsWrapper struct {
@@ -46,7 +47,7 @@ type StatsLoad struct {
type StatsLoad struct {
NeededItemsCh chan *NeededItemTask
TimeoutItemsCh chan *NeededItemTask
WorkingColMap map[model.TableItemID][]chan stmtctx.StatsLoadResult
Singleflight singleflight.Group
SubCtxs []sessionctx.Context
sync.Mutex
}
@@ -221,46 +222,60 @@ func (h *Handle) HandleOneTask(sctx sessionctx.Context, lastTask *NeededItemTask
} else {
task = lastTask
}
return h.handleOneItemTask(sctx, task)
resultChan := h.StatsLoad.Singleflight.DoChan(task.TableItemID.Key(), func() (any, error) {
return h.handleOneItemTask(sctx, task)
})
timeout := time.Until(task.ToTimeout)
select {
case result := <-resultChan:
if result.Err == nil {
slr := result.Val.(*stmtctx.StatsLoadResult)
if slr.Error != nil {
return task, slr.Error
}
task.ResultCh <- *slr
return nil, nil
}
return task, result.Err
case <-time.After(timeout):
return task, nil
}
}

func (h *Handle) handleOneItemTask(sctx sessionctx.Context, task *NeededItemTask) (*NeededItemTask, error) {
result := stmtctx.StatsLoadResult{Item: task.TableItemID}
func (h *Handle) handleOneItemTask(sctx sessionctx.Context, task *NeededItemTask) (result *stmtctx.StatsLoadResult, err error) {
defer func() {
// recover for each task, worker keeps working
if r := recover(); r != nil {
logutil.BgLogger().Error("handleOneItemTask panicked", zap.Any("recover", r), zap.Stack("stack"))
err = errors.Errorf("stats loading panicked: %v", r)
}
}()
result = &stmtctx.StatsLoadResult{Item: task.TableItemID}
item := result.Item
tbl, ok := h.Get(item.TableID)
if !ok {
h.writeToResultChan(task.ResultCh, result)
return nil, nil
return result, nil
}
var err error
wrapper := &statsWrapper{}
if item.IsIndex {
index, ok := tbl.Indices[item.ID]
if !ok || index.IsFullLoad() {
h.writeToResultChan(task.ResultCh, result)
return nil, nil
return result, nil
}
wrapper.idx = index
} else {
col, ok := tbl.Columns[item.ID]
if !ok || col.IsFullLoad() {
h.writeToResultChan(task.ResultCh, result)
return nil, nil
return result, nil
}
wrapper.col = col
}
// to avoid duplicated handling in concurrent scenario
working := h.setWorking(result.Item, task.ResultCh)
if !working {
h.writeToResultChan(task.ResultCh, result)
return nil, nil
}
t := time.Now()
needUpdate := false
wrapper, err = h.readStatsForOneItem(sctx, item, wrapper)
if err != nil {
result.Error = err
return task, err
return result, err
}
if item.IsIndex {
if wrapper.idx != nil {
@@ -273,9 +288,8 @@ func (h *Handle) handleOneItemTask(sctx sessionctx.Context, task *NeededItemTask
}
metrics.ReadStatsHistogram.Observe(float64(time.Since(t).Milliseconds()))
if needUpdate && h.updateCachedItem(item, wrapper.col, wrapper.idx) {
h.writeToResultChan(task.ResultCh, result)
return result, nil
}
h.finishWorking(result)
return nil, nil
}

@@ -466,32 +480,3 @@ func (h *Handle) updateCachedItem(item model.TableItemID, colHist *statistics.Co
h.UpdateStatsCache([]*statistics.Table{tbl}, nil)
return true
}

func (h *Handle) setWorking(item model.TableItemID, resultCh chan stmtctx.StatsLoadResult) bool {
h.StatsLoad.Lock()
defer h.StatsLoad.Unlock()
chList, ok := h.StatsLoad.WorkingColMap[item]
if ok {
if chList[0] == resultCh {
return true // just return for duplicate setWorking
}
h.StatsLoad.WorkingColMap[item] = append(chList, resultCh)
return false
}
chList = []chan stmtctx.StatsLoadResult{}
chList = append(chList, resultCh)
h.StatsLoad.WorkingColMap[item] = chList
return true
}

func (h *Handle) finishWorking(result stmtctx.StatsLoadResult) {
h.StatsLoad.Lock()
defer h.StatsLoad.Unlock()
if chList, ok := h.StatsLoad.WorkingColMap[result.Item]; ok {
list := chList[1:]
for _, ch := range list {
h.writeToResultChan(ch, result)
}
}
delete(h.StatsLoad.WorkingColMap, result.Item)
}
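The rewritten `HandleOneTask` wraps the per-item load in `Singleflight.DoChan` and then selects on the returned channel with a deadline, so a caller can time out while the shared load keeps running, and the recover added to `handleOneItemTask` turns a panicking load into an ordinary error for every waiter. A simplified sketch of that pattern, with a made-up load function and key rather than the real stats-loading code:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"golang.org/x/sync/singleflight"
)

// loadOnce runs load under singleflight and waits at most timeout for the
// shared result, roughly mirroring the DoChan + select shape of the patch.
func loadOnce(g *singleflight.Group, key string, timeout time.Duration,
	load func() (string, error)) (string, error) {
	ch := g.DoChan(key, func() (v any, err error) {
		// Recover inside the shared call so a panicking load is reported as an
		// error to all waiting callers instead of crashing the worker.
		defer func() {
			if r := recover(); r != nil {
				err = fmt.Errorf("stats loading panicked: %v", r)
			}
		}()
		s, e := load()
		return s, e
	})
	select {
	case res := <-ch:
		if res.Err != nil {
			return "", res.Err
		}
		return res.Val.(string), nil
	case <-time.After(timeout):
		// Only this caller gives up; the in-flight load is not cancelled, so a
		// retried task with the same key can still pick up its result later.
		return "", errors.New("stats load timed out for this caller")
	}
}

func main() {
	var g singleflight.Group
	v, err := loadOnce(&g, "7#101#false", 100*time.Millisecond, func() (string, error) {
		time.Sleep(10 * time.Millisecond) // pretend to read histograms from storage
		return "histogram for column 7 of table 101", nil
	})
	fmt.Println(v, err)
}
```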
17 changes: 3 additions & 14 deletions pkg/statistics/handle/handle_hist_test.go
@@ -206,26 +206,15 @@ func TestConcurrentLoadHistWithPanicAndFail(t *testing.T) {
task1, err1 := h.HandleOneTask(testKit.Session().(sessionctx.Context), nil, exitCh)
require.Error(t, err1)
require.NotNil(t, task1)
list, ok := h.StatsLoad.WorkingColMap[neededColumns[0]]
require.True(t, ok)
require.Len(t, list, 1)
require.Equal(t, stmtCtx1.StatsLoad.ResultCh, list[0])

task2, err2 := h.HandleOneTask(testKit.Session().(sessionctx.Context), nil, exitCh)
require.Nil(t, err2)
require.Nil(t, task2)
list, ok = h.StatsLoad.WorkingColMap[neededColumns[0]]
require.True(t, ok)
require.Len(t, list, 2)
require.Equal(t, stmtCtx2.StatsLoad.ResultCh, list[1])

require.NoError(t, failpoint.Disable(fp.failPath))
task3, err3 := h.HandleOneTask(testKit.Session().(sessionctx.Context), task1, exitCh)
require.NoError(t, err3)
require.Nil(t, task3)

require.Len(t, stmtCtx1.StatsLoad.ResultCh, 1)
require.Len(t, stmtCtx2.StatsLoad.ResultCh, 1)
task, err3 := h.HandleOneTask(testKit.Session().(sessionctx.Context), nil, exitCh)
require.NoError(t, err3)
require.Nil(t, task)

rs1, ok1 := <-stmtCtx1.StatsLoad.ResultCh
require.True(t, ok1)
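With singleflight in place, the test above no longer inspects `WorkingColMap`; it drives the tasks through `HandleOneTask` and then receives the results from each statement context's `ResultCh`. A hedged sketch of that fan-out shape (channel names and payloads are illustrative, not TiDB's): every waiter blocks on the same `DoChan` key and forwards the shared value to its own channel.

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	const waiters = 2

	// Each caller owns a buffered result channel, like task.ResultCh in the patch.
	resultChs := make([]chan string, waiters)
	for i := range resultChs {
		resultChs[i] = make(chan string, 1)
	}

	var wg sync.WaitGroup
	for i := 0; i < waiters; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			res := <-g.DoChan("7#101#false", func() (any, error) {
				return "loaded stats", nil
			})
			// Every waiter receives the (possibly shared) value and forwards it
			// to its own channel, so each channel holds exactly one result.
			resultChs[i] <- res.Val.(string)
		}(i)
	}
	wg.Wait()

	for i, ch := range resultChs {
		fmt.Printf("caller %d received: %q\n", i, <-ch)
	}
}
```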
