statscache: improve stats cache benchmark #45605

Merged: 4 commits, Jul 27, 2023
14 changes: 7 additions & 7 deletions statistics/handle/bootstrap.go
@@ -58,7 +58,7 @@
Version: row.GetUint64(0),
Name: getFullTableName(is, tableInfo),
}
cache.PutFromInternal(physicalID, tbl) // put this table again since it is updated
cache.Put(physicalID, tbl) // put this table again since it is updated
}
}

@@ -156,7 +156,7 @@
}
table.Columns[hist.ID] = col
}
cache.PutFromInternal(tblID, table) // put this table again since it is updated
cache.Put(tblID, table) // put this table again since it is updated
}
}

@@ -225,7 +225,7 @@
lastAnalyzePos.Copy(&col.LastAnalyzePos)
table.Columns[hist.ID] = col
}
cache.PutFromInternal(tblID, table) // put this table again since it is updated
cache.Put(tblID, table) // put this table again since it is updated
}
}

@@ -293,7 +293,7 @@
data := make([]byte, len(row.GetBytes(2)))
copy(data, row.GetBytes(2))
idx.TopN.AppendTopN(data, row.GetUint64(3))
cache.PutFromInternal(table.PhysicalID, table) // put this table again since it is updated
cache.Put(table.PhysicalID, table) // put this table again since it is updated
}
for idx := range affectedIndexes {
idx.TopN.Sort()
@@ -346,7 +346,7 @@
colStats.FMSketch = fms
}
}
cache.PutFromInternal(table.PhysicalID, table) // put this table again since it is updated
cache.Put(table.PhysicalID, table) // put this table again since it is updated

Codecov / codecov/patch check warning on statistics/handle/bootstrap.go#L349: Added line #L349 was not covered by tests.
}
}

@@ -418,7 +418,7 @@
}
}
hist.AppendBucketWithNDV(&lower, &upper, row.GetInt64(3), row.GetInt64(4), row.GetInt64(7))
cache.PutFromInternal(tableID, table) // put this table again since it is updated
cache.Put(tableID, table) // put this table again since it is updated
}
}

@@ -456,7 +456,7 @@
}
col.PreCalculateScalar()
}
cache.PutFromInternal(table.PhysicalID, table) // put this table again since it is updated
cache.Put(table.PhysicalID, table) // put this table again since it is updated
}
return nil
}
113 changes: 108 additions & 5 deletions statistics/handle/cache/bench_test.go
@@ -34,15 +34,62 @@ func benchCopyAndUpdate(b *testing.B, c *StatsCachePointer) {
defer wg.Done()
t1 := testutil.NewMockStatisticsTable(1, 1, true, false, false)
t1.PhysicalID = rand.Int63()
cache := c.Load()
c.Replace(cache.CopyAndUpdate([]*statistics.Table{t1}, nil))
c.UpdateStatsCache(c.Load(), []*statistics.Table{t1}, nil)
}()
}
wg.Wait()
b.StopTimer()
}

func BenchmarkStatsCacheLRUCopyAndUpdate(b *testing.B) {
func benchPutGet(b *testing.B, c *StatsCachePointer) {
var wg sync.WaitGroup
b.ResetTimer()
for i := 0; i < b.N; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
t1 := testutil.NewMockStatisticsTable(1, 1, true, false, false)
t1.PhysicalID = rand.Int63()
c.UpdateStatsCache(c.Load(), []*statistics.Table{t1}, nil)
}(i)
}
for i := 0; i < b.N; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
c.Load().GetFromUser(int64(i))
}(i)
}
wg.Wait()
b.StopTimer()
}

func benchGet(b *testing.B, c *StatsCachePointer) {
var w sync.WaitGroup
for i := 0; i < b.N; i++ {
w.Add(1)
go func(i int) {
defer w.Done()
t1 := testutil.NewMockStatisticsTable(1, 1, true, false, false)
t1.PhysicalID = rand.Int63()
c.UpdateStatsCache(c.Load(), []*statistics.Table{t1}, nil)
}(i)
}
w.Wait()
b.ResetTimer()
var wg sync.WaitGroup
for i := 0; i < b.N; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
c.Load().GetFromUser(int64(i))
}(i)
}
wg.Wait()
b.StopTimer()
}

func BenchmarkStatsCacheLFUCopyAndUpdate(b *testing.B) {
restore := config.RestoreFunc()
defer restore()
config.UpdateGlobal(func(conf *config.Config) {
@@ -68,9 +115,65 @@ func BenchmarkStatsCacheMapCacheCopyAndUpdate(b *testing.B) {
benchCopyAndUpdate(b, cache)
}

func TestBenchDaily(t *testing.T) {
func BenchmarkLFUCachePutGet(b *testing.B) {
restore := config.RestoreFunc()
defer restore()
config.UpdateGlobal(func(conf *config.Config) {
conf.Performance.EnableStatsCacheMemQuota = true
})
cache, err := NewStatsCachePointer()
if err != nil {
b.Fail()
}
benchPutGet(b, cache)
}

func BenchmarkMapCachePutGet(b *testing.B) {
restore := config.RestoreFunc()
defer restore()
config.UpdateGlobal(func(conf *config.Config) {
conf.Performance.EnableStatsCacheMemQuota = false
})
cache, err := NewStatsCachePointer()
if err != nil {
b.Fail()
}
benchPutGet(b, cache)
}

func BenchmarkLFUCacheGet(b *testing.B) {
restore := config.RestoreFunc()
defer restore()
config.UpdateGlobal(func(conf *config.Config) {
conf.Performance.EnableStatsCacheMemQuota = true
})
cache, err := NewStatsCachePointer()
if err != nil {
b.Fail()
}
benchGet(b, cache)
}

func BenchmarkMapCacheGet(b *testing.B) {
restore := config.RestoreFunc()
defer restore()
config.UpdateGlobal(func(conf *config.Config) {
conf.Performance.EnableStatsCacheMemQuota = false
})
cache, err := NewStatsCachePointer()
if err != nil {
b.Fail()
}
benchGet(b, cache)
}

func TestBenchDaily(*testing.T) {
benchdaily.Run(
BenchmarkStatsCacheLRUCopyAndUpdate,
BenchmarkStatsCacheLFUCopyAndUpdate,
BenchmarkStatsCacheMapCacheCopyAndUpdate,
BenchmarkLFUCachePutGet,
BenchmarkMapCachePutGet,
BenchmarkLFUCacheGet,
BenchmarkMapCacheGet,
)
}
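
TestBenchDaily above registers the new benchmarks with benchdaily.Run. As a rough sketch of how a func(b *testing.B) benchmark can be driven programmatically outside of `go test`, here is a minimal, self-contained example; exampleBench is a hypothetical stand-in, and the suggestion that a daily runner builds on testing.Benchmark is an assumption, not something shown in this diff.

package main

import (
	"fmt"
	"testing"
)

// exampleBench is a hypothetical benchmark with the same shape as the
// Benchmark* functions above: the measured work is repeated b.N times.
func exampleBench(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = i * i // placeholder workload
	}
}

func main() {
	// testing.Benchmark runs a benchmark function directly and returns
	// its result, e.g. "20000000	65.4 ns/op".
	res := testing.Benchmark(exampleBench)
	fmt.Println(res.String())
}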
16 changes: 1 addition & 15 deletions statistics/handle/cache/internal/BUILD.bazel
@@ -1,4 +1,4 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "cache",
@@ -15,17 +15,3 @@ go_library(
visibility = ["//statistics/handle/cache:__subpackages__"],
deps = ["//statistics"],
)

go_test(
name = "internal_test",
timeout = "short",
srcs = ["bench_test.go"],
flaky = True,
deps = [
":internal",
"//statistics/handle/cache/internal/lfu",
"//statistics/handle/cache/internal/mapcache",
"//statistics/handle/cache/internal/testutil",
"//util/benchdaily",
],
)
101 changes: 0 additions & 101 deletions statistics/handle/cache/internal/bench_test.go

This file was deleted.

2 changes: 1 addition & 1 deletion statistics/handle/cache/internal/inner.go
@@ -26,7 +26,7 @@ type StatsCacheInner interface {
// Get gets the cache.
Get(tid int64, moveFront bool) (*statistics.Table, bool)
// Put puts a cache.
Put(tid int64, tbl *statistics.Table, moveLRUFront bool) bool
Put(tid int64, tbl *statistics.Table) bool
// Del deletes a cache.
Del(int64)
// Cost returns the memory usage of the cache.
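
For orientation, a minimal sketch of the slimmed-down StatsCacheInner interface as it reads after this change, reconstructed only from the hunk above; the members the diff cuts off (Cost and anything below it) are omitted, and the import path is assumed from the //statistics Bazel dependency shown earlier.

package internal

import "github.com/pingcap/tidb/statistics"

// StatsCacheInner is the contract shared by the LFU and map-backed caches.
// After this change, Put no longer takes a moveLRUFront flag, so both
// backends expose the same two-argument signature.
type StatsCacheInner interface {
	// Get gets the cache.
	Get(tid int64, moveFront bool) (*statistics.Table, bool)
	// Put puts a cache entry and reports whether it was stored.
	Put(tid int64, tbl *statistics.Table) bool
	// Del deletes a cache entry.
	Del(int64)
	// Cost and the remaining methods are unchanged and omitted here.
}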
7 changes: 1 addition & 6 deletions statistics/handle/cache/internal/lfu/lfu_cache.go
@@ -73,12 +73,7 @@ func (s *LFU) Get(tid int64, _ bool) (*statistics.Table, bool) {
}

// Put implements statsCacheInner
func (s *LFU) Put(tblID int64, tbl *statistics.Table, _ bool) bool {
return s.put(tblID, tbl)
}

// Put implements statsCacheInner
func (s *LFU) put(tblID int64, tbl *statistics.Table) bool {
func (s *LFU) Put(tblID int64, tbl *statistics.Table) bool {
ok := s.cache.Set(tblID, tbl, tbl.MemoryUsage().TotalTrackingMemUsage())
if ok { // NOTE: `s.cache` and `s.resultKeySet` may be inconsistent since the update operation is not atomic, but it's acceptable for our scenario
s.resultKeySet.Add(tblID)
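
The unified Put above stores each table with its tracked memory usage as the entry's cost. For readers unfamiliar with cost-based admission, a minimal, self-contained sketch of the same idea using dgraph-io/ristretto follows; treating ristretto as the backing cache is an assumption, and none of the names below are this PR's code.

package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto"
)

func main() {
	cache, err := ristretto.NewCache(&ristretto.Config{
		NumCounters: 1e4,     // keys whose access frequency is tracked
		MaxCost:     1 << 20, // total cost budget, e.g. bytes
		BufferItems: 64,      // recommended buffer size
	})
	if err != nil {
		panic(err)
	}
	// Set may decline to admit an entry, so callers check the returned
	// bool, mirroring how LFU.Put returns whether the value was stored.
	ok := cache.Set("tbl-1", "stats payload", 128 /* cost */)
	cache.Wait() // sets are applied asynchronously
	fmt.Println("admitted:", ok)
}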