util: use std/slices to replace exp/slices (#46052)
ref #45933
hawkingrei authored Aug 14, 2023
1 parent 9232aac commit 7bd1790
Showing 27 changed files with 75 additions and 80 deletions.
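The mechanical change running through this diff is the comparator signature: golang.org/x/exp/slices.SortFunc takes a "less" function returning bool, while the Go 1.21 standard-library slices.SortFunc takes a comparison function returning a negative, zero, or positive int, which cmp.Compare supplies for any ordered type. A minimal sketch of that pattern, using a hypothetical Item type rather than code from this commit:

package main

import (
	"cmp"
	"fmt"
	"slices" // standard library as of Go 1.21, replacing golang.org/x/exp/slices
)

// Item is a hypothetical type used only to illustrate the comparator change.
type Item struct {
	Name string
}

func main() {
	items := []Item{{"banana"}, {"apple"}, {"cherry"}}

	// Old x/exp/slices style: a "less" function returning bool, e.g.
	//   slices.SortFunc(items, func(i, j Item) bool { return i.Name < j.Name })

	// New std slices style: a comparison function returning <0, 0, or >0;
	// cmp.Compare provides that for ordered types such as string.
	slices.SortFunc(items, func(i, j Item) int {
		return cmp.Compare(i.Name, j.Name)
	})

	fmt.Println(items) // [{apple} {banana} {cherry}]
}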
1 change: 0 additions & 1 deletion util/collate/BUILD.bazel
@@ -25,7 +25,6 @@ go_library(
"//util/logutil",
"//util/stringutil",
"@com_github_pingcap_errors//:errors",
"@org_golang_x_exp//slices",
"@org_golang_x_text//encoding",
"@org_uber_go_zap//:zap",
],
7 changes: 4 additions & 3 deletions util/collate/collate.go
@@ -15,7 +15,9 @@
package collate

import (
"cmp"
"fmt"
"slices"
"sync/atomic"

"github.com/pingcap/errors"
@@ -25,7 +27,6 @@ import (
"github.com/pingcap/tidb/util/dbterror"
"github.com/pingcap/tidb/util/logutil"
"go.uber.org/zap"
"golang.org/x/exp/slices"
)

var (
@@ -255,8 +256,8 @@ func GetSupportedCollations() []*charset.Collation {
newSupportedCollations = append(newSupportedCollations, coll)
}
}
slices.SortFunc(newSupportedCollations, func(i, j *charset.Collation) bool {
return i.Name < j.Name
slices.SortFunc(newSupportedCollations, func(i, j *charset.Collation) int {
return cmp.Compare(i.Name, j.Name)
})
return newSupportedCollations
}
1 change: 0 additions & 1 deletion util/execdetails/BUILD.bazel
@@ -9,7 +9,6 @@ go_library(
"@com_github_influxdata_tdigest//:tdigest",
"@com_github_pingcap_tipb//go-tipb",
"@com_github_tikv_client_go_v2//util",
"@org_golang_x_exp//slices",
"@org_uber_go_zap//:zap",
],
)
2 changes: 1 addition & 1 deletion util/execdetails/execdetails.go
@@ -18,6 +18,7 @@ import (
"bytes"
"fmt"
"math"
"slices"
"sort"
"strconv"
"strings"
@@ -29,7 +30,6 @@ import (
"github.com/pingcap/tipb/go-tipb"
"github.com/tikv/client-go/v2/util"
"go.uber.org/zap"
"golang.org/x/exp/slices"
)

// ExecDetails contains execution detail information.
2 changes: 0 additions & 2 deletions util/extsort/BUILD.bazel
@@ -16,7 +16,6 @@ go_library(
"@com_github_cockroachdb_pebble//vfs",
"@com_github_pingcap_errors//:errors",
"@com_github_pingcap_log//:log",
"@org_golang_x_exp//slices",
"@org_golang_x_sync//errgroup",
"@org_uber_go_zap//:zap",
],
@@ -37,7 +36,6 @@ go_test(
"@com_github_cockroachdb_pebble//vfs",
"@com_github_pingcap_errors//:errors",
"@com_github_stretchr_testify//require",
"@org_golang_x_exp//slices",
"@org_golang_x_sync//errgroup",
"@org_uber_go_zap//:zap",
],
46 changes: 23 additions & 23 deletions util/extsort/disk_sorter.go
@@ -23,6 +23,7 @@ import (
"fmt"
"io"
"runtime"
"slices"
"sort"
"strconv"
"strings"
@@ -37,7 +38,6 @@ import (
"github.com/pingcap/tidb/util/generic"
"github.com/pingcap/tidb/util/syncutil"
"go.uber.org/zap"
"golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
)

@@ -432,8 +432,8 @@ type openIterFunc func(file *fileMetadata) (Iterator, error)
// newMergingIter returns an iterator that merges the given files.
// orderedFiles must be ordered by start key, otherwise it panics.
func newMergingIter(orderedFiles []*fileMetadata, openIter openIterFunc) *mergingIter {
if !slices.IsSortedFunc(orderedFiles, func(a, b *fileMetadata) bool {
return bytes.Compare(a.startKey, b.startKey) < 0
if !slices.IsSortedFunc(orderedFiles, func(a, b *fileMetadata) int {
return bytes.Compare(a.startKey, b.startKey)
}) {
panic("newMergingIter: orderedFiles are not ordered by start key")
}
@@ -605,8 +605,8 @@ func (m *mergingIter) Last() bool {

// Sort files by last key in reverse order.
files := slices.Clone(m.orderedFiles)
slices.SortFunc(files, func(a, b *fileMetadata) bool {
return bytes.Compare(a.lastKey, b.lastKey) > 0
slices.SortFunc(files, func(a, b *fileMetadata) int {
return bytes.Compare(b.lastKey, a.lastKey)
})

// Since we don't need to implement Prev() method,
@@ -835,8 +835,8 @@ func (d *DiskSorter) init() error {
files = append(files, file)
}
if _, err := d.fs.Stat(d.fs.PathJoin(d.dirname, diskSorterSortedFile)); err == nil {
slices.SortFunc(files, func(a, b *fileMetadata) bool {
return bytes.Compare(a.startKey, b.startKey) < 0
slices.SortFunc(files, func(a, b *fileMetadata) int {
return bytes.Compare(a.startKey, b.startKey)
})
d.orderedFiles = files
d.state.Store(diskSorterStateSorted)
@@ -939,8 +939,8 @@ func (d *DiskSorter) doSort(ctx context.Context) error {
}
d.orderedFiles = d.pendingFiles.files
d.pendingFiles.files = nil
slices.SortFunc(d.orderedFiles, func(a, b *fileMetadata) bool {
return bytes.Compare(a.startKey, b.startKey) < 0
slices.SortFunc(d.orderedFiles, func(a, b *fileMetadata) int {
return bytes.Compare(a.startKey, b.startKey)
})
files := pickCompactionFiles(d.orderedFiles, d.opts.CompactionThreshold, d.opts.Logger)
for len(files) > 0 {
@@ -1013,8 +1013,8 @@ func (d *DiskSorter) compactFiles(ctx context.Context, files []*fileMetadata) er
newOrderedFiles = append(newOrderedFiles, file)
}
}
slices.SortFunc(newOrderedFiles, func(a, b *fileMetadata) bool {
return bytes.Compare(a.startKey, b.startKey) < 0
slices.SortFunc(newOrderedFiles, func(a, b *fileMetadata) int {
return bytes.Compare(a.startKey, b.startKey)
})
d.orderedFiles = newOrderedFiles
return nil
@@ -1043,8 +1043,8 @@ func pickCompactionFiles(
depth: -1,
})
}
slices.SortFunc(intervals, func(a, b interval) bool {
return bytes.Compare(a.key, b.key) < 0
slices.SortFunc(intervals, func(a, b interval) int {
return bytes.Compare(a.key, b.key)
})

// Compute the maximum overlap depth of each interval.
@@ -1095,8 +1095,8 @@

func splitCompactionFiles(files []*fileMetadata, maxCompactionDepth int) [][]*fileMetadata {
// Split files into non-overlapping groups.
slices.SortFunc(files, func(a, b *fileMetadata) bool {
return bytes.Compare(a.startKey, b.startKey) < 0
slices.SortFunc(files, func(a, b *fileMetadata) int {
return bytes.Compare(a.startKey, b.startKey)
})
var groups [][]*fileMetadata
curGroup := []*fileMetadata{files[0]}
@@ -1155,8 +1155,8 @@ func buildCompactions(files []*fileMetadata, maxCompactionSize int) []*compactio
// If there is no kv stats, return a single compaction for all files.
if len(buckets) == 0 {
overlapFiles := slices.Clone(files)
slices.SortFunc(overlapFiles, func(a, b *fileMetadata) bool {
return bytes.Compare(a.startKey, b.startKey) < 0
slices.SortFunc(overlapFiles, func(a, b *fileMetadata) int {
return bytes.Compare(a.startKey, b.startKey)
})
return []*compaction{{
startKey: startKey,
@@ -1165,8 +1165,8 @@
}}
}

slices.SortFunc(buckets, func(a, b kvStatsBucket) bool {
return bytes.Compare(a.UpperBound, b.UpperBound) < 0
slices.SortFunc(buckets, func(a, b kvStatsBucket) int {
return bytes.Compare(a.UpperBound, b.UpperBound)
})
// Merge buckets with the same upper bound.
n := 0
@@ -1224,8 +1224,8 @@ func buildCompactions(files []*fileMetadata, maxCompactionSize int) []*compactio
c.overlapFiles = append(c.overlapFiles, file)
}
}
slices.SortFunc(c.overlapFiles, func(a, b *fileMetadata) bool {
return bytes.Compare(a.startKey, b.startKey) < 0
slices.SortFunc(c.overlapFiles, func(a, b *fileMetadata) int {
return bytes.Compare(a.startKey, b.startKey)
})
}
return compactions
@@ -1377,8 +1377,8 @@ func (w *diskSorterWriter) flush() error {
return err
}

slices.SortFunc(w.kvs, func(a, b keyValue) bool {
return bytes.Compare(a.key, b.key) < 0
slices.SortFunc(w.kvs, func(a, b keyValue) int {
return bytes.Compare(a.key, b.key)
})
for _, kv := range w.kvs {
if err := sw.Set(kv.key, kv.value); err != nil {
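For the byte-slice keys in disk_sorter.go no cmp.Compare is needed: bytes.Compare already returns an int, so it drops straight into the new SortFunc and IsSortedFunc comparator signature, and descending order is obtained by swapping the arguments rather than negating a bool (as in the lastKey sort in mergingIter.Last above). A standalone sketch of those two patterns, not taken from the commit:

package main

import (
	"bytes"
	"fmt"
	"slices"
)

func main() {
	keys := [][]byte{[]byte("b"), []byte("a"), []byte("c")}

	// Ascending order: bytes.Compare already matches the int-returning
	// comparator shape, so it is passed through unchanged.
	slices.SortFunc(keys, func(a, b []byte) int { return bytes.Compare(a, b) })

	// IsSortedFunc uses the same comparator type in the standard library.
	fmt.Println(slices.IsSortedFunc(keys, func(a, b []byte) int {
		return bytes.Compare(a, b)
	})) // true

	// Descending order: swap the arguments instead of negating a bool.
	slices.SortFunc(keys, func(a, b []byte) int { return bytes.Compare(b, a) })
	fmt.Printf("%q\n", keys) // ["c" "b" "a"]
}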
15 changes: 8 additions & 7 deletions util/extsort/disk_sorter_test.go
@@ -16,9 +16,11 @@ package extsort

import (
"bytes"
"cmp"
"context"
"encoding/json"
"math/rand"
"slices"
"sync"
"testing"

@@ -28,7 +30,6 @@ import (
"github.com/pingcap/errors"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"golang.org/x/exp/slices"
)

func TestDiskSorterCommon(t *testing.T) {
@@ -92,8 +93,8 @@ func TestDiskSorterReopen(t *testing.T) {
}
require.NoError(t, w.Close())

slices.SortFunc(kvs, func(a, b keyValue) bool {
return bytes.Compare(a.key, b.key) < 0
slices.SortFunc(kvs, func(a, b keyValue) int {
return bytes.Compare(a.key, b.key)
})
verify := func() {
iter, err := sorter.NewIterator(context.Background())
@@ -535,8 +536,8 @@ func TestMergingIter(t *testing.T) {
{[]byte("j1"), []byte("vj1")},
}),
}
slices.SortFunc(files, func(a, b *fileMetadata) bool {
return bytes.Compare(a.startKey, b.startKey) < 0
slices.SortFunc(files, func(a, b *fileMetadata) int {
return bytes.Compare(a.startKey, b.startKey)
})

iter := newMergingIter(files, openIter)
@@ -712,8 +713,8 @@ func TestPickCompactionFiles(t *testing.T) {
}
for _, tc := range testCases {
actual := pickCompactionFiles(tc.allFiles, tc.compactionThreshold, zap.NewNop())
slices.SortFunc(actual, func(a, b *fileMetadata) bool {
return a.fileNum < b.fileNum
slices.SortFunc(actual, func(a, b *fileMetadata) int {
return cmp.Compare(a.fileNum, b.fileNum)
})
require.Equal(t, tc.expected, actual)
}
10 changes: 5 additions & 5 deletions util/extsort/external_sorter_test.go
@@ -19,10 +19,10 @@ import (
"context"
"encoding/binary"
"math/rand"
"slices"
"testing"

"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
)

@@ -49,8 +49,8 @@ func runCommonTest(t *testing.T, sorter ExternalSorter) {
iter, err := sorter.NewIterator(ctx)
require.NoError(t, err)

slices.SortFunc(kvs, func(a, b keyValue) bool {
return bytes.Compare(a.key, b.key) < 0
slices.SortFunc(kvs, func(a, b keyValue) int {
return bytes.Compare(a.key, b.key)
})

kvCnt := 0
@@ -105,8 +105,8 @@ func runCommonParallelTest(t *testing.T, sorter ExternalSorter) {
close(kvCh)
require.NoError(t, g.Wait())

slices.SortFunc(kvs, func(a, b keyValue) bool {
return bytes.Compare(a.key, b.key) < 0
slices.SortFunc(kvs, func(a, b keyValue) int {
return bytes.Compare(a.key, b.key)
})

require.NoError(t, sorter.Sort(ctx))
1 change: 0 additions & 1 deletion util/memory/BUILD.bazel
@@ -21,7 +21,6 @@ go_library(
"@com_github_pingcap_failpoint//:failpoint",
"@com_github_pingcap_sysutil//:sysutil",
"@com_github_shirou_gopsutil_v3//mem",
"@org_golang_x_exp//slices",
"@org_uber_go_atomic//:atomic",
"@org_uber_go_zap//:zap",
],
2 changes: 1 addition & 1 deletion util/memory/tracker.go
@@ -18,6 +18,7 @@ import (
"bytes"
"fmt"
"runtime"
"slices"
"strconv"
"sync"
"sync/atomic"
@@ -27,7 +28,6 @@ import (
"github.com/pingcap/tidb/util/logutil"
atomicutil "go.uber.org/atomic"
"go.uber.org/zap"
"golang.org/x/exp/slices"
)

// TrackMemWhenExceeds is the threshold when memory usage needs to be tracked.
1 change: 0 additions & 1 deletion util/memoryusagealarm/BUILD.bazel
@@ -12,7 +12,6 @@ go_library(
"//util/disk",
"//util/logutil",
"//util/memory",
"@org_golang_x_exp//slices",
"@org_uber_go_zap//:zap",
"@org_uber_go_zap//zapcore",
],
13 changes: 7 additions & 6 deletions util/memoryusagealarm/memoryusagealarm.go
@@ -15,10 +15,12 @@
package memoryusagealarm

import (
"cmp"
"fmt"
"os"
"path/filepath"
rpprof "runtime/pprof"
"slices"
"strings"
"sync/atomic"
"time"
@@ -31,7 +33,6 @@ import (
"github.com/pingcap/tidb/util/memory"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"golang.org/x/exp/slices"
)

// Handle is the handler for expensive query.
@@ -263,7 +264,7 @@ func (record *memoryUsageAlarm) printTop10SqlInfo(pinfo []*util.ProcessInfo, f *
}
}

func (record *memoryUsageAlarm) getTop10SqlInfo(cmp func(i, j *util.ProcessInfo) bool, pinfo []*util.ProcessInfo) strings.Builder {
func (record *memoryUsageAlarm) getTop10SqlInfo(cmp func(i, j *util.ProcessInfo) int, pinfo []*util.ProcessInfo) strings.Builder {
slices.SortFunc(pinfo, cmp)
list := pinfo
var buf strings.Builder
@@ -302,14 +303,14 @@ func (record *memoryUsageAlarm) getTop10SqlInfo(cmp func(i, j *util.ProcessInfo)
}

func (record *memoryUsageAlarm) getTop10SqlInfoByMemoryUsage(pinfo []*util.ProcessInfo) strings.Builder {
return record.getTop10SqlInfo(func(i, j *util.ProcessInfo) bool {
return i.MemTracker.MaxConsumed() > j.MemTracker.MaxConsumed()
return record.getTop10SqlInfo(func(i, j *util.ProcessInfo) int {
return cmp.Compare(j.MemTracker.MaxConsumed(), i.MemTracker.MaxConsumed())
}, pinfo)
}

func (record *memoryUsageAlarm) getTop10SqlInfoByCostTime(pinfo []*util.ProcessInfo) strings.Builder {
return record.getTop10SqlInfo(func(i, j *util.ProcessInfo) bool {
return i.Time.Before(j.Time)
return record.getTop10SqlInfo(func(i, j *util.ProcessInfo) int {
return i.Time.Compare(j.Time)
}, pinfo)
}

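The comparators in memoryusagealarm.go sort descending by memory and ascending by start time; with the int-returning signature, descending order comes from passing the arguments to cmp.Compare in reverse, and time.Time.Compare (available since Go 1.20) directly replaces the Before-based less function. A small sketch with a hypothetical proc struct standing in for util.ProcessInfo:

package main

import (
	"cmp"
	"fmt"
	"slices"
	"time"
)

// proc is a hypothetical stand-in for util.ProcessInfo, used only to show
// the comparator shapes.
type proc struct {
	mem   int64
	start time.Time
}

func main() {
	now := time.Now()
	ps := []proc{
		{mem: 10, start: now.Add(2 * time.Second)},
		{mem: 30, start: now},
		{mem: 20, start: now.Add(time.Second)},
	}

	// Descending by memory: reverse the arguments to cmp.Compare, the int
	// equivalent of the old "i > j" less function.
	slices.SortFunc(ps, func(i, j proc) int { return cmp.Compare(j.mem, i.mem) })
	fmt.Println(ps[0].mem) // 30

	// Ascending by start time: time.Time.Compare replaces Before.
	slices.SortFunc(ps, func(i, j proc) int { return i.start.Compare(j.start) })
	fmt.Println(ps[0].start.Equal(now)) // true
}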
1 change: 0 additions & 1 deletion util/profile/BUILD.bazel
@@ -14,7 +14,6 @@ go_library(
"//util/texttree",
"@com_github_google_pprof//profile",
"@com_github_pingcap_errors//:errors",
"@org_golang_x_exp//slices",
],
)

