From 6a85c6bbbd6cae7e0eea20a75ecd0853ac3545d6 Mon Sep 17 00:00:00 2001 From: Weizhen Wang Date: Mon, 28 Oct 2024 15:48:39 +0800 Subject: [PATCH] *: enable intrange linter (#56674) ref pingcap/tidb#55480 --- .golangci.yml | 1 + build/BUILD.bazel | 1 + build/linter/intrange/BUILD.bazel | 12 ++ build/linter/intrange/analyzer.go | 28 ++++ build/nogo_config.json | 12 ++ go.mod | 1 + go.sum | 2 + pkg/util/backoff/backoff_test.go | 6 +- pkg/util/bitmap/concurrent.go | 2 +- pkg/util/bitmap/concurrent_test.go | 8 +- pkg/util/cgroup/cgroup_cpu_test.go | 2 +- pkg/util/cgroup/cgroup_mock_test.go | 2 +- pkg/util/checksum/checksum_test.go | 2 +- pkg/util/chunk/alloc_test.go | 26 ++-- pkg/util/chunk/chunk.go | 10 +- pkg/util/chunk/chunk_in_disk.go | 6 +- pkg/util/chunk/chunk_in_disk_test.go | 8 +- pkg/util/chunk/chunk_test.go | 108 +++++++------- pkg/util/chunk/chunk_util.go | 6 +- pkg/util/chunk/chunk_util_test.go | 16 +-- pkg/util/chunk/codec.go | 8 +- pkg/util/chunk/codec_test.go | 20 +-- pkg/util/chunk/column.go | 12 +- pkg/util/chunk/column_test.go | 134 +++++++++--------- pkg/util/chunk/iterator.go | 2 +- pkg/util/chunk/iterator_test.go | 30 ++-- pkg/util/chunk/list.go | 4 +- pkg/util/chunk/list_test.go | 16 +-- pkg/util/chunk/mutrow_test.go | 22 +-- pkg/util/chunk/row.go | 4 +- pkg/util/chunk/row_container.go | 6 +- pkg/util/chunk/row_container_reader.go | 4 +- pkg/util/chunk/row_container_test.go | 58 ++++---- pkg/util/chunk/row_in_disk.go | 6 +- pkg/util/chunk/row_in_disk_test.go | 26 ++-- pkg/util/codec/bench_test.go | 14 +- pkg/util/codec/bytes.go | 2 +- pkg/util/codec/codec.go | 34 ++--- pkg/util/codec/codec_test.go | 14 +- pkg/util/codec/collation_test.go | 16 +-- pkg/util/collate/collate.go | 2 +- pkg/util/collate/collate_bench_test.go | 4 +- pkg/util/cpu/cpu_test.go | 8 +- pkg/util/cpuprofile/cpuprofile_test.go | 2 +- pkg/util/cpuprofile/testutil/util.go | 2 +- pkg/util/cteutil/storage_test.go | 24 ++-- pkg/util/dbutil/common.go | 2 +- .../deadlockhistory/deadlock_history_test.go | 2 +- pkg/util/deeptest/statictesthelper.go | 12 +- pkg/util/disk/tempDir_test.go | 2 +- pkg/util/encrypt/aes_layer_test.go | 6 +- pkg/util/encrypt/crypt.go | 6 +- pkg/util/etcd.go | 2 +- pkg/util/execdetails/execdetails_test.go | 2 +- pkg/util/extsort/disk_sorter_test.go | 2 +- pkg/util/extsort/external_sorter_test.go | 4 +- pkg/util/fastrand/random.go | 2 +- pkg/util/fastrand/random_test.go | 4 +- pkg/util/format/format.go | 6 +- pkg/util/gctuner/tuner_test.go | 2 +- pkg/util/globalconn/pool.go | 5 +- pkg/util/globalconn/pool_test.go | 10 +- pkg/util/importer/db.go | 4 +- pkg/util/importer/job.go | 6 +- pkg/util/intset/fast_int_set_bench_test.go | 24 ++-- pkg/util/intset/fast_int_set_test.go | 6 +- pkg/util/kvcache/simple_lru_test.go | 30 ++-- pkg/util/logutil/hex.go | 2 +- pkg/util/logutil/log_test.go | 6 +- pkg/util/mathutil/math_test.go | 2 +- pkg/util/memory/bench_test.go | 4 +- pkg/util/memory/tracker_test.go | 16 +-- .../memoryusagealarm/memoryusagealarm_test.go | 2 +- pkg/util/mock/mock_test.go | 2 +- pkg/util/mvmap/bench_test.go | 6 +- pkg/util/mvmap/mvmap.go | 2 +- pkg/util/mvmap/mvmap_test.go | 2 +- .../password_validation.go | 2 +- pkg/util/plancodec/codec.go | 8 +- pkg/util/profile/profile.go | 2 +- pkg/util/ranger/bench_test.go | 2 +- pkg/util/ranger/ranger.go | 4 +- pkg/util/ranger/types.go | 10 +- .../regexpr-router/regexpr_router_test.go | 2 +- .../resource_group_tag_test.go | 2 +- pkg/util/rowcodec/bench_test.go | 8 +- pkg/util/rowcodec/encoder.go | 4 +- pkg/util/rowcodec/rowcodec_test.go | 6 
+- pkg/util/schemacmp/lattice.go | 2 +- pkg/util/selection/selection_test.go | 4 +- .../servermemorylimit/servermemorylimit.go | 2 +- .../servermemorylimit_test.go | 4 +- pkg/util/set/mem_aware_map_test.go | 12 +- pkg/util/set/set_with_memory_usage_test.go | 12 +- pkg/util/slice/slice.go | 2 +- pkg/util/sqlescape/utils_test.go | 8 +- pkg/util/stmtsummary/evicted_test.go | 4 +- .../stmtsummary/statement_summary_test.go | 24 ++-- pkg/util/stmtsummary/v2/reader.go | 2 +- pkg/util/stmtsummary/v2/tests/table_test.go | 4 +- pkg/util/stringutil/string_util.go | 4 +- pkg/util/stringutil/string_util_test.go | 4 +- pkg/util/table-rule-selector/selector_test.go | 2 +- pkg/util/tokenlimiter.go | 2 +- pkg/util/topsql/collector/main_test.go | 2 +- pkg/util/topsql/reporter/datamodel.go | 2 +- pkg/util/topsql/reporter/reporter_test.go | 14 +- pkg/util/topsql/stmtstats/aggregator_test.go | 4 +- .../topsql/stmtstats/kv_exec_count_test.go | 4 +- pkg/util/topsql/stmtstats/stmtstats_test.go | 6 +- pkg/util/topsql/topsql.go | 2 +- pkg/util/topsql/topsql_test.go | 4 +- pkg/util/tracing/noop_bench_test.go | 8 +- pkg/util/util.go | 4 +- pkg/util/wait_group_wrapper_test.go | 8 +- pkg/util/worker_pool.go | 2 +- pkg/util/zeropool/pool_test.go | 14 +- tools/check/ut.go | 8 +- 118 files changed, 599 insertions(+), 541 deletions(-) create mode 100644 build/linter/intrange/BUILD.bazel create mode 100644 build/linter/intrange/analyzer.go diff --git a/.golangci.yml b/.golangci.yml index f8893425ff96e..f0c4e5b6dc769 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -26,6 +26,7 @@ linters: - revive - lll - gofmt + - intrange linters-settings: staticcheck: diff --git a/build/BUILD.bazel b/build/BUILD.bazel index 763c5ead3c912..b95ae6b8756c7 100644 --- a/build/BUILD.bazel +++ b/build/BUILD.bazel @@ -157,6 +157,7 @@ nogo( "//build/linter/gci", "//build/linter/gosec", "//build/linter/ineffassign", + "//build/linter/intrange", "//build/linter/makezero", "//build/linter/mirror", "//build/linter/misspell", diff --git a/build/linter/intrange/BUILD.bazel b/build/linter/intrange/BUILD.bazel new file mode 100644 index 0000000000000..552551b5d742a --- /dev/null +++ b/build/linter/intrange/BUILD.bazel @@ -0,0 +1,12 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "intrange", + srcs = ["analyzer.go"], + importpath = "github.com/pingcap/tidb/build/linter/intrange", + visibility = ["//visibility:public"], + deps = [ + "//build/linter/util", + "@com_github_ckaznocha_intrange//:intrange", + ], +) diff --git a/build/linter/intrange/analyzer.go b/build/linter/intrange/analyzer.go new file mode 100644 index 0000000000000..c95d99cb1b1c1 --- /dev/null +++ b/build/linter/intrange/analyzer.go @@ -0,0 +1,28 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package intrange + +import ( + "github.com/ckaznocha/intrange" + "github.com/pingcap/tidb/build/linter/util" +) + +// Analyzer is the analyzer struct of intrange.
+var Analyzer = intrange.Analyzer + +func init() { + util.SkipAnalyzerByConfig(Analyzer) + util.SkipAnalyzer(Analyzer) +} diff --git a/build/nogo_config.json b/build/nogo_config.json index e991895aa0ba4..848d49c7784e3 100644 --- a/build/nogo_config.json +++ b/build/nogo_config.json @@ -346,6 +346,18 @@ "/cgo/": "no need to vet cgo code" } }, + "intrange": { + "exclude_files": { + "pkg/parser/parser.go": "parser/parser.go code", + "external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code", + ".*mock.go$": "ignore generated code", + "/cgo/": "no need to vet cgo code" + }, + "only_files": { + "pkg/util/": "util code" + } + }, "inspect": { "exclude_files": { "pkg/parser/parser.go": "parser/parser.go code", diff --git a/go.mod b/go.mod index 82edf68a0d7d5..e71c7a11356bf 100644 --- a/go.mod +++ b/go.mod @@ -26,6 +26,7 @@ require ( github.com/charithe/durationcheck v0.0.10 github.com/cheggaaa/pb/v3 v3.0.8 github.com/cheynewallace/tabby v1.1.1 + github.com/ckaznocha/intrange v0.2.1 github.com/cloudfoundry/gosigar v1.3.6 github.com/cockroachdb/pebble v1.1.0 github.com/coocood/freecache v1.2.1 diff --git a/go.sum b/go.sum index 5ea4ec1197368..cc7655410833b 100644 --- a/go.sum +++ b/go.sum @@ -162,6 +162,8 @@ github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyr github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/ckaznocha/intrange v0.2.1 h1:M07spnNEQoALOJhwrImSrJLaxwuiQK+hA2DeajBlwYk= +github.com/ckaznocha/intrange v0.2.1/go.mod h1:7NEhVyf8fzZO5Ds7CRaqPEm52Ut83hsTiL5zbER/HYk= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudfoundry/gosigar v1.3.6 h1:gIc08FbB3QPb+nAQhINIK/qhf5REKkY0FTGgRGXkcVc= github.com/cloudfoundry/gosigar v1.3.6/go.mod h1:lNWstu5g5gw59O09Y+wsMNFzBSnU8a0u+Sfx4dq360E= diff --git a/pkg/util/backoff/backoff_test.go b/pkg/util/backoff/backoff_test.go index 6db7510a62c56..0f87a4fd9e643 100644 --- a/pkg/util/backoff/backoff_test.go +++ b/pkg/util/backoff/backoff_test.go @@ -23,16 +23,16 @@ import ( func TestExponential(t *testing.T) { backoffer := NewExponential(1, 1, 1) - for i := 0; i < 10; i++ { + for i := range 10 { require.Equal(t, time.Duration(1), backoffer.Backoff(i)) } backoffer = NewExponential(1, 1, 10) - for i := 0; i < 10; i++ { + for i := range 10 { require.Equal(t, time.Duration(1), backoffer.Backoff(i)) } backoffer = NewExponential(1, 2, 10) res := []time.Duration{1, 2, 4, 8, 10, 10, 10, 10, 10, 10} - for i := 0; i < 10; i++ { + for i := range 10 { require.Equal(t, res[i], backoffer.Backoff(i)) } } diff --git a/pkg/util/bitmap/concurrent.go b/pkg/util/bitmap/concurrent.go index 7bcb9d83cc657..a8440123a5b3d 100644 --- a/pkg/util/bitmap/concurrent.go +++ b/pkg/util/bitmap/concurrent.go @@ -41,7 +41,7 @@ type ConcurrentBitmap struct { func (cb *ConcurrentBitmap) Clone() *ConcurrentBitmap { cp := NewConcurrentBitmap(cb.bitLen) needLen := len(cp.segments) - for i := 0; i < needLen; i++ { + for i := range needLen { cp.segments[i] = cb.segments[i] } return cp diff --git a/pkg/util/bitmap/concurrent_test.go b/pkg/util/bitmap/concurrent_test.go index f7d2c8acba949..5a6b57e144ada 100644 --- a/pkg/util/bitmap/concurrent_test.go +++ b/pkg/util/bitmap/concurrent_test.go @@ -28,7 +28,7 @@ func 
TestConcurrentBitmapSet(t *testing.T) { bm := NewConcurrentBitmap(loopCount * interval) wg := &sync.WaitGroup{} - for i := 0; i < loopCount; i++ { + for i := range loopCount { wg.Add(1) go func(bitIndex int) { bm.Set(bitIndex) @@ -37,7 +37,7 @@ func TestConcurrentBitmapSet(t *testing.T) { } wg.Wait() - for i := 0; i < loopCount; i++ { + for i := range loopCount { if i%interval == 0 { assert.Equal(t, true, bm.UnsafeIsSet(i)) } else { @@ -57,13 +57,13 @@ func TestConcurrentBitmapUniqueSetter(t *testing.T) { var setterCounter uint64 var clearCounter uint64 // Concurrently set bit, and check if isSetter count matches zero clearing count. - for i := 0; i < loopCount; i++ { + for range loopCount { // Clear bitmap to zero. if atomic.CompareAndSwapUint32(&(bm.segments[0]), 0x00000001, 0x00000000) { atomic.AddUint64(&clearCounter, 1) } // Concurrently set. - for j := 0; j < competitorsPerSet; j++ { + for range competitorsPerSet { wg.Add(1) go func() { if bm.Set(31) { diff --git a/pkg/util/cgroup/cgroup_cpu_test.go b/pkg/util/cgroup/cgroup_cpu_test.go index 959e6e57ddcec..d32da890cfa47 100644 --- a/pkg/util/cgroup/cgroup_cpu_test.go +++ b/pkg/util/cgroup/cgroup_cpu_test.go @@ -73,7 +73,7 @@ func TestGetCgroupCPU(t *testing.T) { } exit := make(chan struct{}) var wg sync.WaitGroup - for i := 0; i < 10; i++ { + for range 10 { wg.Add(1) go func() { defer wg.Done() diff --git a/pkg/util/cgroup/cgroup_mock_test.go b/pkg/util/cgroup/cgroup_mock_test.go index 590c415330fb0..7d01073e5135f 100644 --- a/pkg/util/cgroup/cgroup_mock_test.go +++ b/pkg/util/cgroup/cgroup_mock_test.go @@ -378,7 +378,7 @@ const ( ) func TestCgroupsGetCPU(t *testing.T) { - for i := 0; i < 2; i++ { + for i := range 2 { if i == 1 { // The field in /proc/self/cgroup and /proc/self/mountinfo may appear as "cpuacct,cpu" or "rw,cpuacct,cpu" // while the input controller is "cpu,cpuacct" diff --git a/pkg/util/checksum/checksum_test.go b/pkg/util/checksum/checksum_test.go index 7d7b9197b4665..3391cec34c267 100644 --- a/pkg/util/checksum/checksum_test.go +++ b/pkg/util/checksum/checksum_test.go @@ -416,7 +416,7 @@ func TestChecksumWriterAutoFlush(t *testing.T) { func newTestBuff(str string, n int) *bytes.Buffer { buf := bytes.NewBuffer(nil) testData := str - for i := 0; i < n; i++ { + for range n { buf.WriteString(testData) } return buf diff --git a/pkg/util/chunk/alloc_test.go b/pkg/util/chunk/alloc_test.go index 333346b7a2597..bea00238dc51c 100644 --- a/pkg/util/chunk/alloc_test.go +++ b/pkg/util/chunk/alloc_test.go @@ -69,7 +69,7 @@ func TestAllocator(t *testing.T) { check() // Check maxFreeListLen - for i := 0; i < maxFreeChunks+10; i++ { + for range maxFreeChunks + 10 { alloc.Alloc(fieldTypes, initCap, maxChunkSize) } alloc.Reset() @@ -105,7 +105,7 @@ func TestColumnAllocator(t *testing.T) { ft := fieldTypes[2] // Test reuse. 
cols := make([]*Column, 0, maxFreeColumnsPerType+10) - for i := 0; i < maxFreeColumnsPerType+10; i++ { + for range maxFreeColumnsPerType + 10 { col := alloc1.NewColumn(ft, 20) cols = append(cols, col) } @@ -135,7 +135,7 @@ func TestNoDuplicateColumnReuse(t *testing.T) { types.NewFieldType(mysql.TypeDatetime), } alloc := NewAllocator() - for i := 0; i < maxFreeChunks+10; i++ { + for range maxFreeChunks + 10 { chk := alloc.Alloc(fieldTypes, 5, 10) chk.MakeRef(1, 3) } @@ -170,7 +170,7 @@ func TestAvoidColumnReuse(t *testing.T) { types.NewFieldTypeBuilder().SetType(mysql.TypeDatetime).BuildP(), } alloc := NewAllocator() - for i := 0; i < maxFreeChunks+10; i++ { + for range maxFreeChunks + 10 { chk := alloc.Alloc(fieldTypes, 5, 10) for _, col := range chk.columns { col.avoidReusing = true @@ -186,7 +186,7 @@ func TestAvoidColumnReuse(t *testing.T) { // test decoder will set avoid reusing flag. chk := alloc.Alloc(fieldTypes, 5, 1024) - for i := 0; i <= 10; i++ { + for range 10 { for _, col := range chk.columns { col.AppendNull() } @@ -220,7 +220,7 @@ func TestColumnAllocatorLimit(t *testing.T) { InitChunkAllocSize(10, 20) alloc := NewAllocator() require.True(t, alloc.CheckReuseAllocSize()) - for i := 0; i < maxFreeChunks+10; i++ { + for range maxFreeChunks + 10 { alloc.Alloc(fieldTypes, 5, 10) } alloc.Reset() @@ -232,7 +232,7 @@ func TestColumnAllocatorLimit(t *testing.T) { //Reduce capacity InitChunkAllocSize(5, 10) alloc = NewAllocator() - for i := 0; i < maxFreeChunks+10; i++ { + for range maxFreeChunks + 10 { alloc.Alloc(fieldTypes, 5, 10) } alloc.Reset() @@ -244,7 +244,7 @@ func TestColumnAllocatorLimit(t *testing.T) { //increase capacity InitChunkAllocSize(50, 100) alloc = NewAllocator() - for i := 0; i < maxFreeChunks+10; i++ { + for range maxFreeChunks + 10 { alloc.Alloc(fieldTypes, 5, 10) } alloc.Reset() @@ -259,7 +259,7 @@ func TestColumnAllocatorLimit(t *testing.T) { nu := len(alloc.columnAlloc.pool[VarElemLen].allocColumns) require.Equal(t, nu, 1) for _, col := range rs.columns { - for i := 0; i < 20480; i++ { + for range 20480 { col.data = append(col.data, byte('a')) } } @@ -280,7 +280,7 @@ func TestColumnAllocatorCheck(t *testing.T) { } InitChunkAllocSize(10, 20) alloc := NewAllocator() - for i := 0; i < 4; i++ { + for range 4 { alloc.Alloc(fieldTypes, 5, 10) } col := alloc.columnAlloc.NewColumn(types.NewFieldTypeBuilder().SetType(mysql.TypeFloat).BuildP(), 10) @@ -343,11 +343,11 @@ func TestSyncAllocator(t *testing.T) { alloc := NewSyncAllocator(NewAllocator()) wg := &sync.WaitGroup{} - for i := 0; i < 1000; i++ { + for range 1000 { wg.Add(1) go func() { - for j := 0; j < 10; j++ { - for k := 0; k < 100; k++ { + for range 10 { + for range 100 { chk := alloc.Alloc(fieldTypes, 5, 100) require.NotNil(t, chk) } diff --git a/pkg/util/chunk/chunk.go b/pkg/util/chunk/chunk.go index 4b93b559b5dcd..7f46b8a4f8df2 100644 --- a/pkg/util/chunk/chunk.go +++ b/pkg/util/chunk/chunk.go @@ -156,7 +156,7 @@ func renewEmpty(chk *Chunk) *Chunk { } func (c *Chunk) resetForReuse() { - for i := 0; i < len(c.columns); i++ { + for i := range len(c.columns) { c.columns[i] = nil } columns := c.columns[:0] @@ -245,12 +245,12 @@ func (c *Chunk) SwapColumn(colIdx int, other *Chunk, otherIdx int) error { } // Find the leftmost Column of the reference which is the actual Column to // be swapped. 
- for i := 0; i < colIdx; i++ { + for i := range colIdx { if c.columns[i] == c.columns[colIdx] { colIdx = i } } - for i := 0; i < otherIdx; i++ { + for i := range otherIdx { if other.columns[i] == other.columns[otherIdx] { otherIdx = i } @@ -699,8 +699,8 @@ func (c *Chunk) Reconstruct() { // ToString returns all the values in a chunk. func (c *Chunk) ToString(ft []*types.FieldType) string { - var buf []byte - for rowIdx := 0; rowIdx < c.NumRows(); rowIdx++ { + buf := make([]byte, 0, c.NumRows()*2) + for rowIdx := range c.NumRows() { row := c.GetRow(rowIdx) buf = append(buf, row.ToString(ft)...) buf = append(buf, '\n') diff --git a/pkg/util/chunk/chunk_in_disk.go b/pkg/util/chunk/chunk_in_disk.go index 9b4353fa9fe4a..268f1e1eca25f 100644 --- a/pkg/util/chunk/chunk_in_disk.go +++ b/pkg/util/chunk/chunk_in_disk.go @@ -194,7 +194,7 @@ func (d *DataInDiskByChunks) serializeChunkData(pos *int64, chk *Chunk, selSize d.buf = d.buf[:*pos+selSize] selLen := len(chk.sel) - for i := 0; i < selLen; i++ { + for i := range selLen { *(*int)(unsafe.Pointer(&d.buf[*pos])) = chk.sel[i] *pos += intLen } @@ -265,7 +265,7 @@ func (d *DataInDiskByChunks) deserializeColMeta(pos *int64) (length int64, nullM func (d *DataInDiskByChunks) deserializeSel(chk *Chunk, pos *int64, selSize int) { selLen := int64(selSize) / intLen chk.sel = make([]int, selLen) - for i := int64(0); i < selLen; i++ { + for i := range selLen { chk.sel[i] = *(*int)(unsafe.Pointer(&d.buf[*pos])) *pos += intLen } @@ -290,7 +290,7 @@ func (d *DataInDiskByChunks) deserializeChunkData(chk *Chunk, pos *int64) { func (d *DataInDiskByChunks) deserializeOffsets(dst []int64, pos *int64) { offsetNum := len(dst) - for i := 0; i < offsetNum; i++ { + for i := range offsetNum { dst[i] = *(*int64)(unsafe.Pointer(&d.buf[*pos])) *pos += int64Len } diff --git a/pkg/util/chunk/chunk_in_disk_test.go b/pkg/util/chunk/chunk_in_disk_test.go index ce23c233ac0da..46dd5745c9ee8 100644 --- a/pkg/util/chunk/chunk_in_disk_test.go +++ b/pkg/util/chunk/chunk_in_disk_test.go @@ -36,7 +36,7 @@ func addAuxDataForChunks(chunks []*Chunk) { selLen := rand.Intn(50) + 1 chk.sel = make([]int, selLen) - for i := 0; i < selLen; i++ { + for i := range selLen { chk.sel[i] = rand.Int() } } @@ -49,7 +49,7 @@ func checkAuxDataForChunk(t *testing.T, chk1, chk2 *Chunk) { require.Equal(t, len(chk1.sel), len(chk2.sel)) length := len(chk1.sel) - for i := 0; i < length; i++ { + for i := range length { require.Equal(t, chk1.sel[i], chk2.sel[i]) } } @@ -61,7 +61,7 @@ func checkChunk(t *testing.T, chk1, chk2 *Chunk) { require.Equal(t, chk1.NumRows(), chk2.NumRows()) numRows := chk1.NumRows() - for i := 0; i < numRows; i++ { + for i := range numRows { checkRow(t, chk1.GetRow(i), chk2.GetRow(i)) } } @@ -78,7 +78,7 @@ func TestDataInDiskByChunks(t *testing.T) { require.NoError(t, err) } - for i := 0; i < numChk; i++ { + for i := range numChk { chk, err := dataInDiskByChunks.GetChunk(i) require.NoError(t, err) checkChunk(t, chk, chks[i]) diff --git a/pkg/util/chunk/chunk_test.go b/pkg/util/chunk/chunk_test.go index 690f831001597..fe3110a5ea12c 100644 --- a/pkg/util/chunk/chunk_test.go +++ b/pkg/util/chunk/chunk_test.go @@ -33,7 +33,7 @@ func TestAppendRow(t *testing.T) { numRows := 10 chk := newChunk(8, 8, 0, 0, 40, 0) strFmt := "%d.12345" - for i := 0; i < numRows; i++ { + for i := range numRows { chk.AppendNull(0) chk.AppendInt64(1, int64(i)) str := fmt.Sprintf(strFmt, i) @@ -44,7 +44,7 @@ func TestAppendRow(t *testing.T) { } require.Equal(t, numCols, chk.NumCols()) require.Equal(t, numRows, 
chk.NumRows()) - for i := 0; i < numRows; i++ { + for i := range numRows { row := chk.GetRow(i) require.Equal(t, int64(0), row.GetInt64(0)) require.True(t, row.IsNull(0)) @@ -62,11 +62,11 @@ func TestAppendRow(t *testing.T) { } chk2 := newChunk(8, 8, 0, 0, 40, 0) - for i := 0; i < numRows; i++ { + for i := range numRows { row := chk.GetRow(i) chk2.AppendRow(row) } - for i := 0; i < numCols; i++ { + for i := range numCols { col2, col1 := chk2.columns[i], chk.columns[i] col2.elemBuf, col1.elemBuf = nil, nil require.Equal(t, col1, col2) @@ -205,7 +205,7 @@ func TestTruncateTo(t *testing.T) { src := NewChunkWithCapacity(fieldTypes, 32) - for i := 0; i < 8; i++ { + for range 8 { src.AppendFloat32(0, 12.8) src.AppendString(1, "abc") src.AppendJSON(2, jsonObj) @@ -268,7 +268,7 @@ func TestChunkSizeControl(t *testing.T) { chk := New([]*types.FieldType{types.NewFieldType(mysql.TypeLong)}, maxChunkSize, maxChunkSize) require.Equal(t, maxChunkSize, chk.RequiredRows()) - for i := 0; i < maxChunkSize; i++ { + for range maxChunkSize { chk.AppendInt64(0, 1) } maxChunkSize += maxChunkSize / 3 @@ -400,10 +400,10 @@ func newAllTypes() []*types.FieldType { func TestCompare(t *testing.T) { allTypes := newAllTypes() chunk := NewChunkWithCapacity(allTypes, 32) - for i := 0; i < len(allTypes); i++ { + for i := range allTypes { chunk.AppendNull(i) } - for i := 0; i < len(allTypes); i++ { + for i := range allTypes { switch allTypes[i].GetType() { case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeYear: if mysql.HasUnsignedFlag(allTypes[i].GetFlag()) { @@ -436,7 +436,7 @@ func TestCompare(t *testing.T) { require.FailNow(t, "type not handled", allTypes[i].GetType()) } } - for i := 0; i < len(allTypes); i++ { + for i := range allTypes { switch allTypes[i].GetType() { case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeYear: if mysql.HasUnsignedFlag(allTypes[i].GetFlag()) { @@ -472,7 +472,7 @@ func TestCompare(t *testing.T) { rowNull := chunk.GetRow(0) rowSmall := chunk.GetRow(1) rowBig := chunk.GetRow(2) - for i := 0; i < len(allTypes); i++ { + for i := range allTypes { cmpFunc := GetCompareFunc(allTypes[i]) require.Equal(t, 0, cmpFunc(rowNull, i, rowNull, i)) require.Equal(t, -1, cmpFunc(rowNull, i, rowSmall, i)) @@ -487,11 +487,11 @@ func TestCompare(t *testing.T) { func TestCopyTo(t *testing.T) { allTypes := newAllTypes() chunk := NewChunkWithCapacity(allTypes, 101) - for i := 0; i < len(allTypes); i++ { + for i := range allTypes { chunk.AppendNull(i) } - for k := 0; k < 100; k++ { - for i := 0; i < len(allTypes); i++ { + for k := range 100 { + for i := range allTypes { switch allTypes[i].GetType() { case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeYear: if mysql.HasUnsignedFlag(allTypes[i].GetFlag()) { @@ -528,10 +528,10 @@ func TestCopyTo(t *testing.T) { ck1 := chunk.CopyConstruct() - for k := 0; k < 101; k++ { + for k := range 101 { row := chunk.GetRow(k) r1 := ck1.GetRow(k) - for i := 0; i < len(allTypes); i++ { + for i := range allTypes { cmpFunc := GetCompareFunc(allTypes[i]) require.Zero(t, cmpFunc(row, i, r1, i)) } @@ -666,7 +666,7 @@ func TestAppendSel(t *testing.T) { tll := types.NewFieldType(mysql.TypeLonglong) chk := NewChunkWithCapacity([]*types.FieldType{tll}, 1024) sel := make([]int, 0, 1024/2) - for i := 0; i < 1024/2; i++ { + for i := range 1024 / 2 { chk.AppendInt64(0, int64(i)) if i%2 == 0 { sel = append(sel, i) @@ -727,14 +727,14 @@ func 
TestToString(t *testing.T) { func BenchmarkAppendInt(b *testing.B) { b.ReportAllocs() chk := newChunk(8) - for i := 0; i < b.N; i++ { + for range b.N { appendInt(chk) } } func appendInt(chk *Chunk) { chk.Reset() - for i := 0; i < 1000; i++ { + for i := range 1000 { chk.AppendInt64(0, int64(i)) } } @@ -742,14 +742,14 @@ func appendInt(chk *Chunk) { func BenchmarkAppendString(b *testing.B) { b.ReportAllocs() chk := newChunk(0) - for i := 0; i < b.N; i++ { + for range b.N { appendString(chk) } } func appendString(chk *Chunk) { chk.Reset() - for i := 0; i < 1000; i++ { + for range 1000 { chk.AppendString(0, "abcd") } } @@ -763,14 +763,14 @@ func BenchmarkAppendRow(b *testing.B) { rowChk.AppendBytes(3, []byte("abcd")) chk := newChunk(8, 8, 0, 0) - for i := 0; i < b.N; i++ { + for range b.N { appendRow(chk, rowChk.GetRow(0)) } } func appendRow(chk *Chunk, row Row) { chk.Reset() - for i := 0; i < 1000; i++ { + for range 1000 { chk.AppendRow(row) } } @@ -778,7 +778,7 @@ func appendRow(chk *Chunk, row Row) { func BenchmarkAppendBytes1024(b *testing.B) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeString)}, 32) var bs = make([]byte, 256) - for i := 0; i < b.N; i++ { + for range b.N { appendBytes(chk, bs, 1024) } } @@ -786,7 +786,7 @@ func BenchmarkAppendBytes1024(b *testing.B) { func BenchmarkAppendBytes512(b *testing.B) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeString)}, 32) var bs = make([]byte, 256) - for i := 0; i < b.N; i++ { + for range b.N { appendBytes(chk, bs, 512) } } @@ -794,7 +794,7 @@ func BenchmarkAppendBytes512(b *testing.B) { func BenchmarkAppendBytes256(b *testing.B) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeString)}, 32) var bs = make([]byte, 256) - for i := 0; i < b.N; i++ { + for range b.N { appendBytes(chk, bs, 256) } } @@ -802,7 +802,7 @@ func BenchmarkAppendBytes256(b *testing.B) { func BenchmarkAppendBytes128(b *testing.B) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeString)}, 32) var bs = make([]byte, 256) - for i := 0; i < b.N; i++ { + for range b.N { appendBytes(chk, bs, 128) } } @@ -810,7 +810,7 @@ func BenchmarkAppendBytes128(b *testing.B) { func BenchmarkAppendBytes64(b *testing.B) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeString)}, 32) var bs = make([]byte, 256) - for i := 0; i < b.N; i++ { + for range b.N { appendBytes(chk, bs, 64) } } @@ -818,7 +818,7 @@ func BenchmarkAppendBytes64(b *testing.B) { func BenchmarkAppendBytes32(b *testing.B) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeString)}, 32) var bs = make([]byte, 256) - for i := 0; i < b.N; i++ { + for range b.N { appendBytes(chk, bs, 32) } } @@ -826,7 +826,7 @@ func BenchmarkAppendBytes32(b *testing.B) { func BenchmarkAppendBytes16(b *testing.B) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeString)}, 32) var bs = make([]byte, 256) - for i := 0; i < b.N; i++ { + for range b.N { appendBytes(chk, bs, 16) } } @@ -834,7 +834,7 @@ func BenchmarkAppendBytes16(b *testing.B) { func BenchmarkAppendBytes8(b *testing.B) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeString)}, 32) var bs = make([]byte, 256) - for i := 0; i < b.N; i++ { + for range b.N { appendBytes(chk, bs, 8) } } @@ -842,7 +842,7 @@ func BenchmarkAppendBytes8(b *testing.B) { func BenchmarkAppendBytes4(b *testing.B) { chk := 
NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeString)}, 32) var bs = make([]byte, 256) - for i := 0; i < b.N; i++ { + for range b.N { appendBytes(chk, bs, 4) } } @@ -850,7 +850,7 @@ func BenchmarkAppendBytes4(b *testing.B) { func BenchmarkAppendBytes2(b *testing.B) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeString)}, 32) var bs = make([]byte, 256) - for i := 0; i < b.N; i++ { + for range b.N { appendBytes(chk, bs, 2) } } @@ -858,14 +858,14 @@ func BenchmarkAppendBytes2(b *testing.B) { func BenchmarkAppendBytes1(b *testing.B) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeString)}, 32) var bs = make([]byte, 256) - for i := 0; i < b.N; i++ { + for range b.N { appendBytes(chk, bs, 1) } } func appendBytes(chk *Chunk, bs []byte, times int) { chk.Reset() - for i := 0; i < times; i++ { + for range times { chk.AppendBytes(0, bs) } } @@ -873,13 +873,13 @@ func appendBytes(chk *Chunk, bs []byte, times int) { func BenchmarkAccess(b *testing.B) { b.StopTimer() rowChk := newChunk(8) - for i := 0; i < 8192; i++ { + for range 8192 { rowChk.AppendInt64(0, math.MaxUint16) } b.StartTimer() var sum int64 - for i := 0; i < b.N; i++ { - for j := 0; j < 8192; j++ { + for range b.N { + for j := range 8192 { sum += rowChk.GetRow(j).GetInt64(0) } } @@ -898,14 +898,14 @@ func BenchmarkChunkMemoryUsage(b *testing.B) { timeObj := types.NewTime(types.FromGoTime(time.Now()), mysql.TypeDatetime, 0) durationObj := types.Duration{Duration: math.MaxInt64, Fsp: 0} - for i := 0; i < initCap; i++ { + for range initCap { chk.AppendFloat64(0, 123.123) chk.AppendString(1, "123") chk.AppendTime(2, timeObj) chk.AppendDuration(3, durationObj) } b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { chk.MemoryUsage() } } @@ -979,7 +979,7 @@ func benchmarkChunkGrow(t benchChunkGrowCase) func(b *testing.B) { b.ReportAllocs() chk := New([]*types.FieldType{types.NewFieldType(mysql.TypeLong)}, t.initCap, t.maxCap) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { e := &seqNumberGenerateExec{genCountSize: t.cntPerCall} for { e.Next(chk, t.newReset) @@ -1003,7 +1003,7 @@ func TestAppendRows(t *testing.T) { numRows := 10 chk := newChunk(8, 8, 0, 0, 40, 0) strFmt := "%d.12345" - for i := 0; i < numRows; i++ { + for i := range numRows { chk.AppendNull(0) chk.AppendInt64(1, int64(i)) str := fmt.Sprintf(strFmt, i) @@ -1018,11 +1018,11 @@ func TestAppendRows(t *testing.T) { chk2 := newChunk(8, 8, 0, 0, 40, 0) require.Equal(t, numCols, chk.NumCols()) rows := make([]Row, numRows) - for i := 0; i < numRows; i++ { + for i := range numRows { rows[i] = chk.GetRow(i) } chk2.AppendRows(rows) - for i := 0; i < numRows; i++ { + for i := range numRows { row := chk2.GetRow(i) require.Equal(t, int64(0), row.GetInt64(0)) require.True(t, row.IsNull(0)) @@ -1043,7 +1043,7 @@ func BenchmarkBatchAppendRows(b *testing.B) { b.ReportAllocs() numRows := 4096 rowChk := newChunk(8, 8, 0, 0) - for i := 0; i < numRows; i++ { + for range numRows { rowChk.AppendNull(0) rowChk.AppendInt64(1, 1) rowChk.AppendString(2, "abcd") @@ -1065,9 +1065,9 @@ func BenchmarkBatchAppendRows(b *testing.B) { } for _, conf := range testCaseConfs { b.Run(fmt.Sprintf("row-%d", conf.batchSize), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { chk.Reset() - for j := 0; j < conf.batchSize; j++ { + for j := range conf.batchSize { chk.AppendRow(rowChk.GetRow(j)) } } @@ -1075,10 +1075,10 @@ func BenchmarkBatchAppendRows(b *testing.B) { b.ResetTimer() 
b.Run(fmt.Sprintf("column-%d", conf.batchSize), func(b *testing.B) { rows := make([]Row, conf.batchSize) - for i := 0; i < conf.batchSize; i++ { + for i := range conf.batchSize { rows[i] = rowChk.GetRow(i) } - for i := 0; i < b.N; i++ { + for range b.N { chk.Reset() chk.AppendRows(rows) } @@ -1090,7 +1090,7 @@ func BenchmarkAppendRows(b *testing.B) { b.ReportAllocs() rowChk := newChunk(8, 8, 0, 0) - for i := 0; i < 4096; i++ { + for range 4096 { rowChk.AppendNull(0) rowChk.AppendInt64(1, 1) rowChk.AppendString(2, "abcd") @@ -1113,16 +1113,16 @@ func BenchmarkAppendRows(b *testing.B) { for _, conf := range testCaseConfs { b.ResetTimer() b.Run(fmt.Sprintf("row-%d", conf.batchSize), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { chk.Reset() - for j := 0; j < conf.batchSize; j++ { + for j := range conf.batchSize { chk.AppendRow(rowChk.GetRow(j)) } } }) b.ResetTimer() b.Run(fmt.Sprintf("column-%d", conf.batchSize), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { chk.Reset() chk.Append(rowChk, 0, conf.batchSize) } @@ -1134,7 +1134,7 @@ func BenchmarkAppend(b *testing.B) { b.ReportAllocs() rowChk := newChunk(0, 0) - for i := 0; i < 4096; i++ { + for range 4096 { rowChk.AppendString(0, "abcd") rowChk.AppendBytes(1, []byte("abcd")) } @@ -1155,7 +1155,7 @@ func BenchmarkAppend(b *testing.B) { for _, conf := range testCaseConfs { b.ResetTimer() b.Run(fmt.Sprintf("column-%d", conf.batchSize), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { chk.Reset() chk.Append(rowChk, 0, conf.batchSize) } diff --git a/pkg/util/chunk/chunk_util.go b/pkg/util/chunk/chunk_util.go index 05470aadcbf23..124a2c87c4076 100644 --- a/pkg/util/chunk/chunk_util.go +++ b/pkg/util/chunk/chunk_util.go @@ -49,7 +49,7 @@ func CopySelectedJoinRowsDirect(src *Chunk, selected []bool, dst *Chunk) (bool, for j, srcCol := range src.columns { dstCol := dst.columns[j] if srcCol.isFixed() { - for i := 0; i < len(selected); i++ { + for i := range selected { if !selected[i] { continue } @@ -61,7 +61,7 @@ func CopySelectedJoinRowsDirect(src *Chunk, selected []bool, dst *Chunk) (bool, dstCol.data = append(dstCol.data, srcCol.data[offset:offset+elemLen]...) } } else { - for i := 0; i < len(selected); i++ { + for i := range selected { if !selected[i] { continue } @@ -180,7 +180,7 @@ func copySameOuterRows(outerColOffset, outerColLen int, src *Chunk, numRows int, dstCol.data = append(dstCol.data, srcCol.data[start:end]...) 
offsets := dstCol.offsets elemLen := srcCol.offsets[row.idx+1] - srcCol.offsets[row.idx] - for j := 0; j < numRows; j++ { + for range numRows { offsets = append(offsets, offsets[len(offsets)-1]+elemLen) } dstCol.offsets = offsets diff --git a/pkg/util/chunk/chunk_util_test.go b/pkg/util/chunk/chunk_util_test.go index 32614ada18703..58a9e1594d237 100644 --- a/pkg/util/chunk/chunk_util_test.go +++ b/pkg/util/chunk/chunk_util_test.go @@ -29,7 +29,7 @@ func getChk(isLast3ColTheSame bool) (*Chunk, *Chunk, []bool) { srcChk := newChunkWithInitCap(numRows, 0, 0, 8, 8, sizeTime, 0) selected := make([]bool, numRows) var row Row - for j := 0; j < numRows; j++ { + for j := range numRows { if isLast3ColTheSame { if j%7 == 0 { row = MutRowFromValues("abc", "abcdefg", nil, 123, types.ZeroDatetime, "abcdefg").ToRow() @@ -55,7 +55,7 @@ func getChk(isLast3ColTheSame bool) (*Chunk, *Chunk, []bool) { func TestCopySelectedJoinRows(t *testing.T) { srcChk, dstChk, selected := getChk(true) numRows := srcChk.NumRows() - for i := 0; i < numRows; i++ { + for i := range numRows { if !selected[i] { continue } @@ -80,7 +80,7 @@ func TestCopySelectedJoinRows(t *testing.T) { func TestCopySelectedJoinRowsWithoutSameOuters(t *testing.T) { srcChk, dstChk, selected := getChk(false) numRows := srcChk.NumRows() - for i := 0; i < numRows; i++ { + for i := range numRows { if !selected[i] { continue } @@ -105,7 +105,7 @@ func TestCopySelectedJoinRowsWithoutSameOuters(t *testing.T) { func TestCopySelectedJoinRowsDirect(t *testing.T) { srcChk, dstChk, selected := getChk(false) numRows := srcChk.NumRows() - for i := 0; i < numRows; i++ { + for i := range numRows { if !selected[i] { continue } @@ -184,7 +184,7 @@ func BenchmarkCopySelectedJoinRows(b *testing.B) { b.ReportAllocs() srcChk, dstChk, selected := getChk(true) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { dstChk.Reset() _, err := CopySelectedJoinRowsWithSameOuterRows(srcChk, 0, 3, 3, 3, selected, dstChk) if err != nil { @@ -196,7 +196,7 @@ func BenchmarkCopySelectedJoinRowsDirect(b *testing.B) { b.ReportAllocs() srcChk, dstChk, selected := getChk(false) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { dstChk.Reset() _, err := CopySelectedJoinRowsDirect(srcChk, selected, dstChk) if err != nil { @@ -209,9 +209,9 @@ func BenchmarkAppendSelectedRow(b *testing.B) { srcChk, dstChk, selected := getChk(true) numRows := srcChk.NumRows() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { dstChk.Reset() - for j := 0; j < numRows; j++ { + for j := range numRows { if !selected[j] { continue } diff --git a/pkg/util/chunk/codec.go b/pkg/util/chunk/codec.go index b18c654eb4e2c..6e79b7ae958f0 100644 --- a/pkg/util/chunk/codec.go +++ b/pkg/util/chunk/codec.go @@ -99,7 +99,7 @@ func (c *Codec) Decode(buffer []byte) (*Chunk, []byte) { // DecodeToChunk decodes a Chunk from a byte slice, return the remained unused bytes. 
func (c *Codec) DecodeToChunk(buffer []byte, chk *Chunk) (remained []byte) { - for i := 0; i < len(chk.columns); i++ { + for i := range len(chk.columns) { buffer = c.decodeColumn(buffer, chk.columns[i], i) } return buffer @@ -225,7 +225,7 @@ func EstimateTypeWidth(colType *types.FieldType) int { } func init() { - for i := 0; i < 128; i++ { + for i := range 128 { allNotNullBitmap[i] = 0xFF } } @@ -262,7 +262,7 @@ func (c *Decoder) Decode(chk *Chunk) { if requiredRows > c.remainedRows { requiredRows = c.remainedRows } - for i := 0; i < chk.NumCols(); i++ { + for i := range chk.NumCols() { c.decodeColumn(chk, i, requiredRows) } c.remainedRows -= requiredRows @@ -332,7 +332,7 @@ func (c *Decoder) decodeColumn(chk *Chunk, ordinal int, requiredRows int) { // bitOffset indicates the number of valid bits in destCol.nullBitmap's last byte. bitOffset := destCol.length % 8 startIdx := (destCol.length - 1) >> 3 - for i := 0; i < numNullBitmapBytes; i++ { + for i := range numNullBitmapBytes { destCol.nullBitmap[startIdx+i] |= srcCol.nullBitmap[i] << bitOffset // The high order 8-bitOffset bits in `srcCol.nullBitmap[i]` should be appended to the low order of the next slot. if startIdx+i+1 < bitMapLen { diff --git a/pkg/util/chunk/codec_test.go b/pkg/util/chunk/codec_test.go index 904e8172f699e..7a978d43effad 100644 --- a/pkg/util/chunk/codec_test.go +++ b/pkg/util/chunk/codec_test.go @@ -36,7 +36,7 @@ func TestCodec(t *testing.T) { colTypes = append(colTypes, types.NewFieldType(mysql.TypeJSON)) oldChk := NewChunkWithCapacity(colTypes, numRows) - for i := 0; i < numRows; i++ { + for i := range numRows { str := fmt.Sprintf("%d.12345", i) oldChk.AppendNull(0) oldChk.AppendInt64(1, int64(i)) @@ -55,7 +55,7 @@ func TestCodec(t *testing.T) { require.Empty(t, remained) require.Equal(t, numCols, newChk.NumCols()) require.Equal(t, numRows, newChk.NumRows()) - for i := 0; i < numRows; i++ { + for i := range numRows { row := newChk.GetRow(i) str := fmt.Sprintf("%d.12345", i) require.True(t, row.IsNull(0)) @@ -100,7 +100,7 @@ func BenchmarkEncodeChunk(b *testing.B) { numRows := 1024 colTypes := make([]*types.FieldType, numCols) - for i := 0; i < numCols; i++ { + for i := range numCols { colTypes[i] = types.NewFieldType(mysql.TypeLonglong) } chk := NewChunkWithCapacity(colTypes, numRows) @@ -108,7 +108,7 @@ func BenchmarkEncodeChunk(b *testing.B) { codec := &Codec{} b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { codec.Encode(chk) } } @@ -118,7 +118,7 @@ func BenchmarkDecode(b *testing.B) { numRows := 1024 colTypes := make([]*types.FieldType, numCols) - for i := 0; i < numCols; i++ { + for i := range numCols { colTypes[i] = types.NewFieldType(mysql.TypeLonglong) } chk := NewChunkWithCapacity(colTypes, numRows) @@ -126,7 +126,7 @@ func BenchmarkDecode(b *testing.B) { buffer := codec.Encode(chk) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { codec.Decode(buffer) } } @@ -139,7 +139,7 @@ func BenchmarkDecodeToChunk(b *testing.B) { chk := &Chunk{ columns: make([]*Column, numCols), } - for i := 0; i < numCols; i++ { + for i := range numCols { chk.columns[i] = &Column{ length: numRows, nullBitmap: make([]byte, numRows/8+1), @@ -152,7 +152,7 @@ func BenchmarkDecodeToChunk(b *testing.B) { buffer := codec.Encode(chk) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { codec.DecodeToChunk(buffer, chk) } } @@ -170,7 +170,7 @@ func BenchmarkDecodeToChunkWithVariableType(b *testing.B) { colTypes = append(colTypes, types.NewFieldType(mysql.TypeJSON)) chk := NewChunkWithCapacity(colTypes, 
numRows) - for i := 0; i < numRows; i++ { + for i := range numRows { str := fmt.Sprintf("%d.12345", i) chk.AppendNull(0) chk.AppendInt64(1, int64(i)) @@ -185,7 +185,7 @@ func BenchmarkDecodeToChunkWithVariableType(b *testing.B) { chk.Reset() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { codec.DecodeToChunk(buffer, chk) } } diff --git a/pkg/util/chunk/column.go b/pkg/util/chunk/column.go index 3e0fc11aa240d..9732dca081170 100644 --- a/pkg/util/chunk/column.go +++ b/pkg/util/chunk/column.go @@ -256,12 +256,12 @@ func (c *Column) AppendCellNTimes(src *Column, pos, times int) { if c.isFixed() { elemLen := len(src.elemBuf) offset := pos * elemLen - for i := 0; i < times; i++ { + for range times { c.data = append(c.data, src.data[offset:offset+elemLen]...) } } else { start, end := src.offsets[pos], src.offsets[pos+1] - for i := 0; i < times; i++ { + for range times { c.data = append(c.data, src.data[start:end]...) c.offsets = append(c.offsets, int64(len(c.data))) } @@ -278,7 +278,7 @@ func (c *Column) appendMultiSameNullBitmap(notNull bool, num int) { if notNull { b = 0xff } - for i := 0; i < numNewBytes; i++ { + for range numNewBytes { c.nullBitmap = append(c.nullBitmap, b) } if !notNull { @@ -298,12 +298,12 @@ func (c *Column) appendMultiSameNullBitmap(notNull bool, num int) { func (c *Column) AppendNNulls(n int) { c.appendMultiSameNullBitmap(false, n) if c.isFixed() { - for i := 0; i < n; i++ { + for range n { c.data = append(c.data, c.elemBuf...) } } else { currentLength := c.offsets[c.length] - for i := 0; i < n; i++ { + for range n { c.offsets = append(c.offsets, currentLength) } } @@ -867,7 +867,7 @@ func (c *Column) MergeNulls(cols ...*Column) { // we can test if data in column are deeply copied. func (c *Column) DestroyDataForTest() { dataByteNum := len(c.data) - for i := 0; i < dataByteNum; i++ { + for i := range dataByteNum { c.data[i] = byte(rand.Intn(256)) } } diff --git a/pkg/util/chunk/column_test.go b/pkg/util/chunk/column_test.go index 0e1d3813637da..d136c25970fbe 100644 --- a/pkg/util/chunk/column_test.go +++ b/pkg/util/chunk/column_test.go @@ -28,7 +28,7 @@ import ( func TestColumnCopy(t *testing.T) { col := newFixedLenColumn(8, 10) - for i := 0; i < 10; i++ { + for i := range 10 { col.AppendInt64(int64(i)) } @@ -45,7 +45,7 @@ func TestColumnCopyReconstructFixedLen(t *testing.T) { results := make([]int64, 0, 1024) nulls := make([]bool, 0, 1024) sel := make([]int, 0, 1024) - for i := 0; i < 1024; i++ { + for i := range 1024 { if rand.Intn(10) < 6 { sel = append(sel, i) } @@ -76,7 +76,7 @@ func TestColumnCopyReconstructFixedLen(t *testing.T) { require.Equal(t, col.nullCount(), nullCnt) require.Len(t, sel, col.length) - for i := 0; i < 128; i++ { + for i := range 128 { if i%2 == 0 { col.AppendNull() } else { @@ -86,7 +86,7 @@ func TestColumnCopyReconstructFixedLen(t *testing.T) { require.Len(t, sel, col.length-128) require.Equal(t, nullCnt+128/2, col.nullCount()) - for i := 0; i < 128; i++ { + for i := range 128 { if i%2 == 0 { require.True(t, col.IsNull(len(sel)+i)) } else { @@ -101,7 +101,7 @@ func TestColumnCopyReconstructVarLen(t *testing.T) { results := make([]string, 0, 1024) nulls := make([]bool, 0, 1024) sel := make([]int, 0, 1024) - for i := 0; i < 1024; i++ { + for i := range 1024 { if rand.Intn(10) < 6 { sel = append(sel, i) } @@ -132,7 +132,7 @@ func TestColumnCopyReconstructVarLen(t *testing.T) { require.Equal(t, col.nullCount(), nullCnt) require.Len(t, sel, col.length) - for i := 0; i < 128; i++ { + for i := range 128 { if i%2 == 0 { col.AppendNull() 
} else { @@ -142,7 +142,7 @@ func TestColumnCopyReconstructVarLen(t *testing.T) { require.Len(t, sel, col.length-128) require.Equal(t, nullCnt+128/2, col.nullCount()) - for i := 0; i < 128; i++ { + for i := range 128 { if i%2 == 0 { require.True(t, col.IsNull(len(sel)+i)) } else { @@ -164,12 +164,12 @@ func TestLargeStringColumnOffset(t *testing.T) { func TestI64Column(t *testing.T) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}, 1024) col := chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendInt64(int64(i)) } i64s := col.Int64s() - for i := 0; i < 1024; i++ { + for i := range 1024 { require.Equal(t, int64(i), i64s[i]) i64s[i]++ } @@ -186,12 +186,12 @@ func TestI64Column(t *testing.T) { func TestF64Column(t *testing.T) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDouble)}, 1024) col := chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendFloat64(float64(i)) } f64s := col.Float64s() - for i := 0; i < 1024; i++ { + for i := range 1024 { require.Equal(t, float64(i), f64s[i]) f64s[i] /= 2 } @@ -208,12 +208,12 @@ func TestF64Column(t *testing.T) { func TestF32Column(t *testing.T) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeFloat)}, 1024) col := chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendFloat32(float32(i)) } f32s := col.Float32s() - for i := 0; i < 1024; i++ { + for i := range 1024 { require.Equal(t, float32(i), f32s[i]) f32s[i] /= 2 } @@ -230,12 +230,12 @@ func TestF32Column(t *testing.T) { func TestDurationSliceColumn(t *testing.T) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDuration)}, 1024) col := chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendDuration(types.Duration{Duration: time.Duration(i)}) } ds := col.GoDurations() - for i := 0; i < 1024; i++ { + for i := range 1024 { require.Equal(t, time.Duration(i), ds[i]) d := types.Duration{Duration: ds[i]} d, _ = d.Add(d) @@ -254,7 +254,7 @@ func TestDurationSliceColumn(t *testing.T) { func TestMyDecimal(t *testing.T) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeNewDecimal)}, 1024) col := chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { d := new(types.MyDecimal) err := d.FromFloat64(float64(i) * 1.1) require.NoError(t, err) @@ -262,7 +262,7 @@ func TestMyDecimal(t *testing.T) { } ds := col.Decimals() - for i := 0; i < 1024; i++ { + for i := range 1024 { d := new(types.MyDecimal) err := d.FromFloat64(float64(i) * 1.1) require.NoError(t, err) @@ -294,7 +294,7 @@ func TestMyDecimal(t *testing.T) { func TestStringColumn(t *testing.T) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeVarString)}, 1024) col := chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendString(fmt.Sprintf("%v", i*i)) } @@ -310,7 +310,7 @@ func TestStringColumn(t *testing.T) { func TestSetColumn(t *testing.T) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeSet)}, 1024) col := chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendSet(types.Set{Name: fmt.Sprintf("%v", i), Value: uint64(i)}) } @@ -330,7 +330,7 @@ func TestSetColumn(t *testing.T) { func TestJSONColumn(t *testing.T) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeJSON)}, 1024) col := chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { j := 
new(types.BinaryJSON) err := j.UnmarshalJSON([]byte(fmt.Sprintf(`{"%v":%v}`, i, i))) require.NoError(t, err) @@ -350,7 +350,7 @@ func TestJSONColumn(t *testing.T) { func TestTimeColumn(t *testing.T) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDatetime)}, 1024) col := chk.Column(0) - for i := 0; i < 1024; i++ { + for range 1024 { col.AppendTime(types.CurrentTime(mysql.TypeDatetime)) time.Sleep(time.Millisecond / 10) } @@ -371,7 +371,7 @@ func TestTimeColumn(t *testing.T) { func TestDurationColumn(t *testing.T) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDuration)}, 1024) col := chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendDuration(types.Duration{Duration: time.Second * time.Duration(i)}) } @@ -388,7 +388,7 @@ func TestDurationColumn(t *testing.T) { func TestEnumColumn(t *testing.T) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeEnum)}, 1024) col := chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendEnum(types.Enum{Name: fmt.Sprintf("%v", i), Value: uint64(i)}) } @@ -408,7 +408,7 @@ func TestEnumColumn(t *testing.T) { func TestNullsColumn(t *testing.T) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}, 1024) col := chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { if i%2 == 0 { col.AppendNull() continue @@ -434,7 +434,7 @@ func TestReconstructFixedLen(t *testing.T) { results := make([]int64, 0, 1024) nulls := make([]bool, 0, 1024) sel := make([]int, 0, 1024) - for i := 0; i < 1024; i++ { + for i := range 1024 { if rand.Intn(10) < 6 { sel = append(sel, i) } @@ -465,7 +465,7 @@ func TestReconstructFixedLen(t *testing.T) { require.Equal(t, col.nullCount(), nullCnt) require.Len(t, sel, col.length) - for i := 0; i < 128; i++ { + for i := range 128 { if i%2 == 0 { col.AppendNull() } else { @@ -475,7 +475,7 @@ func TestReconstructFixedLen(t *testing.T) { require.Len(t, sel, col.length-128) require.Equal(t, nullCnt+128/2, col.nullCount()) - for i := 0; i < 128; i++ { + for i := range 128 { if i%2 == 0 { require.True(t, col.IsNull(len(sel)+i)) } else { @@ -490,7 +490,7 @@ func TestReconstructVarLen(t *testing.T) { results := make([]string, 0, 1024) nulls := make([]bool, 0, 1024) sel := make([]int, 0, 1024) - for i := 0; i < 1024; i++ { + for i := range 1024 { if rand.Intn(10) < 6 { sel = append(sel, i) } @@ -521,7 +521,7 @@ func TestReconstructVarLen(t *testing.T) { require.Equal(t, col.nullCount(), nullCnt) require.Len(t, sel, col.length) - for i := 0; i < 128; i++ { + for i := range 128 { if i%2 == 0 { col.AppendNull() } else { @@ -531,7 +531,7 @@ func TestReconstructVarLen(t *testing.T) { require.Len(t, sel, col.length-128) require.Equal(t, nullCnt+128/2, col.nullCount()) - for i := 0; i < 128; i++ { + for i := range 128 { if i%2 == 0 { require.True(t, col.IsNull(len(sel)+i)) } else { @@ -546,7 +546,7 @@ func TestPreAllocInt64(t *testing.T) { col.ResizeInt64(256, true) i64s := col.Int64s() require.Equal(t, 256, len(i64s)) - for i := 0; i < 256; i++ { + for i := range 256 { require.True(t, col.IsNull(i)) } col.AppendInt64(2333) @@ -562,7 +562,7 @@ func TestPreAllocUint64(t *testing.T) { col.ResizeUint64(256, true) u64s := col.Uint64s() require.Equal(t, 256, len(u64s)) - for i := 0; i < 256; i++ { + for i := range 256 { require.True(t, col.IsNull(i)) } col.AppendUint64(2333) @@ -576,7 +576,7 @@ func TestPreAllocFloat32(t *testing.T) { col.ResizeFloat32(256, true) f32s := col.Float32s() 
require.Equal(t, 256, len(f32s)) - for i := 0; i < 256; i++ { + for i := range 256 { require.True(t, col.IsNull(i)) } col.AppendFloat32(2333) @@ -590,7 +590,7 @@ func TestPreAllocFloat64(t *testing.T) { col.ResizeFloat64(256, true) f64s := col.Float64s() require.Equal(t, 256, len(f64s)) - for i := 0; i < 256; i++ { + for i := range 256 { require.True(t, col.IsNull(i)) } col.AppendFloat64(2333) @@ -604,7 +604,7 @@ func TestPreAllocDecimal(t *testing.T) { col.ResizeDecimal(256, true) ds := col.Decimals() require.Equal(t, 256, len(ds)) - for i := 0; i < 256; i++ { + for i := range 256 { require.True(t, col.IsNull(i)) } col.AppendMyDecimal(new(types.MyDecimal)) @@ -617,7 +617,7 @@ func TestPreAllocTime(t *testing.T) { col.ResizeTime(256, true) ds := col.Times() require.Equal(t, 256, len(ds)) - for i := 0; i < 256; i++ { + for i := range 256 { require.True(t, col.IsNull(i)) } col.AppendTime(types.ZeroDatetime) @@ -631,7 +631,7 @@ func TestNull(t *testing.T) { require.Equal(t, 1024, col.nullCount()) notNulls := make(map[int]struct{}) - for i := 0; i < 512; i++ { + for range 512 { idx := rand.Intn(1024) notNulls[idx] = struct{}{} col.SetNull(idx, false) @@ -666,7 +666,7 @@ func TestSetNulls(t *testing.T) { require.Zero(t, col.nullCount()) nullMap := make(map[int]struct{}) - for i := 0; i < 100; i++ { + for range 100 { begin := rand.Intn(1024) l := rand.Intn(37) end := begin + l @@ -688,7 +688,7 @@ func TestSetNulls(t *testing.T) { func TestResizeReserve(t *testing.T) { cI64s := newFixedLenColumn(sizeInt64, 0) require.Zero(t, cI64s.length) - for i := 0; i < 100; i++ { + for range 100 { n := rand.Intn(1024) cI64s.ResizeInt64(n, true) require.Equal(t, n, cI64s.length) @@ -699,7 +699,7 @@ func TestResizeReserve(t *testing.T) { require.Zero(t, len(cI64s.Int64s())) cStrs := newVarLenColumn(0) - for i := 0; i < 100; i++ { + for range 100 { n := rand.Intn(1024) cStrs.ReserveString(n) require.Zero(t, cStrs.length) @@ -711,7 +711,7 @@ func TestResizeReserve(t *testing.T) { func TestGetRaw(t *testing.T) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeFloat)}, 1024) col := chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendFloat32(float32(i)) } it := NewIterator4Chunk(chk) @@ -726,7 +726,7 @@ func TestGetRaw(t *testing.T) { chk = NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeVarString)}, 1024) col = chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendString(fmt.Sprint(i)) } it = NewIterator4Chunk(chk) @@ -740,59 +740,59 @@ func TestGetRaw(t *testing.T) { func TestResize(t *testing.T) { col := NewColumn(types.NewFieldType(mysql.TypeLonglong), 1024) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendInt64(int64(i)) } col.ResizeInt64(1024, false) - for i := 0; i < 1024; i++ { + for i := range 1024 { require.Equal(t, int64(0), col.Int64s()[i]) } col = NewColumn(types.NewFieldType(mysql.TypeFloat), 1024) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendFloat32(float32(i)) } col.ResizeFloat32(1024, false) - for i := 0; i < 1024; i++ { + for i := range 1024 { require.Equal(t, float32(0), col.Float32s()[i]) } col = NewColumn(types.NewFieldType(mysql.TypeDouble), 1024) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendFloat64(float64(i)) } col.ResizeFloat64(1024, false) - for i := 0; i < 1024; i++ { + for i := range 1024 { require.Equal(t, float64(0), col.Float64s()[i]) } col = NewColumn(types.NewFieldType(mysql.TypeNewDecimal), 1024) - for i := 0; i < 1024; i++ { + for i 
:= range 1024 { col.AppendMyDecimal(new(types.MyDecimal).FromInt(int64(i))) } col.ResizeDecimal(1024, false) - for i := 0; i < 1024; i++ { + for i := range 1024 { var d types.MyDecimal require.Equal(t, d, col.Decimals()[i]) } col = NewColumn(types.NewFieldType(mysql.TypeDuration), 1024) - for i := 0; i < 1024; i++ { + for i := range 1024 { col.AppendDuration(types.Duration{Duration: time.Duration(i), Fsp: i}) } col.ResizeGoDuration(1024, false) - for i := 0; i < 1024; i++ { + for i := range 1024 { require.Equal(t, time.Duration(0), col.GoDurations()[i]) } col = NewColumn(types.NewFieldType(mysql.TypeDatetime), 1024) - for i := 0; i < 1024; i++ { + for range 1024 { gt := types.FromDate(rand.Intn(2200), rand.Intn(10)+1, rand.Intn(20)+1, rand.Intn(12), rand.Intn(60), rand.Intn(60), rand.Intn(1000000)) t := types.NewTime(gt, 0, 0) col.AppendTime(t) } col.ResizeTime(1024, false) - for i := 0; i < 1024; i++ { + for i := range 1024 { var time types.Time require.Equal(t, time, col.Times()[i]) } @@ -801,7 +801,7 @@ func TestResize(t *testing.T) { func BenchmarkDurationRow(b *testing.B) { chk1 := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDuration)}, 1024) col1 := chk1.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { col1.AppendDuration(types.Duration{Duration: time.Second * time.Duration(i)}) } chk2 := chk1.CopyConstruct() @@ -827,7 +827,7 @@ func BenchmarkDurationRow(b *testing.B) { func BenchmarkDurationVec(b *testing.B) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDuration)}, 1024) col1 := chk.Column(0) - for i := 0; i < 1024; i++ { + for i := range 1024 { col1.AppendDuration(types.Duration{Duration: time.Second * time.Duration(i)}) } col2 := col1.CopyConstruct(nil) @@ -840,7 +840,7 @@ func BenchmarkDurationVec(b *testing.B) { b.ResetTimer() for k := 0; k < b.N; k++ { result.ResizeGoDuration(1024, true) - for i := 0; i < 1024; i++ { + for i := range 1024 { d1 := types.Duration{Duration: ds1[i]} d2 := types.Duration{Duration: ds2[i]} r, err := d1.Add(d2) @@ -855,7 +855,7 @@ func BenchmarkDurationVec(b *testing.B) { func BenchmarkTimeRow(b *testing.B) { chk1 := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDate)}, 1024) col1 := chk1.Column(0) - for i := 0; i < 1024; i++ { + for range 1024 { col1.AppendTime(types.ZeroDate) } chk2 := chk1.CopyConstruct() @@ -881,7 +881,7 @@ func BenchmarkTimeRow(b *testing.B) { func BenchmarkTimeVec(b *testing.B) { chk := NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeDate)}, 1024) col1 := chk.Column(0) - for i := 0; i < 1024; i++ { + for range 1024 { col1.AppendTime(types.ZeroDate) } col2 := col1.CopyConstruct(nil) @@ -894,7 +894,7 @@ func BenchmarkTimeVec(b *testing.B) { b.ResetTimer() for k := 0; k < b.N; k++ { result.ResizeTime(1024, true) - for i := 0; i < 1024; i++ { + for i := range 1024 { if r := ds1[i].Compare(ds2[i]); r > 0 { rs[i] = ds1[i] } else { @@ -909,7 +909,7 @@ func genNullCols(n int) []*Column { for i := range cols { cols[i] = NewColumn(types.NewFieldType(mysql.TypeLonglong), 1024) cols[i].ResizeInt64(1024, false) - for j := 0; j < 1024; j++ { + for j := range 1024 { if rand.Intn(10) < 5 { cols[i].SetNull(j, true) } @@ -919,18 +919,18 @@ func genNullCols(n int) []*Column { } func TestVectorizedNulls(t *testing.T) { - for i := 0; i < 256; i++ { + for range 256 { cols := genNullCols(4) lCol, rCol := cols[0], cols[1] vecResult, rowResult := cols[2], cols[3] vecResult.SetNulls(0, 1024, false) rowResult.SetNulls(0, 1024, false) 
vecResult.MergeNulls(lCol, rCol) - for i := 0; i < 1024; i++ { + for i := range 1024 { rowResult.SetNull(i, lCol.IsNull(i) || rCol.IsNull(i)) } - for i := 0; i < 1024; i++ { + for i := range 1024 { require.Equal(t, vecResult.IsNull(i), rowResult.IsNull(i)) } } @@ -956,7 +956,7 @@ func TestResetColumn(t *testing.T) { func BenchmarkMergeNullsVectorized(b *testing.B) { cols := genNullCols(3) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { cols[0].MergeNulls(cols[1:]...) } } @@ -964,8 +964,8 @@ func BenchmarkMergeNullsVectorized(b *testing.B) { func BenchmarkMergeNullsNonVectorized(b *testing.B) { cols := genNullCols(3) b.ResetTimer() - for i := 0; i < b.N; i++ { - for i := 0; i < 1024; i++ { + for range b.N { + for i := range 1024 { cols[0].SetNull(i, cols[1].IsNull(i) || cols[2].IsNull(i)) } } diff --git a/pkg/util/chunk/iterator.go b/pkg/util/chunk/iterator.go index 1ec82e960ce61..9b1a216ae66de 100644 --- a/pkg/util/chunk/iterator.go +++ b/pkg/util/chunk/iterator.go @@ -413,7 +413,7 @@ type multiIterator struct { // NewMultiIterator creates a new multiIterator func NewMultiIterator(iters ...Iterator) Iterator { iter := &multiIterator{} - for i := 0; i < len(iters); i++ { + for i := range iters { if iters[i].Len() > 0 { iter.iters = append(iter.iters, iters[i]) iter.length += iters[i].Len() diff --git a/pkg/util/chunk/iterator_test.go b/pkg/util/chunk/iterator_test.go index 18a1cdb5412f0..c4573d6c845e8 100644 --- a/pkg/util/chunk/iterator_test.go +++ b/pkg/util/chunk/iterator_test.go @@ -26,7 +26,7 @@ func TestIteratorOnSel(t *testing.T) { fields := []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)} chk := New(fields, 32, 1024) sel := make([]int, 0, 1024) - for i := 0; i < 1024; i++ { + for i := range 1024 { chk.AppendInt64(0, int64(i)) if i%2 == 0 { sel = append(sel, i) @@ -59,8 +59,8 @@ func TestMultiIterator(t *testing.T) { fields := []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)} chk := New(fields, 32, 1024) n := 10 - var expected []int64 - for i := 0; i < n; i++ { + expected := make([]int64, 0, n) + for i := range n { chk.AppendInt64(0, int64(i)) expected = append(expected, int64(i)) } @@ -96,17 +96,17 @@ func TestIterator(t *testing.T) { fields := []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)} chk := New(fields, 32, 1024) n := 10 - var expected []int64 - for i := 0; i < n; i++ { + expected := make([]int64, 0, n) + for i := range n { chk.AppendInt64(0, int64(i)) expected = append(expected, int64(i)) } - var rows []Row + var rows = make([]Row, 0, n) li := NewList(fields, 1, 2) li2 := NewList(fields, 8, 16) - var ptrs []RowPtr - var ptrs2 []RowPtr - for i := 0; i < n; i++ { + var ptrs = make([]RowPtr, 0, n) + var ptrs2 = make([]RowPtr, 0, n) + for i := range n { rows = append(rows, chk.GetRow(i)) ptr := li.AppendRow(chk.GetRow(i)) ptrs = append(ptrs, ptr) @@ -118,7 +118,7 @@ func TestIterator(t *testing.T) { it = NewIterator4Slice(rows) checkEqual(it, expected, t) it.Begin() - for i := 0; i < 5; i++ { + for i := range 5 { require.Equal(t, rows[i], it.Current()) it.Next() } @@ -129,7 +129,7 @@ func TestIterator(t *testing.T) { it = NewIterator4Chunk(chk) checkEqual(it, expected, t) it.Begin() - for i := 0; i < 5; i++ { + for i := range 5 { require.Equal(t, chk.GetRow(i), it.Current()) it.Next() } @@ -140,7 +140,7 @@ func TestIterator(t *testing.T) { it = NewIterator4List(li) checkEqual(it, expected, t) it.Begin() - for i := 0; i < 5; i++ { + for i := range 5 { require.Equal(t, li.GetRow(ptrs[i]), it.Current()) it.Next() } @@ -151,7 +151,7 @@ 
func TestIterator(t *testing.T) { it = NewIterator4RowPtr(li, ptrs) checkEqual(it, expected, t) it.Begin() - for i := 0; i < 5; i++ { + for i := range 5 { require.Equal(t, li.GetRow(ptrs[i]), it.Current()) it.Next() } @@ -162,7 +162,7 @@ func TestIterator(t *testing.T) { it = NewIterator4RowPtr(li2, ptrs2) checkEqual(it, expected, t) it.Begin() - for i := 0; i < 5; i++ { + for i := range 5 { require.Equal(t, li2.GetRow(ptrs2[i]), it.Current()) it.Next() } @@ -175,7 +175,7 @@ func TestIterator(t *testing.T) { it = NewIterator4RowContainer(rc) checkEqual(it, expected, t) it.Begin() - for i := 0; i < 5; i++ { + for i := range 5 { require.Equal(t, li.GetRow(ptrs[i]), it.Current()) it.Next() } diff --git a/pkg/util/chunk/list.go b/pkg/util/chunk/list.go index 70b7595e9bed7..998abae56f9bf 100644 --- a/pkg/util/chunk/list.go +++ b/pkg/util/chunk/list.go @@ -176,9 +176,9 @@ type ListWalkFunc = func(row Row) error // Walk iterate the list and call walkFunc for each row. func (l *List) Walk(walkFunc ListWalkFunc) error { - for i := 0; i < len(l.chunks); i++ { + for i := range l.chunks { chk := l.chunks[i] - for j := 0; j < chk.NumRows(); j++ { + for j := range chk.NumRows() { err := walkFunc(chk.GetRow(j)) if err != nil { return errors.Trace(err) diff --git a/pkg/util/chunk/list_test.go b/pkg/util/chunk/list_test.go index 4ec1b777e41b2..d8ea395557d15 100644 --- a/pkg/util/chunk/list_test.go +++ b/pkg/util/chunk/list_test.go @@ -35,7 +35,7 @@ func TestList(t *testing.T) { srcRow := srcChunk.GetRow(0) // Test basic append. - for i := 0; i < 5; i++ { + for range 5 { l.AppendRow(srcRow) } require.Equal(t, 3, l.NumChunks()) @@ -46,7 +46,7 @@ func TestList(t *testing.T) { l.Reset() require.Len(t, l.freelist, 3) - for i := 0; i < 5; i++ { + for range 5 { l.AppendRow(srcRow) } require.Empty(t, l.freelist) @@ -65,7 +65,7 @@ func TestList(t *testing.T) { // Test iteration. 
l.Reset() - for i := 0; i < 5; i++ { + for i := range 5 { tmp := NewChunkWithCapacity(fields, 32) tmp.AppendInt64(0, int64(i)) l.AppendRow(tmp.GetRow(0)) @@ -133,11 +133,11 @@ func BenchmarkListMemoryUsage(b *testing.B) { initCap := 50 list := NewList(fieldTypes, 2, 8) - for i := 0; i < initCap; i++ { + for range initCap { list.AppendRow(row) } b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { list.GetMemTracker().BytesConsumed() } } @@ -149,7 +149,7 @@ func BenchmarkListAdd(b *testing.B) { l := NewList(fields, numRow, numRow) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { l.Add(chk) } } @@ -163,7 +163,7 @@ func BenchmarkListGetRow(b *testing.B) { } rnd := rand.New(rand.NewSource(0)) ptrs := make([]RowPtr, 0, b.N) - for i := 0; i < min(b.N, 10000); i++ { + for range min(b.N, 10000) { ptrs = append(ptrs, RowPtr{ ChkIdx: rnd.Uint32() % uint32(numChk), RowIdx: rnd.Uint32() % uint32(numRow), @@ -173,7 +173,7 @@ func BenchmarkListGetRow(b *testing.B) { ptrs = append(ptrs, ptrs[i%10000]) } b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { l.GetRow(ptrs[i]) } } diff --git a/pkg/util/chunk/mutrow_test.go b/pkg/util/chunk/mutrow_test.go index c349a2b812166..48c2c1feaa239 100644 --- a/pkg/util/chunk/mutrow_test.go +++ b/pkg/util/chunk/mutrow_test.go @@ -29,7 +29,7 @@ func TestMutRow(t *testing.T) { mutRow := MutRowFromTypes(allTypes) row := mutRow.ToRow() typeCtx := types.DefaultStmtNoWarningContext - for i := 0; i < row.Len(); i++ { + for i := range row.Len() { val := zeroValForType(allTypes[i]) d := row.GetDatum(i, allTypes[i]) d2 := types.NewDatum(val) @@ -117,7 +117,7 @@ func BenchmarkMutRowSetRow(b *testing.B) { rowChk.AppendString(1, "abcd") row := rowChk.GetRow(0) mutRow := MutRowFromValues(1, "abcd") - for i := 0; i < b.N; i++ { + for range b.N { mutRow.SetRow(row) } } @@ -126,7 +126,7 @@ func BenchmarkMutRowSetDatums(b *testing.B) { b.ReportAllocs() mutRow := MutRowFromValues(1, "abcd") datums := []types.Datum{types.NewDatum(1), types.NewDatum("abcd")} - for i := 0; i < b.N; i++ { + for range b.N { mutRow.SetDatums(datums...) 
} } @@ -134,7 +134,7 @@ func BenchmarkMutRowSetDatums(b *testing.B) { func BenchmarkMutRowSetValues(b *testing.B) { b.ReportAllocs() mutRow := MutRowFromValues(1, "abcd") - for i := 0; i < b.N; i++ { + for range b.N { mutRow.SetValues(1, "abcd") } } @@ -145,7 +145,7 @@ func BenchmarkMutRowFromTypes(b *testing.B) { types.NewFieldType(mysql.TypeLonglong), types.NewFieldType(mysql.TypeVarchar), } - for i := 0; i < b.N; i++ { + for range b.N { MutRowFromTypes(tps) } } @@ -153,7 +153,7 @@ func BenchmarkMutRowFromTypes(b *testing.B) { func BenchmarkMutRowFromDatums(b *testing.B) { b.ReportAllocs() datums := []types.Datum{types.NewDatum(1), types.NewDatum("abc")} - for i := 0; i < b.N; i++ { + for range b.N { MutRowFromDatums(datums) } } @@ -161,7 +161,7 @@ func BenchmarkMutRowFromDatums(b *testing.B) { func BenchmarkMutRowFromValues(b *testing.B) { b.ReportAllocs() values := []any{1, "abc"} - for i := 0; i < b.N; i++ { + for range b.N { MutRowFromValues(values) } } @@ -207,8 +207,8 @@ func BenchmarkMutRowShallowCopyPartialRow(b *testing.B) { mutRow := MutRowFromTypes(colTypes) row := MutRowFromValues("abc", "abcdefg", 123, 456, types.ZeroDatetime).ToRow() b.ResetTimer() - for i := 0; i < b.N; i++ { - for j := 0; j < rowsNum; j++ { + for range b.N { + for range rowsNum { mutRow.ShallowCopyPartialRow(0, row) } } @@ -219,9 +219,9 @@ func BenchmarkChunkAppendPartialRow(b *testing.B) { chk := newChunkWithInitCap(rowsNum, 0, 0, 8, 8, sizeTime) row := MutRowFromValues("abc", "abcdefg", 123, 456, types.ZeroDatetime).ToRow() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { chk.Reset() - for j := 0; j < rowsNum; j++ { + for range rowsNum { chk.AppendPartialRow(0, row) } } diff --git a/pkg/util/chunk/row.go b/pkg/util/chunk/row.go index d3fd285f1936f..325109c91d3e6 100644 --- a/pkg/util/chunk/row.go +++ b/pkg/util/chunk/row.go @@ -130,7 +130,7 @@ func (r Row) GetDatumRow(fields []*types.FieldType) []types.Datum { // GetDatumRowWithBuffer gets datum using the buffer datumRow. func (r Row) GetDatumRowWithBuffer(fields []*types.FieldType, datumRow []types.Datum) []types.Datum { - for colIdx := 0; colIdx < len(datumRow); colIdx++ { + for colIdx := range datumRow { r.DatumWithBuffer(colIdx, fields[colIdx], &datumRow[colIdx]) } return datumRow @@ -241,7 +241,7 @@ func (r Row) CopyConstruct() Row { // ToString returns all the values in a row. func (r Row) ToString(ft []*types.FieldType) string { var buf []byte - for colIdx := 0; colIdx < r.Chunk().NumCols(); colIdx++ { + for colIdx := range r.Chunk().NumCols() { if r.IsNull(colIdx) { buf = append(buf, "NULL"...) } else { diff --git a/pkg/util/chunk/row_container.go b/pkg/util/chunk/row_container.go index 9b443569fb05c..eb5bc6f351f6c 100644 --- a/pkg/util/chunk/row_container.go +++ b/pkg/util/chunk/row_container.go @@ -173,7 +173,7 @@ func (c *RowContainer) spillToDisk(preSpillError error) { c.m.records.spillError = preSpillError return } - for i := 0; i < n; i++ { + for i := range n { chk := c.m.records.inMemory.GetChunk(i) err = c.m.records.inDisk.Add(chk) if err != nil { @@ -588,13 +588,13 @@ func (c *SortedRowContainer) Sort() (ret error) { return } c.ptrM.rowPtrs = make([]RowPtr, 0, c.NumRow()) // The memory usage has been tracked in SortedRowContainer.Add() function - for chkIdx := 0; chkIdx < c.NumChunks(); chkIdx++ { + for chkIdx := range c.NumChunks() { rowChk, err := c.GetChunk(chkIdx) // err must be nil, because the chunk is in memory. 
if err != nil { panic(err) } - for rowIdx := 0; rowIdx < rowChk.NumRows(); rowIdx++ { + for rowIdx := range rowChk.NumRows() { c.ptrM.rowPtrs = append(c.ptrM.rowPtrs, RowPtr{ChkIdx: uint32(chkIdx), RowIdx: uint32(rowIdx)}) } } diff --git a/pkg/util/chunk/row_container_reader.go b/pkg/util/chunk/row_container_reader.go index ca124083079c5..3b2648d9634a0 100644 --- a/pkg/util/chunk/row_container_reader.go +++ b/pkg/util/chunk/row_container_reader.go @@ -122,7 +122,7 @@ func (reader *rowContainerReader) startWorker() { defer close(reader.rowCh) defer reader.wg.Done() - for chkIdx := 0; chkIdx < reader.rc.NumChunks(); chkIdx++ { + for chkIdx := range reader.rc.NumChunks() { chk, err := reader.rc.GetChunk(chkIdx) failpoint.Inject("get-chunk-error", func(val failpoint.Value) { if val.(bool) { @@ -134,7 +134,7 @@ func (reader *rowContainerReader) startWorker() { return } - for i := 0; i < chk.NumRows(); i++ { + for i := range chk.NumRows() { select { case reader.rowCh <- chk.GetRow(i): case <-reader.ctx.Done(): diff --git a/pkg/util/chunk/row_container_test.go b/pkg/util/chunk/row_container_test.go index dd1e1cc1a57a0..904ede140b678 100644 --- a/pkg/util/chunk/row_container_test.go +++ b/pkg/util/chunk/row_container_test.go @@ -46,7 +46,7 @@ func TestSel(t *testing.T) { n := 64 chk := NewChunkWithCapacity(fields, sz) numRows := 0 - for i := 0; i < n-sz; i++ { + for i := range n - sz { chk.AppendInt64(0, int64(i)) if chk.NumRows() == sz { chk.SetSel([]int{0, 2}) @@ -93,7 +93,7 @@ func TestSpillAction(t *testing.T) { rc := NewRowContainer(fields, sz) chk := NewChunkWithCapacity(fields, sz) - for i := 0; i < sz; i++ { + for i := range sz { chk.AppendInt64(0, int64(i)) } var tracker *memory.Tracker @@ -118,7 +118,7 @@ func TestSpillAction(t *testing.T) { resChk, err := rc.GetChunk(0) require.NoError(t, err) require.Equal(t, chk.NumRows(), resChk.NumRows()) - for rowIdx := 0; rowIdx < resChk.NumRows(); rowIdx++ { + for rowIdx := range resChk.NumRows() { require.Equal(t, chk.GetRow(rowIdx).GetDatumRow(fields), resChk.GetRow(rowIdx).GetDatumRow(fields)) } // Write again @@ -131,7 +131,7 @@ func TestSpillAction(t *testing.T) { resChk, err = rc.GetChunk(2) require.NoError(t, err) require.Equal(t, chk.NumRows(), resChk.NumRows()) - for rowIdx := 0; rowIdx < resChk.NumRows(); rowIdx++ { + for rowIdx := range resChk.NumRows() { require.Equal(t, chk.GetRow(rowIdx).GetDatumRow(fields), resChk.GetRow(rowIdx).GetDatumRow(fields)) } @@ -155,7 +155,7 @@ func TestSortedRowContainerSortSpillAction(t *testing.T) { rc := NewSortedRowContainer(fields, sz, byItemsDesc, keyColumns, keyCmpFuncs) chk := NewChunkWithCapacity(fields, sz) - for i := 0; i < sz; i++ { + for i := range sz { chk.AppendInt64(0, int64(i)) } var tracker *memory.Tracker @@ -176,7 +176,7 @@ func TestSortedRowContainerSortSpillAction(t *testing.T) { require.NoError(t, err) require.True(t, rc.AlreadySpilledSafeForTest()) // The result has been sorted. 
- for i := 0; i < sz*2; i++ { + for i := range sz * 2 { row, err := rc.GetSortedRow(i) require.NoError(t, err) require.Equal(t, int64(i/2), row.GetInt64(0)) @@ -195,7 +195,7 @@ func TestRowContainerResetAndAction(t *testing.T) { rc := NewRowContainer(fields, sz) chk := NewChunkWithCapacity(fields, sz) - for i := 0; i < sz; i++ { + for i := range sz { chk.AppendInt64(0, int64(i)) } var tracker *memory.Tracker @@ -240,7 +240,7 @@ func TestSpillActionDeadLock(t *testing.T) { rc := NewRowContainer(fields, sz) chk := NewChunkWithCapacity(fields, sz) - for i := 0; i < sz; i++ { + for i := range sz { chk.AppendInt64(0, int64(i)) } var tracker *memory.Tracker @@ -266,7 +266,7 @@ func TestActionBlocked(t *testing.T) { rc := NewRowContainer(fields, sz) chk := NewChunkWithCapacity(fields, sz) - for i := 0; i < sz; i++ { + for i := range sz { chk.AppendInt64(0, int64(i)) } var tracker *memory.Tracker @@ -276,7 +276,7 @@ func TestActionBlocked(t *testing.T) { tracker.SetBytesLimit(1450) ac := rc.ActionSpill() tracker.FallbackOldAndSetNewAction(ac) - for i := 0; i < 10; i++ { + for range 10 { err = rc.Add(chk) require.NoError(t, err) } @@ -317,10 +317,10 @@ func insertBytesRowsIntoRowContainer(t *testing.T, chkCount int, rowPerChk int) allRows := [][]byte{} // insert chunks - for i := 0; i < chkCount; i++ { + for range chkCount { chk := NewChunkWithCapacity(fields, rowPerChk) // insert rows for each chunk - for j := 0; j < rowPerChk; j++ { + for range rowPerChk { length := rand2.Uint32() randomBytes := make([]byte, length%4096) _, err := rand.Read(randomBytes) @@ -347,8 +347,8 @@ func TestRowContainerReaderInDisk(t *testing.T) { reader := NewRowContainerReader(rc) defer reader.Close() - for i := 0; i < 16; i++ { - for j := 0; j < 16; j++ { + for i := range 16 { + for j := range 16 { row := reader.Current() require.Equal(t, allRows[i*16+j], row.GetBytes(0)) reader.Next() @@ -369,14 +369,14 @@ func TestCloseRowContainerReader(t *testing.T) { // read 8.5 of these chunks reader := NewRowContainerReader(rc) defer reader.Close() - for i := 0; i < 8; i++ { - for j := 0; j < 16; j++ { + for i := range 8 { + for j := range 16 { row := reader.Current() require.Equal(t, allRows[i*16+j], row.GetBytes(0)) reader.Next() } } - for j := 0; j < 8; j++ { + for j := range 8 { row := reader.Current() require.Equal(t, allRows[8*16+j], row.GetBytes(0)) reader.Next() @@ -400,8 +400,8 @@ func TestConcurrentSpillWithRowContainerReader(t *testing.T) { reader := NewRowContainerReader(rc) defer reader.Close() - for i := 0; i < 16; i++ { - for j := 0; j < 1024; j++ { + for i := range 16 { + for j := range 1024 { row := reader.Current() require.Equal(t, allRows[i*1024+j], row.GetBytes(0)) reader.Next() @@ -423,8 +423,8 @@ func TestReadAfterSpillWithRowContainerReader(t *testing.T) { reader := NewRowContainerReader(rc) defer reader.Close() - for i := 0; i < 8; i++ { - for j := 0; j < 1024; j++ { + for i := range 8 { + for j := range 1024 { row := reader.Current() require.Equal(t, allRows[i*1024+j], row.GetBytes(0)) reader.Next() @@ -432,7 +432,7 @@ func TestReadAfterSpillWithRowContainerReader(t *testing.T) { } rc.SpillToDisk() for i := 8; i < 16; i++ { - for j := 0; j < 1024; j++ { + for j := range 1024 { row := reader.Current() require.Equal(t, allRows[i*1024+j], row.GetBytes(0)) reader.Next() @@ -444,7 +444,7 @@ func TestPanicWhenSpillToDisk(t *testing.T) { fields := []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)} sz := 20 chk := NewChunkWithCapacity(fields, sz) - for i := 0; i < sz; i++ { + for i := range sz { 
chk.AppendInt64(0, int64(i)) } @@ -480,7 +480,7 @@ func TestPanicDuringSortedRowContainerSpill(t *testing.T) { rc := NewSortedRowContainer(fields, sz, byItemsDesc, keyColumns, keyCmpFuncs) chk := NewChunkWithCapacity(fields, sz) - for i := 0; i < sz; i++ { + for i := range sz { chk.AppendInt64(0, int64(i)) } var tracker *memory.Tracker @@ -524,14 +524,14 @@ func TestInterruptedDuringSpilling(t *testing.T) { rc.GetMemTracker().AttachTo(rootTracker) defer rc.Close() chk := NewChunkWithCapacity(fields, sz) - for i := 0; i < sz; i++ { + for i := range sz { chk.AppendInt64(0, int64(i)) chk.AppendInt64(1, int64(i)) chk.AppendInt64(2, int64(i)) chk.AppendString(3, "testtesttest") chk.AppendInt64(4, int64(i)) } - for i := 0; i < 102400; i++ { + for range 102400 { rc.Add(chk) } wg := sync.WaitGroup{} @@ -582,9 +582,9 @@ func benchmarkRowContainerReaderInDiskWithRowLength(b *testing.B, rowLength int) rc.SpillToDisk() // insert `b.N * 1<<10` rows (`b.N` chunks) into the rc - for i := 0; i < b.N; i++ { + for range b.N { chk := NewChunkWithCapacity(fields, 1<<10) - for j := 0; j < 1<<10; j++ { + for range 1 << 10 { chk.AppendBytes(0, randomBytes) } @@ -595,7 +595,7 @@ func benchmarkRowContainerReaderInDiskWithRowLength(b *testing.B, rowLength int) defer reader.Close() b.StartTimer() for n := 0; n < b.N; n++ { - for i := 0; i < 1<<10; i++ { + for range 1 << 10 { reader.Next() } } diff --git a/pkg/util/chunk/row_in_disk.go b/pkg/util/chunk/row_in_disk.go index 77c378eacc790..2fb0415084ca4 100644 --- a/pkg/util/chunk/row_in_disk.go +++ b/pkg/util/chunk/row_in_disk.go @@ -132,7 +132,7 @@ func (l *DataInDiskByRows) GetChunk(chkIdx int) (*Chunk, error) { // for longer rows. r := bufio.NewReader(l.dataFile.getSectionReader(firstRowOffset)) format := rowInDisk{numCol: len(l.fieldTypes)} - for rowIdx := 0; rowIdx < chkSize; rowIdx++ { + for range chkSize { _, err = format.ReadFrom(r) if err != nil { formatChErr = err @@ -246,7 +246,7 @@ func (chk *chunkInDisk) WriteTo(w io.Writer) (written int64, err error) { numRows := chk.NumRows() chk.offsetsOfRows = make([]int64, 0, numRows) var format *diskFormatRow - for rowIdx := 0; rowIdx < numRows; rowIdx++ { + for rowIdx := range numRows { format = convertFromRow(chk.GetRow(rowIdx), format) chk.offsetsOfRows = append(chk.offsetsOfRows, chk.offWrite+written) @@ -339,7 +339,7 @@ func convertFromRow(row Row, reuse *diskFormatRow) (format *diskFormatRow) { cells: make([][]byte, 0, numCols), } } - for colIdx := 0; colIdx < numCols; colIdx++ { + for colIdx := range numCols { if row.IsNull(colIdx) { format.sizesOfColumns = append(format.sizesOfColumns, -1) } else { diff --git a/pkg/util/chunk/row_in_disk_test.go b/pkg/util/chunk/row_in_disk_test.go index c463beb03b63d..683b5fce68622 100644 --- a/pkg/util/chunk/row_in_disk_test.go +++ b/pkg/util/chunk/row_in_disk_test.go @@ -36,7 +36,7 @@ import ( func genString() string { retStr := "西xi瓜gua" factor := rand.Intn(5) - for i := 0; i < factor; i++ { + for range factor { retStr += retStr } @@ -53,9 +53,9 @@ func initChunks(numChk, numRow int) ([]*Chunk, []*types.FieldType) { } chks := make([]*Chunk, 0, numChk) - for chkIdx := 0; chkIdx < numChk; chkIdx++ { + for chkIdx := range numChk { chk := NewChunkWithCapacity(fields, numRow) - for rowIdx := 0; rowIdx < numRow; rowIdx++ { + for rowIdx := range numRow { data := int64(chkIdx*numRow + rowIdx) chk.AppendString(0, genString()) chk.AppendNull(1) @@ -91,8 +91,8 @@ func TestDataInDiskByRows(t *testing.T) { assert.Equal(t, numChk, l.NumChunks()) assert.Greater(t, 
l.GetDiskTracker().BytesConsumed(), int64(0)) - for chkIdx := 0; chkIdx < numChk; chkIdx++ { - for rowIdx := 0; rowIdx < numRow; rowIdx++ { + for chkIdx := range numChk { + for rowIdx := range numRow { row, err := l.GetRow(RowPtr{ChkIdx: uint32(chkIdx), RowIdx: uint32(rowIdx)}) assert.NoError(t, err) assert.Equal(t, chks[chkIdx].GetRow(rowIdx).GetDatumRow(fields), row.GetDatumRow(fields)) @@ -108,7 +108,7 @@ func BenchmarkDataInDiskByRowsAdd(b *testing.B) { defer l.Close() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { err := l.Add(chk) if err != nil { b.Fatal(err) @@ -129,7 +129,7 @@ func BenchmarkDataInDiskByRowsGetRow(b *testing.B) { } rnd := rand.New(rand.NewSource(0)) ptrs := make([]RowPtr, 0, b.N) - for i := 0; i < min(b.N, 10000); i++ { + for range min(b.N, 10000) { ptrs = append(ptrs, RowPtr{ ChkIdx: rnd.Uint32() % uint32(numChk), RowIdx: rnd.Uint32() % uint32(numRow), @@ -139,7 +139,7 @@ func BenchmarkDataInDiskByRowsGetRow(b *testing.B) { ptrs = append(ptrs, ptrs[i%10000]) } b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { _, err := l.GetRow(ptrs[i]) if err != nil { b.Fatal(err) @@ -238,8 +238,8 @@ func testDataInDiskByRows(t *testing.T, concurrency int) { } var ptrs []RowPtr - for i := 0; i < numChk; i++ { - for j := 0; j < numRow; j++ { + for i := range numChk { + for j := range numRow { ptrs = append(ptrs, RowPtr{ ChkIdx: uint32(i), RowIdx: uint32(j), @@ -256,7 +256,7 @@ func testDataInDiskByRows(t *testing.T, concurrency int) { wg := sync.WaitGroup{} wg.Add(concurrency) - for con := 0; con < concurrency; con++ { + for range concurrency { go func() { for i, rowPtr := range ptrs { row, err := lChecksum.GetRow(rowPtr) @@ -280,7 +280,7 @@ func BenchmarkDataInDiskByRows_GetChunk(b *testing.B) { } b.StartTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := i % numChk _, _ = l.GetChunk(v) } @@ -383,7 +383,7 @@ func TestDataInDiskByRowsWithChecksumAndEncryptReaderWithCacheNoFlush(t *testing func testReaderWithCache(t *testing.T) { testData := "0123456789" buf := bytes.NewBuffer(nil) - for i := 0; i < 102; i++ { + for range 102 { buf.WriteString(testData) } buf.WriteString("0123") diff --git a/pkg/util/codec/bench_test.go b/pkg/util/codec/bench_test.go index 3d1efddf79e77..1b8397ab50691 100644 --- a/pkg/util/codec/bench_test.go +++ b/pkg/util/codec/bench_test.go @@ -28,7 +28,7 @@ var valueCnt = 100 func composeEncodedData(size int) []byte { values := make([]types.Datum, 0, size) - for i := 0; i < size; i++ { + for i := range size { values = append(values, types.NewDatum(i)) } bs, _ := EncodeValue(time.UTC, nil, values...) 
@@ -39,7 +39,7 @@ func BenchmarkDecodeWithSize(b *testing.B) { b.StopTimer() bs := composeEncodedData(valueCnt) b.StartTimer() - for i := 0; i < b.N; i++ { + for range b.N { _, err := Decode(bs, valueCnt) if err != nil { b.Fatal(err) @@ -51,7 +51,7 @@ func BenchmarkDecodeWithOutSize(b *testing.B) { b.StopTimer() bs := composeEncodedData(valueCnt) b.StartTimer() - for i := 0; i < b.N; i++ { + for range b.N { _, err := Decode(bs, 1) if err != nil { b.Fatal(err) @@ -60,14 +60,14 @@ func BenchmarkDecodeWithOutSize(b *testing.B) { } func BenchmarkEncodeIntWithSize(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { data := make([]byte, 0, 8) EncodeInt(data, 10) } } func BenchmarkEncodeIntWithOutSize(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { EncodeInt(nil, 10) } } @@ -81,7 +81,7 @@ func BenchmarkDecodeDecimal(b *testing.B) { precision, frac := dec.PrecisionAndFrac() raw, _ := EncodeDecimal([]byte{}, dec, precision, frac) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { _, _, _, _, err := DecodeDecimal(raw) if err != nil { b.Fatal(err) @@ -98,7 +98,7 @@ func BenchmarkDecodeOneToChunk(b *testing.B) { intType := types.NewFieldType(mysql.TypeLonglong) b.ResetTimer() decoder := NewDecoder(chunk.New([]*types.FieldType{intType}, 32, 32), nil) - for i := 0; i < b.N; i++ { + for range b.N { _, err := decoder.DecodeOne(raw, 0, intType) if err != nil { b.Fatal(err) diff --git a/pkg/util/codec/bytes.go b/pkg/util/codec/bytes.go index a5a2f3f45e3e4..c0b3e13b4b269 100644 --- a/pkg/util/codec/bytes.go +++ b/pkg/util/codec/bytes.go @@ -188,7 +188,7 @@ func fastReverseBytes(b []byte) { w := n / wordSize if w > 0 { bw := *(*[]uintptr)(unsafe.Pointer(&b)) - for i := 0; i < w; i++ { + for i := range w { bw[i] = ^bw[i] } } diff --git a/pkg/util/codec/codec.go b/pkg/util/codec/codec.go index 8ebcaee493f75..8901818bd6acc 100644 --- a/pkg/util/codec/codec.go +++ b/pkg/util/codec/codec.go @@ -699,7 +699,7 @@ func HashChunkSelected(typeCtx types.Context, h []hash.Hash64, chk *chunk.Chunk, _, _ = h[i].Write(b) } case mysql.TypeVarchar, mysql.TypeVarString, mysql.TypeString, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: - for i := 0; i < rows; i++ { + for i := range rows { if sel != nil && !sel[i] { continue } @@ -743,7 +743,7 @@ func HashChunkSelected(typeCtx types.Context, h []hash.Hash64, chk *chunk.Chunk, _, _ = h[i].Write(b) } case mysql.TypeDuration: - for i := 0; i < rows; i++ { + for i := range rows { if sel != nil && !sel[i] { continue } @@ -785,7 +785,7 @@ func HashChunkSelected(typeCtx types.Context, h []hash.Hash64, chk *chunk.Chunk, _, _ = h[i].Write(b) } case mysql.TypeEnum: - for i := 0; i < rows; i++ { + for i := range rows { if sel != nil && !sel[i] { continue } @@ -813,7 +813,7 @@ func HashChunkSelected(typeCtx types.Context, h []hash.Hash64, chk *chunk.Chunk, _, _ = h[i].Write(b) } case mysql.TypeSet: - for i := 0; i < rows; i++ { + for i := range rows { if sel != nil && !sel[i] { continue } @@ -835,7 +835,7 @@ func HashChunkSelected(typeCtx types.Context, h []hash.Hash64, chk *chunk.Chunk, _, _ = h[i].Write(b) } case mysql.TypeBit: - for i := 0; i < rows; i++ { + for i := range rows { if sel != nil && !sel[i] { continue } @@ -856,7 +856,7 @@ func HashChunkSelected(typeCtx types.Context, h []hash.Hash64, chk *chunk.Chunk, _, _ = h[i].Write(b) } case mysql.TypeJSON: - for i := 0; i < rows; i++ { + for i := range rows { if sel != nil && !sel[i] { continue } @@ -876,7 +876,7 @@ func HashChunkSelected(typeCtx types.Context, h 
[]hash.Hash64, chk *chunk.Chunk, _, _ = h[i].Write(b) } case mysql.TypeTiDBVectorFloat32: - for i := 0; i < rows; i++ { + for i := range rows { if sel != nil && !sel[i] { continue } @@ -895,7 +895,7 @@ func HashChunkSelected(typeCtx types.Context, h []hash.Hash64, chk *chunk.Chunk, _, _ = h[i].Write(b) } case mysql.TypeNull: - for i := 0; i < rows; i++ { + for i := range rows { if sel != nil && !sel[i] { continue } @@ -1198,7 +1198,7 @@ func CutColumnID(b []byte) (remain []byte, n int64, err error) { // SetRawValues set raw datum values from a row data. func SetRawValues(data []byte, values []types.Datum) error { - for i := 0; i < len(values); i++ { + for i := range values { l, err := peek(data) if err != nil { return errors.Trace(err) @@ -1491,7 +1491,7 @@ func HashGroupKey(loc *time.Location, n int, col *chunk.Column, buf [][]byte, ft switch ft.EvalType() { case types.ETInt: i64s := col.Int64s() - for i := 0; i < n; i++ { + for i := range n { if col.IsNull(i) { buf[i] = append(buf[i], NilFlag) } else { @@ -1500,7 +1500,7 @@ func HashGroupKey(loc *time.Location, n int, col *chunk.Column, buf [][]byte, ft } case types.ETReal: f64s := col.Float64s() - for i := 0; i < n; i++ { + for i := range n { if col.IsNull(i) { buf[i] = append(buf[i], NilFlag) } else { @@ -1510,7 +1510,7 @@ func HashGroupKey(loc *time.Location, n int, col *chunk.Column, buf [][]byte, ft } case types.ETDecimal: ds := col.Decimals() - for i := 0; i < n; i++ { + for i := range n { if col.IsNull(i) { buf[i] = append(buf[i], NilFlag) } else { @@ -1523,7 +1523,7 @@ func HashGroupKey(loc *time.Location, n int, col *chunk.Column, buf [][]byte, ft } case types.ETDatetime, types.ETTimestamp: ts := col.Times() - for i := 0; i < n; i++ { + for i := range n { if col.IsNull(i) { buf[i] = append(buf[i], NilFlag) } else { @@ -1536,7 +1536,7 @@ func HashGroupKey(loc *time.Location, n int, col *chunk.Column, buf [][]byte, ft } case types.ETDuration: ds := col.GoDurations() - for i := 0; i < n; i++ { + for i := range n { if col.IsNull(i) { buf[i] = append(buf[i], NilFlag) } else { @@ -1545,7 +1545,7 @@ func HashGroupKey(loc *time.Location, n int, col *chunk.Column, buf [][]byte, ft } } case types.ETJson: - for i := 0; i < n; i++ { + for i := range n { if col.IsNull(i) { buf[i] = append(buf[i], NilFlag) } else { @@ -1554,7 +1554,7 @@ func HashGroupKey(loc *time.Location, n int, col *chunk.Column, buf [][]byte, ft } } case types.ETString: - for i := 0; i < n; i++ { + for i := range n { if col.IsNull(i) { buf[i] = append(buf[i], NilFlag) } else { @@ -1562,7 +1562,7 @@ func HashGroupKey(loc *time.Location, n int, col *chunk.Column, buf [][]byte, ft } } case types.ETVectorFloat32: - for i := 0; i < n; i++ { + for i := range n { if col.IsNull(i) { buf[i] = append(buf[i], NilFlag) } else { diff --git a/pkg/util/codec/codec_test.go b/pkg/util/codec/codec_test.go index 25f0611133852..6abc1c382c16f 100644 --- a/pkg/util/codec/codec_test.go +++ b/pkg/util/codec/codec_test.go @@ -763,7 +763,7 @@ func TestDecimal(t *testing.T) { // size - 1 because the flag occupy 1 bit. 
require.Len(t, b, size-1) } - for i := 0; i < len(decs)-1; i++ { + for i := range len(decs) - 1 { cmpRes := bytes.Compare(decs[i], decs[i+1]) require.LessOrEqual(t, cmpRes, 0) } @@ -953,7 +953,7 @@ func TestDecodeOneToChunk(t *testing.T) { rowCount := 3 chk := chunkForTest(t, typeCtx.Location(), datums, tps, rowCount) for colIdx, tp := range tps { - for rowIdx := 0; rowIdx < rowCount; rowIdx++ { + for rowIdx := range rowCount { got := chk.GetRow(rowIdx).GetDatum(colIdx, tp) expect := datums[colIdx] if got.IsNull() { @@ -1063,7 +1063,7 @@ func datumsForTest() ([]types.Datum, []*types.FieldType) { func chunkForTest(t *testing.T, tz *time.Location, datums []types.Datum, tps []*types.FieldType, rowCount int) *chunk.Chunk { decoder := NewDecoder(chunk.New(tps, 32, 32), tz) - for rowIdx := 0; rowIdx < rowCount; rowIdx++ { + for range rowCount { encoded, err := EncodeValue(tz, nil, datums...) require.NoError(t, err) decoder.buf = make([]byte, 0, len(encoded)) @@ -1148,7 +1148,7 @@ func TestHashChunkRow(t *testing.T) { chk := chunkForTest(t, typeCtx.Location(), datums, tps, 1) colIdx := make([]int, len(tps)) - for i := 0; i < len(tps); i++ { + for i := range tps { colIdx[i] = i } h := crc32.NewIEEE() @@ -1236,7 +1236,7 @@ func TestHashChunkColumns(t *testing.T) { chk := chunkForTest(t, typeCtx.Location(), datums, tps, 4) colIdx := make([]int, len(tps)) - for i := 0; i < len(tps); i++ { + for i := range tps { colIdx[i] = i } hasNull := []bool{false, false, false} @@ -1244,12 +1244,12 @@ func TestHashChunkColumns(t *testing.T) { rowHash := []hash.Hash64{fnv.New64(), fnv.New64(), fnv.New64()} sel := make([]bool, len(datums)) - for i := 0; i < 3; i++ { + for i := range 3 { sel[i] = true } // Test hash value of the first 12 `Null` columns - for i := 0; i < 12; i++ { + for i := range 12 { require.True(t, chk.GetRow(0).IsNull(i)) err1 := HashChunkSelected(typeCtx, vecHash, chk, tps[i], i, buf, hasNull, sel, false) err2 := HashChunkRow(typeCtx, rowHash[0], chk.GetRow(0), tps[i:i+1], colIdx[i:i+1], buf) diff --git a/pkg/util/codec/collation_test.go b/pkg/util/codec/collation_test.go index 0541ef1f30d80..4ac902b8e5e05 100644 --- a/pkg/util/codec/collation_test.go +++ b/pkg/util/codec/collation_test.go @@ -56,7 +56,7 @@ func TestHashGroupKeyCollation(t *testing.T) { buf2, err = HashGroupKey(time.Local, n, chk2.Column(0), buf2, tp) require.NoError(t, err) - for i := 0; i < n; i++ { + for i := range n { require.Equal(t, len(buf2[i]), len(buf1[i])) for j := range buf1 { require.Equal(t, buf2[i][j], buf1[i][j]) @@ -71,7 +71,7 @@ func TestHashGroupKeyCollation(t *testing.T) { buf2, err = HashGroupKey(time.Local, n, chk2.Column(0), buf2, tp) require.NoError(t, err) - for i := 0; i < n; i++ { + for i := range n { require.Equal(t, len(buf2[i]), len(buf1[i])) for j := range buf1 { require.Equal(t, buf2[i][j], buf1[i][j]) @@ -88,7 +88,7 @@ func TestHashChunkRowCollation(t *testing.T) { buf := make([]byte, 1) tp.SetCollate("binary") - for i := 0; i < n; i++ { + for i := range n { h1 := crc32.NewIEEE() h2 := crc32.NewIEEE() require.NoError(t, HashChunkRow(typeCtx, h1, chk1.GetRow(i), tps, cols, buf)) @@ -99,7 +99,7 @@ func TestHashChunkRowCollation(t *testing.T) { } tp.SetCollate("utf8_general_ci") - for i := 0; i < n; i++ { + for i := range n { h1 := crc32.NewIEEE() h2 := crc32.NewIEEE() require.NoError(t, HashChunkRow(typeCtx, h1, chk1.GetRow(i), tps, cols, buf)) @@ -110,7 +110,7 @@ func TestHashChunkRowCollation(t *testing.T) { } tp.SetCollate("utf8_unicode_ci") - for i := 0; i < n; i++ { + for i := range n { h1 := 
crc32.NewIEEE() h2 := crc32.NewIEEE() require.NoError(t, HashChunkRow(typeCtx, h1, chk1.GetRow(i), tps, cols, buf)) @@ -134,7 +134,7 @@ func TestHashChunkColumnsCollation(t *testing.T) { require.NoError(t, HashChunkColumns(typeCtx, h1s, chk1, tp, 0, buf, hasNull)) require.NoError(t, HashChunkColumns(typeCtx, h2s, chk2, tp, 0, buf, hasNull)) - for i := 0; i < n; i++ { + for i := range n { require.NotEqual(t, h2s[i].Sum64(), h1s[i].Sum64()) h1s[i].Reset() h2s[i].Reset() @@ -143,14 +143,14 @@ func TestHashChunkColumnsCollation(t *testing.T) { tp.SetCollate("utf8_general_ci") require.NoError(t, HashChunkColumns(typeCtx, h1s, chk1, tp, 0, buf, hasNull)) require.NoError(t, HashChunkColumns(typeCtx, h2s, chk2, tp, 0, buf, hasNull)) - for i := 0; i < n; i++ { + for i := range n { require.Equal(t, h2s[i].Sum64(), h1s[i].Sum64()) } tp.SetCollate("utf8_unicode_ci") require.NoError(t, HashChunkColumns(typeCtx, h1s, chk1, tp, 0, buf, hasNull)) require.NoError(t, HashChunkColumns(typeCtx, h2s, chk2, tp, 0, buf, hasNull)) - for i := 0; i < n; i++ { + for i := range n { require.Equal(t, h2s[i].Sum64(), h1s[i].Sum64()) } } diff --git a/pkg/util/collate/collate.go b/pkg/util/collate/collate.go index 7afb4c6238d3b..5f84f502edaba 100644 --- a/pkg/util/collate/collate.go +++ b/pkg/util/collate/collate.go @@ -167,7 +167,7 @@ func GetBinaryCollatorSlice(n int) []Collator { return binCollatorInstanceSliceWithLen1 } collators := make([]Collator, n) - for i := 0; i < n; i++ { + for i := range n { collators[i] = binCollatorInstance } return collators diff --git a/pkg/util/collate/collate_bench_test.go b/pkg/util/collate/collate_bench_test.go index 593278640c880..b508ba9bcf3db 100644 --- a/pkg/util/collate/collate_bench_test.go +++ b/pkg/util/collate/collate_bench_test.go @@ -39,7 +39,7 @@ func compare(b *testing.B, collator Collator, length int) { s2 := generateData(length) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { collator.Compare(s1, s2) } } @@ -48,7 +48,7 @@ func key(b *testing.B, collator Collator, length int) { s := generateData(length) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { collator.Key(s) } } diff --git a/pkg/util/cpu/cpu_test.go b/pkg/util/cpu/cpu_test.go index d520b367d4007..4458263e73aee 100644 --- a/pkg/util/cpu/cpu_test.go +++ b/pkg/util/cpu/cpu_test.go @@ -37,7 +37,7 @@ func TestCPUValue(t *testing.T) { observer := cpu.NewCPUObserver() exit := make(chan struct{}) var wg sync.WaitGroup - for i := 0; i < 10; i++ { + for range 10 { wg.Add(1) go func() { defer wg.Done() @@ -52,7 +52,7 @@ func TestCPUValue(t *testing.T) { }() } observer.Start() - for n := 0; n < 10; n++ { + for range 10 { time.Sleep(200 * time.Millisecond) value, unsupported := cpu.GetCPUUsage() require.False(t, unsupported) @@ -72,7 +72,7 @@ func TestFailpointCPUValue(t *testing.T) { observer := cpu.NewCPUObserver() exit := make(chan struct{}) var wg sync.WaitGroup - for i := 0; i < 10; i++ { + for range 10 { wg.Add(1) go func() { defer wg.Done() @@ -87,7 +87,7 @@ func TestFailpointCPUValue(t *testing.T) { }() } observer.Start() - for n := 0; n < 10; n++ { + for range 10 { time.Sleep(200 * time.Millisecond) value, unsupported := cpu.GetCPUUsage() require.True(t, unsupported) diff --git a/pkg/util/cpuprofile/cpuprofile_test.go b/pkg/util/cpuprofile/cpuprofile_test.go index d852721ba861f..eebb7e0e89b2f 100644 --- a/pkg/util/cpuprofile/cpuprofile_test.go +++ b/pkg/util/cpuprofile/cpuprofile_test.go @@ -183,7 +183,7 @@ func TestGetCPUProfile(t *testing.T) { defer cancel() testutil.MockCPULoad(ctx, 
"sql", "sql_digest", "plan_digest") var wg sync.WaitGroup - for i := 0; i < 10; i++ { + for range 10 { wg.Add(1) go func() { defer wg.Done() diff --git a/pkg/util/cpuprofile/testutil/util.go b/pkg/util/cpuprofile/testutil/util.go index d8ba724ece523..7e3567869e25c 100644 --- a/pkg/util/cpuprofile/testutil/util.go +++ b/pkg/util/cpuprofile/testutil/util.go @@ -58,7 +58,7 @@ func mockCPULoadByGoroutineWithLabel(ctx context.Context, labels ...string) { default: } sum := 0 - for i := 0; i < 1000000; i++ { + for i := range 1000000 { sum = sum + i*2 } } diff --git a/pkg/util/cteutil/storage_test.go b/pkg/util/cteutil/storage_test.go index 0028864028e56..6b5d2c9d5fe30 100644 --- a/pkg/util/cteutil/storage_test.go +++ b/pkg/util/cteutil/storage_test.go @@ -61,12 +61,12 @@ func TestOpenAndClose(t *testing.T) { chkSize := 1 storage := NewStorageRowContainer(fields, chkSize) - for i := 0; i < 10; i++ { + for range 10 { err := storage.OpenAndRef() require.NoError(t, err) } - for i := 0; i < 9; i++ { + for range 9 { err := storage.DerefAndClose() require.NoError(t, err) } @@ -84,7 +84,7 @@ func TestAddAndGetChunk(t *testing.T) { storage := NewStorageRowContainer(fields, chkSize) inChk := chunk.NewChunkWithCapacity(fields, chkSize) - for i := 0; i < chkSize; i++ { + for i := range chkSize { inChk.AppendInt64(0, int64(i)) } @@ -112,7 +112,7 @@ func TestSpillToDisk(t *testing.T) { var tmp any = storage inChk := chunk.NewChunkWithCapacity(fields, chkSize) - for i := 0; i < chkSize; i++ { + for i := range chkSize { inChk.AppendInt64(0, int64(i)) } @@ -168,7 +168,7 @@ func TestReopen(t *testing.T) { require.NoError(t, err) inChk := chunk.NewChunkWithCapacity(fields, chkSize) - for i := 0; i < chkSize; i++ { + for i := range chkSize { inChk.AppendInt64(0, int64(i)) } err = storage.Add(inChk) @@ -188,7 +188,7 @@ func TestReopen(t *testing.T) { out64s := outChk.Column(0).Int64s() require.Equal(t, in64s, out64s) // Reopen multiple times. 
- for i := 0; i < 100; i++ { + for range 100 { err = storage.Reopen() require.NoError(t, err) } @@ -210,7 +210,7 @@ func TestSwapData(t *testing.T) { err := storage1.OpenAndRef() require.NoError(t, err) inChk1 := chunk.NewChunkWithCapacity(tp1, chkSize) - for i := 0; i < chkSize; i++ { + for i := range chkSize { inChk1.AppendInt64(0, int64(i)) } in1 := inChk1.Column(0).Int64s() @@ -223,11 +223,11 @@ func TestSwapData(t *testing.T) { require.NoError(t, err) inChk2 := chunk.NewChunkWithCapacity(tp2, chkSize) - for i := 0; i < chkSize; i++ { + for i := range chkSize { inChk2.AppendString(0, strconv.FormatInt(int64(i), 10)) } - var in2 []string - for i := 0; i < inChk2.NumRows(); i++ { + in2 := make([]string, 0, inChk2.NumRows()) + for i := range inChk2.NumRows() { in2 = append(in2, inChk2.Column(0).GetString(i)) } err = storage2.Add(inChk2) @@ -241,8 +241,8 @@ func TestSwapData(t *testing.T) { outChk2, err := storage2.GetChunk(0) require.NoError(t, err) - var out1 []string - for i := 0; i < outChk1.NumRows(); i++ { + out1 := make([]string, 0, outChk1.NumRows()) + for i := range outChk1.NumRows() { out1 = append(out1, outChk1.Column(0).GetString(i)) } out2 := outChk2.Column(0).Int64s() diff --git a/pkg/util/dbutil/common.go b/pkg/util/dbutil/common.go index 901503940acdf..53a5e495ed4d9 100644 --- a/pkg/util/dbutil/common.go +++ b/pkg/util/dbutil/common.go @@ -744,7 +744,7 @@ func ReplacePlaceholder(str string, args []string) string { // ExecSQLWithRetry executes sql with retry func ExecSQLWithRetry(ctx context.Context, db DBExecutor, sql string, args ...any) (err error) { - for i := 0; i < DefaultRetryTime; i++ { + for i := range DefaultRetryTime { startTime := time.Now() _, err = db.ExecContext(ctx, sql, args...) takeDuration := time.Since(startTime) diff --git a/pkg/util/deadlockhistory/deadlock_history_test.go b/pkg/util/deadlockhistory/deadlock_history_test.go index 65354687698eb..b6ff479a8960d 100644 --- a/pkg/util/deadlockhistory/deadlock_history_test.go +++ b/pkg/util/deadlockhistory/deadlock_history_test.go @@ -124,7 +124,7 @@ func TestDeadlockHistoryCollection(t *testing.T) { expectedItems := []*DeadlockRecord{rec1, rec2, rec3} expectedIDs := []uint64{1, 2, 3} expectedDequeHead := 0 - for i := 0; i < 6; i++ { + for range 6 { newRec := &DeadlockRecord{ OccurTime: time.Now(), } diff --git a/pkg/util/deeptest/statictesthelper.go b/pkg/util/deeptest/statictesthelper.go index 0936d761d5dbe..b9493444037ae 100644 --- a/pkg/util/deeptest/statictesthelper.go +++ b/pkg/util/deeptest/statictesthelper.go @@ -68,7 +68,7 @@ func (h *staticTestHelper) assertRecursivelyNotEqual(t require.TestingT, valA, v // This function assumes that `a` and `b` are the same type switch valA.Type().Kind() { case reflect.Struct: - for i := 0; i < valA.NumField(); i++ { + for i := range valA.NumField() { h.assertRecursivelyNotEqual(t, valA.Field(i), valB.Field(i), path+"."+valA.Type().Field(i).Name) } case reflect.Ptr: @@ -82,13 +82,13 @@ func (h *staticTestHelper) assertRecursivelyNotEqual(t require.TestingT, valA, v if !h.shouldComparePointer(path) { minLen := min(valA.Len(), valB.Len()) - for i := 0; i < minLen; i++ { + for i := range minLen { h.assertRecursivelyNotEqual(t, valA.Index(i), valB.Index(i), path+fmt.Sprintf("[%d]", i)) } } case reflect.Array: minLen := min(valA.Len(), valB.Len()) - for i := 0; i < minLen; i++ { + for i := range minLen { h.assertRecursivelyNotEqual(t, valA.Index(i), valB.Index(i), path+fmt.Sprintf("[%d]", i)) } case reflect.Bool: @@ -139,7 +139,7 @@ func (h *staticTestHelper) 
assertDeepClonedEqual(t require.TestingT, valA, valB // This function assumes that `a` and `b` are the same type switch valA.Type().Kind() { case reflect.Struct: - for i := 0; i < valA.NumField(); i++ { + for i := range valA.NumField() { h.assertDeepClonedEqual(t, valA.Field(i), valB.Field(i), path+"."+valA.Type().Field(i).Name) } case reflect.Ptr: @@ -166,13 +166,13 @@ func (h *staticTestHelper) assertDeepClonedEqual(t require.TestingT, valA, valB require.Equal(t, valA.Pointer(), valB.Pointer(), path+" should be the same") } else { require.NotEqual(t, valA.Pointer(), valB.Pointer(), path+" should not be the same") - for i := 0; i < valA.Len(); i++ { + for i := range valA.Len() { h.assertDeepClonedEqual(t, valA.Index(i), valB.Index(i), path+fmt.Sprintf("[%d]", i)) } } case reflect.Array: require.Equal(t, valA.Len(), valB.Len(), path+" should have the same length") - for i := 0; i < valA.Len(); i++ { + for i := range valA.Len() { h.assertDeepClonedEqual(t, valA.Index(i), valB.Index(i), path+fmt.Sprintf("[%d]", i)) } case reflect.Bool: diff --git a/pkg/util/disk/tempDir_test.go b/pkg/util/disk/tempDir_test.go index b0fe1d0c14248..76e207eadd6b5 100644 --- a/pkg/util/disk/tempDir_test.go +++ b/pkg/util/disk/tempDir_test.go @@ -40,7 +40,7 @@ func TestRemoveDir(t *testing.T) { require.NoError(t, os.RemoveAll(config.GetGlobalConfig().TempStoragePath)) require.Equal(t, checkTempDirExist(), false) wg := sync.WaitGroup{} - for i := 0; i < 10; i++ { + for range 10 { wg.Add(1) go func(t *testing.T) { err := CheckAndInitTempDir() diff --git a/pkg/util/encrypt/aes_layer_test.go b/pkg/util/encrypt/aes_layer_test.go index d8149f7ad74a9..aebcd4257ec58 100644 --- a/pkg/util/encrypt/aes_layer_test.go +++ b/pkg/util/encrypt/aes_layer_test.go @@ -43,7 +43,7 @@ func testReadAtWithCase(t *testing.T, testCase readAtTestCase) { writeString := "0123456789" buf := bytes.NewBuffer(nil) - for i := 0; i < 510; i++ { + for range 510 { buf.WriteString(writeString) } @@ -121,7 +121,7 @@ func benchmarkReadAtWithCase(b *testing.B, testCase readAtTestCase) { writeString := "0123456789" buf := bytes.NewBuffer(nil) - for i := 0; i < 510; i++ { + for range 510 { buf.WriteString(writeString) } @@ -146,7 +146,7 @@ func benchmarkReadAtWithCase(b *testing.B, testCase readAtTestCase) { r := testCase.newReader(f) rBuf := make([]byte, 10) b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { _, err := r.ReadAt(rBuf, int64(i%(n1+n2))) if err != nil { b.Fatal(err) diff --git a/pkg/util/encrypt/crypt.go b/pkg/util/encrypt/crypt.go index 0adc2e419166f..9fbf3b4f6a59c 100644 --- a/pkg/util/encrypt/crypt.go +++ b/pkg/util/encrypt/crypt.go @@ -29,7 +29,7 @@ func (rs *randStruct) randomInit(password []byte, length int) { add = 7 nr2 = 0x12345671 - for i := 0; i < length; i++ { + for i := range length { pswChar := password[i] if pswChar == ' ' || pswChar == '\t' { continue @@ -90,7 +90,7 @@ func (sc *sqlCrypt) init(password []byte, length int) { } func (sc *sqlCrypt) encode(str []byte, length int) { - for i := 0; i < length; i++ { + for i := range length { sc.shift ^= uint32(sc.rand.myRand() * 255.0) idx := uint32(str[i]) str[i] = sc.encodeBuff[idx] ^ byte(sc.shift) @@ -99,7 +99,7 @@ func (sc *sqlCrypt) encode(str []byte, length int) { } func (sc *sqlCrypt) decode(str []byte, length int) { - for i := 0; i < length; i++ { + for i := range length { sc.shift ^= uint32(sc.rand.myRand() * 255.0) idx := uint32(str[i] ^ byte(sc.shift)) str[i] = sc.decodeBuff[idx] diff --git a/pkg/util/etcd.go b/pkg/util/etcd.go index 
bff00a8d66428..dd8597c1e7f59 100644 --- a/pkg/util/etcd.go +++ b/pkg/util/etcd.go @@ -46,7 +46,7 @@ func NewSession(ctx context.Context, logPrefix string, etcdCli *clientv3.Client, var etcdSession *concurrency.Session failedCnt := 0 - for i := 0; i < retryCnt; i++ { + for range retryCnt { if err = contextDone(ctx, err); err != nil { return etcdSession, errors.Trace(err) } diff --git a/pkg/util/execdetails/execdetails_test.go b/pkg/util/execdetails/execdetails_test.go index 0de6c3c3de9d2..0c4974c1773f3 100644 --- a/pkg/util/execdetails/execdetails_test.go +++ b/pkg/util/execdetails/execdetails_test.go @@ -541,7 +541,7 @@ func TestCopRuntimeStats2(t *testing.T) { TotalRPCWallTime: 50 * time.Millisecond, } stats.RecordScanDetail(tableScanID, "tikv", scanDetail) - for i := 0; i < 1005; i++ { + for range 1005 { stats.RecordOneCopTask(tableScanID, "tikv", "8.8.8.9", mockExecutorExecutionSummary(2, 2, 2)) stats.RecordScanDetail(tableScanID, "tikv", scanDetail) stats.RecordTimeDetail(tableScanID, "tikv", timeDetail) diff --git a/pkg/util/extsort/disk_sorter_test.go b/pkg/util/extsort/disk_sorter_test.go index caff0df3df92b..8d13aa59c3ec7 100644 --- a/pkg/util/extsort/disk_sorter_test.go +++ b/pkg/util/extsort/disk_sorter_test.go @@ -402,7 +402,7 @@ func TestSSTReaderPoolParallel(t *testing.T) { wg.Add(1) go func(fileNum int) { defer wg.Done() - for j := 0; j < 10000; j++ { + for range 10000 { _, err := pool.get(fileNum) require.NoError(t, err) require.NoError(t, pool.unref(fileNum)) diff --git a/pkg/util/extsort/external_sorter_test.go b/pkg/util/extsort/external_sorter_test.go index e1329a8618283..62a98f5337427 100644 --- a/pkg/util/extsort/external_sorter_test.go +++ b/pkg/util/extsort/external_sorter_test.go @@ -75,7 +75,7 @@ func runCommonParallelTest(t *testing.T, sorter ExternalSorter) { kvCh := make(chan keyValue, 16) g, gCtx := errgroup.WithContext(ctx) - for i := 0; i < numWriters; i++ { + for range numWriters { g.Go(func() (retErr error) { w, err := sorter.NewWriter(gCtx) if err != nil { @@ -134,7 +134,7 @@ func genRandomKVs( valueSizeRange int, ) []keyValue { kvs := make([]keyValue, 0, n) - for i := 0; i < n; i++ { + for i := range n { keySize := rng.Intn(keySizeRange-4) + 4 kv := keyValue{ key: make([]byte, keySize), diff --git a/pkg/util/fastrand/random.go b/pkg/util/fastrand/random.go index d823876ad6c1a..83b49174e3fe6 100644 --- a/pkg/util/fastrand/random.go +++ b/pkg/util/fastrand/random.go @@ -36,7 +36,7 @@ func (r *wyrand) Next() uint64 { func Buf(size int) []byte { buf := make([]byte, size) r := wyrand(Uint32()) - for i := 0; i < size; i++ { + for i := range size { // This is similar to Uint32() % n, but faster. 
// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ buf[i] = byte(uint32(uint64(uint32(r.Next())) * uint64(127) >> 32)) diff --git a/pkg/util/fastrand/random_test.go b/pkg/util/fastrand/random_test.go index ad2cba5d2c90f..de32d1d1cf8fc 100644 --- a/pkg/util/fastrand/random_test.go +++ b/pkg/util/fastrand/random_test.go @@ -29,12 +29,12 @@ func TestRand(t *testing.T) { _ = Buf(20) var arr [256]bool - for i := 0; i < 1024; i++ { + for range 1024 { idx := Uint32N(256) arr[idx] = true } sum := 0 - for i := 0; i < 256; i++ { + for i := range 256 { if !arr[i] { sum++ } diff --git a/pkg/util/format/format.go b/pkg/util/format/format.go index 2554f7919a277..6e641b0fffd2e 100644 --- a/pkg/util/format/format.go +++ b/pkg/util/format/format.go @@ -85,7 +85,7 @@ func IndentFormatter(w io.Writer, indent string) Formatter { func (f *indentFormatter) format(flat bool, format string, args ...any) (n int, errno error) { var buf = make([]byte, 0) - for i := 0; i < len(format); i++ { + for i := range len(format) { c := format[i] switch f.state { case st0: @@ -114,7 +114,7 @@ func (f *indentFormatter) format(flat bool, format string, args ...any) (n int, f.state = stBOLPERC default: if !flat { - for i := 0; i < f.indentLevel; i++ { + for range f.indentLevel { buf = append(buf, f.indent...) } } @@ -131,7 +131,7 @@ func (f *indentFormatter) format(flat bool, format string, args ...any) (n int, f.state = stBOL default: if !flat { - for i := 0; i < f.indentLevel; i++ { + for range f.indentLevel { buf = append(buf, f.indent...) } } diff --git a/pkg/util/gctuner/tuner_test.go b/pkg/util/gctuner/tuner_test.go index 2ee5fef1a318b..ebd1cf9ca2bdd 100644 --- a/pkg/util/gctuner/tuner_test.go +++ b/pkg/util/gctuner/tuner_test.go @@ -79,7 +79,7 @@ func TestTuner(t *testing.T) { testHeap = make([]byte, threshold+1024) t.Logf("old gc percent before gc: %d", tn.getGCPercent()) runtime.GC() - for i := 0; i < 8; i++ { + for range 8 { runtime.GC() require.Equal(t, minGCPercent.Load(), tn.getGCPercent()) } diff --git a/pkg/util/globalconn/pool.go b/pkg/util/globalconn/pool.go index 4b9573c6a86ec..861142915bf66 100644 --- a/pkg/util/globalconn/pool.go +++ b/pkg/util/globalconn/pool.go @@ -77,7 +77,7 @@ func (p *AutoIncPool) InitExt(size uint64, checkExisted bool, tryCnt int) { // Get id by auto-increment. 
func (p *AutoIncPool) Get() (id uint64, ok bool) { - for i := 0; i < p.tryCnt; i++ { + for range p.tryCnt { id := atomic.AddUint64(&p.lastID, 1) if p.cap < math.MaxUint64 { id = id % p.cap @@ -166,6 +166,7 @@ func (p *LockFreeCircularPool) InitExt(size uint32, fillCount uint32) { fillCount = mathutil.MinUint32(p.cap-1, fillCount) var i uint32 + //nolint: intrange for i = 0; i < fillCount; i++ { p.slots[i] = lockFreePoolItem{value: i + 1, seq: i + 1} } @@ -181,7 +182,7 @@ func (p *LockFreeCircularPool) InitExt(size uint32, fillCount uint32) { func (p *LockFreeCircularPool) InitForTest(head uint32, fillCount uint32) { fillCount = mathutil.MinUint32(p.cap-1, fillCount) var i uint32 - for i = 0; i < fillCount; i++ { + for i = range fillCount { p.slots[i] = lockFreePoolItem{value: i + 1, seq: head + i + 1} } for ; i < p.cap; i++ { diff --git a/pkg/util/globalconn/pool_test.go b/pkg/util/globalconn/pool_test.go index 353d86804a79c..2a7c248ed0fb3 100644 --- a/pkg/util/globalconn/pool_test.go +++ b/pkg/util/globalconn/pool_test.go @@ -189,7 +189,7 @@ func (p *LockBasedCircularPool) InitExt(size uint32, fillCount uint32) { fillCount = mathutil.MinUint32(p.cap-1, fillCount) var i uint32 - for i = 0; i < fillCount; i++ { + for i = range fillCount { p.slots[i] = i + 1 } for ; i < p.cap; i++ { @@ -270,7 +270,7 @@ func prepareConcurrencyTest(pool globalconn.IDPool, producers int, consumers int if producers > 0 { reqsPerProducer := (requests + producers - 1) / producers wgProducer.Add(producers) - for p := 0; p < producers; p++ { + for p := range producers { go func(p int) { defer wgProducer.Done() <-ready @@ -287,7 +287,7 @@ func prepareConcurrencyTest(pool globalconn.IDPool, producers int, consumers int wgConsumer = &sync.WaitGroup{} if consumers > 0 { wgConsumer.Add(consumers) - for c := 0; c < consumers; c++ { + for c := range consumers { go func(c int) { defer wgConsumer.Done() <-ready @@ -459,7 +459,7 @@ func BenchmarkPoolConcurrency(b *testing.B) { for _, ta := range cases { b.Run(fmt.Sprintf("LockBasedCircularPool: P:C: %v:%v", ta.producers, ta.consumers), func(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { b.StopTimer() var total int64 pool := prepareLockBasedPool(poolSizeInBits, 0) @@ -479,7 +479,7 @@ func BenchmarkPoolConcurrency(b *testing.B) { b.Run(fmt.Sprintf("LockFreeCircularPool: P:C: %v:%v", ta.producers, ta.consumers), func(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { b.StopTimer() var total int64 pool := prepareLockFreePool(poolSizeInBits, 0, 0) diff --git a/pkg/util/importer/db.go b/pkg/util/importer/db.go index 6e40f30241604..e5c9e6f4de44a 100644 --- a/pkg/util/importer/db.go +++ b/pkg/util/importer/db.go @@ -66,7 +66,7 @@ func uniqInt64Value(column *column, minv int64, maxv int64) int64 { func genRowDatas(table *table, count int) ([]string, error) { datas := make([]string, 0, count) - for i := 0; i < count; i++ { + for range count { data, err := genRowData(table) if err != nil { return nil, errors.Trace(err) @@ -243,7 +243,7 @@ func closeDB(db *sql.DB) error { func createDBs(cfg dbutil.DBConfig, count int) ([]*sql.DB, error) { dbs := make([]*sql.DB, 0, count) - for i := 0; i < count; i++ { + for range count { db, err := createDB(cfg) if err != nil { return nil, errors.Trace(err) diff --git a/pkg/util/importer/job.go b/pkg/util/importer/job.go index 316f611925689..ae8c92da54d8d 100644 --- a/pkg/util/importer/job.go +++ b/pkg/util/importer/job.go @@ -24,7 +24,7 @@ import ( ) func addJobs(jobCount int, jobChan chan 
struct{}) { - for i := 0; i < jobCount; i++ { + for range jobCount { jobChan <- struct{}{} } @@ -73,7 +73,7 @@ func doJob(table *table, db *sql.DB, batch int, jobChan chan struct{}, doneChan } func doWait(doneChan chan struct{}, start time.Time, jobCount int, workerCount int) { - for i := 0; i < workerCount; i++ { + for range workerCount { <-doneChan } @@ -97,7 +97,7 @@ func doProcess(table *table, dbs []*sql.DB, jobCount int, workerCount int, batch start := time.Now() go addJobs(jobCount, jobChan) - for i := 0; i < workerCount; i++ { + for i := range workerCount { go doJob(table, dbs[i], batch, jobChan, doneChan) } diff --git a/pkg/util/intset/fast_int_set_bench_test.go b/pkg/util/intset/fast_int_set_bench_test.go index 3414124d9a1e8..25be816361abe 100644 --- a/pkg/util/intset/fast_int_set_bench_test.go +++ b/pkg/util/intset/fast_int_set_bench_test.go @@ -22,7 +22,7 @@ import ( func BenchmarkMapIntSet_Difference(b *testing.B) { intSetA := NewIntSet() - for i := 0; i < 200000; i++ { + for i := range 200000 { intSetA[i] = struct{}{} } intSetB := NewIntSet() @@ -30,7 +30,7 @@ func BenchmarkMapIntSet_Difference(b *testing.B) { intSetB[i] = struct{}{} } b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { tmp := NewIntSet() tmp.Difference2(intSetA, intSetB) //intSetA.SubsetOf(intSetB) @@ -39,7 +39,7 @@ func BenchmarkMapIntSet_Difference(b *testing.B) { func BenchmarkIntSet_Difference(b *testing.B) { intSetA := &intsets.Sparse{} - for i := 0; i < 200000; i++ { + for i := range 200000 { intSetA.Insert(i) } intSetB := &intsets.Sparse{} @@ -47,7 +47,7 @@ func BenchmarkIntSet_Difference(b *testing.B) { intSetA.Insert(i) } b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { tmp := &intsets.Sparse{} tmp.Difference(intSetA, intSetB) //intSetA.SubsetOf(intSetB) @@ -56,7 +56,7 @@ func BenchmarkIntSet_Difference(b *testing.B) { func BenchmarkFastIntSet_Difference(b *testing.B) { intSetA := NewFastIntSet() - for i := 0; i < 200000; i++ { + for i := range 200000 { intSetA.Insert(i) } intSetB := NewFastIntSet() @@ -64,7 +64,7 @@ func BenchmarkFastIntSet_Difference(b *testing.B) { intSetA.Insert(i) } b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { intSetA.Difference(intSetB) //intSetA.SubsetOf(intSetB) } @@ -72,9 +72,9 @@ func BenchmarkFastIntSet_Difference(b *testing.B) { func BenchmarkIntSet_Insert(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { intSet := NewIntSet() - for j := 0; j < 64; j++ { + for j := range 64 { intSet.Insert(j) } } @@ -82,9 +82,9 @@ func BenchmarkIntSet_Insert(b *testing.B) { func BenchmarkSparse_Insert(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { intSet := &intsets.Sparse{} - for j := 0; j < 64; j++ { + for j := range 64 { intSet.Insert(j) } } @@ -92,9 +92,9 @@ func BenchmarkSparse_Insert(b *testing.B) { func BenchmarkFastIntSet_Insert(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { intSet := NewFastIntSet() - for j := 0; j < 64; j++ { + for j := range 64 { intSet.Insert(j) } } diff --git a/pkg/util/intset/fast_int_set_test.go b/pkg/util/intset/fast_int_set_test.go index d06b49939f6cf..35e9ee1e14e0b 100644 --- a/pkg/util/intset/fast_int_set_test.go +++ b/pkg/util/intset/fast_int_set_test.go @@ -242,7 +242,7 @@ func TestFastIntSet(t *testing.T) { s.Remove(v) } empty := true - for j := 0; j < m; j++ { + for j := range m { empty = empty && !in[j] if in[j] != s.Has(j) { t.Fatalf("incorrect result for Contains(%d), expected %t", j, in[j]) @@ -258,7 +258,7 @@ func 
TestFastIntSet(t *testing.T) { s.ForEach(func(j int) { forEachRes[j] = true }) - for j := 0; j < m; j++ { + for j := range m { if in[j] != forEachRes[j] { t.Fatalf("incorrect ForEachResult for %d (%t, expected %t)", j, forEachRes[j], in[j]) } @@ -318,7 +318,7 @@ func TestFastIntSetTwoSetOps(t *testing.T) { s.Insert(k) } p := rng.Perm(len(vals)) - for i := 0; i < numRemoved; i++ { + for i := range numRemoved { k := vals[p[i]] s.Remove(k) delete(used, k) diff --git a/pkg/util/kvcache/simple_lru_test.go b/pkg/util/kvcache/simple_lru_test.go index 6dd9476482afb..b6f6dd9578be4 100644 --- a/pkg/util/kvcache/simple_lru_test.go +++ b/pkg/util/kvcache/simple_lru_test.go @@ -33,8 +33,8 @@ func (mk *mockCacheKey) Hash() []byte { return mk.hash } mk.hash = make([]byte, 8) - for i := uint(0); i < 8; i++ { - mk.hash[i] = byte((mk.key >> ((i - 1) * 8)) & 0xff) + for i := range 8 { + mk.hash[i] = byte((mk.key >> ((uint(i) - 1) * 8)) & 0xff) } return mk.hash } @@ -67,7 +67,7 @@ func TestPut(t *testing.T) { lruZeroQuota.SetOnEvict(func(key Key, value Value) { zeroQuotaDroppedKv[key] = value }) - for i := 0; i < 5; i++ { + for i := range 5 { keys[i] = newMockHashKey(int64(i)) vals[i] = int64(i) lruMaxMem.Put(keys[i], vals[i]) @@ -80,7 +80,7 @@ func TestPut(t *testing.T) { // test for non-existent elements require.Len(t, maxMemDroppedKv, 2) - for i := 0; i < 2; i++ { + for i := range 2 { element, exists := lruMaxMem.elements[string(keys[i].Hash())] require.False(t, exists) require.Nil(t, element) @@ -125,7 +125,7 @@ func TestZeroQuota(t *testing.T) { keys := make([]*mockCacheKey, 100) vals := make([]int64, 100) - for i := 0; i < 100; i++ { + for i := range 100 { keys[i] = newMockHashKey(int64(i)) vals[i] = int64(i) lru.Put(keys[i], vals[i]) @@ -144,7 +144,7 @@ func TestOOMGuard(t *testing.T) { keys := make([]*mockCacheKey, 5) vals := make([]int64, 5) - for i := 0; i < 5; i++ { + for i := range 5 { keys[i] = newMockHashKey(int64(i)) vals[i] = int64(i) lru.Put(keys[i], vals[i]) @@ -152,7 +152,7 @@ func TestOOMGuard(t *testing.T) { require.Equal(t, uint(0), lru.size) // test for non-existent elements - for i := 0; i < 5; i++ { + for i := range 5 { element, exists := lru.elements[string(keys[i].Hash())] require.False(t, exists) require.Nil(t, element) @@ -168,14 +168,14 @@ func TestGet(t *testing.T) { keys := make([]*mockCacheKey, 5) vals := make([]int64, 5) - for i := 0; i < 5; i++ { + for i := range 5 { keys[i] = newMockHashKey(int64(i)) vals[i] = int64(i) lru.Put(keys[i], vals[i]) } // test for non-existent elements - for i := 0; i < 2; i++ { + for i := range 2 { value, exists := lru.Get(keys[i]) require.False(t, exists) require.Nil(t, value) @@ -211,7 +211,7 @@ func TestDelete(t *testing.T) { keys := make([]*mockCacheKey, 3) vals := make([]int64, 3) - for i := 0; i < 3; i++ { + for i := range 3 { keys[i] = newMockHashKey(int64(i)) vals[i] = int64(i) lru.Put(keys[i], vals[i]) @@ -240,7 +240,7 @@ func TestDeleteAll(t *testing.T) { keys := make([]*mockCacheKey, 3) vals := make([]int64, 3) - for i := 0; i < 3; i++ { + for i := range 3 { keys[i] = newMockHashKey(int64(i)) vals[i] = int64(i) lru.Put(keys[i], vals[i]) @@ -249,7 +249,7 @@ func TestDeleteAll(t *testing.T) { lru.DeleteAll() - for i := 0; i < 3; i++ { + for i := range 3 { value, exists := lru.Get(keys[i]) require.False(t, exists) require.Nil(t, value) @@ -266,7 +266,7 @@ func TestValues(t *testing.T) { keys := make([]*mockCacheKey, 5) vals := make([]int64, 5) - for i := 0; i < 5; i++ { + for i := range 5 { keys[i] = newMockHashKey(int64(i)) vals[i] = 
int64(i) lru.Put(keys[i], vals[i]) @@ -274,7 +274,7 @@ func TestValues(t *testing.T) { values := lru.Values() require.Equal(t, 5, len(values)) - for i := 0; i < 5; i++ { + for i := range 5 { require.Equal(t, int64(4-i), values[i]) } } @@ -285,7 +285,7 @@ func TestPutProfileName(t *testing.T) { tem := reflect.TypeOf(*lru) pt := reflect.TypeOf(lru) functionName := "" - for i := 0; i < pt.NumMethod(); i++ { + for i := range pt.NumMethod() { if pt.Method(i).Name == "Put" { functionName = "Put" } diff --git a/pkg/util/logutil/hex.go b/pkg/util/logutil/hex.go index 9f422d4dc3f54..8777a15991a8c 100644 --- a/pkg/util/logutil/hex.go +++ b/pkg/util/logutil/hex.go @@ -54,7 +54,7 @@ func prettyPrint(w io.Writer, val reflect.Value) { } case reflect.Struct: fmt.Fprintf(w, "{") - for i := 0; i < val.NumField(); i++ { + for i := range val.NumField() { fv := val.Field(i) ft := tp.Field(i) if strings.HasPrefix(ft.Name, "XXX") { diff --git a/pkg/util/logutil/log_test.go b/pkg/util/logutil/log_test.go index 428fefc478b00..6c8051406387f 100644 --- a/pkg/util/logutil/log_test.go +++ b/pkg/util/logutil/log_test.go @@ -187,7 +187,7 @@ func TestSetLevel(t *testing.T) { func TestSlowQueryLoggerAndGeneralLoggerCreation(t *testing.T) { var prop *log.ZapProperties var err error - for i := 0; i < 2; i++ { + for i := range 2 { level := "Error" conf := NewLogConfig(level, DefaultLogFormat, "", "", EmptyFileLogConfig, false) if i == 0 { @@ -285,7 +285,7 @@ func TestProxyFields(t *testing.T) { require.NoError(t, os.Unsetenv(env)) } - for i := 0; i < 3; i++ { + for i := range 3 { if (1<= 0; i-- { diff --git a/pkg/util/memoryusagealarm/memoryusagealarm_test.go b/pkg/util/memoryusagealarm/memoryusagealarm_test.go index 46802c7920ab4..d793d404ca278 100644 --- a/pkg/util/memoryusagealarm/memoryusagealarm_test.go +++ b/pkg/util/memoryusagealarm/memoryusagealarm_test.go @@ -101,7 +101,7 @@ func TestGetTop10Sql(t *testing.T) { func genMockProcessInfoList(memConsumeList []int64, startTimeList []time.Time, size int) []*util.ProcessInfo { processInfoList := make([]*util.ProcessInfo, 0, size) - for i := 0; i < size; i++ { + for i := range size { tracker := memory.NewTracker(0, 0) tracker.Consume(memConsumeList[i]) var stmtCtxRefCount stmtctx.ReferenceCount = 0 diff --git a/pkg/util/mock/mock_test.go b/pkg/util/mock/mock_test.go index 8d2af9fe6575a..8c468a9d80721 100644 --- a/pkg/util/mock/mock_test.go +++ b/pkg/util/mock/mock_test.go @@ -42,7 +42,7 @@ func TestContext(t *testing.T) { func BenchmarkNewContext(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { NewContext() } } diff --git a/pkg/util/mvmap/bench_test.go b/pkg/util/mvmap/bench_test.go index 31445e334dd26..dbd039a01a7c3 100644 --- a/pkg/util/mvmap/bench_test.go +++ b/pkg/util/mvmap/bench_test.go @@ -23,7 +23,7 @@ import ( func BenchmarkMVMapPut(b *testing.B) { m := NewMVMap() buffer := make([]byte, 8) - for i := 0; i < b.N; i++ { + for i := range b.N { binary.BigEndian.PutUint64(buffer, uint64(i)) m.Put(buffer, buffer) } @@ -32,13 +32,13 @@ func BenchmarkMVMapPut(b *testing.B) { func BenchmarkMVMapGet(b *testing.B) { m := NewMVMap() buffer := make([]byte, 8) - for i := 0; i < b.N; i++ { + for i := range b.N { binary.BigEndian.PutUint64(buffer, uint64(i)) m.Put(buffer, buffer) } val := make([][]byte, 0, 8) b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { binary.BigEndian.PutUint64(buffer, uint64(i)) val = m.Get(buffer, val[:0]) if len(val) != 1 || !bytes.Equal(val[0], buffer) { diff --git a/pkg/util/mvmap/mvmap.go 
b/pkg/util/mvmap/mvmap.go index 342d4d424f475..9466a9b4e61e6 100644 --- a/pkg/util/mvmap/mvmap.go +++ b/pkg/util/mvmap/mvmap.go @@ -158,7 +158,7 @@ func (m *MVMap) Get(key []byte, values [][]byte) [][]byte { values = append(values, val) } // Keep the order of input. - for i := 0; i < len(values)/2; i++ { + for i := range len(values) / 2 { j := len(values) - 1 - i values[i], values[j] = values[j], values[i] } diff --git a/pkg/util/mvmap/mvmap_test.go b/pkg/util/mvmap/mvmap_test.go index dbc5214925ec2..917ce45cd820c 100644 --- a/pkg/util/mvmap/mvmap_test.go +++ b/pkg/util/mvmap/mvmap_test.go @@ -39,7 +39,7 @@ func TestMVMap(t *testing.T) { results := []string{"abc abc1", "abc abc2", "def def1", "def def2"} it := m.NewIterator() - for i := 0; i < 4; i++ { + for i := range 4 { key, val := it.Next() require.Equal(t, results[i], fmt.Sprintf("%s %s", key, val)) } diff --git a/pkg/util/password-validation/password_validation.go b/pkg/util/password-validation/password_validation.go index 9646ed1d40635..b463c9ef38b71 100644 --- a/pkg/util/password-validation/password_validation.go +++ b/pkg/util/password-validation/password_validation.go @@ -94,7 +94,7 @@ func ValidatePasswordLowPolicy(pwd string, globalVars *variable.GlobalVarAccesso func ValidatePasswordMediumPolicy(pwd string, globalVars *variable.GlobalVarAccessor) (string, error) { var lowerCaseCount, upperCaseCount, numberCount, specialCharCount int64 runes := []rune(pwd) - for i := 0; i < len(runes); i++ { + for i := range runes { if unicode.IsUpper(runes[i]) { upperCaseCount++ } else if unicode.IsLower(runes[i]) { diff --git a/pkg/util/plancodec/codec.go b/pkg/util/plancodec/codec.go index 17de83b4bcbaf..ef6e8ae79d551 100644 --- a/pkg/util/plancodec/codec.go +++ b/pkg/util/plancodec/codec.go @@ -164,7 +164,7 @@ func (pd *planDecoder) buildPlanTree(planString string) (string, error) { // This is for alignment. pd.buf.WriteByte(separator) pd.buf.WriteString(string(pd.indents[i])) - for j := 0; j < len(p.fields); j++ { + for j := range p.fields { if j > 0 { pd.buf.WriteByte(separator) } @@ -198,13 +198,13 @@ func (pd *planDecoder) addPlanHeader() { func (pd *planDecoder) initPlanTreeIndents() { pd.indents = pd.indents[:0] - for i := 0; i < len(pd.depths); i++ { + for i := range pd.depths { indent := make([]rune, 2*pd.depths[i]) pd.indents = append(pd.indents, indent) if len(indent) == 0 { continue } - for i := 0; i < len(indent)-2; i++ { + for i := range len(indent) - 2 { indent[i] = ' ' } indent[len(indent)-2] = texttree.TreeLastNode @@ -263,7 +263,7 @@ func (pd *planDecoder) alignFields() { // Last field no need to align. 
fieldsLen-- var buf []byte - for colIdx := 0; colIdx < fieldsLen; colIdx++ { + for colIdx := range fieldsLen { maxFieldLen := pd.getMaxFieldLength(colIdx) for rowIdx, p := range pd.planInfos { fillLen := maxFieldLen - pd.getPlanFieldLen(rowIdx, colIdx, p) diff --git a/pkg/util/profile/profile.go b/pkg/util/profile/profile.go index a9e347b235d57..e11235446e99e 100644 --- a/pkg/util/profile/profile.go +++ b/pkg/util/profile/profile.go @@ -131,7 +131,7 @@ func (*Collector) ParseGoroutines(reader io.Reader) ([][]types.Datum, error) { } state := strings.Trim(headers[1], "[]") stack := strings.Split(strings.TrimSpace(goroutine[colIndex+1:]), "\n") - for i := 0; i < len(stack)/2; i++ { + for i := range len(stack) / 2 { fn := stack[i*2] loc := stack[i*2+1] var identifier string diff --git a/pkg/util/ranger/bench_test.go b/pkg/util/ranger/bench_test.go index 8a1f1848fdabd..b07adb7203932 100644 --- a/pkg/util/ranger/bench_test.go +++ b/pkg/util/ranger/bench_test.go @@ -130,7 +130,7 @@ WHERE b.ResetTimer() pctx := sctx.GetPlanCtx() - for i := 0; i < b.N; i++ { + for range b.N { _, err = ranger.DetachCondAndBuildRangeForIndex(pctx.GetRangerCtx(), conds, cols, lengths, 0) require.NoError(b, err) } diff --git a/pkg/util/ranger/ranger.go b/pkg/util/ranger/ranger.go index c0e53b63ef338..e18d3e0c4dd15 100644 --- a/pkg/util/ranger/ranger.go +++ b/pkg/util/ranger/ranger.go @@ -284,7 +284,7 @@ func appendPoints2Ranges(sctx *rangerctx.RangerContext, origin Ranges, rangePoin return origin, true, nil } var newIndexRanges Ranges - for i := 0; i < len(origin); i++ { + for i := range origin { oRange := origin[i] if !oRange.IsPoint(sctx) { newIndexRanges = append(newIndexRanges, oRange) @@ -484,7 +484,7 @@ func (d *rangeDetacher) buildRangeOnColsByCNFCond(newTp []*types.FieldType, eqAn rangeFallback bool err error ) - for i := 0; i < eqAndInCount; i++ { + for i := range eqAndInCount { // Build ranges for equal or in access conditions. point := rb.build(accessConds[i], newTp[i], d.lengths[i], d.convertToSortKey) if rb.err != nil { diff --git a/pkg/util/ranger/types.go b/pkg/util/ranger/types.go index 5b2e0d9491870..a97c2956f0908 100644 --- a/pkg/util/ranger/types.go +++ b/pkg/util/ranger/types.go @@ -269,7 +269,7 @@ func (ran *Range) Encode(ec errctx.Context, loc *time.Location, lowBuffer, highB // e.g. If this range is (1 2 3, 1 2 +inf), then the return value is 2. func (ran *Range) PrefixEqualLen(tc types.Context) (int, error) { // Here, len(ran.LowVal) always equal to len(ran.HighVal) - for i := 0; i < len(ran.LowVal); i++ { + for i := range len(ran.LowVal) { cmp, err := ran.LowVal[i].Compare(tc, &ran.HighVal[i], ran.Collators[i]) if err != nil { return 0, errors.Trace(err) @@ -343,7 +343,7 @@ func compareLexicographically(tc types.Context, bound1, bound2 []types.Datum, co n2 := len(bound2) n := min(n1, n2) - for i := 0; i < n; i++ { + for i := range n { cmp, err := bound1[i].Compare(tc, &bound2[i], collators[i]) if err != nil { return 0, err @@ -400,7 +400,7 @@ func compareLexicographically(tc types.Context, bound1, bound2 []types.Datum, co // Check if a list of Datum is a prefix of another list of Datum. This is useful for checking if // lower/upper bound of a range is a subset of another. 
func prefix(tc types.Context, superValue []types.Datum, supValue []types.Datum, length int, collators []collate.Collator) bool { - for i := 0; i < length; i++ { + for i := range length { cmp, err := superValue[i].Compare(tc, &supValue[i], collators[i]) if (err != nil) || (cmp != 0) { return false @@ -435,7 +435,7 @@ func (rs Ranges) Subset(tc types.Context, superRanges Ranges) bool { return false } } - for i := 0; i < len(superRangesCovered); i++ { + for i := range superRangesCovered { if !superRangesCovered[i] { return false } @@ -449,7 +449,7 @@ func checkCollators(ran1 *Range, ran2 *Range, length int) bool { // The current code path for this function always will have same collation // for ran and superRange. It is added here for future // use of the function. - for i := 0; i < length; i++ { + for i := range length { if ran1.Collators[i] != ran2.Collators[i] { return false } diff --git a/pkg/util/regexpr-router/regexpr_router_test.go b/pkg/util/regexpr-router/regexpr_router_test.go index 905d3c3f66fff..7bf47790055d6 100644 --- a/pkg/util/regexpr-router/regexpr_router_test.go +++ b/pkg/util/regexpr-router/regexpr_router_test.go @@ -330,7 +330,7 @@ func TestAllRule(t *testing.T) { require.Equal(t, 1, len(schemaRules)) require.Equal(t, 3, len(tableRules)) require.Equal(t, rules[0].SchemaPattern, schemaRules[0].SchemaPattern) - for i := 0; i < 3; i++ { + for i := range 3 { require.Equal(t, rules[i+1].SchemaPattern, tableRules[i].SchemaPattern) require.Equal(t, rules[i+1].TablePattern, tableRules[i].TablePattern) } diff --git a/pkg/util/resourcegrouptag/resource_group_tag_test.go b/pkg/util/resourcegrouptag/resource_group_tag_test.go index cfcaa3cd1d58e..34ee181e9e4d7 100644 --- a/pkg/util/resourcegrouptag/resource_group_tag_test.go +++ b/pkg/util/resourcegrouptag/resource_group_tag_test.go @@ -187,7 +187,7 @@ func TestGetFirstKeyFromRequest(t *testing.T) { func genRandHex(length int) []byte { const chars = "0123456789abcdef" res := make([]byte, length) - for i := 0; i < length; i++ { + for i := range length { res[i] = chars[rand.Intn(len(chars))] } return res diff --git a/pkg/util/rowcodec/bench_test.go b/pkg/util/rowcodec/bench_test.go index cfc78e3434fcd..2e342134200ac 100644 --- a/pkg/util/rowcodec/bench_test.go +++ b/pkg/util/rowcodec/bench_test.go @@ -40,7 +40,7 @@ func BenchmarkChecksum(b *testing.B) { {&model.ColumnInfo{ID: 3, FieldType: *tp3}, &datums[2]}, } row := rowcodec.RowData{Cols: cols} - for i := 0; i < b.N; i++ { + for range b.N { _, err := row.Checksum(time.Local) if err != nil { b.Fatal(err) @@ -55,7 +55,7 @@ func BenchmarkEncode(b *testing.B) { var buf []byte colIDs := []int64{1, 2, 3} var err error - for i := 0; i < b.N; i++ { + for range b.N { buf = buf[:0] buf, err = xb.Encode(nil, colIDs, oldRow, nil, buf) if err != nil { @@ -73,7 +73,7 @@ func BenchmarkEncodeFromOldRow(b *testing.B) { } var xb rowcodec.Encoder var buf []byte - for i := 0; i < b.N; i++ { + for range b.N { buf, err = rowcodec.EncodeFromOldRow(&xb, nil, oldRowData, buf) if err != nil { b.Fatal(err) @@ -104,7 +104,7 @@ func BenchmarkDecode(b *testing.B) { } decoder := rowcodec.NewChunkDecoder(cols, []int64{-1}, nil, time.Local) chk := chunk.NewChunkWithCapacity(tps, 1) - for i := 0; i < b.N; i++ { + for range b.N { chk.Reset() err = decoder.DecodeToChunk(xRowData, kv.IntHandle(1), chk) if err != nil { diff --git a/pkg/util/rowcodec/encoder.go b/pkg/util/rowcodec/encoder.go index 6ba51fb625907..b7832192557ba 100644 --- a/pkg/util/rowcodec/encoder.go +++ b/pkg/util/rowcodec/encoder.go @@ -140,7 +140,7 @@ 
func (encoder *Encoder) reformatCols() (numCols, notNullIdx int) { func (encoder *Encoder) encodeRowCols(loc *time.Location, numCols, notNullIdx int) error { r := &encoder.row var errs error - for i := 0; i < notNullIdx; i++ { + for i := range notNullIdx { d := encoder.values[i] var err error r.data, err = encodeValueDatum(loc, d, r.data) @@ -150,7 +150,7 @@ func (encoder *Encoder) encodeRowCols(loc *time.Location, numCols, notNullIdx in // handle convert to large if len(r.data) > math.MaxUint16 && !r.large() { r.initColIDs32() - for j := 0; j < numCols; j++ { + for j := range numCols { r.colIDs32[j] = uint32(r.colIDs[j]) } r.initOffsets32() diff --git a/pkg/util/rowcodec/rowcodec_test.go b/pkg/util/rowcodec/rowcodec_test.go index e6c0e106eb1eb..8857491383f46 100644 --- a/pkg/util/rowcodec/rowcodec_test.go +++ b/pkg/util/rowcodec/rowcodec_test.go @@ -752,7 +752,7 @@ func TestVarintCompatibility(t *testing.T) { func TestCodecUtil(t *testing.T) { colIDs := []int64{1, 2, 3, 4} tps := make([]*types.FieldType, 4) - for i := 0; i < 3; i++ { + for i := range 3 { tps[i] = types.NewFieldType(mysql.TypeLonglong) } tps[3] = types.NewFieldType(mysql.TypeNull) @@ -802,7 +802,7 @@ func TestCodecUtil(t *testing.T) { func TestOldRowCodec(t *testing.T) { colIDs := []int64{1, 2, 3, 4} tps := make([]*types.FieldType, 4) - for i := 0; i < 3; i++ { + for i := range 3 { tps[i] = types.NewFieldType(mysql.TypeLonglong) } tps[3] = types.NewFieldType(mysql.TypeNull) @@ -829,7 +829,7 @@ func TestOldRowCodec(t *testing.T) { err = rd.DecodeToChunk(newRow, kv.IntHandle(-1), chk) require.NoError(t, err) row := chk.GetRow(0) - for i := 0; i < 3; i++ { + for i := range 3 { require.Equal(t, int64(i+1), row.GetInt64(i)) } } diff --git a/pkg/util/schemacmp/lattice.go b/pkg/util/schemacmp/lattice.go index 89d9b341a6e75..9f6a37850c9b2 100644 --- a/pkg/util/schemacmp/lattice.go +++ b/pkg/util/schemacmp/lattice.go @@ -638,7 +638,7 @@ func (a StringList) Compare(other Lattice) (int, error) { if minLen > len(b) { minLen = len(b) } - for i := 0; i < minLen; i++ { + for i := range minLen { if a[i] != b[i] { return 0, &IncompatibleError{ Msg: ErrMsgStringListElemMismatch, diff --git a/pkg/util/selection/selection_test.go b/pkg/util/selection/selection_test.go index 779c8b05677f6..78fbf41fea539 100644 --- a/pkg/util/selection/selection_test.go +++ b/pkg/util/selection/selection_test.go @@ -83,7 +83,7 @@ func TestSelectionWithSerialCase(t *testing.T) { func randomTestCase(size int) testSlice { data := make(testSlice, 0, size) - for i := 0; i < size; i++ { + for range size { data = append(data, rand.Int()%100) } return data @@ -91,7 +91,7 @@ func randomTestCase(size int) testSlice { func serialTestCase(size int) testSlice { data := make(testSlice, 0, size) - for i := 0; i < size; i++ { + for i := range size { data = append(data, i) } return data diff --git a/pkg/util/servermemorylimit/servermemorylimit.go b/pkg/util/servermemorylimit/servermemorylimit.go index 47022d23c6635..53b01d0d7a876 100644 --- a/pkg/util/servermemorylimit/servermemorylimit.go +++ b/pkg/util/servermemorylimit/servermemorylimit.go @@ -248,7 +248,7 @@ func (m *memoryOpsHistoryManager) GetRows() [][]types.Datum { }) } var zeroTime = time.Time{} - for i := 0; i < len(m.infos); i++ { + for i := range m.infos { pos := (m.offsets + i) % len(m.infos) info := m.infos[pos] if info.killTime.Equal(zeroTime) { diff --git a/pkg/util/servermemorylimit/servermemorylimit_test.go b/pkg/util/servermemorylimit/servermemorylimit_test.go index 841af29702b7f..faed46f767384 100644 --- 
a/pkg/util/servermemorylimit/servermemorylimit_test.go +++ b/pkg/util/servermemorylimit/servermemorylimit_test.go @@ -35,7 +35,7 @@ func TestMemoryUsageOpsHistory(t *testing.T) { info.Info = strconv.Itoa(6 * i) } - for i := 0; i < 3; i++ { + for i := range 3 { genInfo(i) GlobalMemoryOpsHistoryManager.recordOne(&info, time.Now(), uint64(i), uint64(2*i)) } @@ -54,7 +54,7 @@ func TestMemoryUsageOpsHistory(t *testing.T) { rows := GlobalMemoryOpsHistoryManager.GetRows() require.Equal(t, 3, len(rows)) - for i := 0; i < 3; i++ { + for i := range 3 { checkResult(rows[i], i) } // Test evict diff --git a/pkg/util/set/mem_aware_map_test.go b/pkg/util/set/mem_aware_map_test.go index 959e52830985a..fad3e75258ac9 100644 --- a/pkg/util/set/mem_aware_map_test.go +++ b/pkg/util/set/mem_aware_map_test.go @@ -33,10 +33,10 @@ var inputs = []struct { func memAwareIntMap(size int) int { var x int m := NewMemAwareMap[int, int]() - for j := 0; j < size; j++ { + for j := range size { m.Set(j, j) } - for j := 0; j < size; j++ { + for j := range size { x, _ = m.Get(j) } return x @@ -45,11 +45,11 @@ func memAwareIntMap(size int) int { func nativeIntMap(size int) int { var x int m := make(map[int]int) - for j := 0; j < size; j++ { + for j := range size { m[j] = j } - for j := 0; j < size; j++ { + for j := range size { x = m[j] } return x @@ -59,7 +59,7 @@ func BenchmarkMemAwareIntMap(b *testing.B) { for _, s := range inputs { b.Run("MemAwareIntMap_"+strconv.Itoa(s.input), func(b *testing.B) { var x int - for i := 0; i < b.N; i++ { + for range b.N { x = memAwareIntMap(s.input) } result = x @@ -71,7 +71,7 @@ func BenchmarkNativeIntMap(b *testing.B) { for _, s := range inputs { b.Run("NativeIntMap_"+strconv.Itoa(s.input), func(b *testing.B) { var x int - for i := 0; i < b.N; i++ { + for range b.N { x = nativeIntMap(s.input) } result = x diff --git a/pkg/util/set/set_with_memory_usage_test.go b/pkg/util/set/set_with_memory_usage_test.go index 6156e6a99f1c3..becfef39d9e55 100644 --- a/pkg/util/set/set_with_memory_usage_test.go +++ b/pkg/util/set/set_with_memory_usage_test.go @@ -39,9 +39,9 @@ func BenchmarkFloat64SetMemoryUsage(b *testing.B) { for _, c := range cases { b.Run(fmt.Sprintf("MapRows %v", c.rowNum), func(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { float64Set, _ := NewFloat64SetWithMemoryUsage() - for num := 0; num < c.rowNum; num++ { + for num := range c.rowNum { float64Set.Insert(float64(num)) } } @@ -68,9 +68,9 @@ func BenchmarkInt64SetMemoryUsage(b *testing.B) { for _, c := range cases { b.Run(fmt.Sprintf("MapRows %v", c.rowNum), func(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { int64Set, _ := NewInt64SetWithMemoryUsage() - for num := 0; num < c.rowNum; num++ { + for num := range c.rowNum { int64Set.Insert(int64(num)) } } @@ -97,9 +97,9 @@ func BenchmarkStringSetMemoryUsage(b *testing.B) { for _, c := range cases { b.Run(fmt.Sprintf("MapRows %v", c.rowNum), func(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { stringSet, _ := NewStringSetWithMemoryUsage() - for num := 0; num < c.rowNum; num++ { + for num := range c.rowNum { stringSet.Insert(strconv.Itoa(num)) } } diff --git a/pkg/util/slice/slice.go b/pkg/util/slice/slice.go index 7a21a6bd698fc..45f6a085d016e 100644 --- a/pkg/util/slice/slice.go +++ b/pkg/util/slice/slice.go @@ -19,7 +19,7 @@ import "reflect" // AnyOf returns true if any element in the slice matches the predict func. 
func AnyOf(s any, p func(int) bool) bool { l := reflect.ValueOf(s).Len() - for i := 0; i < l; i++ { + for i := range l { if p(i) { return true } diff --git a/pkg/util/sqlescape/utils_test.go b/pkg/util/sqlescape/utils_test.go index 9c2789a206653..f2b48a0e16833 100644 --- a/pkg/util/sqlescape/utils_test.go +++ b/pkg/util/sqlescape/utils_test.go @@ -464,25 +464,25 @@ func TestEscapeString(t *testing.T) { } func BenchmarkEscapeString(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { escapeSQL("select %?", "3") } } func BenchmarkUnderlyingString(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { escapeSQL("select %?", myStr("3")) } } func BenchmarkEscapeInt(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { escapeSQL("select %?", 3) } } func BenchmarkUnderlyingInt(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { escapeSQL("select %?", myInt(3)) } } diff --git a/pkg/util/stmtsummary/evicted_test.go b/pkg/util/stmtsummary/evicted_test.go index d5f18c7c61dd5..45ea79d1e5312 100644 --- a/pkg/util/stmtsummary/evicted_test.go +++ b/pkg/util/stmtsummary/evicted_test.go @@ -102,7 +102,7 @@ func TestMapToEvictedCountDatum(t *testing.T) { ssMap.beginTimeForCurInterval = now + interval // insert one statement per interval. - for i := 0; i < 50; i++ { + for range 50 { ssMap.AddStatement(generateAnyExecInfo()) ssMap.beginTimeForCurInterval += interval * 2 } @@ -261,7 +261,7 @@ func TestEvictedCountDetailed(t *testing.T) { require.NoError(t, err) // test stmtSummaryByDigest's history length - for i := 0; i < 100; i++ { + for i := range 100 { if i == 0 { require.Equal(t, 0, ssMap.summaryMap.Size()) } else { diff --git a/pkg/util/stmtsummary/statement_summary_test.go b/pkg/util/stmtsummary/statement_summary_test.go index 462c82f4c43df..60272545c68a5 100644 --- a/pkg/util/stmtsummary/statement_summary_test.go +++ b/pkg/util/stmtsummary/statement_summary_test.go @@ -936,7 +936,7 @@ func TestAddStatementParallel(t *testing.T) { stmtExecInfo1 := generateAnyExecInfo() // Add 32 times with different digest. - for i := 0; i < loops; i++ { + for i := range loops { stmtExecInfo1.Digest = fmt.Sprintf("digest%d", i) ssMap.AddStatement(stmtExecInfo1) } @@ -946,7 +946,7 @@ func TestAddStatementParallel(t *testing.T) { require.Len(t, datums, loops) } - for i := 0; i < threads; i++ { + for range threads { go addStmtFunc() } wg.Wait() @@ -975,7 +975,7 @@ func TestMaxStmtCount(t *testing.T) { // 100 digests stmtExecInfo1 := generateAnyExecInfo() loops := 100 - for i := 0; i < loops; i++ { + for i := range loops { stmtExecInfo1.Digest = fmt.Sprintf("digest%d", i) ssMap.AddStatement(stmtExecInfo1) } @@ -998,7 +998,7 @@ func TestMaxStmtCount(t *testing.T) { // Change to a bigger value. require.Nil(t, ssMap.SetMaxStmtCount(50)) - for i := 0; i < loops; i++ { + for i := range loops { stmtExecInfo1.Digest = fmt.Sprintf("digest%d", i) ssMap.AddStatement(stmtExecInfo1) } @@ -1006,7 +1006,7 @@ func TestMaxStmtCount(t *testing.T) { // Change to a smaller value. require.Nil(t, ssMap.SetMaxStmtCount(10)) - for i := 0; i < loops; i++ { + for i := range loops { stmtExecInfo1.Digest = fmt.Sprintf("digest%d", i) ssMap.AddStatement(stmtExecInfo1) } @@ -1072,12 +1072,12 @@ func TestSetMaxStmtCountParallel(t *testing.T) { stmtExecInfo1 := generateAnyExecInfo() // Add 32 times with different digest. 
- for i := 0; i < loops; i++ { + for i := range loops { stmtExecInfo1.Digest = fmt.Sprintf("digest%d", i) ssMap.AddStatement(stmtExecInfo1) } } - for i := 0; i < threads; i++ { + for range threads { wg.Run(addStmtFunc) } @@ -1176,7 +1176,7 @@ func TestEnableSummaryParallel(t *testing.T) { stmtExecInfo1 := generateAnyExecInfo() // Add 32 times with same digest. - for i := 0; i < loops; i++ { + for i := range loops { // Sometimes enable it and sometimes disable it. err := ssMap.SetEnabled(i%2 == 0) require.NoError(t, err) @@ -1188,7 +1188,7 @@ func TestEnableSummaryParallel(t *testing.T) { require.NoError(t, err) } - for i := 0; i < threads; i++ { + for range threads { go addStmtFunc() } // Ensure that there's no deadlocks. @@ -1299,7 +1299,7 @@ func TestSummaryHistory(t *testing.T) { planDigest: stmtExecInfo1.PlanDigest, resourceGroupName: stmtExecInfo1.ResourceGroupName, } - for i := 0; i < 11; i++ { + for i := range 11 { ssMap.beginTimeForCurInterval = now + int64(i+1)*10 ssMap.AddStatement(stmtExecInfo1) require.Equal(t, 1, ssMap.summaryMap.Size()) @@ -1337,7 +1337,7 @@ func TestSummaryHistory(t *testing.T) { require.NoError(t, err) }() // insert first digest - for i := 0; i < 6; i++ { + for i := range 6 { ssMap.beginTimeForCurInterval = now + int64(i)*10 ssMap.AddStatement(stmtExecInfo1) require.Equal(t, 1, ssMap.summaryMap.Size()) @@ -1472,7 +1472,7 @@ func TestAccessPrivilege(t *testing.T) { loops := 32 stmtExecInfo1 := generateAnyExecInfo() - for i := 0; i < loops; i++ { + for i := range loops { stmtExecInfo1.Digest = fmt.Sprintf("digest%d", i) ssMap.AddStatement(stmtExecInfo1) } diff --git a/pkg/util/stmtsummary/v2/reader.go b/pkg/util/stmtsummary/v2/reader.go index c33b5a3327a44..498c9c7437238 100644 --- a/pkg/util/stmtsummary/v2/reader.go +++ b/pkg/util/stmtsummary/v2/reader.go @@ -330,7 +330,7 @@ func (r *HistoryReader) scheduleTasks( waitParseAllDone := parseWg.Wait // Half of workers are scheduled to scan files and then parse lines. - for i := 0; i < concurrent/2; i++ { + for range concurrent / 2 { go func() { scanWorker.run(filesCh, linesCh, innerErrCh) scanDone() diff --git a/pkg/util/stmtsummary/v2/tests/table_test.go b/pkg/util/stmtsummary/v2/tests/table_test.go index 788c1c19aee07..8c0e8446fba20 100644 --- a/pkg/util/stmtsummary/v2/tests/table_test.go +++ b/pkg/util/stmtsummary/v2/tests/table_test.go @@ -657,7 +657,7 @@ func TestPlanCacheUnqualified(t *testing.T) { "select * from `t1` where `t1` . `a` > ( select ? from `t2` where `t2` . `b` < ? 
) 3 3 query has uncorrelated sub-queries is un-cacheable", "select database ( ) from `t1` 2 2 query has 'database' is un-cacheable")) - for i := 0; i < 100; i++ { + for range 100 { tk.MustExec(`execute st3`) tk.MustExec(`execute st4`) } @@ -669,7 +669,7 @@ func TestPlanCacheUnqualified(t *testing.T) { "select database ( ) from `t1` 102 102 query has 'database' is un-cacheable")) tk.MustExec(`set @x2=123`) - for i := 0; i < 20; i++ { + for range 20 { tk.MustExec(`execute st1 using @x1`) tk.MustExec(`execute st1 using @x2`) } diff --git a/pkg/util/stringutil/string_util.go b/pkg/util/stringutil/string_util.go index 91ca12d60e00b..6bd2d08d819f2 100644 --- a/pkg/util/stringutil/string_util.go +++ b/pkg/util/stringutil/string_util.go @@ -260,7 +260,7 @@ func matchRune(a, b rune) bool { func CompileLike2Regexp(str string) string { patChars, patTypes := CompilePattern(str, '\\') var result []rune - for i := 0; i < len(patChars); i++ { + for i := range patChars { switch patTypes[i] { case PatMatch: result = append(result, patChars[i]) @@ -496,7 +496,7 @@ func IsLowerASCII(c byte) bool { // LowerOneString lowers the ascii characters in a string func LowerOneString(str []byte) { strLen := len(str) - for i := 0; i < strLen; i++ { + for i := range strLen { if IsUpperASCII(str[i]) { str[i] = toLowerIfAlphaASCII(str[i]) } diff --git a/pkg/util/stringutil/string_util_test.go b/pkg/util/stringutil/string_util_test.go index 4b5b429dfec07..2c6fc10095343 100644 --- a/pkg/util/stringutil/string_util_test.go +++ b/pkg/util/stringutil/string_util_test.go @@ -233,7 +233,7 @@ func BenchmarkDoMatch(b *testing.B) { b.Run(v.pattern, func(b *testing.B) { patChars, patTypes := CompilePattern(v.pattern, escape) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { match := DoMatch(v.target, patChars, patTypes) if !match { b.Fatal("Match expected.") @@ -256,7 +256,7 @@ func BenchmarkDoMatchNegative(b *testing.B) { b.Run(v.pattern, func(b *testing.B) { patChars, patTypes := CompilePattern(v.pattern, escape) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { match := DoMatch(v.target, patChars, patTypes) if match { b.Fatal("Unmatch expected.") diff --git a/pkg/util/table-rule-selector/selector_test.go b/pkg/util/table-rule-selector/selector_test.go index f1bcf64f26b6e..7c13072087059 100644 --- a/pkg/util/table-rule-selector/selector_test.go +++ b/pkg/util/table-rule-selector/selector_test.go @@ -208,7 +208,7 @@ func testMatch(t *testing.T, s Selector) { for _, mc := range ts.matchCase { rules := s.Match(mc.schema, mc.table) expectedRules := make(RuleSet, 0, mc.matchedNum) - for i := 0; i < mc.matchedNum; i++ { + for i := range mc.matchedNum { rule := &dummyRule{quoteSchemaTable(mc.matchedRules[2*i], mc.matchedRules[2*i+1])} expectedRules = append(expectedRules, rule) } diff --git a/pkg/util/tokenlimiter.go b/pkg/util/tokenlimiter.go index 2e7924638a886..d82737de9860a 100644 --- a/pkg/util/tokenlimiter.go +++ b/pkg/util/tokenlimiter.go @@ -37,7 +37,7 @@ func (tl *TokenLimiter) Get() *Token { // NewTokenLimiter creates a TokenLimiter with count tokens. 
func NewTokenLimiter(count uint) *TokenLimiter { tl := &TokenLimiter{count: count, ch: make(chan *Token, count)} - for i := uint(0); i < count; i++ { + for range count { tl.ch <- &Token{} } diff --git a/pkg/util/topsql/collector/main_test.go b/pkg/util/topsql/collector/main_test.go index d2f1555d7be50..54a6efe36a725 100644 --- a/pkg/util/topsql/collector/main_test.go +++ b/pkg/util/topsql/collector/main_test.go @@ -70,7 +70,7 @@ func TestPProfCPUProfile(t *testing.T) { dataChLen := len(mc.dataCh) deltaLen := 0 topsqlstate.EnableTopSQL() - for i := 0; i < 10; i++ { + for range 10 { t1 := time.Now() data = <-mc.dataCh require.True(t, time.Since(t1) < interval*4) diff --git a/pkg/util/topsql/reporter/datamodel.go b/pkg/util/topsql/reporter/datamodel.go index 7b0b71b691848..9aef688198fc4 100644 --- a/pkg/util/topsql/reporter/datamodel.go +++ b/pkg/util/topsql/reporter/datamodel.go @@ -114,7 +114,7 @@ func (ts tsItems) Swap(i, j int) { } func (ts tsItems) sorted() bool { - for n := 0; n < len(ts)-1; n++ { + for n := range len(ts) - 1 { if ts[n].timestamp > ts[n+1].timestamp { return false } diff --git a/pkg/util/topsql/reporter/reporter_test.go b/pkg/util/topsql/reporter/reporter_test.go index 19cd6b3fa91ee..23708b196e3f1 100644 --- a/pkg/util/topsql/reporter/reporter_test.go +++ b/pkg/util/topsql/reporter/reporter_test.go @@ -311,14 +311,14 @@ func TestCollectAndTopN(t *testing.T) { func TestCollectCapacity(t *testing.T) { tsr, _ := setupRemoteTopSQLReporter(maxSQLNum, 60) registerSQL := func(n int) { - for i := 0; i < n; i++ { + for i := range n { key := []byte("sqlDigest" + strconv.Itoa(i)) value := "sqlNormalized" + strconv.Itoa(i) tsr.RegisterSQL(key, value, false) } } registerPlan := func(n int) { - for i := 0; i < n; i++ { + for i := range n { key := []byte("planDigest" + strconv.Itoa(i)) value := "planNormalized" + strconv.Itoa(i) tsr.RegisterPlan(key, value, false) @@ -326,7 +326,7 @@ func TestCollectCapacity(t *testing.T) { } genRecord := func(n int) []collector.SQLCPUTimeRecord { records := make([]collector.SQLCPUTimeRecord, 0, n) - for i := 0; i < n; i++ { + for i := range n { records = append(records, collector.SQLCPUTimeRecord{ SQLDigest: []byte("sqlDigest" + strconv.Itoa(i+1)), PlanDigest: []byte("planDigest" + strconv.Itoa(i+1)), @@ -397,8 +397,8 @@ func TestMultipleDataSinks(t *testing.T) { tsr := NewRemoteTopSQLReporter(mockPlanBinaryDecoderFunc, mockPlanBinaryCompressFunc) - var chs []chan *ReportData - for i := 0; i < 7; i++ { + chs := make([]chan *ReportData, 0, 7) + for range 7 { chs = append(chs, make(chan *ReportData, 1)) } dss := make([]DataSink, 0, len(chs)) @@ -528,7 +528,7 @@ func initializeCache(maxStatementsNum, interval int) (*RemoteTopSQLReporter, *mo func BenchmarkTopSQL_CollectAndIncrementFrequency(b *testing.B) { tsr, _ := initializeCache(maxSQLNum, 120) - for i := 0; i < b.N; i++ { + for i := range b.N { populateCache(tsr, 0, maxSQLNum, uint64(i)) } } @@ -537,7 +537,7 @@ func BenchmarkTopSQL_CollectAndEvict(b *testing.B) { tsr, _ := initializeCache(maxSQLNum, 120) begin := 0 end := maxSQLNum - for i := 0; i < b.N; i++ { + for i := range b.N { begin += maxSQLNum end += maxSQLNum populateCache(tsr, begin, end, uint64(i)) diff --git a/pkg/util/topsql/stmtstats/aggregator_test.go b/pkg/util/topsql/stmtstats/aggregator_test.go index c0f236b7c00b5..987deac63b329 100644 --- a/pkg/util/topsql/stmtstats/aggregator_test.go +++ b/pkg/util/topsql/stmtstats/aggregator_test.go @@ -26,7 +26,7 @@ import ( ) func Test_SetupCloseAggregator(t *testing.T) { - for n := 0; n < 3; 
n++ { + for range 3 { SetupAggregator() time.Sleep(100 * time.Millisecond) assert.False(t, globalAggregator.closed()) @@ -80,7 +80,7 @@ func Test_aggregator_run_close(t *testing.T) { assert.True(t, a.closed()) // randomly start and close - for i := 0; i < 100; i++ { + for range 100 { if rand.Intn(2) == 0 { a.start() } else { diff --git a/pkg/util/topsql/stmtstats/kv_exec_count_test.go b/pkg/util/topsql/stmtstats/kv_exec_count_test.go index 1d42ee74093ed..97d7d94942a01 100644 --- a/pkg/util/topsql/stmtstats/kv_exec_count_test.go +++ b/pkg/util/topsql/stmtstats/kv_exec_count_test.go @@ -27,12 +27,12 @@ func TestKvExecCounter(t *testing.T) { stats := CreateStatementStats() counter := stats.CreateKvExecCounter([]byte("SQL-1"), []byte("")) interceptor := counter.RPCInterceptor() - for n := 0; n < 10; n++ { + for range 10 { _, _ = interceptor.Wrap(func(target string, req *tikvrpc.Request) (*tikvrpc.Response, error) { return nil, nil })("TIKV-1", nil) } - for n := 0; n < 10; n++ { + for range 10 { _, _ = interceptor.Wrap(func(target string, req *tikvrpc.Request) (*tikvrpc.Response, error) { return nil, nil })("TIKV-2", nil) diff --git a/pkg/util/topsql/stmtstats/stmtstats_test.go b/pkg/util/topsql/stmtstats/stmtstats_test.go index f1e1b198dfbd6..7e3dd377d8a0a 100644 --- a/pkg/util/topsql/stmtstats/stmtstats_test.go +++ b/pkg/util/topsql/stmtstats/stmtstats_test.go @@ -179,14 +179,14 @@ func TestExecCounter_AddExecCount_Take(t *testing.T) { stats := CreateStatementStats() m := stats.Take() assert.Len(t, m, 0) - for n := 0; n < 1; n++ { + for range 1 { stats.OnExecutionBegin([]byte("SQL-1"), []byte("")) } - for n := 0; n < 2; n++ { + for range 2 { stats.OnExecutionBegin([]byte("SQL-2"), []byte("")) stats.OnExecutionFinished([]byte("SQL-2"), []byte(""), time.Second) } - for n := 0; n < 3; n++ { + for range 3 { stats.OnExecutionBegin([]byte("SQL-3"), []byte("")) stats.OnExecutionFinished([]byte("SQL-3"), []byte(""), time.Millisecond) } diff --git a/pkg/util/topsql/topsql.go b/pkg/util/topsql/topsql.go index 09d11fdadef9a..91c0d15883943 100644 --- a/pkg/util/topsql/topsql.go +++ b/pkg/util/topsql/topsql.go @@ -175,7 +175,7 @@ func MockHighCPULoad(sql string, sqlPrefixs []string, load int64) bool { if time.Since(start) > 12*time.Millisecond*time.Duration(load) { break } - for i := 0; i < 10e5; i++ { + for range int(10e5) { continue } } diff --git a/pkg/util/topsql/topsql_test.go b/pkg/util/topsql/topsql_test.go index 82588b31268cf..831edc52a7e57 100644 --- a/pkg/util/topsql/topsql_test.go +++ b/pkg/util/topsql/topsql_test.go @@ -149,7 +149,7 @@ func TestTopSQLReporter(t *testing.T) { }) } checkSQLPlanMap := map[string]struct{}{} - for retry := 0; retry < 5; retry++ { + for range 5 { server.WaitCollectCnt(recordsCnt, 1, time.Second*5) records := server.GetLatestRecords() for _, req := range records { @@ -397,7 +397,7 @@ func mockExecuteSQL(sql, plan string) { func mockExecute(d time.Duration) { start := time.Now() for { - for i := 0; i < 10e5; i++ { + for range int(10e5) { } if time.Since(start) > d { return diff --git a/pkg/util/tracing/noop_bench_test.go b/pkg/util/tracing/noop_bench_test.go index 4fcc8d28d1be0..7c111d54f00b2 100644 --- a/pkg/util/tracing/noop_bench_test.go +++ b/pkg/util/tracing/noop_bench_test.go @@ -23,7 +23,7 @@ import ( // BenchmarkNoopLogKV benchs the cost of noop's `LogKV`. 
func BenchmarkNoopLogKV(b *testing.B) { sp := noopSpan() - for i := 0; i < b.N; i++ { + for range b.N { sp.LogKV("event", "noop is finished") } } @@ -32,7 +32,7 @@ func BenchmarkNoopLogKV(b *testing.B) { // used with `fmt.Sprintf` func BenchmarkNoopLogKVWithF(b *testing.B) { sp := noopSpan() - for i := 0; i < b.N; i++ { + for range b.N { sp.LogKV("event", fmt.Sprintf("this is format %s", "noop is finished")) } } @@ -40,7 +40,7 @@ func BenchmarkNoopLogKVWithF(b *testing.B) { // BenchmarkSpanFromContext benchs the cost of `SpanFromContext`. func BenchmarkSpanFromContext(b *testing.B) { ctx := context.TODO() - for i := 0; i < b.N; i++ { + for range b.N { SpanFromContext(ctx) } } @@ -48,7 +48,7 @@ func BenchmarkSpanFromContext(b *testing.B) { // BenchmarkChildFromContext benchs the cost of `ChildSpanFromContxt`. func BenchmarkChildFromContext(b *testing.B) { ctx := context.TODO() - for i := 0; i < b.N; i++ { + for range b.N { ChildSpanFromContxt(ctx, "child") } } diff --git a/pkg/util/util.go b/pkg/util/util.go index ea85c64be4dc9..ded6218542d59 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -179,7 +179,7 @@ func PrintableASCII(b byte) bool { func FmtNonASCIIPrintableCharToHex(str string) string { var b bytes.Buffer b.Grow(len(str) * 2) - for i := 0; i < len(str); i++ { + for i := range len(str) { if PrintableASCII(str[i]) { b.WriteByte(str[i]) continue @@ -255,7 +255,7 @@ func ReadLine(reader *bufio.Reader, maxLineSize int) ([]byte, error) { // maxLineSize specifies the maximum size of a single line. func ReadLines(reader *bufio.Reader, count int, maxLineSize int) ([][]byte, error) { lines := make([][]byte, 0, count) - for i := 0; i < count; i++ { + for range count { line, err := ReadLine(reader, maxLineSize) if err == io.EOF && len(lines) > 0 { return lines, nil diff --git a/pkg/util/wait_group_wrapper_test.go b/pkg/util/wait_group_wrapper_test.go index 1e0fabc06d3b0..561fe7e6d0e93 100644 --- a/pkg/util/wait_group_wrapper_test.go +++ b/pkg/util/wait_group_wrapper_test.go @@ -30,7 +30,7 @@ func TestWaitGroupWrapperRun(t *testing.T) { var expect int32 = 4 var val atomic.Int32 var wg WaitGroupWrapper - for i := int32(0); i < expect; i++ { + for range expect { wg.Run(func() { val.Inc() }) @@ -40,7 +40,7 @@ func TestWaitGroupWrapperRun(t *testing.T) { val.Store(0) wg2 := NewWaitGroupEnhancedWrapper("", nil, false) - for i := int32(0); i < expect; i++ { + for i := range expect { wg2.Run(func() { val.Inc() }, fmt.Sprintf("test_%v", i)) @@ -53,7 +53,7 @@ func TestWaitGroupWrapperRunWithRecover(t *testing.T) { var expect int32 = 2 var val atomic.Int32 var wg WaitGroupWrapper - for i := int32(0); i < expect; i++ { + for range expect { wg.RunWithRecover(func() { panic("test1") }, func(r any) { @@ -65,7 +65,7 @@ func TestWaitGroupWrapperRunWithRecover(t *testing.T) { val.Store(0) wg2 := NewWaitGroupEnhancedWrapper("", nil, false) - for i := int32(0); i < expect; i++ { + for i := range expect { wg2.RunWithRecover(func() { panic("test1") }, func(r any) { diff --git a/pkg/util/worker_pool.go b/pkg/util/worker_pool.go index 3f74844ad34da..4ff46ea1a5f63 100644 --- a/pkg/util/worker_pool.go +++ b/pkg/util/worker_pool.go @@ -35,7 +35,7 @@ type Worker struct { // NewWorkerPool returns a WorkPool. 
func NewWorkerPool(limit uint, name string) *WorkerPool { workers := make(chan *Worker, limit) - for i := uint(0); i < limit; i++ { + for i := range limit { workers <- &Worker{ID: uint64(i + 1)} } return &WorkerPool{ diff --git a/pkg/util/zeropool/pool_test.go b/pkg/util/zeropool/pool_test.go index 5a287dfae43c5..94f367dd82cc1 100644 --- a/pkg/util/zeropool/pool_test.go +++ b/pkg/util/zeropool/pool_test.go @@ -46,12 +46,12 @@ func TestPool(t *testing.T) { t.Run("is not racy", func(t *testing.T) { pool := zeropool.New(func() []byte { return make([]byte, 1024) }) - const iterations = 1e6 + const iterations int = 1e6 const concurrency = math.MaxUint8 var counter atomic.Int64 do := make(chan struct{}, 1e6) - for i := 0; i < iterations; i++ { + for range iterations { do <- struct{}{} } close(do) run := make(chan struct{}) done := sync.WaitGroup{} done.Add(concurrency) - for i := 0; i < concurrency; i++ { + for i := range concurrency { go func(worker int) { <-run for range do { @@ -117,7 +117,7 @@ func BenchmarkZeropoolPool(b *testing.B) { pool.Put(item) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { item := pool.Get() pool.Put(item) } @@ -134,7 +134,7 @@ func BenchmarkSyncPoolValue(b *testing.B) { pool.Put(item) //nolint:staticcheck // This allocates. b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { item := pool.Get().([]byte) pool.Put(item) //nolint:staticcheck // This allocates. } @@ -152,7 +152,7 @@ func BenchmarkSyncPoolNewPointer(b *testing.B) { pool.Put(item) //nolint:staticcheck // This allocates. b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { item := pool.Get().(*[]byte) buf := *item pool.Put(&buf) //nolint:staticcheck // New pointer. @@ -171,7 +171,7 @@ func BenchmarkSyncPoolPointer(b *testing.B) { pool.Put(item) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { item := pool.Get().(*[]byte) pool.Put(item) } diff --git a/tools/check/ut.go b/tools/check/ut.go index 295ab72be0302..8292c51c4e3c6 100644 --- a/tools/check/ut.go +++ b/tools/check/ut.go @@ -302,7 +302,7 @@ func cmdRun(args ...string) bool { taskCh := make(chan task, 100) works := make([]numa, p) var wg sync.WaitGroup - for i := 0; i < p; i++ { + for i := range p { wg.Add(1) go works[i].worker(&wg, taskCh) } @@ -427,7 +427,7 @@ func handleFlags(flag string) string { func handleFlag(f string) (found bool) { tmp := os.Args[:0] - for i := 0; i < len(os.Args); i++ { + for i := range len(os.Args) { if os.Args[i] == f { found = true continue } @@ -757,7 +757,7 @@ func (n *numa) runTestCase(pkg string, fn string) testResult { var buf bytes.Buffer var err error var start time.Time - for i := 0; i < 3; i++ { + for range 3 { cmd := n.testCommand(pkg, fn) cmd.Dir = filepath.Join(workDir, pkg) // Combine the test case output, so the run result for failed cases can be displayed. @@ -1009,7 +1009,7 @@ func filter(input []string, f func(string) bool) []string { } func shuffle(tasks []task) { - for i := 0; i < len(tasks); i++ { + for i := range len(tasks) { pos := rand.Intn(len(tasks)) tasks[i], tasks[pos] = tasks[pos], tasks[i] }