*: enable intrange linter (pingcap#56674)
hawkingrei authored Oct 28, 2024
1 parent c550aae commit 6a85c6b
Showing 118 changed files with 599 additions and 541 deletions.
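
The intrange linter flags classic three-clause loops that only count from 0 to n and suggests Go 1.22's range-over-int form, which is the rewrite applied across pkg/util in this diff. A minimal before/after sketch, illustrative only and not taken from the changed files:

```go
package main

import "fmt"

func main() {
	n := 3

	// Flagged by intrange: the index only walks 0..n-1.
	for i := 0; i < n; i++ {
		fmt.Println("classic", i)
	}

	// Suggested rewrite (requires Go 1.22+): the same iteration space.
	for i := range n {
		fmt.Println("range", i)
	}

	// When the index is unused, the loop variable can be dropped entirely.
	for range n {
		fmt.Println("tick")
	}
}
```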
1 change: 1 addition & 0 deletions .golangci.yml
@@ -26,6 +26,7 @@ linters:
- revive
- lll
- gofmt
- intrange

linters-settings:
staticcheck:
1 change: 1 addition & 0 deletions build/BUILD.bazel
@@ -157,6 +157,7 @@ nogo(
"//build/linter/gci",
"//build/linter/gosec",
"//build/linter/ineffassign",
"//build/linter/intrange",
"//build/linter/makezero",
"//build/linter/mirror",
"//build/linter/misspell",
12 changes: 12 additions & 0 deletions build/linter/intrange/BUILD.bazel
@@ -0,0 +1,12 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "intrange",
srcs = ["analyzer.go"],
importpath = "github.com/pingcap/tidb/build/linter/intrange",
visibility = ["//visibility:public"],
deps = [
"//build/linter/util",
"@com_github_ckaznocha_intrange//:intrange",
],
)
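
This go_library is the target that the new "//build/linter/intrange" dependency in the nogo() rule above resolves to, so the wrapper defined below gets built into the nogo checker used for Bazel builds.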
28 changes: 28 additions & 0 deletions build/linter/intrange/analyzer.go
@@ -0,0 +1,28 @@
// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package intrange

import (
"github.com/ckaznocha/intrange"
"github.com/pingcap/tidb/build/linter/util"
)

// Analyzer is the analyzer struct of intrange.
var Analyzer = intrange.Analyzer

func init() {
util.SkipAnalyzerByConfig(Analyzer)
util.SkipAnalyzer(Analyzer)
}
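
The wrapper simply re-exports intrange.Analyzer so nogo can register it alongside TiDB's other linters. For ad-hoc use outside the Bazel/nogo setup, the same upstream analyzer can also be driven by singlechecker. A hypothetical standalone sketch, not part of this commit:

```go
// Hypothetical helper, not in the TiDB tree: runs the upstream intrange
// analyzer directly against packages given on the command line,
// e.g. `go run ./cmd/intrange ./pkg/util/...` (path is made up).
package main

import (
	"github.com/ckaznocha/intrange"
	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	singlechecker.Main(intrange.Analyzer)
}
```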
12 changes: 12 additions & 0 deletions build/nogo_config.json
@@ -346,6 +346,18 @@
"/cgo/": "no need to vet cgo code"
}
},
"intrange": {
"exclude_files": {
"pkg/parser/parser.go": "parser/parser.go code",
"external/": "no need to vet third party code",
".*_generated\\.go$": "ignore generated code",
".*mock.go$": "ignore generated code",
"/cgo/": "no need to vet cgo code"
},
"only_files": {
"pkg/util/": "util code"
}
},
"inspect": {
"exclude_files": {
"pkg/parser/parser.go": "parser/parser.go code",
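
One detail worth noting from the intrange entry above: unlike most linters in nogo_config.json, it also sets only_files, so under nogo the new check is effectively scoped to pkg/util/, which is consistent with the fact that every Go file rewritten in this commit lives under pkg/util.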
1 change: 1 addition & 0 deletions go.mod
@@ -26,6 +26,7 @@ require (
github.com/charithe/durationcheck v0.0.10
github.com/cheggaaa/pb/v3 v3.0.8
github.com/cheynewallace/tabby v1.1.1
github.com/ckaznocha/intrange v0.2.1
github.com/cloudfoundry/gosigar v1.3.6
github.com/cockroachdb/pebble v1.1.0
github.com/coocood/freecache v1.2.1
2 changes: 2 additions & 0 deletions go.sum
@@ -162,6 +162,8 @@ github.com/cheynewallace/tabby v1.1.1/go.mod h1:Pba/6cUL8uYqvOc9RkyvFbHGrQ9wShyr
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/ckaznocha/intrange v0.2.1 h1:M07spnNEQoALOJhwrImSrJLaxwuiQK+hA2DeajBlwYk=
github.com/ckaznocha/intrange v0.2.1/go.mod h1:7NEhVyf8fzZO5Ds7CRaqPEm52Ut83hsTiL5zbER/HYk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudfoundry/gosigar v1.3.6 h1:gIc08FbB3QPb+nAQhINIK/qhf5REKkY0FTGgRGXkcVc=
github.com/cloudfoundry/gosigar v1.3.6/go.mod h1:lNWstu5g5gw59O09Y+wsMNFzBSnU8a0u+Sfx4dq360E=
6 changes: 3 additions & 3 deletions pkg/util/backoff/backoff_test.go
@@ -23,16 +23,16 @@ import (

func TestExponential(t *testing.T) {
backoffer := NewExponential(1, 1, 1)
for i := 0; i < 10; i++ {
for i := range 10 {
require.Equal(t, time.Duration(1), backoffer.Backoff(i))
}
backoffer = NewExponential(1, 1, 10)
for i := 0; i < 10; i++ {
for i := range 10 {
require.Equal(t, time.Duration(1), backoffer.Backoff(i))
}
backoffer = NewExponential(1, 2, 10)
res := []time.Duration{1, 2, 4, 8, 10, 10, 10, 10, 10, 10}
for i := 0; i < 10; i++ {
for i := range 10 {
require.Equal(t, res[i], backoffer.Backoff(i))
}
}
2 changes: 1 addition & 1 deletion pkg/util/bitmap/concurrent.go
@@ -41,7 +41,7 @@ type ConcurrentBitmap struct {
func (cb *ConcurrentBitmap) Clone() *ConcurrentBitmap {
cp := NewConcurrentBitmap(cb.bitLen)
needLen := len(cp.segments)
for i := 0; i < needLen; i++ {
for i := range needLen {
cp.segments[i] = cb.segments[i]
}
return cp
8 changes: 4 additions & 4 deletions pkg/util/bitmap/concurrent_test.go
@@ -28,7 +28,7 @@ func TestConcurrentBitmapSet(t *testing.T) {

bm := NewConcurrentBitmap(loopCount * interval)
wg := &sync.WaitGroup{}
for i := 0; i < loopCount; i++ {
for i := range loopCount {
wg.Add(1)
go func(bitIndex int) {
bm.Set(bitIndex)
@@ -37,7 +37,7 @@
}
wg.Wait()

for i := 0; i < loopCount; i++ {
for i := range loopCount {
if i%interval == 0 {
assert.Equal(t, true, bm.UnsafeIsSet(i))
} else {
@@ -57,13 +57,13 @@ func TestConcurrentBitmapUniqueSetter(t *testing.T) {
var setterCounter uint64
var clearCounter uint64
// Concurrently set bit, and check if isSetter count matches zero clearing count.
for i := 0; i < loopCount; i++ {
for range loopCount {
// Clear bitmap to zero.
if atomic.CompareAndSwapUint32(&(bm.segments[0]), 0x00000001, 0x00000000) {
atomic.AddUint64(&clearCounter, 1)
}
// Concurrently set.
for j := 0; j < competitorsPerSet; j++ {
for range competitorsPerSet {
wg.Add(1)
go func() {
if bm.Set(31) {
2 changes: 1 addition & 1 deletion pkg/util/cgroup/cgroup_cpu_test.go
@@ -73,7 +73,7 @@ func TestGetCgroupCPU(t *testing.T) {
}
exit := make(chan struct{})
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
for range 10 {
wg.Add(1)
go func() {
defer wg.Done()
2 changes: 1 addition & 1 deletion pkg/util/cgroup/cgroup_mock_test.go
@@ -378,7 +378,7 @@ const (
)

func TestCgroupsGetCPU(t *testing.T) {
for i := 0; i < 2; i++ {
for i := range 2 {
if i == 1 {
// The field in /proc/self/cgroup and /proc/self/mountinfo may appear as "cpuacct,cpu" or "rw,cpuacct,cpu"
// while the input controller is "cpu,cpuacct"
2 changes: 1 addition & 1 deletion pkg/util/checksum/checksum_test.go
@@ -416,7 +416,7 @@ func TestChecksumWriterAutoFlush(t *testing.T) {
func newTestBuff(str string, n int) *bytes.Buffer {
buf := bytes.NewBuffer(nil)
testData := str
for i := 0; i < n; i++ {
for range n {
buf.WriteString(testData)
}
return buf
26 changes: 13 additions & 13 deletions pkg/util/chunk/alloc_test.go
@@ -69,7 +69,7 @@ func TestAllocator(t *testing.T) {
check()

// Check maxFreeListLen
for i := 0; i < maxFreeChunks+10; i++ {
for range maxFreeChunks + 10 {
alloc.Alloc(fieldTypes, initCap, maxChunkSize)
}
alloc.Reset()
@@ -105,7 +105,7 @@ func TestColumnAllocator(t *testing.T) {
ft := fieldTypes[2]
// Test reuse.
cols := make([]*Column, 0, maxFreeColumnsPerType+10)
for i := 0; i < maxFreeColumnsPerType+10; i++ {
for range maxFreeColumnsPerType + 10 {
col := alloc1.NewColumn(ft, 20)
cols = append(cols, col)
}
@@ -135,7 +135,7 @@ func TestNoDuplicateColumnReuse(t *testing.T) {
types.NewFieldType(mysql.TypeDatetime),
}
alloc := NewAllocator()
for i := 0; i < maxFreeChunks+10; i++ {
for range maxFreeChunks + 10 {
chk := alloc.Alloc(fieldTypes, 5, 10)
chk.MakeRef(1, 3)
}
@@ -170,7 +170,7 @@ func TestAvoidColumnReuse(t *testing.T) {
types.NewFieldTypeBuilder().SetType(mysql.TypeDatetime).BuildP(),
}
alloc := NewAllocator()
for i := 0; i < maxFreeChunks+10; i++ {
for range maxFreeChunks + 10 {
chk := alloc.Alloc(fieldTypes, 5, 10)
for _, col := range chk.columns {
col.avoidReusing = true
@@ -186,7 +186,7 @@

// test decoder will set avoid reusing flag.
chk := alloc.Alloc(fieldTypes, 5, 1024)
for i := 0; i <= 10; i++ {
for range 10 {
for _, col := range chk.columns {
col.AppendNull()
}
@@ -220,7 +220,7 @@ func TestColumnAllocatorLimit(t *testing.T) {
InitChunkAllocSize(10, 20)
alloc := NewAllocator()
require.True(t, alloc.CheckReuseAllocSize())
for i := 0; i < maxFreeChunks+10; i++ {
for range maxFreeChunks + 10 {
alloc.Alloc(fieldTypes, 5, 10)
}
alloc.Reset()
@@ -232,7 +232,7 @@ func TestColumnAllocatorLimit(t *testing.T) {
//Reduce capacity
InitChunkAllocSize(5, 10)
alloc = NewAllocator()
for i := 0; i < maxFreeChunks+10; i++ {
for range maxFreeChunks + 10 {
alloc.Alloc(fieldTypes, 5, 10)
}
alloc.Reset()
@@ -244,7 +244,7 @@ func TestColumnAllocatorLimit(t *testing.T) {
//increase capacity
InitChunkAllocSize(50, 100)
alloc = NewAllocator()
for i := 0; i < maxFreeChunks+10; i++ {
for range maxFreeChunks + 10 {
alloc.Alloc(fieldTypes, 5, 10)
}
alloc.Reset()
@@ -259,7 +259,7 @@ func TestColumnAllocatorLimit(t *testing.T) {
nu := len(alloc.columnAlloc.pool[VarElemLen].allocColumns)
require.Equal(t, nu, 1)
for _, col := range rs.columns {
for i := 0; i < 20480; i++ {
for range 20480 {
col.data = append(col.data, byte('a'))
}
}
@@ -280,7 +280,7 @@ func TestColumnAllocatorCheck(t *testing.T) {
}
InitChunkAllocSize(10, 20)
alloc := NewAllocator()
for i := 0; i < 4; i++ {
for range 4 {
alloc.Alloc(fieldTypes, 5, 10)
}
col := alloc.columnAlloc.NewColumn(types.NewFieldTypeBuilder().SetType(mysql.TypeFloat).BuildP(), 10)
@@ -343,11 +343,11 @@ func TestSyncAllocator(t *testing.T) {
alloc := NewSyncAllocator(NewAllocator())

wg := &sync.WaitGroup{}
for i := 0; i < 1000; i++ {
for range 1000 {
wg.Add(1)
go func() {
for j := 0; j < 10; j++ {
for k := 0; k < 100; k++ {
for range 10 {
for range 100 {
chk := alloc.Alloc(fieldTypes, 5, 100)
require.NotNil(t, chk)
}
10 changes: 5 additions & 5 deletions pkg/util/chunk/chunk.go
@@ -156,7 +156,7 @@ func renewEmpty(chk *Chunk) *Chunk {
}

func (c *Chunk) resetForReuse() {
for i := 0; i < len(c.columns); i++ {
for i := range len(c.columns) {
c.columns[i] = nil
}
columns := c.columns[:0]
@@ -245,12 +245,12 @@ func (c *Chunk) SwapColumn(colIdx int, other *Chunk, otherIdx int) error {
}
// Find the leftmost Column of the reference which is the actual Column to
// be swapped.
for i := 0; i < colIdx; i++ {
for i := range colIdx {
if c.columns[i] == c.columns[colIdx] {
colIdx = i
}
}
for i := 0; i < otherIdx; i++ {
for i := range otherIdx {
if other.columns[i] == other.columns[otherIdx] {
otherIdx = i
}
@@ -699,8 +699,8 @@ func (c *Chunk) Reconstruct() {

// ToString returns all the values in a chunk.
func (c *Chunk) ToString(ft []*types.FieldType) string {
var buf []byte
for rowIdx := 0; rowIdx < c.NumRows(); rowIdx++ {
buf := make([]byte, 0, c.NumRows()*2)
for rowIdx := range c.NumRows() {
row := c.GetRow(rowIdx)
buf = append(buf, row.ToString(ft)...)
buf = append(buf, '\n')
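
Besides the loop rewrite, ToString now pre-sizes its byte buffer instead of starting from a nil slice, so repeated appends avoid early re-allocations of the backing array. A small sketch of the same idiom, with made-up names rather than the chunk package's:

```go
package main

import "fmt"

func main() {
	rows := 4

	// Reserve capacity up front; append can still grow the slice if the
	// rows turn out to be longer than the estimate.
	buf := make([]byte, 0, rows*2)
	for rowIdx := range rows {
		buf = append(buf, fmt.Sprintf("row-%d", rowIdx)...)
		buf = append(buf, '\n')
	}
	fmt.Print(string(buf))
}
```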
6 changes: 3 additions & 3 deletions pkg/util/chunk/chunk_in_disk.go
@@ -194,7 +194,7 @@ func (d *DataInDiskByChunks) serializeChunkData(pos *int64, chk *Chunk, selSize
d.buf = d.buf[:*pos+selSize]

selLen := len(chk.sel)
for i := 0; i < selLen; i++ {
for i := range selLen {
*(*int)(unsafe.Pointer(&d.buf[*pos])) = chk.sel[i]
*pos += intLen
}
@@ -265,7 +265,7 @@ func (d *DataInDiskByChunks) deserializeColMeta(pos *int64) (length int64, nullM
func (d *DataInDiskByChunks) deserializeSel(chk *Chunk, pos *int64, selSize int) {
selLen := int64(selSize) / intLen
chk.sel = make([]int, selLen)
for i := int64(0); i < selLen; i++ {
for i := range selLen {
chk.sel[i] = *(*int)(unsafe.Pointer(&d.buf[*pos]))
*pos += intLen
}
@@ -290,7 +290,7 @@ func (d *DataInDiskByChunks) deserializeChunkData(chk *Chunk, pos *int64) {

func (d *DataInDiskByChunks) deserializeOffsets(dst []int64, pos *int64) {
offsetNum := len(dst)
for i := 0; i < offsetNum; i++ {
for i := range offsetNum {
dst[i] = *(*int64)(unsafe.Pointer(&d.buf[*pos]))
*pos += int64Len
}
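
These helpers read fixed-width integers straight out of the on-disk buffer through unsafe.Pointer; the linter change only touches the loop headers. Below is a self-contained sketch of that read/write pattern with made-up names, assuming (as in DataInDiskByChunks) that the buffer is produced and consumed by the same process:

```go
// Illustrative only (hypothetical names): round-trip int64 values through a
// byte buffer using the same unsafe.Pointer pattern as deserializeOffsets.
package main

import (
	"fmt"
	"unsafe"
)

const int64Len = int64(unsafe.Sizeof(int64(0)))

func writeInt64s(buf []byte, pos *int64, vals []int64) {
	for i := range vals {
		*(*int64)(unsafe.Pointer(&buf[*pos])) = vals[i]
		*pos += int64Len
	}
}

func readInt64s(buf []byte, pos *int64, dst []int64) {
	for i := range dst {
		dst[i] = *(*int64)(unsafe.Pointer(&buf[*pos]))
		*pos += int64Len
	}
}

func main() {
	vals := []int64{7, 42, -1}
	buf := make([]byte, int64Len*int64(len(vals)))

	var wpos int64
	writeInt64s(buf, &wpos, vals)

	got := make([]int64, len(vals))
	var rpos int64
	readInt64s(buf, &rpos, got)

	fmt.Println(got) // [7 42 -1]
}
```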
8 changes: 4 additions & 4 deletions pkg/util/chunk/chunk_in_disk_test.go
@@ -36,7 +36,7 @@ func addAuxDataForChunks(chunks []*Chunk) {

selLen := rand.Intn(50) + 1
chk.sel = make([]int, selLen)
for i := 0; i < selLen; i++ {
for i := range selLen {
chk.sel[i] = rand.Int()
}
}
@@ -49,7 +49,7 @@ func checkAuxDataForChunk(t *testing.T, chk1, chk2 *Chunk) {
require.Equal(t, len(chk1.sel), len(chk2.sel))

length := len(chk1.sel)
for i := 0; i < length; i++ {
for i := range length {
require.Equal(t, chk1.sel[i], chk2.sel[i])
}
}
@@ -61,7 +61,7 @@ func checkChunk(t *testing.T, chk1, chk2 *Chunk) {

require.Equal(t, chk1.NumRows(), chk2.NumRows())
numRows := chk1.NumRows()
for i := 0; i < numRows; i++ {
for i := range numRows {
checkRow(t, chk1.GetRow(i), chk2.GetRow(i))
}
}
@@ -78,7 +78,7 @@ func TestDataInDiskByChunks(t *testing.T) {
require.NoError(t, err)
}

for i := 0; i < numChk; i++ {
for i := range numChk {
chk, err := dataInDiskByChunks.GetChunk(i)
require.NoError(t, err)
checkChunk(t, chk, chks[i])