Skip to content

Commit

Permalink
Merge branch 'master' into support_meta_delete
Browse files Browse the repository at this point in the history
  • Loading branch information
HuSharp authored Feb 26, 2024
2 parents a7ad4c6 + d21c626 commit 492fd2b
Show file tree
Hide file tree
Showing 7 changed files with 74 additions and 14 deletions.
7 changes: 5 additions & 2 deletions .golangci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -42,10 +42,13 @@ linters-settings:
- require-error
- suite-dont-use-pkg
- suite-extra-assert-call
disable:
- float-compare
- go-require
gofmt:
# https://golangci-lint.run/usage/linters/#gofmt
# disable for faster check
simplify: false
rewrite-rules:
- pattern: 'interface{}'
replacement: 'any'
- pattern: "interface{}"
replacement: "any"
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ SHELL := env PATH='$(PATH)' GOBIN='$(GO_TOOLS_BIN_PATH)' $(shell which bash)

install-tools:
@mkdir -p $(GO_TOOLS_BIN_PATH)
@which golangci-lint >/dev/null 2>&1 || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GO_TOOLS_BIN_PATH) v1.55.2
@which golangci-lint >/dev/null 2>&1 || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GO_TOOLS_BIN_PATH) v1.56.2
@grep '_' tools.go | sed 's/"//g' | awk '{print $$2}' | xargs go install

.PHONY: install-tools
Expand Down
12 changes: 6 additions & 6 deletions pkg/core/storelimit/limit_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -101,18 +101,18 @@ func TestWindow(t *testing.T) {
token := capacity + 10
re.True(s.take(token))
re.False(s.take(token))
re.EqualValues(s.ack(token), 0)
re.EqualValues(0, s.ack(token))
re.True(s.take(token))
re.EqualValues(s.ack(token), 0)
re.EqualValues(0, s.ack(token))
re.Equal(s.ack(token), token)
re.EqualValues(s.getUsed(), 0)
re.EqualValues(0, s.getUsed())

// case2: the capacity of the window must be greater than the minSnapSize.
s.reset(minSnapSize - 1)
re.EqualValues(s.capacity, minSnapSize)
re.EqualValues(minSnapSize, s.capacity)
re.True(s.take(minSnapSize))
re.EqualValues(s.ack(minSnapSize*2), minSnapSize)
re.EqualValues(s.getUsed(), 0)
re.EqualValues(minSnapSize, s.ack(minSnapSize*2))
re.EqualValues(0, s.getUsed())
}

func TestFeedback(t *testing.T) {
Expand Down
55 changes: 55 additions & 0 deletions pkg/schedule/schedulers/hot_region_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -203,6 +203,7 @@ func TestHotWriteRegionScheduleByteRateOnly(t *testing.T) {
statisticsInterval = 0
checkHotWriteRegionScheduleByteRateOnly(re, false /* disable placement rules */)
checkHotWriteRegionScheduleByteRateOnly(re, true /* enable placement rules */)
checkHotWriteRegionPlacement(re, true)
}

func TestSplitIfRegionTooHot(t *testing.T) {
Expand Down Expand Up @@ -393,6 +394,60 @@ func TestSplitBucketsByLoad(t *testing.T) {
}
}

// checkHotWriteRegionPlacement verifies that the hot-write scheduler keeps the
// leader inside z1 while the placement rules pin the leader to z1 and the
// remaining replicas to z2: the produced operators must not transfer the
// leader out of the rule-constrained zone.
func checkHotWriteRegionPlacement(re *require.Assertions, enablePlacementRules bool) {
	cancel, _, tc, oc := prepareSchedulersTest()
	defer cancel()
	tc.SetEnableUseJointConsensus(true)
	tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.ConfChangeV2))
	tc.SetEnablePlacementRules(enablePlacementRules)
	labels := []string{"zone", "host"}
	tc.SetMaxReplicasWithLabel(enablePlacementRules, 3, labels...)
	hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil)
	re.NoError(err)
	tc.SetHotRegionCacheHitsThreshold(0)

	// Store 1-2 live in z1 (leader zone), stores 3-6 in z2 (voter zone).
	tc.AddLabelsStore(1, 2, map[string]string{"zone": "z1", "host": "h1"})
	tc.AddLabelsStore(2, 2, map[string]string{"zone": "z1", "host": "h2"})
	tc.AddLabelsStore(3, 2, map[string]string{"zone": "z2", "host": "h3"})
	tc.AddLabelsStore(4, 2, map[string]string{"zone": "z2", "host": "h4"})
	tc.AddLabelsStore(5, 2, map[string]string{"zone": "z2", "host": "h5"})
	tc.AddLabelsStore(6, 2, map[string]string{"zone": "z2", "host": "h6"})
	// Pin the leader to z1 and two follower replicas to z2; the implicit
	// "default" rule would conflict with this layout, so drop it.
	err = tc.RuleManager.SetRule(&placement.Rule{
		GroupID: "pd", ID: "leader", Role: placement.Leader, Count: 1, LabelConstraints: []placement.LabelConstraint{{Key: "zone", Op: "in", Values: []string{"z1"}}},
	})
	re.NoError(err)
	err = tc.RuleManager.SetRule(&placement.Rule{
		GroupID: "pd", ID: "voter", Role: placement.Follower, Count: 2, LabelConstraints: []placement.LabelConstraint{{Key: "zone", Op: "in", Values: []string{"z2"}}},
	})
	re.NoError(err)
	err = tc.RuleManager.DeleteRule("pd", "default")
	re.NoError(err)

	// Store 1 is the hot write source; stores 3/6 are moderately loaded so the
	// scheduler has an incentive to move load, but only within the rules.
	tc.UpdateStorageWrittenBytes(1, 10*units.MiB*utils.StoreHeartBeatReportInterval)
	tc.UpdateStorageWrittenBytes(2, 0)
	tc.UpdateStorageWrittenBytes(3, 6*units.MiB*utils.StoreHeartBeatReportInterval)
	tc.UpdateStorageWrittenBytes(4, 3*units.MiB*utils.StoreHeartBeatReportInterval)
	tc.UpdateStorageWrittenBytes(5, 3*units.MiB*utils.StoreHeartBeatReportInterval)
	tc.UpdateStorageWrittenBytes(6, 6*units.MiB*utils.StoreHeartBeatReportInterval)

	// Region 1, 2 and 3 are hot regions.
	addRegionInfo(tc, utils.Write, []testRegionInfo{
		{1, []uint64{1, 3, 5}, 512 * units.KiB, 0, 0},
		{2, []uint64{1, 4, 6}, 512 * units.KiB, 0, 0},
		{3, []uint64{1, 3, 6}, 512 * units.KiB, 0, 0},
	})
	ops, _ := hb.Schedule(tc, false)
	re.NotEmpty(ops)
	// With a dedicated leader rule, the scheduler must not move the leader away.
	re.NotContains(ops[0].Step(1).String(), "transfer leader")
	clearPendingInfluence(hb.(*hotScheduler))

	// Relax the follower rule to a generic voter rule covering z2.
	err = tc.RuleManager.SetRule(&placement.Rule{
		GroupID: "pd", ID: "voter", Role: placement.Voter, Count: 2, LabelConstraints: []placement.LabelConstraint{{Key: "zone", Op: "in", Values: []string{"z2"}}},
	})
	re.NoError(err)
	// NOTE(review): no rule with ID "follower" was ever created (the follower
	// rule above reused ID "voter"), so this delete is presumably a no-op —
	// confirm the intended rule ID.
	err = tc.RuleManager.DeleteRule("pd", "follower")
	re.NoError(err)
	ops, _ = hb.Schedule(tc, false)
	re.NotEmpty(ops)
	// TODO: fix the test
	// re.NotContains(ops[0].Step(1).String(), "transfer leader")
}

func checkHotWriteRegionScheduleByteRateOnly(re *require.Assertions, enablePlacementRules bool) {
cancel, opt, tc, oc := prepareSchedulersTest()
defer cancel()
Expand Down
6 changes: 4 additions & 2 deletions pkg/utils/etcdutil/health_checker.go
Original file line number Diff line number Diff line change
Expand Up @@ -274,12 +274,14 @@ func (checker *healthChecker) updateEvictedEps(lastEps, pickedEps []string) {
pickedSet[ep] = true
}
// Reset the count to 0 if it's in evictedEps but not in the pickedEps.
checker.evictedEps.Range(func(key, _ any) bool {
checker.evictedEps.Range(func(key, value any) bool {
ep := key.(string)
if !pickedSet[ep] {
count := value.(int)
if count > 0 && !pickedSet[ep] {
checker.evictedEps.Store(ep, 0)
log.Info("reset evicted etcd endpoint picked count",
zap.String("endpoint", ep),
zap.Int("previous-count", count),
zap.String("source", checker.source))
}
return true
Expand Down
4 changes: 2 additions & 2 deletions pkg/utils/typeutil/clone_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,9 @@ func TestDeepClone(t *testing.T) {
re := assert.New(t)
src := &fate{ID: 1}
dst := DeepClone(src, fateFactory)
re.EqualValues(dst.ID, 1)
re.EqualValues(1, dst.ID)
dst.ID = 2
re.EqualValues(src.ID, 1)
re.EqualValues(1, src.ID)

// case2: the source is nil
var src2 *fate
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -679,7 +679,7 @@ func (suite *resourceManagerClientTestSuite) TestResourcePenalty() {
_, penalty, _, _, err = c.OnRequestWait(suite.ctx, resourceGroupName, req2)
re.NoError(err)
re.Equal(60.0, penalty.WriteBytes)
re.InEpsilon(penalty.TotalCpuTimeMs, 10.0/1000.0/1000.0, 1e-6)
re.InEpsilon(10.0/1000.0/1000.0, penalty.TotalCpuTimeMs, 1e-6)
_, err = c.OnResponse(resourceGroupName, req2, resp2)
re.NoError(err)

Expand Down

0 comments on commit 492fd2b

Please sign in to comment.