Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

core: allow cancel load region #4175

Merged
merged 13 commits into from
Oct 14, 2021
2 changes: 1 addition & 1 deletion server/cluster/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -299,7 +299,7 @@ func (c *RaftCluster) LoadClusterInfo() (*RaftCluster, error) {
start = time.Now()

// used to load region from kv storage to cache storage.
if err := c.storage.LoadRegionsOnce(c.core.CheckAndPutRegion); err != nil {
if err := c.storage.LoadRegionsOnce(c.ctx, c.core.CheckAndPutRegion); err != nil {
return nil, err
}
log.Info("load regions",
Expand Down
11 changes: 11 additions & 0 deletions server/core/region_storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"sync"
"time"

"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/log"
"github.com/tikv/pd/pkg/encryption"
Expand Down Expand Up @@ -130,6 +131,7 @@ func deleteRegion(kv kv.Base, region *metapb.Region) error {
}

func loadRegions(
ctx context.Context,
kv kv.Base,
encryptionKeyManager *encryptionkm.KeyManager,
f func(region *RegionInfo) []*RegionInfo,
Expand All @@ -142,6 +144,10 @@ func loadRegions(
// a variable rangeLimit to work around.
rangeLimit := maxKVRangeLimit
for {
failpoint.Inject("slowLoadRegion", func() {
rangeLimit = 1
time.Sleep(time.Second)
})
startKey := regionPath(nextID)
_, res, err := kv.LoadRange(startKey, endKey, rangeLimit)
if err != nil {
Expand All @@ -150,6 +156,11 @@ func loadRegions(
}
return err
}
select {
case <-ctx.Done():
return ctx.Err()
default:
}

for _, s := range res {
region := &metapb.Region{}
Expand Down
13 changes: 7 additions & 6 deletions server/core/storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
package core

import (
"context"
"encoding/json"
"fmt"
"math"
Expand Down Expand Up @@ -195,22 +196,22 @@ func (s *Storage) LoadRegion(regionID uint64, region *metapb.Region) (ok bool, e
}

// LoadRegions loads all regions from storage to RegionsInfo.
func (s *Storage) LoadRegions(f func(region *RegionInfo) []*RegionInfo) error {
func (s *Storage) LoadRegions(ctx context.Context, f func(region *RegionInfo) []*RegionInfo) error {
if atomic.LoadInt32(&s.useRegionStorage) > 0 {
return loadRegions(s.regionStorage, s.encryptionKeyManager, f)
return loadRegions(ctx, s.regionStorage, s.encryptionKeyManager, f)
}
return loadRegions(s.Base, s.encryptionKeyManager, f)
return loadRegions(ctx, s.Base, s.encryptionKeyManager, f)
}

// LoadRegionsOnce loads all regions from storage to RegionsInfo. It only loads once from regionStorage.
func (s *Storage) LoadRegionsOnce(f func(region *RegionInfo) []*RegionInfo) error {
func (s *Storage) LoadRegionsOnce(ctx context.Context, f func(region *RegionInfo) []*RegionInfo) error {
if atomic.LoadInt32(&s.useRegionStorage) == 0 {
return loadRegions(s.Base, s.encryptionKeyManager, f)
return loadRegions(ctx, s.Base, s.encryptionKeyManager, f)
}
s.mu.Lock()
defer s.mu.Unlock()
if s.regionLoaded == 0 {
if err := loadRegions(s.regionStorage, s.encryptionKeyManager, f); err != nil {
if err := loadRegions(ctx, s.regionStorage, s.encryptionKeyManager, f); err != nil {
return err
}
s.regionLoaded = 1
Expand Down
9 changes: 5 additions & 4 deletions server/core/storage_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
package core

import (
"context"
"encoding/json"
"fmt"
"math"
Expand Down Expand Up @@ -144,7 +145,7 @@ func (s *testKVSuite) TestLoadRegions(c *C) {

n := 10
regions := mustSaveRegions(c, storage, n)
c.Assert(storage.LoadRegions(cache.SetRegion), IsNil)
c.Assert(storage.LoadRegions(context.Background(), cache.SetRegion), IsNil)

c.Assert(cache.GetRegionCount(), Equals, n)
for _, region := range cache.GetMetaRegions() {
Expand All @@ -158,7 +159,7 @@ func (s *testKVSuite) TestLoadRegionsToCache(c *C) {

n := 10
regions := mustSaveRegions(c, storage, n)
c.Assert(storage.LoadRegionsOnce(cache.SetRegion), IsNil)
c.Assert(storage.LoadRegionsOnce(context.Background(), cache.SetRegion), IsNil)

c.Assert(cache.GetRegionCount(), Equals, n)
for _, region := range cache.GetMetaRegions() {
Expand All @@ -167,7 +168,7 @@ func (s *testKVSuite) TestLoadRegionsToCache(c *C) {

n = 20
mustSaveRegions(c, storage, n)
c.Assert(storage.LoadRegionsOnce(cache.SetRegion), IsNil)
c.Assert(storage.LoadRegionsOnce(context.Background(), cache.SetRegion), IsNil)
HunDunDM marked this conversation as resolved.
Show resolved Hide resolved
c.Assert(cache.GetRegionCount(), Equals, n)
}

Expand All @@ -177,7 +178,7 @@ func (s *testKVSuite) TestLoadRegionsExceedRangeLimit(c *C) {

n := 1000
regions := mustSaveRegions(c, storage, n)
c.Assert(storage.LoadRegions(cache.SetRegion), IsNil)
c.Assert(storage.LoadRegions(context.Background(), cache.SetRegion), IsNil)
c.Assert(cache.GetRegionCount(), Equals, n)
for _, region := range cache.GetMetaRegions() {
c.Assert(region, DeepEquals, regions[region.GetId()])
Expand Down
12 changes: 6 additions & 6 deletions server/region_syncer/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,8 @@ const (
func (s *RegionSyncer) StopSyncWithLeader() {
s.reset()
s.mu.Lock()
close(s.mu.closed)
s.mu.closed = make(chan struct{})
s.mu.clientCancel()
s.mu.clientCtx, s.mu.clientCancel = context.WithCancel(context.Background())
HunDunDM marked this conversation as resolved.
Show resolved Hide resolved
s.mu.Unlock()
s.wg.Wait()
}
Expand Down Expand Up @@ -130,22 +130,22 @@ var regionGuide = core.GenerateRegionGuideFunc(false)
func (s *RegionSyncer) StartSyncWithLeader(addr string) {
s.wg.Add(1)
s.mu.RLock()
closed := s.mu.closed
ctx := s.mu.clientCtx
s.mu.RUnlock()
go func() {
defer s.wg.Done()
// used to load region from kv storage to cache storage.
bc := s.server.GetBasicCluster()
storage := s.server.GetStorage()
err := storage.LoadRegionsOnce(bc.CheckAndPutRegion)
err := storage.LoadRegionsOnce(ctx, bc.CheckAndPutRegion)
disksing marked this conversation as resolved.
Show resolved Hide resolved
if err != nil {
log.Warn("failed to load regions.", errs.ZapError(err))
}
// establish client.
var conn *grpc.ClientConn
for {
select {
case <-closed:
case <-ctx.Done():
return
default:
}
Expand All @@ -161,7 +161,7 @@ func (s *RegionSyncer) StartSyncWithLeader(addr string) {
// Start syncing data.
for {
select {
case <-closed:
case <-ctx.Done():
return
default:
}
Expand Down
104 changes: 104 additions & 0 deletions server/region_syncer/client_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
// Copyright 2021 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package syncer

import (
"context"
"os"
"time"

. "github.com/pingcap/check"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/pdpb"
"github.com/tikv/pd/pkg/grpcutil"
"github.com/tikv/pd/server/core"
"github.com/tikv/pd/server/kv"
)

var _ = Suite(&testClientSuite{})

type testClientSuite struct{}

// For issue https://github.com/tikv/pd/issues/3936
func (t *testClientSuite) TestLoadRegion(c *C) {
tempDir, err := os.MkdirTemp(os.TempDir(), "region_syncer_load_region")
c.Assert(err, IsNil)
defer os.RemoveAll(tempDir)
rs, err := core.NewRegionStorage(context.Background(), tempDir, nil)
c.Assert(err, IsNil)

server := &mockServer{
ctx: context.Background(),
storage: core.NewStorage(kv.NewMemoryKV(), core.WithRegionStorage(rs)),
bc: core.NewBasicCluster(),
}
for i := 0; i < 30; i++ {
rs.SaveRegion(&metapb.Region{Id: uint64(i) + 1})
}
c.Assert(failpoint.Enable("github.com/tikv/pd/server/core/slowLoadRegion", "return(true)"), IsNil)
defer func() { c.Assert(failpoint.Disable("github.com/tikv/pd/server/core/slowLoadRegion"), IsNil) }()

rc := NewRegionSyncer(server)
start := time.Now()
rc.StartSyncWithLeader("")
time.Sleep(time.Second)
rc.StopSyncWithLeader()
c.Assert(time.Since(start), Greater, time.Second) // make sure failpoint is injected
c.Assert(time.Since(start), Less, time.Second*2)
}

// mockServer is a minimal in-memory stand-in for the PD server, providing
// just enough of the Server interface for the region syncer client tests.
type mockServer struct {
// ctx is returned by LoopContext and bounds the syncer's background work.
ctx context.Context
// member and leader back GetMemberInfo and GetLeader respectively.
member, leader *pdpb.Member
// storage is the backing storage returned by GetStorage.
storage *core.Storage
// bc is the basic cluster returned by GetBasicCluster and GetRegions.
bc *core.BasicCluster
}

// LoopContext returns the mock server's background context.
func (s *mockServer) LoopContext() context.Context {
return s.ctx
}

// ClusterID returns the fixed cluster ID used by the mock server.
func (s *mockServer) ClusterID() uint64 {
	const mockClusterID uint64 = 1
	return mockClusterID
}

// GetMemberInfo returns the mock server's member descriptor (may be nil).
func (s *mockServer) GetMemberInfo() *pdpb.Member {
return s.member
}

// GetLeader returns the mock server's leader descriptor (may be nil).
func (s *mockServer) GetLeader() *pdpb.Member {
return s.leader
}

// GetStorage returns the storage the syncer client loads regions from.
func (s *mockServer) GetStorage() *core.Storage {
return s.storage
}

// Name returns the mock server's fixed name.
func (s *mockServer) Name() string {
	const serverName = "mock-server"
	return serverName
}

// GetRegions returns all regions currently held by the basic cluster.
func (s *mockServer) GetRegions() []*core.RegionInfo {
return s.bc.GetRegions()
}

// GetTLSConfig returns an empty TLS configuration; the tests run without TLS.
func (s *mockServer) GetTLSConfig() *grpcutil.TLSConfig {
	var cfg grpcutil.TLSConfig
	return &cfg
}

// GetBasicCluster returns the basic cluster backing the mock server.
func (s *mockServer) GetBasicCluster() *core.BasicCluster {
return s.bc
}
5 changes: 3 additions & 2 deletions server/region_syncer/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,8 @@ type RegionSyncer struct {
streams map[string]ServerStream
regionSyncerCtx context.Context
regionSyncerCancel context.CancelFunc
closed chan struct{}
clientCtx context.Context
clientCancel context.CancelFunc
}
server Server
wg sync.WaitGroup
Expand All @@ -95,7 +96,7 @@ func NewRegionSyncer(s Server) *RegionSyncer {
tlsConfig: s.GetTLSConfig(),
}
syncer.mu.streams = make(map[string]ServerStream)
syncer.mu.closed = make(chan struct{})
syncer.mu.clientCtx, syncer.mu.clientCancel = context.WithCancel(context.Background())
HunDunDM marked this conversation as resolved.
Show resolved Hide resolved
return syncer
}

Expand Down
2 changes: 1 addition & 1 deletion tests/server/cluster/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -727,7 +727,7 @@ func (s *clusterTestSuite) TestLoadClusterInfo(c *C) {
for _, region := range regions {
c.Assert(storage.SaveRegion(region), IsNil)
}
raftCluster.GetStorage().LoadRegionsOnce(raftCluster.GetCacheCluster().PutRegion)
raftCluster.GetStorage().LoadRegionsOnce(s.ctx, raftCluster.GetCacheCluster().PutRegion)
c.Assert(raftCluster.GetRegionCount(), Equals, n)
}

Expand Down