From 483a504afdf781571fe1678348ee52ec27b21fdf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BE=99=E6=96=B9=E6=B7=9E?= Date: Wed, 31 Mar 2021 11:33:24 +0800 Subject: [PATCH 1/2] store/tikv: fix misuse of PD client's GetStore (#23695) --- store/tikv/region_cache.go | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/store/tikv/region_cache.go b/store/tikv/region_cache.go index 657d8f5bc9665..dc26684fee826 100644 --- a/store/tikv/region_cache.go +++ b/store/tikv/region_cache.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "math/rand" + "strings" "sync" "sync/atomic" "time" @@ -1872,6 +1873,12 @@ func (s *Store) initResolve(bo *Backoffer, c *RegionCache) (addr string, err err } } +// A quick and dirty solution to find out whether an err is caused by StoreNotFound. +// todo: A better solution, maybe some err-code based error handling? +func isStoreNotFoundError(err error) bool { + return strings.Contains(err.Error(), "invalid store ID") && strings.Contains(err.Error(), "not found") +} + // reResolve try to resolve addr for store that need check. Returns false if the region is in tombstone state or is // deleted. func (s *Store) reResolve(c *RegionCache) (bool, error) { @@ -1882,16 +1889,20 @@ func (s *Store) reResolve(c *RegionCache) (bool, error) { } else { metrics.RegionCacheCounterWithGetStoreOK.Inc() } - if err != nil { + // `err` here can mean either "load Store from PD failed" or "store not found" + // If load Store from PD is successful but PD didn't find the store + // the err should be handled by next `if` instead of here + if err != nil && !isStoreNotFoundError(err) { logutil.BgLogger().Error("loadStore from PD failed", zap.Uint64("id", s.storeID), zap.Error(err)) // we cannot do backoff in reResolve loop but try check other store and wait tick. return false, err } - if store == nil || store.State == metapb.StoreState_Tombstone { + if store == nil { // store has be removed in PD, we should invalidate all regions using those store. logutil.BgLogger().Info("invalidate regions in removed store", zap.Uint64("store", s.storeID), zap.String("add", s.addr)) atomic.AddUint32(&s.epoch, 1) + atomic.StoreUint64(&s.state, uint64(deleted)) metrics.RegionCacheCounterWithInvalidateStoreRegionsOK.Inc() return false, nil } From de6eb556a9948c400d5cb3ac7492d8a285adadcd Mon Sep 17 00:00:00 2001 From: Lei Zhao Date: Fri, 21 May 2021 10:16:22 +0800 Subject: [PATCH 2/2] region_cache: filter peers on tombstone or dropped stores (#24726) Signed-off-by: youjiali1995 --- store/mockstore/mocktikv/pd.go | 8 ++ store/tikv/region_cache.go | 182 +++++++++++++++++-------------- store/tikv/region_cache_test.go | 188 ++++++++++++++++++++++++++++---- 3 files changed, 277 insertions(+), 101 deletions(-) diff --git a/store/mockstore/mocktikv/pd.go b/store/mockstore/mocktikv/pd.go index b2db0de8fb343..6531a4ca71cc3 100644 --- a/store/mockstore/mocktikv/pd.go +++ b/store/mockstore/mocktikv/pd.go @@ -15,6 +15,7 @@ package mocktikv import ( "context" + "fmt" "math" "sync" "time" @@ -126,6 +127,13 @@ func (c *pdClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, default: } store := c.cluster.GetStore(storeID) + // It's same as PD's implementation. 
+ if store == nil { + return nil, fmt.Errorf("invalid store ID %d, not found", storeID) + } + if store.GetState() == metapb.StoreState_Tombstone { + return nil, nil + } return store, nil } diff --git a/store/tikv/region_cache.go b/store/tikv/region_cache.go index dc26684fee826..c8d6bead8d0a8 100644 --- a/store/tikv/region_cache.go +++ b/store/tikv/region_cache.go @@ -186,7 +186,7 @@ func (r *RegionStore) filterStoreCandidate(aidx AccessIndex, op *storeSelectorOp } // init initializes region after constructed. -func (r *Region) init(c *RegionCache) error { +func (r *Region) init(bo *Backoffer, c *RegionCache) error { // region store pull used store from global store map // to avoid acquire storeMu in later access. rs := &RegionStore{ @@ -196,6 +196,7 @@ func (r *Region) init(c *RegionCache) error { stores: make([]*Store, 0, len(r.meta.Peers)), storeEpochs: make([]uint32, 0, len(r.meta.Peers)), } + availablePeers := r.meta.GetPeers()[:0] for _, p := range r.meta.Peers { c.storeMu.RLock() store, exists := c.storeMu.stores[p.StoreId] @@ -203,10 +204,15 @@ func (r *Region) init(c *RegionCache) error { if !exists { store = c.getStoreByStoreID(p.StoreId) } - _, err := store.initResolve(NewNoopBackoff(context.Background()), c) + addr, err := store.initResolve(bo, c) if err != nil { return err } + // Filter the peer on a tombstone store. + if addr == "" { + continue + } + availablePeers = append(availablePeers, p) switch store.storeType { case TiKV: rs.accessIndex[TiKVOnly] = append(rs.accessIndex[TiKVOnly], len(rs.stores)) @@ -216,6 +222,13 @@ func (r *Region) init(c *RegionCache) error { rs.stores = append(rs.stores, store) rs.storeEpochs = append(rs.storeEpochs, atomic.LoadUint32(&store.epoch)) } + // TODO(youjiali1995): It's possible the region info in PD is stale for now but it can recover. + // Maybe we need backoff here. + if len(availablePeers) == 0 { + return errors.Errorf("no available peers, region: {%v}", r.meta) + } + r.meta.Peers = availablePeers + atomic.StorePointer(&r.store, unsafe.Pointer(rs)) // mark region has been init accessed. @@ -318,6 +331,17 @@ func NewRegionCache(pdClient pd.Client) *RegionCache { return c } +// clear clears all cached data in the RegionCache. It's only used in tests. +func (c *RegionCache) clear() { + c.mu.Lock() + c.mu.regions = make(map[RegionVerID]*Region) + c.mu.sorted = btree.New(btreeDegree) + c.mu.Unlock() + c.storeMu.Lock() + c.storeMu.stores = make(map[uint64]*Store) + c.storeMu.Unlock() +} + // Close releases region cache's resource. func (c *RegionCache) Close() { close(c.closeCh) @@ -329,32 +353,29 @@ func (c *RegionCache) asyncCheckAndResolveLoop(interval time.Duration) { defer ticker.Stop() var needCheckStores []*Store for { + needCheckStores = needCheckStores[:0] select { case <-c.closeCh: return case <-c.notifyCheckCh: - needCheckStores = needCheckStores[:0] - c.checkAndResolve(needCheckStores) + c.checkAndResolve(needCheckStores, func(s *Store) bool { + return s.getResolveState() == needCheck + }) case <-ticker.C: - // refresh store once a minute to update labels - var stores []*Store - c.storeMu.RLock() - stores = make([]*Store, 0, len(c.storeMu.stores)) - for _, s := range c.storeMu.stores { - stores = append(stores, s) - } - c.storeMu.RUnlock() - for _, store := range stores { - _, err := store.reResolve(c) - terror.Log(err) - } + // refresh store to update labels. + c.checkAndResolve(needCheckStores, func(s *Store) bool { + state := s.getResolveState() + // Only valid stores should be reResolved. 
In fact, it's impossible + // there's a deleted store in the stores map which guaranteed by reReslve(). + return state != unresolved && state != tombstone && state != deleted + }) } } } // checkAndResolve checks and resolve addr of failed stores. // this method isn't thread-safe and only be used by one goroutine. -func (c *RegionCache) checkAndResolve(needCheckStores []*Store) { +func (c *RegionCache) checkAndResolve(needCheckStores []*Store, needCheck func(*Store) bool) { defer func() { r := recover() if r != nil { @@ -366,8 +387,7 @@ func (c *RegionCache) checkAndResolve(needCheckStores []*Store) { c.storeMu.RLock() for _, store := range c.storeMu.stores { - state := store.getResolveState() - if state == needCheck { + if needCheck(store) { needCheckStores = append(needCheckStores, store) } } @@ -1180,9 +1200,6 @@ func filterUnavailablePeers(region *pd.Region) { new = append(new, p) } } - for i := len(new); i < len(region.Meta.Peers); i++ { - region.Meta.Peers[i] = nil - } region.Meta.Peers = new } @@ -1235,7 +1252,7 @@ func (c *RegionCache) loadRegion(bo *Backoffer, key []byte, isEndKey bool) (*Reg continue } region := &Region{meta: reg.Meta} - err = region.init(c) + err = region.init(bo, c) if err != nil { return nil, err } @@ -1280,7 +1297,7 @@ func (c *RegionCache) loadRegionByID(bo *Backoffer, regionID uint64) (*Region, e return nil, errors.New("receive Region with no available peer") } region := &Region{meta: reg.Meta} - err = region.init(c) + err = region.init(bo, c) if err != nil { return nil, err } @@ -1331,7 +1348,7 @@ func (c *RegionCache) scanRegions(bo *Backoffer, startKey, endKey []byte, limit regions := make([]*Region, 0, len(regionsInfo)) for _, r := range regionsInfo { region := &Region{meta: r.Meta} - err := region.init(c) + err := region.init(bo, c) if err != nil { return nil, err } @@ -1372,6 +1389,8 @@ func (c *RegionCache) getStoreAddr(bo *Backoffer, region *Region, store *Store, case deleted: addr = c.changeToActiveStore(region, store, storeIdx) return + case tombstone: + return "", nil default: panic("unsupported resolve state") } @@ -1419,6 +1438,8 @@ func (c *RegionCache) getProxyStore(region *Region, store *Store, rs *RegionStor return nil, 0, 0 } +// changeToActiveStore replace the deleted store in the region by an up-to-date store in the stores map. +// The order is guaranteed by reResolve() which adds the new store before marking old store deleted. func (c *RegionCache) changeToActiveStore(region *Region, store *Store, storeIdx int) (addr string) { c.storeMu.RLock() store = c.storeMu.stores[store.storeID] @@ -1493,7 +1514,7 @@ func (c *RegionCache) OnRegionEpochNotMatch(bo *Backoffer, ctx *RPCContext, curr } } region := &Region{meta: meta} - err := region.init(c) + err := region.init(bo, c) if err != nil { return err } @@ -1818,19 +1839,31 @@ type Store struct { type resolveState uint64 const ( + // The store is just created and normally is being resolved. + // Store in this state will only be resolved by initResolve(). unresolved resolveState = iota + // The store is resolved and its address is valid. resolved + // Request failed on this store and it will be re-resolved by asyncCheckAndResolveLoop(). needCheck + // The store's address or label is changed and marked deleted. + // There is a new store struct replaced it in the RegionCache and should + // call changeToActiveStore() to get the new struct. deleted + // The store is a tombstone. Should invalidate the region if tries to access it. 
+ tombstone ) -// initResolve resolves addr for store that never resolved. +// initResolve resolves the address of the store that never resolved and returns an +// empty string if it's a tombstone. func (s *Store) initResolve(bo *Backoffer, c *RegionCache) (addr string, err error) { s.resolveMutex.Lock() state := s.getResolveState() defer s.resolveMutex.Unlock() if state != unresolved { - addr = s.addr + if state != tombstone { + addr = s.addr + } return } var store *metapb.Store @@ -1841,35 +1874,33 @@ func (s *Store) initResolve(bo *Backoffer, c *RegionCache) (addr string, err err } else { metrics.RegionCacheCounterWithGetStoreOK.Inc() } - if err != nil { + if bo.GetCtx().Err() != nil && errors.Cause(bo.GetCtx().Err()) == context.Canceled { + return + } + if err != nil && !isStoreNotFoundError(err) { // TODO: more refine PD error status handle. - if errors.Cause(err) == context.Canceled { - return - } err = errors.Errorf("loadStore from PD failed, id: %d, err: %v", s.storeID, err) if err = bo.Backoff(BoPDRPC, err); err != nil { return } continue } + // The store is a tombstone. if store == nil { - return + s.setResolveState(tombstone) + return "", nil } addr = store.GetAddress() + if addr == "" { + return "", errors.Errorf("empty store(%d) address", s.storeID) + } s.addr = addr s.saddr = store.GetStatusAddress() s.storeType = GetStoreTypeByMeta(store) s.labels = store.GetLabels() - retry: - state = s.getResolveState() - if state != unresolved { - addr = s.addr - return - } - if !s.compareAndSwapState(state, resolved) { - goto retry - } - return + // Shouldn't have other one changing its state concurrently, but we still use changeResolveStateTo for safety. + s.changeResolveStateTo(unresolved, resolved) + return s.addr, nil } } @@ -1902,7 +1933,7 @@ func (s *Store) reResolve(c *RegionCache) (bool, error) { logutil.BgLogger().Info("invalidate regions in removed store", zap.Uint64("store", s.storeID), zap.String("add", s.addr)) atomic.AddUint32(&s.epoch, 1) - atomic.StoreUint64(&s.state, uint64(deleted)) + s.setResolveState(tombstone) metrics.RegionCacheCounterWithInvalidateStoreRegionsOK.Inc() return false, nil } @@ -1910,33 +1941,14 @@ func (s *Store) reResolve(c *RegionCache) (bool, error) { storeType := GetStoreTypeByMeta(store) addr = store.GetAddress() if s.addr != addr || !s.IsSameLabels(store.GetLabels()) { - state := resolved - newStore := &Store{storeID: s.storeID, addr: addr, saddr: store.GetStatusAddress(), storeType: storeType, labels: store.GetLabels()} - newStore.state = *(*uint64)(&state) + newStore := &Store{storeID: s.storeID, addr: addr, saddr: store.GetStatusAddress(), storeType: storeType, labels: store.GetLabels(), state: uint64(resolved)} c.storeMu.Lock() c.storeMu.stores[newStore.storeID] = newStore c.storeMu.Unlock() - retryMarkDel: - // all region used those - oldState := s.getResolveState() - if oldState == deleted { - return false, nil - } - newState := deleted - if !s.compareAndSwapState(oldState, newState) { - goto retryMarkDel - } + s.setResolveState(deleted) return false, nil } -retryMarkResolved: - oldState := s.getResolveState() - if oldState != needCheck { - return true, nil - } - newState := resolved - if !s.compareAndSwapState(oldState, newState) { - goto retryMarkResolved - } + s.changeResolveStateTo(needCheck, resolved) return true, nil } @@ -1948,23 +1960,35 @@ func (s *Store) getResolveState() resolveState { return resolveState(atomic.LoadUint64(&s.state)) } -func (s *Store) compareAndSwapState(oldState, newState resolveState) bool { - return 
atomic.CompareAndSwapUint64(&s.state, uint64(oldState), uint64(newState)) +func (s *Store) setResolveState(state resolveState) { + atomic.StoreUint64(&s.state, uint64(state)) +} + +// changeResolveStateTo changes the store resolveState from the old state to the new state. +// Returns true if it changes the state successfully, and false if the store's state +// is changed by another one. +func (s *Store) changeResolveStateTo(from, to resolveState) bool { + for { + state := s.getResolveState() + if state == to { + return true + } + if state != from { + return false + } + if atomic.CompareAndSwapUint64(&s.state, uint64(from), uint64(to)) { + return true + } + } } // markNeedCheck marks resolved store to be async resolve to check store addr change. func (s *Store) markNeedCheck(notifyCheckCh chan struct{}) { -retry: - oldState := s.getResolveState() - if oldState != resolved { - return - } - if !s.compareAndSwapState(oldState, needCheck) { - goto retry - } - select { - case notifyCheckCh <- struct{}{}: - default: + if s.changeResolveStateTo(resolved, needCheck) { + select { + case notifyCheckCh <- struct{}{}: + default: + } } } diff --git a/store/tikv/region_cache_test.go b/store/tikv/region_cache_test.go index 73a8eef428c6d..e92ba3bf2e9ac 100644 --- a/store/tikv/region_cache_test.go +++ b/store/tikv/region_cache_test.go @@ -165,32 +165,176 @@ func (s *testRegionCacheSuite) TestSimple(c *C) { c.Assert(r, IsNil) } -func (s *testRegionCacheSuite) TestDropStore(c *C) { - bo := NewBackofferWithVars(context.Background(), 100, nil) +// TestResolveStateTransition verifies store's resolve state transition. For example, +// a newly added store is in unresolved state and will be resolved soon if it's an up store, +// or in tombstone state if it's a tombstone. +func (s *testRegionCacheSuite) TestResolveStateTransition(c *C) { + cache := s.cache + bo := NewNoopBackoff(context.Background()) + + // Check resolving normal stores. The resolve state should be resolved. + for _, storeMeta := range s.cluster.GetAllStores() { + store := cache.getStoreByStoreID(storeMeta.GetId()) + c.Assert(store.getResolveState(), Equals, unresolved) + addr, err := store.initResolve(bo, cache) + c.Assert(err, IsNil) + c.Assert(addr, Equals, storeMeta.GetAddress()) + c.Assert(store.getResolveState(), Equals, resolved) + } + + waitResolve := func(s *Store) { + for i := 0; i < 10; i++ { + if s.getResolveState() != needCheck { + break + } + time.Sleep(50 * time.Millisecond) + } + } + + // Mark the store needCheck. The resolve state should be resolved soon. + store := cache.getStoreByStoreID(s.store1) + store.markNeedCheck(cache.notifyCheckCh) + waitResolve(store) + c.Assert(store.getResolveState(), Equals, resolved) + + // Mark the store needCheck and it becomes a tombstone. The resolve state should be tombstone. + s.cluster.MarkTombstone(s.store1) + store.markNeedCheck(cache.notifyCheckCh) + waitResolve(store) + c.Assert(store.getResolveState(), Equals, tombstone) + s.cluster.StartStore(s.store1) + + // Mark the store needCheck and it's deleted from PD. The resolve state should be tombstone. 
+ cache.clear() + store = cache.getStoreByStoreID(s.store1) + store.initResolve(bo, cache) + c.Assert(store.getResolveState(), Equals, resolved) + storeMeta := s.cluster.GetStore(s.store1) s.cluster.RemoveStore(s.store1) - loc, err := s.cache.LocateKey(bo, []byte("a")) - c.Assert(err, IsNil) - ctx, err := s.cache.GetTiKVRPCContext(bo, loc.Region, kv.ReplicaReadLeader, 0) + store.markNeedCheck(cache.notifyCheckCh) + waitResolve(store) + c.Assert(store.getResolveState(), Equals, tombstone) + s.cluster.AddStore(storeMeta.GetId(), storeMeta.GetAddress(), storeMeta.GetLabels()...) + + // Mark the store needCheck and its address and labels are changed. + // The resolve state should be deleted and a new store is added to the cache. + cache.clear() + store = cache.getStoreByStoreID(s.store1) + store.initResolve(bo, cache) + c.Assert(store.getResolveState(), Equals, resolved) + s.cluster.UpdateStoreAddr(s.store1, store.addr+"0", &metapb.StoreLabel{Key: "k", Value: "v"}) + store.markNeedCheck(cache.notifyCheckCh) + waitResolve(store) + c.Assert(store.getResolveState(), Equals, deleted) + newStore := cache.getStoreByStoreID(s.store1) + c.Assert(newStore.getResolveState(), Equals, resolved) + c.Assert(newStore.addr, Equals, store.addr+"0") + c.Assert(newStore.labels, DeepEquals, []*metapb.StoreLabel{{Key: "k", Value: "v"}}) + + // Check initResolve()ing a tombstone store. The resolve state should be tombstone. + cache.clear() + s.cluster.MarkTombstone(s.store1) + store = cache.getStoreByStoreID(s.store1) + for i := 0; i < 2; i++ { + addr, err := store.initResolve(bo, cache) + c.Assert(err, IsNil) + c.Assert(addr, Equals, "") + c.Assert(store.getResolveState(), Equals, tombstone) + } + s.cluster.StartStore(s.store1) + cache.clear() + + // Check initResolve()ing a dropped store. The resolve state should be tombstone. + cache.clear() + storeMeta = s.cluster.GetStore(s.store1) + s.cluster.RemoveStore(s.store1) + store = cache.getStoreByStoreID(s.store1) + for i := 0; i < 2; i++ { + addr, err := store.initResolve(bo, cache) + c.Assert(err, IsNil) + c.Assert(addr, Equals, "") + c.Assert(store.getResolveState(), Equals, tombstone) + } + s.cluster.AddStore(storeMeta.GetId(), storeMeta.GetAddress(), storeMeta.GetLabels()...) +} + +// TestFilterDownPeersOrPeersOnTombstoneOrDroppedStore verifies the RegionCache filter +// region's down peers and peers on tombstone or dropped stores. RegionCache shouldn't +// report errors in such cases if there are available peers. +func (s *testRegionCacheSuite) TestFilterDownPeersOrPeersOnTombstoneOrDroppedStores(c *C) { + key := []byte("a") + bo := NewBackofferWithVars(context.Background(), 100, nil) + + verifyGetRPCCtx := func(meta *metapb.Region) { + loc, err := s.cache.LocateKey(bo, key) + c.Assert(loc, NotNil) + c.Assert(err, IsNil) + ctx, err := s.cache.GetTiKVRPCContext(bo, loc.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx, NotNil) + c.Assert(ctx.Meta, DeepEquals, meta) + ctx, err = s.cache.GetTiKVRPCContext(bo, loc.Region, kv.ReplicaReadFollower, rand.Uint32()) + c.Assert(err, IsNil) + c.Assert(ctx, NotNil) + c.Assert(ctx.Meta, DeepEquals, meta) + } + + // When all peers are normal, the cached region should contain all peers. 
+ reg, err := s.cache.findRegionByKey(bo, key, false) + c.Assert(reg, NotNil) c.Assert(err, IsNil) - c.Assert(ctx, IsNil) - ctx, err = s.cache.GetTiKVRPCContext(bo, loc.Region, kv.ReplicaReadFollower, rand.Uint32()) + regInPD, _ := s.cluster.GetRegion(reg.GetID()) + c.Assert(reg.meta, DeepEquals, regInPD) + c.Assert(len(reg.meta.GetPeers()), Equals, len(reg.getStore().stores)) + verifyGetRPCCtx(reg.meta) + s.checkCache(c, 1) + s.cache.clear() + + // Shouldn't contain the peer on the tombstone store. + s.cluster.MarkTombstone(s.store1) + reg, err = s.cache.findRegionByKey(bo, key, false) + c.Assert(reg, NotNil) c.Assert(err, IsNil) - c.Assert(ctx, IsNil) - s.checkCache(c, 0) -} + c.Assert(len(reg.meta.GetPeers()), Equals, len(regInPD.GetPeers())-1) + c.Assert(len(reg.meta.GetPeers()), Equals, len(reg.getStore().stores)) + for _, peer := range reg.meta.GetPeers() { + c.Assert(peer.GetStoreId(), Not(Equals), s.store1) + } + for _, store := range reg.getStore().stores { + c.Assert(store.storeID, Not(Equals), s.store1) + } + verifyGetRPCCtx(reg.meta) + s.checkCache(c, 1) + s.cache.clear() + s.cluster.StartStore(s.store1) -func (s *testRegionCacheSuite) TestDropStoreRetry(c *C) { + // Shouldn't contain the peer on the dropped store. + store := s.cluster.GetStore(s.store1) s.cluster.RemoveStore(s.store1) - done := make(chan struct{}) - go func() { - time.Sleep(time.Millisecond * 10) - s.cluster.AddStore(s.store1, s.storeAddr(s.store1)) - close(done) - }() - loc, err := s.cache.LocateKey(s.bo, []byte("a")) + reg, err = s.cache.findRegionByKey(bo, key, false) + c.Assert(reg, NotNil) c.Assert(err, IsNil) - c.Assert(loc.Region.id, Equals, s.region1) - <-done + c.Assert(len(reg.meta.GetPeers()), Equals, len(regInPD.GetPeers())-1) + c.Assert(len(reg.meta.GetPeers()), Equals, len(reg.getStore().stores)) + for _, peer := range reg.meta.GetPeers() { + c.Assert(peer.GetStoreId(), Not(Equals), s.store1) + } + for _, store := range reg.getStore().stores { + c.Assert(store.storeID, Not(Equals), s.store1) + } + verifyGetRPCCtx(reg.meta) + s.checkCache(c, 1) + s.cache.clear() + s.cluster.AddStore(store.GetId(), store.GetAddress(), store.GetLabels()...) + + // Report an error when there's no available peers. + s.cluster.MarkTombstone(s.store1) + s.cluster.MarkTombstone(s.store2) + _, err = s.cache.findRegionByKey(bo, key, false) + c.Assert(err, NotNil) + c.Assert(err.Error(), Matches, ".*no available peers.*") + s.cluster.StartStore(s.store1) + s.cluster.StartStore(s.store2) } func (s *testRegionCacheSuite) TestUpdateLeader(c *C) { @@ -296,7 +440,7 @@ func (s *testRegionCacheSuite) TestUpdateLeader3(c *C) { c.Assert(err, IsNil) c.Assert(ctx.Addr, Equals, "store2") s.cache.OnSendFail(NewNoopBackoff(context.Background()), ctx, false, errors.New("send fail")) - s.cache.checkAndResolve(nil) + s.cache.checkAndResolve(nil, func(*Store) bool { return true }) s.cache.UpdateLeader(loc.Region, s.store2, 0) addr := s.getAddr(c, []byte("a"), kv.ReplicaReadLeader, 0) c.Assert(addr, Equals, "") @@ -1309,7 +1453,7 @@ func (s *testRegionCacheSuite) TestPeersLenChange(c *C) { } filterUnavailablePeers(cpRegion) region := &Region{meta: cpRegion.Meta} - err = region.init(s.cache) + err = region.init(s.bo, s.cache) c.Assert(err, IsNil) s.cache.insertRegionToCache(region)
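
Note (not part of the patches above): the second patch replaces the goto-based compare-and-swap loops with a single changeResolveStateTo() helper and introduces the tombstone resolve state. The following standalone Go sketch models only that state machine; the store type here is stripped down to the atomic state word, and the surrounding RegionCache machinery is omitted, so it is an illustration of the transition rules rather than the real implementation.

package main

import (
	"fmt"
	"sync/atomic"
)

type resolveState uint64

const (
	unresolved resolveState = iota // just created; resolved only by initResolve()
	resolved                       // address is valid
	needCheck                      // a request failed; re-resolve asynchronously
	deleted                        // address/labels changed; a new store struct replaces it
	tombstone                      // store is gone; regions using it must be invalidated
)

// store models only the atomic state word of the real Store struct.
type store struct {
	state uint64
}

func (s *store) getResolveState() resolveState {
	return resolveState(atomic.LoadUint64(&s.state))
}

// changeResolveStateTo moves the state from `from` to `to`. It returns true if
// the store ends up in `to` (either this call won the CAS or it was already
// there), and false if a concurrent writer moved it to some other state first.
func (s *store) changeResolveStateTo(from, to resolveState) bool {
	for {
		state := s.getResolveState()
		if state == to {
			return true
		}
		if state != from {
			return false
		}
		if atomic.CompareAndSwapUint64(&s.state, uint64(from), uint64(to)) {
			return true
		}
	}
}

func main() {
	s := &store{state: uint64(resolved)}
	fmt.Println(s.changeResolveStateTo(resolved, needCheck))  // true: resolved -> needCheck
	fmt.Println(s.changeResolveStateTo(resolved, needCheck))  // true: already needCheck
	fmt.Println(s.changeResolveStateTo(needCheck, tombstone)) // true: needCheck -> tombstone
	fmt.Println(s.changeResolveStateTo(resolved, needCheck))  // false: state moved elsewhere
	fmt.Println(s.getResolveState() == tombstone)             // true
}

The CAS loop (rather than a plain atomic store) matters because reResolve(), the async check loop, and request paths can all touch the state concurrently: the helper only succeeds when the previous state is the one the caller expects, so a concurrent transition to deleted or tombstone is never silently overwritten.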