feat(horaemeta): add node inspector (#1483)
## Rationale
In some extreme scenarios, etcd events may be lost or updates may fail, leaving the mapping between nodes and shards out of date. We need a backstop mechanism that ensures the mapping is eventually brought back to a correct state.

## Detailed Changes
* Add `NodeInspector`: it starts when the cluster starts, runs in the background, and periodically inspects node status, dropping shard nodes that belong to expired nodes (see the usage sketch below).
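
A minimal sketch of the intended lifecycle, based on the API added in this change. The `noopMetadata` stub, the `main` wrapper, and the 10-second interval are illustrative assumptions; the real wiring in `cluster.go` below passes `*metadata.ClusterMetadata` and uses the default 5-second interval.

```go
package main

import (
	"context"
	"time"

	"github.com/apache/incubator-horaedb-meta/server/cluster/metadata"
	"github.com/apache/incubator-horaedb-meta/server/coordinator/inspector"
	"github.com/apache/incubator-horaedb-meta/server/storage"
	"go.uber.org/zap"
)

// noopMetadata is a hypothetical stand-in for *metadata.ClusterMetadata, which
// already satisfies ClusterMetaDataManipulator via GetClusterSnapshot and
// DropShardNodes.
type noopMetadata struct{}

func (m *noopMetadata) GetClusterSnapshot() metadata.Snapshot {
	// An empty snapshot: no registered nodes, so nothing is ever expired.
	return metadata.Snapshot{}
}

func (m *noopMetadata) DropShardNodes(_ context.Context, _ []storage.ShardNode) error {
	return nil
}

func main() {
	ctx := context.Background()

	// The 10s interval is arbitrary; NewNodeInspector uses the 5s default.
	ni := inspector.NewNodeInspectorWithInterval(zap.NewNop(), &noopMetadata{}, 10*time.Second)

	// Start spawns one background goroutine; calling Start twice returns ErrStartAgain.
	if err := ni.Start(ctx); err != nil {
		panic(err)
	}

	// ... the cluster serves traffic while the inspector runs ...
	time.Sleep(30 * time.Second)

	// Stop cancels the background goroutine; Stop before Start returns ErrStopNotStart.
	_ = ni.Stop(ctx)
}
```

Note that `Stop` only cancels the inspector's internal context; an in-flight `inspect` run uses the context passed to `Start` and may finish its current `DropShardNodes` call before the goroutine exits.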

## Test Plan
Pass CI.

---------

Co-authored-by: xikai.wxk <xikai.wxk@antgroup.com>
ZuLiangWang and ShiKaiWi authored Mar 18, 2024
1 parent 629bf39 commit 9291dfc
Showing 8 changed files with 308 additions and 5 deletions.
11 changes: 11 additions & 0 deletions horaemeta/server/cluster/cluster.go
@@ -26,6 +26,7 @@ import (
"github.com/apache/incubator-horaedb-meta/server/cluster/metadata"
"github.com/apache/incubator-horaedb-meta/server/coordinator"
"github.com/apache/incubator-horaedb-meta/server/coordinator/eventdispatch"
"github.com/apache/incubator-horaedb-meta/server/coordinator/inspector"
"github.com/apache/incubator-horaedb-meta/server/coordinator/procedure"
"github.com/apache/incubator-horaedb-meta/server/coordinator/scheduler/manager"
"github.com/apache/incubator-horaedb-meta/server/id"
@@ -47,6 +48,7 @@ type Cluster struct {
procedureFactory *coordinator.Factory
procedureManager procedure.Manager
schedulerManager manager.SchedulerManager
nodeInspector *inspector.NodeInspector
}

func NewCluster(logger *zap.Logger, metadata *metadata.ClusterMetadata, client *clientv3.Client, rootPath string) (*Cluster, error) {
@@ -62,12 +64,15 @@ func NewCluster(logger *zap.Logger, metadata *metadata.ClusterMetadata, client *

schedulerManager := manager.NewManager(logger, procedureManager, procedureFactory, metadata, client, rootPath, metadata.GetTopologyType(), metadata.GetProcedureExecutingBatchSize())

nodeInspector := inspector.NewNodeInspector(logger, metadata)

return &Cluster{
logger: logger,
metadata: metadata,
procedureFactory: procedureFactory,
procedureManager: procedureManager,
schedulerManager: schedulerManager,
nodeInspector: nodeInspector,
}, nil
}

@@ -78,6 +83,9 @@ func (c *Cluster) Start(ctx context.Context) error {
if err := c.schedulerManager.Start(ctx); err != nil {
return errors.WithMessage(err, "start scheduler manager")
}
if err := c.nodeInspector.Start(ctx); err != nil {
return errors.WithMessage(err, "start node inspector")
}
return nil
}

@@ -88,6 +96,9 @@ func (c *Cluster) Stop(ctx context.Context) error {
if err := c.schedulerManager.Stop(ctx); err != nil {
return errors.WithMessage(err, "stop scheduler manager")
}
if err := c.nodeInspector.Stop(ctx); err != nil {
return errors.WithMessage(err, "stop node inspector")
}
return nil
}

2 changes: 1 addition & 1 deletion horaemeta/server/cluster/metadata/cluster_metadata.go
@@ -685,7 +685,7 @@ func (c *ClusterMetadata) UpdateClusterViewByNode(ctx context.Context, shardNode
return nil
}

func (c *ClusterMetadata) DropShardNode(ctx context.Context, shardNodes []storage.ShardNode) error {
func (c *ClusterMetadata) DropShardNodes(ctx context.Context, shardNodes []storage.ShardNode) error {
if err := c.topologyManager.DropShardNodes(ctx, shardNodes); err != nil {
return errors.WithMessage(err, "drop shard nodes")
}
2 changes: 1 addition & 1 deletion horaemeta/server/cluster/metadata/cluster_metadata_test.go
@@ -213,7 +213,7 @@ func testShardOperation(ctx context.Context, re *require.Assertions, m *metadata
_, err = m.GetShardNodeByTableIDs([]storage.TableID{})
re.NoError(err)

err = m.DropShardNode(ctx, []storage.ShardNode{{
err = m.DropShardNodes(ctx, []storage.ShardNode{{
ID: shardNodeResult.NodeShards[0].ShardNode.ID,
ShardRole: shardNodeResult.NodeShards[0].ShardNode.ShardRole,
NodeName: shardNodeResult.NodeShards[0].ShardNode.NodeName,
147 changes: 147 additions & 0 deletions horaemeta/server/coordinator/inspector/node_inspector.go
@@ -0,0 +1,147 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package inspector

import (
"context"
"sync"
"time"

"github.com/apache/incubator-horaedb-meta/pkg/coderr"
"github.com/apache/incubator-horaedb-meta/pkg/log"
"github.com/apache/incubator-horaedb-meta/server/cluster/metadata"
"github.com/apache/incubator-horaedb-meta/server/storage"
"go.uber.org/zap"
)

var ErrStartAgain = coderr.NewCodeError(coderr.Internal, "try to start again")
var ErrStopNotStart = coderr.NewCodeError(coderr.Internal, "try to stop a not-started inspector")

const defaultInspectInterval = time.Second * 5

// NodeInspector periodically inspects the status of registered nodes and removes shard nodes that belong to expired nodes.
type NodeInspector struct {
logger *zap.Logger
clusterMetadata ClusterMetaDataManipulator
interval time.Duration

starter sync.Once
// After `Start` is called, the following fields will be initialized
stopCtx context.Context
bgJobCancel context.CancelFunc
}

// ClusterMetaDataManipulator provides the cluster snapshot for NodeInspector to check, and the ability to drop expired shard nodes.
type ClusterMetaDataManipulator interface {
GetClusterSnapshot() metadata.Snapshot
DropShardNodes(context.Context, []storage.ShardNode) error
}

func NewNodeInspectorWithInterval(logger *zap.Logger, clusterMetadata ClusterMetaDataManipulator, inspectInterval time.Duration) *NodeInspector {
return &NodeInspector{
logger: logger,
clusterMetadata: clusterMetadata,
interval: inspectInterval,
starter: sync.Once{},
stopCtx: nil,
bgJobCancel: nil,
}
}

func NewNodeInspector(logger *zap.Logger, clusterMetadata ClusterMetaDataManipulator) *NodeInspector {
return NewNodeInspectorWithInterval(logger, clusterMetadata, defaultInspectInterval)
}

func (ni *NodeInspector) Start(ctx context.Context) error {
started := false
ni.starter.Do(func() {
log.Info("node inspector start")
started = true
ni.stopCtx, ni.bgJobCancel = context.WithCancel(ctx)
go func() {
for {
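// A fresh timer is created on every iteration so that at least `interval` elapses between the end of one inspection and the start of the next.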
t := time.NewTimer(ni.interval)
select {
case <-ni.stopCtx.Done():
ni.logger.Info("node inspector is stopped, cancel the bg inspecting")
if !t.Stop() {
<-t.C
}
return
case <-t.C:
}

ni.inspect(ctx)
}
}()
})

if !started {
return ErrStartAgain
}

return nil
}

func (ni *NodeInspector) Stop(_ context.Context) error {
if ni.bgJobCancel != nil {
ni.bgJobCancel()
return nil
}

return ErrStopNotStart
}

func (ni *NodeInspector) inspect(ctx context.Context) {
// Get latest cluster snapshot.
snapshot := ni.clusterMetadata.GetClusterSnapshot()
expiredShardNodes := findExpiredShardNodes(snapshot)
if len(expiredShardNodes) == 0 {
return
}

// Try to remove the stale shard nodes, if any.
if err := ni.clusterMetadata.DropShardNodes(ctx, expiredShardNodes); err != nil {
log.Error("drop shard node failed", zap.Error(err))
}
}

func findExpiredShardNodes(snapshot metadata.Snapshot) []storage.ShardNode {
// In most cases there are no expired shard nodes, so don't pre-allocate memory here.
expiredNodes := make(map[string]struct{}, 0)
// Check node status.
now := time.Now()
for i := range snapshot.RegisteredNodes {
node := &snapshot.RegisteredNodes[i]
if node.IsExpired(now) {
expiredNodes[node.Node.Name] = struct{}{}
}
}

expiredShardNodes := make([]storage.ShardNode, 0, len(expiredNodes))
for _, shardNode := range snapshot.Topology.ClusterView.ShardNodes {
_, ok := expiredNodes[shardNode.NodeName]
if ok {
expiredShardNodes = append(expiredShardNodes, shardNode)
}
}

return expiredShardNodes
}
145 changes: 145 additions & 0 deletions horaemeta/server/coordinator/inspector/node_inspector_test.go
@@ -0,0 +1,145 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package inspector

import (
"context"
"slices"
"sync"
"testing"
"time"

"github.com/apache/incubator-horaedb-meta/server/cluster/metadata"
"github.com/apache/incubator-horaedb-meta/server/storage"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
)

type mockClusterMetaDataManipulator struct {
snapshot metadata.Snapshot
lock sync.Mutex
droppedShardNodes [][]storage.ShardNode
}

func newMockClusterMetaDataManipulator(shardNodes []storage.ShardNode, registeredNodes []metadata.RegisteredNode) *mockClusterMetaDataManipulator {
var clusterView storage.ClusterView
clusterView.ShardNodes = shardNodes
topology := metadata.Topology{
ShardViewsMapping: nil,
ClusterView: clusterView,
}

snapshot := metadata.Snapshot{
Topology: topology,
RegisteredNodes: registeredNodes,
}
return &mockClusterMetaDataManipulator{
snapshot: snapshot,
lock: sync.Mutex{},
droppedShardNodes: make([][]storage.ShardNode, 0),
}
}

func (n *mockClusterMetaDataManipulator) GetClusterSnapshot() metadata.Snapshot {
return n.snapshot
}

func (n *mockClusterMetaDataManipulator) DropShardNodes(_ context.Context, shardNodes []storage.ShardNode) error {
n.lock.Lock()
defer n.lock.Unlock()

n.droppedShardNodes = append(n.droppedShardNodes, shardNodes)
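// Rebuild the snapshot without the dropped shard nodes so later GetClusterSnapshot calls reflect the drop.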
newShardNodes := make([]storage.ShardNode, 0, 2)
for _, node := range n.snapshot.Topology.ClusterView.ShardNodes {
dropped := slices.ContainsFunc(shardNodes, func(droppedNode storage.ShardNode) bool {
return node.NodeName == droppedNode.NodeName
})
if !dropped {
newShardNodes = append(newShardNodes, node)
}
}
n.snapshot.Topology.ClusterView.ShardNodes = newShardNodes
return nil
}

func (n *mockClusterMetaDataManipulator) CheckDroppedShardNodes(check func(droppedShardNodes [][]storage.ShardNode)) {
n.lock.Lock()
defer n.lock.Unlock()

check(n.droppedShardNodes)
}

func TestStartStopInspector(t *testing.T) {
inspector := NewNodeInspector(zap.NewNop(), newMockClusterMetaDataManipulator(nil, nil))

ctx := context.Background()
assert.NoError(t, inspector.Start(ctx))
assert.Error(t, inspector.Start(ctx))

assert.NoError(t, inspector.Stop(ctx))
}

func TestInspect(t *testing.T) {
shardNodes := []storage.ShardNode{
{ID: storage.ShardID(0), ShardRole: storage.ShardRoleLeader, NodeName: "192.168.1.102"},
{ID: storage.ShardID(1), ShardRole: storage.ShardRoleLeader, NodeName: "192.168.1.102"},
{ID: storage.ShardID(2), ShardRole: storage.ShardRoleLeader, NodeName: "192.168.1.103"},
{ID: storage.ShardID(3), ShardRole: storage.ShardRoleLeader, NodeName: "192.168.1.103"},
}
registeredNodes := []metadata.RegisteredNode{
{
Node: storage.Node{
Name: "192.168.1.102",
NodeStats: storage.NodeStats{Lease: 0, Zone: "", NodeVersion: ""},
LastTouchTime: uint64(time.Now().UnixMilli()),
State: storage.NodeStateOnline,
},
ShardInfos: nil,
},
{
// This node should be outdated.
Node: storage.Node{
Name: "192.168.1.103",
NodeStats: storage.NodeStats{Lease: 0, Zone: "", NodeVersion: ""},
LastTouchTime: uint64(time.Now().UnixMilli()) - uint64((time.Second * 20)),
State: storage.NodeStateOnline,
},
ShardInfos: nil,
},
}

metadata := newMockClusterMetaDataManipulator(shardNodes, registeredNodes)
inspector := NewNodeInspectorWithInterval(zap.NewNop(), metadata, time.Millisecond*100)
ctx := context.Background()
assert.NoError(t, inspector.Start(ctx))

// The inspection should have run at least once within 200ms (the interval is 100ms).
time.Sleep(time.Millisecond * 200)

// The shard nodes on the outdated node should have been dropped by the triggered inspection.
metadata.CheckDroppedShardNodes(func(droppedShardNodes [][]storage.ShardNode) {
assert.True(t, len(droppedShardNodes) == 1)
assert.True(t, len(droppedShardNodes[0]) == 2)
assert.Equal(t, droppedShardNodes[0][0], shardNodes[2])
assert.Equal(t, droppedShardNodes[0][1], shardNodes[3])
})

assert.NoError(t, inspector.Stop(ctx))
}
@@ -203,7 +203,7 @@ func (callback *schedulerWatchCallback) OnShardRegistered(_ context.Context, _ w
func (callback *schedulerWatchCallback) OnShardExpired(ctx context.Context, event watch.ShardExpireEvent) error {
oldLeader := event.OldLeaderNode
shardID := event.ShardID
return callback.c.DropShardNode(ctx, []storage.ShardNode{
return callback.c.DropShardNodes(ctx, []storage.ShardNode{
{
ID: shardID,
ShardRole: storage.ShardRoleLeader,
2 changes: 1 addition & 1 deletion horaemeta/server/coordinator/shard_picker_test.go
@@ -83,7 +83,7 @@ func TestLeastTableShardPicker(t *testing.T) {
// drop shard node 1, shard 1 should not be picked.
for _, shardNode := range snapshot.Topology.ClusterView.ShardNodes {
if shardNode.ID == 1 {
err = c.GetMetadata().DropShardNode(ctx, []storage.ShardNode{shardNode})
err = c.GetMetadata().DropShardNodes(ctx, []storage.ShardNode{shardNode})
re.NoError(err)
}
}
2 changes: 1 addition & 1 deletion horaemeta/server/service/http/api.go
@@ -230,7 +230,7 @@ func (a *API) dropNodeShards(req *http.Request) apiFuncResult {
}
}

if err := c.GetMetadata().DropShardNode(req.Context(), targetShardNodes); err != nil {
if err := c.GetMetadata().DropShardNodes(req.Context(), targetShardNodes); err != nil {
log.Error("drop node shards failed", zap.Error(err))
return errResult(ErrDropNodeShards, err.Error())
}
