From 9291dfc5ab8fca30b5bfeba4799de2db0ae5768a Mon Sep 17 00:00:00 2001
From: CooooolFrog
Date: Mon, 18 Mar 2024 15:47:51 +0800
Subject: [PATCH] feat(horaemeta): add node inspector (#1483)

## Rationale

In some extreme scenarios, etcd events may be lost or updates may fail, which can leave the mapping between nodes and shards out of date. We need to add a backstop mechanism to ensure that the mapping is always brought back to a correct state.

## Detailed Changes

* Add `NodeInspector`: it starts together with the cluster, runs in the background, and periodically checks the status of nodes, as sketched below.
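To illustrate the idea, here is a minimal, self-contained sketch of the detection pass. The `registeredNode` and `shardNode` types and the lease-based `isExpired` rule below are simplified stand-ins for the real `metadata.RegisteredNode` and `storage.ShardNode` types; the actual implementation lives in `node_inspector.go` in the diff:

```go
package main

import (
	"fmt"
	"time"
)

// registeredNode is a simplified stand-in for metadata.RegisteredNode.
type registeredNode struct {
	name          string
	lastTouchTime time.Time
	lease         time.Duration
}

// isExpired assumes a simple lease-based expiry for illustration:
// the node is expired once its heartbeat lease has lapsed.
func (n registeredNode) isExpired(now time.Time) bool {
	return now.After(n.lastTouchTime.Add(n.lease))
}

// shardNode is a simplified stand-in for storage.ShardNode.
type shardNode struct {
	shardID  uint32
	nodeName string
}

// findExpiredShardNodes mirrors the sweep in node_inspector.go: collect the
// names of expired nodes, then pick every shard still mapped to one of them.
func findExpiredShardNodes(nodes []registeredNode, shards []shardNode) []shardNode {
	expiredNames := make(map[string]struct{})
	now := time.Now()
	for _, n := range nodes {
		if n.isExpired(now) {
			expiredNames[n.name] = struct{}{}
		}
	}
	var expired []shardNode
	for _, s := range shards {
		if _, ok := expiredNames[s.nodeName]; ok {
			expired = append(expired, s)
		}
	}
	return expired
}

func main() {
	nodes := []registeredNode{
		{name: "n1", lastTouchTime: time.Now(), lease: 10 * time.Second},
		{name: "n2", lastTouchTime: time.Now().Add(-time.Minute), lease: 10 * time.Second},
	}
	shards := []shardNode{{0, "n1"}, {1, "n2"}, {2, "n2"}}
	// Only the shards mapped to the expired node n2 are reported.
	fmt.Println(findExpiredShardNodes(nodes, shards))
}
```

In the real code this sweep runs in a background goroutine once per `defaultInspectInterval` (5s), and `DropShardNodes` is called only when the result is non-empty, so the inspector is a cheap no-op in the common case.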
## Test Plan

Pass CI.

---------

Co-authored-by: xikai.wxk
---
 horaemeta/server/cluster/cluster.go           |  11 ++
 .../cluster/metadata/cluster_metadata.go      |   2 +-
 .../cluster/metadata/cluster_metadata_test.go |   2 +-
 .../coordinator/inspector/node_inspector.go   | 147 ++++++++++++++++++
 .../inspector/node_inspector_test.go          | 145 +++++++++++++++++
 .../scheduler/manager/scheduler_manager.go    |   2 +-
 .../server/coordinator/shard_picker_test.go   |   2 +-
 horaemeta/server/service/http/api.go          |   2 +-
 8 files changed, 308 insertions(+), 5 deletions(-)
 create mode 100644 horaemeta/server/coordinator/inspector/node_inspector.go
 create mode 100644 horaemeta/server/coordinator/inspector/node_inspector_test.go

diff --git a/horaemeta/server/cluster/cluster.go b/horaemeta/server/cluster/cluster.go
index 951361774a..48ff07abb6 100644
--- a/horaemeta/server/cluster/cluster.go
+++ b/horaemeta/server/cluster/cluster.go
@@ -26,6 +26,7 @@ import (
 	"github.com/apache/incubator-horaedb-meta/server/cluster/metadata"
 	"github.com/apache/incubator-horaedb-meta/server/coordinator"
 	"github.com/apache/incubator-horaedb-meta/server/coordinator/eventdispatch"
+	"github.com/apache/incubator-horaedb-meta/server/coordinator/inspector"
 	"github.com/apache/incubator-horaedb-meta/server/coordinator/procedure"
 	"github.com/apache/incubator-horaedb-meta/server/coordinator/scheduler/manager"
 	"github.com/apache/incubator-horaedb-meta/server/id"
@@ -47,6 +48,7 @@ type Cluster struct {
 	procedureFactory *coordinator.Factory
 	procedureManager procedure.Manager
 	schedulerManager manager.SchedulerManager
+	nodeInspector    *inspector.NodeInspector
 }

 func NewCluster(logger *zap.Logger, metadata *metadata.ClusterMetadata, client *clientv3.Client, rootPath string) (*Cluster, error) {
@@ -62,12 +64,15 @@ func NewCluster(logger *zap.Logger, metadata *metadata.ClusterMetadata, client *
 	schedulerManager := manager.NewManager(logger, procedureManager, procedureFactory, metadata, client, rootPath, metadata.GetTopologyType(), metadata.GetProcedureExecutingBatchSize())

+	nodeInspector := inspector.NewNodeInspector(logger, metadata)
+
 	return &Cluster{
 		logger:           logger,
 		metadata:         metadata,
 		procedureFactory: procedureFactory,
 		procedureManager: procedureManager,
 		schedulerManager: schedulerManager,
+		nodeInspector:    nodeInspector,
 	}, nil
 }

@@ -78,6 +83,9 @@ func (c *Cluster) Start(ctx context.Context) error {
 	if err := c.schedulerManager.Start(ctx); err != nil {
 		return errors.WithMessage(err, "start scheduler manager")
 	}
+	if err := c.nodeInspector.Start(ctx); err != nil {
+		return errors.WithMessage(err, "start node inspector")
+	}

 	return nil
 }

@@ -88,6 +96,9 @@ func (c *Cluster) Stop(ctx context.Context) error {
 	if err := c.schedulerManager.Stop(ctx); err != nil {
 		return errors.WithMessage(err, "stop scheduler manager")
 	}
+	if err := c.nodeInspector.Stop(ctx); err != nil {
+		return errors.WithMessage(err, "stop node inspector")
+	}

 	return nil
 }

diff --git a/horaemeta/server/cluster/metadata/cluster_metadata.go b/horaemeta/server/cluster/metadata/cluster_metadata.go
index 0ae03b6a48..d68b4d9b36 100644
--- a/horaemeta/server/cluster/metadata/cluster_metadata.go
+++ b/horaemeta/server/cluster/metadata/cluster_metadata.go
@@ -685,7 +685,7 @@ func (c *ClusterMetadata) UpdateClusterViewByNode(ctx context.Context, shardNode
 	return nil
 }

-func (c *ClusterMetadata) DropShardNode(ctx context.Context, shardNodes []storage.ShardNode) error {
+func (c *ClusterMetadata) DropShardNodes(ctx context.Context, shardNodes []storage.ShardNode) error {
 	if err := c.topologyManager.DropShardNodes(ctx, shardNodes); err != nil {
 		return errors.WithMessage(err, "drop shard nodes")
 	}

diff --git a/horaemeta/server/cluster/metadata/cluster_metadata_test.go b/horaemeta/server/cluster/metadata/cluster_metadata_test.go
index 907dbbf507..e29761491c 100644
--- a/horaemeta/server/cluster/metadata/cluster_metadata_test.go
+++ b/horaemeta/server/cluster/metadata/cluster_metadata_test.go
@@ -213,7 +213,7 @@ func testShardOperation(ctx context.Context, re *require.Assertions, m *metadata
 	_, err = m.GetShardNodeByTableIDs([]storage.TableID{})
 	re.NoError(err)

-	err = m.DropShardNode(ctx, []storage.ShardNode{{
+	err = m.DropShardNodes(ctx, []storage.ShardNode{{
 		ID:        shardNodeResult.NodeShards[0].ShardNode.ID,
 		ShardRole: shardNodeResult.NodeShards[0].ShardNode.ShardRole,
 		NodeName:  shardNodeResult.NodeShards[0].ShardNode.NodeName,

diff --git a/horaemeta/server/coordinator/inspector/node_inspector.go b/horaemeta/server/coordinator/inspector/node_inspector.go
new file mode 100644
index 0000000000..e4a3671c8a
--- /dev/null
+++ b/horaemeta/server/coordinator/inspector/node_inspector.go
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package inspector
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/apache/incubator-horaedb-meta/pkg/coderr"
+	"github.com/apache/incubator-horaedb-meta/pkg/log"
+	"github.com/apache/incubator-horaedb-meta/server/cluster/metadata"
+	"github.com/apache/incubator-horaedb-meta/server/storage"
+	"go.uber.org/zap"
+)
+
+var ErrStartAgain = coderr.NewCodeError(coderr.Internal, "try to start again")
+var ErrStopNotStart = coderr.NewCodeError(coderr.Internal, "try to stop a not-started inspector")
+
+const defaultInspectInterval = time.Second * 5
+
+// NodeInspector will inspect node status and remove expired data.
+type NodeInspector struct {
+	logger          *zap.Logger
+	clusterMetadata ClusterMetaDataManipulator
+	interval        time.Duration
+
+	starter sync.Once
+	// After `Start` is called, the following fields will be initialized.
+	stopCtx     context.Context
+	bgJobCancel context.CancelFunc
+}
+
+// ClusterMetaDataManipulator provides the cluster snapshot for NodeInspector to check, and the utility to drop expired shard nodes.
+type ClusterMetaDataManipulator interface {
+	GetClusterSnapshot() metadata.Snapshot
+	DropShardNodes(context.Context, []storage.ShardNode) error
+}
+
+func NewNodeInspectorWithInterval(logger *zap.Logger, clusterMetadata ClusterMetaDataManipulator, inspectInterval time.Duration) *NodeInspector {
+	return &NodeInspector{
+		logger:          logger,
+		clusterMetadata: clusterMetadata,
+		interval:        inspectInterval,
+		starter:         sync.Once{},
+		stopCtx:         nil,
+		bgJobCancel:     nil,
+	}
+}
+
+func NewNodeInspector(logger *zap.Logger, clusterMetadata ClusterMetaDataManipulator) *NodeInspector {
+	return NewNodeInspectorWithInterval(logger, clusterMetadata, defaultInspectInterval)
+}
+
+func (ni *NodeInspector) Start(ctx context.Context) error {
+	started := false
+	ni.starter.Do(func() {
+		log.Info("node inspector start")
+		started = true
+		ni.stopCtx, ni.bgJobCancel = context.WithCancel(ctx)
+		go func() {
+			for {
+				t := time.NewTimer(ni.interval)
+				select {
+				case <-ni.stopCtx.Done():
+					ni.logger.Info("node inspector is stopped, cancel the bg inspecting")
+					if !t.Stop() {
+						<-t.C
+					}
+					return
+				case <-t.C:
+				}
+
+				ni.inspect(ctx)
+			}
+		}()
+	})
+
+	if !started {
+		return ErrStartAgain
+	}
+
+	return nil
+}
+
+func (ni *NodeInspector) Stop(_ context.Context) error {
+	if ni.bgJobCancel != nil {
+		ni.bgJobCancel()
+		return nil
+	}
+
+	return ErrStopNotStart
+}
+
+func (ni *NodeInspector) inspect(ctx context.Context) {
+	// Get the latest cluster snapshot.
+	snapshot := ni.clusterMetadata.GetClusterSnapshot()
+	expiredShardNodes := findExpiredShardNodes(snapshot)
+	if len(expiredShardNodes) == 0 {
+		return
+	}
+
+	// Try to remove the stale shard nodes if any exist.
+	if err := ni.clusterMetadata.DropShardNodes(ctx, expiredShardNodes); err != nil {
+		log.Error("drop shard nodes failed", zap.Error(err))
+	}
+}
+
+func findExpiredShardNodes(snapshot metadata.Snapshot) []storage.ShardNode {
+	// In most cases there are no expired shard nodes, so don't pre-allocate the memory here.
+	expiredNodes := make(map[string]struct{}, 0)
+	// Check the node status.
+	now := time.Now()
+	for i := range snapshot.RegisteredNodes {
+		node := &snapshot.RegisteredNodes[i]
+		if node.IsExpired(now) {
+			expiredNodes[node.Node.Name] = struct{}{}
+		}
+	}
+
+	expiredShardNodes := make([]storage.ShardNode, 0, len(expiredNodes))
+	for _, shardNode := range snapshot.Topology.ClusterView.ShardNodes {
+		_, ok := expiredNodes[shardNode.NodeName]
+		if ok {
+			expiredShardNodes = append(expiredShardNodes, shardNode)
+		}
+	}
+
+	return expiredShardNodes
+}
diff --git a/horaemeta/server/coordinator/inspector/node_inspector_test.go b/horaemeta/server/coordinator/inspector/node_inspector_test.go
new file mode 100644
index 0000000000..f119299ed2
--- /dev/null
+++ b/horaemeta/server/coordinator/inspector/node_inspector_test.go
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package inspector
+
+import (
+	"context"
+	"slices"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/apache/incubator-horaedb-meta/server/cluster/metadata"
+	"github.com/apache/incubator-horaedb-meta/server/storage"
+	"github.com/stretchr/testify/assert"
+	"go.uber.org/zap"
+)
+
+type mockClusterMetaDataManipulator struct {
+	snapshot          metadata.Snapshot
+	lock              sync.Mutex
+	droppedShardNodes [][]storage.ShardNode
+}
+
+func newMockClusterMetaDataManipulator(shardNodes []storage.ShardNode, registeredNodes []metadata.RegisteredNode) *mockClusterMetaDataManipulator {
+	var clusterView storage.ClusterView
+	clusterView.ShardNodes = shardNodes
+	topology := metadata.Topology{
+		ShardViewsMapping: nil,
+		ClusterView:       clusterView,
+	}
+
+	snapshot := metadata.Snapshot{
+		Topology:        topology,
+		RegisteredNodes: registeredNodes,
+	}
+	return &mockClusterMetaDataManipulator{
+		snapshot:          snapshot,
+		lock:              sync.Mutex{},
+		droppedShardNodes: make([][]storage.ShardNode, 0),
+	}
+}
+
+func (n *mockClusterMetaDataManipulator) GetClusterSnapshot() metadata.Snapshot {
+	return n.snapshot
+}
+
+func (n *mockClusterMetaDataManipulator) DropShardNodes(_ context.Context, shardNodes []storage.ShardNode) error {
+	n.lock.Lock()
+	defer n.lock.Unlock()
+
+	n.droppedShardNodes = append(n.droppedShardNodes, shardNodes)
+	newShardNodes := make([]storage.ShardNode, 0, 2)
+	for _, node := range n.snapshot.Topology.ClusterView.ShardNodes {
+		dropped := slices.ContainsFunc(shardNodes, func(droppedNode storage.ShardNode) bool {
+			return node.NodeName == droppedNode.NodeName
+		})
+		if !dropped {
+			newShardNodes = append(newShardNodes, node)
+		}
+	}
+	n.snapshot.Topology.ClusterView.ShardNodes = newShardNodes
+	return nil
+}
+
+func (n *mockClusterMetaDataManipulator) CheckDroppedShardNodes(check func(droppedShardNodes [][]storage.ShardNode)) {
+	n.lock.Lock()
+	defer n.lock.Unlock()
+
+	check(n.droppedShardNodes)
+}
+
+func TestStartStopInspector(t *testing.T) {
+	inspector := NewNodeInspector(zap.NewNop(), newMockClusterMetaDataManipulator(nil, nil))
+
+	ctx := context.Background()
+	assert.NoError(t, inspector.Start(ctx))
+	assert.Error(t, inspector.Start(ctx))
+
+	assert.NoError(t, inspector.Stop(ctx))
+}
+
+func TestInspect(t *testing.T) {
+	shardNodes := []storage.ShardNode{
+		{ID: storage.ShardID(0), ShardRole: storage.ShardRoleLeader, NodeName: "192.168.1.102"},
+		{ID: storage.ShardID(1), ShardRole: storage.ShardRoleLeader, NodeName: "192.168.1.102"},
+		{ID: storage.ShardID(2), ShardRole: storage.ShardRoleLeader, NodeName: "192.168.1.103"},
+		{ID: storage.ShardID(3), ShardRole: storage.ShardRoleLeader, NodeName: "192.168.1.103"},
+	}
+	registeredNodes := []metadata.RegisteredNode{
+		{
+			Node: storage.Node{
+				Name:          "192.168.1.102",
+				NodeStats:     storage.NodeStats{Lease: 0, Zone: "", NodeVersion: ""},
+				LastTouchTime: uint64(time.Now().UnixMilli()),
+				State:         storage.NodeStateOnline,
+			},
+			ShardInfos: nil,
+		},
+		{
+			// This node should be outdated.
+			Node: storage.Node{
+				Name:          "192.168.1.103",
+				NodeStats:     storage.NodeStats{Lease: 0, Zone: "", NodeVersion: ""},
+				LastTouchTime: uint64(time.Now().Add(-20 * time.Second).UnixMilli()),
+				State:         storage.NodeStateOnline,
+			},
+			ShardInfos: nil,
+		},
+	}
+
+	metadata := newMockClusterMetaDataManipulator(shardNodes, registeredNodes)
+	inspector := NewNodeInspectorWithInterval(zap.NewNop(), metadata, time.Millisecond*100)
+	ctx := context.Background()
+	assert.NoError(t, inspector.Start(ctx))
+
+	// With a 100ms interval, the inspection should be triggered within 200ms.
+	time.Sleep(time.Millisecond * 200)
+
+	// The shard nodes on the outdated node should have been dropped by the triggered inspection.
+	metadata.CheckDroppedShardNodes(func(droppedShardNodes [][]storage.ShardNode) {
+		assert.True(t, len(droppedShardNodes) == 1)
+		assert.True(t, len(droppedShardNodes[0]) == 2)
+		assert.Equal(t, droppedShardNodes[0][0], shardNodes[2])
+		assert.Equal(t, droppedShardNodes[0][1], shardNodes[3])
+	})
+
+	assert.NoError(t, inspector.Stop(ctx))
+}

diff --git a/horaemeta/server/coordinator/scheduler/manager/scheduler_manager.go b/horaemeta/server/coordinator/scheduler/manager/scheduler_manager.go
index 0ed97f87db..51246a569b 100644
--- a/horaemeta/server/coordinator/scheduler/manager/scheduler_manager.go
+++ b/horaemeta/server/coordinator/scheduler/manager/scheduler_manager.go
@@ -203,7 +203,7 @@ func (callback *schedulerWatchCallback) OnShardRegistered(_ context.Context, _ w
 func (callback *schedulerWatchCallback) OnShardExpired(ctx context.Context, event watch.ShardExpireEvent) error {
 	oldLeader := event.OldLeaderNode
 	shardID := event.ShardID
-	return callback.c.DropShardNode(ctx, []storage.ShardNode{
+	return callback.c.DropShardNodes(ctx, []storage.ShardNode{
 		{
 			ID:        shardID,
 			ShardRole: storage.ShardRoleLeader,

diff --git a/horaemeta/server/coordinator/shard_picker_test.go b/horaemeta/server/coordinator/shard_picker_test.go
index 3b69d9a4d1..9cbcc4d1c0 100644
--- a/horaemeta/server/coordinator/shard_picker_test.go
+++ b/horaemeta/server/coordinator/shard_picker_test.go
@@ -83,7 +83,7 @@ func TestLeastTableShardPicker(t *testing.T) {
 	// drop shard node 1, shard 1 should not be picked.
 	for _, shardNode := range snapshot.Topology.ClusterView.ShardNodes {
 		if shardNode.ID == 1 {
-			err = c.GetMetadata().DropShardNode(ctx, []storage.ShardNode{shardNode})
+			err = c.GetMetadata().DropShardNodes(ctx, []storage.ShardNode{shardNode})
 			re.NoError(err)
 		}
 	}

diff --git a/horaemeta/server/service/http/api.go b/horaemeta/server/service/http/api.go
index 4476e81c63..5f4a36eaee 100644
--- a/horaemeta/server/service/http/api.go
+++ b/horaemeta/server/service/http/api.go
@@ -230,7 +230,7 @@ func (a *API) dropNodeShards(req *http.Request) apiFuncResult {
 		}
 	}

-	if err := c.GetMetadata().DropShardNode(req.Context(), targetShardNodes); err != nil {
+	if err := c.GetMetadata().DropShardNodes(req.Context(), targetShardNodes); err != nil {
 		log.Error("drop node shards failed", zap.Error(err))
 		return errResult(ErrDropNodeShards, err.Error())
 	}