Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

cluster: support node_pool value as filter #810

Merged
merged 2 commits into from
Jan 2, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

IMPROVEMENTS:
* agent: Add `BlockQueryWaitTime` config option for Nomad API connectivity [[GH-755](https://github.com/hashicorp/nomad-autoscaler/pull/755)]
* scaleutils: Add new node filter option `node_pool` to select nodes by their node pool value [[GH-810](https://github.com/hashicorp/nomad-autoscaler/pull/810)]

## 0.4.0 (December 20, 2023)

Expand Down
6 changes: 4 additions & 2 deletions sdk/helper/scaleutils/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -204,8 +204,10 @@ func (c *ClusterScaleUtils) IdentifyScaleInNodes(cfg map[string]string, num int)

if c.log.IsDebug() {
for _, n := range nodes {
c.log.Debug("found node", "node_id", n.ID, "datacenter", n.Datacenter, "node_class", n.NodeClass,
"status", n.Status, "eligibility", n.SchedulingEligibility, "draining", n.Drain)
c.log.Debug("found node",
"node_id", n.ID, "datacenter", n.Datacenter, "node_class", n.NodeClass, "node_pool", n.NodePool,
"status", n.Status, "eligibility", n.SchedulingEligibility, "draining", n.Drain,
)
}
}

Expand Down
32 changes: 18 additions & 14 deletions sdk/helper/scaleutils/nodepool/nodepool.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,21 +33,25 @@ type ClusterNodePoolIdentifier interface {
func NewClusterNodePoolIdentifier(cfg map[string]string) (ClusterNodePoolIdentifier, error) {
class, hasClass := cfg[sdk.TargetConfigKeyClass]
dc, hasDC := cfg[sdk.TargetConfigKeyDatacenter]
pool, hasPool := cfg[sdk.TargetConfigKeyNodePool]

switch {
case hasClass && hasDC:
return NewCombinedClusterPoolIdentifier(
[]ClusterNodePoolIdentifier{
NewNodeClassPoolIdentifier(class),
NewNodeDatacenterPoolIdentifier(dc),
},
CombinedClusterPoolIdentifierAnd,
), nil
case hasClass:
return NewNodeClassPoolIdentifier(class), nil
case hasDC:
return NewNodeDatacenterPoolIdentifier(dc), nil
ids := make([]ClusterNodePoolIdentifier, 0, 1)
if hasClass {
ids = append(ids, NewNodeClassPoolIdentifier(class))
}
if hasDC {
ids = append(ids, NewNodeDatacenterPoolIdentifier(dc))
}
if hasPool {
ids = append(ids, NewNodePoolClusterPoolIdentifier(pool))
}

return nil, fmt.Errorf("node pool identification method required")
switch len(ids) {
case 0:
return nil, fmt.Errorf("node pool identification method required")
case 1:
return ids[0], nil
default:
return NewCombinedClusterPoolIdentifier(ids, CombinedClusterPoolIdentifierAnd), nil
}
}
25 changes: 25 additions & 0 deletions sdk/helper/scaleutils/nodepool/nodepool_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,13 +46,38 @@ func Test_NewClusterNodePoolIdentifier(t *testing.T) {
expectedOutputErr: nil,
name: "node_class configured in config",
},
{
inputCfg: map[string]string{"node_pool": "gpu"},
expectedOutputKey: "node_pool",
expectedOutputValue: "gpu",
expectedOutputErr: nil,
name: "node_pool configured in config",
},
{
inputCfg: map[string]string{"node_class": "high-memory", "datacenter": "dc1"},
expectedOutputKey: "combined_identifier",
expectedOutputValue: "node_class:high-memory and datacenter:dc1",
expectedOutputErr: nil,
name: "node_class and datacenter are configured in config",
},
{
inputCfg: map[string]string{"node_pool": "gpu", "datacenter": "dc1"},
expectedOutputKey: "combined_identifier",
expectedOutputValue: "datacenter:dc1 and node_pool:gpu",
expectedOutputErr: nil,
name: "node_pool and datacenter are configured in config",
},
{
inputCfg: map[string]string{
"node_class": "high-memory",
"node_pool": "gpu",
"datacenter": "dc1",
},
expectedOutputKey: "combined_identifier",
expectedOutputValue: "node_class:high-memory and datacenter:dc1 and node_pool:gpu",
expectedOutputErr: nil,
name: "node_class, node_pool, and datacenter are configured in config",
},
}

for _, tc := range testCases {
Expand Down
37 changes: 37 additions & 0 deletions sdk/helper/scaleutils/nodepool/pool.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package nodepool

import (
"github.com/hashicorp/nomad-autoscaler/sdk"
"github.com/hashicorp/nomad/api"
)

// nodePoolClusterPoolIdentifier is the NodePool implementation of the
// ClusterNodePoolIdentifier interface that filters Nomad nodes by their
// Node.NodePool parameter.
type nodePoolClusterPoolIdentifier struct {
	// pool is the node pool name that candidate nodes must match exactly.
	pool string
}

// NewNodePoolClusterPoolIdentifier returns a ClusterNodePoolIdentifier that
// selects nodes whose NodePool value matches the supplied pool name.
func NewNodePoolClusterPoolIdentifier(pool string) ClusterNodePoolIdentifier {
	return &nodePoolClusterPoolIdentifier{pool: pool}
}

// IsPoolMember satisfies the IsPoolMember function on the
// ClusterNodePoolIdentifier interface. A node is a member when its NodePool
// value is exactly equal to the configured pool name.
func (n nodePoolClusterPoolIdentifier) IsPoolMember(node *api.NodeListStub) bool {
	return n.pool == node.NodePool
}

// Key satisfies the Key function on the ClusterNodePoolIdentifier interface,
// reporting the target config key this identifier filters on.
func (n nodePoolClusterPoolIdentifier) Key() string {
	return sdk.TargetConfigKeyNodePool
}

// Value satisfies the Value function on the ClusterNodePoolIdentifier
// interface, reporting the pool name nodes are matched against.
func (n nodePoolClusterPoolIdentifier) Value() string {
	return n.pool
}
45 changes: 45 additions & 0 deletions sdk/helper/scaleutils/nodepool/pool_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package nodepool

import (
"testing"

"github.com/hashicorp/nomad/api"
"github.com/shoenig/test/must"
)

// TestNewNodePoolClusterIdentifier ensures the constructor wires the supplied
// pool name through to the identifier's key and value accessors.
func TestNewNodePoolClusterIdentifier(t *testing.T) {
	id := NewNodePoolClusterPoolIdentifier("gpu")

	must.Eq(t, "node_pool", id.Key())
	must.Eq(t, "gpu", id.Value())
}

// TestNodePoolClusterPoolIdentifier_NodeIsPoolMember exercises pool-membership
// matching against node stubs with differing NodePool values.
func TestNodePoolClusterPoolIdentifier_NodeIsPoolMember(t *testing.T) {
	testCases := []struct {
		inputPI        ClusterNodePoolIdentifier
		inputNode      *api.NodeListStub
		expectedOutput bool
		name           string
	}{
		{
			inputPI:        NewNodePoolClusterPoolIdentifier("foo"),
			inputNode:      &api.NodeListStub{NodePool: "bar"},
			expectedOutput: false,
			// Fixed name: these cases test node pools, not node classes.
			name:           "non-matched non-empty pool",
		},
		{
			inputPI:        NewNodePoolClusterPoolIdentifier("foo"),
			inputNode:      &api.NodeListStub{NodePool: "foo"},
			expectedOutput: true,
			name:           "matched pool",
		},
		{
			inputPI:        NewNodePoolClusterPoolIdentifier("foo"),
			inputNode:      &api.NodeListStub{NodePool: ""},
			expectedOutput: false,
			name:           "non-matched empty pool",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			must.Eq(t, tc.expectedOutput, tc.inputPI.IsPoolMember(tc.inputNode))
		})
	}
}
5 changes: 5 additions & 0 deletions sdk/target.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,11 @@ const (
// the clients node_class configuration param.
TargetConfigKeyClass = "node_class"

// TargetConfigKeyNodePool is the horizontal cluster scaling target config
// key which identifies nodes as part of a pool of resources using the
// clients node_pool configuration param.
TargetConfigKeyNodePool = "node_pool"

// TargetConfigKeyDatacenter is the horizontal cluster scaling target
// config key which identifies nodes as part of a pool of resources using
// the agents datacenter configuration param.
Expand Down