Drain nodes in parallel #4966

Merged
merged 7 commits on Mar 22, 2022
Changes from 2 commits
1 change: 1 addition & 0 deletions go.mod
@@ -335,6 +335,7 @@ require (
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/orcaman/concurrent-map v1.0.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
github.com/pierrec/lz4 v2.0.5+incompatible // indirect
2 changes: 2 additions & 0 deletions go.sum
@@ -1929,6 +1929,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
github.com/otiai10/copy v1.7.0 h1:hVoPiN+t+7d2nzzwMiDHPSOogsWAStewq3TwU05+clE=
github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U=
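These two commits add github.com/orcaman/concurrent-map as a dependency but do not yet show where it is used; the drainer changes themselves land in later commits of this PR. Below is a minimal sketch of the pattern such a map enables, several goroutines recording per-node drain status without wrapping a plain map in a mutex. The node names and status strings are purely illustrative, not eksctl's internals.

package main

import (
	"fmt"
	"sync"

	cmap "github.com/orcaman/concurrent-map"
)

func main() {
	// Shard-locked map: Set/Get are safe to call from many goroutines.
	status := cmap.New()

	var wg sync.WaitGroup
	for _, node := range []string{"node-a", "node-b", "node-c"} {
		node := node
		wg.Add(1)
		go func() {
			defer wg.Done()
			status.Set(node, "drained") // no extra locking needed
		}()
	}
	wg.Wait()

	if v, ok := status.Get("node-b"); ok {
		fmt.Println("node-b:", v)
	}
	fmt.Println("nodes tracked:", status.Count())
}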
1 change: 1 addition & 0 deletions integration/tests/unowned_cluster/unowned_cluster_test.go
@@ -319,6 +319,7 @@ var _ = Describe("(Integration) [non-eksctl cluster & nodegroup support]", func(
WithArgs(
"--cluster", params.ClusterName,
"--name", mng1,
"--parallel", "2",
"--verbose", "2",
)
Expect(cmd).To(RunSuccessfully())
2 changes: 1 addition & 1 deletion pkg/actions/cluster/cluster.go
@@ -16,7 +16,7 @@ import (

type Cluster interface {
Upgrade(dryRun bool) error
Delete(waitInterval time.Duration, wait, force, disableNodegroupEviction bool) error
Delete(waitInterval time.Duration, wait, force, disableNodegroupEviction bool, parallel int) error
}

func New(cfg *api.ClusterConfig, ctl *eks.ClusterProvider) (Cluster, error) {
4 changes: 3 additions & 1 deletion pkg/actions/cluster/delete.go
@@ -165,7 +165,8 @@ func checkForUndeletedStacks(stackManager manager.StackManager) error {
return nil
}

func drainAllNodeGroups(cfg *api.ClusterConfig, ctl *eks.ClusterProvider, clientSet kubernetes.Interface, allStacks []manager.NodeGroupStack, disableEviction bool, nodeGroupDrainer NodeGroupDrainer, vpcCniDeleter vpcCniDeleter) error {
func drainAllNodeGroups(cfg *api.ClusterConfig, ctl *eks.ClusterProvider, clientSet kubernetes.Interface, allStacks []manager.NodeGroupStack,
disableEviction bool, parallel int, nodeGroupDrainer NodeGroupDrainer, vpcCniDeleter vpcCniDeleter) error {
if len(allStacks) == 0 {
return nil
}
@@ -183,6 +184,7 @@ func drainAllNodeGroups(cfg *api.ClusterConfig, ctl *eks.ClusterProvider, client
NodeGroups: cmdutils.ToKubeNodeGroups(cfg),
MaxGracePeriod: ctl.Provider.WaitTimeout(),
DisableEviction: disableEviction,
Parallel: parallel,
}
if err := nodeGroupDrainer.Drain(drainInput); err != nil {
return err
4 changes: 2 additions & 2 deletions pkg/actions/cluster/owned.go
@@ -69,7 +69,7 @@ func (c *OwnedCluster) Upgrade(dryRun bool) error {
return nil
}

func (c *OwnedCluster) Delete(_ time.Duration, wait, force, disableNodegroupEviction bool) error {
func (c *OwnedCluster) Delete(_ time.Duration, wait, force, disableNodegroupEviction bool, parallel int) error {
var (
clientSet kubernetes.Interface
oidc *iamoidc.OpenIDConnectManager
@@ -111,7 +111,7 @@ func (c *OwnedCluster) Delete(_ time.Duration, wait, force, disableNodegroupEvic
}

nodeGroupManager := c.newNodeGroupManager(c.cfg, c.ctl, clientSet)
if err := drainAllNodeGroups(c.cfg, c.ctl, clientSet, allStacks, disableNodegroupEviction, nodeGroupManager, attemptVpcCniDeletion); err != nil {
if err := drainAllNodeGroups(c.cfg, c.ctl, clientSet, allStacks, disableNodegroupEviction, parallel, nodeGroupManager, attemptVpcCniDeletion); err != nil {
if !force {
return err
}
4 changes: 2 additions & 2 deletions pkg/actions/cluster/unowned.go
@@ -54,7 +54,7 @@ func (c *UnownedCluster) Upgrade(dryRun bool) error {
return nil
}

func (c *UnownedCluster) Delete(waitInterval time.Duration, wait, force, disableNodegroupEviction bool) error {
func (c *UnownedCluster) Delete(waitInterval time.Duration, wait, force, disableNodegroupEviction bool, parallel int) error {
clusterName := c.cfg.Metadata.Name

if err := c.checkClusterExists(clusterName); err != nil {
@@ -79,7 +79,7 @@ func (c *UnownedCluster) Delete(waitInterval time.Duration, wait, force, disable
}

nodeGroupManager := c.newNodeGroupManager(c.cfg, c.ctl, clientSet)
if err := drainAllNodeGroups(c.cfg, c.ctl, clientSet, allStacks, disableNodegroupEviction, nodeGroupManager, attemptVpcCniDeletion); err != nil {
if err := drainAllNodeGroups(c.cfg, c.ctl, clientSet, allStacks, disableNodegroupEviction, parallel, nodeGroupManager, attemptVpcCniDeletion); err != nil {
if !force {
return err
}
3 changes: 2 additions & 1 deletion pkg/actions/nodegroup/drain.go
@@ -15,12 +15,13 @@ type DrainInput struct {
NodeDrainWaitPeriod time.Duration
Undo bool
DisableEviction bool
Parallel int
}

func (m *Manager) Drain(input *DrainInput) error {
if !input.Plan {
for _, n := range input.NodeGroups {
nodeGroupDrainer := drain.NewNodeGroupDrainer(m.clientSet, n, m.ctl.Provider.WaitTimeout(), input.MaxGracePeriod, input.NodeDrainWaitPeriod, input.Undo, input.DisableEviction)
nodeGroupDrainer := drain.NewNodeGroupDrainer(m.clientSet, n, m.ctl.Provider.WaitTimeout(), input.MaxGracePeriod, input.NodeDrainWaitPeriod, input.Undo, input.DisableEviction, input.Parallel)
if err := nodeGroupDrainer.Drain(); err != nil {
return err
}
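DrainInput now carries the Parallel value through to NewNodeGroupDrainer, but the drainer implementation itself is not part of the two commits shown here. The sketch below illustrates one way a drain loop can be bounded to N nodes in flight using a counting-semaphore channel; the function names and error handling are assumptions for illustration, not eksctl's actual code.

package main

import (
	"fmt"
	"sync"
)

// drainNodes runs drainOne for every node, with at most `parallel` drains in flight.
func drainNodes(nodes []string, parallel int, drainOne func(node string) error) error {
	if parallel < 1 {
		parallel = 1 // mirrors the flag's default of draining one node at a time
	}
	sem := make(chan struct{}, parallel) // counting semaphore
	var (
		wg       sync.WaitGroup
		mu       sync.Mutex
		firstErr error
	)
	for _, node := range nodes {
		node := node
		sem <- struct{}{} // blocks once `parallel` drains are already running
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()
			if err := drainOne(node); err != nil {
				mu.Lock()
				if firstErr == nil {
					firstErr = err
				}
				mu.Unlock()
			}
		}()
	}
	wg.Wait()
	return firstErr
}

func main() {
	nodes := []string{"ip-192-168-1-10", "ip-192-168-2-20", "ip-192-168-3-30"}
	err := drainNodes(nodes, 2, func(node string) error {
		fmt.Println("draining", node) // a real drainer would cordon the node and evict its pods
		return nil
	})
	fmt.Println("done, err:", err)
}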
11 changes: 9 additions & 2 deletions pkg/ctl/cmdutils/configfile.go
@@ -4,6 +4,7 @@ import (
"encoding/csv"
"fmt"
"reflect"
"strconv"
"strings"

"github.com/kris-nova/logger"
@@ -517,8 +518,8 @@ func normalizeBaseNodeGroup(np api.NodePool, cmd *cobra.Command) {
}
}

// NewDeleteNodeGroupLoader will load config or use flags for 'eksctl delete nodegroup'
func NewDeleteNodeGroupLoader(cmd *Cmd, ng *api.NodeGroup, ngFilter *filter.NodeGroupFilter) ClusterConfigLoader {
// NewDeleteAndDrainNodeGroupLoader will load config or use flags for 'eksctl delete nodegroup'
func NewDeleteAndDrainNodeGroupLoader(cmd *Cmd, ng *api.NodeGroup, ngFilter *filter.NodeGroupFilter) ClusterConfigLoader {
l := newCommonClusterConfigLoader(cmd)

l.validateWithConfigFile = func() error {
@@ -546,6 +547,12 @@ func NewDeleteNodeGroupLoader(cmd *Cmd, ng *api.NodeGroup, ngFilter *filter.Node
return ErrMustBeSet("--name")
}

if flag := l.CobraCommand.Flag("parallel"); flag != nil && flag.Changed {
if val, _ := strconv.Atoi(flag.Value.String()); val > 25 || val < 1 {
return fmt.Errorf("--parallel value must be of range 1-25")
}
}

ngFilter.AppendIncludeNames(ng.Name)

l.Plan = false
20 changes: 12 additions & 8 deletions pkg/ctl/delete/cluster.go
@@ -15,22 +15,25 @@ import (
)

func deleteClusterCmd(cmd *cmdutils.Cmd) {
deleteClusterWithRunFunc(cmd, func(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool) error {
return doDeleteCluster(cmd, force, disableNodegroupEviction)
deleteClusterWithRunFunc(cmd, func(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool, parallel int) error {
return doDeleteCluster(cmd, force, disableNodegroupEviction, parallel)
})
}

func deleteClusterWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool) error) {
func deleteClusterWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool, parallel int) error) {
cfg := api.NewClusterConfig()
cmd.ClusterConfig = cfg

cmd.SetDescription("cluster", "Delete a cluster", "")

var force bool
var disableNodegroupEviction bool
var (
force bool
disableNodegroupEviction bool
parallel int
)
cmd.CobraCommand.RunE = func(_ *cobra.Command, args []string) error {
cmd.NameArg = cmdutils.GetNameArg(args)
return runFunc(cmd, force, disableNodegroupEviction)
return runFunc(cmd, force, disableNodegroupEviction, parallel)
}

cmd.FlagSetGroup.InFlagSet("General", func(fs *pflag.FlagSet) {
@@ -41,6 +44,7 @@ func deleteClusterWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd,
cmdutils.AddWaitFlag(fs, &cmd.Wait, "deletion of all resources")
fs.BoolVar(&force, "force", false, "Force deletion to continue when errors occur")
fs.BoolVar(&disableNodegroupEviction, "disable-nodegroup-eviction", false, "Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution.")
fs.IntVar(&parallel, "parallel", 1, "Number of nodes to drain in parallel. Max 25")

cmdutils.AddConfigFileFlag(fs, &cmd.ClusterConfigFile)
cmdutils.AddTimeoutFlag(fs, &cmd.ProviderConfig.WaitTimeout)
@@ -49,7 +53,7 @@
cmdutils.AddCommonFlagsForAWS(cmd.FlagSetGroup, &cmd.ProviderConfig, true)
}

func doDeleteCluster(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool) error {
func doDeleteCluster(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool, parallel int) error {
if err := cmdutils.NewMetadataLoader(cmd).Load(); err != nil {
return err
}
@@ -81,5 +85,5 @@ func doDeleteCluster(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction boo
return err
}

return cluster.Delete(time.Second*20, cmd.Wait, force, disableNodegroupEviction)
return cluster.Delete(time.Second*20, cmd.Wait, force, disableNodegroupEviction, parallel)
}
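With the flag registered here and on the nodegroup commands below, an invocation such as eksctl delete cluster --name my-cluster --parallel 5 (the cluster name is purely an example) would drain up to five nodes at a time while nodegroups are deleted; the default of 1 should keep the previous one-node-at-a-time behaviour, and the delete/drain nodegroup loader above rejects values outside 1-25.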
25 changes: 16 additions & 9 deletions pkg/ctl/delete/nodegroup.go
@@ -17,25 +17,30 @@ import (
)

func deleteNodeGroupCmd(cmd *cmdutils.Cmd) {
deleteNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool) error {
return doDeleteNodeGroup(cmd, ng, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing, maxGracePeriod, disableEviction)
deleteNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool, parallel int) error {
return doDeleteNodeGroup(cmd, ng, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing, maxGracePeriod, disableEviction, parallel)
})
}

func deleteNodeGroupWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool) error) {
func deleteNodeGroupWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool, parallel int) error) {
cfg := api.NewClusterConfig()
ng := api.NewNodeGroup()
cmd.ClusterConfig = cfg

var updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool
var maxGracePeriod time.Duration
var disableEviction bool
var (
updateAuthConfigMap bool
deleteNodeGroupDrain bool
onlyMissing bool
maxGracePeriod time.Duration
disableEviction bool
parallel int
)

cmd.SetDescription("nodegroup", "Delete a nodegroup", "", "ng")

cmd.CobraCommand.RunE = func(_ *cobra.Command, args []string) error {
cmd.NameArg = cmdutils.GetNameArg(args)
return runFunc(cmd, ng, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing, maxGracePeriod, disableEviction)
return runFunc(cmd, ng, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing, maxGracePeriod, disableEviction, parallel)
}

cmd.FlagSetGroup.InFlagSet("General", func(fs *pflag.FlagSet) {
@@ -52,6 +57,7 @@ func deleteNodeGroupWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cm
fs.DurationVar(&maxGracePeriod, "max-grace-period", defaultMaxGracePeriod, "Maximum pods termination grace period")
defaultDisableEviction := false
fs.BoolVar(&disableEviction, "disable-eviction", defaultDisableEviction, "Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution.")
fs.IntVar(&parallel, "parallel", 1, "Number of nodes to drain in parallel. Max 25")

cmd.Wait = false
cmdutils.AddWaitFlag(fs, &cmd.Wait, "deletion of all resources")
@@ -61,10 +67,10 @@ func deleteNodeGroupWithRunFunc(cmd *cmdutils.Cm
cmdutils.AddCommonFlagsForAWS(cmd.FlagSetGroup, &cmd.ProviderConfig, true)
}

func doDeleteNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool) error {
func doDeleteNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool, parallel int) error {
ngFilter := filter.NewNodeGroupFilter()

if err := cmdutils.NewDeleteNodeGroupLoader(cmd, ng, ngFilter).Load(); err != nil {
if err := cmdutils.NewDeleteAndDrainNodeGroupLoader(cmd, ng, ngFilter).Load(); err != nil {
return err
}

@@ -126,6 +132,7 @@ func doDeleteNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap
Plan: cmd.Plan,
MaxGracePeriod: maxGracePeriod,
DisableEviction: disableEviction,
Parallel: parallel,
}
err := nodeGroupManager.Drain(drainInput)
if err != nil {
11 changes: 10 additions & 1 deletion pkg/ctl/delete/nodegroup_test.go
@@ -22,7 +22,7 @@ var _ = Describe("delete", func() {
cmd := newMockEmptyCmd(args...)
count := 0
cmdutils.AddResourceCmd(cmdutils.NewGrouping(), cmd.parentCmd, func(cmd *cmdutils.Cmd) {
deleteNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *v1alpha5.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool) error {
deleteNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *v1alpha5.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool, parallel int) error {
Expect(cmd.ClusterConfig.Metadata.Name).To(Equal("clusterName"))
Expect(ng.Name).To(Equal("ng"))
count++
@@ -52,5 +52,14 @@
args: []string{"nodegroup", "ng", "--cluster", "dummy", "--name", "ng"},
error: fmt.Errorf("Error: --name=ng and argument ng cannot be used at the same time"),
}),

Entry("setting --parallel below 1", invalidParamsCase{
args: []string{"nodegroup", "--cluster", "dummy", "--name", "ng", "--parallel", "-1"},
error: fmt.Errorf("Error: --parallel value must be of range 1-25"),
}),
Entry("setting --parallel above 25", invalidParamsCase{
args: []string{"nodegroup", "--cluster", "dummy", "--name", "ng", "--parallel", "26"},
error: fmt.Errorf("Error: --parallel value must be of range 1-25"),
}),
)
})
26 changes: 16 additions & 10 deletions pkg/ctl/drain/nodegroup.go
@@ -15,26 +15,30 @@ import (
)

func drainNodeGroupCmd(cmd *cmdutils.Cmd) {
drainNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool) error {
return doDrainNodeGroup(cmd, ng, undo, onlyMissing, maxGracePeriod, nodeDrainWaitPeriod, disableEviction)
drainNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool, parallel int) error {
return doDrainNodeGroup(cmd, ng, undo, onlyMissing, maxGracePeriod, nodeDrainWaitPeriod, disableEviction, parallel)
})
}

func drainNodeGroupWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool) error) {
func drainNodeGroupWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool, parallel int) error) {
cfg := api.NewClusterConfig()
ng := api.NewNodeGroup()
cmd.ClusterConfig = cfg

var undo, onlyMissing bool
var maxGracePeriod time.Duration
var nodeDrainWaitPeriod time.Duration
var disableEviction bool
var (
undo bool
onlyMissing bool
disableEviction bool
parallel int
maxGracePeriod time.Duration
nodeDrainWaitPeriod time.Duration
)

cmd.SetDescription("nodegroup", "Cordon and drain a nodegroup", "", "ng")

cmd.CobraCommand.RunE = func(_ *cobra.Command, args []string) error {
cmd.NameArg = cmdutils.GetNameArg(args)
return runFunc(cmd, ng, undo, onlyMissing, maxGracePeriod, nodeDrainWaitPeriod, disableEviction)
return runFunc(cmd, ng, undo, onlyMissing, maxGracePeriod, nodeDrainWaitPeriod, disableEviction, parallel)
}

cmd.FlagSetGroup.InFlagSet("General", func(fs *pflag.FlagSet) {
@@ -52,15 +56,16 @@ func drainNodeGroupWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd
fs.BoolVar(&disableEviction, "disable-eviction", defaultDisableEviction, "Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution.")
cmdutils.AddTimeoutFlag(fs, &cmd.ProviderConfig.WaitTimeout)
fs.DurationVar(&nodeDrainWaitPeriod, "node-drain-wait-period", 0, "Amount of time to wait between draining nodes in a nodegroup")
fs.IntVar(&parallel, "parallel", 1, "Number of nodes to drain in parallel. Max 25")
})

cmdutils.AddCommonFlagsForAWS(cmd.FlagSetGroup, &cmd.ProviderConfig, true)
}

func doDrainNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool) error {
func doDrainNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool, parallel int) error {
ngFilter := filter.NewNodeGroupFilter()

if err := cmdutils.NewDeleteNodeGroupLoader(cmd, ng, ngFilter).Load(); err != nil {
if err := cmdutils.NewDeleteAndDrainNodeGroupLoader(cmd, ng, ngFilter).Load(); err != nil {
return err
}

@@ -126,6 +131,7 @@ func doDrainNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bo
NodeDrainWaitPeriod: nodeDrainWaitPeriod,
Undo: undo,
DisableEviction: disableEviction,
Parallel: parallel,
}
return nodegroup.New(cfg, ctl, clientSet).Drain(drainInput)
}
10 changes: 9 additions & 1 deletion pkg/ctl/drain/nodegroup_test.go
@@ -24,7 +24,7 @@ var _ = Describe("drain node group", func() {
cmd := newMockEmptyCmd(args...)
count := 0
cmdutils.AddResourceCmd(cmdutils.NewGrouping(), cmd.parentCmd, func(cmd *cmdutils.Cmd) {
drainNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *v1alpha5.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool) error {
drainNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *v1alpha5.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool, parallel int) error {
Expect(cmd.ClusterConfig.Metadata.Name).To(Equal("clusterName"))
Expect(ng.Name).To(Equal("ng"))
count++
@@ -54,5 +54,13 @@
args: []string{"nodegroup", "ng", "--cluster", "dummy", "--name", "ng"},
error: fmt.Errorf("Error: --name=ng and argument ng cannot be used at the same time"),
}),
Entry("setting --parallel below 1", invalidParamsCase{
args: []string{"nodegroup", "--cluster", "dummy", "--name", "ng", "--parallel", "-1"},
error: fmt.Errorf("Error: --parallel value must be of range 1-25"),
}),
Entry("setting --parallel above 25", invalidParamsCase{
args: []string{"nodegroup", "--cluster", "dummy", "--name", "ng", "--parallel", "26"},
error: fmt.Errorf("Error: --parallel value must be of range 1-25"),
}),
)
})