Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Another reduction of describe via reducing the number of list calls #4772

Merged
merged 3 commits into from
Feb 14, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 7 additions & 9 deletions pkg/actions/cluster/owned.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,10 +50,6 @@ func (c *OwnedCluster) Upgrade(dryRun bool) error {
return err
}

if err := c.ctl.RefreshClusterStatus(c.cfg); err != nil {
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This call was redundant because `New` already invokes `RefreshClusterStatus`.

return err
}

supportsManagedNodes, err := c.ctl.SupportsManagedNodes(c.cfg)
if err != nil {
return err
Expand Down Expand Up @@ -84,6 +80,12 @@ func (c *OwnedCluster) Delete(_ time.Duration, wait, force, disableNodegroupEvic
logger.Debug("failed to check if cluster is operable: %v", err)
}

// moving this here was fine because inside `NewTasksToDeleteClusterWithNodeGroups` we did it anyway.
allStacks, err := c.stackManager.ListNodeGroupStacks()
if err != nil {
return err
}

oidcSupported := true
if clusterOperable {
var err error
Expand All @@ -107,10 +109,6 @@ func (c *OwnedCluster) Delete(_ time.Duration, wait, force, disableNodegroupEvic
}
oidcSupported = false
}
allStacks, err := c.stackManager.ListNodeGroupStacks()
if err != nil {
return err
}

nodeGroupManager := c.newNodeGroupManager(c.cfg, c.ctl, clientSet)
if err := drainAllNodeGroups(c.cfg, c.ctl, clientSet, allStacks, disableNodegroupEviction, nodeGroupManager, attemptVpcCniDeletion); err != nil {
Expand All @@ -133,7 +131,7 @@ func (c *OwnedCluster) Delete(_ time.Duration, wait, force, disableNodegroupEvic
}

deleteOIDCProvider := clusterOperable && oidcSupported
tasks, err := c.stackManager.NewTasksToDeleteClusterWithNodeGroups(deleteOIDCProvider, oidc, kubernetes.NewCachedClientSet(clientSet), wait, func(errs chan error, _ string) error {
tasks, err := c.stackManager.NewTasksToDeleteClusterWithNodeGroups(c.clusterStack, allStacks, deleteOIDCProvider, oidc, kubernetes.NewCachedClientSet(clientSet), wait, func(errs chan error, _ string) error {
logger.Info("trying to cleanup dangling network interfaces")
if err := c.ctl.LoadClusterVPC(c.cfg, c.stackManager); err != nil {
return errors.Wrapf(err, "getting VPC configuration for cluster %q", c.cfg.Metadata.Name)
Expand Down
26 changes: 9 additions & 17 deletions pkg/actions/cluster/unowned.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,29 +4,21 @@ import (
"fmt"
"time"

"github.com/weaveworks/eksctl/pkg/actions/nodegroup"
"github.com/weaveworks/eksctl/pkg/kubernetes"

"github.com/weaveworks/eksctl/pkg/cfn/manager"

"github.com/aws/aws-sdk-go/aws/awserr"

"github.com/aws/aws-sdk-go/aws/request"
"github.com/weaveworks/eksctl/pkg/utils/tasks"

"github.com/pkg/errors"

"github.com/weaveworks/eksctl/pkg/utils/waiters"

iamoidc "github.com/weaveworks/eksctl/pkg/iam/oidc"

"github.com/kris-nova/logger"

awseks "github.com/aws/aws-sdk-go/service/eks"
"github.com/kris-nova/logger"
"github.com/pkg/errors"

"github.com/weaveworks/eksctl/pkg/actions/nodegroup"
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
"github.com/weaveworks/eksctl/pkg/cfn/manager"
"github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
"github.com/weaveworks/eksctl/pkg/eks"
iamoidc "github.com/weaveworks/eksctl/pkg/iam/oidc"
"github.com/weaveworks/eksctl/pkg/kubernetes"
"github.com/weaveworks/eksctl/pkg/utils/tasks"
"github.com/weaveworks/eksctl/pkg/utils/waiters"
)

type UnownedCluster struct {
Expand Down Expand Up @@ -274,7 +266,7 @@ func (c *UnownedCluster) deleteAndWaitForNodegroupsDeletion(waitInterval time.Du
}

// we kill every nodegroup with a stack the standard way. wait is always true
tasks, err := c.stackManager.NewTasksToDeleteNodeGroups(func(_ string) bool { return true }, true, nil)
tasks, err := c.stackManager.NewTasksToDeleteNodeGroups(allStacks, func(_ string) bool { return true }, true, nil)
if err != nil {
return err
}
Expand Down
13 changes: 6 additions & 7 deletions pkg/actions/nodegroup/delete.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,14 +18,13 @@ func (m *Manager) Delete(nodeGroups []*api.NodeGroup, managedNodeGroups []*api.M
}

tasks := &tasks.TaskTree{Parallel: true}
stacks, err := m.stackManager.ListNodeGroupStacks()
if err != nil {
return err
}

for _, n := range managedNodeGroups {
hasStacks, err := m.hasStacks(n.Name)
if err != nil {
return err
}

if hasStacks {
if m.hasStacks(stacks, n.Name) != nil {
nodeGroupsWithStacks = append(nodeGroupsWithStacks, n)
} else {
tasks.Append(m.stackManager.NewTaskToDeleteUnownedNodeGroup(m.cfg.Metadata.Name, n.Name, m.ctl.Provider.EKS(), nil))
Expand All @@ -41,7 +40,7 @@ func (m *Manager) Delete(nodeGroups []*api.NodeGroup, managedNodeGroups []*api.M
return false
}

deleteTasks, err := m.stackManager.NewTasksToDeleteNodeGroups(shouldDelete, wait, nil)
deleteTasks, err := m.stackManager.NewTasksToDeleteNodeGroups(stacks, shouldDelete, wait, nil)
if err != nil {
return err
}
Expand Down
13 changes: 4 additions & 9 deletions pkg/actions/nodegroup/nodegroup.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,14 @@ package nodegroup
import (
"time"

"github.com/weaveworks/eksctl/pkg/utils/waiters"

"github.com/aws/aws-sdk-go/aws/request"
"k8s.io/client-go/kubernetes"

api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
"github.com/weaveworks/eksctl/pkg/cfn/builder"
"github.com/weaveworks/eksctl/pkg/cfn/manager"
"github.com/weaveworks/eksctl/pkg/eks"
"github.com/weaveworks/eksctl/pkg/utils/waiters"
)

type Manager struct {
Expand Down Expand Up @@ -43,15 +42,11 @@ func New(cfg *api.ClusterConfig, ctl *eks.ClusterProvider, clientSet kubernetes.
}
}

func (m *Manager) hasStacks(name string) (bool, error) {
stacks, err := m.stackManager.ListNodeGroupStacks()
if err != nil {
return false, err
}
func (m *Manager) hasStacks(stacks []manager.NodeGroupStack, name string) *manager.NodeGroupStack {
for _, stack := range stacks {
if stack.NodeGroupName == name {
return true, nil
return &stack
}
}
return false, nil
return nil
}
19 changes: 13 additions & 6 deletions pkg/actions/nodegroup/upgrade.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,9 @@ import (
"github.com/pkg/errors"
"github.com/weaveworks/goformation/v4"
"github.com/weaveworks/goformation/v4/cloudformation"
gfnec2 "github.com/weaveworks/goformation/v4/cloudformation/ec2"
gfneks "github.com/weaveworks/goformation/v4/cloudformation/eks"
gfnt "github.com/weaveworks/goformation/v4/cloudformation/types"

"github.com/weaveworks/eksctl/pkg/ami"
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
Expand All @@ -22,9 +25,6 @@ import (
"github.com/weaveworks/eksctl/pkg/managed"
"github.com/weaveworks/eksctl/pkg/utils/waiters"
"github.com/weaveworks/eksctl/pkg/version"
gfnec2 "github.com/weaveworks/goformation/v4/cloudformation/ec2"
gfneks "github.com/weaveworks/goformation/v4/cloudformation/eks"
gfnt "github.com/weaveworks/goformation/v4/cloudformation/types"
)

// UpgradeOptions contains options to configure nodegroup upgrades
Expand All @@ -42,13 +42,16 @@ type UpgradeOptions struct {
ReleaseVersion string
// Wait for the upgrade to finish
Wait bool
// Stack to upgrade
Stack *manager.NodeGroupStack
}

func (m *Manager) Upgrade(options UpgradeOptions) error {
hasStacks, err := m.hasStacks(options.NodegroupName)
stacks, err := m.stackManager.ListNodeGroupStacks()
if err != nil {
return err
}
hasStack := m.hasStacks(stacks, options.NodegroupName)

if options.KubernetesVersion != "" {
if _, err := semver.ParseTolerant(options.KubernetesVersion); err != nil {
Expand All @@ -68,7 +71,8 @@ func (m *Manager) Upgrade(options UpgradeOptions) error {
return err
}

if hasStacks {
if hasStack != nil {
options.Stack = hasStack
return m.upgradeUsingStack(options, nodegroupOutput.Nodegroup)
}

Expand Down Expand Up @@ -166,7 +170,10 @@ func (m *Manager) upgradeUsingStack(options UpgradeOptions, nodegroup *eks.Nodeg
return errors.New("only one of kubernetes-version or release-version can be specified")
}

template, err := m.stackManager.GetManagedNodeGroupTemplate(options.NodegroupName)
template, err := m.stackManager.GetManagedNodeGroupTemplate(manager.GetNodegroupOption{
Stack: options.Stack,
NodeGroupName: options.NodegroupName,
})
if err != nil {
return errors.Wrap(err, "error fetching nodegroup template")
}
Expand Down
9 changes: 5 additions & 4 deletions pkg/actions/nodegroup/upgrade_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,14 +7,15 @@ import (
"github.com/aws/aws-sdk-go/service/ssm"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/fake"

"github.com/weaveworks/eksctl/pkg/actions/nodegroup"
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
"github.com/weaveworks/eksctl/pkg/cfn/manager"
"github.com/weaveworks/eksctl/pkg/cfn/manager/fakes"
"github.com/weaveworks/eksctl/pkg/eks"
"github.com/weaveworks/eksctl/pkg/testutils/mockprovider"
"github.com/weaveworks/eksctl/pkg/version"
"k8s.io/client-go/kubernetes/fake"
)

var _ = Describe("Upgrade", func() {
Expand Down Expand Up @@ -166,7 +167,7 @@ var _ = Describe("Upgrade", func() {
It("upgrades the nodegroup with the latest al2 release_version by updating the stack", func() {
Expect(m.Upgrade(options)).To(Succeed())
Expect(fakeStackManager.GetManagedNodeGroupTemplateCallCount()).To(Equal(1))
Expect(fakeStackManager.GetManagedNodeGroupTemplateArgsForCall(0)).To(Equal(ngName))
Expect(fakeStackManager.GetManagedNodeGroupTemplateArgsForCall(0).NodeGroupName).To(Equal(ngName))
Expect(fakeStackManager.UpdateNodeGroupStackCallCount()).To(Equal(2))
By("upgrading the ForceUpdateEnabled setting first")
ng, template, wait := fakeStackManager.UpdateNodeGroupStackArgsForCall(0)
Expand Down Expand Up @@ -227,7 +228,7 @@ var _ = Describe("Upgrade", func() {
It("upgrades the nodegroup with the latest al2 release_version by updating the stack", func() {
Expect(m.Upgrade(options)).To(Succeed())
Expect(fakeStackManager.GetManagedNodeGroupTemplateCallCount()).To(Equal(1))
Expect(fakeStackManager.GetManagedNodeGroupTemplateArgsForCall(0)).To(Equal(ngName))
Expect(fakeStackManager.GetManagedNodeGroupTemplateArgsForCall(0).NodeGroupName).To(Equal(ngName))
Expect(fakeStackManager.UpdateNodeGroupStackCallCount()).To(Equal(1))
By("upgrading the ReleaseVersion and not updating the ForceUpdateEnabled setting")
ng, template, wait := fakeStackManager.UpdateNodeGroupStackArgsForCall(0)
Expand Down Expand Up @@ -282,7 +283,7 @@ var _ = Describe("Upgrade", func() {
It("upgrades the nodegroup updating the stack with the kubernetes version", func() {
Expect(m.Upgrade(options)).To(Succeed())
Expect(fakeStackManager.GetManagedNodeGroupTemplateCallCount()).To(Equal(1))
Expect(fakeStackManager.GetManagedNodeGroupTemplateArgsForCall(0)).To(Equal(ngName))
Expect(fakeStackManager.GetManagedNodeGroupTemplateArgsForCall(0).NodeGroupName).To(Equal(ngName))

By("upgrading the ForceUpdateEnabled setting first")
Expect(fakeStackManager.UpdateNodeGroupStackCallCount()).To(Equal(2))
Expand Down
8 changes: 4 additions & 4 deletions pkg/cfn/manager/api.go
Original file line number Diff line number Diff line change
Expand Up @@ -298,17 +298,17 @@ func (c *StackCollection) DescribeStack(i *Stack) (*Stack, error) {
}

// GetManagedNodeGroupTemplate returns the template for a ManagedNodeGroup resource
func (c *StackCollection) GetManagedNodeGroupTemplate(nodeGroupName string) (string, error) {
nodeGroupType, err := c.GetNodeGroupStackType(nodeGroupName)
func (c *StackCollection) GetManagedNodeGroupTemplate(options GetNodegroupOption) (string, error) {
nodeGroupType, err := c.GetNodeGroupStackType(options)
if err != nil {
return "", err
}

if nodeGroupType != api.NodeGroupTypeManaged {
return "", fmt.Errorf("%q is not a managed nodegroup", nodeGroupName)
return "", fmt.Errorf("%q is not a managed nodegroup", options.NodeGroupName)
}

stackName := c.makeNodeGroupStackName(nodeGroupName)
stackName := c.makeNodeGroupStackName(options.NodeGroupName)
templateBody, err := c.GetStackTemplate(stackName)
if err != nil {
return "", err
Expand Down
32 changes: 11 additions & 21 deletions pkg/cfn/manager/delete_tasks.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,11 @@ import (
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/aws-sdk-go/service/eks"
"github.com/aws/aws-sdk-go/service/eks/eksiface"
"github.com/kris-nova/logger"
"github.com/pkg/errors"
"github.com/weaveworks/eksctl/pkg/cfn/waiter"

"github.com/kris-nova/logger"
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
"github.com/weaveworks/eksctl/pkg/cfn/waiter"
iamoidc "github.com/weaveworks/eksctl/pkg/iam/oidc"
"github.com/weaveworks/eksctl/pkg/kubernetes"
"github.com/weaveworks/eksctl/pkg/utils/tasks"
Expand All @@ -22,10 +22,10 @@ import (
// deleteAll is the catch-all predicate used when tearing down an entire
// cluster: it reports that every nodegroup stack should be deleted,
// regardless of its name.
func deleteAll(_ string) bool {
	return true
}

// NewTasksToDeleteClusterWithNodeGroups defines tasks required to delete the given cluster along with all of its resources
func (c *StackCollection) NewTasksToDeleteClusterWithNodeGroups(deleteOIDCProvider bool, oidc *iamoidc.OpenIDConnectManager, clientSetGetter kubernetes.ClientSetGetter, wait bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error) {
func (c *StackCollection) NewTasksToDeleteClusterWithNodeGroups(clusterStack *Stack, nodeGroupStacks []NodeGroupStack, deleteOIDCProvider bool, oidc *iamoidc.OpenIDConnectManager, clientSetGetter kubernetes.ClientSetGetter, wait bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error) {
taskTree := &tasks.TaskTree{Parallel: false}

nodeGroupTasks, err := c.NewTasksToDeleteNodeGroups(deleteAll, true, cleanup)
nodeGroupTasks, err := c.NewTasksToDeleteNodeGroups(nodeGroupStacks, deleteAll, true, cleanup)

if err != nil {
return nil, err
Expand Down Expand Up @@ -57,10 +57,6 @@ func (c *StackCollection) NewTasksToDeleteClusterWithNodeGroups(deleteOIDCProvid
taskTree.Append(deleteAddonIAMtasks)
}

clusterStack, err := c.DescribeClusterStack()
if err != nil {
return nil, err
}
if clusterStack == nil {
return nil, &StackNotFoundErr{ClusterName: c.spec.Metadata.Name}
}
Expand All @@ -84,38 +80,32 @@ func (c *StackCollection) NewTasksToDeleteClusterWithNodeGroups(deleteOIDCProvid
}

// NewTasksToDeleteNodeGroups defines tasks required to delete all of the nodegroups
func (c *StackCollection) NewTasksToDeleteNodeGroups(shouldDelete func(string) bool, wait bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error) {
nodeGroupStacks, err := c.DescribeNodeGroupStacks()
if err != nil {
return nil, err
}

func (c *StackCollection) NewTasksToDeleteNodeGroups(nodeGroupStacks []NodeGroupStack, shouldDelete func(string) bool, wait bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error) {
taskTree := &tasks.TaskTree{Parallel: true}

for _, s := range nodeGroupStacks {
name := c.GetNodeGroupName(s)

if !shouldDelete(name) {
if !shouldDelete(s.NodeGroupName) {
continue
}

if *s.StackStatus == cloudformation.StackStatusDeleteFailed && cleanup != nil {
if *s.Stack.StackStatus == cloudformation.StackStatusDeleteFailed && cleanup != nil {
taskTree.Append(&tasks.TaskWithNameParam{
Info: fmt.Sprintf("cleanup for nodegroup %q", name),
Info: fmt.Sprintf("cleanup for nodegroup %q", s.NodeGroupName),
Call: cleanup,
})
}
info := fmt.Sprintf("delete nodegroup %q", name)
info := fmt.Sprintf("delete nodegroup %q", s.NodeGroupName)
if wait {
taskTree.Append(&taskWithStackSpec{
info: info,
stack: s,
stack: s.Stack,
call: c.DeleteStackBySpecSync,
})
} else {
taskTree.Append(&asyncTaskWithStackSpec{
info: info,
stack: s,
stack: s.Stack,
call: c.DeleteStackBySpec,
})
}
Expand Down
Loading