Drain nodes in parallel (#4966)
* Revert "Revert "Drain nodes in parallel (#4864)" (#4964)"

This reverts commit 00f5fcf.

* set value in delete cluster

* Update pkg/drain/nodegroup.go

Co-authored-by: Gergely Brautigam <182850+Skarlso@users.noreply.github.com>

* Update pkg/drain/nodegroup.go

Co-authored-by: Gergely Brautigam <182850+Skarlso@users.noreply.github.com>

* update unit tests

Co-authored-by: Gergely Brautigam <182850+Skarlso@users.noreply.github.com>
aclevername and Skarlso authored Mar 22, 2022
1 parent 030ac58 commit a00d6d1
Showing 22 changed files with 176 additions and 81 deletions.
3 changes: 2 additions & 1 deletion go.mod
@@ -33,6 +33,7 @@ require (
github.com/maxbrunsfeld/counterfeiter/v6 v6.4.1
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.18.1
github.com/orcaman/concurrent-map v1.0.0
github.com/otiai10/copy v1.7.0
github.com/pelletier/go-toml v1.9.4
github.com/pkg/errors v0.9.1
@@ -50,6 +51,7 @@ require (
github.com/weaveworks/schemer v0.0.0-20210802122110-338b258ad2ca
github.com/xgfone/netaddr v0.5.1
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/tools v0.1.9
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0
gopkg.in/yaml.v2 v2.4.0
@@ -423,7 +425,6 @@ require (
golang.org/x/mod v0.5.1 // indirect
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
2 changes: 2 additions & 0 deletions go.sum
@@ -1907,6 +1907,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
github.com/otiai10/copy v1.7.0 h1:hVoPiN+t+7d2nzzwMiDHPSOogsWAStewq3TwU05+clE=
github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U=
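The two new direct requirements above — golang.org/x/sync (previously only an indirect dependency) and github.com/orcaman/concurrent-map — hint at how the parallel drain is built: x/sync provides primitives for bounded fan-out with error propagation, and concurrent-map offers a goroutine-safe map, presumably for per-node bookkeeping. The drain implementation itself (pkg/drain/nodegroup.go) is not among the diffs loaded on this page, so the following is only a minimal sketch of the bounded-concurrency pattern those dependencies suggest; drainAll, drainNode, and the node names are illustrative stand-ins, not eksctl APIs.

package main

import (
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// drainNode stands in for evicting or deleting every pod on one node;
// it is not an eksctl function.
func drainNode(name string, maxGracePeriod time.Duration) error {
	time.Sleep(100 * time.Millisecond) // simulate eviction work
	fmt.Println("drained", name)
	return nil
}

// drainAll drains nodes with at most `parallel` drains in flight and
// returns the first error encountered, as errgroup.Wait does.
func drainAll(nodes []string, parallel int, maxGracePeriod time.Duration) error {
	sem := make(chan struct{}, parallel) // bounds the number of concurrent drains
	var g errgroup.Group
	for _, node := range nodes {
		node := node      // capture the loop variable for the goroutine
		sem <- struct{}{} // acquire a slot before launching the next drain
		g.Go(func() error {
			defer func() { <-sem }() // release the slot when this drain finishes
			return drainNode(node, maxGracePeriod)
		})
	}
	return g.Wait()
}

func main() {
	nodes := []string{"ip-10-0-1-10", "ip-10-0-1-11", "ip-10-0-1-12"}
	if err := drainAll(nodes, 2, time.Minute); err != nil {
		fmt.Println("drain failed:", err)
	}
}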
1 change: 1 addition & 0 deletions integration/tests/unowned_cluster/unowned_cluster_test.go
@@ -319,6 +319,7 @@ var _ = Describe("(Integration) [non-eksctl cluster & nodegroup support]", func(
WithArgs(
"--cluster", params.ClusterName,
"--name", mng1,
"--parallel", "2",
"--verbose", "2",
)
Expect(cmd).To(RunSuccessfully())
2 changes: 1 addition & 1 deletion pkg/actions/cluster/cluster.go
@@ -16,7 +16,7 @@ import (

type Cluster interface {
Upgrade(dryRun bool) error
Delete(waitInterval time.Duration, wait, force, disableNodegroupEviction bool) error
Delete(waitInterval time.Duration, wait, force, disableNodegroupEviction bool, parallel int) error
}

func New(cfg *api.ClusterConfig, ctl *eks.ClusterProvider) (Cluster, error) {
4 changes: 3 additions & 1 deletion pkg/actions/cluster/delete.go
@@ -165,7 +165,8 @@ func checkForUndeletedStacks(stackManager manager.StackManager) error {
return nil
}

func drainAllNodeGroups(cfg *api.ClusterConfig, ctl *eks.ClusterProvider, clientSet kubernetes.Interface, allStacks []manager.NodeGroupStack, disableEviction bool, nodeGroupDrainer NodeGroupDrainer, vpcCniDeleter vpcCniDeleter) error {
func drainAllNodeGroups(cfg *api.ClusterConfig, ctl *eks.ClusterProvider, clientSet kubernetes.Interface, allStacks []manager.NodeGroupStack,
disableEviction bool, parallel int, nodeGroupDrainer NodeGroupDrainer, vpcCniDeleter vpcCniDeleter) error {
if len(allStacks) == 0 {
return nil
}
@@ -183,6 +184,7 @@ func drainAllNodeGroups(cfg *api.ClusterConfig, ctl *eks.ClusterProvider, client
NodeGroups: cmdutils.ToKubeNodeGroups(cfg),
MaxGracePeriod: ctl.Provider.WaitTimeout(),
DisableEviction: disableEviction,
Parallel: parallel,
}
if err := nodeGroupDrainer.Drain(drainInput); err != nil {
return err
9 changes: 6 additions & 3 deletions pkg/actions/cluster/delete_test.go
@@ -58,6 +58,7 @@ var _ = Describe("DrainAllNodeGroups", func() {
mockedDrainInput := &nodegroup.DrainInput{
NodeGroups: cmdutils.ToKubeNodeGroups(cfg),
MaxGracePeriod: ctl.Provider.WaitTimeout(),
Parallel: 1,
}

mockedDrainer := &drainerMock{}
@@ -67,7 +68,7 @@ var _ = Describe("DrainAllNodeGroups", func() {
vpcCniDeleterCalled++
}

err := cluster.DrainAllNodeGroups(cfg, ctl, fakeClientSet, nodeGroupStacks, false, mockedDrainer, vpcCniDeleter)
err := cluster.DrainAllNodeGroups(cfg, ctl, fakeClientSet, nodeGroupStacks, false, 1, mockedDrainer, vpcCniDeleter)
Expect(err).NotTo(HaveOccurred())
mockedDrainer.AssertNumberOfCalls(GinkgoT(), "Drain", 1)
Expect(vpcCniDeleterCalled).To(Equal(1))
@@ -86,6 +87,7 @@ var _ = Describe("DrainAllNodeGroups", func() {
NodeGroups: cmdutils.ToKubeNodeGroups(cfg),
MaxGracePeriod: ctl.Provider.WaitTimeout(),
DisableEviction: true,
Parallel: 1,
}

mockedDrainer := &drainerMock{}
@@ -95,7 +97,7 @@ var _ = Describe("DrainAllNodeGroups", func() {
vpcCniDeleterCalled++
}

err := cluster.DrainAllNodeGroups(cfg, ctl, fakeClientSet, nodeGroupStacks, true, mockedDrainer, vpcCniDeleter)
err := cluster.DrainAllNodeGroups(cfg, ctl, fakeClientSet, nodeGroupStacks, true, 1, mockedDrainer, vpcCniDeleter)
Expect(err).NotTo(HaveOccurred())
mockedDrainer.AssertNumberOfCalls(GinkgoT(), "Drain", 1)
Expect(vpcCniDeleterCalled).To(Equal(1))
@@ -113,6 +115,7 @@ var _ = Describe("DrainAllNodeGroups", func() {
mockedDrainInput := &nodegroup.DrainInput{
NodeGroups: cmdutils.ToKubeNodeGroups(cfg),
MaxGracePeriod: ctl.Provider.WaitTimeout(),
Parallel: 1,
}

mockedDrainer := &drainerMock{}
@@ -122,7 +125,7 @@ var _ = Describe("DrainAllNodeGroups", func() {
vpcCniDeleterCalled++
}

err := cluster.DrainAllNodeGroups(cfg, ctl, fakeClientSet, nodeGroupStacks, false, mockedDrainer, vpcCniDeleter)
err := cluster.DrainAllNodeGroups(cfg, ctl, fakeClientSet, nodeGroupStacks, false, 1, mockedDrainer, vpcCniDeleter)
Expect(err).NotTo(HaveOccurred())
mockedDrainer.AssertNotCalled(GinkgoT(), "Drain")
Expect(vpcCniDeleterCalled).To(Equal(0))
4 changes: 2 additions & 2 deletions pkg/actions/cluster/owned.go
@@ -69,7 +69,7 @@ func (c *OwnedCluster) Upgrade(dryRun bool) error {
return nil
}

func (c *OwnedCluster) Delete(_ time.Duration, wait, force, disableNodegroupEviction bool) error {
func (c *OwnedCluster) Delete(_ time.Duration, wait, force, disableNodegroupEviction bool, parallel int) error {
var (
clientSet kubernetes.Interface
oidc *iamoidc.OpenIDConnectManager
@@ -111,7 +111,7 @@ func (c *OwnedCluster) Delete(_ time.Duration, wait, force, disableNodegroupEvic
}

nodeGroupManager := c.newNodeGroupManager(c.cfg, c.ctl, clientSet)
if err := drainAllNodeGroups(c.cfg, c.ctl, clientSet, allStacks, disableNodegroupEviction, nodeGroupManager, attemptVpcCniDeletion); err != nil {
if err := drainAllNodeGroups(c.cfg, c.ctl, clientSet, allStacks, disableNodegroupEviction, parallel, nodeGroupManager, attemptVpcCniDeletion); err != nil {
if !force {
return err
}
10 changes: 6 additions & 4 deletions pkg/actions/cluster/owned_test.go
@@ -112,7 +112,7 @@ var _ = Describe("Delete", func() {
return fakeClientSet, nil
})

err := c.Delete(time.Microsecond, false, false, false)
err := c.Delete(time.Microsecond, false, false, false, 1)
Expect(err).NotTo(HaveOccurred())
Expect(fakeStackManager.DeleteTasksForDeprecatedStacksCallCount()).To(Equal(1))
Expect(ranDeleteDeprecatedTasks).To(BeTrue())
@@ -177,6 +177,7 @@ var _ = Describe("Delete", func() {
mockedDrainInput := &nodegroup.DrainInput{
NodeGroups: cmdutils.ToKubeNodeGroups(cfg),
MaxGracePeriod: ctl.Provider.WaitTimeout(),
Parallel: 1,
}

mockedDrainer := &drainerMockOwned{}
@@ -185,7 +186,7 @@ var _ = Describe("Delete", func() {
return mockedDrainer
})

err := c.Delete(time.Microsecond, false, true, false)
err := c.Delete(time.Microsecond, false, true, false, 1)
Expect(err).NotTo(HaveOccurred())
Expect(fakeStackManager.DeleteTasksForDeprecatedStacksCallCount()).To(Equal(1))
Expect(ranDeleteDeprecatedTasks).To(BeFalse())
@@ -241,6 +242,7 @@ var _ = Describe("Delete", func() {
mockedDrainInput := &nodegroup.DrainInput{
NodeGroups: cmdutils.ToKubeNodeGroups(cfg),
MaxGracePeriod: ctl.Provider.WaitTimeout(),
Parallel: 1,
}
ctl.Status = &eks.ProviderStatus{
ClusterInfo: &eks.ClusterInfo{
@@ -257,7 +259,7 @@ var _ = Describe("Delete", func() {
return mockedDrainer
})

err := c.Delete(time.Microsecond, false, false, false)
err := c.Delete(time.Microsecond, false, false, false, 1)
Expect(err).To(MatchError(errorMessage))
Expect(fakeStackManager.DeleteTasksForDeprecatedStacksCallCount()).To(Equal(0))
Expect(ranDeleteDeprecatedTasks).To(BeFalse())
@@ -298,7 +300,7 @@ var _ = Describe("Delete", func() {

c := cluster.NewOwnedCluster(cfg, ctl, nil, fakeStackManager)

err := c.Delete(time.Microsecond, false, false, false)
err := c.Delete(time.Microsecond, false, false, false, 1)
Expect(err).NotTo(HaveOccurred())
Expect(fakeStackManager.DeleteTasksForDeprecatedStacksCallCount()).To(Equal(1))
Expect(ranDeleteDeprecatedTasks).To(BeTrue())
4 changes: 2 additions & 2 deletions pkg/actions/cluster/unowned.go
@@ -54,7 +54,7 @@ func (c *UnownedCluster) Upgrade(dryRun bool) error {
return nil
}

func (c *UnownedCluster) Delete(waitInterval time.Duration, wait, force, disableNodegroupEviction bool) error {
func (c *UnownedCluster) Delete(waitInterval time.Duration, wait, force, disableNodegroupEviction bool, parallel int) error {
clusterName := c.cfg.Metadata.Name

if err := c.checkClusterExists(clusterName); err != nil {
@@ -79,7 +79,7 @@ func (c *UnownedCluster) Delete(waitInterval time.Duration, wait, force, disable
}

nodeGroupManager := c.newNodeGroupManager(c.cfg, c.ctl, clientSet)
if err := drainAllNodeGroups(c.cfg, c.ctl, clientSet, allStacks, disableNodegroupEviction, nodeGroupManager, attemptVpcCniDeletion); err != nil {
if err := drainAllNodeGroups(c.cfg, c.ctl, clientSet, allStacks, disableNodegroupEviction, parallel, nodeGroupManager, attemptVpcCniDeletion); err != nil {
if !force {
return err
}
10 changes: 6 additions & 4 deletions pkg/actions/cluster/unowned_test.go
@@ -150,7 +150,7 @@ var _ = Describe("Delete", func() {
return fakeClientSet, nil
})

err := c.Delete(time.Microsecond, false, false, false)
err := c.Delete(time.Microsecond, false, false, false, 1)
Expect(err).NotTo(HaveOccurred())
Expect(deleteCallCount).To(Equal(1))
Expect(unownedDeleteCallCount).To(Equal(1))
@@ -243,6 +243,7 @@ var _ = Describe("Delete", func() {
mockedDrainInput := &nodegroup.DrainInput{
NodeGroups: cmdutils.ToKubeNodeGroups(cfg),
MaxGracePeriod: ctl.Provider.WaitTimeout(),
Parallel: 1,
}

mockedDrainer := &drainerMockUnowned{}
@@ -251,7 +252,7 @@ var _ = Describe("Delete", func() {
return mockedDrainer
})

err := c.Delete(time.Microsecond, false, true, false)
err := c.Delete(time.Microsecond, false, true, false, 1)
Expect(err).NotTo(HaveOccurred())
Expect(deleteCallCount).To(Equal(0))
Expect(unownedDeleteCallCount).To(Equal(0))
@@ -344,6 +345,7 @@ var _ = Describe("Delete", func() {
mockedDrainInput := &nodegroup.DrainInput{
NodeGroups: cmdutils.ToKubeNodeGroups(cfg),
MaxGracePeriod: ctl.Provider.WaitTimeout(),
Parallel: 1,
}

errorMessage := "Mocked error"
@@ -353,7 +355,7 @@ var _ = Describe("Delete", func() {
return mockedDrainer
})

err := c.Delete(time.Microsecond, false, false, false)
err := c.Delete(time.Microsecond, false, false, false, 1)
Expect(err).To(MatchError(errorMessage))
Expect(deleteCallCount).To(Equal(0))
Expect(unownedDeleteCallCount).To(Equal(0))
@@ -417,7 +419,7 @@ var _ = Describe("Delete", func() {
p.MockEKS().On("DeleteCluster", mock.Anything).Return(&awseks.DeleteClusterOutput{}, nil)

c := cluster.NewUnownedCluster(cfg, ctl, fakeStackManager)
err := c.Delete(time.Microsecond, false, false, false)
err := c.Delete(time.Microsecond, false, false, false, 1)
Expect(err).NotTo(HaveOccurred())
Expect(fakeStackManager.DeleteTasksForDeprecatedStacksCallCount()).To(Equal(1))
Expect(deleteCallCount).To(Equal(1))
3 changes: 2 additions & 1 deletion pkg/actions/nodegroup/drain.go
@@ -15,12 +15,13 @@ type DrainInput struct {
NodeDrainWaitPeriod time.Duration
Undo bool
DisableEviction bool
Parallel int
}

func (m *Manager) Drain(input *DrainInput) error {
if !input.Plan {
for _, n := range input.NodeGroups {
nodeGroupDrainer := drain.NewNodeGroupDrainer(m.clientSet, n, m.ctl.Provider.WaitTimeout(), input.MaxGracePeriod, input.NodeDrainWaitPeriod, input.Undo, input.DisableEviction)
nodeGroupDrainer := drain.NewNodeGroupDrainer(m.clientSet, n, m.ctl.Provider.WaitTimeout(), input.MaxGracePeriod, input.NodeDrainWaitPeriod, input.Undo, input.DisableEviction, input.Parallel)
if err := nodeGroupDrainer.Drain(); err != nil {
return err
}
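One subtlety in the new field above: DrainInput.Parallel is a plain int, so a caller that never sets it passes Go's zero value (0) rather than the CLI default of 1 — which is presumably why the squashed "set value in delete cluster" commit threads the flag value through every Delete path explicitly. A defensive drainer could also clamp the value before spawning workers; the helper below is a hypothetical sketch, not code from this commit.

package main

import "fmt"

// clampParallel treats an unset (zero) or out-of-range Parallel value as a
// safe bound. Hypothetical guard, not part of this diff.
func clampParallel(parallel int) int {
	switch {
	case parallel < 1:
		return 1 // the CLI default; also covers the zero value of an unset field
	case parallel > 25:
		return 25 // the flag loader rejects values above 25 anyway
	default:
		return parallel
	}
}

func main() {
	fmt.Println(clampParallel(0), clampParallel(5), clampParallel(100)) // 1 5 25
}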
11 changes: 9 additions & 2 deletions pkg/ctl/cmdutils/configfile.go
@@ -4,6 +4,7 @@ import (
"encoding/csv"
"fmt"
"reflect"
"strconv"
"strings"

"github.com/kris-nova/logger"
@@ -517,8 +518,8 @@ func normalizeBaseNodeGroup(np api.NodePool, cmd *cobra.Command) {
}
}

// NewDeleteNodeGroupLoader will load config or use flags for 'eksctl delete nodegroup'
func NewDeleteNodeGroupLoader(cmd *Cmd, ng *api.NodeGroup, ngFilter *filter.NodeGroupFilter) ClusterConfigLoader {
// NewDeleteAndDrainNodeGroupLoader will load config or use flags for 'eksctl delete nodegroup'
func NewDeleteAndDrainNodeGroupLoader(cmd *Cmd, ng *api.NodeGroup, ngFilter *filter.NodeGroupFilter) ClusterConfigLoader {
l := newCommonClusterConfigLoader(cmd)

l.validateWithConfigFile = func() error {
@@ -546,6 +547,12 @@ func NewDeleteNodeGroupLoader(cmd *Cmd, ng *api.NodeGroup, ngFilter *filter.Node
return ErrMustBeSet("--name")
}

if flag := l.CobraCommand.Flag("parallel"); flag != nil && flag.Changed {
if val, _ := strconv.Atoi(flag.Value.String()); val > 25 || val < 1 {
return fmt.Errorf("--parallel value must be of range 1-25")
}
}

ngFilter.AppendIncludeNames(ng.Name)

l.Plan = false
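Two details in the new range check above are easy to miss: it only runs when --parallel was explicitly set (flag.Changed), and the strconv.Atoi error is discarded — presumably because the commands using this loader register --parallel as an integer flag (as delete cluster does below), so pflag rejects non-numeric input before the loader ever sees it. The snippet below sketches that division of labour with a stand-alone flag set rather than eksctl's command plumbing; it is an illustration under those assumptions, not code from this commit.

package main

import (
	"fmt"
	"strconv"

	"github.com/spf13/pflag"
)

func newFlags() *pflag.FlagSet {
	fs := pflag.NewFlagSet("delete-nodegroup", pflag.ContinueOnError)
	fs.Int("parallel", 1, "Number of nodes to drain in parallel. Max 25")
	return fs
}

func main() {
	// Non-integer input is rejected by pflag itself at parse time, which is
	// why the loader can afford to ignore the Atoi error.
	fmt.Println(newFlags().Parse([]string{"--parallel", "lots"}))

	// Integers parse fine; a range check like the loader's only fires when the
	// flag was explicitly changed and the value falls outside 1-25.
	fs := newFlags()
	_ = fs.Parse([]string{"--parallel", "30"})
	if f := fs.Lookup("parallel"); f != nil && f.Changed {
		if val, _ := strconv.Atoi(f.Value.String()); val > 25 || val < 1 {
			fmt.Println("--parallel value must be of range 1-25")
		}
	}
}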
20 changes: 12 additions & 8 deletions pkg/ctl/delete/cluster.go
@@ -15,22 +15,25 @@ import (
)

func deleteClusterCmd(cmd *cmdutils.Cmd) {
deleteClusterWithRunFunc(cmd, func(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool) error {
return doDeleteCluster(cmd, force, disableNodegroupEviction)
deleteClusterWithRunFunc(cmd, func(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool, parallel int) error {
return doDeleteCluster(cmd, force, disableNodegroupEviction, parallel)
})
}

func deleteClusterWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool) error) {
func deleteClusterWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool, parallel int) error) {
cfg := api.NewClusterConfig()
cmd.ClusterConfig = cfg

cmd.SetDescription("cluster", "Delete a cluster", "")

var force bool
var disableNodegroupEviction bool
var (
force bool
disableNodegroupEviction bool
parallel int
)
cmd.CobraCommand.RunE = func(_ *cobra.Command, args []string) error {
cmd.NameArg = cmdutils.GetNameArg(args)
return runFunc(cmd, force, disableNodegroupEviction)
return runFunc(cmd, force, disableNodegroupEviction, parallel)
}

cmd.FlagSetGroup.InFlagSet("General", func(fs *pflag.FlagSet) {
@@ -41,6 +44,7 @@ func deleteClusterWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd,
cmdutils.AddWaitFlag(fs, &cmd.Wait, "deletion of all resources")
fs.BoolVar(&force, "force", false, "Force deletion to continue when errors occur")
fs.BoolVar(&disableNodegroupEviction, "disable-nodegroup-eviction", false, "Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution.")
fs.IntVar(&parallel, "parallel", 1, "Number of nodes to drain in parallel. Max 25")

cmdutils.AddConfigFileFlag(fs, &cmd.ClusterConfigFile)
cmdutils.AddTimeoutFlag(fs, &cmd.ProviderConfig.WaitTimeout)
@@ -49,7 +53,7 @@ func deleteClusterWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd,
cmdutils.AddCommonFlagsForAWS(cmd.FlagSetGroup, &cmd.ProviderConfig, true)
}

func doDeleteCluster(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool) error {
func doDeleteCluster(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool, parallel int) error {
if err := cmdutils.NewMetadataLoader(cmd).Load(); err != nil {
return err
}
@@ -81,5 +85,5 @@ func doDeleteCluster(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction boo
return err
}

return cluster.Delete(time.Second*20, cmd.Wait, force, disableNodegroupEviction)
return cluster.Delete(time.Second*20, cmd.Wait, force, disableNodegroupEviction, parallel)
}
2 changes: 1 addition & 1 deletion pkg/ctl/delete/cluster_test.go
@@ -18,7 +18,7 @@ var _ = Describe("delete cluster", func() {
cmd := newMockEmptyCmd(args...)
count := 0
cmdutils.AddResourceCmd(cmdutils.NewGrouping(), cmd.parentCmd, func(cmd *cmdutils.Cmd) {
deleteClusterWithRunFunc(cmd, func(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool) error {
deleteClusterWithRunFunc(cmd, func(cmd *cmdutils.Cmd, force bool, disableNodegroupEviction bool, parallel int) error {
Expect(cmd.ClusterConfig.Metadata.Name).To(Equal(clusterName))
Expect(force).To(Equal(forceExpected))
Expect(disableNodegroupEviction).To(Equal(disableNodegroupEvictionExpected))
(The remaining changed file diffs were not loaded in this view.)
