Revert "Drain nodes in parallel (#4864)" (#4964)
This reverts commit 391a6ce.
aclevername authored Mar 17, 2022
1 parent 391a6ce commit 00f5fcf
Showing 13 changed files with 54 additions and 136 deletions.
1 change: 0 additions & 1 deletion go.mod
@@ -335,7 +335,6 @@ require (
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/orcaman/concurrent-map v1.0.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
github.com/pierrec/lz4 v2.0.5+incompatible // indirect
2 changes: 0 additions & 2 deletions go.sum
@@ -1929,8 +1929,6 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
github.com/otiai10/copy v1.7.0 h1:hVoPiN+t+7d2nzzwMiDHPSOogsWAStewq3TwU05+clE=
github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U=
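The module dropped from go.mod and go.sum here, github.com/orcaman/concurrent-map, is the thread-safe map the parallel drainer used to record which nodes had already been drained; the revert returns to the plain string set that the sequential loop uses (see pkg/drain/nodegroup.go below). A minimal, self-contained sketch of the two bookkeeping styles, with made-up node names:

package main

import (
	"fmt"

	cmap "github.com/orcaman/concurrent-map"
	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Reverted approach: a concurrent map, safe to update from several
	// drain goroutines at once.
	drained := cmap.New()
	drained.Set("node-1", nil)
	drained.Set("node-2", nil)
	names := []string{}
	for key := range drained.Items() {
		names = append(names, key)
	}
	fmt.Println("drained (concurrent-map):", names)

	// Restored approach: a plain string set, sufficient for a
	// single-goroutine sequential loop.
	drainedSet := sets.NewString()
	drainedSet.Insert("node-1", "node-2")
	fmt.Println("drained (string set):", drainedSet.List())
}

The concurrent map was only needed because multiple goroutines recorded drained nodes at the same time; with a single goroutine, sets.String is enough.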
1 change: 0 additions & 1 deletion integration/tests/unowned_cluster/unowned_cluster_test.go
@@ -319,7 +319,6 @@ var _ = Describe("(Integration) [non-eksctl cluster & nodegroup support]", func(
WithArgs(
"--cluster", params.ClusterName,
"--name", mng1,
"--parallel", "2",
"--verbose", "2",
)
Expect(cmd).To(RunSuccessfully())
3 changes: 1 addition & 2 deletions pkg/actions/nodegroup/drain.go
@@ -15,13 +15,12 @@ type DrainInput struct {
NodeDrainWaitPeriod time.Duration
Undo bool
DisableEviction bool
Parallel int
}

func (m *Manager) Drain(input *DrainInput) error {
if !input.Plan {
for _, n := range input.NodeGroups {
nodeGroupDrainer := drain.NewNodeGroupDrainer(m.clientSet, n, m.ctl.Provider.WaitTimeout(), input.MaxGracePeriod, input.NodeDrainWaitPeriod, input.Undo, input.DisableEviction, input.Parallel)
nodeGroupDrainer := drain.NewNodeGroupDrainer(m.clientSet, n, m.ctl.Provider.WaitTimeout(), input.MaxGracePeriod, input.NodeDrainWaitPeriod, input.Undo, input.DisableEviction)
if err := nodeGroupDrainer.Drain(); err != nil {
return err
}
11 changes: 2 additions & 9 deletions pkg/ctl/cmdutils/configfile.go
@@ -4,7 +4,6 @@ import (
"encoding/csv"
"fmt"
"reflect"
"strconv"
"strings"

"github.com/kris-nova/logger"
@@ -518,8 +517,8 @@ func normalizeBaseNodeGroup(np api.NodePool, cmd *cobra.Command) {
}
}

// NewDeleteAndDrainNodeGroupLoader will load config or use flags for 'eksctl delete nodegroup'
func NewDeleteAndDrainNodeGroupLoader(cmd *Cmd, ng *api.NodeGroup, ngFilter *filter.NodeGroupFilter) ClusterConfigLoader {
// NewDeleteNodeGroupLoader will load config or use flags for 'eksctl delete nodegroup'
func NewDeleteNodeGroupLoader(cmd *Cmd, ng *api.NodeGroup, ngFilter *filter.NodeGroupFilter) ClusterConfigLoader {
l := newCommonClusterConfigLoader(cmd)

l.validateWithConfigFile = func() error {
@@ -547,12 +546,6 @@ func NewDeleteAndDrainNodeGroupLoader(cmd *Cmd, ng *api.NodeGroup, ngFilter *fil
return ErrMustBeSet("--name")
}

if flag := l.CobraCommand.Flag("parallel"); flag != nil && flag.Changed {
if val, _ := strconv.Atoi(flag.Value.String()); val > 25 || val < 1 {
return fmt.Errorf("--parallel value must be of range 1-25")
}
}

ngFilter.AppendIncludeNames(ng.Name)

l.Plan = false
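The block removed from the loader above rejected out-of-range values for --parallel, but only when the user had actually set the flag, by checking pflag's Changed field. A standalone sketch of that validation pattern, assuming a hypothetical command rather than eksctl's real command wiring:

package main

import (
	"fmt"
	"strconv"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "drain",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate only when the flag was explicitly set, mirroring the
			// flag.Changed check that this revert removes.
			if f := cmd.Flag("parallel"); f != nil && f.Changed {
				if v, _ := strconv.Atoi(f.Value.String()); v < 1 || v > 25 {
					return fmt.Errorf("--parallel value must be of range 1-25")
				}
			}
			return nil
		},
	}
	cmd.Flags().Int("parallel", 1, "Number of nodes to drain in parallel. Max 25")

	cmd.SetArgs([]string{"--parallel", "26"})
	if err := cmd.Execute(); err != nil {
		fmt.Println(err) // --parallel value must be of range 1-25
	}
}

Validating through the generic flag lookup let the same loader serve both the delete and drain commands, which each registered the flag separately.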
25 changes: 9 additions & 16 deletions pkg/ctl/delete/nodegroup.go
@@ -17,30 +17,25 @@ import (
)

func deleteNodeGroupCmd(cmd *cmdutils.Cmd) {
deleteNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool, parallel int) error {
return doDeleteNodeGroup(cmd, ng, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing, maxGracePeriod, disableEviction, parallel)
deleteNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool) error {
return doDeleteNodeGroup(cmd, ng, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing, maxGracePeriod, disableEviction)
})
}

func deleteNodeGroupWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool, parallel int) error) {
func deleteNodeGroupWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool) error) {
cfg := api.NewClusterConfig()
ng := api.NewNodeGroup()
cmd.ClusterConfig = cfg

var (
updateAuthConfigMap bool
deleteNodeGroupDrain bool
onlyMissing bool
maxGracePeriod time.Duration
disableEviction bool
parallel int
)
var updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool
var maxGracePeriod time.Duration
var disableEviction bool

cmd.SetDescription("nodegroup", "Delete a nodegroup", "", "ng")

cmd.CobraCommand.RunE = func(_ *cobra.Command, args []string) error {
cmd.NameArg = cmdutils.GetNameArg(args)
return runFunc(cmd, ng, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing, maxGracePeriod, disableEviction, parallel)
return runFunc(cmd, ng, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing, maxGracePeriod, disableEviction)
}

cmd.FlagSetGroup.InFlagSet("General", func(fs *pflag.FlagSet) {
@@ -57,7 +52,6 @@ func deleteNodeGroupWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cm
fs.DurationVar(&maxGracePeriod, "max-grace-period", defaultMaxGracePeriod, "Maximum pods termination grace period")
defaultDisableEviction := false
fs.BoolVar(&disableEviction, "disable-eviction", defaultDisableEviction, "Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution.")
fs.IntVar(&parallel, "parallel", 1, "Number of nodes to drain in parallel. Max 25")

cmd.Wait = false
cmdutils.AddWaitFlag(fs, &cmd.Wait, "deletion of all resources")
Expand All @@ -67,10 +61,10 @@ func deleteNodeGroupWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cm
cmdutils.AddCommonFlagsForAWS(cmd.FlagSetGroup, &cmd.ProviderConfig, true)
}

func doDeleteNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool, parallel int) error {
func doDeleteNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool) error {
ngFilter := filter.NewNodeGroupFilter()

if err := cmdutils.NewDeleteAndDrainNodeGroupLoader(cmd, ng, ngFilter).Load(); err != nil {
if err := cmdutils.NewDeleteNodeGroupLoader(cmd, ng, ngFilter).Load(); err != nil {
return err
}

@@ -132,7 +126,6 @@ func doDeleteNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, updateAuthConfigMap
Plan: cmd.Plan,
MaxGracePeriod: maxGracePeriod,
DisableEviction: disableEviction,
Parallel: parallel,
}
err := nodeGroupManager.Drain(drainInput)
if err != nil {
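For context, the flag removed above was registered alongside the other drain-related flags with pflag and its value passed through to DrainInput. A small sketch of that registration pattern using pflag directly; the grace-period default is a placeholder (the real defaultMaxGracePeriod is not shown in this diff) and eksctl's FlagSetGroup plumbing is omitted:

package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("delete-nodegroup", pflag.ContinueOnError)

	var (
		maxGracePeriod  time.Duration
		disableEviction bool
		parallel        int
	)
	// Placeholder default; eksctl's defaultMaxGracePeriod is defined elsewhere.
	fs.DurationVar(&maxGracePeriod, "max-grace-period", 10*time.Minute, "Maximum pods termination grace period")
	fs.BoolVar(&disableEviction, "disable-eviction", false, "Force drain to use delete, even if eviction is supported.")
	// The flag this revert removes:
	fs.IntVar(&parallel, "parallel", 1, "Number of nodes to drain in parallel. Max 25")

	if err := fs.Parse([]string{"--parallel", "2", "--disable-eviction"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(maxGracePeriod, disableEviction, parallel) // 10m0s true 2
}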
11 changes: 1 addition & 10 deletions pkg/ctl/delete/nodegroup_test.go
@@ -22,7 +22,7 @@ var _ = Describe("delete", func() {
cmd := newMockEmptyCmd(args...)
count := 0
cmdutils.AddResourceCmd(cmdutils.NewGrouping(), cmd.parentCmd, func(cmd *cmdutils.Cmd) {
deleteNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *v1alpha5.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool, parallel int) error {
deleteNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *v1alpha5.NodeGroup, updateAuthConfigMap, deleteNodeGroupDrain, onlyMissing bool, maxGracePeriod time.Duration, disableEviction bool) error {
Expect(cmd.ClusterConfig.Metadata.Name).To(Equal("clusterName"))
Expect(ng.Name).To(Equal("ng"))
count++
@@ -52,14 +52,5 @@ args: []string{"nodegroup", "ng", "--cluster", "dummy", "--name", "ng"},
args: []string{"nodegroup", "ng", "--cluster", "dummy", "--name", "ng"},
error: fmt.Errorf("Error: --name=ng and argument ng cannot be used at the same time"),
}),

Entry("setting --parallel below 1", invalidParamsCase{
args: []string{"nodegroup", "--cluster", "dummy", "--name", "ng", "--parallel", "-1"},
error: fmt.Errorf("Error: --parallel value must be of range 1-25"),
}),
Entry("setting --parallel above 25", invalidParamsCase{
args: []string{"nodegroup", "--cluster", "dummy", "--name", "ng", "--parallel", "26"},
error: fmt.Errorf("Error: --parallel value must be of range 1-25"),
}),
)
})
26 changes: 10 additions & 16 deletions pkg/ctl/drain/nodegroup.go
@@ -15,30 +15,26 @@ import (
)

func drainNodeGroupCmd(cmd *cmdutils.Cmd) {
drainNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool, parallel int) error {
return doDrainNodeGroup(cmd, ng, undo, onlyMissing, maxGracePeriod, nodeDrainWaitPeriod, disableEviction, parallel)
drainNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool) error {
return doDrainNodeGroup(cmd, ng, undo, onlyMissing, maxGracePeriod, nodeDrainWaitPeriod, disableEviction)
})
}

func drainNodeGroupWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool, parallel int) error) {
func drainNodeGroupWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool) error) {
cfg := api.NewClusterConfig()
ng := api.NewNodeGroup()
cmd.ClusterConfig = cfg

var (
undo bool
onlyMissing bool
disableEviction bool
parallel int
maxGracePeriod time.Duration
nodeDrainWaitPeriod time.Duration
)
var undo, onlyMissing bool
var maxGracePeriod time.Duration
var nodeDrainWaitPeriod time.Duration
var disableEviction bool

cmd.SetDescription("nodegroup", "Cordon and drain a nodegroup", "", "ng")

cmd.CobraCommand.RunE = func(_ *cobra.Command, args []string) error {
cmd.NameArg = cmdutils.GetNameArg(args)
return runFunc(cmd, ng, undo, onlyMissing, maxGracePeriod, nodeDrainWaitPeriod, disableEviction, parallel)
return runFunc(cmd, ng, undo, onlyMissing, maxGracePeriod, nodeDrainWaitPeriod, disableEviction)
}

cmd.FlagSetGroup.InFlagSet("General", func(fs *pflag.FlagSet) {
@@ -56,16 +52,15 @@ func drainNodeGroupWithRunFunc(cmd *cmdutils.Cmd, runFunc func(cmd *cmdutils.Cmd
fs.BoolVar(&disableEviction, "disable-eviction", defaultDisableEviction, "Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution.")
cmdutils.AddTimeoutFlag(fs, &cmd.ProviderConfig.WaitTimeout)
fs.DurationVar(&nodeDrainWaitPeriod, "node-drain-wait-period", 0, "Amount of time to wait between draining nodes in a nodegroup")
fs.IntVar(&parallel, "parallel", 1, "Number of nodes to drain in parallel. Max 25")
})

cmdutils.AddCommonFlagsForAWS(cmd.FlagSetGroup, &cmd.ProviderConfig, true)
}

func doDrainNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool, parallel int) error {
func doDrainNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool) error {
ngFilter := filter.NewNodeGroupFilter()

if err := cmdutils.NewDeleteAndDrainNodeGroupLoader(cmd, ng, ngFilter).Load(); err != nil {
if err := cmdutils.NewDeleteNodeGroupLoader(cmd, ng, ngFilter).Load(); err != nil {
return err
}

@@ -131,7 +126,6 @@ func doDrainNodeGroup(cmd *cmdutils.Cmd, ng *api.NodeGroup, undo, onlyMissing bo
NodeDrainWaitPeriod: nodeDrainWaitPeriod,
Undo: undo,
DisableEviction: disableEviction,
Parallel: parallel,
}
return nodegroup.New(cfg, ctl, clientSet).Drain(drainInput)
}
10 changes: 1 addition & 9 deletions pkg/ctl/drain/nodegroup_test.go
@@ -24,7 +24,7 @@ var _ = Describe("drain node group", func() {
cmd := newMockEmptyCmd(args...)
count := 0
cmdutils.AddResourceCmd(cmdutils.NewGrouping(), cmd.parentCmd, func(cmd *cmdutils.Cmd) {
drainNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *v1alpha5.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool, parallel int) error {
drainNodeGroupWithRunFunc(cmd, func(cmd *cmdutils.Cmd, ng *v1alpha5.NodeGroup, undo, onlyMissing bool, maxGracePeriod, nodeDrainWaitPeriod time.Duration, disableEviction bool) error {
Expect(cmd.ClusterConfig.Metadata.Name).To(Equal("clusterName"))
Expect(ng.Name).To(Equal("ng"))
count++
@@ -54,13 +54,5 @@ args: []string{"nodegroup", "ng", "--cluster", "dummy", "--name", "ng"},
args: []string{"nodegroup", "ng", "--cluster", "dummy", "--name", "ng"},
error: fmt.Errorf("Error: --name=ng and argument ng cannot be used at the same time"),
}),
Entry("setting --parallel below 1", invalidParamsCase{
args: []string{"nodegroup", "--cluster", "dummy", "--name", "ng", "--parallel", "-1"},
error: fmt.Errorf("Error: --parallel value must be of range 1-25"),
}),
Entry("setting --parallel above 25", invalidParamsCase{
args: []string{"nodegroup", "--cluster", "dummy", "--name", "ng", "--parallel", "26"},
error: fmt.Errorf("Error: --parallel value must be of range 1-25"),
}),
)
})
81 changes: 23 additions & 58 deletions pkg/drain/nodegroup.go
@@ -5,7 +5,6 @@ import (
"fmt"
"time"

"golang.org/x/sync/semaphore"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/weaveworks/eksctl/pkg/drain/evictor"
@@ -16,7 +15,6 @@
"github.com/pkg/errors"
"github.com/weaveworks/eksctl/pkg/eks"

cmap "github.com/orcaman/concurrent-map"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
)
@@ -42,10 +40,9 @@ type NodeGroupDrainer struct {
waitTimeout time.Duration
nodeDrainWaitPeriod time.Duration
undo bool
parallel int
}

func NewNodeGroupDrainer(clientSet kubernetes.Interface, ng eks.KubeNodeGroup, waitTimeout, maxGracePeriod, nodeDrainWaitPeriod time.Duration, undo, disableEviction bool, parallel int) NodeGroupDrainer {
func NewNodeGroupDrainer(clientSet kubernetes.Interface, ng eks.KubeNodeGroup, waitTimeout, maxGracePeriod, nodeDrainWaitPeriod time.Duration, undo, disableEviction bool) NodeGroupDrainer {
ignoreDaemonSets := []metav1.ObjectMeta{
{
Namespace: "kube-system",
@@ -79,7 +76,6 @@ func NewNodeGroupDrainer(clientSet kubernetes.Interface, ng eks.KubeNodeGroup, w
waitTimeout: waitTimeout,
nodeDrainWaitPeriod: nodeDrainWaitPeriod,
undo: undo,
parallel: parallel,
}
}

@@ -105,20 +101,15 @@ func (n *NodeGroupDrainer) Drain() error {
return nil // no need to kill any pods
}

drainedNodes := cmap.New()
ctx, cancel := context.WithTimeout(context.TODO(), n.waitTimeout)
defer cancel()

parallelLimit := int64(n.parallel)
sem := semaphore.NewWeighted(parallelLimit)
logger.Info("starting parallel draining, max in-flight of %d", parallelLimit)
drainedNodes := sets.NewString()
// loop until all nodes are drained to handle accidental scale-up
// or any other changes in the ASG
timer := time.NewTimer(n.waitTimeout)
defer timer.Stop()

for {
select {
case <-ctx.Done():
//need to use a different context
waitForAllRoutinesToFinish(context.TODO(), sem, parallelLimit)
case <-timer.C:
return fmt.Errorf("timed out (after %s) waiting for nodegroup %q to be drained", n.waitTimeout, n.ng.NameString())
default:
nodes, err := n.clientSet.CoreV1().Nodes().List(context.TODO(), listOptions)
@@ -136,58 +127,35 @@
}

if newPendingNodes.Len() == 0 {
waitForAllRoutinesToFinish(ctx, sem, parallelLimit)
logger.Success("drained all nodes: %v", mapToList(drainedNodes.Items()))
logger.Success("drained all nodes: %v", drainedNodes.List())
return nil // no new nodes were seen
}

logger.Debug("already drained: %v", mapToList(drainedNodes.Items()))
logger.Debug("already drained: %v", drainedNodes.List())
logger.Debug("will drain: %v", newPendingNodes.List())

for i, node := range newPendingNodes.List() {
if err := sem.Acquire(ctx, 1); err != nil {
logger.Critical("failed to claim sem: %w", err)
pending, err := n.evictPods(node)
if err != nil {
logger.Warning("pod eviction error (%q) on node %s", err, node)
time.Sleep(retryDelay)
continue
}
logger.Debug("%d pods to be evicted from %s", pending, node)
if pending == 0 {
drainedNodes.Insert(node)
}

go func(i int, node string) {
defer sem.Release(1)
logger.Debug("starting drain of node %s", node)
pending, err := n.evictPods(node)
if err != nil {
logger.Warning("pod eviction error (%q) on node %s", err, node)
time.Sleep(retryDelay)
return
}

logger.Debug("%d pods to be evicted from %s", pending, node)
if pending == 0 {
drainedNodes.Set(node, nil)
}

if n.nodeDrainWaitPeriod > 0 {
logger.Debug("waiting for %.0f seconds before draining next node", n.nodeDrainWaitPeriod.Seconds())
time.Sleep(n.nodeDrainWaitPeriod)
}
}(i, node)
// only wait if we're not on the last node of this iteration
if n.nodeDrainWaitPeriod > 0 && i < newPendingNodes.Len()-1 {
logger.Debug("waiting for %.0f seconds before draining next node", n.nodeDrainWaitPeriod.Seconds())
time.Sleep(n.nodeDrainWaitPeriod)
}
}
}
}
}

func waitForAllRoutinesToFinish(ctx context.Context, sem *semaphore.Weighted, size int64) {
if err := sem.Acquire(ctx, size); err != nil {
logger.Critical("failed to claim sem: %w", err)
}
}

func mapToList(m map[string]interface{}) []string {
list := []string{}
for key := range m {
list = append(list, key)
}

return list
}

func (n *NodeGroupDrainer) toggleCordon(cordon bool, nodes *corev1.NodeList) {
for _, node := range nodes.Items {
c := NewCordonHelper(&node, cordon)
Expand All @@ -212,9 +180,6 @@ func (n *NodeGroupDrainer) evictPods(node string) (int, error) {
if len(errs) > 0 {
return 0, fmt.Errorf("errs: %v", errs) // TODO: improve formatting
}
if list == nil {
return 0, nil
}
if w := list.Warnings(); w != "" {
logger.Warning(w)
}
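To summarize the pkg/drain/nodegroup.go change: the reverted code capped the number of concurrent drain goroutines with a weighted semaphore (one slot per node being drained, --parallel slots total) and waited for stragglers by acquiring the full weight, while the restored code drains nodes one at a time inside a select guarded by a time.NewTimer deadline. A stripped-down, runnable sketch of that semaphore pattern; the node names and the drainOne stand-in are hypothetical, not eksctl code:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

// drainOne stands in for the per-node evictPods loop in the reverted code.
func drainOne(node string) {
	time.Sleep(200 * time.Millisecond)
	fmt.Println("drained", node)
}

func main() {
	nodes := []string{"node-1", "node-2", "node-3", "node-4", "node-5"}

	const parallel = 2 // analogous to the reverted --parallel flag (1-25)
	sem := semaphore.NewWeighted(parallel)
	ctx := context.Background()

	for _, node := range nodes {
		// Blocks until one of the `parallel` slots frees up.
		if err := sem.Acquire(ctx, 1); err != nil {
			fmt.Println("failed to acquire semaphore:", err)
			break
		}
		go func(node string) {
			defer sem.Release(1)
			drainOne(node)
		}(node)
	}

	// Acquiring the full weight only succeeds once every goroutine has
	// released its slot, mirroring waitForAllRoutinesToFinish above.
	if err := sem.Acquire(ctx, parallel); err != nil {
		fmt.Println("failed to wait for drains:", err)
	}
}

After the revert, drained nodes are tracked in a sets.String instead of a concurrent map, and the --node-drain-wait-period pause happens inline between nodes rather than inside each goroutine.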