Remove Initialize() on Destroyer and PruneOptions
ash2k committed Jun 18, 2021
1 parent c726dba commit 4eb7dc1
Showing 13 changed files with 150 additions and 153 deletions.
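
In short: the two-phase construct-then-Initialize() pattern becomes single-phase constructor injection. A minimal caller-side sketch of the change, using only identifiers that appear in the diffs below (the helper name newDestroyer and its parameter p are illustrative, not part of the commit):

package example

import (
    "sigs.k8s.io/cli-utils/pkg/apply"
    "sigs.k8s.io/cli-utils/pkg/provider"
    "sigs.k8s.io/cli-utils/pkg/util/factory"
)

// Before this commit a caller wrote:
//
//	d := apply.NewDestroyer(p)                    // could not fail here...
//	if err := d.Initialize(); err != nil { ... }  // ...only here
//
// Now the status poller is built by the caller and injected, and the
// constructor itself returns an error, so a Destroyer is never observable
// in a half-initialized state.
func newDestroyer(p provider.Provider) (*apply.Destroyer, error) {
    statusPoller, err := factory.NewStatusPoller(p.Factory())
    if err != nil {
        return nil, err
    }
    return apply.NewDestroyer(p, statusPoller)
}

NewApplier and prune.NewPruneOptions get the same shape, as the per-file diffs below show.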
18 changes: 10 additions & 8 deletions cmd/destroy/cmddestroy.go
@@ -19,12 +19,12 @@ import (
     "sigs.k8s.io/cli-utils/pkg/inventory"
     "sigs.k8s.io/cli-utils/pkg/manifestreader"
     "sigs.k8s.io/cli-utils/pkg/provider"
+    "sigs.k8s.io/cli-utils/pkg/util/factory"
 )
 
 // GetDestroyRunner creates and returns the DestroyRunner which stores the cobra command.
 func GetDestroyRunner(provider provider.Provider, loader manifestreader.ManifestLoader, ioStreams genericclioptions.IOStreams) *DestroyRunner {
     r := &DestroyRunner{
-        Destroyer: apply.NewDestroyer(provider),
         ioStreams: ioStreams,
         provider:  provider,
         loader:    loader,
@@ -62,7 +62,6 @@ type DestroyRunner struct {
     Command    *cobra.Command
     PreProcess func(info inventory.InventoryInfo, strategy common.DryRunStrategy) (inventory.InventoryPolicy, error)
     ioStreams  genericclioptions.IOStreams
-    Destroyer  *apply.Destroyer
     provider   provider.Provider
     loader     manifestreader.ManifestLoader
 
@@ -102,18 +101,21 @@ func (r *DestroyRunner) RunE(cmd *cobra.Command, args []string) error {
         }
     }
 
-    // Run the destroyer. It will return a channel where we can receive updates
-    // to keep track of progress and any issues.
-    err = r.Destroyer.Initialize()
+    statusPoller, err := factory.NewStatusPoller(r.provider.Factory())
     if err != nil {
         return err
     }
+    d, err := apply.NewDestroyer(r.provider, statusPoller)
+    if err != nil {
+        return err
+    }
-    option := &apply.DestroyerOption{
+    // Run the destroyer. It will return a channel where we can receive updates
+    // to keep track of progress and any issues.
+    ch := d.Run(inv, apply.DestroyerOptions{
         DeleteTimeout:           r.deleteTimeout,
         DeletePropagationPolicy: deletePropPolicy,
         InventoryPolicy:         inventoryPolicy,
-    }
-    ch := r.Destroyer.Run(inv, option)
+    })
 
     // The printer will print updates from the channel. It will block
     // until the channel is closed.
18 changes: 8 additions & 10 deletions cmd/preview/cmdpreview.go
@@ -31,7 +31,6 @@ var (
 // GetPreviewRunner creates and returns the PreviewRunner which stores the cobra command.
 func GetPreviewRunner(provider provider.Provider, loader manifestreader.ManifestLoader, ioStreams genericclioptions.IOStreams) *PreviewRunner {
     r := &PreviewRunner{
-        Destroyer: apply.NewDestroyer(provider),
         ioStreams: ioStreams,
         provider:  provider,
         loader:    loader,
@@ -74,7 +73,6 @@ type PreviewRunner struct {
     Command    *cobra.Command
     PreProcess func(info inventory.InventoryInfo, strategy common.DryRunStrategy) (inventory.InventoryPolicy, error)
     ioStreams  genericclioptions.IOStreams
-    Destroyer  *apply.Destroyer
     provider   provider.Provider
     loader     manifestreader.ManifestLoader
 
@@ -118,17 +116,18 @@ func (r *PreviewRunner) RunE(cmd *cobra.Command, args []string) error {
         }
     }
 
+    statusPoller, err := factory.NewStatusPoller(r.provider.Factory())
+    if err != nil {
+        return err
+    }
+
     // if destroy flag is set in preview, transmit it to destroyer DryRunStrategy flag
     // and pivot execution to destroy with dry-run
     if !previewDestroy {
         _, err = common.DemandOneDirectory(args)
         if err != nil {
             return err
         }
-        statusPoller, err := factory.NewStatusPoller(r.provider.Factory())
-        if err != nil {
-            return err
-        }
         a, err := apply.NewApplier(r.provider, statusPoller)
         if err != nil {
             return err
@@ -147,15 +146,14 @@ func (r *PreviewRunner) RunE(cmd *cobra.Command, args []string) error {
             InventoryPolicy: inventoryPolicy,
         })
     } else {
-        err = r.Destroyer.Initialize()
+        d, err := apply.NewDestroyer(r.provider, statusPoller)
         if err != nil {
             return err
         }
-        option := &apply.DestroyerOption{
+        ch = d.Run(inv, apply.DestroyerOptions{
             InventoryPolicy: inventoryPolicy,
             DryRunStrategy:  drs,
-        }
-        ch = r.Destroyer.Run(inv, option)
+        })
     }
 
     // The printer will print updates from the channel. It will block
9 changes: 4 additions & 5 deletions pkg/apply/applier.go
@@ -41,19 +41,17 @@ func NewApplier(provider provider.Provider, statusPoller poller.Poller) (*Applie
         return nil, err
     }
     factory := provider.Factory()
-    pruneOpts := prune.NewPruneOptions()
-    err = pruneOpts.Initialize(factory, invClient)
+    pruneOpts, err := prune.NewPruneOptions(factory, invClient)
     if err != nil {
         return nil, err
     }
-    a := &Applier{
+    return &Applier{
         pruneOptions: pruneOpts,
         statusPoller: statusPoller,
         factory:      factory,
         invClient:    invClient,
         infoHelper:   info.NewInfoHelper(factory),
-    }
-    return a, nil
+    }, nil
 }
 
 // Applier performs the step of applying a set of resources into a cluster,
@@ -145,6 +143,7 @@ func (a *Applier) Run(ctx context.Context, invInfo inventory.InventoryInfo, obje
         InfoHelper: a.infoHelper,
         Mapper:     mapper,
         InvClient:  a.invClient,
+        Destroy:    false,
     }
     opts := solver.Options{
         ServerSideOptions: options.ServerSideOptions,
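
The applier constructor follows the same pattern; a minimal sketch of the call site (the helper name newApplier is illustrative, the rest comes from this diff and cmdpreview.go above):

package example

import (
    "sigs.k8s.io/cli-utils/pkg/apply"
    "sigs.k8s.io/cli-utils/pkg/provider"
    "sigs.k8s.io/cli-utils/pkg/util/factory"
)

// NewApplier now runs prune.NewPruneOptions internally, so a failure to
// build the dynamic client or RESTMapper surfaces at construction time
// instead of on first use.
func newApplier(p provider.Provider) (*apply.Applier, error) {
    statusPoller, err := factory.NewStatusPoller(p.Factory())
    if err != nil {
        return nil, err
    }
    return apply.NewApplier(p, statusPoller)
}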
26 changes: 11 additions & 15 deletions pkg/apply/applier_test.go
@@ -766,30 +766,26 @@ func TestReadAndPrepareObjects(t *testing.T) {
             clusterObjs := object.UnstructuredsToObjMetas(tc.clusterObjs)
             fakeInvClient := inventory.NewFakeInventoryClient(clusterObjs)
             // Set up the fake dynamic client to recognize all objects, and the RESTMapper.
-            po := prune.NewPruneOptions()
-            objs := []runtime.Object{}
+            objs := make([]runtime.Object, 0, len(tc.clusterObjs))
             for _, obj := range tc.clusterObjs {
                 objs = append(objs, obj)
             }
-            po.InvClient = fakeInvClient
-            po.Client = dynamicfake.NewSimpleDynamicClient(scheme.Scheme, objs...)
-            po.Mapper = testrestmapper.TestOnlyStaticRESTMapper(scheme.Scheme,
-                scheme.Scheme.PrioritizedVersionsAllGroups()...)
             // Create applier with fake inventory client, and call prepareObjects
-            applier := &Applier{
-                pruneOptions: po,
-                invClient:    fakeInvClient,
+            applier := Applier{
+                pruneOptions: &prune.PruneOptions{
+                    InvClient: fakeInvClient,
+                    Client:    dynamicfake.NewSimpleDynamicClient(scheme.Scheme, objs...),
+                    Mapper: testrestmapper.TestOnlyStaticRESTMapper(scheme.Scheme,
+                        scheme.Scheme.PrioritizedVersionsAllGroups()...),
+                },
+                invClient: fakeInvClient,
             }
             applyObjs, pruneObjs, err := applier.prepareObjects(tc.inventory, tc.localObjs)
-            if !tc.isError && err != nil {
-                t.Fatalf("unexpected error received: %s", err)
-            }
             if tc.isError {
-                if err == nil {
-                    t.Fatalf("expected error, but received none")
-                }
+                assert.Error(t, err)
                 return
             }
+            require.NoError(t, err)
             // Validate the returned applyObjs and pruneObjs
             if !objSetsEqual(tc.applyObjs, applyObjs) {
                 t.Errorf("expected local infos (%v), got (%v)",
74 changes: 32 additions & 42 deletions pkg/apply/destroyer.go
@@ -11,6 +11,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/klog/v2"
+    "k8s.io/kubectl/pkg/cmd/util"
     "sigs.k8s.io/cli-utils/pkg/apply/event"
     "sigs.k8s.io/cli-utils/pkg/apply/filter"
     "sigs.k8s.io/cli-utils/pkg/apply/poller"
@@ -21,7 +22,6 @@ import (
     "sigs.k8s.io/cli-utils/pkg/inventory"
     "sigs.k8s.io/cli-utils/pkg/object"
     "sigs.k8s.io/cli-utils/pkg/provider"
-    "sigs.k8s.io/cli-utils/pkg/util/factory"
 )
 
 // NewDestroyer returns a new destroyer. It will set up the ApplyOptions and
@@ -30,23 +30,34 @@ import (
 // the ApplyOptions were responsible for printing progress. This is now
 // handled by a separate printer with the KubectlPrinterAdapter bridging
 // between the two.
-func NewDestroyer(provider provider.Provider) *Destroyer {
-    return &Destroyer{
-        PruneOptions: prune.NewPruneOptions(),
-        provider:     provider,
+func NewDestroyer(provider provider.Provider, statusPoller poller.Poller) (*Destroyer, error) {
+    invClient, err := provider.InventoryClient()
+    if err != nil {
+        return nil, errors.WrapPrefix(err, "error creating inventory client", 1)
     }
+    factory := provider.Factory()
+    pruneOpts, err := prune.NewPruneOptions(factory, invClient)
+    if err != nil {
+        return nil, errors.WrapPrefix(err, "error setting up PruneOptions", 1)
+    }
+    return &Destroyer{
+        pruneOptions: pruneOpts,
+        statusPoller: statusPoller,
+        factory:      factory,
+        invClient:    invClient,
+    }, nil
 }
 
 // Destroyer performs the step of grabbing all the previous inventory objects and
 // prune them. This also deletes all the previous inventory objects
 type Destroyer struct {
-    provider     provider.Provider
-    StatusPoller poller.Poller
-    PruneOptions *prune.PruneOptions
+    pruneOptions *prune.PruneOptions
+    statusPoller poller.Poller
+    factory      util.Factory
     invClient    inventory.InventoryClient
 }
 
-type DestroyerOption struct {
+type DestroyerOptions struct {
     // InventoryPolicy defines the inventory policy of apply.
     InventoryPolicy inventory.InventoryPolicy
 
@@ -64,67 +75,46 @@ type DestroyerOption struct {
     DeletePropagationPolicy metav1.DeletionPropagation
 }
 
-// Initialize sets up the Destroyer for actually doing an destroy against
-// a cluster. This involves validating command line inputs and configuring
-// clients for communicating with the cluster.
-func (d *Destroyer) Initialize() error {
-    statusPoller, err := factory.NewStatusPoller(d.provider.Factory())
-    if err != nil {
-        return errors.WrapPrefix(err, "error creating status poller", 1)
-    }
-    d.StatusPoller = statusPoller
-    invClient, err := d.provider.InventoryClient()
-    if err != nil {
-        return errors.WrapPrefix(err, "error creating inventory client", 1)
-    }
-    d.invClient = invClient
-    err = d.PruneOptions.Initialize(d.provider.Factory(), invClient)
-    if err != nil {
-        return errors.WrapPrefix(err, "error setting up PruneOptions", 1)
-    }
-    d.PruneOptions.Destroy = true
-    return nil
-}
-
 // Run performs the destroy step. Passes the inventory object. This
 // happens asynchronously on progress and any errors are reported
 // back on the event channel.
-func (d *Destroyer) Run(inv inventory.InventoryInfo, option *DestroyerOption) <-chan event.Event {
+func (d *Destroyer) Run(inv inventory.InventoryInfo, options DestroyerOptions) <-chan event.Event {
     eventChannel := make(chan event.Event)
-    d.invClient.SetDryRunStrategy(option.DryRunStrategy)
+    d.invClient.SetDryRunStrategy(options.DryRunStrategy)
     go func() {
         defer close(eventChannel)
         // Retrieve the objects to be deleted from the cluster. Second parameter is empty
         // because no local objects returns all inventory objects for deletion.
         emptyLocalObjs := []*unstructured.Unstructured{}
-        deleteObjs, err := d.PruneOptions.GetPruneObjs(inv, emptyLocalObjs)
+        deleteObjs, err := d.pruneOptions.GetPruneObjs(inv, emptyLocalObjs)
         if err != nil {
             handleError(eventChannel, err)
             return
         }
-        mapper, err := d.provider.Factory().ToRESTMapper()
+        mapper, err := d.factory.ToRESTMapper()
        if err != nil {
             handleError(eventChannel, err)
             return
         }
         klog.V(4).Infoln("destroyer building task queue...")
         taskBuilder := &solver.TaskQueueBuilder{
-            PruneOptions: d.PruneOptions,
-            Factory:      d.provider.Factory(),
+            PruneOptions: d.pruneOptions,
+            Factory:      d.factory,
             Mapper:       mapper,
             InvClient:    d.invClient,
+            Destroy:      true,
         }
         opts := solver.Options{
             Prune:                  true,
-            PruneTimeout:           option.DeleteTimeout,
-            DryRunStrategy:         option.DryRunStrategy,
-            PrunePropagationPolicy: option.DeletePropagationPolicy,
+            PruneTimeout:           options.DeleteTimeout,
+            DryRunStrategy:         options.DryRunStrategy,
+            PrunePropagationPolicy: options.DeletePropagationPolicy,
         }
         deleteFilters := []filter.ValidationFilter{
             filter.PreventRemoveFilter{},
             filter.InventoryPolicyFilter{
                 Inv:       inv,
-                InvPolicy: option.InventoryPolicy,
+                InvPolicy: options.InventoryPolicy,
             },
         }
         // Build the ordered set of tasks to execute.
@@ -143,7 +133,7 @@ func (d *Destroyer) Run(inv inventory.InventoryInfo, option *DestroyerOption) <-
         // Create a new TaskStatusRunner to execute the taskQueue.
         klog.V(4).Infoln("destroyer building TaskStatusRunner...")
         deleteIds := object.UnstructuredsToObjMetas(deleteObjs)
-        runner := taskrunner.NewTaskStatusRunner(deleteIds, d.StatusPoller)
+        runner := taskrunner.NewTaskStatusRunner(deleteIds, d.statusPoller)
         klog.V(4).Infoln("destroyer running TaskStatusRunner...")
         // TODO(seans): Make the poll interval configurable like the applier.
         err = runner.Run(context.Background(), taskQueue.ToChannel(), eventChannel, taskrunner.Options{
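
With Initialize() gone, Run is the only step left for callers: DestroyerOptions is passed by value, and the returned channel closes when the destroy finishes. A minimal sketch (the helper name destroyDryRun is illustrative; common.DryRunClient is assumed from sigs.k8s.io/cli-utils/pkg/common, the package the preview runner above also uses):

package example

import (
    "fmt"

    "sigs.k8s.io/cli-utils/pkg/apply"
    "sigs.k8s.io/cli-utils/pkg/common"
    "sigs.k8s.io/cli-utils/pkg/inventory"
)

// destroyDryRun drains the event channel, which doubles as waiting for the
// destroy to complete; a real caller hands each event to a printer instead
// of fmt.
func destroyDryRun(d *apply.Destroyer, inv inventory.InventoryInfo) {
    ch := d.Run(inv, apply.DestroyerOptions{
        DryRunStrategy: common.DryRunClient,
    })
    for e := range ch {
        fmt.Printf("event: %v\n", e)
    }
}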
34 changes: 15 additions & 19 deletions pkg/apply/prune/prune.go
@@ -35,34 +35,26 @@ type PruneOptions struct {
     InvClient inventory.InventoryClient
     Client    dynamic.Interface
     Mapper    meta.RESTMapper
-    // True if we are destroying, which deletes the inventory object
-    // as well (possibly) the inventory namespace.
-    Destroy bool
 }
 
 // NewPruneOptions returns a struct (PruneOptions) encapsulating the necessary
 // information to run the prune. Returns an error if an error occurs
 // gathering this information.
-func NewPruneOptions() *PruneOptions {
-    po := &PruneOptions{
-        Destroy: false,
-    }
-    return po
-}
-
-func (po *PruneOptions) Initialize(factory util.Factory, invClient inventory.InventoryClient) error {
-    var err error
+func NewPruneOptions(factory util.Factory, invClient inventory.InventoryClient) (*PruneOptions, error) {
     // Client/Builder fields from the Factory.
-    po.Client, err = factory.DynamicClient()
+    client, err := factory.DynamicClient()
     if err != nil {
-        return err
+        return nil, err
     }
-    po.Mapper, err = factory.ToRESTMapper()
+    mapper, err := factory.ToRESTMapper()
     if err != nil {
-        return err
+        return nil, err
     }
-    po.InvClient = invClient
-    return nil
+    return &PruneOptions{
+        InvClient: invClient,
+        Client:    client,
+        Mapper:    mapper,
+    }, nil
 }
 
 // Options defines a set of parameters that can be used to tune
@@ -73,6 +65,10 @@ type Options struct {
     DryRunStrategy common.DryRunStrategy
 
     PropagationPolicy metav1.DeletionPropagation
+
+    // True if we are destroying, which deletes the inventory object
+    // as well (possibly) the inventory namespace.
+    Destroy bool
 }
 
 // Prune deletes the set of passed pruneObjs. A prune skip/failure is
@@ -93,7 +89,7 @@ func (po *PruneOptions) Prune(pruneObjs []*unstructured.Unstructured,
     pruneFilters []filter.ValidationFilter,
     taskContext *taskrunner.TaskContext,
     o Options) error {
-    eventFactory := CreateEventFactory(po.Destroy)
+    eventFactory := CreateEventFactory(o.Destroy)
     // Iterate through objects to prune (delete). If an object is not pruned
     // and we need to keep it in the inventory, we must capture the prune failure.
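
Caller-side, the prune.go change reads as below; a minimal sketch where buildPruneOptions and destroyRun are illustrative names, f and invClient are assumed to come from the surrounding command setup, and common.DryRunNone is assumed from sigs.k8s.io/cli-utils/pkg/common:

package example

import (
    "k8s.io/kubectl/pkg/cmd/util"
    "sigs.k8s.io/cli-utils/pkg/apply/prune"
    "sigs.k8s.io/cli-utils/pkg/common"
    "sigs.k8s.io/cli-utils/pkg/inventory"
)

// The constructor resolves the dynamic client and RESTMapper up front, so
// failures surface here rather than on first use of a PruneOptions.
func buildPruneOptions(f util.Factory, invClient inventory.InventoryClient) (*prune.PruneOptions, error) {
    return prune.NewPruneOptions(f, invClient)
}

// Destroy is now a per-call field on prune.Options rather than state on
// PruneOptions, so one PruneOptions value can serve both the applier
// (Destroy: false) and the destroyer (Destroy: true).
var destroyRun = prune.Options{
    DryRunStrategy: common.DryRunNone,
    Destroy:        true,
}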
for _, pruneObj := range pruneObjs {
Expand Down
Loading

