Add controller flag to turn off built-in resolution #4168

Merged
merged 1 commit on Sep 14, 2021
18 changes: 16 additions & 2 deletions cmd/controller/main.go
@@ -55,6 +55,10 @@ var (
disableHighAvailability = flag.Bool("disable-ha", false, "Whether to disable high-availability functionality for this component. This flag will be deprecated "+
"and removed when we have promoted this feature to stable, so do not pass it without filing an "+
"issue upstream!")
experimentalDisableInTreeResolution = flag.Bool(disableInTreeResolutionFlag, false,
"Disable resolution of taskrun and pipelinerun refs by the taskrun and pipelinerun reconcilers.")

disableInTreeResolutionFlag = "experimental-disable-in-tree-resolution"
)

func main() {
@@ -108,10 +112,20 @@ func main() {
log.Fatal(http.ListenAndServe(":"+port, mux))
}()

taskrunControllerConfig := taskrun.ControllerConfiguration{
Images: images,
DisableTaskRefResolution: *experimentalDisableInTreeResolution,
}

pipelinerunControllerConfig := pipelinerun.ControllerConfiguration{
Images: images,
DisablePipelineRefResolution: *experimentalDisableInTreeResolution,
}
Comment on lines +115 to +123

Seems inconsistent with pipeline.Images for these to specialize between the two controllers, since the pipeline run controller only uses a subset of the pipeline.Images struct, but the same "options" struct is passed to both controllers.

My personal preference would be to rename/generalize the Images struct to reflect a more complete set of options, but this seems nitpicky and I have some other changes I've been thinking about to some of this handling, so perhaps we land this and I can try to find something we're happy with.
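
For illustration only, a rough sketch of what a single generalized options struct shared by both controllers might look like (the name, package placement, and shape here are hypothetical, not part of this change):

package config // hypothetical placement for a shared options type

import "github.com/tektoncd/pipeline/pkg/apis/pipeline"

// ControllerOptions is a hypothetical replacement for the two per-controller
// ControllerConfiguration structs: one options type constructed in
// cmd/controller/main.go and passed to both NewController functions.
type ControllerOptions struct {
	// Images are the image references used across Tekton Pipelines.
	Images pipeline.Images
	// DisableInTreeResolution turns off taskRef and pipelineRef
	// resolution in both the TaskRun and PipelineRun reconcilers.
	DisableInTreeResolution bool
}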

thanks for the change. 👍


ctx = filteredinformerfactory.WithSelectors(ctx, v1beta1.ManagedByLabelKey)
sharedmain.MainWithConfig(ctx, ControllerLogKey, cfg,
taskrun.NewController(*namespace, images),
pipelinerun.NewController(*namespace, images),
taskrun.NewController(*namespace, taskrunControllerConfig),
pipelinerun.NewController(*namespace, pipelinerunControllerConfig),
)
}

5 changes: 4 additions & 1 deletion config/controller.yaml
@@ -80,7 +80,10 @@ spec:
"-shell-image", "gcr.io/distroless/base@sha256:aa4fd987555ea10e1a4ec8765da8158b5ffdfef1e72da512c7ede509bc9966c4",
# for script mode to work with windows we need a powershell image
# pinning to nanoserver tag as of July 15 2021
"-shell-image-win", "mcr.microsoft.com/powershell:nanoserver@sha256:b6d5ff841b78bdf2dfed7550000fd4f3437385b8fa686ec0f010be24777654d6"
"-shell-image-win", "mcr.microsoft.com/powershell:nanoserver@sha256:b6d5ff841b78bdf2dfed7550000fd4f3437385b8fa686ec0f010be24777654d6",
# Experimental. Uncomment below to disable TaskRun and PipelineRun
# reconcilers' built-in taskRef and pipelineRef resolution procedures.
# "-experimental-disable-in-tree-resolution",
]
volumeMounts:
- name: config-logging
15 changes: 13 additions & 2 deletions pkg/reconciler/pipelinerun/controller.go
@@ -42,8 +42,18 @@ import (
"knative.dev/pkg/logging"
)

// ControllerConfiguration holds fields used to configure the
// PipelineRun controller.
type ControllerConfiguration struct {
// Images are the image references used across Tekton Pipelines.
Images pipeline.Images
// DisablePipelineRefResolution tells the controller not to perform
// resolution of pipeline refs from the cluster or bundles.
DisablePipelineRefResolution bool
}

// NewController instantiates a new controller.Impl from knative.dev/pkg/controller
func NewController(namespace string, images pipeline.Images) func(context.Context, configmap.Watcher) *controller.Impl {
func NewController(namespace string, conf ControllerConfiguration) func(context.Context, configmap.Watcher) *controller.Impl {
return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
logger := logging.FromContext(ctx)
kubeclientset := kubeclient.Get(ctx)
@@ -62,7 +72,7 @@ func NewController(namespace string, images pipeline.Images) func(context.Contex
c := &Reconciler{
KubeClientSet: kubeclientset,
PipelineClientSet: pipelineclientset,
Images: images,
Images: conf.Images,
pipelineRunLister: pipelineRunInformer.Lister(),
pipelineLister: pipelineInformer.Lister(),
taskLister: taskInformer.Lister(),
@@ -74,6 +84,7 @@ func NewController(namespace string, images pipeline.Images) func(context.Contex
cloudEventClient: cloudeventclient.Get(ctx),
metrics: pipelinerunmetrics.Get(ctx),
pvcHandler: volumeclaim.NewPVCHandler(kubeclientset, logger),
disableResolution: conf.DisablePipelineRefResolution,
}
impl := pipelinerunreconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options {
return controller.Options{
21 changes: 21 additions & 0 deletions pkg/reconciler/pipelinerun/pipelinerun.go
@@ -132,11 +132,21 @@ type Reconciler struct {
cloudEventClient cloudevent.CEClient
metrics *pipelinerunmetrics.Recorder
pvcHandler volumeclaim.PvcHandler

// disableResolution is a flag to the reconciler that it should
// not be performing resolution of pipelineRefs.
// TODO(sbwsg): Once we've agreed on a way forward for TEP-0060
// this can be removed in favor of whatever that chosen solution
// is.
disableResolution bool
}

var (
// Check that our Reconciler implements pipelinerunreconciler.Interface
_ pipelinerunreconciler.Interface = (*Reconciler)(nil)

// Indicates pipelinerun resolution hasn't occurred yet.
errResourceNotResolved = fmt.Errorf("pipeline ref has not been resolved")

nit: errors.New when there's no formatting.

🤔 I thought this was something golangci-lint would complain about, sorry I didn't notice before.

Also cc @n3wscott in case there's something useful in the controller framework we can use here to simplify any of the logic. None of the things that come to mind for me seem quite right.
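
A minimal sketch of the suggested form (assuming the file's import block is updated to include "errors"):

import "errors"

// Indicates pipelinerun resolution hasn't occurred yet.
var errResourceNotResolved = errors.New("pipeline ref has not been resolved")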

)

// ReconcileKind compares the actual state with the desired, and attempts to
@@ -232,6 +242,13 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, pr *v1beta1.PipelineRun)
logger.Errorf("Reconcile error: %v", err.Error())
}

if c.disableResolution && err == errResourceNotResolved {
// This is not an error: an out-of-band process can
// still resolve the PipelineRun, at which point
// reconciliation can continue as normal.
err = nil
}

if err = c.finishReconcileUpdateEmitEvents(ctx, pr, before, err); err != nil {
return err
}
@@ -336,6 +353,10 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun, get
return nil
}

if c.disableResolution && pr.Status.PipelineSpec == nil {
return errResourceNotResolved
}

pipelineMeta, pipelineSpec, err := resources.GetPipelineData(ctx, pr, getPipelineFunc)
if err != nil {
logger.Errorf("Failed to determine Pipeline spec to use for pipelinerun %s: %v", pr.Name, err)
2 changes: 1 addition & 1 deletion pkg/reconciler/pipelinerun/pipelinerun_test.go
@@ -159,7 +159,7 @@ func getPipelineRunController(t *testing.T, d test.Data) (test.Assets, func()) {
c, informers := test.SeedTestData(t, ctx, d)
configMapWatcher := cminformer.NewInformedWatcher(c.Kube, system.Namespace())

ctl := NewController(namespace, images)(ctx, configMapWatcher)
ctl := NewController(namespace, ControllerConfiguration{Images: images})(ctx, configMapWatcher)

if la, ok := ctl.Reconciler.(reconciler.LeaderAware); ok {
la.Promote(reconciler.UniversalBucket(), func(reconciler.Bucket, types.NamespacedName) {})
15 changes: 13 additions & 2 deletions pkg/reconciler/taskrun/controller.go
@@ -40,8 +40,18 @@ import (
"knative.dev/pkg/logging"
)

// ControllerConfiguration holds fields used to configure the
// TaskRun controller.
type ControllerConfiguration struct {
// Images are the image references used across Tekton Pipelines.
Images pipeline.Images
// DisableTaskRefResolution tells the controller not to perform
// resolution of task refs from the cluster or bundles.
DisableTaskRefResolution bool
}

// NewController instantiates a new controller.Impl from knative.dev/pkg/controller
func NewController(namespace string, images pipeline.Images) func(context.Context, configmap.Watcher) *controller.Impl {
func NewController(namespace string, conf ControllerConfiguration) func(context.Context, configmap.Watcher) *controller.Impl {
return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
logger := logging.FromContext(ctx)
kubeclientset := kubeclient.Get(ctx)
@@ -62,7 +72,7 @@ func NewController(namespace string, images pipeline.Images) func(context.Contex
c := &Reconciler{
KubeClientSet: kubeclientset,
PipelineClientSet: pipelineclientset,
Images: images,
Images: conf.Images,
taskRunLister: taskRunInformer.Lister(),
taskLister: taskInformer.Lister(),
clusterTaskLister: clusterTaskInformer.Lister(),
@@ -71,6 +81,7 @@ func NewController(namespace string, images pipeline.Images) func(context.Contex
metrics: taskrunmetrics.Get(ctx),
entrypointCache: entrypointCache,
pvcHandler: volumeclaim.NewPVCHandler(kubeclientset, logger),
disableResolution: conf.DisableTaskRefResolution,
}
impl := taskrunreconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options {
return controller.Options{
55 changes: 38 additions & 17 deletions pkg/reconciler/taskrun/taskrun.go
@@ -74,10 +74,22 @@ type Reconciler struct {
entrypointCache podconvert.EntrypointCache
metrics *taskrunmetrics.Recorder
pvcHandler volumeclaim.PvcHandler

// disableResolution is a flag to the reconciler that it should
// not be performing resolution of taskRefs.
// TODO(sbwsg): Once we've agreed on a way forward for TEP-0060
// this can be removed in favor of whatever that chosen solution
// is.
disableResolution bool
}

// Check that our Reconciler implements taskrunreconciler.Interface
var _ taskrunreconciler.Interface = (*Reconciler)(nil)
var (
_ taskrunreconciler.Interface = (*Reconciler)(nil)

// Indicates taskrun resolution hasn't occurred yet.
errResourceNotResolved = fmt.Errorf("task ref has not been resolved")
)

// ReconcileKind compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Task Run
@@ -168,25 +180,30 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg
// taskrun, runs API conversions. Errors that come out of prepare are
// permanent ones, so in case of error we update, emit events and return
_, rtr, err := c.prepare(ctx, tr)
if err != nil {
logger.Errorf("TaskRun prepare error: %v", err.Error())
// We only return an error if update failed, otherwise we don't want to
// reconcile an invalid TaskRun anymore
return c.finishReconcileUpdateEmitEvents(ctx, tr, nil, err)
}
if c.disableResolution && err == errResourceNotResolved {
// This is not an error - the taskrun is still expected
// to be resolved out-of-band.
} else {
if err != nil {
logger.Errorf("TaskRun prepare error: %v", err.Error())
// We only return an error if update failed, otherwise we don't want to
// reconcile an invalid TaskRun anymore
return c.finishReconcileUpdateEmitEvents(ctx, tr, nil, err)
}

// Store the condition before reconcile
before = tr.Status.GetCondition(apis.ConditionSucceeded)
// Store the condition before reconcile
before = tr.Status.GetCondition(apis.ConditionSucceeded)

// Reconcile this copy of the task run and then write back any status
// updates regardless of whether the reconciliation errored out.
if err = c.reconcile(ctx, tr, rtr); err != nil {
logger.Errorf("Reconcile: %v", err.Error())
}
// Reconcile this copy of the task run and then write back any status
// updates regardless of whether the reconciliation errored out.
if err = c.reconcile(ctx, tr, rtr); err != nil {
logger.Errorf("Reconcile: %v", err.Error())
}

// Emit events (only when ConditionSucceeded was changed)
if err = c.finishReconcileUpdateEmitEvents(ctx, tr, before, err); err != nil {
return err
// Emit events (only when ConditionSucceeded was changed)
if err = c.finishReconcileUpdateEmitEvents(ctx, tr, before, err); err != nil {
return err
}
}

if tr.Status.StartTime != nil {
@@ -270,6 +287,10 @@ func (c *Reconciler) prepare(ctx context.Context, tr *v1beta1.TaskRun) (*v1beta1
// and may not have had all of the assumed default specified.
tr.SetDefaults(contexts.WithUpgradeViaDefaulting(ctx))

if c.disableResolution && tr.Status.TaskSpec == nil {
return nil, nil, errResourceNotResolved
}

getTaskfunc, err := resources.GetTaskFuncFromTaskRun(ctx, c.KubeClientSet, c.PipelineClientSet, tr)
if err != nil {
logger.Errorf("Failed to fetch task reference %s: %v", tr.Spec.TaskRef.Name, err)
2 changes: 1 addition & 1 deletion pkg/reconciler/taskrun/taskrun_test.go
@@ -329,7 +329,7 @@ func getTaskRunController(t *testing.T, d test.Data) (test.Assets, func()) {
c, informers := test.SeedTestData(t, ctx, d)
configMapWatcher := cminformer.NewInformedWatcher(c.Kube, system.Namespace())

ctl := NewController(namespace, images)(ctx, configMapWatcher)
ctl := NewController(namespace, ControllerConfiguration{Images: images})(ctx, configMapWatcher)
if err := configMapWatcher.Start(ctx.Done()); err != nil {
t.Fatalf("error starting configmap watcher: %v", err)
}