From adaae90b9bb93aa96cb345f75bf7797a12f8c308 Mon Sep 17 00:00:00 2001 From: Vincent Demeester Date: Thu, 16 Sep 2021 18:52:58 +0200 Subject: [PATCH] =?UTF-8?q?Use=20an=20Informer=20to=20list=20LimitRanges?= =?UTF-8?q?=20=F0=9F=A5=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using a LimitRange lister here instead, so this doesn't end up hitting the real API server on each call. Taking into account a review : https://github.com/tektoncd/pipeline/pull/4176#issuecomment-920366660. Signed-off-by: Vincent Demeester --- internal/builder/v1beta1/pod.go | 7 -- internal/builder/v1beta1/pod_test.go | 3 - pkg/internal/limitrange/limitrange.go | 23 ++-- pkg/internal/limitrange/transformer.go | 6 +- pkg/internal/limitrange/transformer_test.go | 66 +++++++---- pkg/reconciler/taskrun/controller.go | 3 + pkg/reconciler/taskrun/taskrun.go | 4 +- pkg/reconciler/taskrun/taskrun_test.go | 32 +++--- test/controller.go | 4 + .../informers/core/v1/limitrange/fake/fake.go | 40 +++++++ .../core/v1/limitrange/limitrange.go | 106 ++++++++++++++++++ vendor/modules.txt | 2 + 12 files changed, 233 insertions(+), 63 deletions(-) create mode 100644 vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/fake/fake.go create mode 100644 vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/limitrange.go diff --git a/internal/builder/v1beta1/pod.go b/internal/builder/v1beta1/pod.go index 0179c7cbdbb..4e0f9b98871 100644 --- a/internal/builder/v1beta1/pod.go +++ b/internal/builder/v1beta1/pod.go @@ -19,7 +19,6 @@ package builder import ( corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -124,9 +123,6 @@ func PodContainer(name, image string, ops ...ContainerOp) PodSpecOp { c := &corev1.Container{ Name: name, Image: image, - Resources: corev1.ResourceRequirements{ - Requests: map[corev1.ResourceName]resource.Quantity{}, - }, } for _, op := range ops { op(c) @@ -143,9 +139,6 @@ func PodInitContainer(name, image string, ops ...ContainerOp) PodSpecOp { Name: name, Image: image, Args: []string{}, - Resources: corev1.ResourceRequirements{ - Requests: map[corev1.ResourceName]resource.Quantity{}, - }, } for _, op := range ops { op(c) diff --git a/internal/builder/v1beta1/pod_test.go b/internal/builder/v1beta1/pod_test.go index 1eab56f810c..7098fa0cb98 100644 --- a/internal/builder/v1beta1/pod_test.go +++ b/internal/builder/v1beta1/pod_test.go @@ -87,9 +87,6 @@ func TestPod(t *testing.T) { Containers: []corev1.Container{{ Name: "nop", Image: "nop:latest", - Resources: corev1.ResourceRequirements{ - Requests: map[corev1.ResourceName]resource.Quantity{}, - }, }}, InitContainers: []corev1.Container{{ Name: "basic", diff --git a/pkg/internal/limitrange/limitrange.go b/pkg/internal/limitrange/limitrange.go index af3c905fdeb..2f919e24d19 100644 --- a/pkg/internal/limitrange/limitrange.go +++ b/pkg/internal/limitrange/limitrange.go @@ -20,23 +20,23 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" + "k8s.io/apimachinery/pkg/labels" + corev1listers "k8s.io/client-go/listers/core/v1" ) -func getVirtualLimitRange(ctx context.Context, namespace string, c kubernetes.Interface) (*corev1.LimitRange, error) { - limitRanges, err := c.CoreV1().LimitRanges(namespace).List(ctx, metav1.ListOptions{}) +func getVirtualLimitRange(ctx context.Context, namespace string, lister 
corev1listers.LimitRangeLister) (*corev1.LimitRange, error) { + limitRanges, err := lister.LimitRanges(namespace).List(labels.Everything()) if err != nil { return nil, err } - var limitRange corev1.LimitRange + var limitRange *corev1.LimitRange switch { - case len(limitRanges.Items) == 0: + case len(limitRanges) == 0: // No LimitRange defined break - case len(limitRanges.Items) == 1: + case len(limitRanges) == 1: // One LimitRange defined - limitRange = limitRanges.Items[0] + limitRange = limitRanges[0] default: // Several LimitRange defined // Create a virtual LimitRange with @@ -45,8 +45,9 @@ func getVirtualLimitRange(ctx context.Context, namespace string, c kubernetes.In // - Default that "fits" into min/max taken above // - Default request that "fits" into min/max taken above // - Smallest ratio (aka the most restrictive one) + limitRange = &corev1.LimitRange{} m := map[corev1.LimitType]corev1.LimitRangeItem{} - for _, lr := range limitRanges.Items { + for _, lr := range limitRanges { for _, item := range lr.Spec.Limits { _, exists := m[item.Type] if !exists { @@ -74,7 +75,7 @@ func getVirtualLimitRange(ctx context.Context, namespace string, c kubernetes.In } } // Handle Default and DefaultRequest - for _, lr := range limitRanges.Items { + for _, lr := range limitRanges { for _, item := range lr.Spec.Limits { // Default m[item.Type].Default[corev1.ResourceCPU] = minOfBetween(m[item.Type].Default[corev1.ResourceCPU], item.Default[corev1.ResourceCPU], m[item.Type].Min[corev1.ResourceCPU], m[item.Type].Max[corev1.ResourceCPU]) @@ -90,7 +91,7 @@ func getVirtualLimitRange(ctx context.Context, namespace string, c kubernetes.In limitRange.Spec.Limits = append(limitRange.Spec.Limits, v) } } - return &limitRange, nil + return limitRange, nil } func maxOf(a, b resource.Quantity) resource.Quantity { diff --git a/pkg/internal/limitrange/transformer.go b/pkg/internal/limitrange/transformer.go index 1d3897db62f..5ab1f081454 100644 --- a/pkg/internal/limitrange/transformer.go +++ b/pkg/internal/limitrange/transformer.go @@ -21,7 +21,7 @@ import ( "github.com/tektoncd/pipeline/pkg/pod" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" ) var resourceNames = []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory, corev1.ResourceEphemeralStorage} @@ -30,9 +30,9 @@ func isZero(q resource.Quantity) bool { return (&q).IsZero() } -func NewTransformer(ctx context.Context, namespace string, clientset kubernetes.Interface) pod.Transformer { +func NewTransformer(ctx context.Context, namespace string, lister corev1listers.LimitRangeLister) pod.Transformer { return func(p *corev1.Pod) (*corev1.Pod, error) { - limitRange, err := getVirtualLimitRange(ctx, namespace, clientset) + limitRange, err := getVirtualLimitRange(ctx, namespace, lister) if err != nil { return p, err } diff --git a/pkg/internal/limitrange/transformer_test.go b/pkg/internal/limitrange/transformer_test.go index 54ad05a069c..471aee80b72 100644 --- a/pkg/internal/limitrange/transformer_test.go +++ b/pkg/internal/limitrange/transformer_test.go @@ -20,12 +20,15 @@ import ( "testing" "github.com/google/go-cmp/cmp" + ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" + "github.com/tektoncd/pipeline/test" "github.com/tektoncd/pipeline/test/diff" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - fakek8s "k8s.io/client-go/kubernetes/fake" 
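
A note on the merge performed by getVirtualLimitRange above: when a namespace defines several LimitRanges, the reconciler folds them into one synthetic LimitRange that keeps the most restrictive bound in each direction (the maximum of the declared minimums, the minimum of the declared maximums) and clamps defaults to fit inside those bounds. A minimal, self-contained sketch of that rule for a single quantity, reusing the shape of the maxOf helper from the diff; the values are illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// maxOf mirrors the helper in pkg/internal/limitrange/limitrange.go:
// it returns the larger of two quantities.
func maxOf(a, b resource.Quantity) resource.Quantity {
	if a.Cmp(b) > 0 {
		return a
	}
	return b
}

func main() {
	// Two LimitRanges in the same namespace declare different minimums
	// for container CPU.
	minA := resource.MustParse("100m")
	minB := resource.MustParse("250m")

	// The virtual LimitRange keeps the most restrictive bound: the
	// maximum of the minimums (and, symmetrically, the minimum of the
	// maximums via a minOf helper).
	fmt.Println(maxOf(minA, minB).String()) // 250m
}
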
+ fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + fakelimitrangeinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/fake" + fakeserviceaccountinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/serviceaccount/fake" ) var resourceQuantityCmp = cmp.Comparer(func(x, y resource.Quantity) bool { @@ -405,15 +408,16 @@ func TestTransformerOneContainer(t *testing.T) { }, }} { t.Run(tc.description, func(t *testing.T) { - kubeclient := fakek8s.NewSimpleClientset( - &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}, - &corev1.LimitRange{ObjectMeta: metav1.ObjectMeta{Name: "limitrange", Namespace: "default"}, + ctx, cancel := setup(t, + []corev1.ServiceAccount{{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}}, + []corev1.LimitRange{{ObjectMeta: metav1.ObjectMeta{Name: "limitrange", Namespace: "default"}, Spec: corev1.LimitRangeSpec{ Limits: tc.limitranges, }, - }, + }}, ) - f := NewTransformer(context.Background(), "default", kubeclient) + defer cancel() + f := NewTransformer(ctx, "default", fakelimitrangeinformer.Get(ctx).Lister()) got, err := f(&corev1.Pod{ Spec: tc.podspec, }) @@ -817,15 +821,16 @@ func TestTransformerMultipleContainer(t *testing.T) { }, }} { t.Run(tc.description, func(t *testing.T) { - kubeclient := fakek8s.NewSimpleClientset( - &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}, - &corev1.LimitRange{ObjectMeta: metav1.ObjectMeta{Name: "limitrange", Namespace: "default"}, + ctx, cancel := setup(t, + []corev1.ServiceAccount{{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}}, + []corev1.LimitRange{{ObjectMeta: metav1.ObjectMeta{Name: "limitrange", Namespace: "default"}, Spec: corev1.LimitRangeSpec{ Limits: tc.limitranges, }, - }, + }}, ) - f := NewTransformer(context.Background(), "default", kubeclient) + defer cancel() + f := NewTransformer(ctx, "default", fakelimitrangeinformer.Get(ctx).Lister()) got, err := f(&corev1.Pod{ Spec: tc.podspec, }) @@ -943,13 +948,12 @@ func TestTransformerOneContainerMultipleLimitRange(t *testing.T) { }, }} { t.Run(tc.description, func(t *testing.T) { - runtimeObjects := []runtime.Object{&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}} - for _, l := range tc.limitranges { - l := l // because we use pointer, we need to descope this... - runtimeObjects = append(runtimeObjects, &l) - } - kubeclient := fakek8s.NewSimpleClientset(runtimeObjects...) 
- f := NewTransformer(context.Background(), "default", kubeclient) + ctx, cancel := setup(t, + []corev1.ServiceAccount{{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}}, + tc.limitranges, + ) + defer cancel() + f := NewTransformer(ctx, "default", fakelimitrangeinformer.Get(ctx).Lister()) got, err := f(&corev1.Pod{ Spec: tc.podspec, }) @@ -985,3 +989,27 @@ func cmpRequestsAndLimits(t *testing.T, want, got corev1.PodSpec) { } } } + +func setup(t *testing.T, serviceaccounts []corev1.ServiceAccount, limitranges []corev1.LimitRange) (context.Context, func()) { + ctx, _ := ttesting.SetupFakeContext(t) + ctx, cancel := context.WithCancel(ctx) + kubeclient := fakekubeclient.Get(ctx) + // LimitRange + limitRangeInformer := fakelimitrangeinformer.Get(ctx) + kubeclient.PrependReactor("*", "limitranges", test.AddToInformer(t, limitRangeInformer.Informer().GetIndexer())) + for _, tl := range limitranges { + if _, err := kubeclient.CoreV1().LimitRanges(tl.Namespace).Create(ctx, &tl, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + } + // ServiceAccount + serviceAccountInformer := fakeserviceaccountinformer.Get(ctx) + kubeclient.PrependReactor("*", "serviceaccounts", test.AddToInformer(t, serviceAccountInformer.Informer().GetIndexer())) + for _, ts := range serviceaccounts { + if _, err := kubeclient.CoreV1().ServiceAccounts(ts.Namespace).Create(ctx, &ts, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + } + kubeclient.ClearActions() + return ctx, cancel +} diff --git a/pkg/reconciler/taskrun/controller.go b/pkg/reconciler/taskrun/controller.go index 81d3bd91857..3739deb3ca2 100644 --- a/pkg/reconciler/taskrun/controller.go +++ b/pkg/reconciler/taskrun/controller.go @@ -34,6 +34,7 @@ import ( "github.com/tektoncd/pipeline/pkg/taskrunmetrics" "k8s.io/client-go/tools/cache" kubeclient "knative.dev/pkg/client/injection/kube/client" + limitrangeinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange" filteredpodinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/pod/filtered" "knative.dev/pkg/configmap" "knative.dev/pkg/controller" @@ -61,6 +62,7 @@ func NewController(namespace string, conf ControllerConfiguration) func(context. clusterTaskInformer := clustertaskinformer.Get(ctx) podInformer := filteredpodinformer.Get(ctx, v1beta1.ManagedByLabelKey) resourceInformer := resourceinformer.Get(ctx) + limitrangeInformer := limitrangeinformer.Get(ctx) configStore := config.NewStore(logger.Named("config-store"), taskrunmetrics.MetricsOnStore(logger)) configStore.WatchConfigs(cmw) @@ -77,6 +79,7 @@ func NewController(namespace string, conf ControllerConfiguration) func(context. 
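
A note on the test refactor in this hunk: because NewTransformer now reads through a lister, a fake clientset alone is no longer enough; seeded objects must land in the informer's cache, which is what the PrependReactor/test.AddToInformer wiring in the setup helper (added at the end of this file's diff) takes care of. The same idea can be sketched without the knative injection machinery, using a plain client-go fake clientset and a shared informer factory; this is an illustrative stand-in, not the code the tests use:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// A fake clientset pre-loaded with one LimitRange.
	client := fake.NewSimpleClientset(&corev1.LimitRange{
		ObjectMeta: metav1.ObjectMeta{Name: "limitrange", Namespace: "default"},
	})

	// Build an informer factory over the fake client and grab the
	// LimitRange lister, the same interface NewTransformer now takes.
	factory := informers.NewSharedInformerFactory(client, 0)
	lister := factory.Core().V1().LimitRanges().Lister()

	// Start and sync the informers so the lister's cache is populated.
	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	lrs, err := lister.LimitRanges("default").List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println(len(lrs)) // 1
}
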
taskLister: taskInformer.Lister(), clusterTaskLister: clusterTaskInformer.Lister(), resourceLister: resourceInformer.Lister(), + limitrangeLister: limitrangeInformer.Lister(), cloudEventClient: cloudeventclient.Get(ctx), metrics: taskrunmetrics.Get(ctx), entrypointCache: entrypointCache, diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go index 2a956c1f62b..e4c370038ec 100644 --- a/pkg/reconciler/taskrun/taskrun.go +++ b/pkg/reconciler/taskrun/taskrun.go @@ -53,6 +53,7 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + corev1Listers "k8s.io/client-go/listers/core/v1" "knative.dev/pkg/apis" "knative.dev/pkg/controller" "knative.dev/pkg/kmeta" @@ -71,6 +72,7 @@ type Reconciler struct { taskLister listers.TaskLister clusterTaskLister listers.ClusterTaskLister resourceLister resourcelisters.PipelineResourceLister + limitrangeLister corev1Listers.LimitRangeLister cloudEventClient cloudevent.CEClient entrypointCache podconvert.EntrypointCache metrics *taskrunmetrics.Recorder @@ -704,7 +706,7 @@ func (c *Reconciler) createPod(ctx context.Context, tr *v1beta1.TaskRun, rtr *re EntrypointCache: c.entrypointCache, OverrideHomeEnv: shouldOverrideHomeEnv, } - pod, err := podbuilder.Build(ctx, tr, *ts, limitrange.NewTransformer(ctx, tr.Namespace, c.KubeClientSet)) + pod, err := podbuilder.Build(ctx, tr, *ts, limitrange.NewTransformer(ctx, tr.Namespace, c.limitrangeLister)) if err != nil { return nil, fmt.Errorf("translating TaskSpec to Pod: %w", err) } diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go index 66de42251d0..f6618aff38e 100644 --- a/pkg/reconciler/taskrun/taskrun_test.go +++ b/pkg/reconciler/taskrun/taskrun_test.go @@ -1814,7 +1814,7 @@ func TestReconcile_SetsStartTime(t *testing.T) { t.Fatal(err) } - if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err == nil { + if err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err == nil { t.Error("Wanted a wrapped requeue error, but got nil.") } else if ok, _ := controller.IsRequeueKey(err); !ok { t.Errorf("expected no error reconciling valid TaskRun but got %v", err) @@ -1851,7 +1851,7 @@ func TestReconcile_DoesntChangeStartTime(t *testing.T) { testAssets, cancel := getTaskRunController(t, d) defer cancel() - if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil { + if err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err != nil { t.Errorf("expected no error reconciling valid TaskRun but got %v", err) } @@ -1964,7 +1964,7 @@ func TestReconcileTaskRunWithPermanentError(t *testing.T) { defer cancel() c := testAssets.Controller clients := testAssets.Clients - reconcileErr := c.Reconciler.Reconcile(context.Background(), getRunName(noTaskRun)) + reconcileErr := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(noTaskRun)) // When a TaskRun was rejected with a permanent error, reconciler must stop and forget about the run // Such TaskRun enters Reconciler and from within the isDone block, marks the run success so that @@ -2392,9 +2392,6 @@ func TestExpandMountPath(t *testing.T) { testAssets, cancel := getTaskRunController(t, d) defer cancel() - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - // c := testAssets.Controller clients := testAssets.Clients saName := "default" @@ -2415,6 +2412,7 @@ func 
TestExpandMountPath(t *testing.T) { taskLister: testAssets.Informers.Task.Lister(), clusterTaskLister: testAssets.Informers.ClusterTask.Lister(), resourceLister: testAssets.Informers.PipelineResource.Lister(), + limitrangeLister: testAssets.Informers.LimitRange.Lister(), cloudEventClient: testAssets.Clients.CloudEvents, metrics: nil, // Not used entrypointCache: nil, // Not used @@ -2427,7 +2425,7 @@ func TestExpandMountPath(t *testing.T) { TaskSpec: &v1beta1.TaskSpec{Steps: simpleTask.Spec.Steps, Workspaces: simpleTask.Spec.Workspaces}, } - pod, err := r.createPod(ctx, taskRun, rtr) + pod, err := r.createPod(testAssets.Ctx, taskRun, rtr) if err != nil { t.Fatalf("create pod threw error %v", err) @@ -2486,10 +2484,6 @@ func TestExpandMountPath_DuplicatePaths(t *testing.T) { testAssets, cancel := getTaskRunController(t, d) defer cancel() - - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - clients := testAssets.Clients saName := "default" if _, err := clients.Kube.CoreV1().ServiceAccounts(taskRun.Namespace).Create(testAssets.Ctx, &corev1.ServiceAccount{ @@ -2508,6 +2502,7 @@ func TestExpandMountPath_DuplicatePaths(t *testing.T) { taskLister: testAssets.Informers.Task.Lister(), clusterTaskLister: testAssets.Informers.ClusterTask.Lister(), resourceLister: testAssets.Informers.PipelineResource.Lister(), + limitrangeLister: testAssets.Informers.LimitRange.Lister(), cloudEventClient: testAssets.Clients.CloudEvents, metrics: nil, // Not used entrypointCache: nil, // Not used @@ -2520,7 +2515,7 @@ func TestExpandMountPath_DuplicatePaths(t *testing.T) { TaskSpec: &v1beta1.TaskSpec{Steps: simpleTask.Spec.Steps, Workspaces: simpleTask.Spec.Workspaces}, } - _, err := r.createPod(ctx, taskRun, rtr) + _, err := r.createPod(testAssets.Ctx, taskRun, rtr) if err == nil || err.Error() != expectedError { t.Errorf("Expected to fail validation for duplicate Workspace mount paths, error was %v", err) @@ -2544,9 +2539,6 @@ func TestHandlePodCreationError(t *testing.T) { testAssets, cancel := getTaskRunController(t, d) defer cancel() - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - // Use the test assets to create a *Reconciler directly for focused testing. 
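
For context on the limitrangeLister field these test Reconcilers now set: createPod hands the lister to limitrange.NewTransformer, and podbuilder.Build applies the resulting transformer after assembling the base Pod, so building a Pod never issues a live LimitRanges LIST. The tests above also switch from context.Background() to testAssets.Ctx because the fake informers are registered on the injection context, and a fresh context would not carry them. The transformer shape is the func(*corev1.Pod) (*corev1.Pod, error) returned in transformer.go; a small sketch of how such transformers chain, where apply and addLabel are illustrative and not Tekton APIs:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// Transformer matches the shape returned by limitrange.NewTransformer
// in this patch: take a Pod, return a possibly modified Pod.
type Transformer func(*corev1.Pod) (*corev1.Pod, error)

// apply chains transformers the way podbuilder.Build applies them after
// constructing the base Pod.
func apply(p *corev1.Pod, ts ...Transformer) (*corev1.Pod, error) {
	var err error
	for _, t := range ts {
		if p, err = t(p); err != nil {
			return p, err
		}
	}
	return p, nil
}

func main() {
	addLabel := func(p *corev1.Pod) (*corev1.Pod, error) {
		if p.Labels == nil {
			p.Labels = map[string]string{}
		}
		p.Labels["transformed"] = "true"
		return p, nil
	}
	p, _ := apply(&corev1.Pod{}, addLabel)
	fmt.Println(p.Labels["transformed"]) // true
}
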
c := &Reconciler{ KubeClientSet: testAssets.Clients.Kube, @@ -2555,6 +2547,7 @@ func TestHandlePodCreationError(t *testing.T) { taskLister: testAssets.Informers.Task.Lister(), clusterTaskLister: testAssets.Informers.ClusterTask.Lister(), resourceLister: testAssets.Informers.PipelineResource.Lister(), + limitrangeLister: testAssets.Informers.LimitRange.Lister(), cloudEventClient: testAssets.Clients.CloudEvents, metrics: nil, // Not used entrypointCache: nil, // Not used @@ -2588,7 +2581,7 @@ func TestHandlePodCreationError(t *testing.T) { }} for _, tc := range testcases { t.Run(tc.description, func(t *testing.T) { - c.handlePodCreationError(ctx, taskRun, tc.err) + c.handlePodCreationError(testAssets.Ctx, taskRun, tc.err) foundCondition := false for _, cond := range taskRun.Status.Conditions { if cond.Type == tc.expectedType && cond.Status == tc.expectedStatus && cond.Reason == tc.expectedReason { @@ -3112,7 +3105,7 @@ func TestReconcileValidDefaultWorkspaceOmittedOptionalWorkspace(t *testing.T) { t.Fatal(err) } - if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRunOmittingWorkspace)); err == nil { + if err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRunOmittingWorkspace)); err == nil { t.Error("Wanted a wrapped requeue error, but got nil.") } else if ok, _ := controller.IsRequeueKey(err); !ok { t.Errorf("Unexpected reconcile error for TaskRun %q: %v", taskRunOmittingWorkspace.Name, err) @@ -3280,7 +3273,7 @@ func TestReconcileWithWorkspacesIncompatibleWithAffinityAssistant(t *testing.T) t.Fatal(err) } - _ = testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)) + _ = testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)) _, err := clients.Pipeline.TektonV1beta1().Tasks(taskRun.Namespace).Get(testAssets.Ctx, taskWithTwoWorkspaces.Name, metav1.GetOptions{}) if err != nil { @@ -3343,7 +3336,7 @@ func TestReconcileWorkspaceWithVolumeClaimTemplate(t *testing.T) { t.Fatal(err) } - if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err == nil { + if err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRunName(taskRun)); err == nil { t.Error("Wanted a wrapped requeue error, but got nil.") } else if ok, _ := controller.IsRequeueKey(err); !ok { t.Errorf("expected no error reconciling valid TaskRun but got %v", err) @@ -3661,6 +3654,7 @@ func TestFailTaskRun(t *testing.T) { taskLister: testAssets.Informers.Task.Lister(), clusterTaskLister: testAssets.Informers.ClusterTask.Lister(), resourceLister: testAssets.Informers.PipelineResource.Lister(), + limitrangeLister: testAssets.Informers.LimitRange.Lister(), cloudEventClient: testAssets.Clients.CloudEvents, metrics: nil, // Not used entrypointCache: nil, // Not used diff --git a/test/controller.go b/test/controller.go index 435a99ac398..cef6cf604c9 100644 --- a/test/controller.go +++ b/test/controller.go @@ -54,6 +54,7 @@ import ( "k8s.io/client-go/tools/record" fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" fakeconfigmapinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap/fake" + fakelimitrangeinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/fake" fakefilteredpodinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/pod/filtered/fake" fakeserviceaccountinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/serviceaccount/fake" "knative.dev/pkg/controller" @@ -74,6 +75,7 @@ 
type Data struct { Namespaces []*corev1.Namespace ConfigMaps []*corev1.ConfigMap ServiceAccounts []*corev1.ServiceAccount + LimitRange []*corev1.LimitRange } // Clients holds references to clients which are useful for reconciler tests. @@ -97,6 +99,7 @@ type Informers struct { Pod coreinformers.PodInformer ConfigMap coreinformers.ConfigMapInformer ServiceAccount coreinformers.ServiceAccountInformer + LimitRange coreinformers.LimitRangeInformer } // Assets holds references to the controller, logs, clients, and informers. @@ -177,6 +180,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers Pod: fakefilteredpodinformer.Get(ctx, v1beta1.ManagedByLabelKey), ConfigMap: fakeconfigmapinformer.Get(ctx), ServiceAccount: fakeserviceaccountinformer.Get(ctx), + LimitRange: fakelimitrangeinformer.Get(ctx), } // Attach reactors that add resource mutations to the appropriate diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/fake/fake.go b/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/fake/fake.go new file mode 100644 index 00000000000..0d5f06be923 --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + limitrange "knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange" + fake "knative.dev/pkg/client/injection/kube/informers/factory/fake" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = limitrange.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Core().V1().LimitRanges() + return context.WithValue(ctx, limitrange.Key{}, inf), inf.Informer() +} diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/limitrange.go b/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/limitrange.go new file mode 100644 index 00000000000..d86077f7db7 --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/limitrange.go @@ -0,0 +1,106 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
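
The vendored fake above registers itself with injection.Fake in an init function, so merely importing the package is enough for fakelimitrangeinformer.Get(ctx) to work inside tests that call SetupFakeContext. The mechanism is a private context key, as the second vendored file below shows; stripped to its essence it looks like this (a toy sketch, not the knative code):

package main

import (
	"context"
	"fmt"
)

// key is unexported, so only this package can attach or read the value:
// the same trick the vendored Key struct{} plays.
type key struct{}

// withInformer stands in for the registered injection callback that
// stores the typed informer on the context at startup.
func withInformer(ctx context.Context) context.Context {
	return context.WithValue(ctx, key{}, "limitrange informer goes here")
}

// get mirrors the vendored Get: panic early if the informer was never
// registered, rather than failing obscurely later.
func get(ctx context.Context) string {
	v := ctx.Value(key{})
	if v == nil {
		panic("limitrange informer not on context")
	}
	return v.(string)
}

func main() {
	ctx := withInformer(context.Background())
	fmt.Println(get(ctx))
}
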
+ +package limitrange + +import ( + context "context" + + apicorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + v1 "k8s.io/client-go/informers/core/v1" + kubernetes "k8s.io/client-go/kubernetes" + corev1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + client "knative.dev/pkg/client/injection/kube/client" + factory "knative.dev/pkg/client/injection/kube/informers/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) + injection.Dynamic.RegisterDynamicInformer(withDynamicInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Core().V1().LimitRanges() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +func withDynamicInformer(ctx context.Context) context.Context { + inf := &wrapper{client: client.Get(ctx)} + return context.WithValue(ctx, Key{}, inf) +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.LimitRangeInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch k8s.io/client-go/informers/core/v1.LimitRangeInformer from context.") + } + return untyped.(v1.LimitRangeInformer) +} + +type wrapper struct { + client kubernetes.Interface + + namespace string +} + +var _ v1.LimitRangeInformer = (*wrapper)(nil) +var _ corev1.LimitRangeLister = (*wrapper)(nil) + +func (w *wrapper) Informer() cache.SharedIndexInformer { + return cache.NewSharedIndexInformer(nil, &apicorev1.LimitRange{}, 0, nil) +} + +func (w *wrapper) Lister() corev1.LimitRangeLister { + return w +} + +func (w *wrapper) LimitRanges(namespace string) corev1.LimitRangeNamespaceLister { + return &wrapper{client: w.client, namespace: namespace} +} + +func (w *wrapper) List(selector labels.Selector) (ret []*apicorev1.LimitRange, err error) { + lo, err := w.client.CoreV1().LimitRanges(w.namespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: selector.String(), + // TODO(mattmoor): Incorporate resourceVersion bounds based on staleness criteria. + }) + if err != nil { + return nil, err + } + for idx := range lo.Items { + ret = append(ret, &lo.Items[idx]) + } + return ret, nil +} + +func (w *wrapper) Get(name string) (*apicorev1.LimitRange, error) { + return w.client.CoreV1().LimitRanges(w.namespace).Get(context.TODO(), name, metav1.GetOptions{ + // TODO(mattmoor): Incorporate resourceVersion bounds based on staleness criteria. 
+ }) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8bffb17a773..9dedc592c72 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -937,6 +937,8 @@ knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/mutatin knative.dev/pkg/client/injection/kube/informers/admissionregistration/v1/validatingwebhookconfiguration knative.dev/pkg/client/injection/kube/informers/core/v1/configmap knative.dev/pkg/client/injection/kube/informers/core/v1/configmap/fake +knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange +knative.dev/pkg/client/injection/kube/informers/core/v1/limitrange/fake knative.dev/pkg/client/injection/kube/informers/core/v1/pod/filtered knative.dev/pkg/client/injection/kube/informers/core/v1/pod/filtered/fake knative.dev/pkg/client/injection/kube/informers/core/v1/serviceaccount
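
One detail worth noting in the vendored wrapper.List above: it appends &lo.Items[idx] by index rather than taking the address of a range variable. That sidesteps the aliasing bug the deleted test code guarded against with its "l := l // because we use pointer, we need to descope this..." line. A sketch of the difference, illustrative only:

package main

import "fmt"

func main() {
	items := []int{1, 2, 3}

	// Unguarded pattern: before Go 1.22's per-iteration loop variables,
	// every appended pointer aliased the single range variable, so all
	// entries ended up pointing at the last value. The removed test code
	// worked around this with `l := l`.
	var aliased []*int
	for _, v := range items {
		aliased = append(aliased, &v)
	}

	// wrapper.List's idiom: index the backing slice directly, which is
	// correct under any Go version and avoids the copy entirely.
	var indexed []*int
	for i := range items {
		indexed = append(indexed, &items[i])
	}

	fmt.Println(*aliased[0], *indexed[0]) // 1 1 with Go 1.22 and later; 3 1 on older toolchains
}
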