From ba0808d5ce4a86b20e1456a0461c76df9a71d1be Mon Sep 17 00:00:00 2001
From: Shane Utt
Date: Fri, 9 Jul 2021 11:42:41 -0400
Subject: [PATCH 01/18] chore: update test for previous code review

These were requested changes in a previous code review which I accidentally
left in my git stash, so just getting them committed now so they are in.
---
 pkg/util/debug_logging_test.go | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/pkg/util/debug_logging_test.go b/pkg/util/debug_logging_test.go
index 59d1e5b7ce..8a5a80d4cd 100644
--- a/pkg/util/debug_logging_test.go
+++ b/pkg/util/debug_logging_test.go
@@ -133,8 +133,9 @@ func TestDebugLoggerThreadSafety(t *testing.T) {
 
 	// spam the logger concurrently across several goroutines to ensure no dataraces
 	wg := &sync.WaitGroup{}
-	for i := 0; i < 100; i++ {
-		wg.Add(1)
+	total := 100
+	wg.Add(total)
+	for i := 0; i < total; i++ {
 		go func() {
 			defer wg.Done()
 			log.Debug("unique")
@@ -142,9 +143,10 @@ func TestDebugLoggerThreadSafety(t *testing.T) {
 	}
 	wg.Wait()
 	assert.Contains(t, buf.String(), "unique")
-	lines := strings.Split(buf.String(), "\n")
+
 	// Ensure that _some_ lines have been stifled. The actual number is not deterministic.
-	assert.True(t, len(lines) < 100)
+	lines := strings.Split(buf.String(), "\n")
+	assert.True(t, len(lines) < total)
 }
 
 // -----------------------------------------------------------------------------

From 9743b0ebd81c00604b7269625612d6d49f40ed35 Mon Sep 17 00:00:00 2001
From: Shane Utt
Date: Fri, 9 Jul 2021 11:42:57 -0400
Subject: [PATCH 02/18] feat: cachestores convenience method to get obj

---
 pkg/store/store.go | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/pkg/store/store.go b/pkg/store/store.go
index 363d52706a..7c047630ca 100644
--- a/pkg/store/store.go
+++ b/pkg/store/store.go
@@ -184,6 +184,46 @@ func NewCacheStoresFromObjs(objs ...runtime.Object) (CacheStores, error) {
 	return c, nil
 }
 
+// Get checks whether or not there's already some version of the provided object present in the cache.
+func (c CacheStores) Get(obj runtime.Object) (item interface{}, exists bool, err error) {
+	switch obj := obj.(type) {
+	// ----------------------------------------------------------------------------
+	// Kubernetes Core API Support
+	// ----------------------------------------------------------------------------
+	case *extensions.Ingress:
+		return c.IngressV1beta1.Get(obj)
+	case *networkingv1.Ingress:
+		return c.IngressV1.Get(obj)
+	case *corev1.Service:
+		return c.Service.Get(obj)
+	case *corev1.Secret:
+		return c.Secret.Get(obj)
+	case *corev1.Endpoints:
+		return c.Endpoint.Get(obj)
+	// ----------------------------------------------------------------------------
+	// Kong API Support
+	// ----------------------------------------------------------------------------
+	case *kongv1.KongPlugin:
+		return c.Plugin.Get(obj)
+	case *kongv1.KongClusterPlugin:
+		return c.ClusterPlugin.Get(obj)
+	case *kongv1.KongConsumer:
+		return c.Consumer.Get(obj)
+	case *kongv1.KongIngress:
+		return c.KongIngress.Get(obj)
+	case *kongv1beta1.TCPIngress:
+		return c.TCPIngress.Get(obj)
+	case *kongv1beta1.UDPIngress:
+		return c.UDPIngress.Get(obj)
+	// ----------------------------------------------------------------------------
+	// 3rd Party API Support
+	// ----------------------------------------------------------------------------
+	case *knative.Ingress:
+		return c.KnativeIngress.Get(obj)
+	}
+	return nil, false, fmt.Errorf("%T is not a supported cache object type", obj)
+}
+
 // Add stores a provided runtime.Object into the CacheStore if it's of a supported type.
 // The CacheStore must be initialized (see NewCacheStores()) or this will panic.
 func (c CacheStores) Add(obj runtime.Object) error {

From e117a01ce08dfd3bba8330aa5b6ec931aca8bbb5 Mon Sep 17 00:00:00 2001
From: Shane Utt
Date: Fri, 9 Jul 2021 11:43:47 -0400
Subject: [PATCH 03/18] feat: add ObjectExists() check to Proxy interface

---
 railgun/internal/proxy/proxy.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/railgun/internal/proxy/proxy.go b/railgun/internal/proxy/proxy.go
index b503dfbead..83d3593941 100644
--- a/railgun/internal/proxy/proxy.go
+++ b/railgun/internal/proxy/proxy.go
@@ -62,6 +62,9 @@ type Proxy interface {
 	// The delete action will asynchronously be converted to Kong DSL and applied to the Kong Admin API.
 	// A status will later be added to the object whether the configuration update succeeds or fails.
 	DeleteObject(obj client.Object) error
+
+	// ObjectExists indicates whether or not any version of the provided object is already present in the cache.
+	ObjectExists(obj client.Object) (bool, error)
 }
 
 // KongUpdater is a type of function that describes how to provide updates to the Kong Admin API

From 39f31a5b9ae32a26f946c6acce968d8c290f0d44 Mon Sep 17 00:00:00 2001
From: Shane Utt
Date: Fri, 9 Jul 2021 11:44:08 -0400
Subject: [PATCH 04/18] feat: implement new Proxy interface for clientgo cached proxy

---
 .../proxy/clientgo_cached_proxy_resolver.go | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/railgun/internal/proxy/clientgo_cached_proxy_resolver.go b/railgun/internal/proxy/clientgo_cached_proxy_resolver.go
index b839c49c97..f4b8a92507 100644
--- a/railgun/internal/proxy/clientgo_cached_proxy_resolver.go
+++ b/railgun/internal/proxy/clientgo_cached_proxy_resolver.go
@@ -3,6 +3,7 @@ package proxy
 import (
 	"context"
 	"fmt"
+	"sync"
 	"time"
 
 	"github.com/blang/semver/v4"
@@ -74,6 +75,8 @@ func NewCacheBasedProxyWithStagger(ctx context.Context,
 		stagger:    stagger,
 		timeout:    timeout,
 		syncTicker: time.NewTicker(stagger),
+
+		internalCacheLock: &sync.RWMutex{},
 	}
 
 	// initialize the proxy which validates connectivity with the Admin API and
@@ -139,6 +142,9 @@ type clientgoCachedProxyResolver struct {
 	// channels
 	update chan *cachedObject
 	del    chan *cachedObject
+
+	// locks
+	internalCacheLock *sync.RWMutex
 }
 
 // cacheAction indicates what caching action (update, delete) was taken for any particular runtime.Object.
@@ -188,6 +194,13 @@ func (p *clientgoCachedProxyResolver) DeleteObject(obj client.Object) error {
 	}
 }
 
+func (p *clientgoCachedProxyResolver) ObjectExists(obj client.Object) (bool, error) {
+	p.internalCacheLock.RLock()
+	defer p.internalCacheLock.RUnlock()
+	_, exists, err := p.cache.Get(obj)
+	return exists, err
+}
+
 // -----------------------------------------------------------------------------
 // Client Go Cached Proxy Resolver - Private Methods - Cache Server
 // -----------------------------------------------------------------------------

From 4ad392920a82ce6a02cb20544d308e6687dad5c6 Mon Sep 17 00:00:00 2001
From: Shane Utt
Date: Fri, 9 Jul 2021 11:48:21 -0400
Subject: [PATCH 05/18] fix: don't finalize on non-annotated types

This was a leftover issue from early v2 iterations wherein we were just
adding our finalizer to everything. However, we should only be adding our
finalizer to top-level objects which directly influence Kong Admin
configurations, not subordinate objects that are used for reference, as it's
too problematic and noisy to maintain finalizers for such a wide range of
objects.

When we tighten our object reconciliations for some of the upstream types,
we will follow up on this:

https://github.com/Kong/kubernetes-ingress-controller/issues/1259
---
 .../hack/generators/controllers/networking/main.go | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/railgun/hack/generators/controllers/networking/main.go b/railgun/hack/generators/controllers/networking/main.go
index 7649c50aaf..c9f92332a2 100644
--- a/railgun/hack/generators/controllers/networking/main.go
+++ b/railgun/hack/generators/controllers/networking/main.go
@@ -295,6 +295,9 @@ type typeNeeded struct {
 
 	// AcceptsIngressClassNameAnnotation indicates that the object accepts (and the controller will listen to)
 	// the "kubernetes.io/ingress.class" annotation to decide whether or not the object is supported.
+	//
+	// This setting will also indicate whether or not a generated controller will employ a teardown finalizer
+	// to indicate that we need to wait for cache deletion to succeed before allowing Kubernetes GC to remove the obj.
 	AcceptsIngressClassNameAnnotation bool
 
 	// AcceptsIngressClassNameSpec indicates the the object indicates the ingress.class that should support it via
@@ -420,7 +423,11 @@ func (r *{{.PackageAlias}}{{.Type}}Reconciler) Reconcile(ctx context.Context, re
 		if err := r.Proxy.DeleteObject(obj); err != nil {
 			return ctrl.Result{}, err
 		}
+{{- if .AcceptsIngressClassNameAnnotation}}
 		return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj)
+{{- else}}
+		return ctrl.Result{}, nil
+{{- end}}
 	}
 {{if .AcceptsIngressClassNameAnnotation}}
 	// if the object is not configured with our ingress.class, then we need to ensure it's removed from the cache
@@ -431,7 +438,7 @@ func (r *{{.PackageAlias}}{{.Type}}Reconciler) Reconcile(ctx context.Context, re
 		}
 		return ctrl.Result{}, nil
 	}
-{{end}}
+
 	// before we store cache data for this object, ensure that it has our finalizer set
 	if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) {
 		log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name)
@@ -442,7 +449,7 @@ func (r *{{.PackageAlias}}{{.Type}}Reconciler) Reconcile(ctx context.Context, re
 		}
 		return ctrl.Result{Requeue: true}, nil
 	}
-
+{{end}}
 	// update the kong Admin API with the changes
 	log.Info("updating the proxy with new {{.Type}}", "namespace", obj.Namespace, "name", obj.Name)
 	if err := r.Proxy.UpdateObject(obj); err != nil {

From 2cf938b49f0624b75822409219af2ede79c4cdee Mon Sep 17 00:00:00 2001
From: Shane Utt
Date: Fri, 9 Jul 2021 11:54:32 -0400
Subject: [PATCH 06/18] fix: wait for cache drop before completing reconcile

Several objects we reconcile were previously considered "done" after we had
signaled removal of the object from the cache, but that process is
asynchronous and it's a small improvement to check the status of their
removal async prior to considering them completed.
---
 .../generators/controllers/networking/main.go | 20 ++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/railgun/hack/generators/controllers/networking/main.go b/railgun/hack/generators/controllers/networking/main.go
index c9f92332a2..b58e2c0aa0 100644
--- a/railgun/hack/generators/controllers/networking/main.go
+++ b/railgun/hack/generators/controllers/networking/main.go
@@ -413,6 +413,17 @@ func (r *{{.PackageAlias}}{{.Type}}Reconciler) Reconcile(ctx context.Context, re
 	obj := new({{.PackageImportAlias}}.{{.Type}})
 	if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
 		log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name)
+		objectExistsInCache, err := p.Proxy.ObjectExists(obj)
+		if err != nil {
+			return ctrl.Result{}, err
+		}
+		if objectExistsInCache {
+			log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name)
+			if err := r.Proxy.DeleteObject(obj); err != nil {
+				return ctrl.Result{}, err
+			}
+			return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache
+		}
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 	log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name)
@@ -420,9 +431,16 @@ func (r *{{.PackageAlias}}{{.Type}}Reconciler) Reconcile(ctx context.Context, re
 	// clean the object up if it's being deleted
 	if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) {
 		log.Info("resource is being deleted, its configuration will be removed", "type", "{{.Type}}", "namespace", req.Namespace, "name", req.Name)
-		if err := r.Proxy.DeleteObject(obj); err != nil {
+		objectExistsInCache, err := p.Proxy.ObjectExists(obj)
+		if err != nil {
 			return ctrl.Result{}, err
 		}
+		if objectExistsInCache {
+			if err := r.Proxy.DeleteObject(obj); err != nil {
+				return ctrl.Result{}, err
+			}
+			return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache
+		}
 {{- if .AcceptsIngressClassNameAnnotation}}
 		return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj)
 {{- else}}

From b245da86268fbccf4eedd776d5b7df6380211f0c Mon Sep 17 00:00:00 2001
From: Shane Utt
Date: Fri, 9 Jul 2021 11:54:42 -0400
Subject: [PATCH 07/18] chore: run generators

---
 .../configuration/zz_generated_controllers.go | 315 ++++++++++++++----
 1 file changed, 247 insertions(+), 68 deletions(-)

diff --git a/railgun/controllers/configuration/zz_generated_controllers.go b/railgun/controllers/configuration/zz_generated_controllers.go
index f4d6332938..69ee6370c6 100644
--- a/railgun/controllers/configuration/zz_generated_controllers.go
+++ b/railgun/controllers/configuration/zz_generated_controllers.go
@@ -74,6 +74,17 @@ func (r *CoreV1ServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 	obj := new(corev1.Service)
 	if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
 		log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name)
+		objectExistsInCache, err := p.Proxy.ObjectExists(obj)
+		if err != nil {
+			return ctrl.Result{}, err
+		}
+		if objectExistsInCache {
+			log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name)
+			if err := r.Proxy.DeleteObject(obj); err != nil {
+				return ctrl.Result{}, err
+			}
+			return ctrl.Result{Requeue: true}, nil // wait
until the object is no longer present in the cache + } return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name) @@ -81,21 +92,17 @@ func (r *CoreV1ServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Service", "namespace", req.Namespace, "name", req.Name) - if err := r.Proxy.DeleteObject(obj); err != nil { + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { return ctrl.Result{}, err } - return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) - } - - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return ctrl.Result{}, err + if objectExistsInCache { + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } - return ctrl.Result{Requeue: true}, nil + return ctrl.Result{}, nil } // update the kong Admin API with the changes @@ -140,6 +147,17 @@ func (r *CoreV1EndpointsReconciler) Reconcile(ctx context.Context, req ctrl.Requ obj := new(corev1.Endpoints) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { + return ctrl.Result{}, err + } + if objectExistsInCache { + log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name) @@ -147,21 +165,17 @@ func (r *CoreV1EndpointsReconciler) Reconcile(ctx context.Context, req ctrl.Requ // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Endpoints", "namespace", req.Namespace, "name", req.Name) - if err := r.Proxy.DeleteObject(obj); err != nil { + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { return ctrl.Result{}, err } - return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) - } - - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return ctrl.Result{}, err + if objectExistsInCache { + if err := 
r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } - return ctrl.Result{Requeue: true}, nil + return ctrl.Result{}, nil } // update the kong Admin API with the changes @@ -206,6 +220,17 @@ func (r *CoreV1SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request obj := new(corev1.Secret) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { + return ctrl.Result{}, err + } + if objectExistsInCache { + log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name) @@ -213,21 +238,17 @@ func (r *CoreV1SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Secret", "namespace", req.Namespace, "name", req.Name) - if err := r.Proxy.DeleteObject(obj); err != nil { + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { return ctrl.Result{}, err } - return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) - } - - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return ctrl.Result{}, err + if objectExistsInCache { + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } - return ctrl.Result{Requeue: true}, nil + return ctrl.Result{}, nil } // update the kong Admin API with the changes @@ -275,6 +296,17 @@ func (r *NetV1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request obj := new(netv1.Ingress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { + return ctrl.Result{}, err + } + if objectExistsInCache { + log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name) @@ -282,9 +314,16 @@ func (r *NetV1IngressReconciler) 
Reconcile(ctx context.Context, req ctrl.Request // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Ingress", "namespace", req.Namespace, "name", req.Name) - if err := r.Proxy.DeleteObject(obj); err != nil { + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { return ctrl.Result{}, err } + if objectExistsInCache { + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) } @@ -353,6 +392,17 @@ func (r *NetV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re obj := new(netv1beta1.Ingress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { + return ctrl.Result{}, err + } + if objectExistsInCache { + log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name) @@ -360,9 +410,16 @@ func (r *NetV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Ingress", "namespace", req.Namespace, "name", req.Name) - if err := r.Proxy.DeleteObject(obj); err != nil { + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { return ctrl.Result{}, err } + if objectExistsInCache { + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) } @@ -431,6 +488,17 @@ func (r *ExtV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re obj := new(extv1beta1.Ingress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { + return ctrl.Result{}, err + } + if objectExistsInCache { + log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name) @@ -438,9 +506,16 @@ func (r *ExtV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re // clean the object up 
if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Ingress", "namespace", req.Namespace, "name", req.Name) - if err := r.Proxy.DeleteObject(obj); err != nil { + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { return ctrl.Result{}, err } + if objectExistsInCache { + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) } @@ -506,6 +581,17 @@ func (r *KongV1KongIngressReconciler) Reconcile(ctx context.Context, req ctrl.Re obj := new(kongv1.KongIngress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { + return ctrl.Result{}, err + } + if objectExistsInCache { + log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name) @@ -513,21 +599,17 @@ func (r *KongV1KongIngressReconciler) Reconcile(ctx context.Context, req ctrl.Re // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "KongIngress", "namespace", req.Namespace, "name", req.Name) - if err := r.Proxy.DeleteObject(obj); err != nil { + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { return ctrl.Result{}, err } - return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) - } - - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return ctrl.Result{}, err + if objectExistsInCache { + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } - return ctrl.Result{Requeue: true}, nil + return ctrl.Result{}, nil } // update the kong Admin API with the changes @@ -572,6 +654,17 @@ func (r *KongV1KongPluginReconciler) Reconcile(ctx context.Context, req ctrl.Req obj := new(kongv1.KongPlugin) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { + return ctrl.Result{}, err + } + if objectExistsInCache { + log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", 
"namespace", req.Namespace, "name", req.Name) + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name) @@ -579,21 +672,17 @@ func (r *KongV1KongPluginReconciler) Reconcile(ctx context.Context, req ctrl.Req // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "KongPlugin", "namespace", req.Namespace, "name", req.Name) - if err := r.Proxy.DeleteObject(obj); err != nil { + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { return ctrl.Result{}, err } - return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) - } - - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return ctrl.Result{}, err + if objectExistsInCache { + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } - return ctrl.Result{Requeue: true}, nil + return ctrl.Result{}, nil } // update the kong Admin API with the changes @@ -641,6 +730,17 @@ func (r *KongV1KongClusterPluginReconciler) Reconcile(ctx context.Context, req c obj := new(kongv1.KongClusterPlugin) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { + return ctrl.Result{}, err + } + if objectExistsInCache { + log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name) @@ -648,9 +748,16 @@ func (r *KongV1KongClusterPluginReconciler) Reconcile(ctx context.Context, req c // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "KongClusterPlugin", "namespace", req.Namespace, "name", req.Name) - if err := r.Proxy.DeleteObject(obj); err != nil { + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { return ctrl.Result{}, err } + if objectExistsInCache { + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) } @@ -719,6 +826,17 @@ func (r *KongV1KongConsumerReconciler) 
Reconcile(ctx context.Context, req ctrl.R obj := new(kongv1.KongConsumer) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { + return ctrl.Result{}, err + } + if objectExistsInCache { + log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name) @@ -726,9 +844,16 @@ func (r *KongV1KongConsumerReconciler) Reconcile(ctx context.Context, req ctrl.R // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "KongConsumer", "namespace", req.Namespace, "name", req.Name) - if err := r.Proxy.DeleteObject(obj); err != nil { + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { return ctrl.Result{}, err } + if objectExistsInCache { + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) } @@ -797,6 +922,17 @@ func (r *KongV1Beta1TCPIngressReconciler) Reconcile(ctx context.Context, req ctr obj := new(kongv1beta1.TCPIngress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { + return ctrl.Result{}, err + } + if objectExistsInCache { + log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name) @@ -804,9 +940,16 @@ func (r *KongV1Beta1TCPIngressReconciler) Reconcile(ctx context.Context, req ctr // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "TCPIngress", "namespace", req.Namespace, "name", req.Name) - if err := r.Proxy.DeleteObject(obj); err != nil { + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { return ctrl.Result{}, err } + if objectExistsInCache { + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) } @@ -875,6 +1018,17 @@ func (r *KongV1Beta1UDPIngressReconciler) Reconcile(ctx context.Context, req ctr obj := 
new(kongv1beta1.UDPIngress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { + return ctrl.Result{}, err + } + if objectExistsInCache { + log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name) @@ -882,9 +1036,16 @@ func (r *KongV1Beta1UDPIngressReconciler) Reconcile(ctx context.Context, req ctr // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "UDPIngress", "namespace", req.Namespace, "name", req.Name) - if err := r.Proxy.DeleteObject(obj); err != nil { + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { return ctrl.Result{}, err } + if objectExistsInCache { + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) } @@ -953,6 +1114,17 @@ func (r *Knativev1alpha1IngressReconciler) Reconcile(ctx context.Context, req ct obj := new(knativev1alpha1.Ingress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { + return ctrl.Result{}, err + } + if objectExistsInCache { + log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrl.Result{}, client.IgnoreNotFound(err) } log.Info("reconciling resource", "namespace", req.Namespace, "name", req.Name) @@ -960,9 +1132,16 @@ func (r *Knativev1alpha1IngressReconciler) Reconcile(ctx context.Context, req ct // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Ingress", "namespace", req.Namespace, "name", req.Name) - if err := r.Proxy.DeleteObject(obj); err != nil { + objectExistsInCache, err := p.Proxy.ObjectExists(obj) + if err != nil { return ctrl.Result{}, err } + if objectExistsInCache { + if err := r.Proxy.DeleteObject(obj); err != nil { + return ctrl.Result{}, err + } + return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache + } return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) } From f85ba584857f9284cd8b7b9cdd8a44cb199a8aca Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Fri, 9 Jul 2021 11:56:56 -0400 Subject: [PATCH 08/18] FIXME: squash into 
new ctrl updates for exists --- railgun/hack/generators/controllers/networking/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/railgun/hack/generators/controllers/networking/main.go b/railgun/hack/generators/controllers/networking/main.go index b58e2c0aa0..0532a2da0a 100644 --- a/railgun/hack/generators/controllers/networking/main.go +++ b/railgun/hack/generators/controllers/networking/main.go @@ -413,7 +413,7 @@ func (r *{{.PackageAlias}}{{.Type}}Reconciler) Reconcile(ctx context.Context, re obj := new({{.PackageImportAlias}}.{{.Type}}) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -431,7 +431,7 @@ func (r *{{.PackageAlias}}{{.Type}}Reconciler) Reconcile(ctx context.Context, re // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "{{.Type}}", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } From 8203f44b98bef9a088f381f9ac5c3078cda172c2 Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Fri, 9 Jul 2021 11:57:25 -0400 Subject: [PATCH 09/18] FIXME: squash into generators --- .../configuration/zz_generated_controllers.go | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/railgun/controllers/configuration/zz_generated_controllers.go b/railgun/controllers/configuration/zz_generated_controllers.go index 69ee6370c6..e01d77db8d 100644 --- a/railgun/controllers/configuration/zz_generated_controllers.go +++ b/railgun/controllers/configuration/zz_generated_controllers.go @@ -74,7 +74,7 @@ func (r *CoreV1ServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques obj := new(corev1.Service) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -92,7 +92,7 @@ func (r *CoreV1ServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Service", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -147,7 +147,7 @@ func (r *CoreV1EndpointsReconciler) Reconcile(ctx context.Context, req ctrl.Requ obj := new(corev1.Endpoints) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -165,7 +165,7 @@ func (r 
*CoreV1EndpointsReconciler) Reconcile(ctx context.Context, req ctrl.Requ // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Endpoints", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -220,7 +220,7 @@ func (r *CoreV1SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request obj := new(corev1.Secret) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -238,7 +238,7 @@ func (r *CoreV1SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Secret", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -296,7 +296,7 @@ func (r *NetV1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request obj := new(netv1.Ingress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -314,7 +314,7 @@ func (r *NetV1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Ingress", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -392,7 +392,7 @@ func (r *NetV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re obj := new(netv1beta1.Ingress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -410,7 +410,7 @@ func (r *NetV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Ingress", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -488,7 +488,7 @@ func (r *ExtV1Beta1IngressReconciler) 
Reconcile(ctx context.Context, req ctrl.Re obj := new(extv1beta1.Ingress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -506,7 +506,7 @@ func (r *ExtV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Ingress", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -581,7 +581,7 @@ func (r *KongV1KongIngressReconciler) Reconcile(ctx context.Context, req ctrl.Re obj := new(kongv1.KongIngress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -599,7 +599,7 @@ func (r *KongV1KongIngressReconciler) Reconcile(ctx context.Context, req ctrl.Re // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "KongIngress", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -654,7 +654,7 @@ func (r *KongV1KongPluginReconciler) Reconcile(ctx context.Context, req ctrl.Req obj := new(kongv1.KongPlugin) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -672,7 +672,7 @@ func (r *KongV1KongPluginReconciler) Reconcile(ctx context.Context, req ctrl.Req // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "KongPlugin", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -730,7 +730,7 @@ func (r *KongV1KongClusterPluginReconciler) Reconcile(ctx context.Context, req c obj := new(kongv1.KongClusterPlugin) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -748,7 +748,7 @@ func (r *KongV1KongClusterPluginReconciler) Reconcile(ctx context.Context, req c // clean the object up 
if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "KongClusterPlugin", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -826,7 +826,7 @@ func (r *KongV1KongConsumerReconciler) Reconcile(ctx context.Context, req ctrl.R obj := new(kongv1.KongConsumer) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -844,7 +844,7 @@ func (r *KongV1KongConsumerReconciler) Reconcile(ctx context.Context, req ctrl.R // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "KongConsumer", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -922,7 +922,7 @@ func (r *KongV1Beta1TCPIngressReconciler) Reconcile(ctx context.Context, req ctr obj := new(kongv1beta1.TCPIngress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -940,7 +940,7 @@ func (r *KongV1Beta1TCPIngressReconciler) Reconcile(ctx context.Context, req ctr // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "TCPIngress", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -1018,7 +1018,7 @@ func (r *KongV1Beta1UDPIngressReconciler) Reconcile(ctx context.Context, req ctr obj := new(kongv1beta1.UDPIngress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -1036,7 +1036,7 @@ func (r *KongV1Beta1UDPIngressReconciler) Reconcile(ctx context.Context, req ctr // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "UDPIngress", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -1114,7 +1114,7 @@ func (r *Knativev1alpha1IngressReconciler) Reconcile(ctx context.Context, req ct obj := 
new(knativev1alpha1.Ingress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } @@ -1132,7 +1132,7 @@ func (r *Knativev1alpha1IngressReconciler) Reconcile(ctx context.Context, req ct // clean the object up if it's being deleted if !obj.DeletionTimestamp.IsZero() && time.Now().After(obj.DeletionTimestamp.Time) { log.Info("resource is being deleted, its configuration will be removed", "type", "Ingress", "namespace", req.Namespace, "name", req.Name) - objectExistsInCache, err := p.Proxy.ObjectExists(obj) + objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } From d1c233f4f37d7848d103b9019a3be7ff29a6b1f2 Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Fri, 9 Jul 2021 12:06:54 -0400 Subject: [PATCH 10/18] chore: add isolated namespace to ingress integration tests --- .../test/integration/ingress_https_test.go | 42 ++++++++++++++++- railgun/test/integration/ingress_test.go | 46 +++++++++++++++++-- railgun/test/integration/suite_test.go | 1 + 3 files changed, 83 insertions(+), 6 deletions(-) diff --git a/railgun/test/integration/ingress_https_test.go b/railgun/test/integration/ingress_https_test.go index a4c033643c..b239f855ad 100644 --- a/railgun/test/integration/ingress_https_test.go +++ b/railgun/test/integration/ingress_https_test.go @@ -135,10 +135,29 @@ func TestHTTPSRedirect(t *testing.T) { ctx := context.Background() opts := metav1.CreateOptions{} + t.Logf("creating namespace %s for testing", testIngressNamespace) + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testIngressNamespace}} + ns, err := env.Cluster().Client().CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + require.NoError(t, err) + + defer func() { + t.Logf("cleaning up namespace %s", testIngressNamespace) + require.NoError(t, env.Cluster().Client().CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{})) + require.Eventually(t, func() bool { + _, err := env.Cluster().Client().CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return true + } + } + return false + }, ingressWait, waitTick) + }() + t.Log("creating an HTTP container via deployment to test redirect functionality") container := generators.NewContainer("alsohttpbin", httpBinImage, 80) deployment := generators.NewDeploymentForContainer(container) - _, err := env.Cluster().Client().AppsV1().Deployments(corev1.NamespaceDefault).Create(ctx, deployment, opts) + _, err = env.Cluster().Client().AppsV1().Deployments(corev1.NamespaceDefault).Create(ctx, deployment, opts) assert.NoError(t, err) defer func() { @@ -213,10 +232,29 @@ func TestHTTPSIngress(t *testing.T) { Transport: &testTransport, } + t.Logf("creating namespace %s for testing", testIngressNamespace) + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testIngressNamespace}} + ns, err := env.Cluster().Client().CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + require.NoError(t, err) + + defer func() { + t.Logf("cleaning up namespace %s", testIngressNamespace) + require.NoError(t, env.Cluster().Client().CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{})) + require.Eventually(t, func() bool { + _, err := 
env.Cluster().Client().CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return true + } + } + return false + }, ingressWait, waitTick) + }() + t.Log("deploying a minimal HTTP container deployment to test Ingress routes") container := generators.NewContainer("httpbin", httpBinImage, 80) deployment := generators.NewDeploymentForContainer(container) - deployment, err := env.Cluster().Client().AppsV1().Deployments(corev1.NamespaceDefault).Create(ctx, deployment, metav1.CreateOptions{}) + deployment, err = env.Cluster().Client().AppsV1().Deployments(corev1.NamespaceDefault).Create(ctx, deployment, metav1.CreateOptions{}) assert.NoError(t, err) defer func() { diff --git a/railgun/test/integration/ingress_test.go b/railgun/test/integration/ingress_test.go index 88d5a1e6a8..01a302e155 100644 --- a/railgun/test/integration/ingress_test.go +++ b/railgun/test/integration/ingress_test.go @@ -21,14 +21,34 @@ import ( "github.com/kong/kubernetes-testing-framework/pkg/utils/kubernetes/generators" ) +var testIngressNamespace = "ingress" + func TestIngressEssentials(t *testing.T) { ctx := context.Background() - testIngressNamespace := corev1.NamespaceDefault + + t.Logf("creating namespace %s for testing", testIngressNamespace) + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testIngressNamespace}} + ns, err := env.Cluster().Client().CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + require.NoError(t, err) + + defer func() { + t.Logf("cleaning up namespace %s", testIngressNamespace) + require.NoError(t, env.Cluster().Client().CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{})) + require.Eventually(t, func() bool { + _, err := env.Cluster().Client().CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return true + } + } + return false + }, ingressWait, waitTick) + }() t.Log("deploying a minimal HTTP container deployment to test Ingress routes") container := generators.NewContainer("httpbin", httpBinImage, 80) deployment := generators.NewDeploymentForContainer(container) - deployment, err := env.Cluster().Client().AppsV1().Deployments(testIngressNamespace).Create(ctx, deployment, metav1.CreateOptions{}) + deployment, err = env.Cluster().Client().AppsV1().Deployments(testIngressNamespace).Create(ctx, deployment, metav1.CreateOptions{}) require.NoError(t, err) defer func() { @@ -148,12 +168,30 @@ func TestIngressEssentials(t *testing.T) { func TestIngressClassNameSpec(t *testing.T) { ctx := context.Background() - testIngressNamespace := corev1.NamespaceDefault + + t.Logf("creating namespace %s for testing", testIngressNamespace) + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testIngressNamespace}} + ns, err := env.Cluster().Client().CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + require.NoError(t, err) + + defer func() { + t.Logf("cleaning up namespace %s", testIngressNamespace) + require.NoError(t, env.Cluster().Client().CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{})) + require.Eventually(t, func() bool { + _, err := env.Cluster().Client().CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return true + } + } + return false + }, ingressWait, waitTick) + }() t.Log("deploying a minimal HTTP container deployment to test Ingress routes using the IngressClassName spec") container := generators.NewContainer("httpbin", httpBinImage, 80) 
deployment := generators.NewDeploymentForContainer(container) - deployment, err := env.Cluster().Client().AppsV1().Deployments(testIngressNamespace).Create(ctx, deployment, metav1.CreateOptions{}) + deployment, err = env.Cluster().Client().AppsV1().Deployments(testIngressNamespace).Create(ctx, deployment, metav1.CreateOptions{}) require.NoError(t, err) defer func() { diff --git a/railgun/test/integration/suite_test.go b/railgun/test/integration/suite_test.go index d7e413439c..ec0b9df241 100644 --- a/railgun/test/integration/suite_test.go +++ b/railgun/test/integration/suite_test.go @@ -75,6 +75,7 @@ var ( watchNamespaces = strings.Join([]string{ elsewhere, corev1.NamespaceDefault, + testIngressNamespace, testTCPIngressNamespace, testUDPIngressNamespace, }, ",") From e200a98670637b07ee3739654582ae6f3df34ed5 Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Fri, 9 Jul 2021 12:07:12 -0400 Subject: [PATCH 11/18] fix: ensure ns deletion for tcp/udp tests --- railgun/test/integration/tcpingress_test.go | 15 ++++++++++++--- railgun/test/integration/udpingress_test.go | 14 ++++++++++++-- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/railgun/test/integration/tcpingress_test.go b/railgun/test/integration/tcpingress_test.go index 9fe04904d0..30e8595aed 100644 --- a/railgun/test/integration/tcpingress_test.go +++ b/railgun/test/integration/tcpingress_test.go @@ -26,7 +26,7 @@ import ( const testTCPIngressNamespace = "tcpingress" -func TestTCPIngress(t *testing.T) { +func TestTCPIngressEssentials(t *testing.T) { t.Log("setting up the TCPIngress tests") testName := "tcpingress" c, err := clientset.NewForConfig(env.Cluster().Config()) @@ -34,14 +34,23 @@ func TestTCPIngress(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), ingressWait) defer cancel() - t.Logf("creating namespace %s for testing TCPIngress", testTCPIngressNamespace) + t.Logf("creating namespace %s for testing", testTCPIngressNamespace) ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testTCPIngressNamespace}} ns, err = env.Cluster().Client().CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) require.NoError(t, err) defer func() { t.Logf("cleaning up namespace %s", testTCPIngressNamespace) - assert.NoError(t, env.Cluster().Client().CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{})) + require.NoError(t, env.Cluster().Client().CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{})) + require.Eventually(t, func() bool { + _, err := env.Cluster().Client().CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return true + } + } + return false + }, ingressWait, waitTick) }() t.Log("deploying a minimal HTTP container deployment to test Ingress routes") diff --git a/railgun/test/integration/udpingress_test.go b/railgun/test/integration/udpingress_test.go index 33f89a55bc..8734d58aa9 100644 --- a/railgun/test/integration/udpingress_test.go +++ b/railgun/test/integration/udpingress_test.go @@ -25,7 +25,7 @@ import ( const testUDPIngressNamespace = "udpingress" -func TestUDPIngress(t *testing.T) { +func TestUDPIngressEssentials(t *testing.T) { // TODO: once KIC 2.0 lands and pre v2 is gone, we can remove this check if useLegacyKIC() { t.Skip("legacy KIC does not support UDPIngress, skipping") @@ -38,10 +38,20 @@ func TestUDPIngress(t *testing.T) { t.Logf("creating namespace %s for testing", testUDPIngressNamespace) ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testUDPIngressNamespace}} ns, err 
:= env.Cluster().Client().CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + require.NoError(t, err) defer func() { t.Logf("cleaning up namespace %s", testUDPIngressNamespace) - assert.NoError(t, env.Cluster().Client().CoreV1().Namespaces().Delete(ctx, testUDPIngressNamespace, metav1.DeleteOptions{})) + require.NoError(t, env.Cluster().Client().CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{})) + require.Eventually(t, func() bool { + _, err := env.Cluster().Client().CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return true + } + } + return false + }, ingressWait, waitTick) }() t.Log("configuring coredns corefile") From 7925691d2a4da6aab120316125bb61eee5ad59b2 Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Fri, 9 Jul 2021 14:23:21 -0400 Subject: [PATCH 12/18] fix: don't use extra test timeout This timeout used to be long enough, but it technically wasn't even correct: we will rely on the go test timeout setting instead. --- railgun/test/integration/suite_test.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/railgun/test/integration/suite_test.go b/railgun/test/integration/suite_test.go index ec0b9df241..362cf66aeb 100644 --- a/railgun/test/integration/suite_test.go +++ b/railgun/test/integration/suite_test.go @@ -33,9 +33,6 @@ import ( // ----------------------------------------------------------------------------- const ( - // clusterDeployWait is the timeout duration for deploying the kind cluster for testing - clusterDeployWait = time.Minute * 5 - // waitTick is the default timeout tick interval for checking on ingress resources. waitTick = time.Second * 1 @@ -103,7 +100,7 @@ var ( func TestMain(m *testing.M) { var err error - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(clusterDeployWait)) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() fmt.Println("INFO: configuring testing environment") From 71e9d2ec57a16849d2d39e40a773648f823e67a6 Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Fri, 9 Jul 2021 14:24:00 -0400 Subject: [PATCH 13/18] chore: lower go test timeout At the time of writing, 15 minutes should be ample time for the tests to complete overall, and we would want to know sooner if something was making this take longer than we expect. --- railgun/Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/railgun/Makefile b/railgun/Makefile index a2f8fdfeec..fcb4a0cfd3 100644 --- a/railgun/Makefile +++ b/railgun/Makefile @@ -166,7 +166,7 @@ test.integration: test.integration.dbless test.integration.postgres .PHONY: test.integration.dbless test.integration.dbless: @./scripts/setup-integration-tests.sh - @TEST_DATABASE_MODE="off" GOFLAGS="-tags=integration_tests" go test -timeout 20m -race -v -count=1 -covermode=atomic -coverpkg=$(PKG_LIST) -coverprofile=$(COVERAGE_INTEGRATION_PROFILE) ./test/integration/ + @TEST_DATABASE_MODE="off" GOFLAGS="-tags=integration_tests" go test -timeout 15m -race -v -count=1 -covermode=atomic -coverpkg=$(PKG_LIST) -coverprofile=$(COVERAGE_INTEGRATION_PROFILE) ./test/integration/ # Our integration tests using the postgres backend, with verbose output # TODO: race checking has been temporarily turned off because of race conditions found with deck. This will be resolved in an upcoming Alpha release of KIC 2.0.
@@ -174,10 +174,10 @@ test.integration.dbless: .PHONY: test.integration.postgres test.integration.postgres: @./scripts/setup-integration-tests.sh - @TEST_DATABASE_MODE="postgres" GOFLAGS="-tags=integration_tests" go test -timeout 20m -v -count=1 -covermode=atomic -coverpkg=$(PKG_LIST) -coverprofile=$(COVERAGE_INTEGRATION_PROFILE) ./test/integration/ + @TEST_DATABASE_MODE="postgres" GOFLAGS="-tags=integration_tests" go test -timeout 15m -v -count=1 -covermode=atomic -coverpkg=$(PKG_LIST) -coverprofile=$(COVERAGE_INTEGRATION_PROFILE) ./test/integration/ # Our integration tests using the legacy v1 controller manager .PHONY: test.integration.legacy test.integration.legacy: @./scripts/setup-integration-tests.sh - @KONG_LEGACY_CONTROLLER=1 GOFLAGS="-tags=integration_tests" go test -timeout 20m -race -v -count=1 -covermode=atomic -coverpkg=$(PKG_LIST) -coverprofile=$(COVERAGE_INTEGRATION_PROFILE) ./test/integration/ + @KONG_LEGACY_CONTROLLER=1 GOFLAGS="-tags=integration_tests" go test -timeout 15m -race -v -count=1 -covermode=atomic -coverpkg=$(PKG_LIST) -coverprofile=$(COVERAGE_INTEGRATION_PROFILE) ./test/integration/ From 1eaeda3c834de3bd0362dd04d84329dd807c5dcf Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Fri, 9 Jul 2021 14:42:42 -0400 Subject: [PATCH 14/18] fix: typo in proxy interface docs --- railgun/internal/proxy/proxy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/railgun/internal/proxy/proxy.go b/railgun/internal/proxy/proxy.go index 83d3593941..3cea1d1a72 100644 --- a/railgun/internal/proxy/proxy.go +++ b/railgun/internal/proxy/proxy.go @@ -63,7 +63,7 @@ type Proxy interface { // A status will later be added to the object whether the configuration update succeeds or fails. DeleteObject(obj client.Object) error - // ObjectExists indicates whether or not any version of the provided object is already present in the cache. + // ObjectExists indicates whether or not any version of the provided object is already present in the proxy. ObjectExists(obj client.Object) (bool, error) } From 1553b0fad3866dc9ba12b03ec109077c4c306f02 Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Fri, 9 Jul 2021 23:13:23 -0400 Subject: [PATCH 15/18] fix: remove finalizer handling Originally we added finalizers with the intention of holding back object deletion until we were certain that we had removed the relevant configuration from the Kong Admin API. Ultimately, at the time of writing, it's not feasible to get this done, so this removes the half of the implementation that was causing us complexity without adding any value.
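For context, the delete path in the generated controllers now leans on the proxy cache check introduced earlier in this series instead of a finalizer. A rough sketch of that flow (simplified from the generated Reconcile implementations, with error handling trimmed):

    if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
        // the object is gone from the Kubernetes API: if the proxy cache still
        // holds a copy, remove it and requeue until the cache catches up
        objectExistsInCache, err := r.Proxy.ObjectExists(obj)
        if err != nil {
            return ctrl.Result{}, err
        }
        if objectExistsInCache {
            if err := r.Proxy.DeleteObject(obj); err != nil {
                return ctrl.Result{}, err
            }
            return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache
        }
        return ctrl.Result{}, nil
    }

Because the object is never held back by a finalizer, Kubernetes garbage collection proceeds immediately and the controller only has to reconcile its own cache afterwards.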
--- railgun/config/rbac/role.yaml | 144 ------------------ .../configuration/zz_generated_controllers.go | 130 +--------------- .../generators/controllers/networking/main.go | 20 --- railgun/internal/ctrlutils/utils.go | 38 ----- railgun/internal/ctrlutils/vars.go | 8 - 5 files changed, 8 insertions(+), 332 deletions(-) delete mode 100644 railgun/internal/ctrlutils/vars.go diff --git a/railgun/config/rbac/role.yaml b/railgun/config/rbac/role.yaml index 37c6f1deee..248b22ed44 100644 --- a/railgun/config/rbac/role.yaml +++ b/railgun/config/rbac/role.yaml @@ -13,12 +13,6 @@ rules: verbs: - list - watch -- apiGroups: - - "" - resources: - - endpoints/finalizers - verbs: - - update - apiGroups: - "" resources: @@ -56,12 +50,6 @@ rules: verbs: - list - watch -- apiGroups: - - "" - resources: - - secrets/finalizers - verbs: - - update - apiGroups: - "" resources: @@ -78,12 +66,6 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - services/finalizers - verbs: - - update - apiGroups: - "" resources: @@ -100,12 +82,6 @@ rules: - get - list - watch -- apiGroups: - - apiextensions.k8s.io - resources: - - ingresses/finalizers - verbs: - - update - apiGroups: - apiextensions.k8s.io resources: @@ -122,12 +98,6 @@ rules: - get - list - watch -- apiGroups: - - configuration.konghq.com - resources: - - kongclusterplugins/finalizers - verbs: - - update - apiGroups: - configuration.konghq.com resources: @@ -144,12 +114,6 @@ rules: - get - list - watch -- apiGroups: - - configuration.konghq.com - resources: - - kongconsumers/finalizers - verbs: - - update - apiGroups: - configuration.konghq.com resources: @@ -166,12 +130,6 @@ rules: - get - list - watch -- apiGroups: - - configuration.konghq.com - resources: - - kongingresses/finalizers - verbs: - - update - apiGroups: - configuration.konghq.com resources: @@ -188,12 +146,6 @@ rules: - get - list - watch -- apiGroups: - - configuration.konghq.com - resources: - - kongplugins/finalizers - verbs: - - update - apiGroups: - configuration.konghq.com resources: @@ -210,12 +162,6 @@ rules: - get - list - watch -- apiGroups: - - configuration.konghq.com - resources: - - tcpingresses/finalizers - verbs: - - update - apiGroups: - configuration.konghq.com resources: @@ -232,12 +178,6 @@ rules: - get - list - watch -- apiGroups: - - configuration.konghq.com - resources: - - udpingresses/finalizers - verbs: - - update - apiGroups: - configuration.konghq.com resources: @@ -254,12 +194,6 @@ rules: - get - list - watch -- apiGroups: - - networking.internal.knative.dev - resources: - - ingresses/finalizers - verbs: - - update - apiGroups: - networking.internal.knative.dev resources: @@ -276,12 +210,6 @@ rules: - get - list - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses/finalizers - verbs: - - update - apiGroups: - networking.k8s.io resources: @@ -306,12 +234,6 @@ rules: verbs: - list - watch -- apiGroups: - - "" - resources: - - endpoints/finalizers - verbs: - - update - apiGroups: - "" resources: @@ -349,12 +271,6 @@ rules: verbs: - list - watch -- apiGroups: - - "" - resources: - - secrets/finalizers - verbs: - - update - apiGroups: - "" resources: @@ -371,12 +287,6 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - services/finalizers - verbs: - - update - apiGroups: - "" resources: @@ -393,12 +303,6 @@ rules: - get - list - watch -- apiGroups: - - apiextensions.k8s.io - resources: - - ingresses/finalizers - verbs: - - update - apiGroups: - apiextensions.k8s.io resources: @@ -415,12 +319,6 @@ rules: - get - list - 
watch -- apiGroups: - - configuration.konghq.com - resources: - - kongclusterplugins/finalizers - verbs: - - update - apiGroups: - configuration.konghq.com resources: @@ -437,12 +335,6 @@ rules: - get - list - watch -- apiGroups: - - configuration.konghq.com - resources: - - kongconsumers/finalizers - verbs: - - update - apiGroups: - configuration.konghq.com resources: @@ -459,12 +351,6 @@ rules: - get - list - watch -- apiGroups: - - configuration.konghq.com - resources: - - kongingresses/finalizers - verbs: - - update - apiGroups: - configuration.konghq.com resources: @@ -481,12 +367,6 @@ rules: - get - list - watch -- apiGroups: - - configuration.konghq.com - resources: - - kongplugins/finalizers - verbs: - - update - apiGroups: - configuration.konghq.com resources: @@ -503,12 +383,6 @@ rules: - get - list - watch -- apiGroups: - - configuration.konghq.com - resources: - - tcpingresses/finalizers - verbs: - - update - apiGroups: - configuration.konghq.com resources: @@ -525,12 +399,6 @@ rules: - get - list - watch -- apiGroups: - - configuration.konghq.com - resources: - - udpingresses/finalizers - verbs: - - update - apiGroups: - configuration.konghq.com resources: @@ -547,12 +415,6 @@ rules: - get - list - watch -- apiGroups: - - networking.internal.knative.dev - resources: - - ingresses/finalizers - verbs: - - update - apiGroups: - networking.internal.knative.dev resources: @@ -569,12 +431,6 @@ rules: - get - list - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses/finalizers - verbs: - - update - apiGroups: - networking.k8s.io resources: diff --git a/railgun/controllers/configuration/zz_generated_controllers.go b/railgun/controllers/configuration/zz_generated_controllers.go index e01d77db8d..9288981fa3 100644 --- a/railgun/controllers/configuration/zz_generated_controllers.go +++ b/railgun/controllers/configuration/zz_generated_controllers.go @@ -61,10 +61,8 @@ func (r *CoreV1ServiceReconciler) SetupWithManager(mgr ctrl.Manager) error { //+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch //+kubebuilder:rbac:groups="",resources=services/status,verbs=get;update;patch -//+kubebuilder:rbac:groups="",resources=services/finalizers,verbs=update //+kubebuilder:rbac:groups="",namespace=CHANGEME,resources=services,verbs=get;list;watch //+kubebuilder:rbac:groups="",namespace=CHANGEME,resources=services/status,verbs=get;update;patch -//+kubebuilder:rbac:groups="",namespace=CHANGEME,resources=services/finalizers,verbs=update // Reconcile processes the watched objects func (r *CoreV1ServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -134,10 +132,8 @@ func (r *CoreV1EndpointsReconciler) SetupWithManager(mgr ctrl.Manager) error { //+kubebuilder:rbac:groups="",resources=endpoints,verbs=list;watch //+kubebuilder:rbac:groups="",resources=endpoints/status,verbs=get;update;patch -//+kubebuilder:rbac:groups="",resources=endpoints/finalizers,verbs=update //+kubebuilder:rbac:groups="",namespace=CHANGEME,resources=endpoints,verbs=list;watch //+kubebuilder:rbac:groups="",namespace=CHANGEME,resources=endpoints/status,verbs=get;update;patch -//+kubebuilder:rbac:groups="",namespace=CHANGEME,resources=endpoints/finalizers,verbs=update // Reconcile processes the watched objects func (r *CoreV1EndpointsReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -207,10 +203,8 @@ func (r *CoreV1SecretReconciler) SetupWithManager(mgr ctrl.Manager) error { 
//+kubebuilder:rbac:groups="",resources=secrets,verbs=list;watch //+kubebuilder:rbac:groups="",resources=secrets/status,verbs=get;update;patch -//+kubebuilder:rbac:groups="",resources=secrets/finalizers,verbs=update //+kubebuilder:rbac:groups="",namespace=CHANGEME,resources=secrets,verbs=list;watch //+kubebuilder:rbac:groups="",namespace=CHANGEME,resources=secrets/status,verbs=get;update;patch -//+kubebuilder:rbac:groups="",namespace=CHANGEME,resources=secrets/finalizers,verbs=update // Reconcile processes the watched objects func (r *CoreV1SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -283,10 +277,8 @@ func (r *NetV1IngressReconciler) SetupWithManager(mgr ctrl.Manager) error { //+kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch //+kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses/finalizers,verbs=update //+kubebuilder:rbac:groups=networking.k8s.io,namespace=CHANGEME,resources=ingresses,verbs=get;list;watch //+kubebuilder:rbac:groups=networking.k8s.io,namespace=CHANGEME,resources=ingresses/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=networking.k8s.io,namespace=CHANGEME,resources=ingresses/finalizers,verbs=update // Reconcile processes the watched objects func (r *NetV1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -324,7 +316,7 @@ func (r *NetV1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request } return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } - return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) + return ctrl.Result{}, nil } // if the object is not configured with our ingress.class, then we need to ensure it's removed from the cache @@ -336,17 +328,6 @@ func (r *NetV1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request return ctrl.Result{}, nil } - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{Requeue: true}, nil - } - // update the kong Admin API with the changes log.Info("updating the proxy with new Ingress", "namespace", obj.Namespace, "name", obj.Name) if err := r.Proxy.UpdateObject(obj); err != nil { @@ -379,10 +360,8 @@ func (r *NetV1Beta1IngressReconciler) SetupWithManager(mgr ctrl.Manager) error { //+kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch //+kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses/finalizers,verbs=update //+kubebuilder:rbac:groups=networking.k8s.io,namespace=CHANGEME,resources=ingresses,verbs=get;list;watch //+kubebuilder:rbac:groups=networking.k8s.io,namespace=CHANGEME,resources=ingresses/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=networking.k8s.io,namespace=CHANGEME,resources=ingresses/finalizers,verbs=update // Reconcile processes the watched objects func (r *NetV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) 
{ @@ -420,7 +399,7 @@ func (r *NetV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re } return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } - return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) + return ctrl.Result{}, nil } // if the object is not configured with our ingress.class, then we need to ensure it's removed from the cache @@ -432,17 +411,6 @@ func (r *NetV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil } - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{Requeue: true}, nil - } - // update the kong Admin API with the changes log.Info("updating the proxy with new Ingress", "namespace", obj.Namespace, "name", obj.Name) if err := r.Proxy.UpdateObject(obj); err != nil { @@ -475,10 +443,8 @@ func (r *ExtV1Beta1IngressReconciler) SetupWithManager(mgr ctrl.Manager) error { //+kubebuilder:rbac:groups=apiextensions.k8s.io,resources=ingresses,verbs=get;list;watch //+kubebuilder:rbac:groups=apiextensions.k8s.io,resources=ingresses/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=apiextensions.k8s.io,resources=ingresses/finalizers,verbs=update //+kubebuilder:rbac:groups=apiextensions.k8s.io,namespace=CHANGEME,resources=ingresses,verbs=get;list;watch //+kubebuilder:rbac:groups=apiextensions.k8s.io,namespace=CHANGEME,resources=ingresses/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=apiextensions.k8s.io,namespace=CHANGEME,resources=ingresses/finalizers,verbs=update // Reconcile processes the watched objects func (r *ExtV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -516,7 +482,7 @@ func (r *ExtV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re } return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } - return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) + return ctrl.Result{}, nil } // if the object is not configured with our ingress.class, then we need to ensure it's removed from the cache @@ -528,17 +494,6 @@ func (r *ExtV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil } - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{Requeue: true}, nil - } - // update the kong Admin API with the changes log.Info("updating the proxy with new Ingress", "namespace", obj.Namespace, "name", obj.Name) if err := r.Proxy.UpdateObject(obj); err != nil { @@ -568,10 +523,8 @@ func (r *KongV1KongIngressReconciler) SetupWithManager(mgr ctrl.Manager) error { //+kubebuilder:rbac:groups=configuration.konghq.com,resources=kongingresses,verbs=get;list;watch 
//+kubebuilder:rbac:groups=configuration.konghq.com,resources=kongingresses/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=configuration.konghq.com,resources=kongingresses/finalizers,verbs=update //+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=kongingresses,verbs=get;list;watch //+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=kongingresses/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=kongingresses/finalizers,verbs=update // Reconcile processes the watched objects func (r *KongV1KongIngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -641,10 +594,8 @@ func (r *KongV1KongPluginReconciler) SetupWithManager(mgr ctrl.Manager) error { //+kubebuilder:rbac:groups=configuration.konghq.com,resources=kongplugins,verbs=get;list;watch //+kubebuilder:rbac:groups=configuration.konghq.com,resources=kongplugins/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=configuration.konghq.com,resources=kongplugins/finalizers,verbs=update //+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=kongplugins,verbs=get;list;watch //+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=kongplugins/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=kongplugins/finalizers,verbs=update // Reconcile processes the watched objects func (r *KongV1KongPluginReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -717,10 +668,8 @@ func (r *KongV1KongClusterPluginReconciler) SetupWithManager(mgr ctrl.Manager) e //+kubebuilder:rbac:groups=configuration.konghq.com,resources=kongclusterplugins,verbs=get;list;watch //+kubebuilder:rbac:groups=configuration.konghq.com,resources=kongclusterplugins/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=configuration.konghq.com,resources=kongclusterplugins/finalizers,verbs=update //+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=kongclusterplugins,verbs=get;list;watch //+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=kongclusterplugins/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=kongclusterplugins/finalizers,verbs=update // Reconcile processes the watched objects func (r *KongV1KongClusterPluginReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -758,7 +707,7 @@ func (r *KongV1KongClusterPluginReconciler) Reconcile(ctx context.Context, req c } return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } - return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) + return ctrl.Result{}, nil } // if the object is not configured with our ingress.class, then we need to ensure it's removed from the cache @@ -770,17 +719,6 @@ func (r *KongV1KongClusterPluginReconciler) Reconcile(ctx context.Context, req c return ctrl.Result{}, nil } - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return 
ctrl.Result{}, err - } - return ctrl.Result{Requeue: true}, nil - } - // update the kong Admin API with the changes log.Info("updating the proxy with new KongClusterPlugin", "namespace", obj.Namespace, "name", obj.Name) if err := r.Proxy.UpdateObject(obj); err != nil { @@ -813,10 +751,8 @@ func (r *KongV1KongConsumerReconciler) SetupWithManager(mgr ctrl.Manager) error //+kubebuilder:rbac:groups=configuration.konghq.com,resources=kongconsumers,verbs=get;list;watch //+kubebuilder:rbac:groups=configuration.konghq.com,resources=kongconsumers/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=configuration.konghq.com,resources=kongconsumers/finalizers,verbs=update //+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=kongconsumers,verbs=get;list;watch //+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=kongconsumers/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=kongconsumers/finalizers,verbs=update // Reconcile processes the watched objects func (r *KongV1KongConsumerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -854,7 +790,7 @@ func (r *KongV1KongConsumerReconciler) Reconcile(ctx context.Context, req ctrl.R } return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } - return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) + return ctrl.Result{}, nil } // if the object is not configured with our ingress.class, then we need to ensure it's removed from the cache @@ -866,17 +802,6 @@ func (r *KongV1KongConsumerReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, nil } - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{Requeue: true}, nil - } - // update the kong Admin API with the changes log.Info("updating the proxy with new KongConsumer", "namespace", obj.Namespace, "name", obj.Name) if err := r.Proxy.UpdateObject(obj); err != nil { @@ -909,10 +834,8 @@ func (r *KongV1Beta1TCPIngressReconciler) SetupWithManager(mgr ctrl.Manager) err //+kubebuilder:rbac:groups=configuration.konghq.com,resources=tcpingresses,verbs=get;list;watch //+kubebuilder:rbac:groups=configuration.konghq.com,resources=tcpingresses/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=configuration.konghq.com,resources=tcpingresses/finalizers,verbs=update //+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=tcpingresses,verbs=get;list;watch //+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=tcpingresses/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=tcpingresses/finalizers,verbs=update // Reconcile processes the watched objects func (r *KongV1Beta1TCPIngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -950,7 +873,7 @@ func (r *KongV1Beta1TCPIngressReconciler) Reconcile(ctx context.Context, req ctr } return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } - return 
ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) + return ctrl.Result{}, nil } // if the object is not configured with our ingress.class, then we need to ensure it's removed from the cache @@ -962,17 +885,6 @@ func (r *KongV1Beta1TCPIngressReconciler) Reconcile(ctx context.Context, req ctr return ctrl.Result{}, nil } - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{Requeue: true}, nil - } - // update the kong Admin API with the changes log.Info("updating the proxy with new TCPIngress", "namespace", obj.Namespace, "name", obj.Name) if err := r.Proxy.UpdateObject(obj); err != nil { @@ -1005,10 +917,8 @@ func (r *KongV1Beta1UDPIngressReconciler) SetupWithManager(mgr ctrl.Manager) err //+kubebuilder:rbac:groups=configuration.konghq.com,resources=udpingresses,verbs=get;list;watch //+kubebuilder:rbac:groups=configuration.konghq.com,resources=udpingresses/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=configuration.konghq.com,resources=udpingresses/finalizers,verbs=update //+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=udpingresses,verbs=get;list;watch //+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=udpingresses/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=configuration.konghq.com,namespace=CHANGEME,resources=udpingresses/finalizers,verbs=update // Reconcile processes the watched objects func (r *KongV1Beta1UDPIngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -1046,7 +956,7 @@ func (r *KongV1Beta1UDPIngressReconciler) Reconcile(ctx context.Context, req ctr } return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } - return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) + return ctrl.Result{}, nil } // if the object is not configured with our ingress.class, then we need to ensure it's removed from the cache @@ -1058,17 +968,6 @@ func (r *KongV1Beta1UDPIngressReconciler) Reconcile(ctx context.Context, req ctr return ctrl.Result{}, nil } - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{Requeue: true}, nil - } - // update the kong Admin API with the changes log.Info("updating the proxy with new UDPIngress", "namespace", obj.Namespace, "name", obj.Name) if err := r.Proxy.UpdateObject(obj); err != nil { @@ -1101,10 +1000,8 @@ func (r *Knativev1alpha1IngressReconciler) SetupWithManager(mgr ctrl.Manager) er //+kubebuilder:rbac:groups=networking.internal.knative.dev,resources=ingresses,verbs=get;list;watch //+kubebuilder:rbac:groups=networking.internal.knative.dev,resources=ingresses/status,verbs=get;update;patch 
-//+kubebuilder:rbac:groups=networking.internal.knative.dev,resources=ingresses/finalizers,verbs=update //+kubebuilder:rbac:groups=networking.internal.knative.dev,namespace=CHANGEME,resources=ingresses,verbs=get;list;watch //+kubebuilder:rbac:groups=networking.internal.knative.dev,namespace=CHANGEME,resources=ingresses/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=networking.internal.knative.dev,namespace=CHANGEME,resources=ingresses/finalizers,verbs=update // Reconcile processes the watched objects func (r *Knativev1alpha1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -1142,7 +1039,7 @@ func (r *Knativev1alpha1IngressReconciler) Reconcile(ctx context.Context, req ct } return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } - return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) + return ctrl.Result{}, nil } // if the object is not configured with our ingress.class, then we need to ensure it's removed from the cache @@ -1154,17 +1051,6 @@ func (r *Knativev1alpha1IngressReconciler) Reconcile(ctx context.Context, req ct return ctrl.Result{}, nil } - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{Requeue: true}, nil - } - // update the kong Admin API with the changes log.Info("updating the proxy with new Ingress", "namespace", obj.Namespace, "name", obj.Name) if err := r.Proxy.UpdateObject(obj); err != nil { diff --git a/railgun/hack/generators/controllers/networking/main.go b/railgun/hack/generators/controllers/networking/main.go index 0532a2da0a..61fa2d2229 100644 --- a/railgun/hack/generators/controllers/networking/main.go +++ b/railgun/hack/generators/controllers/networking/main.go @@ -295,9 +295,6 @@ type typeNeeded struct { // AcceptsIngressClassNameAnnotation indicates that the object accepts (and the controller will listen to) // the "kubernetes.io/ingress.class" annotation to decide whether or not the object is supported. - // - // This setting will also indicate whether or not a generated controller will employ a teardown finalizer - // to indicate that we need to wait for cache deletion to succeed before allowing Kubernetes GC to remove the obj. 
AcceptsIngressClassNameAnnotation bool // AcceptsIngressClassNameSpec indicates the the object indicates the ingress.class that should support it via @@ -400,10 +397,8 @@ func (r *{{.PackageAlias}}{{.Type}}Reconciler) SetupWithManager(mgr ctrl.Manager //+kubebuilder:rbac:groups={{.URL}},resources={{.Plural}},verbs={{ .RBACVerbs | join ";" }} //+kubebuilder:rbac:groups={{.URL}},resources={{.Plural}}/status,verbs=get;update;patch -//+kubebuilder:rbac:groups={{.URL}},resources={{.Plural}}/finalizers,verbs=update //+kubebuilder:rbac:groups={{.URL}},namespace=CHANGEME,resources={{.Plural}},verbs={{ .RBACVerbs | join ";" }} //+kubebuilder:rbac:groups={{.URL}},namespace=CHANGEME,resources={{.Plural}}/status,verbs=get;update;patch -//+kubebuilder:rbac:groups={{.URL}},namespace=CHANGEME,resources={{.Plural}}/finalizers,verbs=update // Reconcile processes the watched objects func (r *{{.PackageAlias}}{{.Type}}Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -441,11 +436,7 @@ func (r *{{.PackageAlias}}{{.Type}}Reconciler) Reconcile(ctx context.Context, re } return ctrl.Result{Requeue: true}, nil // wait until the object is no longer present in the cache } -{{- if .AcceptsIngressClassNameAnnotation}} - return ctrlutils.CleanupFinalizer(ctx, r.Client, log, req.NamespacedName, obj) -{{- else}} return ctrl.Result{}, nil -{{- end}} } {{if .AcceptsIngressClassNameAnnotation}} // if the object is not configured with our ingress.class, then we need to ensure it's removed from the cache @@ -456,17 +447,6 @@ func (r *{{.PackageAlias}}{{.Type}}Reconciler) Reconcile(ctx context.Context, re } return ctrl.Result{}, nil } - - // before we store cache data for this object, ensure that it has our finalizer set - if !ctrlutils.HasFinalizer(obj, ctrlutils.KongIngressFinalizer) { - log.Info("finalizer is not set for resource, setting it", req.Namespace, req.Name) - finalizers := obj.GetFinalizers() - obj.SetFinalizers(append(finalizers, ctrlutils.KongIngressFinalizer)) - if err := r.Client.Update(ctx, obj); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{Requeue: true}, nil - } {{end}} // update the kong Admin API with the changes log.Info("updating the proxy with new {{.Type}}", "namespace", obj.Namespace, "name", obj.Name) diff --git a/railgun/internal/ctrlutils/utils.go b/railgun/internal/ctrlutils/utils.go index 7eda26980b..e7344af64c 100644 --- a/railgun/internal/ctrlutils/utils.go +++ b/railgun/internal/ctrlutils/utils.go @@ -1,16 +1,11 @@ package ctrlutils import ( - "context" - - "github.com/go-logr/logr" "github.com/kong/kubernetes-ingress-controller/pkg/annotations" netv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" knative "knative.dev/networking/pkg/apis/networking/v1alpha1" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -19,39 +14,6 @@ import ( // classSpec indicates the fieldName for objects which support indicating their Ingress Class by spec const classSpec = "IngressClassName" -// CleanupFinalizer removes an object finalizer from an object which is currently being deleted. 
-func CleanupFinalizer(ctx context.Context, c client.Client, log logr.Logger, nsn types.NamespacedName, obj client.Object) (ctrl.Result, error) { - if HasFinalizer(obj, KongIngressFinalizer) { - log.Info("kong ingress finalizer needs to be removed from a resource which is deleting", "ingress", obj.GetName(), "finalizer", KongIngressFinalizer) - finalizers := []string{} - for _, finalizer := range obj.GetFinalizers() { - if finalizer != KongIngressFinalizer { - finalizers = append(finalizers, finalizer) - } - } - obj.SetFinalizers(finalizers) - if err := c.Update(ctx, obj); err != nil { - return ctrl.Result{}, err - } - log.Info("the kong ingress finalizer was removed from an a resource which is deleting", "ingress", obj.GetName(), "finalizer", KongIngressFinalizer) - return ctrl.Result{Requeue: true}, nil - } - - return ctrl.Result{}, nil -} - -// HasFinalizer is a helper function to check whether a client.Object -// already has a specific finalizer set. -func HasFinalizer(obj client.Object, finalizer string) bool { - hasFinalizer := false - for _, foundFinalizer := range obj.GetFinalizers() { - if foundFinalizer == finalizer { - hasFinalizer = true - } - } - return hasFinalizer -} - // HasAnnotation is a helper function to determine whether an object has a given annotation, and whether it's // to the value provided. func HasAnnotation(obj client.Object, key, expectedValue string) bool { diff --git a/railgun/internal/ctrlutils/vars.go b/railgun/internal/ctrlutils/vars.go deleted file mode 100644 index 5f8d0bc7d7..0000000000 --- a/railgun/internal/ctrlutils/vars.go +++ /dev/null @@ -1,8 +0,0 @@ -package ctrlutils - -// ----------------------------------------------------------------------------- -// General Controller Variables -// ----------------------------------------------------------------------------- - -// KongIngressFinalizer is the finalizer used to ensure Kong configuration cleanup for deleted resources. 
-const KongIngressFinalizer = "configuration.konghq.com/ingress" From 6212866dd0f7338d1d818084742acd3250c6afca Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Sat, 10 Jul 2021 23:22:25 -0400 Subject: [PATCH 16/18] chore: plugin tests use their own ns --- railgun/test/integration/plugin_test.go | 50 ++++++++++++++++++------- railgun/test/integration/suite_test.go | 1 + 2 files changed, 37 insertions(+), 14 deletions(-) diff --git a/railgun/test/integration/plugin_test.go b/railgun/test/integration/plugin_test.go index b51a63ec38..e1814ecc25 100644 --- a/railgun/test/integration/plugin_test.go +++ b/railgun/test/integration/plugin_test.go @@ -22,28 +22,49 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const testPluginsNamespace = "kongplugins" + func TestPluginEssentials(t *testing.T) { ctx := context.Background() + t.Logf("creating namespace %s for testing", testPluginsNamespace) + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testPluginsNamespace}} + ns, err := env.Cluster().Client().CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + require.NoError(t, err) + + defer func() { + t.Logf("cleaning up namespace %s", testPluginsNamespace) + require.NoError(t, env.Cluster().Client().CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{})) + require.Eventually(t, func() bool { + _, err := env.Cluster().Client().CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return true + } + } + return false + }, ingressWait, waitTick) + }() + t.Log("deploying a minimal HTTP container deployment to test Ingress routes") container := generators.NewContainer("httpbin", httpBinImage, 80) deployment := generators.NewDeploymentForContainer(container) - deployment, err := env.Cluster().Client().AppsV1().Deployments(corev1.NamespaceDefault).Create(ctx, deployment, metav1.CreateOptions{}) + deployment, err = env.Cluster().Client().AppsV1().Deployments(testPluginsNamespace).Create(ctx, deployment, metav1.CreateOptions{}) assert.NoError(t, err) defer func() { t.Logf("cleaning up the deployment %s", deployment.Name) - assert.NoError(t, env.Cluster().Client().AppsV1().Deployments(corev1.NamespaceDefault).Delete(ctx, deployment.Name, metav1.DeleteOptions{})) + assert.NoError(t, env.Cluster().Client().AppsV1().Deployments(testPluginsNamespace).Delete(ctx, deployment.Name, metav1.DeleteOptions{})) }() t.Logf("exposing deployment %s via service", deployment.Name) service := generators.NewServiceForDeployment(deployment, corev1.ServiceTypeLoadBalancer) - _, err = env.Cluster().Client().CoreV1().Services(corev1.NamespaceDefault).Create(ctx, service, metav1.CreateOptions{}) + _, err = env.Cluster().Client().CoreV1().Services(testPluginsNamespace).Create(ctx, service, metav1.CreateOptions{}) assert.NoError(t, err) defer func() { t.Logf("cleaning up the service %s", service.Name) - assert.NoError(t, env.Cluster().Client().CoreV1().Services(corev1.NamespaceDefault).Delete(ctx, service.Name, metav1.DeleteOptions{})) + assert.NoError(t, env.Cluster().Client().CoreV1().Services(testPluginsNamespace).Delete(ctx, service.Name, metav1.DeleteOptions{})) }() t.Logf("creating an ingress for service %s with ingress.class %s", service.Name, ingressClass) @@ -51,12 +72,12 @@ func TestPluginEssentials(t *testing.T) { annotations.IngressClassKey: ingressClass, "konghq.com/strip-path": "true", }, service) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Create(ctx, ingress, 
metav1.CreateOptions{}) + ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Create(ctx, ingress, metav1.CreateOptions{}) assert.NoError(t, err) defer func() { t.Logf("ensuring that Ingress %s is cleaned up", ingress.Name) - if err := env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Delete(ctx, ingress.Name, metav1.DeleteOptions{}); err != nil { + if err := env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Delete(ctx, ingress.Name, metav1.DeleteOptions{}); err != nil { if !errors.IsNotFound(err) { require.NoError(t, err) } @@ -83,7 +104,7 @@ func TestPluginEssentials(t *testing.T) { kongplugin := &kongv1.KongPlugin{ ObjectMeta: metav1.ObjectMeta{ - Namespace: corev1.NamespaceDefault, + Namespace: testPluginsNamespace, Name: "teapot", }, PluginName: "request-termination", @@ -105,22 +126,22 @@ func TestPluginEssentials(t *testing.T) { } c, err := clientset.NewForConfig(env.Cluster().Config()) assert.NoError(t, err) - kongplugin, err = c.ConfigurationV1().KongPlugins(corev1.NamespaceDefault).Create(ctx, kongplugin, metav1.CreateOptions{}) + kongplugin, err = c.ConfigurationV1().KongPlugins(testPluginsNamespace).Create(ctx, kongplugin, metav1.CreateOptions{}) assert.NoError(t, err) kongclusterplugin, err = c.ConfigurationV1().KongClusterPlugins().Create(ctx, kongclusterplugin, metav1.CreateOptions{}) assert.NoError(t, err) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Get(ctx, ingress.Name, metav1.GetOptions{}) + ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Get(ctx, ingress.Name, metav1.GetOptions{}) assert.NoError(t, err) t.Logf("updating Ingress %s to use plugin %s", ingress.Name, kongplugin.Name) require.Eventually(t, func() bool { - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Get(ctx, ingress.Name, metav1.GetOptions{}) + ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Get(ctx, ingress.Name, metav1.GetOptions{}) if err != nil { return false } ingress.ObjectMeta.Annotations[annotations.AnnotationPrefix+annotations.PluginsKey] = kongplugin.Name - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Update(ctx, ingress, metav1.UpdateOptions{}) + ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) if err != nil { return false } @@ -140,12 +161,12 @@ func TestPluginEssentials(t *testing.T) { t.Logf("updating Ingress %s to use cluster plugin %s", ingress.Name, kongclusterplugin.Name) require.Eventually(t, func() bool { - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Get(ctx, ingress.Name, metav1.GetOptions{}) + ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Get(ctx, ingress.Name, metav1.GetOptions{}) if err != nil { return false } ingress.ObjectMeta.Annotations[annotations.AnnotationPrefix+annotations.PluginsKey] = kongclusterplugin.Name - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Update(ctx, ingress, metav1.UpdateOptions{}) + ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) if err != nil { return false } @@ -164,7 +185,7 @@ func TestPluginEssentials(t *testing.T) { }, ingressWait, waitTick) t.Logf("deleting Ingress %s and waiting for routes 
to be torn down", ingress.Name) - assert.NoError(t, env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Delete(ctx, ingress.Name, metav1.DeleteOptions{})) + assert.NoError(t, env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Delete(ctx, ingress.Name, metav1.DeleteOptions{})) assert.Eventually(t, func() bool { resp, err := httpc.Get(fmt.Sprintf("%s/httpbin", proxyURL)) if err != nil { @@ -172,6 +193,7 @@ func TestPluginEssentials(t *testing.T) { return false } defer resp.Body.Close() + t.Logf("WARNING: endpoint %s returned response STATUS=(%d)", fmt.Sprintf("%s/httpbin", proxyURL), resp.StatusCode) return expect404WithNoRoute(t, proxyURL.String(), resp) }, ingressWait, waitTick) } diff --git a/railgun/test/integration/suite_test.go b/railgun/test/integration/suite_test.go index 362cf66aeb..3496bcf85a 100644 --- a/railgun/test/integration/suite_test.go +++ b/railgun/test/integration/suite_test.go @@ -75,6 +75,7 @@ var ( testIngressNamespace, testTCPIngressNamespace, testUDPIngressNamespace, + testPluginsNamespace, }, ",") // dbmode indicates the database backend of the test cluster ("off" and "postgres" are supported) From e9671ea7a21e85c841411a79053797563878b7bd Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Sat, 10 Jul 2021 23:36:03 -0400 Subject: [PATCH 17/18] fix: generator needs to fill in fields for 404 resources --- .../configuration/zz_generated_controllers.go | 91 +++++++++++-------- .../generators/controllers/networking/main.go | 7 +- 2 files changed, 56 insertions(+), 42 deletions(-) diff --git a/railgun/controllers/configuration/zz_generated_controllers.go b/railgun/controllers/configuration/zz_generated_controllers.go index 9288981fa3..239e132c36 100644 --- a/railgun/controllers/configuration/zz_generated_controllers.go +++ b/railgun/controllers/configuration/zz_generated_controllers.go @@ -45,7 +45,7 @@ import ( // CoreV1 Service // ----------------------------------------------------------------------------- -// CoreV1Service reconciles a Ingress object +// CoreV1Service reconciles Service resources type CoreV1ServiceReconciler struct { client.Client @@ -71,13 +71,14 @@ func (r *CoreV1ServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques // get the relevant object obj := new(corev1.Service) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted Service object remains in proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } @@ -116,7 +117,7 @@ func (r *CoreV1ServiceReconciler) Reconcile(ctx context.Context, req ctrl.Reques // CoreV1 Endpoints // ----------------------------------------------------------------------------- -// CoreV1Endpoints reconciles a Ingress object +// CoreV1Endpoints reconciles Endpoints resources type CoreV1EndpointsReconciler struct { client.Client @@ -142,13 +143,14 @@ func (r *CoreV1EndpointsReconciler) Reconcile(ctx context.Context, req ctrl.Requ // get the relevant object obj := new(corev1.Endpoints) if err := r.Get(ctx, 
req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted Endpoints object remains in proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } @@ -187,7 +189,7 @@ func (r *CoreV1EndpointsReconciler) Reconcile(ctx context.Context, req ctrl.Requ // CoreV1 Secret // ----------------------------------------------------------------------------- -// CoreV1Secret reconciles a Ingress object +// CoreV1Secret reconciles Secret resources type CoreV1SecretReconciler struct { client.Client @@ -213,13 +215,14 @@ func (r *CoreV1SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request // get the relevant object obj := new(corev1.Secret) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted Secret object remains in proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } @@ -258,7 +261,7 @@ func (r *CoreV1SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request // NetV1 Ingress // ----------------------------------------------------------------------------- -// NetV1Ingress reconciles a Ingress object +// NetV1Ingress reconciles Ingress resources type NetV1IngressReconciler struct { client.Client @@ -287,13 +290,14 @@ func (r *NetV1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request // get the relevant object obj := new(netv1.Ingress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted Ingress object remains in proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } @@ -341,7 +345,7 @@ func (r *NetV1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request // NetV1Beta1 Ingress // ----------------------------------------------------------------------------- -// NetV1Beta1Ingress reconciles a Ingress object +// NetV1Beta1Ingress reconciles Ingress resources type NetV1Beta1IngressReconciler struct { client.Client @@ -370,13 +374,14 @@ func (r *NetV1Beta1IngressReconciler) Reconcile(ctx context.Context, 
req ctrl.Re // get the relevant object obj := new(netv1beta1.Ingress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted Ingress object remains in proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } @@ -424,7 +429,7 @@ func (r *NetV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re // ExtV1Beta1 Ingress // ----------------------------------------------------------------------------- -// ExtV1Beta1Ingress reconciles a Ingress object +// ExtV1Beta1Ingress reconciles Ingress resources type ExtV1Beta1IngressReconciler struct { client.Client @@ -453,13 +458,14 @@ func (r *ExtV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re // get the relevant object obj := new(extv1beta1.Ingress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted Ingress object remains in proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } @@ -507,7 +513,7 @@ func (r *ExtV1Beta1IngressReconciler) Reconcile(ctx context.Context, req ctrl.Re // KongV1 KongIngress // ----------------------------------------------------------------------------- -// KongV1KongIngress reconciles a Ingress object +// KongV1KongIngress reconciles KongIngress resources type KongV1KongIngressReconciler struct { client.Client @@ -533,13 +539,14 @@ func (r *KongV1KongIngressReconciler) Reconcile(ctx context.Context, req ctrl.Re // get the relevant object obj := new(kongv1.KongIngress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted KongIngress object remains in proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } @@ -578,7 +585,7 @@ func (r *KongV1KongIngressReconciler) Reconcile(ctx context.Context, req ctrl.Re // KongV1 KongPlugin // ----------------------------------------------------------------------------- -// KongV1KongPlugin reconciles a Ingress object +// KongV1KongPlugin reconciles KongPlugin 
resources type KongV1KongPluginReconciler struct { client.Client @@ -604,13 +611,14 @@ func (r *KongV1KongPluginReconciler) Reconcile(ctx context.Context, req ctrl.Req // get the relevant object obj := new(kongv1.KongPlugin) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted KongPlugin object remains in proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } @@ -649,7 +657,7 @@ func (r *KongV1KongPluginReconciler) Reconcile(ctx context.Context, req ctrl.Req // KongV1 KongClusterPlugin // ----------------------------------------------------------------------------- -// KongV1KongClusterPlugin reconciles a Ingress object +// KongV1KongClusterPlugin reconciles KongClusterPlugin resources type KongV1KongClusterPluginReconciler struct { client.Client @@ -678,13 +686,14 @@ func (r *KongV1KongClusterPluginReconciler) Reconcile(ctx context.Context, req c // get the relevant object obj := new(kongv1.KongClusterPlugin) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted KongClusterPlugin object remains in proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } @@ -732,7 +741,7 @@ func (r *KongV1KongClusterPluginReconciler) Reconcile(ctx context.Context, req c // KongV1 KongConsumer // ----------------------------------------------------------------------------- -// KongV1KongConsumer reconciles a Ingress object +// KongV1KongConsumer reconciles KongConsumer resources type KongV1KongConsumerReconciler struct { client.Client @@ -761,13 +770,14 @@ func (r *KongV1KongConsumerReconciler) Reconcile(ctx context.Context, req ctrl.R // get the relevant object obj := new(kongv1.KongConsumer) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted KongConsumer object remains in proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } @@ -815,7 +825,7 @@ func (r *KongV1KongConsumerReconciler) Reconcile(ctx 
context.Context, req ctrl.R // KongV1Beta1 TCPIngress // ----------------------------------------------------------------------------- -// KongV1Beta1TCPIngress reconciles a Ingress object +// KongV1Beta1TCPIngress reconciles TCPIngress resources type KongV1Beta1TCPIngressReconciler struct { client.Client @@ -844,13 +854,14 @@ func (r *KongV1Beta1TCPIngressReconciler) Reconcile(ctx context.Context, req ctr // get the relevant object obj := new(kongv1beta1.TCPIngress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted TCPIngress object remains in proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } @@ -898,7 +909,7 @@ func (r *KongV1Beta1TCPIngressReconciler) Reconcile(ctx context.Context, req ctr // KongV1Beta1 UDPIngress // ----------------------------------------------------------------------------- -// KongV1Beta1UDPIngress reconciles a Ingress object +// KongV1Beta1UDPIngress reconciles UDPIngress resources type KongV1Beta1UDPIngressReconciler struct { client.Client @@ -927,13 +938,14 @@ func (r *KongV1Beta1UDPIngressReconciler) Reconcile(ctx context.Context, req ctr // get the relevant object obj := new(kongv1beta1.UDPIngress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted UDPIngress object remains in proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } @@ -981,7 +993,7 @@ func (r *KongV1Beta1UDPIngressReconciler) Reconcile(ctx context.Context, req ctr // Knativev1alpha1 Ingress // ----------------------------------------------------------------------------- -// Knativev1alpha1Ingress reconciles a Ingress object +// Knativev1alpha1Ingress reconciles Ingress resources type Knativev1alpha1IngressReconciler struct { client.Client @@ -1010,13 +1022,14 @@ func (r *Knativev1alpha1IngressReconciler) Reconcile(ctx context.Context, req ct // get the relevant object obj := new(knativev1alpha1.Ingress) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted Ingress object remains in 
proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } diff --git a/railgun/hack/generators/controllers/networking/main.go b/railgun/hack/generators/controllers/networking/main.go index 61fa2d2229..7ac578f7e6 100644 --- a/railgun/hack/generators/controllers/networking/main.go +++ b/railgun/hack/generators/controllers/networking/main.go @@ -372,7 +372,7 @@ var controllerTemplate = ` // {{.PackageAlias}} {{.Type}} // ----------------------------------------------------------------------------- -// {{.PackageAlias}}{{.Type}} reconciles a Ingress object +// {{.PackageAlias}}{{.Type}} reconciles {{.Type}} resources type {{.PackageAlias}}{{.Type}}Reconciler struct { client.Client @@ -407,13 +407,14 @@ func (r *{{.PackageAlias}}{{.Type}}Reconciler) Reconcile(ctx context.Context, re // get the relevant object obj := new({{.PackageImportAlias}}.{{.Type}}) if err := r.Get(ctx, req.NamespacedName, obj); err != nil { - log.Error(err, "object was queued for reconcilation but could not be retrieved", "namespace", req.Namespace, "name", req.Name) + obj.Namespace = req.Namespace + obj.Name = req.Name objectExistsInCache, err := r.Proxy.ObjectExists(obj) if err != nil { return ctrl.Result{}, err } if objectExistsInCache { - log.Error(err, "object which wasn't found in the Kubernetes API still exists in the cache, deleting", "namespace", req.Namespace, "name", req.Name) + log.Info("deleted {{.Type}} object remains in proxy cache, removing", "namespace", req.Namespace, "name", req.Name) if err := r.Proxy.DeleteObject(obj); err != nil { return ctrl.Result{}, err } From d2781dec0095baa183adf5cd214b54577fa2973f Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Sun, 11 Jul 2021 14:59:35 -0400 Subject: [PATCH 18/18] chore: add more tests to store_test.go --- pkg/store/store_test.go | 94 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 85 insertions(+), 9 deletions(-) diff --git a/pkg/store/store_test.go b/pkg/store/store_test.go index 00e302b00a..a74bb9fac0 100644 --- a/pkg/store/store_test.go +++ b/pkg/store/store_test.go @@ -2,11 +2,16 @@ package store import ( "reflect" + "strings" "testing" "github.com/sirupsen/logrus" - core "k8s.io/api/core/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" + netv1 "k8s.io/api/networking/v1" networking "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -31,11 +36,11 @@ func Test_networkingIngressV1Beta1(t *testing.T) { { name: "returns nil if a non-ingress object is passed in", args: args{ - &core.Service{ - Spec: core.ServiceSpec{ - Type: core.ServiceTypeClusterIP, + &corev1.Service{ + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, ClusterIP: "1.1.1.1", - Ports: []core.ServicePort{ + Ports: []corev1.ServicePort{ { Name: "default", TargetPort: intstr.FromString("port-1"), @@ -75,8 +80,8 @@ func Test_networkingIngressV1Beta1(t *testing.T) { }, }, Status: extensions.IngressStatus{ - LoadBalancer: core.LoadBalancerStatus{ - Ingress: []core.LoadBalancerIngress{{IP: "1.2.3.4"}}, + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{{IP: "1.2.3.4"}}, }, }, }, @@ -107,8 +112,8 @@ func Test_networkingIngressV1Beta1(t *testing.T) { }, }, Status: networking.IngressStatus{ - LoadBalancer: core.LoadBalancerStatus{ - Ingress: 
[]core.LoadBalancerIngress{{IP: "1.2.3.4"}}, + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{{IP: "1.2.3.4"}}, }, }, }, @@ -125,3 +130,74 @@ func Test_networkingIngressV1Beta1(t *testing.T) { }) } } + +func TestCacheStoresGet(t *testing.T) { + t.Log("configuring some yaml objects to store in the cache") + svcYAML := []byte(`--- +apiVersion: v1 +kind: Service +metadata: + name: httpbin-deployment + namespace: default + labels: + app: httpbin +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: httpbin + type: ClusterIP +`) + ingYAML := []byte(`--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: httpbin-ingress + namespace: default + annotations: + httpbin.ingress.kubernetes.io/rewrite-target: / + kubernetes.io/ingress.class: "kong" +spec: + rules: + - http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: httpbin-deployment + port: + number: 80 +`) + + t.Log("creating a new cache store from object yaml files") + cs, err := NewCacheStoresFromObjYAML(svcYAML, ingYAML) + require.NoError(t, err) + + t.Log("verifying that the cache store doesnt try to retrieve unsupported object types") + _, exists, err := cs.Get(new(appsv1.Deployment)) + assert.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "Deployment is not a supported cache object type")) + assert.False(t, exists) + + t.Log("verifying the integrity of the cache store") + assert.Len(t, cs.IngressV1.List(), 1) + assert.Len(t, cs.Service.List(), 1) + assert.Len(t, cs.IngressV1beta1.List(), 0) + assert.Len(t, cs.KongIngress.List(), 0) + _, exists, err = cs.Get(&corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: "doesntexist", Name: "doesntexist"}}) + assert.NoError(t, err) + assert.False(t, exists) + + t.Log("ensuring that we can Get() the objects back out of the cache store") + svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "httpbin-deployment"}} + ing := &netv1.Ingress{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "httpbin-ingress"}} + _, exists, err = cs.Get(svc) + assert.NoError(t, err) + assert.True(t, exists) + _, exists, err = cs.Get(ing) + assert.NoError(t, err) + assert.True(t, exists) +}
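---

Note for reviewers: the reconciler and generator hunks above all repeat one delete-path pattern. When a queued object can no longer be fetched from the Kubernetes API, the reconciler rebuilds just its namespace/name, asks the proxy whether a stale copy is still cached, and removes it if so. The sketch below restates that flow in isolation; it is illustrative only. proxyCache and cleanupStaleObject are hypothetical names standing in for the controller's Proxy interface and the generated Reconcile bodies, and the snippet assumes controller-runtime's client package and apimachinery's errors helpers.

package proxycleanup

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// proxyCache is a pared-down, hypothetical stand-in for the controller's
// Proxy interface; only the two methods used on the delete path appear here.
type proxyCache interface {
	ObjectExists(obj client.Object) (bool, error)
	DeleteObject(obj client.Object) error
}

// cleanupStaleObject mirrors the branch added to every reconciler in this
// series: the object was queued for reconciliation but is gone from the
// Kubernetes API, so any copy still held by the proxy cache must be removed.
func cleanupStaleObject(ctx context.Context, c client.Client, p proxyCache, key client.ObjectKey) error {
	obj := new(corev1.Secret) // any cache-supported type works; Secret is only an example
	err := c.Get(ctx, key, obj)
	if err == nil {
		return nil // the object still exists upstream, nothing stale to remove
	}
	if !apierrors.IsNotFound(err) {
		return err // a genuine API error, not a deletion
	}

	// The API object is gone: rebuild just enough identity for the cache
	// lookup, exactly as the patched reconcilers do.
	obj.Namespace = key.Namespace
	obj.Name = key.Name

	exists, err := p.ObjectExists(obj)
	if err != nil {
		return err
	}
	if exists {
		return p.DeleteObject(obj)
	}
	return nil
}

The behavioural point the sketch highlights is that a not-found Get result is now treated as an expected deletion signal rather than an error, which is why the hunks in this series swap the old log.Error call for a log.Info message before removing the cached copy.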