From 0561b997a59c83b2ac6e7073906bf7d42fbd3823 Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Mon, 2 Aug 2021 12:44:16 -0400 Subject: [PATCH 1/4] fix: add missing legacy ingress support --- .../generators/controllers/networking/main.go | 4 +- .../configuration/zz_generated_controllers.go | 4 +- internal/ctrlutils/ingress-status.go | 126 ++++++++++++++++-- internal/ctrlutils/utils.go | 6 + internal/store/store.go | 6 + 5 files changed, 129 insertions(+), 17 deletions(-) diff --git a/hack/generators/controllers/networking/main.go b/hack/generators/controllers/networking/main.go index 2e128b492e..91b4678045 100644 --- a/hack/generators/controllers/networking/main.go +++ b/hack/generators/controllers/networking/main.go @@ -89,7 +89,7 @@ var inputControllersNeeded = &typesNeeded{ URL: "networking.k8s.io", CacheType: "IngressV1beta1", AcceptsIngressClassNameAnnotation: true, - AcceptsIngressClassNameSpec: false, + AcceptsIngressClassNameSpec: true, RBACVerbs: []string{"get", "list", "watch"}, }, typeNeeded{ @@ -101,7 +101,7 @@ var inputControllersNeeded = &typesNeeded{ URL: "extensions", CacheType: "IngressV1beta1", AcceptsIngressClassNameAnnotation: true, - AcceptsIngressClassNameSpec: false, + AcceptsIngressClassNameSpec: true, RBACVerbs: []string{"get", "list", "watch"}, }, typeNeeded{ diff --git a/internal/controllers/configuration/zz_generated_controllers.go b/internal/controllers/configuration/zz_generated_controllers.go index 60fa3f4d40..c616997622 100644 --- a/internal/controllers/configuration/zz_generated_controllers.go +++ b/internal/controllers/configuration/zz_generated_controllers.go @@ -356,7 +356,7 @@ type NetV1Beta1IngressReconciler struct { // SetupWithManager sets up the controller with the Manager. func (r *NetV1Beta1IngressReconciler) SetupWithManager(mgr ctrl.Manager) error { - preds := ctrlutils.GeneratePredicateFuncsForIngressClassFilter(r.IngressClassName, false, true) + preds := ctrlutils.GeneratePredicateFuncsForIngressClassFilter(r.IngressClassName, true, true) return ctrl.NewControllerManagedBy(mgr).For(&netv1beta1.Ingress{}, builder.WithPredicates(preds)).Complete(r) } @@ -440,7 +440,7 @@ type ExtV1Beta1IngressReconciler struct { // SetupWithManager sets up the controller with the Manager. func (r *ExtV1Beta1IngressReconciler) SetupWithManager(mgr ctrl.Manager) error { - preds := ctrlutils.GeneratePredicateFuncsForIngressClassFilter(r.IngressClassName, false, true) + preds := ctrlutils.GeneratePredicateFuncsForIngressClassFilter(r.IngressClassName, true, true) return ctrl.NewControllerManagedBy(mgr).For(&extv1beta1.Ingress{}, builder.WithPredicates(preds)).Complete(r) } diff --git a/internal/ctrlutils/ingress-status.go b/internal/ctrlutils/ingress-status.go index 4c5602e79b..b8710c5d3b 100644 --- a/internal/ctrlutils/ingress-status.go +++ b/internal/ctrlutils/ingress-status.go @@ -9,6 +9,7 @@ import ( "sync" "time" + "github.com/blang/semver/v4" "github.com/go-logr/logr" "github.com/kong/deck/file" "github.com/prometheus/common/log" @@ -32,8 +33,14 @@ const ( ) // PullConfigUpdate is a dedicated function that process ingress/customer resource status update after configuration is updated within kong. 
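The hunks below teach PullConfigUpdate to detect the cluster's Kubernetes version so that status updates can fall back to networking/v1beta1 on clusters older than v1.19. A minimal standalone sketch of the same detection approach, assuming only a reachable kubeconfig (the `legacyIngressNeeded` helper and the kubeconfig path are illustrative, not part of the patch):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/blang/semver/v4"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// legacyIngressNeeded reports whether the target cluster is older than
// v1.19 and therefore still needs networking.k8s.io/v1beta1 Ingress.
func legacyIngressNeeded(kubeconfig string) (bool, error) {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return false, err
	}
	cli, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return false, err
	}
	// the discovery client reports the API server build, e.g. "v1.18.20"
	versionInfo, err := cli.ServerVersion()
	if err != nil {
		return false, err
	}
	version, err := semver.Parse(strings.TrimPrefix(versionInfo.String(), "v"))
	if err != nil {
		return false, err
	}
	return version.LT(semver.MustParse("1.19.0")), nil
}

func main() {
	legacy, err := legacyIngressNeeded("/etc/kubernetes/admin.conf") // path is illustrative
	if err != nil {
		panic(err)
	}
	fmt.Println("fall back to v1beta1 Ingress:", legacy)
}
```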
-func PullConfigUpdate(ctx context.Context, kongConfig sendconfig.Kong, log logr.Logger, kubeConfig *rest.Config,
-	publishService string, publishAddresses []string) {
+func PullConfigUpdate(
+	ctx context.Context,
+	kongConfig sendconfig.Kong,
+	log logr.Logger,
+	kubeConfig *rest.Config,
+	publishService string,
+	publishAddresses []string,
+) {
 	ips, hostname, err := RunningAddresses(ctx, kubeConfig, publishService, publishAddresses)
 	if err != nil {
 		log.Error(err, "failed to determine kong proxy external ips/hostnames.")
 		return
 	}
@@ -46,6 +53,18 @@ func PullConfigUpdate(ctx context.Context, kongConfig sendconfig.Kong, log logr.
 		return
 	}
 
+	versionInfo, err := cli.ServerVersion()
+	if err != nil {
+		log.Error(err, "failed to retrieve cluster version")
+		return
+	}
+
+	kubernetesVersion, err := semver.Parse(strings.TrimPrefix(versionInfo.String(), "v"))
+	if err != nil {
+		log.Error(err, "could not parse cluster version")
+		return
+	}
+
 	kiccli, err := kicclientset.NewForConfig(kubeConfig)
 	if err != nil {
 		log.Error(err, "failed to create kong ingress client.")
@@ -61,7 +80,7 @@ func PullConfigUpdate(ctx context.Context, kongConfig sendconfig.Kong, log logr.
 			log.V(4).Info("receive configuration information. Update ingress status %v \n", updateDone)
 			wg.Add(1)
 			go func() {
-				if err := UpdateIngress(ctx, &updateDone, log, cli, kiccli, &wg, ips, hostname, kubeConfig); err != nil {
+				if err := UpdateStatuses(ctx, &updateDone, log, cli, kiccli, &wg, ips, hostname, kubeConfig, kubernetesVersion); err != nil {
 					log.Error(err, "failed to update resource statuses")
 				}
 			}()
@@ -73,11 +92,19 @@ func PullConfigUpdate(ctx context.Context, kongConfig sendconfig.Kong, log logr.
 	}
 }
 
-// UpdateIngress update ingress status according to generated rules and specs
-func UpdateIngress(ctx context.Context, targetContent *file.Content, log logr.Logger, cli *clientset.Clientset,
+// UpdateStatuses updates resource statuses according to generated rules and specs
+func UpdateStatuses(
+	ctx context.Context,
+	targetContent *file.Content,
+	log logr.Logger,
+	cli *clientset.Clientset,
 	kiccli *kicclientset.Clientset,
-	wg *sync.WaitGroup, ips []string, hostname string,
-	kubeConfig *rest.Config) error {
+	wg *sync.WaitGroup,
+	ips []string,
+	hostname string,
+	kubeConfig *rest.Config,
+	kubernetesVersion semver.Version,
+) error {
 	defer wg.Done()
 
 	for _, svc := range targetContent.Services {
@@ -91,7 +118,6 @@ func UpdateIngress(ctx context.Context, targetContent *file.Content, log logr.Lo
 				if err := UpdateKnativeIngress(ctx, log, svc, kubeConfig, ips, hostname); err != nil {
 					return fmt.Errorf("failed to update knative ingress err %v", err)
 				}
-
 			}
 		}
 	}
@@ -109,8 +135,17 @@ func UpdateIngress(ctx context.Context, targetContent *file.Content, log logr.Lo
 			}
 
 		case "http":
-			if err := UpdateIngressV1(ctx, log, svc, cli, ips); err != nil {
-				return fmt.Errorf("failed to update ingressv1. err %v", err)
+			// if the cluster is on a very old version, we fall back to legacy Ingress support
+			// for compatibility with clusters older than v1.19.x.
+			// TODO: this can go away once we drop support for Kubernetes older than v1.19
+			if kubernetesVersion.Major > uint64(1) || (kubernetesVersion.Major == uint64(1) && kubernetesVersion.Minor > uint64(18)) {
+				if err := UpdateIngress(ctx, log, svc, cli, ips); err != nil {
+					return fmt.Errorf("failed to update ingressv1. err %v", err)
+				}
+			} else {
+				if err := UpdateIngressLegacy(ctx, log, svc, cli, ips); err != nil {
+					return fmt.Errorf("failed to update legacy ingress. err %v", err)
+				}
 			}
 
 		default:
 			log.Info("protocol " + proto + " is not supported")
@@ -131,9 +166,14 @@ func toKnativeLBStatus(coreLBStatus []apiv1.LoadBalancerIngress) []knative.LoadB
 	return res
 }
 
-// UpdateIngressV1 networking v1 ingress status
-func UpdateIngressV1(ctx context.Context, logger logr.Logger, svc file.FService, cli *clientset.Clientset,
-	ips []string) error {
+// UpdateIngress updates the networking v1 Ingress status
+func UpdateIngress(
+	ctx context.Context,
+	logger logr.Logger,
+	svc file.FService,
+	cli *clientset.Clientset,
+	ips []string,
+) error {
 	for _, route := range svc.Routes {
 		routeInf := strings.Split(*((*route).Name), ".")
 		namespace := routeInf[0]
@@ -185,6 +225,66 @@ func UpdateIngressV1(ctx context.Context, logger logr.Logger, svc file.FService,
 	return nil
 }
 
+// UpdateIngressLegacy updates the networking v1beta1 Ingress status
+// TODO: this can be removed once we no longer support old kubernetes < v1.19
+func UpdateIngressLegacy(
+	ctx context.Context,
+	logger logr.Logger,
+	svc file.FService,
+	cli *clientset.Clientset,
+	ips []string,
+) error {
+	for _, route := range svc.Routes {
+		routeInf := strings.Split(*((*route).Name), ".")
+		namespace := routeInf[0]
+		name := routeInf[1]
+		log.Debugf("updating status for v1beta1.Ingress route name %s namespace %s", name, namespace)
+
+		ingCli := cli.NetworkingV1beta1().Ingresses(namespace)
+		retry := 0
+		for retry < statusUpdateRetry {
+			curIng, err := ingCli.Get(ctx, name, metav1.GetOptions{})
+			if err != nil || curIng == nil {
+				if errors.IsNotFound(err) {
+					return nil
+				}
+
+				log.Errorf("failed to fetch Ingress %v/%v: %v. retrying...", namespace, name, err)
+				retry++
+				time.Sleep(time.Second)
+				continue
+			}
+
+			// build the desired status and sort it for a stable comparison
+			status := SliceToStatus(ips)
+			sort.SliceStable(status, lessLoadBalancerIngress(status))
+
+			curIPs := curIng.Status.LoadBalancer.Ingress
+			if ingressSliceEqual(status, curIPs) {
+				log.Debugf("no change in status, update ingress v1beta1 skipped")
+				return nil
+			}
+
+			curIng.Status.LoadBalancer.Ingress = status
+
+			_, err = ingCli.UpdateStatus(ctx, curIng, metav1.UpdateOptions{})
+			if err == nil {
+				break
+			}
+			if errors.IsNotFound(err) {
+				return nil
+			}
+
+			log.Errorf("failed to update Ingress V1beta1 status. %v. 
retrying...", err) + time.Sleep(time.Second) + retry++ + } + } + + log.Debugf("successfully updated networkingv1beta1 Ingress status") + return nil +} + // UpdateUDPIngress update udp ingress status func UpdateUDPIngress(ctx context.Context, logger logr.Logger, svc file.FService, kiccli *kicclientset.Clientset, ips []string) error { diff --git a/internal/ctrlutils/utils.go b/internal/ctrlutils/utils.go index 4c1ed19a89..892c3d0361 100644 --- a/internal/ctrlutils/utils.go +++ b/internal/ctrlutils/utils.go @@ -1,7 +1,9 @@ package ctrlutils import ( + extv1beta1 "k8s.io/api/extensions/v1beta1" netv1 "k8s.io/api/networking/v1" + netv1beta1 "k8s.io/api/networking/v1beta1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" knative "knative.dev/networking/pkg/apis/networking/v1alpha1" @@ -85,6 +87,10 @@ func IsIngressClassSpecConfigured(obj client.Object, expectedIngressClassName st switch obj := obj.(type) { case *netv1.Ingress: return obj.Spec.IngressClassName != nil && *obj.Spec.IngressClassName == expectedIngressClassName + case *netv1beta1.Ingress: + return obj.Spec.IngressClassName != nil && *obj.Spec.IngressClassName == expectedIngressClassName + case *extv1beta1.Ingress: + return obj.Spec.IngressClassName != nil && *obj.Spec.IngressClassName == expectedIngressClassName } return false } diff --git a/internal/store/store.go b/internal/store/store.go index bf53cad18e..f99079cb6a 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -198,6 +198,8 @@ func (c CacheStores) Get(obj runtime.Object) (item interface{}, exists bool, err // ---------------------------------------------------------------------------- case *extensions.Ingress: return c.IngressV1beta1.Get(obj) + case *networkingv1beta1.Ingress: + return c.IngressV1beta1.Get(obj) case *networkingv1.Ingress: return c.IngressV1.Get(obj) case *corev1.Service: @@ -242,6 +244,8 @@ func (c CacheStores) Add(obj runtime.Object) error { // ---------------------------------------------------------------------------- case *extensions.Ingress: return c.IngressV1beta1.Add(obj) + case *networkingv1beta1.Ingress: + return c.IngressV1beta1.Add(obj) case *networkingv1.Ingress: return c.IngressV1.Add(obj) case *corev1.Service: @@ -287,6 +291,8 @@ func (c CacheStores) Delete(obj runtime.Object) error { // ---------------------------------------------------------------------------- case *extensions.Ingress: return c.IngressV1beta1.Delete(obj) + case *networkingv1beta1.Ingress: + return c.IngressV1beta1.Delete(obj) case *networkingv1.Ingress: return c.IngressV1.Delete(obj) case *corev1.Service: From 436f5054ab58094cb60dd00387ccddd73c2637b3 Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Mon, 2 Aug 2021 12:44:55 -0400 Subject: [PATCH 2/4] fix: integration test suite compat with k8s <1.19 --- test/integration/controller_test.go | 4 +- test/integration/ingress_bulk_test.go | 24 ++-- test/integration/ingress_https_test.go | 119 ++++++++++------- test/integration/ingress_test.go | 172 ++++++++++++++++--------- test/integration/knative_test.go | 19 ++- test/integration/kongingress_test.go | 15 ++- test/integration/plugin_test.go | 98 +++++++++----- test/integration/suite_test.go | 19 +-- 8 files changed, 298 insertions(+), 172 deletions(-) diff --git a/test/integration/controller_test.go b/test/integration/controller_test.go index d18d37b485..4ededaca0b 100644 --- a/test/integration/controller_test.go +++ b/test/integration/controller_test.go @@ -72,17 +72,17 @@ func TestConfigEndpoint(t *testing.T) { successURL := 
fmt.Sprintf("http://localhost:%v/debug/config/successful", manager.DiagnosticsPort) failURL := fmt.Sprintf("http://localhost:%v/debug/config/failed", manager.DiagnosticsPort) successResp, err := httpc.Get(successURL) - defer successResp.Body.Close() if err != nil { t.Logf("WARNING: error while waiting for %s: %v", successURL, err) return false } + defer successResp.Body.Close() failResp, err := httpc.Get(failURL) - defer failResp.Body.Close() if err != nil { t.Logf("WARNING: error while waiting for %s: %v", failURL, err) return false } + defer failResp.Body.Close() return successResp.StatusCode == http.StatusOK && failResp.StatusCode == http.StatusOK }, ingressWait, waitTick) } diff --git a/test/integration/ingress_bulk_test.go b/test/integration/ingress_bulk_test.go index 854665edd7..dde489e66a 100644 --- a/test/integration/ingress_bulk_test.go +++ b/test/integration/ingress_bulk_test.go @@ -11,11 +11,13 @@ import ( "testing" "time" + "github.com/kong/kubernetes-testing-framework/pkg/clusters" "github.com/kong/kubernetes-testing-framework/pkg/utils/kubernetes/generators" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "github.com/kong/kubernetes-ingress-controller/internal/annotations" ) @@ -51,7 +53,12 @@ func TestIngressBulk(t *testing.T) { deployment, err = env.Cluster().Client().AppsV1().Deployments(testBulkIngressNamespace).Create(ctx, deployment, metav1.CreateOptions{}) require.NoError(t, err) + t.Log("checking the cluster version to determine which ingress version to use") + kubernetesVersion, err := env.Cluster().Version() + require.NoError(t, err) + t.Logf("exposing deployment %s via ingress %d times", deployment.Name, maxBatchSize) + ingresses := make([]runtime.Object, maxBatchSize) for i := 0; i < maxBatchSize; i++ { name := fmt.Sprintf("bulk-httpbin-%d", i) path := fmt.Sprintf("/%s", name) @@ -62,12 +69,12 @@ func TestIngressBulk(t *testing.T) { _, err = env.Cluster().Client().CoreV1().Services(testBulkIngressNamespace).Create(ctx, service, metav1.CreateOptions{}) require.NoError(t, err) - ingress := generators.NewIngressForService(path, map[string]string{ + ingress := generators.NewIngressForServiceWithClusterVersion(kubernetesVersion, path, map[string]string{ annotations.IngressClassKey: ingressClass, "konghq.com/strip-path": "true", }, service) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testBulkIngressNamespace).Create(ctx, ingress, metav1.CreateOptions{}) - require.NoError(t, err) + require.NoError(t, clusters.DeployIngress(ctx, env.Cluster(), testBulkIngressNamespace, ingress)) + ingresses[i] = ingress } t.Logf("verifying that all %d ingresses route properly", maxBatchSize) @@ -97,7 +104,7 @@ func TestIngressBulk(t *testing.T) { for i := 0; i < maxBatchSize; i++ { name := fmt.Sprintf("bulk-httpbin-%d", i) require.NoError(t, env.Cluster().Client().CoreV1().Services(testBulkIngressNamespace).Delete(ctx, name, metav1.DeleteOptions{})) - require.NoError(t, env.Cluster().Client().NetworkingV1().Ingresses(testBulkIngressNamespace).Delete(ctx, name, metav1.DeleteOptions{})) + require.NoError(t, clusters.DeleteIngress(ctx, env.Cluster(), testBulkIngressNamespace, ingresses[i])) require.Eventually(t, func() bool { _, err := env.Cluster().Client().CoreV1().Services(testBulkIngressNamespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { @@ -109,6 +116,7 @@ func TestIngressBulk(t *testing.T) { t.Log("staggering ingress 
deployments over several seconds") maxStaggeredBatchSize := maxBatchSize + ingresses = make([]runtime.Object, maxStaggeredBatchSize) for i := 0; i < maxStaggeredBatchSize; i++ { name := fmt.Sprintf("bulk-staggered-httpbin-%d", i) path := fmt.Sprintf("/%s", name) @@ -119,12 +127,12 @@ func TestIngressBulk(t *testing.T) { _, err = env.Cluster().Client().CoreV1().Services(testBulkIngressNamespace).Create(ctx, service, metav1.CreateOptions{}) require.NoError(t, err) - ingress := generators.NewIngressForService(path, map[string]string{ + ingress := generators.NewIngressForServiceWithClusterVersion(kubernetesVersion, path, map[string]string{ annotations.IngressClassKey: ingressClass, "konghq.com/strip-path": "true", }, service) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testBulkIngressNamespace).Create(ctx, ingress, metav1.CreateOptions{}) - require.NoError(t, err) + require.NoError(t, clusters.DeployIngress(ctx, env.Cluster(), testBulkIngressNamespace, ingress)) + ingresses[i] = ingress // every 10 items sleep for 1 second to stagger the updates to ~10/s if (i + 1%10) == 0 { @@ -159,7 +167,7 @@ func TestIngressBulk(t *testing.T) { for i := 0; i < maxBatchSize; i++ { name := fmt.Sprintf("bulk-staggered-httpbin-%d", i) require.NoError(t, env.Cluster().Client().CoreV1().Services(testBulkIngressNamespace).Delete(ctx, name, metav1.DeleteOptions{})) - require.NoError(t, env.Cluster().Client().NetworkingV1().Ingresses(testBulkIngressNamespace).Delete(ctx, name, metav1.DeleteOptions{})) + require.NoError(t, clusters.DeleteIngress(ctx, env.Cluster(), testBulkIngressNamespace, ingresses[i])) require.Eventually(t, func() bool { _, err := env.Cluster().Client().CoreV1().Services(testBulkIngressNamespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { diff --git a/test/integration/ingress_https_test.go b/test/integration/ingress_https_test.go index 2f9c7ded8f..01e2402a1b 100644 --- a/test/integration/ingress_https_test.go +++ b/test/integration/ingress_https_test.go @@ -13,11 +13,13 @@ import ( "testing" "time" + "github.com/kong/kubernetes-testing-framework/pkg/clusters" "github.com/kong/kubernetes-testing-framework/pkg/utils/kubernetes/generators" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" + netv1 "k8s.io/api/networking/v1" + netv1beta1 "k8s.io/api/networking/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -181,20 +183,21 @@ func TestHTTPSRedirect(t *testing.T) { }() t.Logf("exposing Service %s via Ingress", service.Name) - ingress := generators.NewIngressForService("/httpbin", map[string]string{ + kubernetesVersion, err := env.Cluster().Version() + require.NoError(t, err) + ingress := generators.NewIngressForServiceWithClusterVersion(kubernetesVersion, "/httpbin", map[string]string{ annotations.IngressClassKey: ingressClass, "konghq.com/protocols": "https", "konghq.com/https-redirect-status-code": "301", }, service) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Create(ctx, ingress, opts) - assert.NoError(t, err) + assert.NoError(t, clusters.DeployIngress(ctx, env.Cluster(), corev1.NamespaceDefault, ingress)) defer func() { - t.Logf("cleaning up Ingress %s", ingress.Name) - assert.NoError(t, env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Delete(ctx, ingress.Name, metav1.DeleteOptions{})) + t.Log("cleaning up Ingress resource") + 
assert.NoError(t, clusters.DeleteIngress(ctx, env.Cluster(), corev1.NamespaceDefault, ingress)) }() - t.Logf("waiting for Ingress %s to be operational and properly redirect", ingress.Name) + t.Log("waiting for Ingress to be operational and properly redirect") client := &http.Client{ CheckRedirect: func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse @@ -278,28 +281,36 @@ func TestHTTPSIngress(t *testing.T) { }() t.Logf("creating an ingress for service %s with ingress.class %s", service.Name, ingressClass) - ingress1 := generators.NewIngressForService("/foo", map[string]string{ + kubernetesVersion, err := env.Cluster().Version() + require.NoError(t, err) + ingress1 := generators.NewIngressForServiceWithClusterVersion(kubernetesVersion, "/foo", map[string]string{ annotations.IngressClassKey: ingressClass, "konghq.com/strip-path": "true", }, service) - ingress1.Spec.TLS = []networkingv1.IngressTLS{ - { - SecretName: "secret1", - Hosts: []string{"foo.example"}, - }, - } - ingress1.ObjectMeta.Name = "ingress1" - ingress2 := generators.NewIngressForService("/bar", map[string]string{ + ingress2 := generators.NewIngressForServiceWithClusterVersion(kubernetesVersion, "/bar", map[string]string{ annotations.IngressClassKey: ingressClass, "konghq.com/strip-path": "true", }, service) - ingress2.Spec.TLS = []networkingv1.IngressTLS{ - { - SecretName: "secret2", - Hosts: []string{"bar.example"}, - }, + + t.Log("configuring ingress tls spec") + switch obj := ingress1.(type) { + case *netv1.Ingress: + obj.Spec.TLS = []netv1.IngressTLS{{SecretName: "secret1", Hosts: []string{"foo.example"}}} + obj.ObjectMeta.Name = "ingress1" + case *netv1beta1.Ingress: + obj.Spec.TLS = []netv1beta1.IngressTLS{{SecretName: "secret1", Hosts: []string{"foo.example"}}} + obj.ObjectMeta.Name = "ingress1" + } + switch obj := ingress2.(type) { + case *netv1.Ingress: + obj.Spec.TLS = []netv1.IngressTLS{{SecretName: "secret2", Hosts: []string{"bar.example"}}} + obj.ObjectMeta.Name = "ingress2" + case *netv1beta1.Ingress: + obj.Spec.TLS = []netv1beta1.IngressTLS{{SecretName: "secret2", Hosts: []string{"bar.example"}}} + obj.ObjectMeta.Name = "ingress2" } - ingress2.ObjectMeta.Name = "ingress2" + + t.Log("configuring secrets") secrets := []*corev1.Secret{ { ObjectMeta: metav1.ObjectMeta{ @@ -324,24 +335,25 @@ func TestHTTPSIngress(t *testing.T) { }, }, } + + t.Log("deploying secrets") secret1, err := env.Cluster().Client().CoreV1().Secrets(corev1.NamespaceDefault).Create(ctx, secrets[0], metav1.CreateOptions{}) assert.NoError(t, err) secret2, err := env.Cluster().Client().CoreV1().Secrets(corev1.NamespaceDefault).Create(ctx, secrets[1], metav1.CreateOptions{}) assert.NoError(t, err) - ingress1, err = env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Create(ctx, ingress1, metav1.CreateOptions{}) - assert.NoError(t, err) - ingress2, err = env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Create(ctx, ingress2, metav1.CreateOptions{}) - assert.NoError(t, err) + + t.Log("deploying ingress resources") + require.NoError(t, clusters.DeployIngress(ctx, env.Cluster(), corev1.NamespaceDefault, ingress1)) + require.NoError(t, clusters.DeployIngress(ctx, env.Cluster(), corev1.NamespaceDefault, ingress2)) defer func() { - t.Logf("ensuring that Ingress %s is cleaned up", ingress1.Name) - if err := env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Delete(ctx, ingress1.Name, metav1.DeleteOptions{}); err != nil { + t.Log("ensuring that Ingress resources 
are cleaned up") + if err := clusters.DeleteIngress(ctx, env.Cluster(), corev1.NamespaceDefault, ingress1); err != nil { if !errors.IsNotFound(err) { require.NoError(t, err) } } - t.Logf("ensuring that Ingress %s is cleaned up", ingress2.Name) - if err := env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Delete(ctx, ingress2.Name, metav1.DeleteOptions{}); err != nil { + if err := clusters.DeleteIngress(ctx, env.Cluster(), corev1.NamespaceDefault, ingress2); err != nil { if !errors.IsNotFound(err) { require.NoError(t, err) } @@ -360,14 +372,13 @@ func TestHTTPSIngress(t *testing.T) { } }() - t.Logf("checking ingress %s status readiness.", ingress1.Name) + t.Log("checking first ingress status readiness") require.Eventually(t, func() bool { - curIng, err := env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Get(ctx, ingress1.Name, metav1.GetOptions{}) - if err != nil || curIng == nil { + lbstatus, err := clusters.GetIngressLoadbalancerStatus(ctx, env.Cluster(), corev1.NamespaceDefault, ingress1) + if err != nil { return false } - ingresses := curIng.Status.LoadBalancer.Ingress - for _, ingress := range ingresses { + for _, ingress := range lbstatus.Ingress { if len(ingress.Hostname) > 0 || len(ingress.IP) > 0 { t.Logf("networkingv1 ingress1 hostname %s or ip %s is ready to redirect traffic.", ingress.Hostname, ingress.IP) return true @@ -376,14 +387,13 @@ func TestHTTPSIngress(t *testing.T) { return false }, 120*time.Second, 1*time.Second, true) - t.Logf("checking ingress %s status readiness.", ingress2.Name) + t.Log("checking second ingress status readiness") assert.Eventually(t, func() bool { - curIng, err := env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Get(ctx, ingress2.Name, metav1.GetOptions{}) - if err != nil || curIng == nil { + lbstatus, err := clusters.GetIngressLoadbalancerStatus(ctx, env.Cluster(), corev1.NamespaceDefault, ingress2) + if err != nil { return false } - ingresses := curIng.Status.LoadBalancer.Ingress - for _, ingress := range ingresses { + for _, ingress := range lbstatus.Ingress { if len(ingress.Hostname) > 0 || len(ingress.IP) > 0 { t.Logf("networkingv1 ingress2 hostname %s or ip %s is ready to redirect traffic.", ingress.Hostname, ingress.IP) return true @@ -392,7 +402,7 @@ func TestHTTPSIngress(t *testing.T) { return false }, 120*time.Second, 1*time.Second, true) - t.Logf("waiting for routes from Ingress %s to be operational with expected certificate", ingress1.Name) + t.Log("waiting for routes from Ingress to be operational with expected certificate") assert.Eventually(t, func() bool { resp, err := httpcStatic.Get("https://foo.example:443/foo") if err != nil { @@ -410,7 +420,7 @@ func TestHTTPSIngress(t *testing.T) { return false }, ingressWait, waitTick, true) - t.Logf("waiting for routes from Ingress %s to be operational with expected certificate", ingress2.Name) + t.Log("waiting for routes from Ingress to be operational with expected certificate") assert.Eventually(t, func() bool { resp, err := httpcStatic.Get("https://bar.example:443/bar") if err != nil { @@ -431,7 +441,7 @@ func TestHTTPSIngress(t *testing.T) { // This should work currently. generators.NewIngressForService() only creates path rules by default, so while we don't // do anything for baz.example other than add fake DNS for it, the /bar still routes it through ingress2's route. // We're going to break it later, but need to confirm it does work first. 
- t.Logf("confirm Ingress %s path routes available on other hostnames", ingress2.Name) + t.Log("confirm Ingress path routes available on other hostnames") assert.Eventually(t, func() bool { resp, err := httpcStatic.Get("https://baz.example:443/bar") if err != nil { @@ -449,13 +459,22 @@ func TestHTTPSIngress(t *testing.T) { return false }, ingressWait, waitTick) - ingress2, err = env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Get(ctx, ingress2.Name, metav1.GetOptions{}) - assert.NoError(t, err) - ingress2.ObjectMeta.Annotations["konghq.com/snis"] = "bar.example" - ingress2, err = env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Update(ctx, ingress2, metav1.UpdateOptions{}) - assert.NoError(t, err) + switch obj := ingress2.(type) { + case *netv1.Ingress: + ingress2, err := env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Get(ctx, obj.Name, metav1.GetOptions{}) + assert.NoError(t, err) + ingress2.ObjectMeta.Annotations["konghq.com/snis"] = "bar.example" + _, err = env.Cluster().Client().NetworkingV1().Ingresses(corev1.NamespaceDefault).Update(ctx, ingress2, metav1.UpdateOptions{}) + assert.NoError(t, err) + case *netv1beta1.Ingress: + ingress2, err := env.Cluster().Client().NetworkingV1beta1().Ingresses(corev1.NamespaceDefault).Get(ctx, obj.Name, metav1.GetOptions{}) + assert.NoError(t, err) + ingress2.ObjectMeta.Annotations["konghq.com/snis"] = "bar.example" + _, err = env.Cluster().Client().NetworkingV1beta1().Ingresses(corev1.NamespaceDefault).Update(ctx, ingress2, metav1.UpdateOptions{}) + assert.NoError(t, err) + } - t.Logf("confirm Ingress %s no longer routes without matching SNI", ingress2.Name) + t.Log("confirm Ingress no longer routes without matching SNI") assert.Eventually(t, func() bool { resp, err := httpcStatic.Get("https://baz.example:443/bar") if err != nil { @@ -467,7 +486,7 @@ func TestHTTPSIngress(t *testing.T) { return resp.StatusCode == http.StatusNotFound }, ingressWait, waitTick) - t.Logf("confirm Ingress %s still routes with matching SNI", ingress2.Name) + t.Log("confirm Ingress still routes with matching SNI") assert.Eventually(t, func() bool { resp, err := httpcStatic.Get("https://bar.example:443/bar") if err != nil { diff --git a/test/integration/ingress_test.go b/test/integration/ingress_test.go index 430baeb0f4..0f7f8f1af5 100644 --- a/test/integration/ingress_test.go +++ b/test/integration/ingress_test.go @@ -11,10 +11,13 @@ import ( "testing" "github.com/kong/go-kong/kong" + "github.com/kong/kubernetes-testing-framework/pkg/clusters" "github.com/kong/kubernetes-testing-framework/pkg/utils/kubernetes/generators" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" + netv1 "k8s.io/api/networking/v1" + netv1beta1 "k8s.io/api/networking/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -68,32 +71,33 @@ func TestIngressEssentials(t *testing.T) { }() t.Logf("creating an ingress for service %s with ingress.class %s", service.Name, ingressClass) - ingress := generators.NewIngressForService("/httpbin", map[string]string{ + kubernetesVersion, err := env.Cluster().Version() + require.NoError(t, err) + ingress := generators.NewIngressForServiceWithClusterVersion(kubernetesVersion, "/httpbin", map[string]string{ annotations.IngressClassKey: ingressClass, "konghq.com/strip-path": "true", }, service) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressEssentialsNamespace).Create(ctx, 
ingress, metav1.CreateOptions{}) - require.NoError(t, err) + require.NoError(t, clusters.DeployIngress(ctx, env.Cluster(), testIngressEssentialsNamespace, ingress)) defer func() { - t.Logf("ensuring that Ingress %s is cleaned up", ingress.Name) - if err := env.Cluster().Client().NetworkingV1().Ingresses(testIngressEssentialsNamespace).Delete(ctx, ingress.Name, metav1.DeleteOptions{}); err != nil { + t.Log("cleaning up Ingress resource") + if err := clusters.DeleteIngress(ctx, env.Cluster(), testIngressEssentialsNamespace, ingress); err != nil { if !errors.IsNotFound(err) { require.NoError(t, err) } } }() - t.Logf("waiting for updated ingress status to include IP") + t.Log("waiting for updated ingress status to include IP") require.Eventually(t, func() bool { - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressEssentialsNamespace).Get(ctx, ingress.Name, metav1.GetOptions{}) + lbstatus, err := clusters.GetIngressLoadbalancerStatus(ctx, env.Cluster(), testIngressEssentialsNamespace, ingress) if err != nil { return false } - return len(ingress.Status.LoadBalancer.Ingress) > 0 + return len(lbstatus.Ingress) > 0 }, ingressWait, waitTick) - t.Logf("waiting for routes from Ingress %s to be operational", ingress.Name) + t.Log("waiting for routes from Ingress to be operational") require.Eventually(t, func() bool { resp, err := httpc.Get(fmt.Sprintf("%s/httpbin", proxyURL)) if err != nil { @@ -113,14 +117,23 @@ func TestIngressEssentials(t *testing.T) { return false }, ingressWait, waitTick) - t.Logf("removing the ingress.class annotation %q from ingress %s", ingressClass, ingress.Name) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressEssentialsNamespace).Get(ctx, ingress.Name, metav1.GetOptions{}) - require.NoError(t, err) - delete(ingress.ObjectMeta.Annotations, annotations.IngressClassKey) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressEssentialsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) - require.NoError(t, err) - - t.Logf("verifying that removing the ingress.class annotation %q from ingress %s causes routes to disconnect", ingressClass, ingress.Name) + t.Logf("removing the ingress.class annotation %q from ingress", ingressClass) + switch obj := ingress.(type) { + case *netv1.Ingress: + ingress, err := env.Cluster().Client().NetworkingV1().Ingresses(testIngressEssentialsNamespace).Get(ctx, obj.Name, metav1.GetOptions{}) + require.NoError(t, err) + delete(ingress.ObjectMeta.Annotations, annotations.IngressClassKey) + _, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressEssentialsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) + require.NoError(t, err) + case *netv1beta1.Ingress: + ingress, err := env.Cluster().Client().NetworkingV1beta1().Ingresses(testIngressEssentialsNamespace).Get(ctx, obj.Name, metav1.GetOptions{}) + require.NoError(t, err) + delete(ingress.ObjectMeta.Annotations, annotations.IngressClassKey) + _, err = env.Cluster().Client().NetworkingV1beta1().Ingresses(testIngressEssentialsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) + require.NoError(t, err) + } + + t.Logf("verifying that removing the ingress.class annotation %q from ingress causes routes to disconnect", ingressClass) require.Eventually(t, func() bool { resp, err := httpc.Get(fmt.Sprintf("%s/httpbin", proxyURL)) if err != nil { @@ -131,14 +144,23 @@ func TestIngressEssentials(t *testing.T) { return expect404WithNoRoute(t, proxyURL.String(), resp) }, ingressWait, waitTick) - t.Logf("putting the 
ingress.class annotation %q back on ingress %s", ingressClass, ingress.Name) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressEssentialsNamespace).Get(ctx, ingress.Name, metav1.GetOptions{}) - require.NoError(t, err) - ingress.ObjectMeta.Annotations[annotations.IngressClassKey] = ingressClass - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressEssentialsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) - require.NoError(t, err) - - t.Logf("waiting for routes from Ingress %s to be operational after reintroducing ingress class annotation", ingress.Name) + t.Logf("putting the ingress.class annotation %q back on ingress", ingressClass) + switch obj := ingress.(type) { + case *netv1.Ingress: + ingress, err := env.Cluster().Client().NetworkingV1().Ingresses(testIngressEssentialsNamespace).Get(ctx, obj.Name, metav1.GetOptions{}) + require.NoError(t, err) + ingress.ObjectMeta.Annotations[annotations.IngressClassKey] = ingressClass + _, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressEssentialsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) + require.NoError(t, err) + case *netv1beta1.Ingress: + ingress, err := env.Cluster().Client().NetworkingV1beta1().Ingresses(testIngressEssentialsNamespace).Get(ctx, obj.Name, metav1.GetOptions{}) + require.NoError(t, err) + ingress.ObjectMeta.Annotations[annotations.IngressClassKey] = ingressClass + _, err = env.Cluster().Client().NetworkingV1beta1().Ingresses(testIngressEssentialsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) + require.NoError(t, err) + } + + t.Log("waiting for routes from Ingress to be operational after reintroducing ingress class annotation") require.Eventually(t, func() bool { resp, err := httpc.Get(fmt.Sprintf("%s/httpbin", proxyURL)) if err != nil { @@ -158,8 +180,8 @@ func TestIngressEssentials(t *testing.T) { return false }, ingressWait, waitTick) - t.Logf("deleting Ingress %s and waiting for routes to be torn down", ingress.Name) - require.NoError(t, env.Cluster().Client().NetworkingV1().Ingresses(testIngressEssentialsNamespace).Delete(ctx, ingress.Name, metav1.DeleteOptions{})) + t.Log("deleting Ingress and waiting for routes to be torn down") + require.NoError(t, clusters.DeleteIngress(ctx, env.Cluster(), testIngressEssentialsNamespace, ingress)) require.Eventually(t, func() bool { resp, err := httpc.Get(fmt.Sprintf("%s/httpbin", proxyURL)) if err != nil { @@ -172,6 +194,10 @@ func TestIngressEssentials(t *testing.T) { } func TestIngressClassNameSpec(t *testing.T) { + if clusterVersion.Major < uint64(2) && clusterVersion.Minor < uint64(19) { + t.Skip("ingress spec tests can not be properly validated against old clusters") + } + ctx := context.Background() t.Logf("creating namespace %s for testing", testIngressClassNameSpecNamespace) @@ -215,21 +241,27 @@ func TestIngressClassNameSpec(t *testing.T) { }() t.Logf("creating an ingress for service %s with ingress.class %s", service.Name, ingressClass) - ingress := generators.NewIngressForService("/httpbin", map[string]string{"konghq.com/strip-path": "true"}, service) - ingress.Spec.IngressClassName = kong.String(ingressClass) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressClassNameSpecNamespace).Create(ctx, ingress, metav1.CreateOptions{}) + kubernetesVersion, err := env.Cluster().Version() require.NoError(t, err) + ingress := generators.NewIngressForServiceWithClusterVersion(kubernetesVersion, "/httpbin", map[string]string{"konghq.com/strip-path": "true"}, service) + 
switch obj := ingress.(type) { + case *netv1.Ingress: + obj.Spec.IngressClassName = kong.String(ingressClass) + case *netv1beta1.Ingress: + obj.Spec.IngressClassName = kong.String(ingressClass) + } + require.NoError(t, clusters.DeployIngress(ctx, env.Cluster(), testIngressClassNameSpecNamespace, ingress)) defer func() { - t.Logf("ensuring that Ingress %s is cleaned up", ingress.Name) - if err := env.Cluster().Client().NetworkingV1().Ingresses(testIngressClassNameSpecNamespace).Delete(ctx, ingress.Name, metav1.DeleteOptions{}); err != nil { + t.Log("ensuring that Ingress is cleaned up") + if err := clusters.DeleteIngress(ctx, env.Cluster(), testIngressClassNameSpecNamespace, ingress); err != nil { if !errors.IsNotFound(err) { require.NoError(t, err) } } }() - t.Logf("waiting for routes from Ingress %s to be operational", ingress.Name) + t.Log("waiting for routes from Ingress to be operational") require.Eventually(t, func() bool { resp, err := httpc.Get(fmt.Sprintf("%s/httpbin", proxyURL)) if err != nil { @@ -249,14 +281,23 @@ func TestIngressClassNameSpec(t *testing.T) { return false }, ingressWait, waitTick) - t.Logf("removing the IngressClassName %q from ingress %s", ingressClass, ingress.Name) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressClassNameSpecNamespace).Get(ctx, ingress.Name, metav1.GetOptions{}) - require.NoError(t, err) - ingress.Spec.IngressClassName = nil - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressClassNameSpecNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) - require.NoError(t, err) - - t.Logf("verifying that removing the IngressClassName %q from ingress %s causes routes to disconnect", ingressClass, ingress.Name) + t.Logf("removing the IngressClassName %q from ingress", ingressClass) + switch obj := ingress.(type) { + case *netv1.Ingress: + ingress, err := env.Cluster().Client().NetworkingV1().Ingresses(testIngressClassNameSpecNamespace).Get(ctx, obj.Name, metav1.GetOptions{}) + require.NoError(t, err) + ingress.Spec.IngressClassName = nil + _, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressClassNameSpecNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) + require.NoError(t, err) + case *netv1beta1.Ingress: + ingress, err := env.Cluster().Client().NetworkingV1beta1().Ingresses(testIngressClassNameSpecNamespace).Get(ctx, obj.Name, metav1.GetOptions{}) + require.NoError(t, err) + ingress.Spec.IngressClassName = nil + _, err = env.Cluster().Client().NetworkingV1beta1().Ingresses(testIngressClassNameSpecNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) + require.NoError(t, err) + } + + t.Logf("verifying that removing the IngressClassName %q from ingress causes routes to disconnect", ingressClass) require.Eventually(t, func() bool { resp, err := httpc.Get(fmt.Sprintf("%s/httpbin", proxyURL)) if err != nil { @@ -267,14 +308,23 @@ func TestIngressClassNameSpec(t *testing.T) { return expect404WithNoRoute(t, proxyURL.String(), resp) }, ingressWait, waitTick) - t.Logf("putting the IngressClassName %q back on ingress %s", ingressClass, ingress.Name) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressClassNameSpecNamespace).Get(ctx, ingress.Name, metav1.GetOptions{}) - require.NoError(t, err) - ingress.Spec.IngressClassName = kong.String(ingressClass) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressClassNameSpecNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) - require.NoError(t, err) - - t.Logf("waiting for routes from Ingress 
%s to be operational after reintroducing ingress class annotation", ingress.Name) + t.Logf("putting the IngressClassName %q back on ingress", ingressClass) + switch obj := ingress.(type) { + case *netv1.Ingress: + ingress, err := env.Cluster().Client().NetworkingV1().Ingresses(testIngressClassNameSpecNamespace).Get(ctx, obj.Name, metav1.GetOptions{}) + require.NoError(t, err) + ingress.Spec.IngressClassName = kong.String(ingressClass) + _, err = env.Cluster().Client().NetworkingV1().Ingresses(testIngressClassNameSpecNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) + require.NoError(t, err) + case *netv1beta1.Ingress: + ingress, err := env.Cluster().Client().NetworkingV1beta1().Ingresses(testIngressClassNameSpecNamespace).Get(ctx, obj.Name, metav1.GetOptions{}) + require.NoError(t, err) + ingress.Spec.IngressClassName = kong.String(ingressClass) + _, err = env.Cluster().Client().NetworkingV1beta1().Ingresses(testIngressClassNameSpecNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) + require.NoError(t, err) + } + + t.Log("waiting for routes from Ingress to be operational after reintroducing ingress class annotation") require.Eventually(t, func() bool { resp, err := httpc.Get(fmt.Sprintf("%s/httpbin", proxyURL)) if err != nil { @@ -294,8 +344,8 @@ func TestIngressClassNameSpec(t *testing.T) { return false }, ingressWait, waitTick) - t.Logf("deleting Ingress %s and waiting for routes to be torn down", ingress.Name) - require.NoError(t, env.Cluster().Client().NetworkingV1().Ingresses(testIngressClassNameSpecNamespace).Delete(ctx, ingress.Name, metav1.DeleteOptions{})) + t.Log("deleting Ingress and waiting for routes to be torn down") + require.NoError(t, clusters.DeleteIngress(ctx, env.Cluster(), testIngressClassNameSpecNamespace, ingress)) require.Eventually(t, func() bool { resp, err := httpc.Get(fmt.Sprintf("%s/httpbin", proxyURL)) if err != nil { @@ -354,34 +404,34 @@ func TestIngressNamespaces(t *testing.T) { }() t.Logf("creating an ingress for service %s with ingress.class %s", service.Name, ingressClass) - elsewhereIngress := generators.NewIngressForService("/elsewhere", map[string]string{ + kubernetesVersion, err := env.Cluster().Version() + require.NoError(t, err) + elsewhereIngress := generators.NewIngressForServiceWithClusterVersion(kubernetesVersion, "/elsewhere", map[string]string{ annotations.IngressClassKey: ingressClass, "konghq.com/strip-path": "true", }, service) - nowhereIngress := generators.NewIngressForService("/nowhere", map[string]string{ + nowhereIngress := generators.NewIngressForServiceWithClusterVersion(kubernetesVersion, "/nowhere", map[string]string{ annotations.IngressClassKey: ingressClass, "konghq.com/strip-path": "true", }, service) - elsewhereIngress, err = env.Cluster().Client().NetworkingV1().Ingresses(elsewhere).Create(ctx, elsewhereIngress, metav1.CreateOptions{}) - require.NoError(t, err) - nowhereIngress, err = env.Cluster().Client().NetworkingV1().Ingresses(nowhere).Create(ctx, nowhereIngress, metav1.CreateOptions{}) - require.NoError(t, err) + require.NoError(t, clusters.DeployIngress(ctx, env.Cluster(), elsewhere, elsewhereIngress)) + require.NoError(t, clusters.DeployIngress(ctx, env.Cluster(), nowhere, nowhereIngress)) defer func() { - t.Logf("ensuring that Ingress %s is cleaned up", elsewhereIngress.Name) - if err := env.Cluster().Client().NetworkingV1().Ingresses(elsewhere).Delete(ctx, elsewhereIngress.Name, metav1.DeleteOptions{}); err != nil { + t.Log("ensuring that Ingress resources are cleaned up") + if err := 
clusters.DeleteIngress(ctx, env.Cluster(), elsewhere, elsewhereIngress); err != nil {
 			if !errors.IsNotFound(err) {
 				require.NoError(t, err)
 			}
 		}
-		if err := env.Cluster().Client().NetworkingV1().Ingresses(nowhere).Delete(ctx, nowhereIngress.Name, metav1.DeleteOptions{}); err != nil {
+		if err := clusters.DeleteIngress(ctx, env.Cluster(), nowhere, nowhereIngress); err != nil {
 			if !errors.IsNotFound(err) {
 				require.NoError(t, err)
 			}
 		}
 	}()
 
-	t.Logf("waiting for routes from Ingress %s to be operational", elsewhereIngress.Name)
+	t.Log("waiting for routes from Ingress to be operational")
 	require.Eventually(t, func() bool {
 		resp, err := httpc.Get(fmt.Sprintf("%s/elsewhere", proxyURL))
 		if err != nil {
diff --git a/test/integration/knative_test.go b/test/integration/knative_test.go
index d26359ec03..a5bf43ded0 100644
--- a/test/integration/knative_test.go
+++ b/test/integration/knative_test.go
@@ -58,7 +58,7 @@ func TestKnativeIngress(t *testing.T) {
 	require.Eventually(t, func() bool {
 		err := installKnativeSrv(ctx, t)
 		if err != nil {
-			t.Logf("checking knativing webhook readiness.")
+			t.Log("checking knative webhook readiness.")
 			return false
 		}
 		return true
@@ -89,7 +89,7 @@ func configKnativeNetwork(ctx context.Context, cluster clusters.Cluster, t *test
 		return err
 	}
 
-	t.Logf("successfully configured knative network.")
+	t.Log("successfully configured knative network.")
 	return nil
 }
 
@@ -122,11 +122,16 @@ func installKnativeSrv(ctx context.Context, t *testing.T) error {
 		},
 	}
 	knativeCli, err := knativeversioned.NewForConfig(env.Cluster().Config())
+	if err != nil {
+		return fmt.Errorf("failed to create knative client. %v", err)
+	}
+
+	_, err = knativeCli.ServingV1().Services("default").Create(ctx, tobeDeployedService, metav1.CreateOptions{})
 	if err != nil {
 		return fmt.Errorf("failed to create knative service. %v", err)
 	}
-	t.Logf("successfully installed knative service.")
+
+	t.Log("successfully installed knative service.")
 	return nil
 }
 
@@ -152,7 +157,7 @@ func configKnativeDomain(ctx context.Context, proxy string, cluster clusters.Clu
 		t.Logf("failed updating config map %v", err)
 		return err
 	}
-	t.Logf("successfully update knative config domain.")
+	t.Log("successfully updated knative config domain.")
 	return nil
 }
 
@@ -170,7 +175,7 @@
 	conds := curIng.Status.Status.GetConditions()
 	for _, cond := range conds {
 		if cond.Type == apis.ConditionReady && cond.Status == v1.ConditionTrue {
-			t.Logf("knative ingress status is ready.")
+			t.Log("knative ingress status is ready.")
 			return true
 		}
 	}
@@ -209,7 +214,7 @@
 		}
 		bodyString := string(bodyBytes)
 		t.Logf(bodyString)
-		t.Logf("service is successfully accessed through kong.")
+		t.Log("service is successfully accessed through kong.")
 		return true
 	}
 	return false
@@ -236,7 +241,7 @@
 			}
 		}
 
-		t.Logf("All knative pods are up and ready.")
+		t.Log("All knative pods are up and ready.")
 		return true
 	}, 60*time.Second, 1*time.Second, true)
 
diff --git a/test/integration/kongingress_test.go b/test/integration/kongingress_test.go
index 547506bdc9..5955108ee3 100644
--- a/test/integration/kongingress_test.go
+++ b/test/integration/kongingress_test.go
@@ -10,6 +10,7 @@ import (
 	"time"
 
 	"github.com/kong/go-kong/kong"
+	"github.com/kong/kubernetes-testing-framework/pkg/clusters"
 	"github.com/kong/kubernetes-testing-framework/pkg/utils/kubernetes/generators"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -52,16 +53,18 @@ func TestKongIngressEssentials(t *testing.T) {
 	}()
 
 	t.Logf("routing to service %s via Ingress", service.Name)
-	ingress := generators.NewIngressForService("/httpbin", map[string]string{
+	kubernetesVersion, err := env.Cluster().Version()
+	require.NoError(t, err)
+	ingress := generators.NewIngressForServiceWithClusterVersion(kubernetesVersion, "/httpbin", map[string]string{
 		annotations.IngressClassKey: ingressClass,
 		"konghq.com/strip-path":     "true",
 	}, service)
-	ingress, err = env.Cluster().Client().NetworkingV1().Ingresses("default").Create(ctx, ingress, metav1.CreateOptions{})
-	assert.NoError(t, err)
+	require.NoError(t, clusters.DeployIngress(ctx, env.Cluster(), corev1.NamespaceDefault, ingress))
 
 	defer func() {
-		t.Logf("ensuring that Ingress %s is cleaned up", ingress.Name)
-		assert.NoError(t, env.Cluster().Client().NetworkingV1().Ingresses("default").Delete(ctx, ingress.Name, metav1.DeleteOptions{}))
+		t.Log("ensuring that Ingress resources are cleaned up")
+		assert.NoError(t, clusters.DeleteIngress(ctx, env.Cluster(), corev1.NamespaceDefault, ingress))
+
 	}()
 
 	t.Logf("applying service overrides to Service %s via KongIngress", service.Name)
@@ -89,7 +92,7 @@
 		}
 	}()
 
-	t.Logf("waiting for routes from Ingress %s to be operational and that overrides are in place", ingress.Name)
+	t.Log("waiting for routes from Ingress to be operational and that overrides are in place")
 	httpc := http.Client{Timeout: time.Second * 10} // this timeout should never be hit, we expect a 504 from the proxy within 1000ms
 	assert.Eventually(t, func() bool {
 		resp, err := httpc.Get(fmt.Sprintf("%s/httpbin/delay/5", proxyURL))
diff --git a/test/integration/plugin_test.go 
b/test/integration/plugin_test.go index 8f92e66ef9..d151fc8fbf 100644 --- a/test/integration/plugin_test.go +++ b/test/integration/plugin_test.go @@ -10,10 +10,13 @@ import ( "strings" "testing" + "github.com/kong/kubernetes-testing-framework/pkg/clusters" "github.com/kong/kubernetes-testing-framework/pkg/utils/kubernetes/generators" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" + netv1 "k8s.io/api/networking/v1" + netv1beta1 "k8s.io/api/networking/v1beta1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -51,7 +54,7 @@ func TestPluginEssentials(t *testing.T) { container := generators.NewContainer("httpbin", httpBinImage, 80) deployment := generators.NewDeploymentForContainer(container) deployment, err = env.Cluster().Client().AppsV1().Deployments(testPluginsNamespace).Create(ctx, deployment, metav1.CreateOptions{}) - assert.NoError(t, err) + require.NoError(t, err) defer func() { t.Logf("cleaning up the deployment %s", deployment.Name) @@ -61,7 +64,7 @@ func TestPluginEssentials(t *testing.T) { t.Logf("exposing deployment %s via service", deployment.Name) service := generators.NewServiceForDeployment(deployment, corev1.ServiceTypeLoadBalancer) _, err = env.Cluster().Client().CoreV1().Services(testPluginsNamespace).Create(ctx, service, metav1.CreateOptions{}) - assert.NoError(t, err) + require.NoError(t, err) defer func() { t.Logf("cleaning up the service %s", service.Name) @@ -69,23 +72,24 @@ func TestPluginEssentials(t *testing.T) { }() t.Logf("creating an ingress for service %s with ingress.class %s", service.Name, ingressClass) - ingress := generators.NewIngressForService("/httpbin", map[string]string{ + kubernetesVersion, err := env.Cluster().Version() + require.NoError(t, err) + ingress := generators.NewIngressForServiceWithClusterVersion(kubernetesVersion, "/httpbin", map[string]string{ annotations.IngressClassKey: ingressClass, "konghq.com/strip-path": "true", }, service) - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Create(ctx, ingress, metav1.CreateOptions{}) - assert.NoError(t, err) + require.NoError(t, clusters.DeployIngress(ctx, env.Cluster(), testPluginsNamespace, ingress)) defer func() { - t.Logf("ensuring that Ingress %s is cleaned up", ingress.Name) - if err := env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Delete(ctx, ingress.Name, metav1.DeleteOptions{}); err != nil { + t.Log("ensuring that Ingress is cleaned up") + if err := clusters.DeleteIngress(ctx, env.Cluster(), testPluginsNamespace, ingress); err != nil { if !errors.IsNotFound(err) { require.NoError(t, err) } } }() - t.Logf("waiting for routes from Ingress %s to be operational", ingress.Name) + t.Log("waiting for routes from Ingress to be operational") assert.Eventually(t, func() bool { resp, err := httpc.Get(fmt.Sprintf("%s/httpbin", proxyURL)) if err != nil { @@ -128,24 +132,47 @@ func TestPluginEssentials(t *testing.T) { }, } c, err := clientset.NewForConfig(env.Cluster().Config()) - assert.NoError(t, err) + require.NoError(t, err) kongplugin, err = c.ConfigurationV1().KongPlugins(testPluginsNamespace).Create(ctx, kongplugin, metav1.CreateOptions{}) - assert.NoError(t, err) + require.NoError(t, err) kongclusterplugin, err = c.ConfigurationV1().KongClusterPlugins().Create(ctx, kongclusterplugin, metav1.CreateOptions{}) - assert.NoError(t, err) + require.NoError(t, err) - ingress, err 
= env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Get(ctx, ingress.Name, metav1.GetOptions{}) - assert.NoError(t, err) + defer func() { + t.Log("cleaning up plugins") + if err := c.ConfigurationV1().KongPlugins(testPluginsNamespace).Delete(ctx, kongplugin.Name, metav1.DeleteOptions{}); err != nil { + if !errors.IsNotFound(err) { + assert.NoError(t, err) + } + } + if err := c.ConfigurationV1().KongClusterPlugins().Delete(ctx, kongclusterplugin.Name, metav1.DeleteOptions{}); err != nil { + if !errors.IsNotFound(err) { + assert.NoError(t, err) + } + } + }() - t.Logf("updating Ingress %s to use plugin %s", ingress.Name, kongplugin.Name) + t.Logf("updating Ingress to use plugin %s", kongplugin.Name) require.Eventually(t, func() bool { - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Get(ctx, ingress.Name, metav1.GetOptions{}) - if err != nil { - return false + switch obj := ingress.(type) { + case *netv1.Ingress: + ingress, err := env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Get(ctx, obj.Name, metav1.GetOptions{}) + if err != nil { + return false + } + ingress.ObjectMeta.Annotations[annotations.AnnotationPrefix+annotations.PluginsKey] = kongplugin.Name + _, err = env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) + return err == nil + case *netv1beta1.Ingress: + ingress, err := env.Cluster().Client().NetworkingV1beta1().Ingresses(testPluginsNamespace).Get(ctx, obj.Name, metav1.GetOptions{}) + if err != nil { + return false + } + ingress.ObjectMeta.Annotations[annotations.AnnotationPrefix+annotations.PluginsKey] = kongplugin.Name + _, err = env.Cluster().Client().NetworkingV1beta1().Ingresses(testPluginsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) + return err == nil } - ingress.ObjectMeta.Annotations[annotations.AnnotationPrefix+annotations.PluginsKey] = kongplugin.Name - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) - return err == nil + return false }, ingressWait, waitTick) t.Logf("validating that plugin %s was successfully configured", kongplugin.Name) @@ -159,15 +186,27 @@ func TestPluginEssentials(t *testing.T) { return resp.StatusCode == http.StatusTeapot }, ingressWait, waitTick) - t.Logf("updating Ingress %s to use cluster plugin %s", ingress.Name, kongclusterplugin.Name) + t.Logf("updating Ingress to use cluster plugin %s", kongclusterplugin.Name) require.Eventually(t, func() bool { - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Get(ctx, ingress.Name, metav1.GetOptions{}) - if err != nil { - return false + switch obj := ingress.(type) { + case *netv1.Ingress: + ingress, err := env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Get(ctx, obj.Name, metav1.GetOptions{}) + if err != nil { + return false + } + ingress.ObjectMeta.Annotations[annotations.AnnotationPrefix+annotations.PluginsKey] = kongclusterplugin.Name + _, err = env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) + return err == nil + case *netv1beta1.Ingress: + ingress, err := env.Cluster().Client().NetworkingV1beta1().Ingresses(testPluginsNamespace).Get(ctx, obj.Name, metav1.GetOptions{}) + if err != nil { + return false + } + ingress.ObjectMeta.Annotations[annotations.AnnotationPrefix+annotations.PluginsKey] = kongclusterplugin.Name + _, err = 
env.Cluster().Client().NetworkingV1beta1().Ingresses(testPluginsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) + return err == nil } - ingress.ObjectMeta.Annotations[annotations.AnnotationPrefix+annotations.PluginsKey] = kongclusterplugin.Name - ingress, err = env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Update(ctx, ingress, metav1.UpdateOptions{}) - return err == nil + return false }, ingressWait, waitTick) t.Logf("validating that clusterplugin %s was successfully configured", kongclusterplugin.Name) @@ -181,8 +220,8 @@ func TestPluginEssentials(t *testing.T) { return resp.StatusCode == http.StatusUnavailableForLegalReasons }, ingressWait, waitTick) - t.Logf("deleting Ingress %s and waiting for routes to be torn down", ingress.Name) - assert.NoError(t, env.Cluster().Client().NetworkingV1().Ingresses(testPluginsNamespace).Delete(ctx, ingress.Name, metav1.DeleteOptions{})) + t.Log("deleting Ingress and waiting for routes to be torn down") + require.NoError(t, clusters.DeleteIngress(ctx, env.Cluster(), testPluginsNamespace, ingress)) assert.Eventually(t, func() bool { resp, err := httpc.Get(fmt.Sprintf("%s/httpbin", proxyURL)) if err != nil { @@ -190,7 +229,6 @@ func TestPluginEssentials(t *testing.T) { return false } defer resp.Body.Close() - t.Logf("WARNING: endpoint %s returned response STATUS=(%d)", fmt.Sprintf("%s/httpbin", proxyURL), resp.StatusCode) return expect404WithNoRoute(t, proxyURL.String(), resp) }, ingressWait, waitTick) } diff --git a/test/integration/suite_test.go b/test/integration/suite_test.go index 2d3b462954..314175328c 100644 --- a/test/integration/suite_test.go +++ b/test/integration/suite_test.go @@ -43,7 +43,13 @@ const ( // httpcTimeout is the default client timeout for HTTP clients used in tests. httpcTimeout = time.Second * 3 +) +// ----------------------------------------------------------------------------- +// Testing Variables +// ----------------------------------------------------------------------------- + +var ( // httpBinImage is the container image name we use for deploying the "httpbin" HTTP testing tool. // if you need a simple HTTP server for tests you're writing, use this and check the documentation. // See: https://github.com/postmanlabs/httpbin @@ -57,13 +63,7 @@ const ( // controllerNamespace is the Kubernetes namespace where the controller is deployed controllerNamespace = "kong-system" -) -// ----------------------------------------------------------------------------- -// Testing Variables -// ----------------------------------------------------------------------------- - -var ( // httpc is the default HTTP client to use for tests httpc = http.Client{Timeout: httpcTimeout} @@ -93,6 +93,9 @@ var ( // proxyUDPURL provides access to the UDP API endpoint for the Kong Addon which is deployed to the test environment's cluster. proxyUDPURL *url.URL + + // clusterVersion is a convenience var where the found version of the env.Cluster is stored. 
+ clusterVersion semver.Version ) // ----------------------------------------------------------------------------- @@ -248,13 +251,13 @@ func TestMain(m *testing.M) { } fmt.Printf("INFO: running final testing environment checks") - serverVersion, err := env.Cluster().Client().ServerVersion() + clusterVersion, err = env.Cluster().Version() if err != nil { fmt.Fprintf(os.Stderr, "Error: could not retrieve server version for cluster: %s", err) os.Exit(ExitCodeCantCreateCluster) } - fmt.Printf("INFO: testing environment is ready KUBERNETES_VERSION=(%v): running tests\n", serverVersion) + fmt.Printf("INFO: testing environment is ready KUBERNETES_VERSION=(%v): running tests\n", clusterVersion) code := m.Run() os.Exit(code) } From d217fcb8d5fd50db8d024c09c8cd95c81aa3f708 Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Mon, 2 Aug 2021 12:45:11 -0400 Subject: [PATCH 3/4] chore: update ktf to v0.4.0 --- go.mod | 2 +- go.sum | 17 ++++++++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index ad877eb59b..87c119a27e 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/huandu/xstrings v1.3.2 // indirect github.com/kong/deck v1.7.0 github.com/kong/go-kong v0.20.0 - github.com/kong/kubernetes-testing-framework v0.3.3 + github.com/kong/kubernetes-testing-framework v0.4.0 github.com/lithammer/dedent v1.1.0 github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.1 diff --git a/go.sum b/go.sum index ba89a7e7aa..edabc77205 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,10 @@ cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.88.0 h1:MZ2cf9Elnv1wqccq8ooKO2MqHQLc+ChCp/+QWObCpxg= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= cloud.google.com/go v0.88.0/go.mod h1:dnKwfYbP9hQhefiUvpbcAyoGSHUrOxR20JVElLiUvEY= +cloud.google.com/go v0.89.0 h1:ZT4GU+y59fC95Mfdn2RtxuzN2gc69dzlVevQK8Ykyqs= +cloud.google.com/go v0.89.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -542,7 +544,9 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210715191844-86eeefc3e471/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -673,8 +677,8 @@ github.com/kong/deck v1.7.0/go.mod h1:o2letQaSpXVnDNoXehEibOF6q7v46qtbsKOCC+1owA github.com/kong/go-kong v0.19.0/go.mod h1:HyNtOxzh/tzmOV//ccO5NAdmrCnq8b86YUPjmdy5aog= github.com/kong/go-kong v0.20.0 h1:KiPsJORNs9UbjU8m1Tr3MZkIiKkzcVHIz0EYeT7SD3c= github.com/kong/go-kong v0.20.0/go.mod h1:eQP22bzJVeiEH77hYdWD019WjecJFVFHm77kPXquC28= -github.com/kong/kubernetes-testing-framework v0.3.3 h1:UgWjsApvxD/86DnwLQnr6OIk/sgDi+C2lcgFEFw7Buk= -github.com/kong/kubernetes-testing-framework v0.3.3/go.mod h1:O9ARzRPAnEBURREH4fiGmCBhEUxj3HRIyUEjR9eYIVU= +github.com/kong/kubernetes-testing-framework v0.4.0 h1:QFUUiNSxnKfOKGbgmooXMhFnL+CQRxQcLFVxKCKB8o0= +github.com/kong/kubernetes-testing-framework v0.4.0/go.mod h1:Rh4H0hY5t7hSkaaIqO7lwx/jRykOGaQeI1JgTg5p9UU= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1460,6 +1464,7 @@ google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1Avk google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/api v0.52.0 h1:m5FLEd6dp5CU1F0tMWyqDi2XjchviIz8ntzOSz7w8As= google.golang.org/api v0.52.0/go.mod h1:Him/adpjt0sxtkWViy0b6xyKW/SD71CwdJ7HqJo7SrU= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -1525,10 +1530,12 @@ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxH google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210721163202-f1cecdd8b78a/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210722135532-667f2b7c528f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210726200206-e7812ac95cc0 h1:VpRFBmFg/ol+rqJnkKLPjVebPNFbSxuj17B7bH1xMc8= -google.golang.org/genproto v0.0.0-20210726200206-e7812ac95cc0/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f h1:4m1jFN3fHeKo0UvpraW2ipO2O0rgp5w2ugXeggtecAk= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= From b057d745178d4f23f9f5e3c0bae83e7987a547d9 Mon Sep 17 00:00:00 2001 From: Shane Utt Date: Mon, 2 Aug 2021 12:45:24 -0400 Subject: [PATCH 4/4] feat: add 1.17.x and 1.18.x release testing --- .github/workflows/release.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 5fa7ae8908..448b111f23 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -47,6 +47,8 @@ jobs: strategy: matrix: minor: + - '17' + - '18' - '19' - '20' dbmode:
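
---

Note (illustrative, not part of the patches above): the test changes in this series stop assuming a concrete Ingress type and instead type-switch on the object, so the same test body works against clusters serving networking.k8s.io/v1 (Kubernetes v1.19+) and against the networking.k8s.io/v1beta1 clusters that the new '17'/'18' matrix entries exercise. A minimal sketch of that pattern follows; the helper name (annotateIngress) and package are hypothetical, and it assumes a client-go version that still ships the v1beta1 networking client:

    package testutils

    import (
        "context"
        "fmt"

        netv1 "k8s.io/api/networking/v1"
        netv1beta1 "k8s.io/api/networking/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // annotateIngress re-fetches an Ingress and sets a single annotation on it,
    // picking the typed client that matches the object's concrete API version,
    // mirroring the type-switch used in the updated TestPluginEssentials.
    func annotateIngress(ctx context.Context, c kubernetes.Interface, namespace string, obj interface{}, key, value string) error {
        switch ing := obj.(type) {
        case *netv1.Ingress:
            latest, err := c.NetworkingV1().Ingresses(namespace).Get(ctx, ing.Name, metav1.GetOptions{})
            if err != nil {
                return err
            }
            if latest.Annotations == nil {
                latest.Annotations = map[string]string{}
            }
            latest.Annotations[key] = value
            _, err = c.NetworkingV1().Ingresses(namespace).Update(ctx, latest, metav1.UpdateOptions{})
            return err
        case *netv1beta1.Ingress:
            latest, err := c.NetworkingV1beta1().Ingresses(namespace).Get(ctx, ing.Name, metav1.GetOptions{})
            if err != nil {
                return err
            }
            if latest.Annotations == nil {
                latest.Annotations = map[string]string{}
            }
            latest.Annotations[key] = value
            _, err = c.NetworkingV1beta1().Ingresses(namespace).Update(ctx, latest, metav1.UpdateOptions{})
            return err
        default:
            return fmt.Errorf("unsupported Ingress type %T", obj)
        }
    }

Re-fetching inside the retry loop (as the tests do with require.Eventually) avoids update conflicts from stale resourceVersions.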
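The clusterVersion variable added to suite_test.go also makes version-conditional tests straightforward. A hypothetical guard, assuming it lives in the same integration package so it can read clusterVersion (a blang/semver Version):

    // skipIfLegacyCluster skips tests that require networking.k8s.io/v1, which
    // is only served by Kubernetes v1.19 and newer.
    func skipIfLegacyCluster(t *testing.T) {
        t.Helper()
        if clusterVersion.Major == 1 && clusterVersion.Minor < 19 {
            t.Skipf("cluster v%s does not serve networking.k8s.io/v1", clusterVersion)
        }
    }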