diff --git a/.github/workflows/kind-e2e.yaml b/.github/workflows/kind-e2e.yaml
index 6f18ae1d8acb..3154421b6afc 100644
--- a/.github/workflows/kind-e2e.yaml
+++ b/.github/workflows/kind-e2e.yaml
@@ -97,6 +97,7 @@ jobs:
         ingress:
         - kourier
+        - kourier-tls
         - istio
         - contour
         # Disabled due to consistent failures
@@ -132,6 +133,10 @@
         - ingress: istio
           namespace-resources: virtualservices
 
+        - ingress: kourier-tls
+          ingress-class: kourier
+          enable-tls: 1
+
         - test-suite: runtime
           test-path: ./test/conformance/runtime/...
@@ -144,6 +149,7 @@
     env:
      KIND: 1
      INGRESS_CLASS: ${{ matrix.ingress-class || matrix.ingress }}.ingress.networking.knative.dev
+     ENABLE_TLS: ${{ matrix.enable-tls || 0 }}
 
    steps:
    - name: Set up Go 1.17.x
diff --git a/cmd/activator/main.go b/cmd/activator/main.go
index 1ae3c9c640d2..3d2633023d38 100644
--- a/cmd/activator/main.go
+++ b/cmd/activator/main.go
@@ -18,6 +18,7 @@ package main
 
 import (
 	"context"
+	"crypto/tls"
 	"errors"
 	"fmt"
 	"log"
@@ -241,6 +242,31 @@ func main() {
 		}(name, server)
 	}
 
+	// Enable TLS server when activator-cert-secret is specified.
+	// At this moment, the activator with TLS does not disable HTTP.
+	// See also https://github.com/knative/serving/issues/12808.
+	if networkConfig.ActivatorCertSecret != "" {
+		secret, err := kubeClient.CoreV1().Secrets(system.Namespace()).Get(ctx, networkConfig.ActivatorCertSecret, metav1.GetOptions{})
+		if err != nil {
+			logger.Fatalw("failed to get secret", zap.Error(err))
+		}
+		cert, err := tls.X509KeyPair(secret.Data["tls.crt"], secret.Data["tls.key"])
+		if err != nil {
+			logger.Fatalw("failed to load certs", zap.Error(err))
+		}
+
+		// TODO: Implement the secret (certificate) rotation like knative.dev/pkg/webhook/certificates/.
+		// Also, the current activator must be restarted when updating the secret.
+		name, server := "https", pkgnet.NewServer(":"+strconv.Itoa(networking.BackendHTTPSPort), ah)
+		go func(name string, s *http.Server) {
+			s.TLSConfig = &tls.Config{Certificates: []tls.Certificate{cert}, MinVersion: tls.VersionTLS12}
+			// Don't forward ErrServerClosed as that indicates we're already shutting down.
+			if err := s.ListenAndServeTLS("", ""); err != nil && !errors.Is(err, http.ErrServerClosed) {
+				errCh <- fmt.Errorf("%s server failed: %w", name, err)
+			}
+		}(name, server)
+	}
+
 	// Wait for the signal to drain.
 	select {
 	case <-sigCtx.Done():
diff --git a/config/core/deployments/activator.yaml b/config/core/deployments/activator.yaml
index 0a5a5b0dbcd2..0f69a980848a 100644
--- a/config/core/deployments/activator.yaml
+++ b/config/core/deployments/activator.yaml
@@ -151,4 +151,7 @@ spec:
   - name: http2
     port: 81
     targetPort: 8013
+  - name: https
+    port: 443
+    targetPort: 8112
   type: ClusterIP
diff --git a/pkg/networking/constants.go b/pkg/networking/constants.go
index e85118bf939b..0fe1e5ea1048 100644
--- a/pkg/networking/constants.go
+++ b/pkg/networking/constants.go
@@ -26,6 +26,9 @@ const (
 	// BackendHTTP2Port is the backend, i.e. `targetPort` that we setup for HTTP/2 services.
 	BackendHTTP2Port = 8013
 
+	// BackendHTTPSPort is the backend, i.e. `targetPort` that we setup for HTTPS services.
+	BackendHTTPSPort = 8112
+
 	// QueueAdminPort specifies the port number for
 	// health check and lifecycle hooks for queue-proxy.
 	QueueAdminPort = 8022
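Note: with the hunks above, the activator terminates TLS on container port 8112 (networking.BackendHTTPSPort), exposed as port 443 on the activator-service Service, using the keypair read from the secret named by activator-cert-secret. Below is a minimal, illustrative sketch of how an in-cluster client could exercise that port, assuming the CA and the "knative" SAN produced by test/generate-cert.sh further down in this diff; the dial address and CA file path are assumptions for illustration, not part of the change.

	// Illustrative only: dial the activator's new HTTPS port and verify its
	// certificate against the CA stored in the serving-ca secret.
	package main

	import (
		"crypto/tls"
		"crypto/x509"
		"fmt"
		"log"
		"os"
	)

	func main() {
		caPEM, err := os.ReadFile("/tmp/ca.crt") // assumed path to the serving-ca root certificate
		if err != nil {
			log.Fatal(err)
		}
		pool := x509.NewCertPool()
		if !pool.AppendCertsFromPEM(caPEM) {
			log.Fatal("failed to parse CA certificate")
		}

		// Service name/namespace assumed from config/core/deployments/activator.yaml.
		conn, err := tls.Dial("tcp", "activator-service.knative-serving.svc:443", &tls.Config{
			RootCAs:    pool,
			ServerName: "knative", // must match the SAN in the server certificate
			MinVersion: tls.VersionTLS12,
		})
		if err != nil {
			log.Fatal("TLS handshake failed: ", err)
		}
		defer conn.Close()
		fmt.Println("server certificate SANs:", conn.ConnectionState().PeerCertificates[0].DNSNames)
	}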
diff --git a/pkg/reconciler/route/resources/ingress.go b/pkg/reconciler/route/resources/ingress.go
index 13c272654d56..edf9a817a204 100644
--- a/pkg/reconciler/route/resources/ingress.go
+++ b/pkg/reconciler/route/resources/ingress.go
@@ -135,6 +135,7 @@ func makeIngressSpec(
 	rules := make([]netv1alpha1.IngressRule, 0, len(names))
 
 	featuresConfig := config.FromContextOrDefaults(ctx).Features
+	networkConfig := config.FromContextOrDefaults(ctx).Network
 
 	for _, name := range names {
 		visibilities := []netv1alpha1.IngressVisibility{netv1alpha1.IngressVisibilityClusterLocal}
@@ -148,7 +149,7 @@ func makeIngressSpec(
 			return netv1alpha1.IngressSpec{}, err
 		}
 		rule := makeIngressRule(domains, r.Namespace,
-			visibility, tc.Targets[name], ro.RolloutsByTag(name))
+			visibility, tc.Targets[name], ro.RolloutsByTag(name), networkConfig.ActivatorCA)
 		if featuresConfig.TagHeaderBasedRouting == apicfg.Enabled {
 			if rule.HTTP.Paths[0].AppendHeaders == nil {
 				rule.HTTP.Paths[0].AppendHeaders = make(map[string]string, 1)
@@ -170,7 +171,7 @@ func makeIngressSpec(
 			// Since names are sorted `DefaultTarget == ""` is the first one,
 			// so just pass the subslice.
 			rule.HTTP.Paths = append(
-				makeTagBasedRoutingIngressPaths(r.Namespace, tc, ro, names[1:]), rule.HTTP.Paths...)
+				makeTagBasedRoutingIngressPaths(r.Namespace, tc, ro, networkConfig.ActivatorCA, names[1:]), rule.HTTP.Paths...)
 		} else {
 			// If a request is routed by a tag-attached hostname instead of the tag header,
 			// the request may not have the tag header "Knative-Serving-Tag",
@@ -263,24 +264,25 @@ func MakeACMEIngressPaths(acmeChallenges []netv1alpha1.HTTP01Challenge, domains
 func makeIngressRule(domains []string, ns string,
 	visibility netv1alpha1.IngressVisibility,
 	targets traffic.RevisionTargets,
-	roCfgs []*traffic.ConfigurationRollout) netv1alpha1.IngressRule {
+	roCfgs []*traffic.ConfigurationRollout,
+	activatorCA string) netv1alpha1.IngressRule {
 	return netv1alpha1.IngressRule{
 		Hosts:      domains,
 		Visibility: visibility,
 		HTTP: &netv1alpha1.HTTPIngressRuleValue{
 			Paths: []netv1alpha1.HTTPIngressPath{
-				*makeBaseIngressPath(ns, targets, roCfgs),
+				*makeBaseIngressPath(ns, targets, roCfgs, activatorCA),
 			},
 		},
 	}
 }
 
 // `names` must not include `""` — the DefaultTarget.
-func makeTagBasedRoutingIngressPaths(ns string, tc *traffic.Config, ro *traffic.Rollout, names []string) []netv1alpha1.HTTPIngressPath {
+func makeTagBasedRoutingIngressPaths(ns string, tc *traffic.Config, ro *traffic.Rollout, activatorCA string, names []string) []netv1alpha1.HTTPIngressPath {
 	paths := make([]netv1alpha1.HTTPIngressPath, 0, len(names))
 
 	for _, name := range names {
-		path := makeBaseIngressPath(ns, tc.Targets[name], ro.RolloutsByTag(name))
+		path := makeBaseIngressPath(ns, tc.Targets[name], ro.RolloutsByTag(name), activatorCA)
 		path.Headers = map[string]netv1alpha1.HeaderMatch{network.TagHeaderName: {Exact: name}}
 		paths = append(paths, *path)
 	}
@@ -300,7 +302,7 @@ func rolloutConfig(cfgName string, ros []*traffic.ConfigurationRollout) *traffic
 }
 
 func makeBaseIngressPath(ns string, targets traffic.RevisionTargets,
-	roCfgs []*traffic.ConfigurationRollout) *netv1alpha1.HTTPIngressPath {
+	roCfgs []*traffic.ConfigurationRollout, activatorCA string) *netv1alpha1.HTTPIngressPath {
 	// Optimistically allocate |targets| elements.
 	splits := make([]netv1alpha1.IngressBackendSplit, 0, len(targets))
 	for _, t := range targets {
@@ -312,6 +314,12 @@ func makeBaseIngressPath(ns string, targets traffic.RevisionTargets,
 		if t.LatestRevision != nil && *t.LatestRevision {
 			cfg = rolloutConfig(t.ConfigurationName, roCfgs)
 		}
+		var servicePort intstr.IntOrString
+		if len(activatorCA) != 0 {
+			servicePort = intstr.FromInt(networking.ServiceHTTPSPort)
+		} else {
+			servicePort = intstr.FromInt(networking.ServicePort(t.Protocol))
+		}
 		if cfg == nil || len(cfg.Revisions) < 2 {
 			// No rollout in progress.
 			splits = append(splits, netv1alpha1.IngressBackendSplit{
@@ -320,7 +328,7 @@ func makeBaseIngressPath(ns string, targets traffic.RevisionTargets,
 				ServiceName: t.RevisionName,
 				// Port on the public service must match port on the activator.
 				// Otherwise, the serverless services can't guarantee seamless positive handoff.
-				ServicePort: intstr.FromInt(networking.ServicePort(t.Protocol)),
+				ServicePort: servicePort,
 			},
 			Percent: int(*t.Percent),
 			AppendHeaders: map[string]string{
@@ -337,7 +345,7 @@ func makeBaseIngressPath(ns string, targets traffic.RevisionTargets,
 					ServiceName: rev.RevisionName,
 					// Port on the public service must match port on the activator.
 					// Otherwise, the serverless services can't guarantee seamless positive handoff.
-					ServicePort: intstr.FromInt(networking.ServicePort(t.Protocol)),
+					ServicePort: servicePort,
 				},
 				Percent: rev.Percent,
 				AppendHeaders: map[string]string{
diff --git a/pkg/reconciler/route/resources/ingress_test.go b/pkg/reconciler/route/resources/ingress_test.go
index f955e0b08f56..c87816c3a92d 100644
--- a/pkg/reconciler/route/resources/ingress_test.go
+++ b/pkg/reconciler/route/resources/ingress_test.go
@@ -867,7 +867,7 @@ func TestMakeIngressRuleVanilla(t *testing.T) {
 	}
 	ro := tc.BuildRollout()
 	rule := makeIngressRule(domains, ns,
-		netv1alpha1.IngressVisibilityExternalIP, targets, ro.RolloutsByTag(traffic.DefaultTarget))
+		netv1alpha1.IngressVisibilityExternalIP, targets, ro.RolloutsByTag(traffic.DefaultTarget), "" /* activatorCA */)
 	expected := netv1alpha1.IngressRule{
 		Hosts: []string{
 			"a.com",
@@ -920,7 +920,7 @@ func TestMakeIngressRuleZeroPercentTarget(t *testing.T) {
 	}
 	ro := tc.BuildRollout()
 	rule := makeIngressRule(domains, ns,
-		netv1alpha1.IngressVisibilityExternalIP, targets, ro.RolloutsByTag(traffic.DefaultTarget))
+		netv1alpha1.IngressVisibilityExternalIP, targets, ro.RolloutsByTag(traffic.DefaultTarget), "" /* activatorCA */)
 	expected := netv1alpha1.IngressRule{
 		Hosts: []string{"test.org"},
 		HTTP: &netv1alpha1.HTTPIngressRuleValue{
@@ -970,7 +970,7 @@ func TestMakeIngressRuleTwoTargets(t *testing.T) {
 	ro := tc.BuildRollout()
 	domains := []string{"test.org"}
 	rule := makeIngressRule(domains, ns, netv1alpha1.IngressVisibilityExternalIP,
-		targets, ro.RolloutsByTag("a-tag"))
+		targets, ro.RolloutsByTag("a-tag"), "" /* activatorCA */)
 	expected := netv1alpha1.IngressRule{
 		Hosts: []string{"test.org"},
 		HTTP: &netv1alpha1.HTTPIngressRuleValue{
@@ -1089,6 +1089,128 @@ func TestMakeIngressWithHTTPOption(t *testing.T) {
 	}
 }
 
+func TestMakeIngressWithActivatorCA(t *testing.T) {
+	targets := map[string]traffic.RevisionTargets{
+		traffic.DefaultTarget: {{
+			TrafficTarget: v1.TrafficTarget{
+				ConfigurationName: "config",
+				RevisionName:      "v2",
+				Percent:           ptr.Int64(100),
+			},
+		}},
+		"v1": {{
+			TrafficTarget: v1.TrafficTarget{
+				ConfigurationName: "config",
+				RevisionName:      "v1",
+				Percent:           ptr.Int64(100),
+			},
+		}},
+	}
+
+	r := Route(ns, "test-route", WithURL)
+
+	expected := []netv1alpha1.IngressRule{{
+		Hosts: []string{
+			"test-route." + ns,
+			"test-route." + ns + ".svc",
+			pkgnet.GetServiceHostname("test-route", ns),
+		},
+		HTTP: &netv1alpha1.HTTPIngressRuleValue{
+			Paths: []netv1alpha1.HTTPIngressPath{{
+				Splits: []netv1alpha1.IngressBackendSplit{{
+					IngressBackend: netv1alpha1.IngressBackend{
+						ServiceNamespace: ns,
+						ServiceName:      "v2",
+						ServicePort:      intstr.FromInt(networking.ServiceHTTPSPort),
+					},
+					Percent: 100,
+					AppendHeaders: map[string]string{
+						"Knative-Serving-Revision":  "v2",
+						"Knative-Serving-Namespace": ns,
+					},
+				}},
+			}},
+		},
+		Visibility: netv1alpha1.IngressVisibilityClusterLocal,
+	}, {
+		Hosts: []string{
+			"test-route." + ns + ".example.com",
+		},
+		HTTP: &netv1alpha1.HTTPIngressRuleValue{
+			Paths: []netv1alpha1.HTTPIngressPath{{
+				Splits: []netv1alpha1.IngressBackendSplit{{
+					IngressBackend: netv1alpha1.IngressBackend{
+						ServiceNamespace: ns,
+						ServiceName:      "v2",
+						ServicePort:      intstr.FromInt(networking.ServiceHTTPSPort),
+					},
+					Percent: 100,
+					AppendHeaders: map[string]string{
+						"Knative-Serving-Revision":  "v2",
+						"Knative-Serving-Namespace": ns,
+					},
+				}},
+			}},
+		},
+		Visibility: netv1alpha1.IngressVisibilityExternalIP,
+	}, {
+		Hosts: []string{
+			"v1-test-route." + ns,
+			"v1-test-route." + ns + ".svc",
+			pkgnet.GetServiceHostname("v1-test-route", ns),
+		},
+		HTTP: &netv1alpha1.HTTPIngressRuleValue{
+			Paths: []netv1alpha1.HTTPIngressPath{{
+				Splits: []netv1alpha1.IngressBackendSplit{{
+					IngressBackend: netv1alpha1.IngressBackend{
+						ServiceNamespace: ns,
+						ServiceName:      "v1",
+						ServicePort:      intstr.FromInt(networking.ServiceHTTPSPort),
+					},
+					Percent: 100,
+					AppendHeaders: map[string]string{
+						"Knative-Serving-Revision":  "v1",
+						"Knative-Serving-Namespace": ns,
+					},
+				}},
+			}},
+		},
+		Visibility: netv1alpha1.IngressVisibilityClusterLocal,
+	}, {
+		Hosts: []string{
+			"v1-test-route." + ns + ".example.com",
+		},
+		HTTP: &netv1alpha1.HTTPIngressRuleValue{
+			Paths: []netv1alpha1.HTTPIngressPath{{
+				Splits: []netv1alpha1.IngressBackendSplit{{
+					IngressBackend: netv1alpha1.IngressBackend{
+						ServiceNamespace: ns,
+						ServiceName:      "v1",
+						ServicePort:      intstr.FromInt(networking.ServiceHTTPSPort),
+					},
+					Percent: 100,
+					AppendHeaders: map[string]string{
+						"Knative-Serving-Revision":  "v1",
+						"Knative-Serving-Namespace": ns,
+					},
+				}},
+			}},
+		},
+		Visibility: netv1alpha1.IngressVisibilityExternalIP,
+	}}
+
+	tc := &traffic.Config{Targets: targets}
+	ro := tc.BuildRollout()
+	ci, err := makeIngressSpec(testContextWithActivatorCA(), r, nil /*tls*/, tc, ro)
+	if err != nil {
+		t.Error("Unexpected error", err)
+	}
+
+	if !cmp.Equal(expected, ci.Rules) {
+		t.Error("Unexpected rules (-want, +got):", cmp.Diff(expected, ci.Rules))
+	}
+}
+
 func TestMakeIngressTLS(t *testing.T) {
 	cert := &netv1alpha1.Certificate{
 		ObjectMeta: metav1.ObjectMeta{
@@ -1300,3 +1422,10 @@ func testContextWithHTTPOption() context.Context {
 	cfg.Network.HTTPProtocol = network.HTTPRedirected
 	return config.ToContext(context.Background(), cfg)
 }
+
+func testContextWithActivatorCA() context.Context {
+	cfg := testConfig()
+	cfg.Network.ActivatorCA = "ca-name"
+	cfg.Network.ActivatorSAN = "san-name"
+	return config.ToContext(context.Background(), cfg)
+}
diff --git a/pkg/reconciler/serverlessservice/resources/services.go b/pkg/reconciler/serverlessservice/resources/services.go
index a6f51f032986..7ad61ad674ed 100644
--- a/pkg/reconciler/serverlessservice/resources/services.go
+++ b/pkg/reconciler/serverlessservice/resources/services.go
@@ -53,16 +53,29 @@ func MakePublicService(sks *v1alpha1.ServerlessService) *corev1.Service {
 			OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(sks)},
 		},
 		Spec: corev1.ServiceSpec{
-			Ports: []corev1.ServicePort{{
-				Name:       pkgnet.ServicePortName(sks.Spec.ProtocolType),
-				Protocol:   corev1.ProtocolTCP,
-				Port:       int32(pkgnet.ServicePort(sks.Spec.ProtocolType)),
-				TargetPort: targetPort(sks),
-			}},
+			Ports: makePublicServicePorts(sks),
 		},
 	}
 }
 
+func makePublicServicePorts(sks *v1alpha1.ServerlessService) []corev1.ServicePort {
+	ports := []corev1.ServicePort{{
+		Name:       pkgnet.ServicePortName(sks.Spec.ProtocolType),
+		Protocol:   corev1.ProtocolTCP,
+		Port:       int32(pkgnet.ServicePort(sks.Spec.ProtocolType)),
+		TargetPort: targetPort(sks),
+	}, {
+		// The HTTPS port is used when activator-ca is enabled.
+		// Although it is not used by default, we put it here as it should be harmless
+		// and it keeps the code simple.
+		Name:       pkgnet.ServicePortNameHTTPS,
+		Protocol:   corev1.ProtocolTCP,
+		Port:       pkgnet.ServiceHTTPSPort,
+		TargetPort: intstr.FromInt(networking.BackendHTTPSPort),
+	}}
+	return ports
+}
+
 // MakePublicEndpoints constructs a K8s Endpoints that is not backed a selector
 // and will be manually reconciled by the SKS controller.
 func MakePublicEndpoints(sks *v1alpha1.ServerlessService, src *corev1.Endpoints) *corev1.Endpoints {
@@ -98,15 +111,18 @@ func filterSubsetPorts(targetPort int32, subsets []corev1.EndpointSubset) []core
 	}
 	ret := make([]corev1.EndpointSubset, len(subsets))
 	for i, sss := range subsets {
-		sst := sss.DeepCopy()
+		sst := sss
+		sst.Ports = nil
 		// Find the port we care about and remove all others.
-		for j, p := range sst.Ports {
-			if p.Port == targetPort {
-				sst.Ports = sst.Ports[j : j+1]
-				break
+		for j, p := range sss.Ports {
+			switch p.Port {
+			case networking.BackendHTTPSPort:
+				fallthrough
+			case targetPort:
+				sst.Ports = append(sst.Ports, sss.Ports[j])
 			}
 		}
-		ret[i] = *sst
+		ret[i] = sst
 	}
 	return ret
 }
@@ -134,6 +150,11 @@ func MakePrivateService(sks *v1alpha1.ServerlessService, selector map[string]str
 			// This one is matching the public one, since this is the
 			// port queue-proxy listens on.
 			TargetPort: targetPort(sks),
+		}, {
+			Name:       pkgnet.ServicePortNameHTTPS,
+			Protocol:   corev1.ProtocolTCP,
+			Port:       pkgnet.ServiceHTTPSPort,
+			TargetPort: intstr.FromInt(networking.BackendHTTPSPort),
 		}, {
 			Name:       servingv1.AutoscalingQueueMetricsPortName,
 			Protocol:   corev1.ProtocolTCP,
diff --git a/pkg/reconciler/serverlessservice/resources/services_test.go b/pkg/reconciler/serverlessservice/resources/services_test.go
index d24dabb6c5af..61f7bad027ad 100644
--- a/pkg/reconciler/serverlessservice/resources/services_test.go
+++ b/pkg/reconciler/serverlessservice/resources/services_test.go
@@ -112,6 +112,11 @@ func svc(t networking.ServiceType, mods ...func(*corev1.Service)) *corev1.Servic
 				Protocol:   corev1.ProtocolTCP,
 				Port:       pkgnet.ServiceHTTPPort,
 				TargetPort: intstr.FromInt(networking.BackendHTTPPort),
+			}, {
+				Name:       pkgnet.ServicePortNameHTTPS,
+				Protocol:   corev1.ProtocolTCP,
+				Port:       pkgnet.ServiceHTTPSPort,
+				TargetPort: intstr.FromInt(networking.BackendHTTPSPort),
 			}},
 		},
 	}
@@ -181,6 +186,11 @@ func TestMakePublicService(t *testing.T) {
 				Protocol:   corev1.ProtocolTCP,
 				Port:       pkgnet.ServiceHTTP2Port,
 				TargetPort: intstr.FromInt(networking.BackendHTTP2Port),
+			}, {
+				Name:       pkgnet.ServicePortNameHTTPS,
+				Protocol:   corev1.ProtocolTCP,
+				Port:       pkgnet.ServiceHTTPSPort,
+				TargetPort: intstr.FromInt(networking.BackendHTTPSPort),
 			}}
 			s.Annotations = map[string]string{"cherub": "rock"}
 			s.OwnerReferences[0].UID = "1988"
@@ -196,6 +206,11 @@ func TestMakePublicService(t *testing.T) {
 				Protocol:   corev1.ProtocolTCP,
 				Port:       pkgnet.ServiceHTTP2Port,
 				TargetPort: intstr.FromInt(networking.BackendHTTP2Port),
+			}, {
+				Name:       pkgnet.ServicePortNameHTTPS,
+				Protocol:   corev1.ProtocolTCP,
+				Port:       pkgnet.ServiceHTTPSPort,
+				TargetPort: intstr.FromInt(networking.BackendHTTPSPort),
 			}}
 		}),
 	}, {
@@ -211,6 +226,11 @@ func TestMakePublicService(t *testing.T) {
 				Protocol:   corev1.ProtocolTCP,
 				Port:       pkgnet.ServiceHTTP2Port,
 				TargetPort: intstr.FromInt(networking.BackendHTTP2Port),
+			}, {
+				Name:       pkgnet.ServicePortNameHTTPS,
+				Protocol:   corev1.ProtocolTCP,
+				Port:       pkgnet.ServiceHTTPSPort,
+				TargetPort: intstr.FromInt(networking.BackendHTTPSPort),
 			}}
 			s.Labels["infinite"] = "sadness"
 		}),
@@ -367,6 +387,42 @@ func TestFilterSubsetPorts(t *testing.T) {
 				Protocol: "TCP",
 			}},
 		}},
+	}, {
+		name: "four ports including https ports, keep target and https port",
+		port: 2006,
+		subsets: []corev1.EndpointSubset{{
+			Ports: []corev1.EndpointPort{{
+				Name:     "http",
+				Port:     2009,
+				Protocol: "TCP",
+			}, {
+				Name:     "http",
+				Port:     2006,
+				Protocol: "TCP",
+			}, {
+				Name:     "http",
+				Port:     2019,
+				Protocol: "TCP",
+			}, {
+				Name:     "https",
+				Port:     networking.BackendHTTPSPort,
+				Protocol: "TCP",
+			}},
+		}},
+		want: []corev1.EndpointSubset{{
+			Ports: []corev1.EndpointPort{
+				{
+					Name:     "http",
+					Port:     2006,
+					Protocol: "TCP",
+				},
+				{
+					Name:     "https",
+					Port:     networking.BackendHTTPSPort,
+					Protocol: "TCP",
+				},
+			},
+		}},
 	}}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
@@ -419,7 +475,7 @@ func TestMakePrivateService(t *testing.T) {
 				Port:       pkgnet.ServiceHTTPPort,
 				TargetPort: intstr.FromInt(networking.BackendHTTP2Port),
 			}
-			s.Spec.Ports[4] = corev1.ServicePort{
+			s.Spec.Ports[5] = corev1.ServicePort{
 				Name:       pkgnet.ServicePortNameH2C + "-istio",
 				Protocol:   corev1.ProtocolTCP,
 				Port:       networking.BackendHTTP2Port,
diff --git a/pkg/reconciler/serverlessservice/serverlessservice_test.go b/pkg/reconciler/serverlessservice/serverlessservice_test.go
index a20317c74b24..90e32b9bd1cb 100644
--- a/pkg/reconciler/serverlessservice/serverlessservice_test.go
+++ b/pkg/reconciler/serverlessservice/serverlessservice_test.go
@@ -769,9 +769,9 @@ func withHTTP2Priv(svc *corev1.Service) {
 	svc.Spec.Ports[0].Name = "http2"
 	svc.Spec.Ports[0].TargetPort = intstr.FromInt(networking.BackendHTTP2Port)
 
-	svc.Spec.Ports[4].Name = "http2-istio"
-	svc.Spec.Ports[4].Port = networking.BackendHTTP2Port
-	svc.Spec.Ports[4].TargetPort = intstr.FromInt(networking.BackendHTTP2Port)
+	svc.Spec.Ports[5].Name = "http2-istio"
+	svc.Spec.Ports[5].Port = networking.BackendHTTP2Port
+	svc.Spec.Ports[5].TargetPort = intstr.FromInt(networking.BackendHTTP2Port)
 }
 
 func withHTTP2(svc *corev1.Service) {
diff --git a/test/config/tls/config-network.yaml b/test/config/tls/config-network.yaml
new file mode 100644
index 000000000000..6a3b909b8a48
--- /dev/null
+++ b/test/config/tls/config-network.yaml
@@ -0,0 +1,26 @@
+# Copyright 2022 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: config-network
+  labels:
+    app.kubernetes.io/name: knative-serving
+    app.kubernetes.io/version: devel
+    serving.knative.dev/release: devel
+data:
+  activator-ca: "serving-ca"
+  activator-san: "knative"
+  activator-cert-secret: "server-certs"
diff --git a/test/e2e-common.sh b/test/e2e-common.sh
index 9aafaa795088..aabb0adff055 100644
--- a/test/e2e-common.sh
+++ b/test/e2e-common.sh
@@ -34,6 +34,7 @@ export RUN_HTTP01_AUTO_TLS_TESTS=0
 export HTTPS=0
 export SHORT=0
 export ENABLE_HA=0
+export ENABLE_TLS=${ENABLE_TLS:-0}
 export MESH=0
 export PERF=0
 export KIND=${KIND:-0}
@@ -356,6 +357,16 @@ function install() {
     # kubectl -n ${SYSTEM_NAMESPACE} delete leases --all
     wait_for_leader_controller || return 1
   fi
+
+  if (( ENABLE_TLS )); then
+    echo "Generate certificates"
+    bash ${REPO_ROOT_DIR}/test/generate-cert.sh
+
+    echo "Patch activator to serve TLS"
+    kubectl apply -n ${SYSTEM_NAMESPACE} -f ${REPO_ROOT_DIR}/test/config/tls/config-network.yaml
+    kubectl delete pod -n ${SYSTEM_NAMESPACE} -l app=activator
+  fi
+
 }
 
 # Check if we should use --resolvabledomain. In case the ingress only has
diff --git a/test/e2e/autoscale_test.go b/test/e2e/autoscale_test.go
index 806c7f59f785..a4a76c12f8ec 100644
--- a/test/e2e/autoscale_test.go
+++ b/test/e2e/autoscale_test.go
@@ -28,6 +28,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 
+	netpkg "knative.dev/networking/pkg"
 	"knative.dev/pkg/system"
 	"knative.dev/serving/pkg/apis/autoscaling"
 	"knative.dev/serving/pkg/networking"
@@ -132,6 +133,16 @@ func TestTargetBurstCapacity(t *testing.T) {
 	}))
 	test.EnsureTearDown(t, ctx.Clients(), ctx.Names())
 
+	cm, err := ctx.clients.KubeClient.CoreV1().ConfigMaps(system.Namespace()).
+		Get(context.Background(), netpkg.ConfigName, metav1.GetOptions{})
+	if err != nil {
+		t.Fatal("Failed to get ConfigMap config-network:", err)
+	}
+	if cm.Data[netpkg.ActivatorCAKey] != "" {
+		// TODO: Remove this when https://github.com/knative/serving/issues/12797 is done.
+		t.Skip("Skipping TestTargetBurstCapacity as activator-ca is specified. See issue/12797.")
+	}
+
 	cfg, err := autoscalerCM(ctx.clients)
 	if err != nil {
 		t.Fatal("Error retrieving autoscaler configmap:", err)
diff --git a/test/generate-cert.sh b/test/generate-cert.sh
new file mode 100755
index 000000000000..00d9ce5d7b51
--- /dev/null
+++ b/test/generate-cert.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+# Copyright 2022 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+SYSTEM_NAMESPACE="${SYSTEM_NAMESPACE:-knative-serving}"
+TEST_NAMESPACE=serving-tests
+out_dir="$(mktemp -d /tmp/certs-XXX)"
+san="knative"
+
+# Generate Root key and cert.
+openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -subj '/O=Example/CN=Example' -keyout "${out_dir}"/root.key -out "${out_dir}"/root.crt
+
+# Create server key
+openssl req -out "${out_dir}"/tls.csr -newkey rsa:2048 -nodes -keyout "${out_dir}"/tls.key -subj "/CN=Example/O=Example" -addext "subjectAltName = DNS:$san"
+
+# Create server certs
+openssl x509 -req -extfile <(printf "subjectAltName=DNS:$san") -days 365 -in "${out_dir}"/tls.csr -CA "${out_dir}"/root.crt -CAkey "${out_dir}"/root.key -CAcreateserial -out "${out_dir}"/tls.crt
+
+# Create secret
+kubectl create -n ${SYSTEM_NAMESPACE} secret generic serving-ca \
+  --from-file=ca.crt="${out_dir}"/root.crt --dry-run=client -o yaml | kubectl apply -f -
+
+kubectl create -n ${SYSTEM_NAMESPACE} secret tls server-certs \
+  --key="${out_dir}"/tls.key \
+  --cert="${out_dir}"/tls.crt --dry-run=client -o yaml | kubectl apply -f -
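Note: as a companion to generate-cert.sh above, the following illustrative snippet shows the property the script is expected to establish: the server certificate stored in the server-certs secret chains to the root stored in the serving-ca secret and is valid for the "knative" DNS SAN that config-network.yaml sets as activator-san. The file paths are assumptions for illustration (the script writes into a temporary directory).

	// Illustrative only: offline check of the generated certificates.
	package main

	import (
		"crypto/x509"
		"encoding/pem"
		"fmt"
		"log"
		"os"
	)

	func readCert(path string) *x509.Certificate {
		pemBytes, err := os.ReadFile(path)
		if err != nil {
			log.Fatal(err)
		}
		block, _ := pem.Decode(pemBytes)
		if block == nil {
			log.Fatalf("no PEM data in %s", path)
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			log.Fatal(err)
		}
		return cert
	}

	func main() {
		root := readCert("root.crt") // CA that ends up in the serving-ca secret
		leaf := readCert("tls.crt")  // server cert that ends up in the server-certs secret

		roots := x509.NewCertPool()
		roots.AddCert(root)

		// Verify the chain and the SAN the activator is expected to present.
		if _, err := leaf.Verify(x509.VerifyOptions{Roots: roots, DNSName: "knative"}); err != nil {
			log.Fatal("verification failed: ", err)
		}
		fmt.Println("tls.crt chains to root.crt; SANs:", leaf.DNSNames)
	}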