From f3ca80443843ff64a54108b83a64fe4e7bd22986 Mon Sep 17 00:00:00 2001
From: jagathprakash <31057312+jagathprakash@users.noreply.github.com>
Date: Thu, 3 Nov 2022 11:13:28 -0400
Subject: [PATCH] [TEP-0089] Enable SPIRE for signing taskrun results in alpha.

Breaking down PR #4759, originally proposed by @pxp928 to address TEP-0089,
according to @lumjjb's suggestions. The plan for breaking down the PR is:

PR 1.1: api
PR 1.2: entrypointer (+ cmd line + test/entrypointer). The entrypoint takes results and signs the results (termination message).
PR 1.3: reconciler + pod + cmd/controller + integration tests. The controller will verify the signed result.

This commit corresponds to PR 1.3 above.
---
 cmd/imagedigestexporter/main.go               |   3 +-
 config/config-feature-flags.yaml              |   4 +
 config/config-spire.yaml                      |  49 +++
 config/controller.yaml                        |   2 +
 docs/spire.md                                 | 287 +++++++++++++
 .../v1beta1/pipelineruns/4808-regression.yaml |   2 +-
 hack/update-codegen.sh                        |   5 +
 pkg/apis/config/spire_config.go               |  83 ++++
 pkg/apis/config/spire_config_test.go          |  72 ++++
 pkg/apis/config/store.go                      |  11 +
 pkg/apis/config/store_test.go                 |   6 +
 .../config/testdata/config-spire-empty.yaml   |  29 ++
 pkg/apis/config/testdata/config-spire.yaml    |  31 ++
 pkg/pod/pod.go                                |  43 +-
 pkg/pod/pod_test.go                           | 233 +++++++++-
 pkg/pod/status.go                             |  93 +++-
 pkg/pod/status_test.go                        | 404 +++++++++++++++++-
 pkg/reconciler/taskrun/controller.go          |   5 +-
 pkg/reconciler/taskrun/taskrun.go             |  25 +-
 pkg/reconciler/taskrun/taskrun_test.go        | 180 +++++---
 pkg/spire/config/config.go                    |   3 +-
 pkg/spire/config/zz_generated.deepcopy.go     |  38 ++
 pkg/spire/controller.go                       |  16 +
 test/controller.go                            |  11 +-
 test/controller_test.go                       |   4 +
 test/e2e-common.sh                            |  59 +++
 test/e2e-tests.sh                             |  17 +
 test/embed_test.go                            |  30 +-
 test/entrypoint_test.go                       |  31 +-
 test/helm_task_test.go                        |  31 +-
 test/hermetic_taskrun_test.go                 |  35 +-
 test/ignore_step_error_test.go                |  23 +-
 test/init_test.go                             |  22 +
 test/kaniko_task_test.go                      |  23 +-
 test/pipelinefinally_test.go                  | 139 +++++-
 test/pipelinerun_test.go                      |  83 +++-
 test/status_test.go                           |  41 +-
 test/taskrun_test.go                          | 158 ++++++-
 .../patch/pipeline-controller-spire.json      |  56 +++
 test/testdata/spire/spiffe-csi-driver.yaml    |  20 +
 test/testdata/spire/spire-agent.yaml          | 208 +++++++++
 test/testdata/spire/spire-server.yaml         | 211 +++++++++
 42 files changed, 2737 insertions(+), 89 deletions(-)
 create mode 100644 config/config-spire.yaml
 create mode 100644 docs/spire.md
 create mode 100644 pkg/apis/config/spire_config.go
 create mode 100644 pkg/apis/config/spire_config_test.go
 create mode 100644 pkg/apis/config/testdata/config-spire-empty.yaml
 create mode 100644 pkg/apis/config/testdata/config-spire.yaml
 create mode 100644 pkg/spire/config/zz_generated.deepcopy.go
 create mode 100644 test/testdata/patch/pipeline-controller-spire.json
 create mode 100644 test/testdata/spire/spiffe-csi-driver.yaml
 create mode 100644 test/testdata/spire/spire-agent.yaml
 create mode 100644 test/testdata/spire/spire-server.yaml

diff --git a/cmd/imagedigestexporter/main.go b/cmd/imagedigestexporter/main.go
index 33496dab427..a0584e95b64 100644
--- a/cmd/imagedigestexporter/main.go
+++ b/cmd/imagedigestexporter/main.go
@@ -33,7 +33,8 @@ var (
 	terminationMessagePath = flag.String("terminationMessagePath", "/tekton/termination", "Location of file containing termination message")
 )
 
-/* The input of this go program will be a JSON string with all the output PipelineResources of type
+/*
+The input of this go program will be a JSON string with all the output PipelineResources of type
 Image, which will include the path to where the index.json
file will be located. The program will read the related index.json file(s) and log another JSON string including the name of the image resource and the digests. diff --git a/config/config-feature-flags.yaml b/config/config-feature-flags.yaml index 68c38c2ddf6..9cac6b319c6 100644 --- a/config/config-feature-flags.yaml +++ b/config/config-feature-flags.yaml @@ -90,3 +90,7 @@ data: # in the TaskRun/PipelineRun such as the source from where a remote Task/Pipeline # definition was fetched. enable-provenance-in-status: "false" + # Setting this flag to "true" enables spire integration with pipeline. + # This is an experimental feature and thus should still be considered + # an alpha feature. + enable-spire: "false" diff --git a/config/config-spire.yaml b/config/config-spire.yaml new file mode 100644 index 00000000000..726d5ade916 --- /dev/null +++ b/config/config-spire.yaml @@ -0,0 +1,49 @@ +# Copyright 2022 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-spire + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + # + # spire-trust-domain specifies the SPIRE trust domain to use. + # spire-trust-domain: "example.org" + # + # spire-socket-path specifies the SPIRE agent socket for SPIFFE workload API. + # spire-socket-path: "unix:///spiffe-workload-api/spire-agent.sock" + # + # spire-server-addr specifies the SPIRE server address for workload/node registration. + # spire-server-addr: "spire-server.spire.svc.cluster.local:8081" + # + # spire-node-alias-prefix specifies the SPIRE node alias prefix to use. + # spire-node-alias-prefix: "/tekton-node/" diff --git a/config/controller.yaml b/config/controller.yaml index 4883d1c4865..a688634a5f9 100644 --- a/config/controller.yaml +++ b/config/controller.yaml @@ -116,6 +116,8 @@ spec: value: feature-flags - name: CONFIG_LEADERELECTION_NAME value: config-leader-election + - name: CONFIG_SPIRE + value: config-spire - name: CONFIG_TRUSTED_RESOURCES_NAME value: config-trusted-resources - name: SSL_CERT_FILE diff --git a/docs/spire.md b/docs/spire.md new file mode 100644 index 00000000000..fe8ab453daf --- /dev/null +++ b/docs/spire.md @@ -0,0 +1,287 @@ + +# TaskRun Result Attestations + +TaskRun result attestations is currently an alpha experimental feature. + +The TaskRun result attestations feature provides the first part of non-falsifiable provenance to the build processes that run in the pipeline. 
They ensure that the results of the Tekton pipeline executions originate from the build workloads themselves and that they have not been tampered with. The second part of non-falsifiable provenance is to ensure that no third party interfered with the build process. Using SPIRE, the TaskRun status is monitored for any activity or change not performed by the Tekton Pipeline Controller. If an unauthorized change is detected, it will invalidate the TaskRun.
+
+When the TaskRun result attestations feature is enabled, all TaskRuns will produce a signature alongside their results, which can then be used to validate their provenance. For example, a TaskRun result that creates user-specified results `commit` and `url` would look like the following. `SVID`, `RESULT_MANIFEST`, `RESULT_MANIFEST.sig`, `commit.sig` and `url.sig` are attestations generated by the integration of SPIRE and the Tekton Controller.
+
+Parsed, the fields would be:
+```
+...
+
+...
+πŸ“ Results
+
+ NAME                    VALUE
+ βˆ™ RESULT_MANIFEST       commit,url,SVID,commit.sig,url.sig
+ βˆ™ RESULT_MANIFEST.sig   MEUCIQD55MMII9SEk/esQvwNLGC43y7efNGZ+7fsTdq+9vXYFAIgNoRW7cV9WKriZkcHETIaAKqfcZVJfsKbEmaDyohDSm4=
+ βˆ™ SVID                  -----BEGIN CERTIFICATE-----
+MIICGzCCAcGgAwIBAgIQH9VkLxKkYMidPIsofckRQTAKBggqhkjOPQQDAjAeMQsw
+CQYDVQQGEwJVUzEPMA0GA1UEChMGU1BJRkZFMB4XDTIyMDIxMTE2MzM1MFoXDTIy
+MDIxMTE3MzQwMFowHTELMAkGA1UEBhMCVVMxDjAMBgNVBAoTBVNQSVJFMFkwEwYH
+KoZIzj0CAQYIKoZIzj0DAQcDQgAEBRdg3LdxVAELeH+lq8wzdEJd4Gnt+m9G0Qhy
+NyWoPmFUaj9vPpvOyRgzxChYnW0xpcDWihJBkq/EbusPvQB8CKOB4TCB3jAOBgNV
+HQ8BAf8EBAMCA6gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1Ud
+EwEB/wQCMAAwHQYDVR0OBBYEFID7ARM5+vwzvnLPMO7Icfnj7l7hMB8GA1UdIwQY
+MBaAFES3IzpGDqgV3QcQNgX8b/MBwyAtMF8GA1UdEQRYMFaGVHNwaWZmZTovL2V4
+YW1wbGUub3JnL25zL2RlZmF1bHQvdGFza3J1bi9jYWNoZS1pbWFnZS1waXBlbGlu
+ZXJ1bi04ZHE5Yy1mZXRjaC1mcm9tLWdpdDAKBggqhkjOPQQDAgNIADBFAiEAi+LR
+JkrZn93PZPslaFmcrQw3rVcEa4xKmPleSvQaBoACIF1QB+q1uwH6cNvWdbLK9g+W
+T9Np18bK0xc6p5SuTM2C
+-----END CERTIFICATE-----
+ βˆ™ commit      aa79de59c4bae24e32f15fda467d02ae9cd94b01
+ βˆ™ commit.sig  MEQCIEJHk+8B+mCFozp0F52TQ1AadlhEo1lZNOiOnb/ht71aAiBCE0otKB1R0BktlPvweFPldfZfjG0F+NUSc2gPzhErzg==
+ βˆ™ url         https://github.com/buildpacks/samples
+ βˆ™ url.sig     MEUCIF0Fuxr6lv1MmkreqDKcPH3m+eXp+gY++VcxWgGCx7T1AiEA9U/tROrKuCGfKApLq2A9EModbdoGXyQXFOpAa0aMpOg=
+```
+
+However, the verification materials are removed from the final results as part of the TaskRun status. They are stored in the termination messages (more details below):
+
+```
+$ tkn tr describe cache-image-pipelinerun-8dq9c-fetch-from-git
+...
+
+...
+πŸ“ Results
+ NAME       VALUE
+ βˆ™ commit   aa79de59c4bae24e32f15fda467d02ae9cd94b01
+ βˆ™ url      https://github.com/buildpacks/samples
+```
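+
+Because the verification materials live in the termination message, they can be pulled back out with a few lines of Go. Below is a minimal, hypothetical sketch (not part of this patch); the message format is the JSON list shown in the sample termination message later in this document, abridged here:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+)
+
+// result mirrors one entry of the termination message.
+type result struct {
+	Key   string `json:"key"`
+	Value string `json:"value"`
+	Type  int    `json:"type"`
+}
+
+func main() {
+	// Abridged termination message in the format shown later in this doc.
+	msg := `[{"key":"RESULT_MANIFEST","value":"foo,bar","type":1},{"key":"foo","value":"hello","type":1}]`
+	var results []result
+	if err := json.Unmarshal([]byte(msg), &results); err != nil {
+		log.Fatal(err)
+	}
+	for _, r := range results {
+		fmt.Printf("%s = %s\n", r.Key, r.Value)
+	}
+}
+```
+
+## Architecture Overview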
+
+This feature relies on a SPIRE installation. This is how it integrates into the architecture of Tekton:
+
+```
+β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”  Register TaskRun Workload Identity        β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚             β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β–Ίβ”‚          β”‚
+β”‚  Tekton     β”‚                                            β”‚  SPIRE   β”‚
+β”‚  Controller │◄───────────┐                               β”‚  Server  β”‚
+β”‚             β”‚            β”‚ Listen on TaskRun             β”‚          β”‚
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”˜            β”‚                               β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+ β–²           β”‚  β”Œβ”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”       β–²
+ β”‚           β”‚  β”‚        Tekton TaskRun             β”‚       β”‚
+ β”‚           β”‚  β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜       β”‚
+ β”‚ Configure β”‚               β–²                             β”‚ Attest
+ β”‚ Pod &     β”‚               β”‚                             β”‚ +
+ β”‚ check     β”‚               β”‚                             β”‚ Request
+ β”‚ ready     β”‚  β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”  β”‚                           β”‚ SVIDs
+ β”‚           └─►│  TaskRun  β”œβ”€β”€β”˜                           β”‚
+ β”‚              β”‚  Pod      β”‚                              β”‚
+ β”‚              β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜  TaskRun Entrypointer        β”‚
+ β”‚                  β–²           Sign Result and update     β”‚
+ β”‚     Get          β”‚ Get SVID  TaskRun status with        β”‚
+ β”‚     SPIRE        β”‚           signature + cert           β”‚
+ β”‚     server       β”‚                                      β”‚
+ β”‚     Credentials  β”‚                                      β–Ό
+β”Œβ”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚                                                                     β”‚
+β”‚         SPIRE Agent      ( Runs as )                                β”‚
+β”‚         + CSI Driver     ( Daemonset )                              β”‚
+β”‚                                                                     β”‚
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+```
+
+Initial Setup:
+1. As part of the SPIRE deployment, the SPIRE server attests the agents running on each node in the cluster.
+1. The Tekton Controller is configured to have workload identity entry creation permissions to the SPIRE server.
+1. As part of the Tekton Controller operations, the Tekton Controller will retrieve an identity that it can use to talk to the SPIRE server to register TaskRun workloads.
+
+When a TaskRun is created:
+1. The Tekton Controller creates a TaskRun pod and its associated resources.
+1. When the TaskRun pod is ready, the Tekton Controller registers an identity with the information of the pod to the SPIRE server. This will tell the SPIRE server the identity of the TaskRun to use as well as how to attest the workload/pod.
+1. After the TaskRun steps complete, the entrypointer requests an SVID from the SPIFFE workload API (via the SPIRE agent socket).
+1. The SPIRE agent will attest the workload and request an SVID.
+1. The entrypointer receives an x509 SVID, containing the x509 certificate and associated private key.
+1. The entrypointer signs the results of the TaskRun and emits the signatures and x509 certificate to the TaskRun results for later verification.
+
+## Enabling TaskRun result attestations
+
+To enable TaskRun attestations:
+1. Make sure `enable-spire` is set to `"true"` in the `feature-flags` configmap, see [`install.md`](./install.md#customizing-the-pipelines-controller-behavior) for details
+1. Create a SPIRE deployment containing a SPIRE server, SPIRE agents and the SPIRE CSI driver. For convenience, [this sample single cluster deployment](https://github.com/spiffe/spiffe-csi/tree/main/example/config) can be used.
+1. Register the SPIRE workload entry for Tekton with the "Admin" flag, which will allow the Tekton controller to communicate with the SPIRE server to manage the TaskRun identities dynamically.
+   ```
+
+   # This example is assuming use of the above SPIRE deployment
+   # Example where trust domain is "example.org" and cluster name is "example-cluster"
+
+   # Register a node alias for all nodes on which the Tekton Controller may reside
+   kubectl -n spire exec -it \
+      deployment/spire-server -- \
+      /opt/spire/bin/spire-server entry create \
+         -node \
+         -spiffeID spiffe://example.org/allnodes \
+         -selector k8s_psat:cluster:example-cluster
+
+   # Register the Tekton controller workload to have access to creating entries in the SPIRE server
+   kubectl -n spire exec -it \
+      deployment/spire-server -- \
+      /opt/spire/bin/spire-server entry create \
+         -admin \
+         -spiffeID spiffe://example.org/tekton/controller \
+         -parentID spiffe://example.org/allnodes \
+         -selector k8s:ns:tekton-pipelines \
+         -selector k8s:pod-label:app:tekton-pipelines-controller \
+         -selector k8s:sa:tekton-pipelines-controller
+
+   ```
+
+1. Modify the controller (`config/controller.yaml`) to provide access to the SPIRE agent socket.
+   ```yaml
+   # Add the following to the volumeMounts of the "tekton-pipelines-controller" container
+   - name: spiffe-workload-api
+     mountPath: /spiffe-workload-api
+     readOnly: true
+
+   # Add the following to the volumes of the controller pod
+   - name: spiffe-workload-api
+     csi:
+       driver: "csi.spiffe.io"
+   ```
+1. (Optional) Modify the controller (`config/controller.yaml`) to configure non-default SPIRE options by adding arguments to the CLI.
+   ```yaml
+   containers:
+   - name: tekton-pipelines-controller
+     image: ko://github.com/tektoncd/pipeline/cmd/controller
+     args: [
+       # These images are built on-demand by `ko resolve` and are replaced
+       # by image references by digest.
+       "-kubeconfig-writer-image", "ko://github.com/tektoncd/pipeline/cmd/kubeconfigwriter",
+       "-git-image", "ko://github.com/tektoncd/pipeline/cmd/git-init",
+       "-entrypoint-image", "ko://github.com/tektoncd/pipeline/cmd/entrypoint",
+       "-nop-image", "ko://github.com/tektoncd/pipeline/cmd/nop",
+       "-imagedigest-exporter-image", "ko://github.com/tektoncd/pipeline/cmd/imagedigestexporter",
+       "-pr-image", "ko://github.com/tektoncd/pipeline/cmd/pullrequest-init",
+       "-workingdirinit-image", "ko://github.com/tektoncd/pipeline/cmd/workingdirinit",
+
+       # Configure optional SPIRE arguments
+       "-spire-trust-domain", "example.org",
+       "-spire-socket-path", "/spiffe-workload-api/spire-agent.sock",
+       "-spire-server-addr", "spire-server.spire.svc.cluster.local:8081",
+       "-spire-node-alias-prefix", "/tekton-node/",
+
+       # This is gcr.io/google.com/cloudsdktool/cloud-sdk:302.0.0-slim
+       "-gsutil-image", "gcr.io/google.com/cloudsdktool/cloud-sdk@sha256:27b2c22bf259d9bc1a291e99c63791ba0c27a04d2db0a43241ba0f1f20f4067f",
+       # The shell image must be root in order to create directories and copy files to PVCs.
+       # gcr.io/distroless/base:debug as of October 21, 2021
+       # image shall not contain a tag, so it will be supported on a runtime like cri-o
+       "-shell-image", "gcr.io/distroless/base@sha256:cfdc553400d41b47fd231b028403469811fcdbc0e69d66ea8030c5a0b5fbac2b",
+       # for script mode to work with windows we need a powershell image
+       # pinning to nanoserver tag as of July 15 2021
+       "-shell-image-win", "mcr.microsoft.com/powershell:nanoserver@sha256:b6d5ff841b78bdf2dfed7550000fd4f3437385b8fa686ec0f010be24777654d6",
+     ]
+   ```
+
+## Sample TaskRun attestation
+
+The following example shows how this feature works:
+
+```yaml
+kind: TaskRun
+apiVersion: tekton.dev/v1beta1
+metadata:
+  name: non-falsifiable-provenance
+spec:
+  timeout: 60s
+  taskSpec:
+    steps:
+    - name: non-falsifiable
+      image: ubuntu
+      script: |
+        #!/usr/bin/env bash
+        printf "%s" "hello" > "$(results.foo.path)"
+        printf "%s" "world" > "$(results.bar.path)"
+    results:
+    - name: foo
+    - name: bar
+```
+
+The termination message is:
+```
+message: '[{"key":"RESULT_MANIFEST","value":"foo,bar","type":1},{"key":"RESULT_MANIFEST.sig","value":"MEQCIB4grfqBkcsGuVyoQd9KUVzNZaFGN6jQOKK90p5HWHqeAiB7yZerDA+YE3Af/ALG43DQzygiBpKhTt8gzWGmpvXJFw==","type":1},{"key":"SVID","value":"-----BEGIN
+  CERTIFICATE-----\nMIICCjCCAbCgAwIBAgIRALH94zAZZXdtPg97O5vG5M0wCgYIKoZIzj0EAwIwHjEL\nMAkGA1UEBhMCVVMxDzANBgNVBAoTBlNQSUZGRTAeFw0yMjAzMTQxNTUzNTlaFw0y\nMjAzMTQxNjU0MDlaMB0xCzAJBgNVBAYTAlVTMQ4wDAYDVQQKEwVTUElSRTBZMBMG\nByqGSM49AgEGCCqGSM49AwEHA0IABPLzFTDY0RDpjKb+eZCIWgUw9DViu8/pM8q7\nHMTKCzlyGqhaU80sASZfpkZvmi72w+gLszzwVI1ZNU5e7aCzbtSjgc8wgcwwDgYD\nVR0PAQH/BAQDAgOoMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNV\nHRMBAf8EAjAAMB0GA1UdDgQWBBSsUvspy+/Dl24pA1f+JuNVJrjgmTAfBgNVHSME\nGDAWgBSOMyOHnyLLGxPSD9RRFL+Yhm/6qzBNBgNVHREERjBEhkJzcGlmZmU6Ly9l\neGFtcGxlLm9yZy9ucy9kZWZhdWx0L3Rhc2tydW4vbm9uLWZhbHNpZmlhYmxlLXBy\nb3ZlbmFuY2UwCgYIKoZIzj0EAwIDSAAwRQIhAM4/bPAH9dyhBEj3DbwtJKMyEI56\n4DVrP97ps9QYQb23AiBiXWrQkvRYl0h4CX0lveND2yfqLrGdVL405O5NzCcUrA==\n-----END
+  CERTIFICATE-----\n","type":1},{"key":"bar","value":"world","type":1},{"key":"bar.sig","value":"MEUCIQDOtg+aEP1FCr6/FsHX+bY1d5abSQn2kTiUMg4Uic2lVQIgTVF5bbT/O77VxESSMtQlpBreMyw2GmKX2hYJlaOEH1M=","type":1},{"key":"foo","value":"hello","type":1},{"key":"foo.sig","value":"MEQCIBr+k0i7SRSyb4h96vQE9hhxBZiZb/2PXQqReOKJDl/rAiBrjgSsalwOvN0zgQay0xQ7PRbm5YSmI8tvKseLR8Ryww==","type":1}]'
+```
+
+Parsed, the fields are:
+- `RESULT_MANIFEST`: List of results that should be present, to prevent pick-and-choose attacks
+- `RESULT_MANIFEST.sig`: The signature of the result manifest
+- `SVID`: The x509 certificate that will be used to verify the signature trust chain to the authority
+- `*.sig`: The signature of each individual result output
+```
+ βˆ™ RESULT_MANIFEST       foo,bar
+ βˆ™ RESULT_MANIFEST.sig   MEQCIB4grfqBkcsGuVyoQd9KUVzNZaFGN6jQOKK90p5HWHqeAiB7yZerDA+YE3Af/ALG43DQzygiBpKhTt8gzWGmpvXJFw==
+ βˆ™ SVID                  -----BEGIN CERTIFICATE-----
+MIICCjCCAbCgAwIBAgIRALH94zAZZXdtPg97O5vG5M0wCgYIKoZIzj0EAwIwHjEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBlNQSUZGRTAeFw0yMjAzMTQxNTUzNTlaFw0y
+MjAzMTQxNjU0MDlaMB0xCzAJBgNVBAYTAlVTMQ4wDAYDVQQKEwVTUElSRTBZMBMG
+ByqGSM49AgEGCCqGSM49AwEHA0IABPLzFTDY0RDpjKb+eZCIWgUw9DViu8/pM8q7
+HMTKCzlyGqhaU80sASZfpkZvmi72w+gLszzwVI1ZNU5e7aCzbtSjgc8wgcwwDgYD
+VR0PAQH/BAQDAgOoMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNV
+HRMBAf8EAjAAMB0GA1UdDgQWBBSsUvspy+/Dl24pA1f+JuNVJrjgmTAfBgNVHSME
+GDAWgBSOMyOHnyLLGxPSD9RRFL+Yhm/6qzBNBgNVHREERjBEhkJzcGlmZmU6Ly9l
+eGFtcGxlLm9yZy9ucy9kZWZhdWx0L3Rhc2tydW4vbm9uLWZhbHNpZmlhYmxlLXBy
+b3ZlbmFuY2UwCgYIKoZIzj0EAwIDSAAwRQIhAM4/bPAH9dyhBEj3DbwtJKMyEI56
+4DVrP97ps9QYQb23AiBiXWrQkvRYl0h4CX0lveND2yfqLrGdVL405O5NzCcUrA==
+-----END CERTIFICATE-----
+ βˆ™ bar       world
+ βˆ™ bar.sig   MEUCIQDOtg+aEP1FCr6/FsHX+bY1d5abSQn2kTiUMg4Uic2lVQIgTVF5bbT/O77VxESSMtQlpBreMyw2GmKX2hYJlaOEH1M=
+ βˆ™ foo       hello
+ βˆ™ foo.sig   MEQCIBr+k0i7SRSyb4h96vQE9hhxBZiZb/2PXQqReOKJDl/rAiBrjgSsalwOvN0zgQay0xQ7PRbm5YSmI8tvKseLR8Ryww==
+```
+
+However, the verification materials are removed from the results as part of the TaskRun status:
+```console
+$ tkn tr describe non-falsifiable-provenance
+Name:              non-falsifiable-provenance
+Namespace:         default
+Service Account:   default
+Timeout:           1m0s
+Labels:
+ app.kubernetes.io/managed-by=tekton-pipelines
+
+🌑️  Status
+
+STARTED          DURATION     STATUS
+38 seconds ago   36 seconds   Succeeded
+
+πŸ“ Results
+
+ NAME    VALUE
+ βˆ™ bar   world
+ βˆ™ foo   hello
+
+🦢 Steps
+
+ NAME                STATUS
+ βˆ™ non-falsifiable   Completed
+```
+
+## How is the result being verified?
+
+The signatures are verified by the Tekton controller. The process of verification is as follows:
+
+- Verifying the SVID
+  - Obtain the trust bundle from the SPIRE server
+  - Verify the SVID with the trust bundle
+  - Verify that the SVID spiffe ID is for the correct TaskRun
+- Verifying the result manifest
+  - Verify the content of `RESULT_MANIFEST` against the signature in `RESULT_MANIFEST.sig` using the SVID public key
+  - Verify that there is a corresponding field for all items listed in `RESULT_MANIFEST` (besides the `SVID` and `*.sig` fields)
+- Verifying individual result fields
+  - For each of the items in the results, verify its content against its associated `.sig` field
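+
+For illustration, below is a minimal Go sketch of the last check: verifying one result value against its `.sig` field. It assumes, based only on the shape of the example values above (not on anything this document guarantees), that each `*.sig` value is a base64-encoded DER ECDSA signature over the SHA-256 digest of the raw result value, and that the SVID certificate carries an ECDSA public key:
+
+```go
+package main
+
+import (
+	"crypto/ecdsa"
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/pem"
+	"fmt"
+	"log"
+)
+
+func main() {
+	// Values taken from the TaskRun's verification materials (abridged here).
+	svidPEM := []byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----") // the SVID field
+	value := []byte("hello")                                                        // the foo result
+	sigB64 := "MEQCIBr+..."                                                         // the foo.sig field
+
+	block, _ := pem.Decode(svidPEM)
+	if block == nil {
+		log.Fatal("no PEM block in SVID")
+	}
+	cert, err := x509.ParseCertificate(block.Bytes)
+	if err != nil {
+		log.Fatalf("parsing SVID certificate: %v", err)
+	}
+	pub, ok := cert.PublicKey.(*ecdsa.PublicKey)
+	if !ok {
+		log.Fatal("SVID does not carry an ECDSA public key")
+	}
+	sig, err := base64.StdEncoding.DecodeString(sigB64)
+	if err != nil {
+		log.Fatalf("decoding signature: %v", err)
+	}
+	digest := sha256.Sum256(value)
+	fmt.Println("signature valid:", ecdsa.VerifyASN1(pub, digest[:], sig))
+}
+```
+
+## Further Details
+
+To learn more about SPIRE TaskRun attestations, check out the [TEP](https://github.com/tektoncd/community/blob/main/teps/0089-nonfalsifiable-provenance-support.md).
\ No newline at end of file
diff --git a/examples/v1beta1/pipelineruns/4808-regression.yaml b/examples/v1beta1/pipelineruns/4808-regression.yaml
index df4502a8a88..4ebf63c8fca 100644
--- a/examples/v1beta1/pipelineruns/4808-regression.yaml
+++ b/examples/v1beta1/pipelineruns/4808-regression.yaml
@@ -92,4 +92,4 @@ spec:
         name: result-test
       params:
       - name: RESULT_STRING_LENGTH
-        value: "3000"
+        value: "2000"
diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh
index 52fd3bd8d81..5336a86826d 100755
--- a/hack/update-codegen.sh
+++ b/hack/update-codegen.sh
@@ -57,6 +57,11 @@ ${PREFIX}/deepcopy-gen \
   --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt \
   -i github.com/tektoncd/pipeline/pkg/apis/config
 
+${PREFIX}/deepcopy-gen \
+  -O zz_generated.deepcopy \
+  --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt \
+  -i github.com/tektoncd/pipeline/pkg/spire/config
+
 ${PREFIX}/deepcopy-gen \
   -O zz_generated.deepcopy \
   --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt \
diff --git a/pkg/apis/config/spire_config.go b/pkg/apis/config/spire_config.go
new file mode 100644
index 00000000000..7ad507f2020
--- /dev/null
+++ b/pkg/apis/config/spire_config.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2022 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.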
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"fmt"
+	"os"
+
+	sc "github.com/tektoncd/pipeline/pkg/spire/config"
+	corev1 "k8s.io/api/core/v1"
+)
+
+const (
+	// SpireConfigMapName is the name of the SPIRE configmap
+	SpireConfigMapName = "config-spire"
+
+	// SpireTrustDomain is the key to extract out the SPIRE trust domain to use
+	SpireTrustDomain = "spire-trust-domain"
+	// SpireSocketPath is the key to extract out the SPIRE agent socket for the SPIFFE workload API
+	SpireSocketPath = "spire-socket-path"
+	// SpireServerAddr is the key to extract out the SPIRE server address for workload/node registration
+	SpireServerAddr = "spire-server-addr"
+	// SpireNodeAliasPrefix is the key to extract out the SPIRE node alias prefix to use
+	SpireNodeAliasPrefix = "spire-node-alias-prefix"
+
+	// SpireTrustDomainDefault is the default value for the SpireTrustDomain
+	SpireTrustDomainDefault = "example.org"
+	// SpireSocketPathDefault is the default value for the SpireSocketPath
+	SpireSocketPathDefault = "unix:///spiffe-workload-api/spire-agent.sock"
+	// SpireServerAddrDefault is the default value for the SpireServerAddr
+	SpireServerAddrDefault = "spire-server.spire.svc.cluster.local:8081"
+	// SpireNodeAliasPrefixDefault is the default value for the SpireNodeAliasPrefix
+	SpireNodeAliasPrefixDefault = "/tekton-node/"
+)
+
+// NewSpireConfigFromMap creates a Config from the supplied map
+func NewSpireConfigFromMap(data map[string]string) (*sc.SpireConfig, error) {
+	cfg := &sc.SpireConfig{}
+	var ok bool
+	if cfg.TrustDomain, ok = data[SpireTrustDomain]; !ok {
+		cfg.TrustDomain = SpireTrustDomainDefault
+	}
+	if cfg.SocketPath, ok = data[SpireSocketPath]; !ok {
+		cfg.SocketPath = SpireSocketPathDefault
+	}
+	if cfg.ServerAddr, ok = data[SpireServerAddr]; !ok {
+		cfg.ServerAddr = SpireServerAddrDefault
+	}
+	if cfg.NodeAliasPrefix, ok = data[SpireNodeAliasPrefix]; !ok {
+		cfg.NodeAliasPrefix = SpireNodeAliasPrefixDefault
+	}
+	if err := cfg.Validate(); err != nil {
+		return nil, fmt.Errorf("failed to parse SPIRE configmap: %w", err)
+	}
+	return cfg, nil
+}
+
+// NewSpireConfigFromConfigMap creates a Config from the supplied ConfigMap
+func NewSpireConfigFromConfigMap(configMap *corev1.ConfigMap) (*sc.SpireConfig, error) {
+	return NewSpireConfigFromMap(configMap.Data)
+}
+
+// GetSpireConfigName returns the name of the SPIRE ConfigMap
+func GetSpireConfigName() string {
+	if e := os.Getenv("CONFIG_SPIRE"); e != "" {
+		return e
+	}
+	return SpireConfigMapName
+}
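
A hypothetical sketch (not part of this patch) of the defaulting behavior of `NewSpireConfigFromMap` above: any key missing from the supplied map falls back to the documented default, so an empty map yields the default SPIRE configuration. The trust domain value here is made up for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/tektoncd/pipeline/pkg/apis/config"
)

func main() {
	// Override only the trust domain; every other key falls back to its default.
	cfg, err := config.NewSpireConfigFromMap(map[string]string{
		config.SpireTrustDomain: "prod.example.com", // hypothetical trust domain
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.TrustDomain)     // prod.example.com
	fmt.Println(cfg.SocketPath)      // unix:///spiffe-workload-api/spire-agent.sock
	fmt.Println(cfg.ServerAddr)      // spire-server.spire.svc.cluster.local:8081
	fmt.Println(cfg.NodeAliasPrefix) // /tekton-node/
}
```

diff --git a/pkg/apis/config/spire_config_test.go b/pkg/apis/config/spire_config_test.go
new file mode 100644
index 00000000000..8a502c6fd67
--- /dev/null
+++ b/pkg/apis/config/spire_config_test.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2021 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.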
+*/ + +package config_test + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/tektoncd/pipeline/pkg/apis/config" + test "github.com/tektoncd/pipeline/pkg/reconciler/testing" + sc "github.com/tektoncd/pipeline/pkg/spire/config" + "github.com/tektoncd/pipeline/test/diff" +) + +func TestNewSpireConfigFromConfigMap(t *testing.T) { + type testCase struct { + expectedConfig *sc.SpireConfig + fileName string + } + + testCases := []testCase{ + { + expectedConfig: &sc.SpireConfig{ + TrustDomain: "test.com", + SocketPath: "unix:///spiffe-workload-api/test-spire-agent.sock", + ServerAddr: "test-spire-server.spire.svc.cluster.local:8081", + NodeAliasPrefix: "/test-tekton-node/", + }, + fileName: config.GetSpireConfigName(), + }, + } + + for _, tc := range testCases { + verifyConfigFileWithExpectedSpireConfig(t, tc.fileName, tc.expectedConfig) + } +} + +func TestNewSpireConfigFromEmptyConfigMap(t *testing.T) { + SpireConfigEmptyName := "config-spire-empty" + expectedConfig := &sc.SpireConfig{ + TrustDomain: "example.org", + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + ServerAddr: "spire-server.spire.svc.cluster.local:8081", + NodeAliasPrefix: "/tekton-node/", + } + verifyConfigFileWithExpectedSpireConfig(t, SpireConfigEmptyName, expectedConfig) +} + +func verifyConfigFileWithExpectedSpireConfig(t *testing.T, fileName string, expectedConfig *sc.SpireConfig) { + cm := test.ConfigMapFromTestFile(t, fileName) + if ab, err := config.NewSpireConfigFromConfigMap(cm); err == nil { + if d := cmp.Diff(ab, expectedConfig); d != "" { + t.Errorf("Diff:\n%s", diff.PrintWantGot(d)) + } + } else { + t.Errorf("NewSpireConfigFromConfigMap(actual) = %v", err) + } +} diff --git a/pkg/apis/config/store.go b/pkg/apis/config/store.go index 338a05c2ff9..5143d3e0b79 100644 --- a/pkg/apis/config/store.go +++ b/pkg/apis/config/store.go @@ -19,6 +19,7 @@ package config import ( "context" + sc "github.com/tektoncd/pipeline/pkg/spire/config" "knative.dev/pkg/configmap" ) @@ -33,6 +34,7 @@ type Config struct { ArtifactPVC *ArtifactPVC Metrics *Metrics TrustedResources *TrustedResources + SpireConfig *sc.SpireConfig } // FromContext extracts a Config from the provided context. 
@@ -56,6 +58,8 @@ func FromContextOrDefaults(ctx context.Context) *Config { artifactPVC, _ := NewArtifactPVCFromMap(map[string]string{}) metrics, _ := newMetricsFromMap(map[string]string{}) trustedresources, _ := NewTrustedResourcesConfigFromMap(map[string]string{}) + spireconfig, _ := NewSpireConfigFromMap(map[string]string{}) + return &Config{ Defaults: defaults, FeatureFlags: featureFlags, @@ -63,6 +67,7 @@ func FromContextOrDefaults(ctx context.Context) *Config { ArtifactPVC: artifactPVC, Metrics: metrics, TrustedResources: trustedresources, + SpireConfig: spireconfig, } } @@ -91,6 +96,7 @@ func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value i GetArtifactPVCConfigName(): NewArtifactPVCFromConfigMap, GetMetricsConfigName(): NewMetricsFromConfigMap, GetTrustedResourcesConfigName(): NewTrustedResourcesConfigFromConfigMap, + GetSpireConfigName(): NewSpireConfigFromConfigMap, }, onAfterStore..., ), @@ -131,6 +137,10 @@ func (s *Store) Load() *Config { if trustedresources == nil { trustedresources, _ = NewTrustedResourcesConfigFromMap(map[string]string{}) } + spireconfig := s.UntypedLoad(GetSpireConfigName()) + if spireconfig == nil { + spireconfig, _ = NewSpireConfigFromMap(map[string]string{}) + } return &Config{ Defaults: defaults.(*Defaults).DeepCopy(), @@ -139,5 +149,6 @@ func (s *Store) Load() *Config { ArtifactPVC: artifactPVC.(*ArtifactPVC).DeepCopy(), Metrics: metrics.(*Metrics).DeepCopy(), TrustedResources: trustedresources.(*TrustedResources).DeepCopy(), + SpireConfig: spireconfig.(*sc.SpireConfig).DeepCopy(), } } diff --git a/pkg/apis/config/store_test.go b/pkg/apis/config/store_test.go index 3e04f19d1fe..c8e02c1f51a 100644 --- a/pkg/apis/config/store_test.go +++ b/pkg/apis/config/store_test.go @@ -35,6 +35,7 @@ func TestStoreLoadWithContext(t *testing.T) { artifactPVCConfig := test.ConfigMapFromTestFile(t, "config-artifact-pvc") metricsConfig := test.ConfigMapFromTestFile(t, "config-observability") trustedresourcesConfig := test.ConfigMapFromTestFile(t, "config-trusted-resources") + spireConfig := test.ConfigMapFromTestFile(t, "config-spire") expectedDefaults, _ := config.NewDefaultsFromConfigMap(defaultConfig) expectedFeatures, _ := config.NewFeatureFlagsFromConfigMap(featuresConfig) @@ -42,6 +43,7 @@ func TestStoreLoadWithContext(t *testing.T) { expectedArtifactPVC, _ := config.NewArtifactPVCFromConfigMap(artifactPVCConfig) metrics, _ := config.NewMetricsFromConfigMap(metricsConfig) expectedTrustedResources, _ := config.NewTrustedResourcesConfigFromConfigMap(trustedresourcesConfig) + expectedSpireConfig, _ := config.NewSpireConfigFromConfigMap(spireConfig) expected := &config.Config{ Defaults: expectedDefaults, @@ -50,6 +52,7 @@ func TestStoreLoadWithContext(t *testing.T) { ArtifactPVC: expectedArtifactPVC, Metrics: metrics, TrustedResources: expectedTrustedResources, + SpireConfig: expectedSpireConfig, } store := config.NewStore(logtesting.TestLogger(t)) @@ -59,6 +62,7 @@ func TestStoreLoadWithContext(t *testing.T) { store.OnConfigChanged(artifactPVCConfig) store.OnConfigChanged(metricsConfig) store.OnConfigChanged(trustedresourcesConfig) + store.OnConfigChanged(spireConfig) cfg := config.FromContext(store.ToContext(context.Background())) @@ -74,6 +78,7 @@ func TestStoreLoadWithContext_Empty(t *testing.T) { artifactPVC, _ := config.NewArtifactPVCFromMap(map[string]string{}) metrics, _ := config.NewMetricsFromConfigMap(&corev1.ConfigMap{Data: map[string]string{}}) trustedresources, _ := config.NewTrustedResourcesConfigFromMap(map[string]string{}) + 
spireConfig, _ := config.NewSpireConfigFromMap(map[string]string{}) expected := &config.Config{ Defaults: defaults, @@ -82,6 +87,7 @@ func TestStoreLoadWithContext_Empty(t *testing.T) { ArtifactPVC: artifactPVC, Metrics: metrics, TrustedResources: trustedresources, + SpireConfig: spireConfig, } store := config.NewStore(logtesting.TestLogger(t)) diff --git a/pkg/apis/config/testdata/config-spire-empty.yaml b/pkg/apis/config/testdata/config-spire-empty.yaml new file mode 100644 index 00000000000..834f88ee409 --- /dev/null +++ b/pkg/apis/config/testdata/config-spire-empty.yaml @@ -0,0 +1,29 @@ +# Copyright 2022 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-spire + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ diff --git a/pkg/apis/config/testdata/config-spire.yaml b/pkg/apis/config/testdata/config-spire.yaml new file mode 100644 index 00000000000..a5f82e979bd --- /dev/null +++ b/pkg/apis/config/testdata/config-spire.yaml @@ -0,0 +1,31 @@ +# Copyright 2022 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-spire + namespace: tekton-pipelines + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +data: + # spire-trust-domain specifies the SPIRE trust domain to use. + spire-trust-domain: "test.com" + # spire-socket-path specifies the SPIRE agent socket for SPIFFE workload API. + spire-socket-path: "unix:///spiffe-workload-api/test-spire-agent.sock" + # spire-server-addr specifies the SPIRE server address for workload/node registration. + spire-server-addr: "test-spire-server.spire.svc.cluster.local:8081" + # spire-node-alias-prefix specifies the SPIRE node alias prefix to use. 
+  spire-node-alias-prefix: "/test-tekton-node/"
diff --git a/pkg/pod/pod.go b/pkg/pod/pod.go
index 30bf9d8fa1c..cd3ef41c2bb 100644
--- a/pkg/pod/pod.go
+++ b/pkg/pod/pod.go
@@ -30,6 +30,7 @@ import (
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
 	"github.com/tektoncd/pipeline/pkg/internal/computeresources/tasklevel"
 	"github.com/tektoncd/pipeline/pkg/names"
+	"github.com/tektoncd/pipeline/pkg/spire"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -120,6 +121,12 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec
 	featureFlags := config.FromContextOrDefaults(ctx).FeatureFlags
 	alphaAPIEnabled := featureFlags.EnableAPIFields == config.AlphaAPIFields
 
+	// Entrypoint arg to enable or disable SPIRE
+	var commonExtraEntrypointArgs []string
+	if config.FromContextOrDefaults(ctx).FeatureFlags.EnableSpire {
+		commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, "-enable_spire")
+	}
+
 	// Add our implicit volumes first, so they can be overridden by the user if they prefer.
 	volumes = append(volumes, implicitVolumes...)
 	volumeMounts = append(volumeMounts, implicitVolumeMounts...)
@@ -190,11 +197,13 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec
 	}
 	readyImmediately := isPodReadyImmediately(*featureFlags, taskSpec.Sidecars)
 
+	// Append credEntrypointArgs to the common extra entrypoint args, which
+	// already carry the flag indicating whether SPIRE was enabled by the configmap.
+	commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, credEntrypointArgs...)
 	if alphaAPIEnabled {
-		stepContainers, err = orderContainers(credEntrypointArgs, stepContainers, &taskSpec, taskRun.Spec.Debug, !readyImmediately)
+		stepContainers, err = orderContainers(commonExtraEntrypointArgs, stepContainers, &taskSpec, taskRun.Spec.Debug, !readyImmediately)
 	} else {
-		stepContainers, err = orderContainers(credEntrypointArgs, stepContainers, &taskSpec, nil, !readyImmediately)
+		stepContainers, err = orderContainers(commonExtraEntrypointArgs, stepContainers, &taskSpec, nil, !readyImmediately)
 	}
 	if err != nil {
 		return nil, err
@@ -275,6 +284,36 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec
 		return nil, err
 	}
 
+	readonly := true
+	if config.FromContextOrDefaults(ctx).FeatureFlags.EnableSpire {
+		volumes = append(volumes, corev1.Volume{
+			Name: spire.WorkloadAPI,
+			VolumeSource: corev1.VolumeSource{
+				CSI: &corev1.CSIVolumeSource{
+					Driver:   "csi.spiffe.io",
+					ReadOnly: &readonly,
+				},
+			},
+		})
+
+		for i := range stepContainers {
+			c := &stepContainers[i]
+			c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{
+				Name:      spire.WorkloadAPI,
+				MountPath: spire.VolumeMountPath,
+				ReadOnly:  true,
+			})
+		}
+		for i := range initContainers {
+			c := &initContainers[i]
+			c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{
+				Name:      spire.WorkloadAPI,
+				MountPath: spire.VolumeMountPath,
+				ReadOnly:  true,
+			})
+		}
+	}
+
 	mergedPodContainers := stepContainers
 
 	// Merge sidecar containers with step containers.
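To make the effect of the change above concrete: the CSI volume exposes the SPIFFE Workload API socket (by default `unix:///spiffe-workload-api/spire-agent.sock`, per the config-spire defaults) inside every step and init container. A minimal, hypothetical sketch (not part of this patch; the entrypointer changes live in PR 1.2 of this series) of how a process in the pod could fetch its x509 SVID through that socket using the go-spiffe v2 library:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/spiffe/go-spiffe/v2/workloadapi"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// The socket path matches the CSI volume mount added in pkg/pod/pod.go above.
	source, err := workloadapi.NewX509Source(ctx,
		workloadapi.WithClientOptions(
			workloadapi.WithAddr("unix:///spiffe-workload-api/spire-agent.sock")))
	if err != nil {
		log.Fatalf("creating X509Source: %v", err)
	}
	defer source.Close()

	svid, err := source.GetX509SVID()
	if err != nil {
		log.Fatalf("fetching SVID: %v", err)
	}
	// e.g. spiffe://example.org/ns/default/taskrun/<taskrun-name>
	fmt.Println("SVID:", svid.ID)
}
```
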
diff --git a/pkg/pod/pod_test.go b/pkg/pod/pod_test.go index 7e5a7817d74..ea1691e3325 100644 --- a/pkg/pod/pod_test.go +++ b/pkg/pod/pod_test.go @@ -35,6 +35,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/test/diff" "github.com/tektoncd/pipeline/test/names" corev1 "k8s.io/api/core/v1" @@ -86,6 +87,17 @@ func TestPodBuild(t *testing.T) { enableServiceLinks := false priorityClassName := "system-cluster-critical" taskRunName := "taskrun-name" + readonly := true + + initContainers := []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})} + for i := range initContainers { + c := &initContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }) + } for _, c := range []struct { desc string @@ -1522,7 +1534,7 @@ _EOF_ }, want: &corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, - InitContainers: []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})}, + InitContainers: initContainers, Containers: []corev1.Container{{ Name: "step-name", Image: "image", @@ -1537,6 +1549,7 @@ _EOF_ "/tekton/termination", "-step_metadata_dir", "/tekton/run/0/status", + "-enable_spire", "-entrypoint", "cmd", "--", @@ -1544,6 +1557,10 @@ _EOF_ VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, { Name: "tekton-creds-init-home-0", MountPath: "/tekton/creds", + }, { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, }}, implicitVolumeMounts...), TerminationMessagePath: "/tekton/termination", Env: []corev1.EnvVar{ @@ -1553,6 +1570,14 @@ _EOF_ Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-0", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, + }, corev1.Volume{ + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + ReadOnly: &readonly, + }, + }, }), ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, }, @@ -1572,7 +1597,7 @@ _EOF_ }, want: &corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, - InitContainers: []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})}, + InitContainers: initContainers, Containers: []corev1.Container{{ Name: "step-name", Image: "image", @@ -1587,6 +1612,7 @@ _EOF_ "/tekton/termination", "-step_metadata_dir", "/tekton/run/0/status", + "-enable_spire", "-entrypoint", "cmd", "--", @@ -1594,6 +1620,10 @@ _EOF_ VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, { Name: "tekton-creds-init-home-0", MountPath: "/tekton/creds", + }, { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, }}, implicitVolumeMounts...), TerminationMessagePath: "/tekton/termination", Env: []corev1.EnvVar{ @@ -1605,6 +1635,14 @@ _EOF_ Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-0", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, + }, corev1.Volume{ + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: 
&corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + ReadOnly: &readonly, + }, + }, }), ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, }, @@ -1930,9 +1968,24 @@ debug-fail-continue-heredoc-randomly-generated-mz4c7 `}, } + initContainers := []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}}), placeScriptsContainer} + readonly := true + for i := range initContainers { + c := &initContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }) + } + containersVolumeMounts := append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, { Name: "tekton-creds-init-home-0", MountPath: "/tekton/creds", + }, { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, }}, implicitVolumeMounts...) containersVolumeMounts = append(containersVolumeMounts, debugScriptsVolumeMount) containersVolumeMounts = append(containersVolumeMounts, corev1.VolumeMount{ @@ -1963,7 +2016,7 @@ debug-fail-continue-heredoc-randomly-generated-mz4c7 }, want: &corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, - InitContainers: []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}}), placeScriptsContainer}, + InitContainers: initContainers, Containers: []corev1.Container{{ Name: "step-name", Image: "image", @@ -1978,6 +2031,7 @@ debug-fail-continue-heredoc-randomly-generated-mz4c7 "/tekton/termination", "-step_metadata_dir", "/tekton/run/0/status", + "-enable_spire", "-breakpoint_on_failure", "-entrypoint", "cmd", @@ -1989,6 +2043,14 @@ debug-fail-continue-heredoc-randomly-generated-mz4c7 Volumes: append(implicitVolumes, debugScriptsVolume, debugInfoVolume, binVolume, scriptsVolume, runVolume(0), downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-0", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, + }, corev1.Volume{ + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + ReadOnly: &readonly, + }, + }, }), ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, }, @@ -2273,6 +2335,171 @@ func TestPodBuild_TaskLevelResourceRequirements(t *testing.T) { } } +func TestPodBuildwithSpireEnabled(t *testing.T) { + initContainers := []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})} + readonly := true + for i := range initContainers { + c := &initContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }) + } + + for _, c := range []struct { + desc string + trs v1beta1.TaskRunSpec + trAnnotation map[string]string + ts v1beta1.TaskSpec + want *corev1.PodSpec + wantAnnotations map[string]string + }{{ + desc: "simple with debug breakpoint onFailure", + trs: v1beta1.TaskRunSpec{ + Debug: &v1beta1.TaskRunDebug{ + Breakpoint: []string{breakpointOnFailure}, + }, + }, + ts: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Name: "name", + Image: "image", + Command: []string{"cmd"}, // avoid entrypoint lookup. 
+ }}, + }, + want: &corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + InitContainers: initContainers, + Containers: []corev1.Container{{ + Name: "step-name", + Image: "image", + Command: []string{"/tekton/bin/entrypoint"}, + Args: []string{ + "-wait_file", + "/tekton/downward/ready", + "-wait_file_content", + "-post_file", + "/tekton/run/0/out", + "-termination_path", + "/tekton/termination", + "-step_metadata_dir", + "/tekton/run/0/status", + "-enable_spire", + "-entrypoint", + "cmd", + "--", + }, + VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, { + Name: "tekton-creds-init-home-0", + MountPath: "/tekton/creds", + }, { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }}, implicitVolumeMounts...), + TerminationMessagePath: "/tekton/termination", + }}, + Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{ + Name: "tekton-creds-init-home-0", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, + }, corev1.Volume{ + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + ReadOnly: &readonly, + }, + }, + }), + ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, + }, + }} { + t.Run(c.desc, func(t *testing.T) { + featureFlags := map[string]string{ + "enable-spire": "true", + } + names.TestingSeed() + store := config.NewStore(logtesting.TestLogger(t)) + store.OnConfigChanged( + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()}, + Data: featureFlags, + }, + ) + kubeclient := fakek8s.NewSimpleClientset( + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "service-account", Namespace: "default"}, + Secrets: []corev1.ObjectReference{{ + Name: "multi-creds", + }}, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "multi-creds", + Namespace: "default", + Annotations: map[string]string{ + "tekton.dev/docker-0": "https://us.gcr.io", + "tekton.dev/docker-1": "https://docker.io", + "tekton.dev/git-0": "github.com", + "tekton.dev/git-1": "gitlab.com", + }}, + Type: "kubernetes.io/basic-auth", + Data: map[string][]byte{ + "username": []byte("foo"), + "password": []byte("BestEver"), + }, + }, + ) + var trAnnotations map[string]string + if c.trAnnotation == nil { + trAnnotations = map[string]string{ + ReleaseAnnotation: fakeVersion, + } + } else { + trAnnotations = c.trAnnotation + trAnnotations[ReleaseAnnotation] = fakeVersion + } + tr := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "taskrun-name", + Namespace: "default", + Annotations: trAnnotations, + }, + Spec: c.trs, + } + + // No entrypoints should be looked up. 
+ entrypointCache := fakeCache{} + builder := Builder{ + Images: images, + KubeClient: kubeclient, + EntrypointCache: entrypointCache, + } + + got, err := builder.Build(store.ToContext(context.Background()), tr, c.ts) + if err != nil { + t.Fatalf("builder.Build: %v", err) + } + + expectedName := kmeta.ChildName(tr.Name, "-pod") + if d := cmp.Diff(expectedName, got.Name); d != "" { + t.Errorf("Pod name does not match: %q", d) + } + + if d := cmp.Diff(c.want, &got.Spec, resourceQuantityCmp, volumeSort, volumeMountSort); d != "" { + t.Errorf("Diff %s", diff.PrintWantGot(d)) + } + + if c.wantAnnotations != nil { + if d := cmp.Diff(c.wantAnnotations, got.ObjectMeta.Annotations, cmpopts.IgnoreMapEntries(ignoreReleaseAnnotation)); d != "" { + t.Errorf("Annotation Diff(-want, +got):\n%s", d) + } + } + }) + } +} + // verifyTaskLevelComputeResources verifies that the given TaskRun's containers have the expected compute resources. func verifyTaskLevelComputeResources(expectedComputeResources []ExpectedComputeResources, containers []corev1.Container) error { if len(expectedComputeResources) != len(containers) { diff --git a/pkg/pod/status.go b/pkg/pod/status.go index 0a0894981eb..66e1b0f7854 100644 --- a/pkg/pod/status.go +++ b/pkg/pod/status.go @@ -17,6 +17,7 @@ limitations under the License. package pod import ( + "context" "encoding/json" "fmt" "strconv" @@ -25,6 +26,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/pkg/termination" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" @@ -104,11 +106,16 @@ func SidecarsReady(podStatus corev1.PodStatus) bool { } // MakeTaskRunStatus returns a TaskRunStatus based on the Pod's status. -func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev1.Pod) (v1beta1.TaskRunStatus, error) { +func MakeTaskRunStatus(ctx context.Context, logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev1.Pod, spireEnabled bool, + spireAPI spire.ControllerAPIClient) (v1beta1.TaskRunStatus, error) { trs := &tr.Status if trs.GetCondition(apis.ConditionSucceeded) == nil || trs.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown { // If the taskRunStatus doesn't exist yet, it's because we just started running markStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") + + if spireEnabled { + markStatusSignedResultsRunning(trs) + } } sortPodContainerStatuses(pod.Status.ContainerStatuses, pod.Spec.Containers) @@ -118,7 +125,7 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev if complete { updateCompletedTaskRunStatus(logger, trs, pod) } else { - updateIncompleteTaskRunStatus(trs, pod) + updateIncompleteTaskRunStatus(trs, pod, spireEnabled) } trs.PodName = pod.Name @@ -136,7 +143,7 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev } var merr *multierror.Error - if err := setTaskRunStatusBasedOnStepStatus(logger, stepStatuses, &tr); err != nil { + if err := setTaskRunStatusBasedOnStepStatus(ctx, logger, stepStatuses, &tr, spireEnabled, spireAPI); err != nil { merr = multierror.Append(merr, err) } @@ -147,7 +154,30 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev return *trs, merr.ErrorOrNil() } -func setTaskRunStatusBasedOnStepStatus(logger *zap.SugaredLogger, stepStatuses []corev1.ContainerStatus, tr *v1beta1.TaskRun) *multierror.Error { +func 
setTaskRunStatusBasedOnSpireVerification(ctx context.Context, logger *zap.SugaredLogger, tr *v1beta1.TaskRun, trs *v1beta1.TaskRunStatus,
+	filteredResults []v1beta1.PipelineResourceResult, spireAPI spire.ControllerAPIClient) {
+
+	if tr.IsSuccessful() && spireAPI != nil &&
+		((tr.Status.TaskSpec != nil && len(tr.Status.TaskSpec.Results) >= 1) || len(filteredResults) >= 1) {
+		logger.Info("validating signed results with spire: ", trs.TaskRunResults)
+		if err := spireAPI.VerifyTaskRunResults(ctx, filteredResults, tr); err != nil {
+			logger.Errorf("failed to verify signed results with spire: %v", err)
+			markStatusSignedResultsFailure(trs, err.Error())
+		} else {
+			logger.Info("successfully validated signed results with spire")
+			markStatusSignedResultsVerified(trs)
+		}
+	}
+
+	// If there are no signed results and the task spec requested none, there is
+	// nothing to verify, so mark the signed results as verified.
+	if len(filteredResults) == 0 && (tr.Status.TaskSpec == nil || len(tr.Status.TaskSpec.Results) == 0) {
+		markStatusSignedResultsVerified(trs)
+	}
+}
+
+func setTaskRunStatusBasedOnStepStatus(ctx context.Context, logger *zap.SugaredLogger, stepStatuses []corev1.ContainerStatus, tr *v1beta1.TaskRun,
+	spireEnabled bool, spireAPI spire.ControllerAPIClient) *multierror.Error {
+
 	trs := &tr.Status
 
 	var merr *multierror.Error
@@ -170,10 +200,13 @@ func setTaskRunStatusBasedOnStepStatus(logger *zap.SugaredLogger, stepStatuses [
 			logger.Errorf("error extracting the exit code of step %q in taskrun %q: %v", s.Name, tr.Name, err)
 			merr = multierror.Append(merr, err)
 		}
-		taskResults, pipelineResourceResults, filteredResults := filterResultsAndResources(results)
+		taskResults, pipelineResourceResults, filteredResults := filterResultsAndResources(results, spireEnabled)
 		if tr.IsSuccessful() {
 			trs.TaskRunResults = append(trs.TaskRunResults, taskResults...)
 			trs.ResourcesResult = append(trs.ResourcesResult, pipelineResourceResults...)
+			if spireEnabled {
+				setTaskRunStatusBasedOnSpireVerification(ctx, logger, tr, trs, filteredResults, spireAPI)
+			}
 		}
 		msg, err = createMessageFromResults(filteredResults)
 		if err != nil {
@@ -224,7 +257,8 @@ func createMessageFromResults(results []v1beta1.PipelineResourceResult) (string,
 	return string(bytes), nil
 }
 
-func filterResultsAndResources(results []v1beta1.PipelineResourceResult) ([]v1beta1.TaskRunResult, []v1beta1.PipelineResourceResult, []v1beta1.PipelineResourceResult) {
+func filterResultsAndResources(results []v1beta1.PipelineResourceResult, spireEnabled bool) ([]v1beta1.TaskRunResult, []v1beta1.PipelineResourceResult, []v1beta1.PipelineResourceResult) {
+
 	var taskResults []v1beta1.TaskRunResult
 	var pipelineResourceResults []v1beta1.PipelineResourceResult
 	var filteredResults []v1beta1.PipelineResourceResult
@@ -236,6 +270,15 @@ func filterResultsAndResources(results []v1beta1.PipelineResourceResult) ([]v1be
 		if err != nil {
 			continue
 		}
+		// TODO(#4723): Validate that the type we inferred from aos is matching the TaskResult Type before setting it to the taskRunResult.
+ // TODO(#4723): Validate the taskrun results against taskresults for object val + if spireEnabled { + if r.Key == spire.KeySVID || r.Key == spire.KeyResultManifest || strings.HasSuffix(r.Key, spire.KeySignatureSuffix) { + filteredResults = append(filteredResults, r) + continue + } + } taskRunResult := v1beta1.TaskRunResult{ Name: r.Key, Type: v1beta1.ResultsType(v.Type), @@ -317,10 +360,13 @@ func updateCompletedTaskRunStatus(logger *zap.SugaredLogger, trs *v1beta1.TaskRu trs.CompletionTime = &metav1.Time{Time: time.Now()} } -func updateIncompleteTaskRunStatus(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) { +func updateIncompleteTaskRunStatus(trs *v1beta1.TaskRunStatus, pod *corev1.Pod, spireEnabled bool) { switch pod.Status.Phase { case corev1.PodRunning: markStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") + if spireEnabled { + markStatusSignedResultsRunning(trs) + } case corev1.PodPending: switch { case IsPodExceedingNodeResources(pod): @@ -331,6 +377,9 @@ func updateIncompleteTaskRunStatus(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) markStatusRunning(trs, ReasonPullImageFailed, getWaitingMessage(pod)) default: markStatusRunning(trs, ReasonPending, getWaitingMessage(pod)) + if spireEnabled { + markStatusSignedResultsRunning(trs) + } } } } @@ -508,6 +557,36 @@ func markStatusSuccess(trs *v1beta1.TaskRunStatus) { }) } +// markStatusResultsVerified sets taskrun status to +func markStatusSignedResultsVerified(trs *v1beta1.TaskRunStatus) { + trs.SetCondition(&apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionTrue, + Reason: v1beta1.TaskRunReasonResultsVerified.String(), + Message: "Successfully verified all spire signed taskrun results", + }) +} + +// markStatusFailure sets taskrun status to failure with specified reason +func markStatusSignedResultsFailure(trs *v1beta1.TaskRunStatus, message string) { + trs.SetCondition(&apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionFalse, + Reason: v1beta1.TaskRunReasonsResultsVerificationFailed.String(), + Message: message, + }) +} + +// markStatusRunning sets taskrun status to running +func markStatusSignedResultsRunning(trs *v1beta1.TaskRunStatus) { + trs.SetCondition(&apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionUnknown, + Reason: v1beta1.AwaitingTaskRunResults.String(), + Message: "Waiting upon TaskRun results and signatures to verify", + }) +} + // sortPodContainerStatuses reorders a pod's container statuses so that // they're in the same order as the step containers from the TaskSpec. func sortPodContainerStatuses(podContainerStatuses []corev1.ContainerStatus, podSpecContainers []corev1.Container) { diff --git a/pkg/pod/status_test.go b/pkg/pod/status_test.go index c44f2b679ac..ddb1f4bb42b 100644 --- a/pkg/pod/status_test.go +++ b/pkg/pod/status_test.go @@ -17,6 +17,9 @@ limitations under the License. 
 package pod

 import (
+	"context"
+	"encoding/json"
+	"sort"
 	"strings"
 	"testing"
 	"time"
@@ -24,6 +27,8 @@ import (
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+	"github.com/tektoncd/pipeline/pkg/spire"
+	"github.com/tektoncd/pipeline/pkg/termination"
 	"github.com/tektoncd/pipeline/test/diff"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -66,6 +71,7 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) {
 		}},
 	}} {
 		t.Run(c.desc, func(t *testing.T) {
+			ctx := context.Background()
 			startTime := time.Date(2010, 1, 1, 1, 1, 1, 1, time.UTC)
 			tr := v1beta1.TaskRun{
 				ObjectMeta: metav1.ObjectMeta{
@@ -80,7 +86,7 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) {
 			}

 			logger, _ := logging.NewLogger("", "status")
-			merr := setTaskRunStatusBasedOnStepStatus(logger, c.ContainerStatuses, &tr)
+			merr := setTaskRunStatusBasedOnStepStatus(ctx, logger, c.ContainerStatuses, &tr, false, nil)
 			if merr != nil {
 				t.Errorf("setTaskRunStatusBasedOnStepStatus: %s", merr)
 			}
@@ -89,6 +95,396 @@
 	}
 }

+func TestMakeTaskRunStatusVerify(t *testing.T) {
+	sc := &spire.MockClient{}
+	processConditions := cmp.Transformer("sortConditionsAndFilterMessages", func(in []apis.Condition) []apis.Condition {
+		for i := range in {
+			in[i].Message = ""
+		}
+		sort.Slice(in, func(i, j int) bool {
+			return in[i].Type < in[j].Type
+		})
+		return in
+	})
+
+	terminationMessageTrans := cmp.Transformer("sortAndPrint", func(in *corev1.ContainerStateTerminated) *corev1.ContainerStateTerminated {
+		prs, err := termination.ParseMessage(nil, in.Message)
+		if err != nil {
+			return in
+		}
+		sort.Slice(prs, func(i, j int) bool {
+			return prs[i].Key < prs[j].Key
+		})
+
+		b, _ := json.Marshal(prs)
+		in.Message = string(b)
+
+		return in
+	})
+
+	// test awaiting results - OK
+	// results + test signed termination message - OK
+	// results + test unsigned termination message - OK
+
+	// no task results, no result + test signed termination message
+	// no task results, no result + test unsigned termination message
+	// force task result, no result + test unsigned termination message
+
+	statusSRVUnknown := func() duckv1beta1.Status {
+		status := statusRunning()
+		status.Conditions = append(status.Conditions, apis.Condition{
+			Type:    apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()),
+			Status:  corev1.ConditionUnknown,
+			Reason:  v1beta1.AwaitingTaskRunResults.String(),
+			Message: "Waiting upon TaskRun results and signatures to verify",
+		})
+		return status
+	}
+
+	statusSRVVerified := func() duckv1beta1.Status {
+		status := statusSuccess()
+		status.Conditions = append(status.Conditions, apis.Condition{
+			Type:    apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()),
+			Status:  corev1.ConditionTrue,
+			Reason:  v1beta1.TaskRunReasonResultsVerified.String(),
+			Message: "Successfully verified all spire signed taskrun results",
+		})
+		return status
+	}
+
+	statusSRVUnverified := func() duckv1beta1.Status {
+		status := statusSuccess()
+		status.Conditions = append(status.Conditions, apis.Condition{
+			Type:    apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()),
+			Status:  corev1.ConditionFalse,
+			Reason:  v1beta1.TaskRunReasonsResultsVerificationFailed.String(),
+			Message: "",
+		})
+		return status
+	}
+
+	for _, c := range []struct {
+		desc                 string
+		specifyTaskRunResult bool
+		resultOut            []v1beta1.PipelineResourceResult
+		podStatus            corev1.PodStatus
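+		// resultOut (above) holds the results that the test signs with the spire
+		// MockClient and injects into the step's termination message; pod (below)
+		// lets a case override the default pod built from podStatus; want is the
+		// expected status, including the results-verified condition.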
pod corev1.Pod + want v1beta1.TaskRunStatus + }{{ + // test awaiting results + desc: "running pod awaiting results", + podStatus: corev1.PodStatus{}, + + want: v1beta1.TaskRunStatus{ + Status: statusSRVUnknown(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{}, + Sidecars: []v1beta1.SidecarState{}, + }, + }, + }, { + desc: "test result with pipeline result without signed termination message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"resultName","value":"resultValue", "type":1}, {"key":"digest","value":"sha256:1234","resourceName":"source-image"}]`, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVUnverified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"digest","value":"sha256:1234","resourceName":"source-image"},{"key":"resultName","value":"resultValue","type":1}]`, + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + ResourcesResult: []v1beta1.PipelineResourceResult{{ + Key: "digest", + Value: "sha256:1234", + ResourceName: "source-image", + }}, + TaskRunResults: []v1beta1.TaskRunResult{{ + Name: "resultName", + Type: v1beta1.ResultsTypeString, + Value: *v1beta1.NewStructuredValues("resultValue"), + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test result with pipeline result with signed termination message", + resultOut: []v1beta1.PipelineResourceResult{ + { + Key: "resultName", + Value: "resultValue", + ResultType: v1beta1.TaskRunResultType, + }, + }, + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: ``, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVVerified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `to be overridden by signing`, + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + TaskRunResults: []v1beta1.TaskRunResult{{ + Name: "resultName", + Type: v1beta1.ResultsTypeString, + Value: *v1beta1.NewStructuredValues("resultValue"), + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test array result with signed termination message", + resultOut: []v1beta1.PipelineResourceResult{ + { + Key: "resultName", + Value: "[\"hello\",\"world\"]", + ResultType: v1beta1.TaskRunResultType, + }, + }, + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: ``, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVVerified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `to be overridden 
by signing`, + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + TaskRunResults: []v1beta1.TaskRunResult{{ + Name: "resultName", + Type: v1beta1.ResultsTypeArray, + Value: *v1beta1.NewStructuredValues("hello", "world"), + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test result with no result with signed termination message", + resultOut: []v1beta1.PipelineResourceResult{}, + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `to be overridden by signing`, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVVerified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `to be overridden by signing`, + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test result with no result without signed termination message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: "[]", + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVVerified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: "[]", + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test result (with task run result defined) with no result without signed termination message", + specifyTaskRunResult: true, + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: "[]", + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVUnverified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: "[]", + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }} { + t.Run(c.desc, func(t *testing.T) { + now := metav1.Now() + ctx := context.Background() + if cmp.Diff(c.pod, corev1.Pod{}) == "" { + c.pod = corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "foo", + CreationTimestamp: now, + }, + Status: c.podStatus, + } + } + + startTime := time.Date(2010, 1, 1, 1, 1, 1, 1, time.UTC) + tr := v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "task-run", + Namespace: "foo", + }, + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + StartTime: &metav1.Time{Time: startTime}, + }, + }, + } + + 
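+			// The steps below exercise the full signing round trip: create a
+			// SPIFFE entry for the TaskRun, sign the expected results with the
+			// MockClient, serialize them into the step's termination message,
+			// and let MakeTaskRunStatus verify them. A condensed (not exact)
+			// view of the flow:
+			//
+			//	sigs, _ := sc.Sign(ctx, results)                 // appends signature results
+			//	msg, _ := createMessageFromResults(append(results, sigs...))
+			//	pod.Status.ContainerStatuses[0].State.Terminated.Message = msg
+			//	status, _ := MakeTaskRunStatus(ctx, logger, tr, &pod, true /* spire */, sc)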
+			if c.specifyTaskRunResult {
+				// Specify result
+				tr.Status.TaskSpec = &v1beta1.TaskSpec{
+					Results: []v1beta1.TaskResult{{
+						Name: "some-task-result",
+					}},
+				}
+
+				c.want.TaskSpec = tr.Status.TaskSpec
+			}
+
+			if err := sc.CreateEntries(ctx, &tr, &c.pod, 10000); err != nil {
+				t.Fatalf("unable to create entry for tr: %v", tr.Name)
+			}
+
+			if c.resultOut != nil {
+				id := sc.GetIdentity(&tr)
+				for i := 0; i < 20; i++ {
+					sc.SignIdentities = append(sc.SignIdentities, id)
+				}
+				sigs, err := sc.Sign(ctx, c.resultOut)
+				if err != nil {
+					t.Fatalf("failed to sign: %v", err)
+				}
+				c.resultOut = append(c.resultOut, sigs...)
+				s, err := createMessageFromResults(c.resultOut)
+				if err != nil {
+					t.Fatalf("failed to create message from result: %v", err)
+				}
+
+				c.podStatus.ContainerStatuses[0].State.Terminated.Message = s
+				c.want.TaskRunStatusFields.Steps[0].ContainerState.Terminated.Message = s
+			}
+
+			logger, _ := logging.NewLogger("", "status")
+			got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, true, sc)
+			if err != nil {
+				t.Errorf("MakeTaskRunResult: %s", err)
+			}
+
+			// Common traits, set for test case brevity.
+			c.want.PodName = "pod"
+			c.want.StartTime = &metav1.Time{Time: startTime}
+
+			ensureTimeNotNil := cmp.Comparer(func(x, y *metav1.Time) bool {
+				if x == nil {
+					return y == nil
+				}
+				return y != nil
+			})
+			if d := cmp.Diff(c.want, got, ignoreVolatileTime, ensureTimeNotNil, processConditions, terminationMessageTrans); d != "" {
+				t.Errorf("Diff %s", diff.PrintWantGot(d))
+			}
+			if tr.Status.StartTime.Time != c.want.StartTime.Time {
+				t.Errorf("Expected TaskRun startTime to be unchanged but was %s", tr.Status.StartTime)
+			}
+
+			if err := sc.DeleteEntry(ctx, &tr, &c.pod); err != nil {
+				t.Fatalf("unable to delete entry for tr: %v", tr.Name)
+			}
+
+		})
+	}
+}
+
 func TestMakeTaskRunStatus(t *testing.T) {
 	for _, c := range []struct {
 		desc string
@@ -1061,7 +1457,7 @@ func TestMakeTaskRunStatus(t *testing.T) {
 		},
 	}
 	logger, _ := logging.NewLogger("", "status")
-	got, err := MakeTaskRunStatus(logger, tr, &c.pod)
+	got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, false, nil)
 	if err != nil {
 		t.Errorf("MakeTaskRunResult: %s", err)
 	}
@@ -1275,7 +1671,7 @@ func TestMakeTaskRunStatusAlpha(t *testing.T) {
 		},
 	}
 	logger, _ := logging.NewLogger("", "status")
-	got, err := MakeTaskRunStatus(logger, tr, &c.pod)
+	got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, false, nil)
 	if err != nil {
 		t.Errorf("MakeTaskRunResult: %s", err)
 	}
@@ -1396,7 +1792,7 @@ func TestMakeRunStatusJSONError(t *testing.T) {
 	}
 	logger, _ := logging.NewLogger("", "status")

-	gotTr, err := MakeTaskRunStatus(logger, tr, pod)
+	gotTr, err := MakeTaskRunStatus(context.Background(), logger, tr, pod, false, nil)
 	if err == nil {
 		t.Error("Expected error, got nil")
 	}
diff --git a/pkg/reconciler/taskrun/controller.go b/pkg/reconciler/taskrun/controller.go
index 9e2b07a9044..a8bc1161b6a 100644
--- a/pkg/reconciler/taskrun/controller.go
+++ b/pkg/reconciler/taskrun/controller.go
@@ -32,6 +32,7 @@ import (
 	cloudeventclient "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
 	"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
 	resolution "github.com/tektoncd/pipeline/pkg/resolution/resource"
+	"github.com/tektoncd/pipeline/pkg/spire"
 	"github.com/tektoncd/pipeline/pkg/taskrunmetrics"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/utils/clock"
@@ -54,7 +55,8 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex
 	resourceInformer :=
resourceinformer.Get(ctx) limitrangeInformer := limitrangeinformer.Get(ctx) resolutionInformer := resolutioninformer.Get(ctx) - configStore := config.NewStore(logger.Named("config-store"), taskrunmetrics.MetricsOnStore(logger)) + spireControllerAPI := spire.GetControllerAPIClient(ctx) + configStore := config.NewStore(logger.Named("config-store"), taskrunmetrics.MetricsOnStore(logger), spire.OnStore(ctx, logger)) configStore.WatchConfigs(cmw) entrypointCache, err := pod.NewEntrypointCache(kubeclientset) @@ -66,6 +68,7 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex KubeClientSet: kubeclientset, PipelineClientSet: pipelineclientset, Images: opts.Images, + SpireClient: spireControllerAPI, Clock: clock, taskRunLister: taskRunInformer.Lister(), resourceLister: resourceInformer.Lister(), diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go index 4ddd3d360b1..3c474eb97e0 100644 --- a/pkg/reconciler/taskrun/taskrun.go +++ b/pkg/reconciler/taskrun/taskrun.go @@ -46,6 +46,7 @@ import ( "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" "github.com/tektoncd/pipeline/pkg/remote" resolution "github.com/tektoncd/pipeline/pkg/resolution/resource" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/pkg/taskrunmetrics" _ "github.com/tektoncd/pipeline/pkg/taskrunmetrics/fake" // Make sure the taskrunmetrics are setup "github.com/tektoncd/pipeline/pkg/trustedresources" @@ -73,6 +74,7 @@ type Reconciler struct { KubeClientSet kubernetes.Interface PipelineClientSet clientset.Interface Images pipeline.Images + SpireClient spire.ControllerAPIClient Clock clock.PassiveClock // listers index properties about resources @@ -453,10 +455,11 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re defer c.durationAndCountMetrics(ctx, tr) logger := logging.FromContext(ctx) recorder := controller.GetEventRecorder(ctx) - var err error // Get the TaskRun's Pod if it should have one. Otherwise, create the Pod. var pod *corev1.Pod + var err error + spireEnabled := config.FromContextOrDefaults(ctx).FeatureFlags.EnableSpire if tr.Status.PodName != "" { pod, err = c.podLister.Pods(tr.Namespace).Get(tr.Status.PodName) @@ -532,6 +535,16 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re } if podconvert.SidecarsReady(pod.Status) { + if spireEnabled { + // TTL for the entry is in seconds + ttl := time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes) * time.Minute + if err = c.SpireClient.CreateEntries(ctx, tr, pod, ttl); err != nil { + logger.Errorf("Failed to create workload SPIFFE entry for taskrun %v: %v", tr.Name, err) + return err + } + logger.Infof("Created SPIFFE workload entry for %v/%v", tr.Namespace, tr.Name) + } + if err := podconvert.UpdateReady(ctx, c.KubeClientSet, *pod); err != nil { return err } @@ -541,7 +554,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re } // Convert the Pod's status to the equivalent TaskRun Status. 
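+	// With SPIRE enabled, the reconciler's lifecycle around this conversion is:
+	// a workload entry is registered once the sidecars are ready (CreateEntries
+	// above, with a TTL derived from the default timeout), MakeTaskRunStatus
+	// verifies the signed results while converting the pod status, and the
+	// entry is removed once the TaskRun is done (DeleteEntry below).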
- tr.Status, err = podconvert.MakeTaskRunStatus(logger, *tr, pod) + tr.Status, err = podconvert.MakeTaskRunStatus(ctx, logger, *tr, pod, spireEnabled, c.SpireClient) if err != nil { return err } @@ -551,6 +564,14 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re return err } + if spireEnabled && tr.IsDone() { + if err := c.SpireClient.DeleteEntry(ctx, tr, pod); err != nil { + logger.Infof("Failed to remove workload SPIFFE entry for taskrun %v: %v", tr.Name, err) + return err + } + logger.Infof("Deleted SPIFFE workload entry for %v/%v", tr.Namespace, tr.Name) + } + logger.Infof("Successfully reconciled taskrun %s/%s with status: %#v", tr.Name, tr.Namespace, tr.Status.GetCondition(apis.ConditionSucceeded)) return nil } diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go index 0d4fe769ef5..9bbbac35db6 100644 --- a/pkg/reconciler/taskrun/taskrun_test.go +++ b/pkg/reconciler/taskrun/taskrun_test.go @@ -45,6 +45,7 @@ import ( ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/pkg/workspace" "github.com/tektoncd/pipeline/test" "github.com/tektoncd/pipeline/test/diff" @@ -549,7 +550,7 @@ spec: image: "foo", name: "simple-step", cmd: "/mycmd", - }}), + }}, false), }, { name: "serviceaccount", taskRun: taskRunWithSaSuccess, @@ -557,7 +558,7 @@ spec: image: "foo", name: "sa-step", cmd: "/mycmd", - }}), + }}, false), }} { t.Run(tc.name, func(t *testing.T) { saName := tc.taskRun.Spec.ServiceAccountName @@ -957,7 +958,7 @@ spec: image: "foo", name: "simple-step", cmd: "/mycmd", - }}), + }}, false), }, { name: "serviceaccount", taskRun: taskRunWithSaSuccess, @@ -969,7 +970,7 @@ spec: image: "foo", name: "sa-step", cmd: "/mycmd", - }}), + }}, false), }, { name: "params", taskRun: taskRunSubstitution, @@ -1034,7 +1035,7 @@ spec: "[{\"name\":\"myimage\",\"type\":\"image\",\"url\":\"gcr.io/kristoff/sven\",\"digest\":\"\",\"OutputImageDir\":\"/workspace/output/myimage\"}]", }, }, - }), + }, false), }, { name: "taskrun-with-taskspec", taskRun: taskRunWithTaskSpec, @@ -1064,7 +1065,7 @@ spec: "--my-arg=foo", }, }, - }), + }, false), }, { name: "success-with-cluster-task", taskRun: taskRunWithClusterTask, @@ -1076,7 +1077,7 @@ spec: name: "simple-step", image: "foo", cmd: "/mycmd", - }}), + }}, false), }, { name: "taskrun-with-resource-spec-task-spec", taskRun: taskRunWithResourceSpecAndTaskSpec, @@ -1105,7 +1106,7 @@ spec: image: "ubuntu", cmd: "/mycmd", }, - }), + }, false), }, { name: "taskrun-with-pod", taskRun: taskRunWithPod, @@ -1117,7 +1118,7 @@ spec: name: "simple-step", image: "foo", cmd: "/mycmd", - }}), + }}, false), }, { name: "taskrun-with-credentials-variable-default-tekton-creds", taskRun: taskRunWithCredentialsVariable, @@ -1129,7 +1130,7 @@ spec: name: "mycontainer", image: "myimage", cmd: "/mycmd /tekton/creds", - }}), + }}, false), }, { name: "remote-task", taskRun: taskRunBundle, @@ -1141,7 +1142,7 @@ spec: name: "simple-step", image: "foo", cmd: "/mycmd", - }}), + }}, false), }} { t.Run(tc.name, func(t *testing.T) { testAssets, cancel := getTaskRunController(t, d) @@ -1203,6 +1204,7 @@ spec: func TestAlphaReconcile(t *testing.T) { names.TestingSeed() + readonly := true taskRunWithOutputConfig := parse.MustParseV1beta1TaskRun(t, ` metadata: name: test-taskrun-with-output-config @@ -1242,12 +1244,14 @@ spec: 
taskRunWithOutputConfig, taskRunWithOutputConfigAndWorkspace, } - cms := []*corev1.ConfigMap{{ - ObjectMeta: metav1.ObjectMeta{Namespace: system.Namespace(), Name: config.GetFeatureFlagsConfigName()}, - Data: map[string]string{ - "enable-api-fields": config.AlphaAPIFields, + cms := []*corev1.ConfigMap{ + { + ObjectMeta: metav1.ObjectMeta{Namespace: system.Namespace(), Name: config.GetFeatureFlagsConfigName()}, + Data: map[string]string{ + "enable-api-fields": config.AlphaAPIFields, + }, }, - }} + } d := test.Data{ ConfigMaps: cms, TaskRuns: taskruns, @@ -1267,12 +1271,30 @@ spec: "Normal Started ", "Normal Running Not all Steps", }, - wantPod: expectedPod("test-taskrun-with-output-config-pod", "", "test-taskrun-with-output-config", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{ - name: "mycontainer", - image: "myimage", - stdoutPath: "stdout.txt", - cmd: "/mycmd", - }}), + wantPod: addVolumeMounts(expectedPod("test-taskrun-with-output-config-pod", "", "test-taskrun-with-output-config", "foo", config.DefaultServiceAccountValue, false, + []corev1.Volume{ + { + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + ReadOnly: &readonly, + }, + }, + }}, []stepForExpectedPod{{ + name: "mycontainer", + image: "myimage", + stdoutPath: "stdout.txt", + cmd: "/mycmd", + }}, true), + []corev1.VolumeMount{ + { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }, + }, + ), }, { name: "taskrun-with-output-config-ws", taskRun: taskRunWithOutputConfigAndWorkspace, @@ -1281,22 +1303,40 @@ spec: "Normal Running Not all Steps", }, wantPod: addVolumeMounts(expectedPod("test-taskrun-with-output-config-ws-pod", "", "test-taskrun-with-output-config-ws", "foo", config.DefaultServiceAccountValue, false, - []corev1.Volume{{ - Name: "ws-9l9zj", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, + []corev1.Volume{ + { + Name: "ws-9l9zj", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, { + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + ReadOnly: &readonly, + }, + }, }, - }}, + }, []stepForExpectedPod{{ name: "mycontainer", image: "myimage", stdoutPath: "stdout.txt", cmd: "/mycmd", - }}), - []corev1.VolumeMount{{ - Name: "ws-9l9zj", - MountPath: "/workspace/data", - }}), + }}, true), + []corev1.VolumeMount{ + { + Name: "ws-9l9zj", + MountPath: "/workspace/data", + }, + { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }, + }, + ), }} { t.Run(tc.name, func(t *testing.T) { testAssets, cancel := getTaskRunController(t, d) @@ -1357,8 +1397,8 @@ spec: } func addVolumeMounts(p *corev1.Pod, vms []corev1.VolumeMount) *corev1.Pod { - for i, vm := range vms { - p.Spec.Containers[i].VolumeMounts = append(p.Spec.Containers[i].VolumeMounts, vm) + for i := range p.Spec.Containers { + p.Spec.Containers[i].VolumeMounts = append(p.Spec.Containers[i].VolumeMounts, vms...) 
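+		// Every container in the pod now receives the same mounts; with SPIRE
+		// enabled the volume/mount pair added everywhere is, in sketch form:
+		//
+		//	corev1.Volume{Name: spire.WorkloadAPI, VolumeSource: corev1.VolumeSource{
+		//		CSI: &corev1.CSIVolumeSource{Driver: "csi.spiffe.io", ReadOnly: &readonly}}}
+		//	corev1.VolumeMount{Name: spire.WorkloadAPI, MountPath: spire.VolumeMountPath, ReadOnly: true}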
} return p } @@ -1378,8 +1418,18 @@ spec: serviceAccountName: default `) + cms := []*corev1.ConfigMap{ + { + ObjectMeta: metav1.ObjectMeta{Namespace: system.Namespace(), Name: config.GetFeatureFlagsConfigName()}, + Data: map[string]string{ + "enable-api-fields": config.AlphaAPIFields, + }, + }, + } + d := test.Data{ - TaskRuns: []*v1beta1.TaskRun{tr}, + ConfigMaps: cms, + TaskRuns: []*v1beta1.TaskRun{tr}, ServiceAccounts: []*corev1.ServiceAccount{{ ObjectMeta: metav1.ObjectMeta{Name: tr.Spec.ServiceAccountName, Namespace: "foo"}, }}, @@ -1478,8 +1528,18 @@ spec: serviceAccountName: default `) + cms := []*corev1.ConfigMap{ + { + ObjectMeta: metav1.ObjectMeta{Namespace: system.Namespace(), Name: config.GetFeatureFlagsConfigName()}, + Data: map[string]string{ + "enable-api-fields": config.AlphaAPIFields, + }, + }, + } + d := test.Data{ - TaskRuns: []*v1beta1.TaskRun{tr}, + ConfigMaps: cms, + TaskRuns: []*v1beta1.TaskRun{tr}, ServiceAccounts: []*corev1.ServiceAccount{{ ObjectMeta: metav1.ObjectMeta{Name: tr.Spec.ServiceAccountName, Namespace: "foo"}, }}, @@ -2424,12 +2484,14 @@ spec: d := test.Data{ TaskRuns: []*v1beta1.TaskRun{taskRun}, } - d.ConfigMaps = []*corev1.ConfigMap{{ - ObjectMeta: metav1.ObjectMeta{Namespace: system.Namespace(), Name: config.GetFeatureFlagsConfigName()}, - Data: map[string]string{ - "enable-api-fields": config.AlphaAPIFields, + d.ConfigMaps = []*corev1.ConfigMap{ + { + ObjectMeta: metav1.ObjectMeta{Namespace: system.Namespace(), Name: config.GetFeatureFlagsConfigName()}, + Data: map[string]string{ + "enable-api-fields": config.AlphaAPIFields, + }, }, - }} + } testAssets, cancel := getTaskRunController(t, d) defer cancel() createServiceAccount(t, testAssets, "default", taskRun.Namespace) @@ -4662,7 +4724,7 @@ func podVolumeMounts(idx, totalSteps int) []corev1.VolumeMount { return mnts } -func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs []string, idx int) []string { +func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs []string, idx int, alpha bool) []string { args := []string{ "-wait_file", } @@ -4679,6 +4741,9 @@ func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs [] "-step_metadata_dir", fmt.Sprintf("/tekton/run/%d/status", idx), ) + if alpha { + args = append(args, "-enable_spire") + } if stdoutPath != "" { args = append(args, "-stdout_path", stdoutPath) } @@ -4740,11 +4805,24 @@ type stepForExpectedPod struct { stderrPath string } -func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTask bool, extraVolumes []corev1.Volume, steps []stepForExpectedPod) *corev1.Pod { +func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTask bool, extraVolumes []corev1.Volume, steps []stepForExpectedPod, alpha bool) *corev1.Pod { stepNames := make([]string, 0, len(steps)) for _, s := range steps { stepNames = append(stepNames, fmt.Sprintf("step-%s", s.name)) } + + initContainers := []corev1.Container{placeToolsInitContainer(stepNames)} + if alpha { + for i := range initContainers { + c := &initContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + ReadOnly: true, + }) + } + } + p := &corev1.Pod{ ObjectMeta: podObjectMeta(podName, taskName, taskRunName, ns, isClusterTask), Spec: corev1.PodSpec{ @@ -4756,7 +4834,7 @@ func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTas binVolume, downwardVolume, }, - InitContainers: 
[]corev1.Container{placeToolsInitContainer(stepNames)}, + InitContainers: initContainers, RestartPolicy: corev1.RestartPolicyNever, ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, ServiceAccountName: saName, @@ -4777,7 +4855,7 @@ func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTas VolumeMounts: podVolumeMounts(idx, len(steps)), TerminationMessagePath: "/tekton/termination", } - stepContainer.Args = podArgs(s.cmd, s.stdoutPath, s.stderrPath, s.args, idx) + stepContainer.Args = podArgs(s.cmd, s.stdoutPath, s.stderrPath, s.args, idx, alpha) for k, v := range s.envVars { stepContainer.Env = append(stepContainer.Env, corev1.EnvVar{ @@ -4849,12 +4927,14 @@ status: d := test.Data{ TaskRuns: taskruns, Tasks: []*v1beta1.Task{resultsTask}, - ConfigMaps: []*corev1.ConfigMap{{ - ObjectMeta: metav1.ObjectMeta{Namespace: system.Namespace(), Name: config.GetFeatureFlagsConfigName()}, - Data: map[string]string{ - "enable-api-fields": config.AlphaAPIFields, + ConfigMaps: []*corev1.ConfigMap{ + { + ObjectMeta: metav1.ObjectMeta{Namespace: system.Namespace(), Name: config.GetFeatureFlagsConfigName()}, + Data: map[string]string{ + "enable-api-fields": config.AlphaAPIFields, + }, }, - }}, + }, } for _, tc := range []struct { name string diff --git a/pkg/spire/config/config.go b/pkg/spire/config/config.go index 5398e234f24..fb4ee1bbdc6 100644 --- a/pkg/spire/config/config.go +++ b/pkg/spire/config/config.go @@ -24,6 +24,7 @@ import ( // SpireConfig holds the images reference for a number of container images used // across tektoncd pipelines. +// +k8s:deepcopy-gen=true type SpireConfig struct { // The trust domain corresponds to the trust root of a SPIFFE identity provider. TrustDomain string @@ -61,7 +62,7 @@ func (c SpireConfig) Validate() error { } if !strings.HasPrefix(c.NodeAliasPrefix, "/") { - return fmt.Errorf("Spire node alias should start with a /") + return fmt.Errorf("spire node alias should start with a /") } return nil diff --git a/pkg/spire/config/zz_generated.deepcopy.go b/pkg/spire/config/zz_generated.deepcopy.go new file mode 100644 index 00000000000..56590eee535 --- /dev/null +++ b/pkg/spire/config/zz_generated.deepcopy.go @@ -0,0 +1,38 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpireConfig) DeepCopyInto(out *SpireConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpireConfig. 
+func (in *SpireConfig) DeepCopy() *SpireConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(SpireConfig)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/pkg/spire/controller.go b/pkg/spire/controller.go
index 410c9c2ad63..cfc57160c14 100644
--- a/pkg/spire/controller.go
+++ b/pkg/spire/controller.go
@@ -27,8 +27,10 @@ import (
 	"github.com/spiffe/go-spiffe/v2/workloadapi"
 	entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1"
 	spiffetypes "github.com/spiffe/spire-api-sdk/proto/spire/api/types"
+	"github.com/tektoncd/pipeline/pkg/apis/config"
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
 	spireconfig "github.com/tektoncd/pipeline/pkg/spire/config"
+	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
@@ -45,6 +47,20 @@ func init() {
 // controllerKey is a way to associate the ControllerAPIClient from inside the context.Context
 type controllerKey struct{}

+// OnStore returns a callback that stores an updated spire config in the ControllerAPIClient
+func OnStore(ctx context.Context, logger *zap.SugaredLogger) func(name string, value interface{}) {
+	return func(name string, value interface{}) {
+		if name == config.GetSpireConfigName() {
+			cfg, ok := value.(*spireconfig.SpireConfig)
+			if !ok {
+				logger.Error("failed type assertion while extracting the spire config")
+				return
+			}
+			GetControllerAPIClient(ctx).SetConfig(*cfg)
+		}
+	}
+}
+
 // GetControllerAPIClient extracts the ControllerAPIClient from the context.
 func GetControllerAPIClient(ctx context.Context) ControllerAPIClient {
 	untyped := ctx.Value(controllerKey{})
diff --git a/test/controller.go b/test/controller.go
index ff3f3f7a20e..9445f0748af 100644
--- a/test/controller.go
+++ b/test/controller.go
@@ -340,7 +340,7 @@ func PrependResourceVersionReactor(f *ktesting.Fake) {

 // EnsureConfigurationConfigMapsExist makes sure all the configmaps exists.
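+// With the spire feature this now includes the config-spire ConfigMap; the
+// config store wired up in NewController watches it (via spire.OnStore), so
+// reconciler tests need it present even when empty. In sketch form, the
+// fallback appended below for a missing map is:
+//
+//	&corev1.ConfigMap{
+//		ObjectMeta: metav1.ObjectMeta{Name: config.GetSpireConfigName(), Namespace: system.Namespace()},
+//		Data:       map[string]string{},
+//	}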
func EnsureConfigurationConfigMapsExist(d *Data) { - var defaultsExists, featureFlagsExists, artifactBucketExists, artifactPVCExists, metricsExists, trustedresourcesExists bool + var defaultsExists, featureFlagsExists, artifactBucketExists, artifactPVCExists, metricsExists, trustedresourcesExists, spireconfigExists bool for _, cm := range d.ConfigMaps { if cm.Name == config.GetDefaultsConfigName() { defaultsExists = true @@ -360,6 +360,9 @@ func EnsureConfigurationConfigMapsExist(d *Data) { if cm.Name == config.GetTrustedResourcesConfigName() { trustedresourcesExists = true } + if cm.Name == config.GetSpireConfigName() { + spireconfigExists = true + } } if !defaultsExists { d.ConfigMaps = append(d.ConfigMaps, &corev1.ConfigMap{ @@ -397,4 +400,10 @@ func EnsureConfigurationConfigMapsExist(d *Data) { Data: map[string]string{}, }) } + if !spireconfigExists { + d.ConfigMaps = append(d.ConfigMaps, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetSpireConfigName(), Namespace: system.Namespace()}, + Data: map[string]string{}, + }) + } } diff --git a/test/controller_test.go b/test/controller_test.go index 65bd444626e..fba6e94cb7f 100644 --- a/test/controller_test.go +++ b/test/controller_test.go @@ -158,6 +158,10 @@ func TestEnsureConfigurationConfigMapsExist(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: config.GetTrustedResourcesConfigName(), Namespace: system.Namespace()}, Data: map[string]string{}, }) + expected.ConfigMaps = append(expected.ConfigMaps, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetSpireConfigName(), Namespace: system.Namespace()}, + Data: map[string]string{}, + }) EnsureConfigurationConfigMapsExist(&d) if d := cmp.Diff(expected, d); d != "" { diff --git a/test/e2e-common.sh b/test/e2e-common.sh index 5ef47f77e81..fdbf914df06 100755 --- a/test/e2e-common.sh +++ b/test/e2e-common.sh @@ -46,6 +46,65 @@ function install_pipeline_crd_version() { verify_pipeline_installation } +function spire_apply() { + if [ $# -lt 2 -o "$1" != "-spiffeID" ]; then + echo "spire_apply requires a spiffeID as the first arg" >&2 + exit 1 + fi + show=$(kubectl exec -n spire deployment/spire-server -- \ + /opt/spire/bin/spire-server entry show $1 $2) + if [ "$show" != "Found 0 entries" ]; then + # delete to recreate + entryid=$(echo "$show" | grep "^Entry ID" | cut -f2 -d:) + kubectl exec -n spire deployment/spire-server -- \ + /opt/spire/bin/spire-server entry delete -entryID $entryid + fi + kubectl exec -n spire deployment/spire-server -- \ + /opt/spire/bin/spire-server entry create "$@" +} + +function install_spire() { + echo ">> Deploying Spire" + DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + + echo "Creating SPIRE namespace..." + kubectl create ns spire + + echo "Applying SPIFFE CSI Driver configuration..." 
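+  # Deployment order matters here: the CSI driver and server must be up before
+  # the agent can attest, and the registration entries created afterwards via
+  # spire_apply (defined above) are idempotent -- an existing entry with the
+  # same spiffeID is deleted and recreated rather than duplicated.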
+  kubectl apply -f "$DIR"/testdata/spire/spiffe-csi-driver.yaml
+
+  echo "Deploying SPIRE server"
+  kubectl apply -f "$DIR"/testdata/spire/spire-server.yaml
+
+  echo "Deploying SPIRE agent"
+  kubectl apply -f "$DIR"/testdata/spire/spire-agent.yaml
+
+  wait_until_pods_running spire || fail_test "SPIRE did not come up"
+
+  spire_apply \
+    -spiffeID spiffe://example.org/ns/spire/node/example \
+    -selector k8s_psat:cluster:example-cluster \
+    -selector k8s_psat:agent_ns:spire \
+    -selector k8s_psat:agent_sa:spire-agent \
+    -node
+  spire_apply \
+    -spiffeID spiffe://example.org/ns/tekton-pipelines/sa/tekton-pipelines-controller \
+    -parentID spiffe://example.org/ns/spire/node/example \
+    -selector k8s:ns:tekton-pipelines \
+    -selector k8s:pod-label:app:tekton-pipelines-controller \
+    -selector k8s:sa:tekton-pipelines-controller \
+    -admin
+}
+
+function patch_pipeline_spire() {
+  kubectl patch \
+    deployment tekton-pipelines-controller \
+    -n tekton-pipelines \
+    --patch-file "$DIR"/testdata/patch/pipeline-controller-spire.json
+
+  verify_pipeline_installation
+}
+
 function verify_pipeline_installation() {
   # Make sure that everything is cleaned up in the current namespace.
   delete_pipeline_resources
diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh
index b553452f46c..32d8868b621 100755
--- a/test/e2e-tests.sh
+++ b/test/e2e-tests.sh
@@ -40,6 +40,22 @@ header "Setting up environment"
 install_pipeline_crd
 failed=0

+function alpha_gate() {
+  local gate="$1"
+  if [ "$gate" != "alpha" ] && [ "$gate" != "stable" ] && [ "$gate" != "beta" ] ; then
+    printf "Invalid gate %s\n" "${gate}"
+    exit 255
+  fi
+  if [ "$gate" == "alpha" ] ; then
+    printf "Setting up environment for alpha features\n"
+    install_spire
+    patch_pipeline_spire
+    jsonpatch=$(printf "{\"data\": {\"spire-trust-domain\": \"example.org\", \"spire-socket-path\": \"unix:///spiffe-workload-api/spire-agent.sock\", \"spire-server-addr\": \"spire-server.spire.svc.cluster.local:8081\", \"spire-node-alias-prefix\": \"/tekton-node/\"}}")
+    echo "config-spire ConfigMap patch: ${jsonpatch}"
+    kubectl patch configmap config-spire -n tekton-pipelines -p "$jsonpatch"
+    failed=0
+  fi
+}

 function set_feature_gate() {
   local gate="$1"
@@ -91,6 +107,7 @@ function run_e2e() {
   fi
 }

+alpha_gate "$PIPELINE_FEATURE_GATE"
 set_feature_gate "$PIPELINE_FEATURE_GATE"
 set_embedded_status "$EMBEDDED_STATUS_GATE"
 run_e2e
diff --git a/test/embed_test.go b/test/embed_test.go
index 893a4305bb4..515759a0102 100644
--- a/test/embed_test.go
+++ b/test/embed_test.go
@@ -41,10 +41,29 @@ const (
 // TestTaskRun_EmbeddedResource is an integration test that will verify a very simple "hello world" TaskRun can be
 // executed with an embedded resource spec.
 func TestTaskRun_EmbeddedResource(t *testing.T) {
+	embeddedResourceTest(t, false)
+}
+
+// TestTaskRun_EmbeddedResourceWithSpire is an integration test with spire enabled that will verify a very simple
+// "hello world" TaskRun can be executed with an embedded resource spec.
+func TestTaskRun_EmbeddedResourceWithSpire(t *testing.T) {
+	embeddedResourceTest(t, true)
+}
+
+func embeddedResourceTest(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
 	t.Parallel()

 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
@@ -68,6 +87,15 @@ func TestTaskRun_EmbeddedResource(t *testing.T) {
 	// TODO(#127) Currently we have no reliable access to logs from the TaskRun so we'll assume successful
 	// completion of the TaskRun means the TaskRun did what it was intended.
+
+	if spireEnabled {
+		tr, err := c.V1beta1TaskRunClient.Get(ctx, embedTaskRunName, metav1.GetOptions{})
+		if err != nil {
+			t.Errorf("Error retrieving taskrun: %s", err)
+		}
+		spireShouldPassTaskRunResultsVerify(tr, t)
+	}
+
 }

 func getEmbeddedTask(t *testing.T, taskName, namespace string, args []string) *v1beta1.Task {
diff --git a/test/entrypoint_test.go b/test/entrypoint_test.go
index f2ffa7b0ab5..f95e36fe22e 100644
--- a/test/entrypoint_test.go
+++ b/test/entrypoint_test.go
@@ -36,10 +36,31 @@ import (
 // that doesn't have a cmd defined. In addition to making sure the steps
 // are executed in the order specified
 func TestEntrypointRunningStepsInOrder(t *testing.T) {
+	entryPointerTest(t, false)
+}
+
+// TestEntrypointRunningStepsInOrderWithSpire is an integration test with spire enabled that will
+// verify an attempt to get the entrypoint of a container image
+// that doesn't have a cmd defined, in addition to making sure the steps
+// are executed in the order specified
+func TestEntrypointRunningStepsInOrderWithSpire(t *testing.T) {
+	entryPointerTest(t, true)
+}
+
+func entryPointerTest(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
 	t.Parallel()

 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
@@ -70,4 +91,12 @@ spec:
 		t.Errorf("Error waiting for TaskRun to finish successfully: %s", err)
 	}

+	if spireEnabled {
+		tr, err := c.V1beta1TaskRunClient.Get(ctx, epTaskRunName, metav1.GetOptions{})
+		if err != nil {
+			t.Errorf("Error retrieving taskrun: %s", err)
+		}
+		spireShouldPassTaskRunResultsVerify(tr, t)
+	}
+
 }
diff --git a/test/helm_task_test.go b/test/helm_task_test.go
index e56ee1c4015..5eab16b6535 100644
--- a/test/helm_task_test.go
+++ b/test/helm_task_test.go
@@ -42,11 +42,30 @@ var (
 // TestHelmDeployPipelineRun is an integration test that will verify a pipeline build an image
 // and then using helm to deploy it
 func TestHelmDeployPipelineRun(t *testing.T) {
+	helmDeployTest(t, false)
+}
+
+// TestHelmDeployPipelineRunWithSpire is an integration test with spire enabled that will verify a pipeline
+// that builds an image and then uses helm to deploy it
+func TestHelmDeployPipelineRunWithSpire(t *testing.T) {
+	helmDeployTest(t, true)
+}
+
+func helmDeployTest(t *testing.T, spireEnabled bool) {
 	repo := ensureDockerRepo(t)
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
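+		// spireFeatureGates (see init_test.go) asks requireAnyGate for
+		// enable-spire=true and enable-api-fields=alpha, so the spire variant
+		// is skipped on clusters whose feature-flags ConfigMap lacks them.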
c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + setupClusterBindingForHelm(ctx, c, t, namespace) var ( @@ -103,6 +122,16 @@ func TestHelmDeployPipelineRun(t *testing.T) { t.Fatalf("PipelineRun execution failed; helm may or may not have been installed :(") } + if spireEnabled { + taskrunList, err := c.V1beta1TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + helmDeployPipelineRunName}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", helmDeployPipelineRunName, err) + } + for _, taskrunItem := range taskrunList.Items { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } + } + // cleanup task to remove helm releases from cluster and cluster role bindings, will not fail the test if it fails, just log knativetest.CleanupOnInterrupt(func() { helmCleanup(ctx, c, t, namespace) }, t.Logf) defer helmCleanup(ctx, c, t, namespace) diff --git a/test/hermetic_taskrun_test.go b/test/hermetic_taskrun_test.go index 72b9c208de7..0aa174484a9 100644 --- a/test/hermetic_taskrun_test.go +++ b/test/hermetic_taskrun_test.go @@ -34,11 +34,30 @@ import ( // it does this by first running the TaskRun normally to make sure it passes // Then, it enables hermetic mode and makes sure the same TaskRun fails because it no longer has access to a network. func TestHermeticTaskRun(t *testing.T) { + hermeticTest(t, false) +} + +// TestHermeticTaskRunWithSpire (with spire enabled) make sure that the hermetic execution mode actually drops network from a TaskRun step +// it does this by first running the TaskRun normally to make sure it passes +// Then, it enables hermetic mode and makes sure the same TaskRun fails because it no longer has access to a network. 
+func TestHermeticTaskRunWithSpire(t *testing.T) { + hermeticTest(t, true) +} + +func hermeticTest(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t, requireAnyGate(map[string]string{"enable-api-fields": "alpha"})) + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t, requireAnyGate(map[string]string{"enable-api-fields": "alpha"})) + } + t.Parallel() defer tearDown(ctx, t, c, namespace) @@ -67,6 +86,13 @@ func TestHermeticTaskRun(t *testing.T) { if err := WaitForTaskRunState(ctx, c, regularTaskRunName, Succeed(regularTaskRunName), "TaskRunCompleted", v1beta1Version); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", regularTaskRunName, err) } + if spireEnabled { + tr, err := c.V1beta1TaskRunClient.Get(ctx, regularTaskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error retrieving taskrun: %s", err) + } + spireShouldPassTaskRunResultsVerify(tr, t) + } // now, run the task mode with hermetic mode // it should fail, since it shouldn't be able to access any network @@ -79,6 +105,13 @@ func TestHermeticTaskRun(t *testing.T) { if err := WaitForTaskRunState(ctx, c, hermeticTaskRunName, Failed(hermeticTaskRunName), "Failed", v1beta1Version); err != nil { t.Errorf("Error waiting for TaskRun %s to fail: %s", hermeticTaskRunName, err) } + if spireEnabled { + tr, err := c.V1beta1TaskRunClient.Get(ctx, hermeticTaskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error retrieving taskrun: %s", err) + } + spireShouldFailTaskRunResultsVerify(tr, t) + } }) } } diff --git a/test/ignore_step_error_test.go b/test/ignore_step_error_test.go index 9b1aed8fac2..03cc32837dc 100644 --- a/test/ignore_step_error_test.go +++ b/test/ignore_step_error_test.go @@ -33,10 +33,27 @@ import ( ) func TestMissingResultWhenStepErrorIsIgnored(t *testing.T) { + stepErrorTest(t, false) +} + +func TestMissingResultWhenStepErrorIsIgnoredWithSpire(t *testing.T) { + stepErrorTest(t, true) +} + +func stepErrorTest(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) @@ -99,6 +116,10 @@ spec: t.Fatalf("task1 should have produced a result before failing the step") } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } + for _, r := range taskrunItem.Status.TaskRunResults { if r.Name == "result1" && r.Value.StringVal != "123" { t.Fatalf("task1 should have initialized a result \"result1\" to \"123\"") diff --git a/test/init_test.go b/test/init_test.go index 7089fb6715b..4beca060c80 100644 --- a/test/init_test.go +++ b/test/init_test.go @@ -31,6 +31,7 @@ import ( "testing" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/names" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -46,6 +47,11 @@ import ( "sigs.k8s.io/yaml" ) +var spireFeatureGates = map[string]string{ + "enable-spire": "true", + "enable-api-fields": "alpha", +} + var initMetrics sync.Once var 
skipRootUserTests = false

@@ -319,3 +325,19 @@ func getCRDYaml(ctx context.Context, cs *clients, ns string) ([]byte, error) {

 	return output, nil
 }
+
+// spireShouldFailTaskRunResultsVerify asserts that the taskrun's results were not verified by spire
+func spireShouldFailTaskRunResultsVerify(tr *v1beta1.TaskRun, t *testing.T) {
+	if tr.IsTaskRunResultVerified() {
+		t.Errorf("Taskrun `%s` results should not have been verified, since the taskrun failed or was skipped", tr.Name)
+	}
+	t.Logf("Taskrun `%s` status results condition verified by spire as false, which is valid", tr.Name)
+}
+
+// spireShouldPassTaskRunResultsVerify asserts that the taskrun's results were verified by spire
+func spireShouldPassTaskRunResultsVerify(tr *v1beta1.TaskRun, t *testing.T) {
+	if !tr.IsTaskRunResultVerified() {
+		t.Errorf("Taskrun `%s` status condition not verified. Spire taskrun results verification failure", tr.Name)
+	}
+	t.Logf("Taskrun `%s` status results condition verified by spire as true, which is valid", tr.Name)
+}
diff --git a/test/kaniko_task_test.go b/test/kaniko_task_test.go
index 875c244b0bf..f600e72dfc8 100644
--- a/test/kaniko_task_test.go
+++ b/test/kaniko_task_test.go
@@ -42,6 +42,15 @@ const (

 // TestTaskRun is an integration test that will verify a TaskRun using kaniko
 func TestKanikoTaskRun(t *testing.T) {
+	kanikoTest(t, false)
+}
+
+// TestKanikoTaskRunWithSpire is an integration test that will verify a TaskRun using kaniko with Spire enabled
+func TestKanikoTaskRunWithSpire(t *testing.T) {
+	kanikoTest(t, true)
+}
+
+func kanikoTest(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
@@ -50,7 +59,15 @@ func TestKanikoTaskRun(t *testing.T) {
 		t.Skip("Skip test as skipRootUserTests set to true")
 	}

-	c, namespace := setup(ctx, t, withRegistry)
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, withRegistry, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t, withRegistry)
+	}
+
 	t.Parallel()

 	repo := fmt.Sprintf("registry.%s:5000/kanikotasktest", namespace)
@@ -123,6 +140,10 @@ func TestKanikoTaskRun(t *testing.T) {
 		t.Fatalf("Expected remote commit to match local revision: %s, %s", commit, revision)
 	}

+	if spireEnabled {
+		spireShouldPassTaskRunResultsVerify(tr, t)
+	}
+
 	// match the local digest, which is first capture group against the remote image
 	remoteDigest, err := getRemoteDigest(t, c, namespace, repo)
 	if err != nil {
diff --git a/test/pipelinefinally_test.go b/test/pipelinefinally_test.go
index d0eeb7ca110..4a9f67147f7 100644
--- a/test/pipelinefinally_test.go
+++ b/test/pipelinefinally_test.go
@@ -44,10 +44,27 @@ var requireAlphaFeatureFlags = requireAnyGate(map[string]string{
 })

 func TestPipelineLevelFinally_OneDAGTaskFailed_InvalidTaskResult_Failure(t *testing.T) {
+	pipelineLevelFinallyOneDAGTaskFailedInvalidTaskResultFailureWithOptions(t, false)
+}
+
+func TestPipelineLevelFinally_OneDAGTaskFailed_InvalidTaskResult_FailureWithSpire(t *testing.T) {
+	pipelineLevelFinallyOneDAGTaskFailedInvalidTaskResultFailureWithOptions(t, true)
+}
+
+func pipelineLevelFinallyOneDAGTaskFailedInvalidTaskResultFailureWithOptions(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
 	defer tearDown(ctx, t, c,
namespace) @@ -260,27 +277,46 @@ spec: if !isFailed(t, n, taskrunItem.Status.Conditions) { t.Fatalf("dag task %s should have failed", n) } + if spireEnabled { + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } dagTask1EndTime = taskrunItem.Status.CompletionTime case n == "dagtask2": if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess", v1beta1Version); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } dagTask2EndTime = taskrunItem.Status.CompletionTime case n == "dagtask4": + if spireEnabled { + // Skipped so status annotations should not be there. Results should not be verified as not run + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } t.Fatalf("task %s should have skipped due to when expression", n) case n == "dagtask5": if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess", v1beta1Version); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } case n == "finaltask1": if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess", v1beta1Version); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } finalTaskStartTime = taskrunItem.Status.StartTime case n == "finaltask2": if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess", v1beta1Version); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } for _, p := range taskrunItem.Spec.Params { switch param := p.Name; param { case "dagtask1-status": @@ -306,6 +342,9 @@ spec: if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess", v1beta1Version); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } for _, p := range taskrunItem.Spec.Params { if p.Name == "dagtask-result" && p.Value.StringVal != "Hello" { t.Errorf("Error resolving task result reference in a finally task %s", n) @@ -315,13 +354,27 @@ spec: if !isSuccessful(t, n, taskrunItem.Status.Conditions) { t.Fatalf("final task %s should have succeeded", n) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } case n == "guardedfinaltaskusingdagtask5status1": if !isSuccessful(t, n, taskrunItem.Status.Conditions) { t.Fatalf("final task %s should have succeeded", n) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } case n == "guardedfinaltaskusingdagtask5result2": + if spireEnabled { + // Skipped so status annotations should not be there. Results should not be verified as not run + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } t.Fatalf("final task %s should have skipped due to when expression evaluating to false", n) case n == "finaltaskconsumingdagtask1" || n == "finaltaskconsumingdagtask4" || n == "guardedfinaltaskconsumingdagtask4": + if spireEnabled { + // Skipped so status annotations should not be there. 
Results should not be verified as not run + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } t.Fatalf("final task %s should have skipped due to missing task result reference", n) default: t.Fatalf("Found unexpected taskRun %s", n) @@ -394,10 +447,27 @@ spec: } func TestPipelineLevelFinally_OneFinalTaskFailed_Failure(t *testing.T) { + pipelineLevelFinallyOneFinalTaskFailedFailureWithOptions(t, false) +} + +func TestPipelineLevelFinally_OneFinalTaskFailed_FailureWithSpire(t *testing.T) { + pipelineLevelFinallyOneFinalTaskFailedFailureWithOptions(t, true) +} + +func pipelineLevelFinallyOneFinalTaskFailedFailureWithOptions(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) @@ -451,10 +521,16 @@ spec: if !isSuccessful(t, n, taskrunItem.Status.Conditions) { t.Fatalf("dag task %s should have succeeded", n) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } case n == "finaltask1": if !isFailed(t, n, taskrunItem.Status.Conditions) { t.Fatalf("final task %s should have failed", n) } + if spireEnabled { + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } default: t.Fatalf("TaskRuns were not found for both final and dag tasks") } @@ -462,10 +538,27 @@ spec: } func TestPipelineLevelFinally_OneFinalTask_CancelledRunFinally(t *testing.T) { + pipelineLevelFinallyOneFinalTaskCancelledRunFinallyWithOptions(t, false) +} + +func TestPipelineLevelFinally_OneFinalTask_CancelledRunFinallyWithSpire(t *testing.T) { + pipelineLevelFinallyOneFinalTaskCancelledRunFinallyWithOptions(t, true) +} + +func pipelineLevelFinallyOneFinalTaskCancelledRunFinallyWithOptions(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t, requireAlphaFeatureFlags) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t, requireAlphaFeatureFlags) + } + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) @@ -562,13 +655,25 @@ spec: if !isCancelled(t, n, taskrunItem.Status.Conditions) { t.Fatalf("dag task %s should have been cancelled", n) } + if spireEnabled { + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } case "dagtask2": + if spireEnabled { + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } t.Fatalf("second dag task %s should be skipped as it depends on the result from cancelled 'dagtask1'", n) case "finaltask1": if !isSuccessful(t, n, taskrunItem.Status.Conditions) { t.Fatalf("first final task %s should have succeeded", n) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } case "finaltask2": + if spireEnabled { + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } t.Fatalf("second final task %s should be skipped as it depends on the result from cancelled 'dagtask1'", n) default: t.Fatalf("TaskRuns were not found for both final and dag tasks") @@ -577,10 +682,27 @@ spec: } func TestPipelineLevelFinally_OneFinalTask_StoppedRunFinally(t *testing.T) { + 
@@ -577,10 +682,27 @@ spec:
 }
 
 func TestPipelineLevelFinally_OneFinalTask_StoppedRunFinally(t *testing.T) {
+	pipelineLevelFinallyOneFinalTaskStoppedRunFinallyWithOptions(t, false)
+}
+
+func TestPipelineLevelFinally_OneFinalTask_StoppedRunFinallyWithSpire(t *testing.T) {
+	pipelineLevelFinallyOneFinalTaskStoppedRunFinallyWithOptions(t, true)
+}
+
+func pipelineLevelFinallyOneFinalTaskStoppedRunFinallyWithOptions(t *testing.T, spireEnabled bool) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t, requireAlphaFeatureFlags)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t, requireAlphaFeatureFlags)
+	}
+
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
 	defer tearDown(ctx, t, c, namespace)
@@ -677,14 +799,23 @@ spec:
 			if !isSuccessful(t, n, taskrunItem.Status.Conditions) {
 				t.Fatalf("dag task %s should have succeeded", n)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 		case "finaltask1":
 			if !isSuccessful(t, n, taskrunItem.Status.Conditions) {
 				t.Fatalf("first final task %s should have succeeded", n)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 		case "finaltask2":
 			if !isSuccessful(t, n, taskrunItem.Status.Conditions) {
 				t.Fatalf("second final task %s should have succeeded", n)
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&taskrunItem, t)
+			}
 		default:
 			t.Fatalf("TaskRuns were not found for both final and dag tasks")
 		}
diff --git a/test/pipelinerun_test.go b/test/pipelinerun_test.go
index d82fd09412f..97c30512e24 100644
--- a/test/pipelinerun_test.go
+++ b/test/pipelinerun_test.go
@@ -176,6 +176,15 @@ spec:
 }
 
 func TestPipelineRun(t *testing.T) {
+	pipelineTestWithOptions(t, false)
+}
+
+// A different, shorter test name is used here because helpers.ObjectNameForTest(t)
+// would exceed the object-name character limit and crash.
+func TestWithSpirePR(t *testing.T) {
+	pipelineTestWithOptions(t, true)
+}
+
+func pipelineTestWithOptions(t *testing.T, spireEnabled bool) {
 	t.Parallel()
 	type tests struct {
 		name string
@@ -315,7 +324,15 @@ spec:
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	c, namespace := setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
 	defer tearDown(ctx, t, c, namespace)
@@ -347,6 +364,9 @@ spec:
 			if strings.HasPrefix(actualTaskRunItem.Name, taskRunName) {
 				taskRunName = actualTaskRunItem.Name
 			}
+			if spireEnabled {
+				spireShouldPassTaskRunResultsVerify(&actualTaskRunItem, t)
+			}
 		}
 		expectedTaskRunNames = append(expectedTaskRunNames, taskRunName)
 		r, err := c.V1beta1TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{})
@@ -448,10 +468,28 @@ spec:
 // TestPipelineRunRefDeleted tests that a running PipelineRun doesn't fail when the Pipeline
 // it references is deleted.
 func TestPipelineRunRefDeleted(t *testing.T) {
+	pipelineRunRefDeletedTestWithOptions(t, false)
+}
+
+// TestPipelineRunRefDeletedWithSpire tests (with spire enabled) that a running PipelineRun doesn't fail when the Pipeline
+// it references is deleted.
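+// The deletion happens mid-run, so the TaskRuns themselves still complete
+// normally; with SPIRE enabled their results are therefore expected to verify,
+// which the spire check at the end of this test asserts.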
+func TestPipelineRunRefDeletedWithSpire(t *testing.T) { + pipelineRunRefDeletedTestWithOptions(t, true) +} + +func pipelineRunRefDeletedTestWithOptions(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) @@ -515,6 +553,16 @@ spec: t.Fatalf("Error waiting for PipelineRun %s to finish: %s", prName, err) } + if spireEnabled { + taskrunList, err := c.V1beta1TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + prName}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", prName, err) + } + for _, taskrunItem := range taskrunList.Items { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } + } + } // TestPipelineRunPending tests that a Pending PipelineRun is not run until the pending @@ -522,10 +570,30 @@ spec: // transition PipelineRun states during the test, which the TestPipelineRun suite does not // support. func TestPipelineRunPending(t *testing.T) { + pipelineRunPendingTestWithOptions(t, false) +} + +// TestPipelineRunPendingWithSpire tests (with spire) that a Pending PipelineRun is not run until the pending +// status is cleared. This is separate from the TestPipelineRun suite because it has to +// transition PipelineRun states during the test, which the TestPipelineRun suite does not +// support. +func TestPipelineRunPendingWithSpire(t *testing.T) { + pipelineRunPendingTestWithOptions(t, true) +} + +func pipelineRunPendingTestWithOptions(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) @@ -601,6 +669,15 @@ spec: if err := WaitForPipelineRunState(ctx, c, prName, timeout, PipelineRunSucceed(prName), "PipelineRunSuccess", v1beta1Version); err != nil { t.Fatalf("Error waiting for PipelineRun %s to finish: %s", prName, err) } + if spireEnabled { + taskrunList, err := c.V1beta1TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + prName}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", prName, err) + } + for _, taskrunItem := range taskrunList.Items { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } + } } func getFanInFanOutTasks(t *testing.T, namespace string) map[string]*v1beta1.Task { diff --git a/test/status_test.go b/test/status_test.go index f8dc6ecdab3..422bd0e3324 100644 --- a/test/status_test.go +++ b/test/status_test.go @@ -51,10 +51,30 @@ var ( // verify a very simple "hello world" TaskRun and PipelineRun failure // execution lead to the correct TaskRun status. func TestTaskRunPipelineRunStatus(t *testing.T) { + taskRunPipelineRunStatus(t, false) +} + +// TestTaskRunPipelineRunStatusWithSpire is an integration test with spire enabled that will +// verify a very simple "hello world" TaskRun and PipelineRun failure +// execution lead to the correct TaskRun status. 
+func TestTaskRunPipelineRunStatusWithSpire(t *testing.T) { + taskRunPipelineRunStatus(t, true) +} + +func taskRunPipelineRunStatus(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + t.Parallel() knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) @@ -88,6 +108,14 @@ spec: t.Errorf("Error waiting for TaskRun to finish: %s", err) } + if spireEnabled { + tr, err := c.V1beta1TaskRunClient.Get(ctx, taskRun.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error retrieving taskrun: %s", err) + } + spireShouldFailTaskRunResultsVerify(tr, t) + } + pipeline := parse.MustParseV1beta1Pipeline(t, fmt.Sprintf(` metadata: name: %s @@ -114,6 +142,17 @@ spec: if err := WaitForPipelineRunState(ctx, c, pipelineRun.Name, timeout, PipelineRunFailed(pipelineRun.Name), "BuildValidationFailed", v1beta1Version); err != nil { t.Errorf("Error waiting for TaskRun to finish: %s", err) } + + if spireEnabled { + taskrunList, err := c.V1beta1TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRun.Name}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRun.Name, err) + } + for _, taskrunItem := range taskrunList.Items { + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } + } + } // TestProvenanceFieldInPipelineRunTaskRunStatus is an integration test that will diff --git a/test/taskrun_test.go b/test/taskrun_test.go index fb05ba2a755..154cd38250a 100644 --- a/test/taskrun_test.go +++ b/test/taskrun_test.go @@ -21,12 +21,14 @@ package test import ( "context" + "encoding/json" "fmt" "regexp" "strings" "testing" "github.com/tektoncd/pipeline/test/parse" + jsonpatch "gomodules.xyz/jsonpatch/v2" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -34,16 +36,33 @@ import ( "github.com/tektoncd/pipeline/pkg/pod" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" knativetest "knative.dev/pkg/test" "knative.dev/pkg/test/helpers" ) func TestTaskRunFailure(t *testing.T) { + taskrunFailureTest(t, false) +} + +func TestTaskRunFailureWithSpire(t *testing.T) { + taskrunFailureTest(t, true) +} + +func taskrunFailureTest(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + t.Parallel() knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) @@ -93,6 +112,10 @@ spec: t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) } + if spireEnabled { + spireShouldFailTaskRunResultsVerify(taskrun, t) + } + expectedStepState := []v1beta1.StepState{{ ContainerState: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{ @@ -136,10 +159,27 @@ spec: } func TestTaskRunStatus(t *testing.T) { + taskrunStatusTest(t, false) +} + +func TestTaskRunStatusWithSpire(t *testing.T) { + taskrunStatusTest(t, true) +} + +func taskrunStatusTest(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := 
setup(ctx, t)
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
 	t.Parallel()
 
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
@@ -185,6 +225,10 @@ spec:
 		t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
 	}
 
+	if spireEnabled {
+		spireShouldPassTaskRunResultsVerify(taskrun, t)
+	}
+
 	expectedStepState := []v1beta1.StepState{{
 		ContainerState: corev1.ContainerState{
 			Terminated: &corev1.ContainerStateTerminated{
@@ -210,3 +254,113 @@ spec:
 		t.Fatalf("-got, +want: %v", d)
 	}
 }
+
+func TestTaskRunModification(t *testing.T) {
+	taskrunModificationTest(t, false)
+}
+
+func TestTaskRunModificationWithSpire(t *testing.T) {
+	taskrunModificationTest(t, true)
+}
+
+func taskrunModificationTest(t *testing.T, spireEnabled bool) {
+	ctx := context.Background()
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	var c *clients
+	var namespace string
+
+	if spireEnabled {
+		c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates))
+	} else {
+		c, namespace = setup(ctx, t)
+	}
+
+	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
+	defer tearDown(ctx, t, c, namespace)
+
+	taskRunName := "non-falsifiable-provenance"
+
+	t.Logf("Creating Task and TaskRun in namespace %s", namespace)
+	task := parse.MustParseV1beta1Task(t, fmt.Sprintf(`
+metadata:
+  name: non-falsifiable
+  namespace: %s
+spec:
+  steps:
+  - image: ubuntu
+    script: |
+      #!/usr/bin/env bash
+      sleep 20
+      printf "hello" > "$(results.foo.path)"
+      printf "world" > "$(results.bar.path)"
+  results:
+  - name: foo
+  - name: bar
+`, namespace))
+	if _, err := c.V1beta1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Failed to create Task: %s", err)
+	}
+	taskRun := parse.MustParseV1beta1TaskRun(t, fmt.Sprintf(`
+metadata:
+  name: %s
+  namespace: %s
+spec:
+  taskRef:
+    name: non-falsifiable
+`, taskRunName, namespace))
+	if _, err := c.V1beta1TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Failed to create TaskRun: %s", err)
+	}
+
+	t.Logf("Waiting for TaskRun in namespace %s to be in running state", namespace)
+	if err := WaitForTaskRunState(ctx, c, taskRunName, Running(taskRunName), "TaskRunRunning", v1beta1Version); err != nil {
+		t.Errorf("Error waiting for TaskRun to start running: %s", err)
+	}
+
+	patches := []jsonpatch.JsonPatchOperation{{
+		Operation: "replace",
+		Path:      "/status/taskSpec/steps/0/image",
+		Value:     "not-ubuntu",
+	}}
+	patchBytes, err := json.Marshal(patches)
+	if err != nil {
+		t.Fatalf("failed to marshal patch bytes: %s", err)
+	}
+	t.Logf("Patching TaskRun %s in namespace %s mid-run so that SPIRE catches the unauthorized change", taskRunName, namespace)
+	if _, err := c.V1beta1TaskRunClient.Patch(ctx, taskRunName, types.JSONPatchType, patchBytes, metav1.PatchOptions{}, "status"); err != nil {
+		t.Fatalf("Failed to patch taskrun `%s`: %s", taskRunName, err)
+	}
+
+	t.Logf("Waiting for TaskRun %s in namespace %s to fail", taskRunName, namespace)
+	if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunFailed(taskRunName), "TaskRunFailed", v1beta1Version); err != nil {
+		t.Errorf("Error waiting for TaskRun to finish: %s", err)
+	}
+
+	taskrun, err := c.V1beta1TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
+	}
+
+	if spireEnabled {
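+		// The TaskRun status was modified out-of-band by the JSON patch above,
+		// so the run's provenance can no longer be trusted and SPIRE result
+		// verification is expected to fail.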
spireShouldFailTaskRunResultsVerify(taskrun, t) + } + + expectedStepState := []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + Reason: "Error", + }, + }, + Name: "unnamed-0", + ContainerName: "step-unnamed-0", + }} + + ignoreTerminatedFields := cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID") + ignoreStepFields := cmpopts.IgnoreFields(v1beta1.StepState{}, "ImageID") + if d := cmp.Diff(taskrun.Status.Steps, expectedStepState, ignoreTerminatedFields, ignoreStepFields); d != "" { + t.Fatalf("-got, +want: %v", d) + } +} diff --git a/test/testdata/patch/pipeline-controller-spire.json b/test/testdata/patch/pipeline-controller-spire.json new file mode 100644 index 00000000000..6c08f20dfe9 --- /dev/null +++ b/test/testdata/patch/pipeline-controller-spire.json @@ -0,0 +1,56 @@ +{ + "spec":{ + "template":{ + "spec":{ + "$setElementOrder/containers":[ + { + "name":"tekton-pipelines-controller" + } + ], + "$setElementOrder/volumes":[ + { + "name":"config-logging" + }, + { + "name":"config-registry-cert" + }, + { + "name":"spiffe-workload-api" + } + ], + "containers":[ + { + "$setElementOrder/volumeMounts":[ + { + "mountPath":"/etc/config-logging" + }, + { + "mountPath":"/etc/config-registry-cert" + }, + { + "mountPath":"/spiffe-workload-api" + } + ], + "name":"tekton-pipelines-controller", + "volumeMounts":[ + { + "mountPath":"/spiffe-workload-api", + "name":"spiffe-workload-api", + "readOnly":true + } + ] + } + ], + "volumes":[ + { + "csi":{ + "driver":"csi.spiffe.io", + "readOnly":true + }, + "name":"spiffe-workload-api" + } + ] + } + } + } +} diff --git a/test/testdata/spire/spiffe-csi-driver.yaml b/test/testdata/spire/spiffe-csi-driver.yaml new file mode 100644 index 00000000000..e9d07bc5683 --- /dev/null +++ b/test/testdata/spire/spiffe-csi-driver.yaml @@ -0,0 +1,20 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "csi.spiffe.io" +spec: + # Only ephemeral, inline volumes are supported. There is no need for a + # controller to provision and attach volumes. + attachRequired: false + + # Request the pod information which the CSI driver uses to verify that an + # ephemeral mount was requested. + podInfoOnMount: true + + # Don't change ownership on the contents of the mount since the Workload API + # Unix Domain Socket is typically open to all (i.e. 0777). + fsGroupPolicy: None + + # Declare support for ephemeral volumes only. 
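+  # (A consuming pod mounts the driver as an inline ephemeral CSI volume; the
+  # controller patch in test/testdata/patch/pipeline-controller-spire.json
+  # above does exactly this, with driver csi.spiffe.io and readOnly: true.)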
+ volumeLifecycleModes: + - Ephemeral diff --git a/test/testdata/spire/spire-agent.yaml b/test/testdata/spire/spire-agent.yaml new file mode 100644 index 00000000000..4e848a51388 --- /dev/null +++ b/test/testdata/spire/spire-agent.yaml @@ -0,0 +1,208 @@ +# ServiceAccount for the SPIRE agent +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-agent + namespace: spire + +--- + +# Required cluster role to allow spire-agent to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role +rules: +- apiGroups: [""] + resources: ["pods", "nodes", "nodes/proxy"] + verbs: ["get"] + +--- + +# Binds above cluster role to spire-agent service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role-binding +subjects: +- kind: ServiceAccount + name: spire-agent + namespace: spire +roleRef: + kind: ClusterRole + name: spire-agent-cluster-role + apiGroup: rbac.authorization.k8s.io + + +--- + +# ConfigMap for the SPIRE agent featuring: +# 1) PSAT node attestation +# 2) K8S Workload Attestation over the secure kubelet port +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-agent + namespace: spire +data: + agent.conf: | + agent { + data_dir = "/run/spire" + log_level = "DEBUG" + server_address = "spire-server" + server_port = "8081" + socket_path = "/run/spire/sockets/spire-agent.sock" + trust_bundle_path = "/run/spire/bundle/bundle.crt" + trust_domain = "example.org" + } + + plugins { + NodeAttestor "k8s_psat" { + plugin_data { + cluster = "example-cluster" + } + } + + KeyManager "memory" { + plugin_data { + } + } + + WorkloadAttestor "k8s" { + plugin_data { + skip_kubelet_verification = true + } + } + } + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: spire-agent + namespace: spire + labels: + app: spire-agent +spec: + selector: + matchLabels: + app: spire-agent + updateStrategy: + type: RollingUpdate + template: + metadata: + namespace: spire + labels: + app: spire-agent + spec: + hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: spire-agent + containers: + - name: spire-agent + image: ghcr.io/spiffe/spire-agent:1.1.1 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/config/agent.conf"] + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-bundle + mountPath: /run/spire/bundle + readOnly: true + - name: spire-token + mountPath: /var/run/secrets/tokens + - name: spire-agent-socket-dir + mountPath: /run/spire/sockets + # This is the container which runs the SPIFFE CSI driver. + - name: spiffe-csi-driver + image: ghcr.io/spiffe/spiffe-csi-driver:nightly + imagePullPolicy: IfNotPresent + args: [ + "-workload-api-socket-dir", "/spire-agent-socket", + "-csi-socket-path", "/spiffe-csi/csi.sock", + ] + env: + # The CSI driver needs a unique node ID. The node name can be + # used for this purpose. + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + # The volume containing the SPIRE agent socket. The SPIFFE CSI + # driver will mount this directory into containers. + - mountPath: /spire-agent-socket + name: spire-agent-socket-dir + readOnly: true + # The volume that will contain the CSI driver socket shared + # with the kubelet and the driver registrar. + - mountPath: /spiffe-csi + name: spiffe-csi-socket-dir + # The volume containing mount points for containers. 
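+        # (Bidirectional propagation is needed so that the mounts the driver
+        # creates under /var/lib/kubelet/pods propagate back to the host and
+        # become visible inside the workload containers.)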
+ - mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + name: mountpoint-dir + securityContext: + privileged: true + # This container runs the CSI Node Driver Registrar which takes care + # of all the little details required to register a CSI driver with + # the kubelet. + - name: node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 + imagePullPolicy: IfNotPresent + args: [ + "-csi-address", "/spiffe-csi/csi.sock", + "-kubelet-registration-path", "/var/lib/kubelet/plugins/csi.spiffe.io/csi.sock", + ] + volumeMounts: + # The registrar needs access to the SPIFFE CSI driver socket + - mountPath: /spiffe-csi + name: spiffe-csi-socket-dir + # The registrar needs access to the Kubelet plugin registration + # directory + - name: kubelet-plugin-registration-dir + mountPath: /registration + volumes: + - name: spire-config + configMap: + name: spire-agent + - name: spire-bundle + configMap: + name: spire-bundle + - name: spire-token + projected: + sources: + - serviceAccountToken: + path: spire-agent + expirationSeconds: 7200 + audience: spire-server + # This volume is used to share the Workload API socket between the CSI + # driver and SPIRE agent. Note, an emptyDir volume could also be used, + # however, this can lead to broken bind mounts in the workload + # containers if the agent pod is restarted (since the emptyDir + # directory on the node that was mounted into workload containers by + # the CSI driver belongs to the old pod instance and is no longer + # valid). + - name: spire-agent-socket-dir + hostPath: + path: /run/spire/agent-sockets + type: DirectoryOrCreate + # This volume is where the socket for kubelet->driver communication lives + - name: spiffe-csi-socket-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.spiffe.io + type: DirectoryOrCreate + # This volume is where the SPIFFE CSI driver mounts volumes + - name: mountpoint-dir + hostPath: + path: /var/lib/kubelet/pods + type: Directory + # This volume is where the node-driver-registrar registers the plugin + # with kubelet + - name: kubelet-plugin-registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory diff --git a/test/testdata/spire/spire-server.yaml b/test/testdata/spire/spire-server.yaml new file mode 100644 index 00000000000..ceec824613d --- /dev/null +++ b/test/testdata/spire/spire-server.yaml @@ -0,0 +1,211 @@ +# ServiceAccount used by the SPIRE server. 
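+# (These manifests assume the "spire" namespace already exists; presumably the
+# e2e setup scripts added by this patch create it before applying them.)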
+apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-server + namespace: spire + +--- + +# Required cluster role to allow spire-server to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role +rules: +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + # allow TokenReview requests (to verify service account tokens for PSAT + # attestation) +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["get", "create"] + +--- + +# Binds above cluster role to spire-server service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role-binding + namespace: spire +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: ClusterRole + name: spire-server-cluster-role + apiGroup: rbac.authorization.k8s.io + +--- + +# Role for the SPIRE server +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: spire + name: spire-server-role +rules: + # allow "get" access to pods (to resolve selectors for PSAT attestation) +- apiGroups: [""] + resources: ["pods"] + verbs: ["get"] + # allow access to "get" and "patch" the spire-bundle ConfigMap (for SPIRE + # agent bootstrapping, see the spire-bundle ConfigMap below) +- apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["spire-bundle"] + verbs: ["get", "patch"] + +--- + +# RoleBinding granting the spire-server-role to the SPIRE server +# service account. +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-role-binding + namespace: spire +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: Role + name: spire-server-role + apiGroup: rbac.authorization.k8s.io + +--- + +# ConfigMap containing the latest trust bundle for the trust domain. It is +# updated by SPIRE using the k8sbundle notifier plugin. SPIRE agents mount +# this config map and use the certificate to bootstrap trust with the SPIRE +# server during attestation. +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-bundle + namespace: spire + +--- + +# ConfigMap containing the SPIRE server configuration. +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-server + namespace: spire +data: + server.conf: | + server { + bind_address = "0.0.0.0" + bind_port = "8081" + trust_domain = "example.org" + data_dir = "/run/spire/data" + log_level = "DEBUG" + default_svid_ttl = "1h" + ca_ttl = "12h" + ca_subject { + country = ["US"] + organization = ["SPIFFE"] + common_name = "" + } + } + + plugins { + DataStore "sql" { + plugin_data { + database_type = "sqlite3" + connection_string = "/run/spire/data/datastore.sqlite3" + } + } + + NodeAttestor "k8s_psat" { + plugin_data { + clusters = { + "example-cluster" = { + service_account_allow_list = ["spire:spire-agent"] + } + } + } + } + + KeyManager "disk" { + plugin_data { + keys_path = "/run/spire/data/keys.json" + } + } + + Notifier "k8sbundle" { + plugin_data { + # This plugin updates the bundle.crt value in the spire:spire-bundle + # ConfigMap by default, so no additional configuration is necessary. 
+ } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: spire-server + namespace: spire + labels: + app: spire-server +spec: + replicas: 1 + selector: + matchLabels: + app: spire-server + template: + metadata: + namespace: spire + labels: + app: spire-server + spec: + serviceAccountName: spire-server + shareProcessNamespace: true + containers: + - name: spire-server + image: ghcr.io/spiffe/spire-server:1.1.1 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/config/server.conf"] + ports: + - containerPort: 8081 + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + volumes: + - name: spire-config + configMap: + name: spire-server + +--- + +# Service definition for SPIRE server defining the gRPC port. +apiVersion: v1 +kind: Service +metadata: + name: spire-server + namespace: spire +spec: + type: NodePort + ports: + - name: grpc + port: 8081 + targetPort: 8081 + protocol: TCP + selector: + app: spire-server
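+
+# Applied together, spire-server.yaml, spire-agent.yaml, and
+# spiffe-csi-driver.yaml stand up the test SPIRE deployment; the pipelines
+# controller is then patched with
+# test/testdata/patch/pipeline-controller-spire.json to mount the Workload API
+# socket. The e2e scripts in this patch (test/e2e-common.sh) presumably
+# automate the same sequence.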