diff --git a/docs/spire.md b/docs/spire.md
index b43ebdd344e..f537fbb447c 100644
--- a/docs/spire.md
+++ b/docs/spire.md
@@ -63,22 +63,22 @@ This feature relies on a SPIRE installation. This is how it integrates into the
 ┌─────────────┐  Register TaskRun Workload Identity           ┌──────────┐
 │             ├──────────────────────────────────────────────►│          │
 │   Tekton    │                                               │  SPIRE   │
-│ Controller  │◄───────────┐                                  │  Server  │
-│             │            │ Listen on TaskRun                │          │
-└────────────┬┘            │                                  └──────────┘
- ▲           │     ┌───────┴───────────────────────────────┐    ▲
- │           │     │            Tekton TaskRun             │    │
- │           │     └───────────────────────────────────────┘    │
- │ Configure │                                          ▲      │ Attest
- │ Pod &     │                                          │      │   +
- │ check     │                                          │      │ Request
- │ ready     │      ┌───────────┐                       │      │ SVIDs
- │           └────► │  TaskRun  ├───────────────────────┘      │
- │                  │    Pod    │                              │
- │                  └───────────┘   TaskRun Entrypointer       │
- │                   ▲              Sign Result and update     │
- │ Get               │ Get SVID     TaskRun status with        │
- │ SPIRE             │              signature + cert           │
+│  Pipelines  │◄───────────┐                                  │  Server  │
+│ Controller  │            │ Listen on TaskRun                │          │
+└────────────┬┘◄┐          │                                  └──────────┘
+ ▲           │  │  ┌───────┴───────────────────────────────┐    ▲
+ │           │  │  │            Tekton TaskRun             │    │
+ │           │  │  └───────────────────────────────────────┘    │
+ │ Configure │  │                                               │ Attest
+ │ Pod &     │  └──────────────────┐  TaskRun Entrypointer      │   +
+ │ check     │                     │  Sign Result and update    │ Request
+ │ ready     │      ┌───────────┐  │  the status with the       │ SVIDs
+ │           └────► │  TaskRun  ├──┘  signature + cert          │
+ │                  │    Pod    │     which will be used by     │
+ │                  └───────────┘ tekton-pipelines-controller   │
+ │                   ▲              to update TaskRun.          │
+ │ Get               │ Get SVID                                 │
+ │ SPIRE             │                                          │
 │ server             │
 │ Credentials        │
 ▼                    │
┌┴───────────────────┴─────────────────────────────────────────────────────┐
@@ -280,6 +280,101 @@ The signatures are being verified by the Tekton controller, the process of verif
 - For each of the items in the results, verify its content against its associated `.sig` field
 
+# TaskRun Status attestations
+
+Each TaskRun status that is written by the tekton-pipelines-controller will be signed to ensure that there is no external
+tampering of the TaskRun status. Upon each retrieval of the TaskRun, the tekton-pipelines-controller checks whether the
+status is initialized, and whether the signature validates the current status.
+The signature and SVID are stored as annotations on the TaskRun status field, and can be verified by a client.
+
+The verification is done on every consumption of the TaskRun, except when the TaskRun is uninitialized. When uninitialized, the
+tekton-pipelines-controller is not influenced by fields in the status and thus will not sign incorrect reflections of the TaskRun.
+
+The spec and the TaskRun annotations/labels are not signed, because other controllers or users may legitimately interact
+with them (e.g. cancelling a TaskRun). Editing the object's annotations/labels or its spec will therefore never render the
+status field unverifiable.
+
+As the TaskRun progresses, the Pipelines Controller will reconcile the TaskRun object and continually verify the current hash against the `tekton.dev/status-hash-sig` before updating the hash to match the new status and creating a new signature.
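+
+Below is a minimal sketch of what such a client-side check could look like. It is illustrative only: the annotation names
+match the example further down, but the exact canonicalization of the status (what is stripped before hashing, and what
+exactly is signed) is defined by the `pkg/spire` package, so the assumptions spelled out in the comments should be treated
+as assumptions, not as the authoritative algorithm.
+
+```go
+// Sketch of a client-side check of the status attestation annotations.
+// Assumptions (verify against pkg/spire before relying on this):
+//   - tekton.dev/status-hash-sig is a base64, ASN.1-encoded ECDSA signature
+//     over the SHA-256 digest of the tekton.dev/status-hash string;
+//   - the signing key is the one in the tekton.dev/controller-svid cert.
+package spirecheck
+
+import (
+	"crypto/ecdsa"
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/pem"
+	"errors"
+)
+
+func verifyStatusAnnotations(svidPEM, statusHash, sigBase64 string) error {
+	// Parse the controller SVID certificate stored on the TaskRun status.
+	block, _ := pem.Decode([]byte(svidPEM))
+	if block == nil {
+		return errors.New("no SVID certificate found in annotations")
+	}
+	cert, err := x509.ParseCertificate(block.Bytes)
+	if err != nil {
+		return err
+	}
+	pub, ok := cert.PublicKey.(*ecdsa.PublicKey)
+	if !ok {
+		return errors.New("controller SVID does not carry an ECDSA key")
+	}
+	sig, err := base64.StdEncoding.DecodeString(sigBase64)
+	if err != nil {
+		return err
+	}
+	// Check the signature over the status hash annotation.
+	digest := sha256.Sum256([]byte(statusHash))
+	if !ecdsa.VerifyASN1(pub, digest[:], sig) {
+		return errors.New("tekton.dev/status-hash-sig does not verify")
+	}
+	// A complete check would also recompute the hash from the status (with
+	// the spire annotations stripped) and compare it to statusHash, and
+	// would validate the SVID chain against the SPIRE trust bundle.
+	return nil
+}
+```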
+
+An example of the resulting TaskRun annotations:
+
+```console
+$ tkn tr describe non-falsifiable-provenance -oyaml
+apiVersion: tekton.dev/v1beta1
+kind: TaskRun
+metadata:
+  annotations:
+    pipeline.tekton.dev/release: 3ee99ec
+  creationTimestamp: "2022-03-04T19:10:46Z"
+  generation: 1
+  labels:
+    app.kubernetes.io/managed-by: tekton-pipelines
+  name: non-falsifiable-provenance
+  namespace: default
+  resourceVersion: "23088242"
+  uid: 548ebe99-d40b-4580-a9bc-afe80915e22e
+spec:
+  serviceAccountName: default
+  taskSpec:
+    results:
+    - description: ""
+      name: foo
+    - description: ""
+      name: bar
+    steps:
+    - image: ubuntu
+      name: non-falsifiable
+      resources: {}
+      script: |
+        #!/usr/bin/env bash
+        sleep 30
+        printf "%s" "hello" > "$(results.foo.path)"
+        printf "%s" "world" > "$(results.bar.path)"
+  timeout: 1m0s
+status:
+  annotations:
+    tekton.dev/controller-svid: |
+      -----BEGIN CERTIFICATE-----
+      MIIB7jCCAZSgAwIBAgIRAI8/08uXSn9tyv7cRN87uvgwCgYIKoZIzj0EAwIwHjEL
+      MAkGA1UEBhMCVVMxDzANBgNVBAoTBlNQSUZGRTAeFw0yMjAzMDQxODU0NTlaFw0y
+      MjAzMDQxOTU1MDlaMB0xCzAJBgNVBAYTAlVTMQ4wDAYDVQQKEwVTUElSRTBZMBMG
+      ByqGSM49AgEGCCqGSM49AwEHA0IABL+e9OjkMv+7XgMWYtrzq0ESzJi+znA/Pm8D
+      nvApAHg3/rEcNS8c5LgFFRzDfcs9fxGSSkL1JrELzoYul1Q13XejgbMwgbAwDgYD
+      VR0PAQH/BAQDAgOoMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNV
+      HRMBAf8EAjAAMB0GA1UdDgQWBBR+ma+yZfo092FKIM4F3yhEY8jgDDAfBgNVHSME
+      GDAWgBRKiCg5+YdTaQ+5gJmvt2QcDkQ6KjAxBgNVHREEKjAohiZzcGlmZmU6Ly9l
+      eGFtcGxlLm9yZy90ZWt0b24vY29udHJvbGxlcjAKBggqhkjOPQQDAgNIADBFAiEA
+      8xVWrQr8+i6yMLDm9IUjtvTbz9ofjSsWL6c/+rxmmRYCIBTiJ/HW7di3inSfxwqK
+      5DKyPrKoR8sq8Ne7flkhgbkg
+      -----END CERTIFICATE-----
+    tekton.dev/status-hash: 76692c9dcd362f8a6e4bda8ccb4c0937ad16b0d23149ae256049433192892511
+    tekton.dev/status-hash-sig: MEQCIFv2bW0k4g0Azx+qaeZjUulPD8Ma3uCUn0tXQuuR1FaEAiBHQwN4XobOXmC2nddYm04AZ74YubUyNl49/vnbnR/HcQ==
+  completionTime: "2022-03-04T19:11:22Z"
+  conditions:
+  - lastTransitionTime: "2022-03-04T19:11:22Z"
+    message: All Steps have completed executing
+    reason: Succeeded
+    status: "True"
+    type: Succeeded
+  - lastTransitionTime: "2022-03-04T19:11:22Z"
+    message: Spire verified
+    reason: TaskRunResultsVerified
+    status: "True"
+    type: SignedResultsVerified
+  podName: non-falsifiable-provenance-pod
+  startTime: "2022-03-04T19:10:46Z"
+  steps:
+  ...
+
+```
+
+## How is the status being verified
+
+The signature is verified by the Tekton controller; the process of verification is as follows:
+
+- Verify the status-hash fields
+  - Verify the `tekton.dev/status-hash` content against its associated `tekton.dev/status-hash-sig` field. If the status
+    hash does not match the signature, the `tekton.dev/not-verified = yes` annotation will be added. A quick way to
+    inspect these annotations with `kubectl` is shown at the end of this document.
+
 ## Further Details
-To learn more about SPIRE TaskRun attestations, check out the [TEP](https://github.com/tektoncd/community/blob/main/teps/0089-nonfalsifiable-provenance-support.md).
+To learn more about SPIRE attestations, check out the [TEP](https://github.com/tektoncd/community/blob/main/teps/0089-nonfalsifiable-provenance-support.md).
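+
+As a quick sanity check, the attestation annotations can be inspected directly (a sketch, using the example TaskRun
+above; an empty result for the second query simply means the `tekton.dev/not-verified` annotation was never set):
+
+```console
+$ kubectl get taskrun non-falsifiable-provenance -o jsonpath='{.status.annotations.tekton\.dev/status-hash}'
+76692c9dcd362f8a6e4bda8ccb4c0937ad16b0d23149ae256049433192892511
+$ kubectl get taskrun non-falsifiable-provenance -o jsonpath='{.status.annotations.tekton\.dev/not-verified}'
+```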
diff --git a/pkg/apis/config/feature_flags.go b/pkg/apis/config/feature_flags.go index 7f348b11307..c294daa608b 100644 --- a/pkg/apis/config/feature_flags.go +++ b/pkg/apis/config/feature_flags.go @@ -142,9 +142,6 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if err := setEmbeddedStatus(cfgMap, DefaultEmbeddedStatus, &tc.EmbeddedStatus); err != nil { return nil, err } - if err := setFeature(enableSpire, DefaultEnableSpire, &tc.EnableSpire); err != nil { - return nil, err - } // Given that they are alpha features, Tekton Bundles and Custom Tasks should be switched on if // enable-api-fields is "alpha". If enable-api-fields is not "alpha" then fall back to the value of diff --git a/pkg/pod/pod.go b/pkg/pod/pod.go index 859a9dec238..28638aaebf0 100644 --- a/pkg/pod/pod.go +++ b/pkg/pod/pod.go @@ -193,7 +193,6 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec } readyImmediately := isPodReadyImmediately(*featureFlags, taskSpec.Sidecars) - // append credEntrypointArgs with entrypoint arg that contains if spire is enabled by configmap commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, credEntrypointArgs...) diff --git a/pkg/pod/pod_test.go b/pkg/pod/pod_test.go index 5b11a7f93d6..f079a4bbd40 100644 --- a/pkg/pod/pod_test.go +++ b/pkg/pod/pod_test.go @@ -2219,175 +2219,6 @@ func TestPodBuildwithSpireEnabled(t *testing.T) { } } -func TestPodBuildwithSpireEnabled(t *testing.T) { - placeToolsInit := corev1.Container{ - Name: "place-tools", - Image: images.EntrypointImage, - WorkingDir: "/", - Command: []string{"/ko-app/entrypoint", "cp", "/ko-app/entrypoint", "/tekton/bin/entrypoint"}, - VolumeMounts: []corev1.VolumeMount{binMount}, - } - - initContainers := []corev1.Container{placeToolsInit, tektonDirInit(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})} - for i := range initContainers { - c := &initContainers[i] - c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ - Name: "spiffe-workload-api", - MountPath: "/spiffe-workload-api", - }) - } - - for _, c := range []struct { - desc string - trs v1beta1.TaskRunSpec - trAnnotation map[string]string - ts v1beta1.TaskSpec - want *corev1.PodSpec - wantAnnotations map[string]string - }{{ - desc: "simple with debug breakpoint onFailure", - trs: v1beta1.TaskRunSpec{ - Debug: &v1beta1.TaskRunDebug{ - Breakpoint: []string{breakpointOnFailure}, - }, - }, - ts: v1beta1.TaskSpec{ - Steps: []v1beta1.Step{{ - Name: "name", - Image: "image", - Command: []string{"cmd"}, // avoid entrypoint lookup. 
- }}, - }, - want: &corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - InitContainers: initContainers, - Containers: []corev1.Container{{ - Name: "step-name", - Image: "image", - Command: []string{"/tekton/bin/entrypoint"}, - Args: []string{ - "-wait_file", - "/tekton/downward/ready", - "-wait_file_content", - "-post_file", - "/tekton/run/0/out", - "-termination_path", - "/tekton/termination", - "-step_metadata_dir", - "/tekton/run/0/status", - "-enable_spire", - "-entrypoint", - "cmd", - "--", - }, - VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, { - Name: "tekton-creds-init-home-0", - MountPath: "/tekton/creds", - }, { - Name: "spiffe-workload-api", - MountPath: "/spiffe-workload-api", - }}, implicitVolumeMounts...), - TerminationMessagePath: "/tekton/termination", - }}, - Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{ - Name: "tekton-creds-init-home-0", - VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, - }, corev1.Volume{ - Name: "spiffe-workload-api", - VolumeSource: corev1.VolumeSource{ - CSI: &corev1.CSIVolumeSource{ - Driver: "csi.spiffe.io", - }, - }, - }), - ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, - }, - }} { - t.Run(c.desc, func(t *testing.T) { - featureFlags := map[string]string{ - "enable-spire": "true", - } - names.TestingSeed() - store := config.NewStore(logtesting.TestLogger(t)) - store.OnConfigChanged( - &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()}, - Data: featureFlags, - }, - ) - kubeclient := fakek8s.NewSimpleClientset( - &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}, - &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "service-account", Namespace: "default"}, - Secrets: []corev1.ObjectReference{{ - Name: "multi-creds", - }}, - }, - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "multi-creds", - Namespace: "default", - Annotations: map[string]string{ - "tekton.dev/docker-0": "https://us.gcr.io", - "tekton.dev/docker-1": "https://docker.io", - "tekton.dev/git-0": "github.com", - "tekton.dev/git-1": "gitlab.com", - }}, - Type: "kubernetes.io/basic-auth", - Data: map[string][]byte{ - "username": []byte("foo"), - "password": []byte("BestEver"), - }, - }, - ) - var trAnnotations map[string]string - if c.trAnnotation == nil { - trAnnotations = map[string]string{ - ReleaseAnnotation: fakeVersion, - } - } else { - trAnnotations = c.trAnnotation - trAnnotations[ReleaseAnnotation] = fakeVersion - } - tr := &v1beta1.TaskRun{ - ObjectMeta: metav1.ObjectMeta{ - Name: "taskrun-name", - Namespace: "default", - Annotations: trAnnotations, - }, - Spec: c.trs, - } - - // No entrypoints should be looked up. 
- entrypointCache := fakeCache{} - builder := Builder{ - Images: images, - KubeClient: kubeclient, - EntrypointCache: entrypointCache, - } - - got, err := builder.Build(store.ToContext(context.Background()), tr, c.ts) - if err != nil { - t.Fatalf("builder.Build: %v", err) - } - - expectedName := kmeta.ChildName(tr.Name, "-pod") - if d := cmp.Diff(expectedName, got.Name); d != "" { - t.Errorf("Pod name does not match: %q", d) - } - - if d := cmp.Diff(c.want, &got.Spec, resourceQuantityCmp, volumeSort, volumeMountSort); d != "" { - t.Errorf("Diff %s", diff.PrintWantGot(d)) - } - - if c.wantAnnotations != nil { - if d := cmp.Diff(c.wantAnnotations, got.ObjectMeta.Annotations, cmpopts.IgnoreMapEntries(ignoreReleaseAnnotation)); d != "" { - t.Errorf("Annotation Diff(-want, +got):\n%s", d) - } - } - }) - } -} - func TestMakeLabels(t *testing.T) { taskRunName := "task-run-name" want := map[string]string{ diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go index c4f6533b7c4..124e234f7d9 100644 --- a/pkg/reconciler/taskrun/taskrun.go +++ b/pkg/reconciler/taskrun/taskrun.go @@ -117,6 +117,20 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg // on the event to perform user facing initialisations, such has reset a CI check status afterCondition := tr.Status.GetCondition(apis.ConditionSucceeded) events.Emit(ctx, nil, afterCondition, tr) + } else if config.FromContextOrDefaults(ctx).FeatureFlags.EnableSpire { + var verified = false + if c.SpireClient != nil { + if err := c.SpireClient.VerifyStatusInternalAnnotation(ctx, tr, logger); err == nil { + verified = true + } + if !verified { + if tr.Status.Annotations == nil { + tr.Status.Annotations = map[string]string{} + } + tr.Status.Annotations[spire.NotVerifiedAnnotation] = "yes" + } + logger.Infof("taskrun verification status: %t with hash %v \n", verified, tr.Status.Annotations[spire.TaskRunStatusHashAnnotation]) + } } // If the TaskRun is complete, run some post run fixtures when applicable @@ -289,6 +303,20 @@ func (c *Reconciler) finishReconcileUpdateEmitEvents(ctx context.Context, tr *v1 events.Emit(ctx, beforeCondition, afterCondition, tr) var err error + // Add status internal annotations hash only if it was verified + if config.FromContextOrDefaults(ctx).FeatureFlags.EnableSpire && + c.SpireClient != nil && c.SpireClient.CheckSpireVerifiedFlag(tr) { + if err := spire.CheckStatusInternalAnnotation(tr); err != nil { + err = c.SpireClient.AppendStatusInternalAnnotation(ctx, tr) + if err != nil { + logger.Warn("Failed to sign TaskRun internal status hash", zap.Error(err)) + events.EmitError(controller.GetEventRecorder(ctx), err, tr) + } else { + logger.Infof("Successfully signed TaskRun internal status with hash: %v", + tr.Status.Annotations[spire.TaskRunStatusHashAnnotation]) + } + } + } merr := multierror.Append(previousError, err).ErrorOrNil() diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go index 04e84d8e206..96f31ce123c 100644 --- a/pkg/reconciler/taskrun/taskrun_test.go +++ b/pkg/reconciler/taskrun/taskrun_test.go @@ -63,6 +63,7 @@ import ( ktesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/record" "knative.dev/pkg/apis" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" cminformer "knative.dev/pkg/configmap/informer" "knative.dev/pkg/controller" "knative.dev/pkg/kmeta" @@ -4338,6 +4339,124 @@ status: } } +func TestReconcileOnTaskRunSign(t *testing.T) { + sc := &spire.MockClient{} + + taskSt := &apis.Condition{ + Type: 
apis.ConditionSucceeded, + Status: corev1.ConditionTrue, + Reason: "Build succeeded", + Message: "Build succeeded", + } + taskRunStartedUnsigned := &v1beta1.TaskRun{ + ObjectMeta: objectMeta("taskrun-started-unsigned", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{ + Name: simpleTask.Name, + }, + }, + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: duckv1beta1.Conditions{ + *taskSt, + }, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + StartTime: &metav1.Time{Time: now.Add(-15 * time.Second)}, + }, + }, + } + taskRunUnstarted := &v1beta1.TaskRun{ + ObjectMeta: objectMeta("taskrun-unstarted", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{ + Name: simpleTask.Name, + }, + }, + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: duckv1beta1.Conditions{ + *taskSt, + }, + }, + }, + } + taskRunStartedSigned := &v1beta1.TaskRun{ + ObjectMeta: objectMeta("taskrun-started-signed", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{ + Name: simpleTask.Name, + }, + }, + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: duckv1beta1.Conditions{ + *taskSt, + }, + }, + }, + } + if err := sc.AppendStatusInternalAnnotation(context.Background(), taskRunStartedSigned); err != nil { + t.Fatal("failed to sign test taskrun") + } + + d := test.Data{ + ConfigMaps: []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()}, + Data: map[string]string{ + "enable-spire": "true", + }, + }}, + + TaskRuns: []*v1beta1.TaskRun{ + taskRunStartedUnsigned, taskRunUnstarted, taskRunStartedSigned, + }, + Tasks: []*v1beta1.Task{simpleTask}, + } + testAssets, cancel := getTaskRunController(t, d) + defer cancel() + c := testAssets.Controller + clients := testAssets.Clients + + testCases := []struct { + name string + tr *v1beta1.TaskRun + verifiable bool + }{ + { + name: "sign/verify unstarted taskrun", + tr: taskRunUnstarted, + verifiable: true, + }, + { + name: "sign/verify signed started taskrun", + tr: taskRunStartedSigned, + verifiable: true, + }, + { + name: "sign/verify unsigned started taskrun should fail", + tr: taskRunStartedUnsigned, + verifiable: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tc.tr)); err != nil { + t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err) + } + newTr, err := clients.Pipeline.TektonV1beta1().TaskRuns(tc.tr.Namespace).Get(testAssets.Ctx, tc.tr.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Expected completed TaskRun %s to exist but instead got error when getting it: %v", tc.tr.Name, err) + } + verified := sc.CheckSpireVerifiedFlag(newTr) + if verified != tc.verifiable { + t.Fatalf("expected verifiable: %v, got %v", tc.verifiable, verified) + } + }) + } +} + func Test_validateTaskSpecRequestResources_ValidResources(t *testing.T) { tcs := []struct { name string diff --git a/test/artifact_bucket_test.go b/test/artifact_bucket_test.go index 98ceafd6219..db75c74f7f2 100644 --- a/test/artifact_bucket_test.go +++ b/test/artifact_bucket_test.go @@ -33,11 +33,13 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" knativetest "knative.dev/pkg/test" "knative.dev/pkg/test/helpers" ) const ( + systemNamespace = "tekton-pipelines" bucketSecretName = "bucket-secret" bucketSecretKey = "bucket-secret-key" ) @@ 
-256,6 +258,26 @@ spec:
 	}
 }
 
+// updateConfigMap updates the config map configName in the namespace name with the given values. We can't use the one
+// from knativetest because it assumes that Data is already a non-nil map, and by default, it isn't!
+func updateConfigMap(ctx context.Context, client kubernetes.Interface, name string, configName string, values map[string]string) error {
+	configMap, err := client.CoreV1().ConfigMaps(name).Get(ctx, configName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	if configMap.Data == nil {
+		configMap.Data = make(map[string]string)
+	}
+
+	for key, value := range values {
+		configMap.Data[key] = value
+	}
+
+	_, err = client.CoreV1().ConfigMaps(name).Update(ctx, configMap, metav1.UpdateOptions{})
+	return err
+}
+
 func getBucketSecret(t *testing.T, configFilePath, namespace string) *corev1.Secret {
 	t.Helper()
 	f, err := ioutil.ReadFile(configFilePath)
diff --git a/test/embed_test.go b/test/embed_test.go
index 5273147faf9..4be790c23df 100644
--- a/test/embed_test.go
+++ b/test/embed_test.go
@@ -72,10 +72,6 @@ func embeddedResourceTest(t *testing.T, spireEnabled bool) {
 	embedTaskName := helpers.ObjectNameForTest(t)
 	embedTaskRunName := helpers.ObjectNameForTest(t)
 
-	if spireEnabled {
-		originalConfigMapData := enableSpireConfigMap(ctx, c, t)
-		defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData)
-	}
 	t.Logf("Creating Task and TaskRun in namespace %s", namespace)
 	if _, err := c.TaskClient.Create(ctx, getEmbeddedTask(t, embedTaskName, namespace, []string{"/bin/sh", "-c", fmt.Sprintf("echo %s", taskOutput)}), metav1.CreateOptions{}); err != nil {
@@ -99,6 +95,7 @@ func embeddedResourceTest(t *testing.T, spireEnabled bool) {
 			t.Errorf("Error retrieving taskrun: %s", err)
 		}
 		spireShouldPassTaskRunResultsVerify(tr, t)
+		spireShouldPassSpireAnnotation(tr, t)
 	}
 }
 
diff --git a/test/entrypoint_test.go b/test/entrypoint_test.go
index 579fe886bb9..57275a5b8e7 100644
--- a/test/entrypoint_test.go
+++ b/test/entrypoint_test.go
@@ -68,10 +68,6 @@ func entryPointerTest(t *testing.T, spireEnabled bool) {
 	defer tearDown(ctx, t, c, namespace)
 	epTaskRunName := helpers.ObjectNameForTest(t)
 
-	if spireEnabled {
-		originalConfigMapData := enableSpireConfigMap(ctx, c, t)
-		defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData)
-	}
 	t.Logf("Creating TaskRun in namespace %s", namespace)
 	if _, err := c.TaskRunClient.Create(ctx, parse.MustParseTaskRun(t, fmt.Sprintf(`
@@ -102,6 +98,7 @@ spec:
 		t.Errorf("Error retrieving taskrun: %s", err)
 	}
 	spireShouldPassTaskRunResultsVerify(tr, t)
+	spireShouldPassSpireAnnotation(tr, t)
 }
 }
diff --git a/test/helm_task_test.go b/test/helm_task_test.go
index 51a78835f3a..e60bf4e33e3 100644
--- a/test/helm_task_test.go
+++ b/test/helm_task_test.go
@@ -81,11 +81,6 @@ func helmDeploytest(t *testing.T, spireEnabled bool) {
 	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
 	defer tearDown(ctx, t, c, namespace)
 
-	if spireEnabled {
-		originalConfigMapData := enableSpireConfigMap(ctx, c, t)
-		defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData)
-	}
-
 	t.Logf("Creating Git PipelineResource %s", sourceResourceName)
 	if _, err := c.PipelineResourceClient.Create(ctx, getGoHelloworldGitResource(t, sourceResourceName), metav1.CreateOptions{}); err != nil {
 		t.Fatalf("Failed to create Pipeline Resource `%s`: %s", sourceResourceName, err)
@@ -134,6 +129,7 @@ func helmDeploytest(t *testing.T, 
spireEnabled bool) { } for _, taskrunItem := range taskrunList.Items { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } } diff --git a/test/hermetic_taskrun_test.go b/test/hermetic_taskrun_test.go index f5c5482cd87..cca9b0d6a6c 100644 --- a/test/hermetic_taskrun_test.go +++ b/test/hermetic_taskrun_test.go @@ -62,11 +62,6 @@ func hermeticTest(t *testing.T, spireEnabled bool) { t.Parallel() defer tearDown(ctx, t, c, namespace) - if spireEnabled { - originalConfigMapData := enableSpireConfigMap(ctx, c, t) - defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData) - } - tests := []struct { desc string getTaskRun func(*testing.T, string, string, string) *v1beta1.TaskRun @@ -98,6 +93,7 @@ func hermeticTest(t *testing.T, spireEnabled bool) { t.Errorf("Error retrieving taskrun: %s", err) } spireShouldPassTaskRunResultsVerify(tr, t) + spireShouldPassSpireAnnotation(tr, t) } // now, run the task mode with hermetic mode @@ -117,6 +113,7 @@ func hermeticTest(t *testing.T, spireEnabled bool) { t.Errorf("Error retrieving taskrun: %s", err) } spireShouldFailTaskRunResultsVerify(tr, t) + spireShouldPassSpireAnnotation(tr, t) } }) } diff --git a/test/ignore_step_error_test.go b/test/ignore_step_error_test.go index 4dbe0c22648..984b2de2366 100644 --- a/test/ignore_step_error_test.go +++ b/test/ignore_step_error_test.go @@ -58,11 +58,6 @@ func stepErrorTest(t *testing.T, spireEnabled bool) { knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) - if spireEnabled { - originalConfigMapData := enableSpireConfigMap(ctx, c, t) - defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData) - } - pipelineRun := parse.MustParsePipelineRun(t, fmt.Sprintf(` metadata: name: %s @@ -124,6 +119,7 @@ spec: if spireEnabled { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } for _, r := range taskrunItem.Status.TaskRunResults { diff --git a/test/init_test.go b/test/init_test.go index 0525f5c00e3..354472c8903 100644 --- a/test/init_test.go +++ b/test/init_test.go @@ -33,6 +33,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/names" + "github.com/tektoncd/pipeline/pkg/spire" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -290,3 +291,29 @@ func spireShouldPassTaskRunResultsVerify(tr *v1beta1.TaskRun, t *testing.T) { } t.Logf("Taskrun `%s` status results condition verified by spire as true, which is valid", tr.Name) } + +// Verifies if the taskrun status annotation does not contain "not-verified" +func spireShouldPassSpireAnnotation(tr *v1beta1.TaskRun, t *testing.T) { + if _, notVerified := tr.Status.Annotations[spire.NotVerifiedAnnotation]; notVerified { + t.Errorf("Taskrun `%s` status not verified. Spire annotation tekton.dev/not-verified = yes. 
Failed spire verification", tr.Name) + } + t.Logf("Taskrun `%s` status spire annotation verified", tr.Name) +} + +// Verifies if the taskrun status annotation does contain "not-verified" +func spireShouldFailSpireAnnotation(tr *v1beta1.TaskRun, t *testing.T) { + _, notVerified := tr.Status.Annotations[spire.NotVerifiedAnnotation] + _, hash := tr.Status.Annotations[spire.TaskRunStatusHashAnnotation] + if !notVerified && hash { + t.Errorf("Taskrun `%s` status should be not verified missing spire Annotation tekton.dev/not-verified = yes", tr.Name) + } + t.Logf("Taskrun `%s` status spire annotation not verified, which is valid", tr.Name) +} + +// Verifies the taskrun status annotation should not exist +func spireZeroSpireAnnotation(tr *v1beta1.TaskRun, t *testing.T) { + if len(tr.Status.Annotations) > 0 { + t.Errorf("Taskrun `%s` status should not have any spire annotations", tr.Name) + } + t.Logf("Taskrun `%s` status spire annotation not found, which is valid", tr.Name) +} diff --git a/test/kaniko_task_test.go b/test/kaniko_task_test.go index 605e9f8d4f4..d4dcb385628 100644 --- a/test/kaniko_task_test.go +++ b/test/kaniko_task_test.go @@ -81,11 +81,6 @@ func kanikoTest(t *testing.T, spireEnabled bool) { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", git.Name, err) } - if spireEnabled { - originalConfigMapData := enableSpireConfigMap(ctx, c, t) - defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData) - } - image := getImageResource(t, repo) t.Logf("Creating Image PipelineResource %s", repo) if _, err := c.PipelineResourceClient.Create(ctx, image, metav1.CreateOptions{}); err != nil { @@ -147,6 +142,7 @@ func kanikoTest(t *testing.T, spireEnabled bool) { if spireEnabled { spireShouldPassTaskRunResultsVerify(tr, t) + spireShouldPassSpireAnnotation(tr, t) } // match the local digest, which is first capture group against the remote image diff --git a/test/pipelinefinally_test.go b/test/pipelinefinally_test.go index 8e9b3005d22..d90ff9b5e6c 100644 --- a/test/pipelinefinally_test.go +++ b/test/pipelinefinally_test.go @@ -279,6 +279,7 @@ spec: } if spireEnabled { spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } dagTask1EndTime = taskrunItem.Status.CompletionTime case n == "dagtask2": @@ -287,12 +288,14 @@ spec: } if spireEnabled { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } dagTask2EndTime = taskrunItem.Status.CompletionTime case n == "dagtask4": if spireEnabled { // Skipped so status annotations should not be there. 
Results should not be verified as not run spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + spireZeroSpireAnnotation(&taskrunItem, t) } t.Fatalf("task %s should have skipped due to when expression", n) case n == "dagtask5": @@ -301,6 +304,7 @@ spec: } if spireEnabled { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } case n == "finaltask1": if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess"); err != nil { @@ -308,6 +312,7 @@ spec: } if spireEnabled { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } finalTaskStartTime = taskrunItem.Status.StartTime case n == "finaltask2": @@ -316,6 +321,7 @@ spec: } if spireEnabled { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } for _, p := range taskrunItem.Spec.Params { switch param := p.Name; param { @@ -344,6 +350,7 @@ spec: } if spireEnabled { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } for _, p := range taskrunItem.Spec.Params { if p.Name == "dagtask-result" && p.Value.StringVal != "Hello" { @@ -356,6 +363,7 @@ spec: } if spireEnabled { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } case n == "guardedfinaltaskusingdagtask5status1": if !isSuccessful(t, n, taskrunItem.Status.Conditions) { @@ -363,17 +371,20 @@ spec: } if spireEnabled { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } case n == "guardedfinaltaskusingdagtask5result2": if spireEnabled { // Skipped so status annotations should not be there. Results should not be verified as not run spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + spireZeroSpireAnnotation(&taskrunItem, t) } t.Fatalf("final task %s should have skipped due to when expression evaluating to false", n) case n == "finaltaskconsumingdagtask1" || n == "finaltaskconsumingdagtask4" || n == "guardedfinaltaskconsumingdagtask4": if spireEnabled { // Skipped so status annotations should not be there. 
Results should not be verified as not run spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + spireZeroSpireAnnotation(&taskrunItem, t) } t.Fatalf("final task %s should have skipped due to missing task result reference", n) default: @@ -471,11 +482,6 @@ func pipelineLevelFinallyOneFinalTaskFailedFailureWithOptions(t *testing.T, spir knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) - if spireEnabled { - originalConfigMapData := enableSpireConfigMap(ctx, c, t) - defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData) - } - task := getSuccessTask(t, namespace) if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create dag Task: %s", err) @@ -528,6 +534,7 @@ spec: } if spireEnabled { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } case n == "finaltask1": if !isFailed(t, n, taskrunItem.Status.Conditions) { @@ -535,6 +542,7 @@ spec: } if spireEnabled { spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } default: t.Fatalf("TaskRuns were not found for both final and dag tasks") @@ -567,11 +575,6 @@ func pipelineLevelFinallyOneFinalTaskCancelledRunFinallyWithOptions(t *testing.T knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) - if spireEnabled { - originalConfigMapData := enableSpireConfigMap(ctx, c, t) - defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData) - } - task1 := getDelaySuccessTaskProducingResults(t, namespace) task1.Spec.Results = append(task1.Spec.Results, v1beta1.TaskResult{ Name: "result", @@ -667,10 +670,12 @@ spec: } if spireEnabled { spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } case "dagtask2": if spireEnabled { spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + spireZeroSpireAnnotation(&taskrunItem, t) } t.Fatalf("second dag task %s should be skipped as it depends on the result from cancelled 'dagtask1'", n) case "finaltask1": @@ -679,10 +684,12 @@ spec: } if spireEnabled { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } case "finaltask2": if spireEnabled { spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + spireZeroSpireAnnotation(&taskrunItem, t) } t.Fatalf("second final task %s should be skipped as it depends on the result from cancelled 'dagtask1'", n) default: @@ -716,11 +723,6 @@ func pipelineLevelFinallyOneFinalTaskStoppedRunFinallyWithOptions(t *testing.T, knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) - if spireEnabled { - originalConfigMapData := enableSpireConfigMap(ctx, c, t) - defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData) - } - task1 := getDelaySuccessTaskProducingResults(t, namespace) task1.Spec.Results = append(task1.Spec.Results, v1beta1.TaskResult{ Name: "result", @@ -816,6 +818,7 @@ spec: } if spireEnabled { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } case "finaltask1": if !isSuccessful(t, n, taskrunItem.Status.Conditions) { @@ -823,6 +826,7 @@ spec: } if spireEnabled { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + 
spireShouldPassSpireAnnotation(&taskrunItem, t) } case "finaltask2": if !isSuccessful(t, n, taskrunItem.Status.Conditions) { @@ -830,6 +834,7 @@ spec: } if spireEnabled { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } default: t.Fatalf("TaskRuns were not found for both final and dag tasks") diff --git a/test/pipelinerun_test.go b/test/pipelinerun_test.go index 9cff8c75076..ee713429a77 100644 --- a/test/pipelinerun_test.go +++ b/test/pipelinerun_test.go @@ -320,6 +320,7 @@ spec: i := i // capture range variable td := td // capture range variable t.Run(td.name, func(t *testing.T) { + t.Parallel() ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -336,11 +337,6 @@ spec: knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) - if spireEnabled { - originalConfigMapData := enableSpireConfigMap(ctx, c, t) - defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData) - } - t.Logf("Setting up test resources for %q test in namespace %s", td.name, namespace) resources, p := td.testSetup(ctx, t, c, namespace, i) @@ -370,6 +366,7 @@ spec: } if spireEnabled { spireShouldPassTaskRunResultsVerify(&actualTaskRunItem, t) + spireShouldPassSpireAnnotation(&actualTaskRunItem, t) } } expectedTaskRunNames = append(expectedTaskRunNames, taskRunName) @@ -565,6 +562,7 @@ spec: } for _, taskrunItem := range taskrunList.Items { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } } @@ -606,10 +604,6 @@ func pipelineRunPendingTestWithOptions(t *testing.T, spireEnabled bool) { taskName := helpers.ObjectNameForTest(t) pipelineName := helpers.ObjectNameForTest(t) prName := helpers.ObjectNameForTest(t) - if spireEnabled { - originalConfigMapData := enableSpireConfigMap(ctx, c, t) - defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData) - } t.Logf("Creating Task, Pipeline, and Pending PipelineRun %s in namespace %s", prName, namespace) @@ -685,6 +679,7 @@ spec: } for _, taskrunItem := range taskrunList.Items { spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } } } diff --git a/test/status_test.go b/test/status_test.go index 8f6a733e2bd..597697c78c9 100644 --- a/test/status_test.go +++ b/test/status_test.go @@ -65,11 +65,6 @@ func taskRunPipelineRunStatus(t *testing.T, spireEnabled bool) { knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) - if spireEnabled { - originalConfigMapData := enableSpireConfigMap(ctx, c, t) - defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData) - } - t.Logf("Creating Task and TaskRun in namespace %s", namespace) task := parse.MustParseTask(t, fmt.Sprintf(` metadata: @@ -104,6 +99,7 @@ spec: t.Errorf("Error retrieving taskrun: %s", err) } spireShouldFailTaskRunResultsVerify(tr, t) + spireShouldPassSpireAnnotation(tr, t) } pipeline := parse.MustParsePipeline(t, fmt.Sprintf(` @@ -140,6 +136,7 @@ spec: } for _, taskrunItem := range taskrunList.Items { spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + spireShouldPassSpireAnnotation(&taskrunItem, t) } } diff --git a/test/taskrun_test.go b/test/taskrun_test.go index 6df78077487..a72ff23c641 100644 --- a/test/taskrun_test.go +++ b/test/taskrun_test.go @@ -71,11 +71,6 @@ 
func taskrunFailureTest(t *testing.T, spireEnabled bool) { taskRunName := helpers.ObjectNameForTest(t) - if spireEnabled { - originalConfigMapData := enableSpireConfigMap(ctx, c, t) - defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData) - } - t.Logf("Creating Task and TaskRun in namespace %s", namespace) task := parse.MustParseTask(t, fmt.Sprintf(` metadata: @@ -120,6 +115,7 @@ spec: if spireEnabled { spireShouldFailTaskRunResultsVerify(taskrun, t) + spireShouldPassSpireAnnotation(taskrun, t) } expectedStepState := []v1beta1.StepState{{ @@ -192,10 +188,6 @@ func taskrunStatusTest(t *testing.T, spireEnabled bool) { defer tearDown(ctx, t, c, namespace) taskRunName := helpers.ObjectNameForTest(t) - if spireEnabled { - originalConfigMapData := enableSpireConfigMap(ctx, c, t) - defer resetConfigMap(ctx, t, c, systemNamespace, config.GetFeatureFlagsConfigName(), originalConfigMapData) - } fqImageName := getTestImage(busyboxImage) @@ -237,6 +229,7 @@ spec: if spireEnabled { spireShouldPassTaskRunResultsVerify(taskrun, t) + spireShouldPassSpireAnnotation(taskrun, t) } expectedStepState := []v1beta1.StepState{{ @@ -355,6 +348,7 @@ spec: if spireEnabled { spireShouldFailTaskRunResultsVerify(taskrun, t) + spireShouldFailSpireAnnotation(taskrun, t) } expectedStepState := []v1beta1.StepState{{