diff --git a/docs/install.md b/docs/install.md index 8d8bece9c16..1c476a1851d 100644 --- a/docs/install.md +++ b/docs/install.md @@ -389,7 +389,7 @@ features](#alpha-features) to be used. - `embedded-status`: set this flag to "full" to enable full embedding of `TaskRun` and `Run` statuses in the `PipelineRun` status. Set it to "minimal" to populate the `ChildReferences` field in the `PipelineRun` status with name, kind, and API version information for each `TaskRun` and `Run` in the `PipelineRun` instead. Set it to "both" to - do both. For more information, see [Configuring usage of `TaskRun` and `Run` embedded statuses](pipelineruns.md#configuring-usage-of-taskrun-and-run-embedded-statuses). **NOTE**: This functionality is not yet active. + do both. For more information, see [Configuring usage of `TaskRun` and `Run` embedded statuses](pipelineruns.md#configuring-usage-of-taskrun-and-run-embedded-statuses). For example: diff --git a/go.mod b/go.mod index 8c050f3f5a7..2366dd21574 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/google/go-containerregistry v0.8.1-0.20220216220642-00c59d91847c github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20220328141311-efc62d802606 github.com/google/uuid v1.3.0 + github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru v0.5.4 github.com/jenkins-x/go-scm v1.10.10 @@ -90,7 +91,6 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/hashicorp/errwrap v1.0.0 github.com/imdario/mergo v0.3.12 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect diff --git a/pkg/apis/pipeline/v1beta1/pipelinerun_types.go b/pkg/apis/pipeline/v1beta1/pipelinerun_types.go index 6c86bd65ca2..4c71c19f95a 100644 --- a/pkg/apis/pipeline/v1beta1/pipelinerun_types.go +++ 
b/pkg/apis/pipeline/v1beta1/pipelinerun_types.go @@ -415,6 +415,21 @@ type ChildStatusReference struct { WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"` } +// GetConditionChecksAsMap returns a map representation of this ChildStatusReference's ConditionChecks, in the same form +// as PipelineRunTaskRunStatus.ConditionChecks. +func (cr ChildStatusReference) GetConditionChecksAsMap() map[string]*PipelineRunConditionCheckStatus { + if len(cr.ConditionChecks) == 0 { + return nil + } + ccMap := make(map[string]*PipelineRunConditionCheckStatus) + + for _, cc := range cr.ConditionChecks { + ccMap[cc.ConditionCheckName] = &cc.PipelineRunConditionCheckStatus + } + + return ccMap +} + // PipelineRunStatusFields holds the fields of PipelineRunStatus' status. // This is defined separately and inlined so that other types can readily // consume these fields via duck typing. diff --git a/pkg/reconciler/pipelinerun/cancel.go b/pkg/reconciler/pipelinerun/cancel.go index 24539ed9a24..ef653a43e59 100644 --- a/pkg/reconciler/pipelinerun/cancel.go +++ b/pkg/reconciler/pipelinerun/cancel.go @@ -99,23 +99,48 @@ func cancelPipelineRun(ctx context.Context, logger *zap.SugaredLogger, pr *v1bet func cancelPipelineTaskRuns(ctx context.Context, logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, clientSet clientset.Interface) []string { errs := []string{} - // Loop over the TaskRuns in the PipelineRun status. - // If a TaskRun is not in the status yet we should not cancel it anyways. 
- for taskRunName := range pr.Status.TaskRuns { - logger.Infof("cancelling TaskRun %s", taskRunName) - - if _, err := clientSet.TektonV1beta1().TaskRuns(pr.Namespace).Patch(ctx, taskRunName, types.JSONPatchType, cancelTaskRunPatchBytes, metav1.PatchOptions{}, ""); err != nil { - errs = append(errs, fmt.Errorf("Failed to patch TaskRun `%s` with cancellation: %s", taskRunName, err).Error()) - continue + // If pr.Status.ChildReferences is populated, use that as source of truth for TaskRun and Run names. + if len(pr.Status.ChildReferences) > 0 { + // Loop over the ChildReferences in the PipelineRun status. + for _, cr := range pr.Status.ChildReferences { + switch cr.Kind { + case "TaskRun": + logger.Infof("cancelling TaskRun %s", cr.Name) + + if _, err := clientSet.TektonV1beta1().TaskRuns(pr.Namespace).Patch(ctx, cr.Name, types.JSONPatchType, cancelTaskRunPatchBytes, metav1.PatchOptions{}, ""); err != nil { + errs = append(errs, fmt.Errorf("Failed to patch TaskRun `%s` with cancellation: %s", cr.Name, err).Error()) + continue + } + case "Run": + logger.Infof("cancelling Run %s", cr.Name) + + if err := cancelRun(ctx, cr.Name, pr.Namespace, clientSet); err != nil { + errs = append(errs, fmt.Errorf("Failed to patch Run `%s` with cancellation: %s", cr.Name, err).Error()) + continue + } + default: + errs = append(errs, fmt.Errorf("unknown or unsupported kind `%s` for cancellation", cr.Kind).Error()) + } } - } - // Loop over the Runs in the PipelineRun status. - for runName := range pr.Status.Runs { - logger.Infof("cancelling Run %s", runName) - - if err := cancelRun(ctx, runName, pr.Namespace, clientSet); err != nil { - errs = append(errs, fmt.Errorf("Failed to patch Run `%s` with cancellation: %s", runName, err).Error()) - continue + } else { + // Loop over the TaskRuns in the PipelineRun status. + // If a TaskRun is not in the status yet we should not cancel it anyways. 
+ for taskRunName := range pr.Status.TaskRuns { + logger.Infof("cancelling TaskRun %s", taskRunName) + + if _, err := clientSet.TektonV1beta1().TaskRuns(pr.Namespace).Patch(ctx, taskRunName, types.JSONPatchType, cancelTaskRunPatchBytes, metav1.PatchOptions{}, ""); err != nil { + errs = append(errs, fmt.Errorf("Failed to patch TaskRun `%s` with cancellation: %s", taskRunName, err).Error()) + continue + } + } + // Loop over the Runs in the PipelineRun status. + for runName := range pr.Status.Runs { + logger.Infof("cancelling Run %s", runName) + + if err := cancelRun(ctx, runName, pr.Namespace, clientSet); err != nil { + errs = append(errs, fmt.Errorf("Failed to patch Run `%s` with cancellation: %s", runName, err).Error()) + continue + } } } diff --git a/pkg/reconciler/pipelinerun/cancel_test.go b/pkg/reconciler/pipelinerun/cancel_test.go index d8c3492749c..3882a56a267 100644 --- a/pkg/reconciler/pipelinerun/cancel_test.go +++ b/pkg/reconciler/pipelinerun/cancel_test.go @@ -20,6 +20,8 @@ import ( "context" "testing" + "k8s.io/apimachinery/pkg/runtime" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" _ "github.com/tektoncd/pipeline/pkg/pipelinerunmetrics/fake" // Make sure the pipelinerunmetrics are setup @@ -36,6 +38,7 @@ func TestCancelPipelineRun(t *testing.T) { pipelineRun *v1beta1.PipelineRun taskRuns []*v1beta1.TaskRun runs []*v1alpha1.Run + wantErr bool }{{ name: "no-resolved-taskrun", pipelineRun: &v1beta1.PipelineRun{ @@ -104,10 +107,67 @@ func TestCancelPipelineRun(t *testing.T) { Status: v1beta1.PipelineRunSpecStatusCancelledDeprecated, }, }, + }, { + name: "child-references", + pipelineRun: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pipeline-run-cancelled"}, + Spec: v1beta1.PipelineRunSpec{ + Status: v1beta1.PipelineRunSpecStatusCancelled, + }, + Status: v1beta1.PipelineRunStatus{PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + ChildReferences: 
[]v1beta1.ChildStatusReference{ + { + TypeMeta: runtime.TypeMeta{Kind: "TaskRun"}, + Name: "t1", + PipelineTaskName: "task-1", + }, + { + TypeMeta: runtime.TypeMeta{Kind: "TaskRun"}, + Name: "t2", + PipelineTaskName: "task-2", + }, + { + TypeMeta: runtime.TypeMeta{Kind: "Run"}, + Name: "r1", + PipelineTaskName: "run-1", + }, + { + TypeMeta: runtime.TypeMeta{Kind: "Run"}, + Name: "r2", + PipelineTaskName: "run-2", + }, + }, + }}, + }, + taskRuns: []*v1beta1.TaskRun{ + {ObjectMeta: metav1.ObjectMeta{Name: "t1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "t2"}}, + }, + runs: []*v1alpha1.Run{ + {ObjectMeta: metav1.ObjectMeta{Name: "r1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "r2"}}, + }, + }, { + name: "unknown-kind-on-child-references", + pipelineRun: &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pipeline-run-cancelled"}, + Spec: v1beta1.PipelineRunSpec{ + Status: v1beta1.PipelineRunSpecStatusCancelled, + }, + Status: v1beta1.PipelineRunStatus{PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + ChildReferences: []v1beta1.ChildStatusReference{{ + TypeMeta: runtime.TypeMeta{Kind: "InvalidKind"}, + Name: "t1", + PipelineTaskName: "task-1", + }}, + }}, + }, + wantErr: true, }} for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { + d := test.Data{ PipelineRuns: []*v1beta1.PipelineRun{tc.pipelineRun}, TaskRuns: tc.taskRuns, @@ -117,33 +177,41 @@ func TestCancelPipelineRun(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() c, _ := test.SeedTestData(t, ctx, d) - if err := cancelPipelineRun(ctx, logtesting.TestLogger(t), tc.pipelineRun, c.Pipeline); err != nil { - t.Fatal(err) - } - // This PipelineRun should still be complete and false, and the status should reflect that - cond := tc.pipelineRun.Status.GetCondition(apis.ConditionSucceeded) - if cond.IsTrue() { - t.Errorf("Expected PipelineRun status to be complete and false, but was %v", cond) - } - if tc.taskRuns != nil { - l, err := 
c.Pipeline.TektonV1beta1().TaskRuns("").List(ctx, metav1.ListOptions{}) + + err := cancelPipelineRun(ctx, logtesting.TestLogger(t), tc.pipelineRun, c.Pipeline) + if tc.wantErr { + if err == nil { + t.Error("expected an error, but did not get one") + } + } else { if err != nil { t.Fatal(err) } - for _, tr := range l.Items { - if tr.Spec.Status != v1beta1.TaskRunSpecStatusCancelled { - t.Errorf("expected task %q to be marked as cancelled, was %q", tr.Name, tr.Spec.Status) - } + // This PipelineRun should still be complete and false, and the status should reflect that + cond := tc.pipelineRun.Status.GetCondition(apis.ConditionSucceeded) + if cond.IsTrue() { + t.Errorf("Expected PipelineRun status to be complete and false, but was %v", cond) } - } - if tc.runs != nil { - l, err := c.Pipeline.TektonV1alpha1().Runs("").List(ctx, metav1.ListOptions{}) - if err != nil { - t.Fatal(err) + if tc.taskRuns != nil { + for _, expectedTR := range tc.taskRuns { + tr, err := c.Pipeline.TektonV1beta1().TaskRuns("").Get(ctx, expectedTR.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("couldn't get expected TaskRun %s, got error %s", expectedTR.Name, err) + } + if tr.Spec.Status != v1beta1.TaskRunSpecStatusCancelled { + t.Errorf("expected task %q to be marked as cancelled, was %q", tr.Name, tr.Spec.Status) + } + } } - for _, r := range l.Items { - if r.Spec.Status != v1alpha1.RunSpecStatusCancelled { - t.Errorf("expected Run %q to be marked as cancelled, was %q", r.Name, r.Spec.Status) + if tc.runs != nil { + for _, expectedRun := range tc.runs { + r, err := c.Pipeline.TektonV1alpha1().Runs("").Get(ctx, expectedRun.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("couldn't get expected Run %s, got error %s", expectedRun.Name, err) + } + if r.Spec.Status != v1alpha1.RunSpecStatusCancelled { + t.Errorf("expected Run %q to be marked as cancelled, was %q", r.Name, r.Spec.Status) + } } } } diff --git a/pkg/reconciler/pipelinerun/pipelinerun.go 
b/pkg/reconciler/pipelinerun/pipelinerun.go index 9e61a7fefdc..9c2c1532479 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun.go +++ b/pkg/reconciler/pipelinerun/pipelinerun.go @@ -573,11 +573,22 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1beta1.PipelineRun, get // Read the condition the way it was set by the Mark* helpers after = pr.Status.GetCondition(apis.ConditionSucceeded) pr.Status.StartTime = pipelineRunFacts.State.AdjustStartTime(pr.Status.StartTime) - pr.Status.TaskRuns = pipelineRunFacts.State.GetTaskRunsStatus(pr) - pr.Status.Runs = pipelineRunFacts.State.GetRunsStatus(pr) + taskRunStatuses := pipelineRunFacts.State.GetTaskRunsStatus(pr) + runStatuses := pipelineRunFacts.State.GetRunsStatus(pr) + + if cfg.FeatureFlags.EmbeddedStatus == config.FullEmbeddedStatus || cfg.FeatureFlags.EmbeddedStatus == config.BothEmbeddedStatus { + pr.Status.TaskRuns = taskRunStatuses + pr.Status.Runs = runStatuses + } + + if cfg.FeatureFlags.EmbeddedStatus == config.MinimalEmbeddedStatus || cfg.FeatureFlags.EmbeddedStatus == config.BothEmbeddedStatus { + pr.Status.ChildReferences = pipelineRunFacts.State.GetChildReferences(v1beta1.SchemeGroupVersion.String(), + v1alpha1.SchemeGroupVersion.String()) + } + pr.Status.SkippedTasks = pipelineRunFacts.GetSkippedTasks() if after.Status == corev1.ConditionTrue { - pr.Status.PipelineResults = resources.ApplyTaskResultsToPipelineResults(pipelineSpec.Results, pr.Status.TaskRuns, pr.Status.Runs) + pr.Status.PipelineResults = resources.ApplyTaskResultsToPipelineResults(pipelineSpec.Results, taskRunStatuses, runStatuses) } logger.Infof("PipelineRun %s status is being set to %s", pr.Name, after) @@ -696,6 +707,8 @@ func (c *Reconciler) runNextSchedulableTask(ctx context.Context, pr *v1beta1.Pip return nil } +// updateTaskRunsStatusDirectly is used with "full" or "both" set as the value for the "embedded-status" feature flag. +// When the "full" and "both" options are removed, updateTaskRunsStatusDirectly can be removed. 
func (c *Reconciler) updateTaskRunsStatusDirectly(pr *v1beta1.PipelineRun) error { for taskRunName := range pr.Status.TaskRuns { // TODO(dibyom): Add conditionCheck statuses here @@ -713,6 +726,8 @@ func (c *Reconciler) updateTaskRunsStatusDirectly(pr *v1beta1.PipelineRun) error return nil } +// updateRunsStatusDirectly is used with "full" or "both" set as the value for the "embedded-status" feature flag. +// When the "full" and "both" options are removed, updateRunsStatusDirectly can be removed. func (c *Reconciler) updateRunsStatusDirectly(pr *v1beta1.PipelineRun) error { for runName := range pr.Status.Runs { prRunStatus := pr.Status.Runs[runName] @@ -1183,121 +1198,172 @@ func (c *Reconciler) updatePipelineRunStatusFromInformer(ctx context.Context, pr logger.Errorf("could not list TaskRuns %#v", err) return err } - updatePipelineRunStatusFromTaskRuns(logger, pr, taskRuns) - runs, err := c.runLister.Runs(pr.Namespace).List(k8slabels.SelectorFromSet(pipelineRunLabels)) if err != nil { logger.Errorf("could not list Runs %#v", err) return err } - updatePipelineRunStatusFromRuns(logger, pr, runs) + + updatePipelineRunStatusFromSlices(ctx, logger, pr, taskRuns, runs) return nil } -func updatePipelineRunStatusFromTaskRuns(logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, trs []*v1beta1.TaskRun) { +func updatePipelineRunStatusFromSlices(ctx context.Context, logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, taskRuns []*v1beta1.TaskRun, runs []*v1alpha1.Run) { + taskNameByPipelineTask := make(map[string]string) + + cfg := config.FromContextOrDefaults(ctx) + fullEmbedded := cfg.FeatureFlags.EmbeddedStatus == config.FullEmbeddedStatus || cfg.FeatureFlags.EmbeddedStatus == config.BothEmbeddedStatus + minimalEmbedded := cfg.FeatureFlags.EmbeddedStatus == config.MinimalEmbeddedStatus || cfg.FeatureFlags.EmbeddedStatus == config.BothEmbeddedStatus + + if minimalEmbedded { + taskNameByPipelineTask = updatePipelineRunStatusFromChildRefs(logger, pr, taskRuns, runs) + } + if 
fullEmbedded { + updatePipelineRunStatusFromTaskRuns(logger, pr, taskRuns, taskNameByPipelineTask) + updatePipelineRunStatusFromRuns(logger, pr, runs) + } else { + pr.Status.TaskRuns = make(map[string]*v1beta1.PipelineRunTaskRunStatus) + pr.Status.Runs = make(map[string]*v1beta1.PipelineRunRunStatus) + } +} + +// updatePipelineRunStatusFromTaskRuns takes a PipelineRun, a list of TaskRuns within that PipelineRun, and a map of +// existing PipelineTask names to TaskRun (or Run) names, and updates the PipelineRun's .Status.TaskRuns, including +// ensuring that any (deprecated) condition checks are represented. +func updatePipelineRunStatusFromTaskRuns(logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, trs []*v1beta1.TaskRun, taskRunNameByPipelineTask map[string]string) { // If no TaskRun was found, nothing to be done. We never remove taskruns from the status - if trs == nil || len(trs) == 0 { + if len(trs) == 0 { return } + + if taskRunNameByPipelineTask == nil { + taskRunNameByPipelineTask = make(map[string]string) + } + // Store a list of Condition TaskRuns for each PipelineTask (by name) conditionTaskRuns := make(map[string][]*v1beta1.TaskRun) - // Map PipelineTask names to TaskRun names that were already in the status - taskRunByPipelineTask := make(map[string]string) + if pr.Status.TaskRuns != nil { for taskRunName, pipelineRunTaskRunStatus := range pr.Status.TaskRuns { - taskRunByPipelineTask[pipelineRunTaskRunStatus.PipelineTaskName] = taskRunName + taskRunNameByPipelineTask[pipelineRunTaskRunStatus.PipelineTaskName] = taskRunName } } else { pr.Status.TaskRuns = make(map[string]*v1beta1.PipelineRunTaskRunStatus) } + // Loop over all the TaskRuns associated to Tasks - for _, taskrun := range trs { + for _, tr := range trs { // Only process TaskRuns that are owned by this PipelineRun. // This skips TaskRuns that are indirectly created by the PipelineRun (e.g. by custom tasks). 
- if len(taskrun.OwnerReferences) < 1 || taskrun.OwnerReferences[0].UID != pr.ObjectMeta.UID { - logger.Debugf("Found a TaskRun %s that is not owned by this PipelineRun", taskrun.Name) + if len(tr.OwnerReferences) < 1 || tr.OwnerReferences[0].UID != pr.ObjectMeta.UID { + logger.Debugf("Found a TaskRun %s that is not owned by this PipelineRun", tr.Name) continue } - lbls := taskrun.GetLabels() + lbls := tr.GetLabels() pipelineTaskName := lbls[pipeline.PipelineTaskLabelKey] if _, ok := lbls[pipeline.ConditionCheckKey]; ok { // Save condition for looping over them after this if _, ok := conditionTaskRuns[pipelineTaskName]; !ok { - // If it's the first condition taskrun, initialise the slice + // If it's the first condition tr, initialise the slice conditionTaskRuns[pipelineTaskName] = []*v1beta1.TaskRun{} } - conditionTaskRuns[pipelineTaskName] = append(conditionTaskRuns[pipelineTaskName], taskrun) + conditionTaskRuns[pipelineTaskName] = append(conditionTaskRuns[pipelineTaskName], tr) continue } - if _, ok := pr.Status.TaskRuns[taskrun.Name]; !ok { - // This taskrun was missing from the status. + + if _, ok := pr.Status.TaskRuns[tr.Name]; !ok { + // This tr was missing from the status. 
// Add it without conditions, which are handled in the next loop - logger.Infof("Found a TaskRun %s that was missing from the PipelineRun status", taskrun.Name) - pr.Status.TaskRuns[taskrun.Name] = &v1beta1.PipelineRunTaskRunStatus{ + logger.Infof("Found a TaskRun %s that was missing from the PipelineRun status", tr.Name) + pr.Status.TaskRuns[tr.Name] = &v1beta1.PipelineRunTaskRunStatus{ PipelineTaskName: pipelineTaskName, - Status: &taskrun.Status, + Status: &tr.Status, ConditionChecks: nil, } // Since this was recovered now, add it to the map, or it might be overwritten - taskRunByPipelineTask[pipelineTaskName] = taskrun.Name + taskRunNameByPipelineTask[pipelineTaskName] = tr.Name } } // Then loop by pipelinetask name over all the TaskRuns associated to Conditions for pipelineTaskName, actualConditionTaskRuns := range conditionTaskRuns { - taskRunName, ok := taskRunByPipelineTask[pipelineTaskName] - if !ok { + ok := false + // Default the taskRunName to be a generated one - this will be overridden if we already find pipelineTaskName + // in either childRefByPipelineTask or taskRunNameByPipelineTask. + taskRunName := "" + if taskRunName, ok = taskRunNameByPipelineTask[pipelineTaskName]; !ok { // The pipelineTask associated to the conditions was not found in the pipelinerun // status. This means that the conditions were orphaned, and never added to the // status. In this case we need to generate a new TaskRun name, that will be used // to run the TaskRun if the conditions are passed. 
- taskRunName = resources.GetTaskRunName(pr.Status.TaskRuns, pipelineTaskName, pr.Name) + taskRunName = resources.GetTaskRunName(pr.Status.TaskRuns, pr.Status.ChildReferences, pipelineTaskName, pr.Name) + } + + if _, ok := pr.Status.TaskRuns[taskRunName]; !ok { pr.Status.TaskRuns[taskRunName] = &v1beta1.PipelineRunTaskRunStatus{ PipelineTaskName: pipelineTaskName, Status: nil, ConditionChecks: nil, } } - // Build the map of condition checks for the taskrun - // If there were no other condition, initialise the map - conditionChecks := pr.Status.TaskRuns[taskRunName].ConditionChecks - if conditionChecks == nil { - conditionChecks = make(map[string]*v1beta1.PipelineRunConditionCheckStatus) + + // Add any new condition checks which we've found but weren't already present to the status's condition checks map + for k, v := range getNewConditionChecksForTaskRun(logger, pr.Status.TaskRuns[taskRunName].ConditionChecks, actualConditionTaskRuns) { + if pr.Status.TaskRuns[taskRunName].ConditionChecks == nil { + pr.Status.TaskRuns[taskRunName].ConditionChecks = make(map[string]*v1beta1.PipelineRunConditionCheckStatus) + } + + pr.Status.TaskRuns[taskRunName].ConditionChecks[k] = v } - for i, foundTaskRun := range actualConditionTaskRuns { - lbls := foundTaskRun.GetLabels() - if _, ok := conditionChecks[foundTaskRun.Name]; !ok { - // The condition check was not found, so we need to add it - // We only add the condition name, the status can now be gathered by the - // normal reconcile process - if conditionName, ok := lbls[pipeline.ConditionNameKey]; ok { - conditionChecks[foundTaskRun.Name] = &v1beta1.PipelineRunConditionCheckStatus{ - ConditionName: fmt.Sprintf("%s-%s", conditionName, strconv.Itoa(i)), - } - } else { - // The condition name label is missing, so we cannot recover this - logger.Warnf("found an orphaned condition taskrun %#v with missing %s label", - foundTaskRun, pipeline.ConditionNameKey) + } +} + +// getNewConditionChecksForTaskRun returns a map of condition 
task name to condition check status for each condition TaskRun +// provided which isn't already present in the existing map of condition checks. +func getNewConditionChecksForTaskRun(logger *zap.SugaredLogger, existingChecks map[string]*v1beta1.PipelineRunConditionCheckStatus, + actualConditionTaskRuns []*v1beta1.TaskRun) map[string]*v1beta1.PipelineRunConditionCheckStatus { + // If we don't have any condition task runs to process, just return nil. + if len(actualConditionTaskRuns) == 0 { + return nil + } + + newChecks := make(map[string]*v1beta1.PipelineRunConditionCheckStatus) + + for i, foundTaskRun := range actualConditionTaskRuns { + lbls := foundTaskRun.GetLabels() + if _, ok := existingChecks[foundTaskRun.Name]; !ok { + // The condition check was not found, so we need to add it + // We only add the condition name, the status can now be gathered by the + // normal reconcile process + if conditionName, ok := lbls[pipeline.ConditionNameKey]; ok { + newChecks[foundTaskRun.Name] = &v1beta1.PipelineRunConditionCheckStatus{ + ConditionName: fmt.Sprintf("%s-%s", conditionName, strconv.Itoa(i)), } + } else { + // The condition name label is missing, so we cannot recover this + logger.Warnf("found an orphaned condition taskrun %#v with missing %s label", + foundTaskRun, pipeline.ConditionNameKey) } } - pr.Status.TaskRuns[taskRunName].ConditionChecks = conditionChecks } + + return newChecks } func updatePipelineRunStatusFromRuns(logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, runs []*v1alpha1.Run) { // If no Run was found, nothing to be done. We never remove runs from the status - if runs == nil || len(runs) == 0 { + if len(runs) == 0 { return } if pr.Status.Runs == nil { pr.Status.Runs = make(map[string]*v1beta1.PipelineRunRunStatus) } + // Loop over all the Runs associated to Tasks for _, run := range runs { // Only process Runs that are owned by this PipelineRun. // This skips Runs that are indirectly created by the PipelineRun (e.g. by custom tasks). 
- if len(run.OwnerReferences) < 1 && run.OwnerReferences[0].UID != pr.ObjectMeta.UID { + if len(run.OwnerReferences) < 1 || run.OwnerReferences[0].UID != pr.ObjectMeta.UID { logger.Debugf("Found a Run %s that is not owned by this PipelineRun", run.Name) continue } @@ -1312,3 +1378,136 @@ func updatePipelineRunStatusFromRuns(logger *zap.SugaredLogger, pr *v1beta1.Pipe } } } + +func updatePipelineRunStatusFromChildRefs(logger *zap.SugaredLogger, pr *v1beta1.PipelineRun, trs []*v1beta1.TaskRun, runs []*v1alpha1.Run) map[string]string { + // If no TaskRun or Run was found, nothing to be done. We never remove child references from the status. + // We do still return an empty map of TaskRun/Run names keyed by PipelineTask name for later functions. + if len(trs) == 0 && len(runs) == 0 { + return make(map[string]string) + } + + // Store a list of Condition TaskRuns for each PipelineTask (by name) + conditionTaskRuns := make(map[string][]*v1beta1.TaskRun) + // Map PipelineTask names to TaskRun names that were already in the status + taskRunNameByPipelineTask := make(map[string]string) + // Map PipelineTask names to TaskRun child references that were already in the status + childRefByPipelineTask := make(map[string]*v1beta1.ChildStatusReference) + + for i := range pr.Status.ChildReferences { + childRefByPipelineTask[pr.Status.ChildReferences[i].PipelineTaskName] = &pr.Status.ChildReferences[i] + taskRunNameByPipelineTask[pr.Status.ChildReferences[i].PipelineTaskName] = pr.Status.ChildReferences[i].Name + } + + // Loop over all the TaskRuns associated to Tasks + for _, tr := range trs { + // Only process TaskRuns that are owned by this PipelineRun. + // This skips TaskRuns that are indirectly created by the PipelineRun (e.g. by custom tasks). 
+ if len(tr.OwnerReferences) < 1 || tr.OwnerReferences[0].UID != pr.ObjectMeta.UID { + logger.Debugf("Found a TaskRun %s that is not owned by this PipelineRun", tr.Name) + continue + } + lbls := tr.GetLabels() + pipelineTaskName := lbls[pipeline.PipelineTaskLabelKey] + if _, ok := lbls[pipeline.ConditionCheckKey]; ok { + // Save condition for looping over them after this + if _, ok := conditionTaskRuns[pipelineTaskName]; !ok { + // If it's the first condition tr, initialise the slice + conditionTaskRuns[pipelineTaskName] = []*v1beta1.TaskRun{} + } + conditionTaskRuns[pipelineTaskName] = append(conditionTaskRuns[pipelineTaskName], tr) + continue + } + + if _, ok := taskRunNameByPipelineTask[pipelineTaskName]; !ok { + // This tr was missing from the status. + // Add it without conditions, which are handled in the next loop + logger.Infof("Found a TaskRun %s that was missing from the PipelineRun status", tr.Name) + + // Since this was recovered now, add it to the map, or it might be overwritten + childRefByPipelineTask[pipelineTaskName] = &v1beta1.ChildStatusReference{ + TypeMeta: runtime.TypeMeta{ + APIVersion: v1beta1.SchemeGroupVersion.String(), + Kind: "TaskRun", + }, + Name: tr.Name, + PipelineTaskName: pipelineTaskName, + } + } + } + + // Loop over all the Runs associated to Tasks + for _, r := range runs { + // Only process Runs that are owned by this PipelineRun. + // This skips Runs that are indirectly created by the PipelineRun (e.g. by custom tasks). + if len(r.OwnerReferences) < 1 || r.OwnerReferences[0].UID != pr.ObjectMeta.UID { + logger.Debugf("Found a Run %s that is not owned by this PipelineRun", r.Name) + continue + } + lbls := r.GetLabels() + pipelineTaskName := lbls[pipeline.PipelineTaskLabelKey] + + if _, ok := childRefByPipelineTask[pipelineTaskName]; !ok { + // This run was missing from the status. 
+ // Add it without conditions, which are handled in the next loop + logger.Infof("Found a Run %s that was missing from the PipelineRun status", r.Name) + + // Since this was recovered now, add it to the map, or it might be overwritten + childRefByPipelineTask[pipelineTaskName] = &v1beta1.ChildStatusReference{ + TypeMeta: runtime.TypeMeta{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Run", + }, + Name: r.Name, + PipelineTaskName: pipelineTaskName, + } + } + } + + // Then loop by pipelinetask name over all the TaskRuns associated to Conditions + for pipelineTaskName, actualConditionTaskRuns := range conditionTaskRuns { + ok := false + // Default the taskRunName to be a generated one - this will be overridden if we already find pipelineTaskName + // in either childRefByPipelineTask or taskRunNameByPipelineTask. + taskRunName := "" + cr, inChildRef := childRefByPipelineTask[pipelineTaskName] + if inChildRef { + taskRunName = cr.Name + } else if taskRunName, ok = taskRunNameByPipelineTask[pipelineTaskName]; !ok { + // The pipelineTask associated to the conditions was not found in the pipelinerun + // status. This means that the conditions were orphaned, and never added to the + // status. In this case we need to generate a new TaskRun name, that will be used + // to run the TaskRun if the conditions are passed. 
+ taskRunName = resources.GetTaskRunName(pr.Status.TaskRuns, pr.Status.ChildReferences, pipelineTaskName, pr.Name) + taskRunNameByPipelineTask[pipelineTaskName] = taskRunName + } + + if _, ok := childRefByPipelineTask[pipelineTaskName]; !ok { + childRefByPipelineTask[pipelineTaskName] = &v1beta1.ChildStatusReference{ + TypeMeta: runtime.TypeMeta{ + APIVersion: v1beta1.SchemeGroupVersion.String(), + Kind: "TaskRun", + }, + Name: taskRunName, + PipelineTaskName: pipelineTaskName, + } + cr = childRefByPipelineTask[pipelineTaskName] + } + + for k, v := range getNewConditionChecksForTaskRun(logger, cr.GetConditionChecksAsMap(), actualConditionTaskRuns) { + // Just append any new condition checks to the relevant ChildStatusReference.ConditionChecks. + childRefByPipelineTask[pipelineTaskName].ConditionChecks = append(childRefByPipelineTask[pipelineTaskName].ConditionChecks, + &v1beta1.PipelineRunChildConditionCheckStatus{ + PipelineRunConditionCheckStatus: *v, + ConditionCheckName: k, + }) + } + } + + var newChildRefs []v1beta1.ChildStatusReference + for k := range childRefByPipelineTask { + newChildRefs = append(newChildRefs, *childRefByPipelineTask[k]) + } + pr.Status.ChildReferences = newChildRefs + + return taskRunNameByPipelineTask +} diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go index 8701b0bbf41..df43426b062 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go @@ -24,7 +24,6 @@ import ( "fmt" "net/http/httptest" "net/url" - "regexp" "testing" "time" @@ -101,10 +100,18 @@ var ( now = time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC) testClock = clock.NewFakePassiveClock(now) + + valuesForEmbeddedStatus = []string{ + config.DefaultEmbeddedStatus, + config.FullEmbeddedStatus, + config.BothEmbeddedStatus, + config.MinimalEmbeddedStatus, + } ) const ( - apiFieldsFeatureFlag = "enable-api-fields" + apiFieldsFeatureFlag = "enable-api-fields" + 
embeddedStatusFeatureFlag = "embedded-status" ) type PipelineRunTest struct { @@ -238,6 +245,36 @@ func getPipelineRunUpdates(t *testing.T, actions []ktesting.Action) []*v1beta1.P } func TestReconcile(t *testing.T) { + testCases := []struct { + name string + embeddedStatusVal string + }{ + { + name: "default embedded status", + embeddedStatusVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedStatusVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedStatusVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedStatusVal: config.MinimalEmbeddedStatus, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runTestReconcileWithEmbeddedStatus(t, tc.embeddedStatusVal) + }) + } +} + +func runTestReconcileWithEmbeddedStatus(t *testing.T, embeddedStatus string) { // TestReconcile runs "Reconcile" on a PipelineRun with one Task that has not been started yet. // It verifies that the TaskRun is created, it checks the resulting API actions, status and events. 
names.TestingSeed() @@ -572,6 +609,7 @@ func TestReconcile(t *testing.T) { Tasks: ts, ClusterTasks: clusterTasks, PipelineResources: rs, + ConfigMaps: getConfigMapsWithEmbeddedStatus(embeddedStatus), } prt := newPipelineRunTest(d, t) defer prt.Cancel() @@ -678,18 +716,47 @@ func TestReconcile(t *testing.T) { t.Errorf("Expected reason %q but was %s", v1beta1.PipelineRunReasonRunning.String(), condition.Reason) } - if len(reconciledRun.Status.TaskRuns) != 2 { - t.Errorf("Expected PipelineRun status to include both TaskRun status items that can run immediately: %v", reconciledRun.Status.TaskRuns) - } - if _, exists := reconciledRun.Status.TaskRuns["test-pipeline-run-success-unit-test-1"]; !exists { - t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.TaskRuns) + tr1Name := "test-pipeline-run-success-unit-test-1" + tr2Name := "test-pipeline-run-success-unit-test-cluster-task" + + if shouldHaveFullEmbeddedStatus(embeddedStatus) { + if len(reconciledRun.Status.TaskRuns) != 2 { + t.Errorf("Expected PipelineRun status to include both TaskRun status items that can run immediately: %v", reconciledRun.Status.TaskRuns) + } + if _, exists := reconciledRun.Status.TaskRuns[tr1Name]; !exists { + t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.TaskRuns) + } + if _, exists := reconciledRun.Status.TaskRuns[tr2Name]; !exists { + t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.TaskRuns) + } } - if _, exists := reconciledRun.Status.TaskRuns["test-pipeline-run-success-unit-test-cluster-task"]; !exists { - t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.TaskRuns) + if shouldHaveMinimalEmbeddedStatus(embeddedStatus) { + if len(reconciledRun.Status.ChildReferences) != 2 { + t.Errorf("Expected PipelineRun status to include both ChildReferences status items that can run immediately: %v", 
reconciledRun.Status.ChildReferences) + } + foundTR1 := false + foundTR2 := false + + for _, cr := range reconciledRun.Status.ChildReferences { + if cr.Name == tr1Name { + foundTR1 = true + } + if cr.Name == tr2Name { + foundTR2 = true + } + } + + if !foundTR1 { + t.Errorf("Expected PipelineRun status to include ChildReferences status for %s but was %v", tr1Name, reconciledRun.Status.ChildReferences) + } + if !foundTR2 { + t.Errorf("Expected PipelineRun status to include ChildReferences status for %s but was %v", tr2Name, reconciledRun.Status.ChildReferences) + } } // A PVC should have been created to deal with output -> input linking ensurePVCCreated(prt.TestAssets.Ctx, t, clients, expectedTaskRun.GetPipelineRunPVCName(), "foo") + } // TestReconcile_CustomTask runs "Reconcile" on a PipelineRun with one Custom @@ -700,6 +767,7 @@ func TestReconcile_CustomTask(t *testing.T) { const pipelineRunName = "test-pipelinerun" const pipelineTaskName = "custom-task" const namespace = "namespace" + tcs := []struct { name string pr *v1beta1.PipelineRun @@ -901,17 +969,16 @@ func TestReconcile_CustomTask(t *testing.T) { }, }} - cms := []*corev1.ConfigMap{ - { - ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()}, - Data: map[string]string{ - "enable-custom-tasks": "true", - }, - }, - } - for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { + cms := []*corev1.ConfigMap{ + { + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()}, + Data: map[string]string{ + "enable-custom-tasks": "true", + }, + }, + } d := test.Data{ PipelineRuns: []*v1beta1.PipelineRun{tc.pr}, @@ -946,11 +1013,172 @@ func TestReconcile_CustomTask(t *testing.T) { t.Errorf("Expected reason %q but was %s", v1beta1.PipelineRunReasonRunning.String(), condition.Reason) } - if len(reconciledRun.Status.Runs) != 1 { - t.Errorf("Expected PipelineRun status to include one Run status, got %d", 
len(reconciledRun.Status.Runs)) + if shouldHaveFullEmbeddedStatus(config.DefaultEmbeddedStatus) { + if len(reconciledRun.Status.Runs) != 1 { + t.Errorf("Expected PipelineRun status to include one Run status, got %d", len(reconciledRun.Status.Runs)) + } + if _, exists := reconciledRun.Status.Runs[tc.wantRun.Name]; !exists { + t.Errorf("Expected PipelineRun status to include Run status but was %v", reconciledRun.Status.Runs) + } + } + if shouldHaveMinimalEmbeddedStatus(config.DefaultEmbeddedStatus) { + if len(reconciledRun.Status.ChildReferences) != 1 { + t.Errorf("Expected PipelineRun status ChildReferences to include one Run, got %d", len(reconciledRun.Status.ChildReferences)) + } + if reconciledRun.Status.ChildReferences[0].Name != tc.wantRun.Name { + t.Errorf("Expected PipelineRun status ChildReferences to include Run %s but was %v", tc.wantRun.Name, reconciledRun.Status.ChildReferences) + } + } + }) + } +} + +// TestReconcile_CustomTaskWithEmbeddedStatus runs "Reconcile" on a PipelineRun +// with one Custom Task reference that has not been run yet, and each possible +// value for the "embedded-status" feature flag. It verifies that the Run is +// created, it checks the resulting API actions, status and events. 
+func TestReconcile_CustomTaskWithEmbeddedStatus(t *testing.T) { + testCases := []struct { + name string + embeddedStatusVal string + }{ + { + name: "default embedded status", + embeddedStatusVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedStatusVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedStatusVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedStatusVal: config.MinimalEmbeddedStatus, + }, + } + const pipelineRunName = "test-pipelinerun" + const pipelineTaskName = "custom-task" + const namespace = "namespace" + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + names.TestingSeed() + cms := []*corev1.ConfigMap{ + { + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()}, + Data: map[string]string{ + "enable-custom-tasks": "true", + embeddedStatusFeatureFlag: tc.embeddedStatusVal, + }, + }, + } + + d := test.Data{ + PipelineRuns: []*v1beta1.PipelineRun{{ + ObjectMeta: metav1.ObjectMeta{ + Name: pipelineRunName, + Namespace: namespace, + }, + Spec: v1beta1.PipelineRunSpec{ + PipelineSpec: &v1beta1.PipelineSpec{ + Tasks: []v1beta1.PipelineTask{{ + Name: pipelineTaskName, + Retries: 3, + Params: []v1beta1.Param{{ + Name: "param1", + Value: v1beta1.ArrayOrString{Type: v1beta1.ParamTypeString, StringVal: "value1"}, + }}, + TaskRef: &v1beta1.TaskRef{ + APIVersion: "example.dev/v0", + Kind: "Example", + }, + }}, + }, + }, + }}, + ConfigMaps: cms, + } + prt := newPipelineRunTest(d, t) + defer prt.Cancel() + + wantRun := &v1alpha1.Run{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pipelinerun-custom-task", + Namespace: namespace, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "tekton.dev/v1beta1", + Kind: "PipelineRun", + Name: pipelineRunName, + Controller: &trueb, + BlockOwnerDeletion: &trueb, + }}, + Labels: map[string]string{ + "tekton.dev/pipeline": pipelineRunName, + 
"tekton.dev/pipelineRun": pipelineRunName, + "tekton.dev/pipelineTask": pipelineTaskName, + pipeline.MemberOfLabelKey: v1beta1.PipelineTasks, + }, + Annotations: map[string]string{}, + }, + Spec: v1alpha1.RunSpec{ + Retries: 3, + Params: []v1beta1.Param{{ + Name: "param1", + Value: v1beta1.ArrayOrString{Type: v1beta1.ParamTypeString, StringVal: "value1"}, + }}, + Timeout: &metav1.Duration{Duration: time.Hour}, + Ref: &v1beta1.TaskRef{ + APIVersion: "example.dev/v0", + Kind: "Example", + }, + ServiceAccountName: "default", + }, + } + + wantEvents := []string{ + "Normal Started", + "Normal Running Tasks Completed: 0", + } + reconciledRun, clients := prt.reconcileRun(namespace, pipelineRunName, wantEvents, false) + + actions := clients.Pipeline.Actions() + if len(actions) < 2 { + t.Fatalf("Expected client to have at least two action implementation but it has %d", len(actions)) + } + + // Check that the expected Run was created. + actual := actions[0].(ktesting.CreateAction).GetObject() + if d := cmp.Diff(wantRun, actual); d != "" { + t.Errorf("expected to see Run created: %s", diff.PrintWantGot(d)) + } + + // This PipelineRun is in progress now and the status should reflect that + condition := reconciledRun.Status.GetCondition(apis.ConditionSucceeded) + if condition == nil || condition.Status != corev1.ConditionUnknown { + t.Errorf("Expected PipelineRun status to be in progress, but was %v", condition) + } + if condition != nil && condition.Reason != v1beta1.PipelineRunReasonRunning.String() { + t.Errorf("Expected reason %q but was %s", v1beta1.PipelineRunReasonRunning.String(), condition.Reason) + } + + if shouldHaveFullEmbeddedStatus(tc.embeddedStatusVal) { + if len(reconciledRun.Status.Runs) != 1 { + t.Errorf("Expected PipelineRun status to include one Run status, got %d", len(reconciledRun.Status.Runs)) + } + if _, exists := reconciledRun.Status.Runs[wantRun.Name]; !exists { + t.Errorf("Expected PipelineRun status to include Run status but was %v", 
reconciledRun.Status.Runs) + } } - if _, exists := reconciledRun.Status.Runs[tc.wantRun.Name]; !exists { - t.Errorf("Expected PipelineRun status to include Run status but was %v", reconciledRun.Status.Runs) + if shouldHaveMinimalEmbeddedStatus(tc.embeddedStatusVal) { + if len(reconciledRun.Status.ChildReferences) != 1 { + t.Errorf("Expected PipelineRun status ChildReferences to include one Run, got %d", len(reconciledRun.Status.ChildReferences)) + } + if reconciledRun.Status.ChildReferences[0].Name != wantRun.Name { + t.Errorf("Expected PipelineRun status ChildReferences to include Run %s but was %v", wantRun.Name, reconciledRun.Status.ChildReferences) + } } }) } @@ -959,6 +1187,36 @@ func TestReconcile_CustomTask(t *testing.T) { func TestReconcile_PipelineSpecTaskSpec(t *testing.T) { // TestReconcile_PipelineSpecTaskSpec runs "Reconcile" on a PipelineRun that has an embedded PipelineSpec that has an embedded TaskSpec. // It verifies that a TaskRun is created, it checks the resulting API actions, status and events. 
+ testCases := []struct { + name string + embeddedStatusVal string + }{ + { + name: "default embedded status", + embeddedStatusVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedStatusVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedStatusVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedStatusVal: config.MinimalEmbeddedStatus, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runTestReconcilePipelineSpecTaskSpec(t, tc.embeddedStatusVal) + }) + } +} + +func runTestReconcilePipelineSpecTaskSpec(t *testing.T, embeddedStatus string) { names.TestingSeed() prs := []*v1beta1.PipelineRun{ @@ -989,6 +1247,7 @@ spec: d := test.Data{ PipelineRuns: prs, Pipelines: ps, + ConfigMaps: getConfigMapsWithEmbeddedStatus(embeddedStatus), } prt := newPipelineRunTest(d, t) defer prt.Cancel() @@ -1029,12 +1288,23 @@ spec: t.Errorf("expected to see TaskRun PVC name set to %q created but got %s", "test-pipeline-run-success-pvc", expectedTaskRun.GetPipelineRunPVCName()) } - if len(reconciledRun.Status.TaskRuns) != 1 { - t.Errorf("Expected PipelineRun status to include both TaskRun status items that can run immediately: %v", reconciledRun.Status.TaskRuns) + if shouldHaveFullEmbeddedStatus(embeddedStatus) { + if len(reconciledRun.Status.TaskRuns) != 1 { + t.Errorf("Expected PipelineRun status to include both TaskRun status items that can run immediately: %v", reconciledRun.Status.TaskRuns) + } + + if _, exists := reconciledRun.Status.TaskRuns["test-pipeline-run-success-unit-test-task-spec"]; !exists { + t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.TaskRuns) + } } + if shouldHaveMinimalEmbeddedStatus(embeddedStatus) { + if len(reconciledRun.Status.ChildReferences) != 1 { + t.Errorf("Expected PipelineRun status ChildReferences to include both TaskRun status items that can run immediately: %v", 
reconciledRun.Status.ChildReferences) + } - if _, exists := reconciledRun.Status.TaskRuns["test-pipeline-run-success-unit-test-task-spec"]; !exists { - t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.TaskRuns) + if reconciledRun.Status.ChildReferences[0].Name != "test-pipeline-run-success-unit-test-task-spec" { + t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.ChildReferences) + } } } @@ -1661,7 +1931,21 @@ func getConfigMapsWithEnabledAlphaAPIFields() []*corev1.ConfigMap { }} } -func TestReconcileOnCancelledPipelineRun(t *testing.T) { +func getConfigMapsWithEmbeddedStatus(flagVal string) []*corev1.ConfigMap { + return []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()}, + Data: map[string]string{embeddedStatusFeatureFlag: flagVal}, + }} +} + +func getConfigMapsWithEmbeddedStatusAndAlphaAPI(flagVal string) []*corev1.ConfigMap { + cm := getConfigMapsWithEnabledAlphaAPIFields() + cm[0].Data[embeddedStatusFeatureFlag] = flagVal + + return cm +} + +func TestReconcileOnCancelledPipelineRunFullEmbeddedStatus(t *testing.T) { // TestReconcileOnCancelledPipelineRun runs "Reconcile" on a PipelineRun that has been cancelled. // It verifies that reconcile is successful, the pipeline status updated and events generated. 
prs := []*v1beta1.PipelineRun{createCancelledPipelineRun(t, "test-pipeline-run-cancelled", v1beta1.PipelineRunSpecStatusCancelled)} @@ -1669,7 +1953,42 @@ func TestReconcileOnCancelledPipelineRun(t *testing.T) { ts := []*v1beta1.Task{simpleHelloWorldTask} trs := []*v1beta1.TaskRun{createHelloWorldTaskRun(t, "test-pipeline-run-cancelled-hello-world", "foo", "test-pipeline-run-cancelled", "test-pipeline")} - cms := getConfigMapsWithEnabledAlphaAPIFields() + cms := getConfigMapsWithEmbeddedStatusAndAlphaAPI(config.FullEmbeddedStatus) + + d := test.Data{ + PipelineRuns: prs, + Pipelines: ps, + Tasks: ts, + TaskRuns: trs, + ConfigMaps: cms, + } + prt := newPipelineRunTest(d, t) + defer prt.Cancel() + + wantEvents := []string{ + "Warning Failed PipelineRun \"test-pipeline-run-cancelled\" was cancelled", + } + reconciledRun, _ := prt.reconcileRun("foo", "test-pipeline-run-cancelled", wantEvents, false) + + if reconciledRun.Status.CompletionTime == nil { + t.Errorf("Expected a CompletionTime on invalid PipelineRun but was nil") + } + + // This PipelineRun should still be complete and false, and the status should reflect that + if !reconciledRun.Status.GetCondition(apis.ConditionSucceeded).IsFalse() { + t.Errorf("Expected PipelineRun status to be complete and false, but was %v", reconciledRun.Status.GetCondition(apis.ConditionSucceeded)) + } +} + +func TestReconcileOnCancelledPipelineRunMinimalEmbeddedStatus(t *testing.T) { + // TestReconcileOnCancelledPipelineRunMinimalEmbeddedStatus runs "Reconcile" on a PipelineRun that has been cancelled, with the "embedded-status" feature flag set to "minimal". + // It verifies that reconcile is successful, the pipeline status updated and events generated. 
+ prs := []*v1beta1.PipelineRun{createCancelledPipelineRun(t, "test-pipeline-run-cancelled", v1beta1.PipelineRunSpecStatusCancelled)} + ps := []*v1beta1.Pipeline{simpleHelloWorldPipeline} + ts := []*v1beta1.Task{simpleHelloWorldTask} + trs := []*v1beta1.TaskRun{createHelloWorldTaskRun(t, "test-pipeline-run-cancelled-hello-world", "foo", + "test-pipeline-run-cancelled", "test-pipeline")} + cms := getConfigMapsWithEmbeddedStatusAndAlphaAPI(config.MinimalEmbeddedStatus) d := test.Data{ PipelineRuns: prs, @@ -1931,6 +2250,36 @@ func TestReconcileForCustomTaskWithPipelineRunTimedOut(t *testing.T) { } func TestReconcileOnCancelledRunFinallyPipelineRun(t *testing.T) { + testCases := []struct { + name string + embeddedVal string + }{ + { + name: "default embedded status", + embeddedVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedVal: config.MinimalEmbeddedStatus, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runTestReconcileOnCancelledRunFinallyPipelineRun(t, tc.embeddedVal) + }) + } +} + +func runTestReconcileOnCancelledRunFinallyPipelineRun(t *testing.T, embeddedStatus string) { // TestReconcileOnCancelledRunFinallyPipelineRun runs "Reconcile" on a PipelineRun that has been gracefully cancelled. // It verifies that reconcile is successful, the pipeline status updated and events generated. 
prs := []*v1beta1.PipelineRun{createCancelledPipelineRun(t, "test-pipeline-run-cancelled-run-finally", v1beta1.PipelineRunSpecStatusCancelledRunFinally)} @@ -1978,9 +2327,12 @@ spec: } // There should be no task runs triggered for the pipeline tasks - if len(reconciledRun.Status.TaskRuns) != 0 { + if shouldHaveFullEmbeddedStatus(embeddedStatus) && len(reconciledRun.Status.TaskRuns) != 0 { t.Errorf("Expected PipelineRun status to have exactly no task runs, but was %v", len(reconciledRun.Status.TaskRuns)) } + if shouldHaveMinimalEmbeddedStatus(embeddedStatus) && len(reconciledRun.Status.ChildReferences) != 0 { + t.Errorf("Expected PipelineRun status ChildReferences to have exactly no task runs, but was %v", len(reconciledRun.Status.ChildReferences)) + } expectedSkippedTasks := []v1beta1.SkippedTask{{ Name: "hello-world-1", @@ -1991,10 +2343,39 @@ spec: if d := cmp.Diff(expectedSkippedTasks, reconciledRun.Status.SkippedTasks); d != "" { t.Fatalf("Didn't get the expected list of skipped tasks. Diff: %s", diff.PrintWantGot(d)) } - } func TestReconcileOnCancelledRunFinallyPipelineRunWithFinalTask(t *testing.T) { + testCases := []struct { + name string + embeddedStatusVal string + }{ + { + name: "default embedded status", + embeddedStatusVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedStatusVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedStatusVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedStatusVal: config.MinimalEmbeddedStatus, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runTestReconcileOnCancelledRunFinallyPipelineRunWithFinalTask(t, tc.embeddedStatusVal) + }) + } +} + +func runTestReconcileOnCancelledRunFinallyPipelineRunWithFinalTask(t *testing.T, embeddedStatus string) { // TestReconcileOnCancelledRunFinallyPipelineRunWithFinalTask runs "Reconcile" on a PipelineRun that has been gracefully cancelled. 
// It verifies that reconcile is successful, final tasks run, the pipeline status updated and events generated. prs := []*v1beta1.PipelineRun{createCancelledPipelineRun(t, "test-pipeline-run-cancelled-run-finally", v1beta1.PipelineRunSpecStatusCancelledRunFinally)} @@ -2023,7 +2404,7 @@ spec: simpleHelloWorldTask, simpleSomeTask, } - cms := getConfigMapsWithEnabledAlphaAPIFields() + cms := getConfigMapsWithEmbeddedStatusAndAlphaAPI(embeddedStatus) d := test.Data{ PipelineRuns: prs, @@ -2049,17 +2430,28 @@ spec: } // There should be exactly one task run triggered for the "final-task-1" final task - if len(reconciledRun.Status.TaskRuns) != 1 { - t.Errorf("Expected PipelineRun status to have exactly one task run, but was %v", len(reconciledRun.Status.TaskRuns)) - } + if shouldHaveFullEmbeddedStatus(embeddedStatus) { + if len(reconciledRun.Status.TaskRuns) != 1 { + t.Errorf("Expected PipelineRun status to have exactly one task run, but was %v", len(reconciledRun.Status.TaskRuns)) + } - expectedTaskRunsStatus := &v1beta1.PipelineRunTaskRunStatus{ - PipelineTaskName: "final-task-1", - Status: &v1beta1.TaskRunStatus{}, + expectedTaskRunsStatus := &v1beta1.PipelineRunTaskRunStatus{ + PipelineTaskName: "final-task-1", + Status: &v1beta1.TaskRunStatus{}, + } + for _, taskRun := range reconciledRun.Status.TaskRuns { + if d := cmp.Diff(taskRun, expectedTaskRunsStatus); d != "" { + t.Fatalf("Expected PipelineRun status to match TaskRun(s) status, but got a mismatch %s", diff.PrintWantGot(d)) + } + } } - for _, taskRun := range reconciledRun.Status.TaskRuns { - if d := cmp.Diff(taskRun, expectedTaskRunsStatus); d != "" { - t.Fatalf("Expected PipelineRun status to match TaskRun(s) status, but got a mismatch %s", diff.PrintWantGot(d)) + if shouldHaveMinimalEmbeddedStatus(embeddedStatus) { + if len(reconciledRun.Status.ChildReferences) != 1 { + t.Errorf("Expected PipelineRun status ChildReferences to have exactly one task run, but was %v", len(reconciledRun.Status.ChildReferences)) 
+ } + + if reconciledRun.Status.ChildReferences[0].PipelineTaskName != "final-task-1" { + t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.ChildReferences) } } } @@ -2175,17 +2567,48 @@ func TestReconcileOnCancelledRunFinallyPipelineRunWithFinalTaskAndRetries(t *tes // TestReconcileOnCancelledRunFinallyPipelineRunWithFinalTaskAndRetries runs "Reconcile" on a PipelineRun that has // been gracefully cancelled. It verifies that reconcile is successful, the pipeline status updated and events generated. - // Pipeline has a DAG task "hello-world-1" and Finally task "hello-world-2" - ps := []*v1beta1.Pipeline{{ - ObjectMeta: baseObjectMeta("test-pipeline", "foo"), - Spec: v1beta1.PipelineSpec{ - Tasks: []v1beta1.PipelineTask{{ - Name: "hello-world-1", - TaskRef: &v1beta1.TaskRef{ - Name: "hello-world", - }, - Retries: 2, - }}, + testCases := []struct { + name string + embeddedStatusVal string + }{ + { + name: "default embedded status", + embeddedStatusVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedStatusVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedStatusVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedStatusVal: config.MinimalEmbeddedStatus, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runTestReconcileOnCancelledRunFinallyPipelineRunWithFinalTaskAndRetries(t, tc.embeddedStatusVal) + }) + } +} + +func runTestReconcileOnCancelledRunFinallyPipelineRunWithFinalTaskAndRetries(t *testing.T, embeddedStatus string) { + + // Pipeline has a DAG task "hello-world-1" and Finally task "hello-world-2" + ps := []*v1beta1.Pipeline{{ + ObjectMeta: baseObjectMeta("test-pipeline", "foo"), + Spec: v1beta1.PipelineSpec{ + Tasks: []v1beta1.PipelineTask{{ + Name: "hello-world-1", + TaskRef: &v1beta1.TaskRef{ + Name: "hello-world", + }, + Retries: 2, + }}, Finally: []v1beta1.PipelineTask{{ Name: 
"hello-world-2", TaskRef: &v1beta1.TaskRef{ @@ -2234,7 +2657,7 @@ func TestReconcileOnCancelledRunFinallyPipelineRunWithFinalTaskAndRetries(t *tes })} ts := []*v1beta1.Task{simpleHelloWorldTask} - cms := getConfigMapsWithEnabledAlphaAPIFields() + cms := getConfigMapsWithEmbeddedStatusAndAlphaAPI(embeddedStatus) d := test.Data{ PipelineRuns: prs, @@ -2259,10 +2682,12 @@ func TestReconcileOnCancelledRunFinallyPipelineRunWithFinalTaskAndRetries(t *tes } // There should be two task runs (failed dag task and one triggered for the finally task) - if len(reconciledRun.Status.TaskRuns) != 2 { + if shouldHaveFullEmbeddedStatus(embeddedStatus) && len(reconciledRun.Status.TaskRuns) != 2 { t.Errorf("Expected PipelineRun status to have exactly two task runs, but was %v", len(reconciledRun.Status.TaskRuns)) } - + if shouldHaveMinimalEmbeddedStatus(embeddedStatus) && len(reconciledRun.Status.ChildReferences) != 2 { + t.Errorf("Expected PipelineRun status ChildReferences to have exactly two task runs, but was %v", len(reconciledRun.Status.ChildReferences)) + } } func TestReconcileCancelledRunFinallyFailsTaskRunCancellation(t *testing.T) { @@ -4567,6 +4992,37 @@ func ensurePVCCreated(ctx context.Context, t *testing.T, clients test.Clients, n } func TestReconcileWithWhenExpressionsWithParameters(t *testing.T) { + testCases := []struct { + name string + embeddedStatusVal string + }{ + { + name: "default embedded status", + embeddedStatusVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedStatusVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedStatusVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedStatusVal: config.MinimalEmbeddedStatus, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runTestReconcileWithWhenExpressionsWithParameters(t, tc.embeddedStatusVal) + }) + } +} + +func runTestReconcileWithWhenExpressionsWithParameters(t *testing.T, 
embeddedStatus string) { + names.TestingSeed() prName := "test-pipeline-run" ps := []*v1beta1.Pipeline{{ @@ -4629,6 +5085,7 @@ func TestReconcileWithWhenExpressionsWithParameters(t *testing.T) { PipelineRuns: prs, Pipelines: ps, Tasks: ts, + ConfigMaps: getConfigMapsWithEmbeddedStatus(embeddedStatus), } prt := newPipelineRunTest(d, t) defer prt.Cancel() @@ -4667,7 +5124,6 @@ func TestReconcileWithWhenExpressionsWithParameters(t *testing.T) { t.Errorf("expected to see TaskRun %v created. Diff %s", expectedTaskRunName, diff.PrintWantGot(d)) } - actualWhenExpressionsInTaskRun := pipelineRun.Status.PipelineRunStatusFields.TaskRuns[expectedTaskRunName].WhenExpressions expectedWhenExpressionsInTaskRun := []v1beta1.WhenExpression{{ Input: "foo", Operator: "notin", @@ -4677,8 +5133,22 @@ func TestReconcileWithWhenExpressionsWithParameters(t *testing.T) { Operator: "in", Values: []string{"yes"}, }} - if d := cmp.Diff(expectedWhenExpressionsInTaskRun, actualWhenExpressionsInTaskRun); d != "" { - t.Errorf("expected to see When Expressions %v created. Diff %s", expectedTaskRunName, diff.PrintWantGot(d)) + if shouldHaveFullEmbeddedStatus(embeddedStatus) { + actualWhenExpressionsInTaskRun := pipelineRun.Status.PipelineRunStatusFields.TaskRuns[expectedTaskRunName].WhenExpressions + if d := cmp.Diff(expectedWhenExpressionsInTaskRun, actualWhenExpressionsInTaskRun); d != "" { + t.Errorf("expected to see When Expressions %v created. Diff %s", expectedTaskRunName, diff.PrintWantGot(d)) + } + } + if shouldHaveMinimalEmbeddedStatus(embeddedStatus) { + var actualWhenExpressionsInTaskRun []v1beta1.WhenExpression + for _, cr := range pipelineRun.Status.ChildReferences { + if cr.Name == expectedTaskRunName { + actualWhenExpressionsInTaskRun = append(actualWhenExpressionsInTaskRun, cr.WhenExpressions...) + } + } + if d := cmp.Diff(expectedWhenExpressionsInTaskRun, actualWhenExpressionsInTaskRun); d != "" { + t.Errorf("expected to see When Expressions %v created. 
Diff %s", expectedTaskRunName, diff.PrintWantGot(d)) + } } actualSkippedTasks := pipelineRun.Status.SkippedTasks @@ -4711,6 +5181,36 @@ func TestReconcileWithWhenExpressionsWithParameters(t *testing.T) { } func TestReconcileWithWhenExpressionsWithTaskResults(t *testing.T) { + testCases := []struct { + name string + embeddedStatusVal string + }{ + { + name: "default embedded status", + embeddedStatusVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedStatusVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedStatusVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedStatusVal: config.MinimalEmbeddedStatus, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runTestReconcileWithWhenExpressionsWithTaskResults(t, tc.embeddedStatusVal) + }) + } +} + +func runTestReconcileWithWhenExpressionsWithTaskResults(t *testing.T, embeddedStatus string) { names.TestingSeed() ps := []*v1beta1.Pipeline{{ ObjectMeta: baseObjectMeta("test-pipeline", "foo"), @@ -4799,6 +5299,7 @@ func TestReconcileWithWhenExpressionsWithTaskResults(t *testing.T) { Pipelines: ps, Tasks: ts, TaskRuns: trs, + ConfigMaps: getConfigMapsWithEmbeddedStatus(embeddedStatus), } prt := newPipelineRunTest(d, t) defer prt.Cancel() @@ -4836,7 +5337,6 @@ func TestReconcileWithWhenExpressionsWithTaskResults(t *testing.T) { t.Errorf("expected to see TaskRun %v created. 
Diff %s", expectedTaskRunName, diff.PrintWantGot(d)) } - actualWhenExpressionsInTaskRun := pipelineRun.Status.PipelineRunStatusFields.TaskRuns[expectedTaskRunName].WhenExpressions expectedWhenExpressionsInTaskRun := []v1beta1.WhenExpression{{ Input: "aResultValue", Operator: "in", @@ -4846,8 +5346,22 @@ func TestReconcileWithWhenExpressionsWithTaskResults(t *testing.T) { Operator: "in", Values: []string{"aResultValue"}, }} - if d := cmp.Diff(expectedWhenExpressionsInTaskRun, actualWhenExpressionsInTaskRun); d != "" { - t.Errorf("expected to see When Expressions %v created. Diff %s", expectedTaskRunName, diff.PrintWantGot(d)) + if shouldHaveFullEmbeddedStatus(embeddedStatus) { + actualWhenExpressionsInTaskRun := pipelineRun.Status.PipelineRunStatusFields.TaskRuns[expectedTaskRunName].WhenExpressions + if d := cmp.Diff(expectedWhenExpressionsInTaskRun, actualWhenExpressionsInTaskRun); d != "" { + t.Errorf("expected to see When Expressions %v created. Diff %s", expectedTaskRunName, diff.PrintWantGot(d)) + } + } + if shouldHaveMinimalEmbeddedStatus(embeddedStatus) { + var actualWhenExpressionsInTaskRun []v1beta1.WhenExpression + for _, cr := range pipelineRun.Status.ChildReferences { + if cr.Name == expectedTaskRunName { + actualWhenExpressionsInTaskRun = append(actualWhenExpressionsInTaskRun, cr.WhenExpressions...) + } + } + if d := cmp.Diff(expectedWhenExpressionsInTaskRun, actualWhenExpressionsInTaskRun); d != "" { + t.Errorf("expected to see When Expressions %v created. 
Diff %s", expectedTaskRunName, diff.PrintWantGot(d)) + } } actualSkippedTasks := pipelineRun.Status.SkippedTasks @@ -5869,6 +6383,36 @@ func TestReconcileWithTaskResultsEmbeddedNoneStarted(t *testing.T) { } func TestReconcileWithPipelineResults(t *testing.T) { + testCases := []struct { + name string + embeddedVal string + }{ + { + name: "default embedded status", + embeddedVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedVal: config.MinimalEmbeddedStatus, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runTestReconcileWithPipelineResults(t, tc.embeddedVal) + }) + } +} + +func runTestReconcileWithPipelineResults(t *testing.T, embeddedStatus string) { names.TestingSeed() ps := []*v1beta1.Pipeline{{ ObjectMeta: baseObjectMeta("test-pipeline", "foo"), @@ -5976,6 +6520,7 @@ func TestReconcileWithPipelineResults(t *testing.T) { Pipelines: ps, Tasks: ts, TaskRuns: trs, + ConfigMaps: getConfigMapsWithEmbeddedStatus(embeddedStatus), } prt := newPipelineRunTest(d, t) defer prt.Cancel() @@ -6055,6 +6600,39 @@ func TestReconcileOutOfSyncPipelineRun(t *testing.T) { // the reconciler is able to coverge back to a consistent state with the orphaned // TaskRuns back in the PipelineRun status. 
// For more details, see https://github.com/tektoncd/pipeline/issues/2558 + + testCases := []struct { + name string + embeddedStatusVal string + }{ + { + name: "default embedded status", + embeddedStatusVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedStatusVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedStatusVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedStatusVal: config.MinimalEmbeddedStatus, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runTestReconcileOutOfSyncPipelineRun(t, tc.embeddedStatusVal) + }) + } +} + +func runTestReconcileOutOfSyncPipelineRun(t *testing.T, embeddedStatus string) { + ctx := context.Background() + prOutOfSyncName := "test-pipeline-run-out-of-sync" helloWorldTask := simpleHelloWorldTask @@ -6292,21 +6870,48 @@ func TestReconcileOutOfSyncPipelineRun(t *testing.T) { }, }, }, - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{ - taskRunDone.Name: { - PipelineTaskName: "hello-world-1", - Status: &v1beta1.TaskRunStatus{}, - }, - taskRunWithCondition.Name: { - PipelineTaskName: "hello-world-3", - Status: nil, - ConditionChecks: prccs3, - }, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{}, + }, + } + + if shouldHaveFullEmbeddedStatus(embeddedStatus) { + prOutOfSync.Status.TaskRuns = map[string]*v1beta1.PipelineRunTaskRunStatus{ + taskRunDone.Name: { + PipelineTaskName: "hello-world-1", + Status: &v1beta1.TaskRunStatus{}, + }, + taskRunWithCondition.Name: { + PipelineTaskName: "hello-world-3", + Status: nil, + ConditionChecks: prccs3, + }, + } + } + if shouldHaveMinimalEmbeddedStatus(embeddedStatus) { + prOutOfSync.Status.ChildReferences = []v1beta1.ChildStatusReference{ + { + TypeMeta: runtime.TypeMeta{ + APIVersion: v1beta1.SchemeGroupVersion.String(), + Kind: "TaskRun", }, + Name: taskRunDone.Name, + 
PipelineTaskName: "hello-world-1", }, - }, + { + TypeMeta: runtime.TypeMeta{ + APIVersion: v1beta1.SchemeGroupVersion.String(), + Kind: "TaskRun", + }, + Name: taskRunWithCondition.Name, + PipelineTaskName: "hello-world-3", + ConditionChecks: []*v1beta1.PipelineRunChildConditionCheckStatus{{ + PipelineRunConditionCheckStatus: *prccs3[conditionCheckName3], + ConditionCheckName: conditionCheckName3, + }}, + }, + } } + prs := []*v1beta1.PipelineRun{prOutOfSync} ps := []*v1beta1.Pipeline{testPipeline} ts := []*v1beta1.Task{helloWorldTask} @@ -6332,7 +6937,8 @@ func TestReconcileOutOfSyncPipelineRun(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()}, Data: map[string]string{ - "enable-custom-tasks": "true", + "enable-custom-tasks": "true", + embeddedStatusFeatureFlag: embeddedStatus, }, }, } @@ -6453,433 +7059,61 @@ func TestReconcileOutOfSyncPipelineRun(t *testing.T) { }, } - if d := cmp.Diff(reconciledRun.Status.TaskRuns, expectedTaskRunsStatus); d != "" { - t.Fatalf("Expected PipelineRun status to match TaskRun(s) status, but got a mismatch: %s", d) - } - if d := cmp.Diff(reconciledRun.Status.Runs, expectedRunsStatus); d != "" { - t.Fatalf("Expected PipelineRun status to match Run(s) status, but got a mismatch: %s", d) - } -} - -func TestUpdatePipelineRunStatusFromInformer(t *testing.T) { - names.TestingSeed() - - pr := &v1beta1.PipelineRun{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pipeline-run", - Namespace: "foo", - Labels: map[string]string{"mylabel": "myvale"}, - }, - Spec: v1beta1.PipelineRunSpec{ - PipelineSpec: &v1beta1.PipelineSpec{ - Tasks: []v1beta1.PipelineTask{{ - Name: "unit-test-task-spec", - TaskSpec: &v1beta1.EmbeddedTask{ - TaskSpec: v1beta1.TaskSpec{ - Steps: []v1beta1.Step{{Container: corev1.Container{ - Name: "mystep", - Image: "myimage"}}}, - }, - }, - }}, - }, - }, - } - - d := test.Data{ - PipelineRuns: []*v1beta1.PipelineRun{pr}, - } - prt := newPipelineRunTest(d, t) - 
defer prt.Cancel() - - wantEvents := []string{ - "Normal Started", - "Normal Running Tasks Completed: 0", - } - - // Reconcile the PipelineRun. This creates a Taskrun. - reconciledRun, clients := prt.reconcileRun("foo", "test-pipeline-run", wantEvents, false) - - // Save the name of the TaskRun that was created. - taskRunName := "" - if len(reconciledRun.Status.TaskRuns) != 1 { - t.Fatalf("Expected 1 TaskRun but got %d", len(reconciledRun.Status.TaskRuns)) - } - for k := range reconciledRun.Status.TaskRuns { - taskRunName = k - break - } - - // Add a label to the PipelineRun. This tests a scenario in issue 3126 which could prevent the reconciler - // from finding TaskRuns that are missing from the status. - reconciledRun.ObjectMeta.Labels["bah"] = "humbug" - reconciledRun, err := clients.Pipeline.TektonV1beta1().PipelineRuns("foo").Update(prt.TestAssets.Ctx, reconciledRun, metav1.UpdateOptions{}) - if err != nil { - t.Fatalf("unexpected error when updating status: %v", err) - } - - // The label update triggers another reconcile. Depending on timing, the PipelineRun passed to the reconcile may or may not - // have the updated status with the name of the created TaskRun. Clear the status because we want to test the case where the - // status does not have the TaskRun. - reconciledRun.Status = v1beta1.PipelineRunStatus{} - if _, err := clients.Pipeline.TektonV1beta1().PipelineRuns("foo").UpdateStatus(prt.TestAssets.Ctx, reconciledRun, metav1.UpdateOptions{}); err != nil { - t.Fatalf("unexpected error when updating status: %v", err) - } - - reconciledRun, _ = prt.reconcileRun("foo", "test-pipeline-run", wantEvents, false) - - // Verify that the reconciler found the existing TaskRun instead of creating a new one. 
- if len(reconciledRun.Status.TaskRuns) != 1 { - t.Fatalf("Expected 1 TaskRun after label change but got %d", len(reconciledRun.Status.TaskRuns)) - } - for k := range reconciledRun.Status.TaskRuns { - if k != taskRunName { - t.Fatalf("Status has unexpected taskrun %s", k) + if shouldHaveFullEmbeddedStatus(embeddedStatus) { + if d := cmp.Diff(expectedTaskRunsStatus, reconciledRun.Status.TaskRuns); d != "" { + t.Fatalf("Expected PipelineRun status to match TaskRun(s) status, but got a mismatch: %s", d) + } + if d := cmp.Diff(expectedRunsStatus, reconciledRun.Status.Runs); d != "" { + t.Fatalf("Expected PipelineRun status to match Run(s) status, but got a mismatch: %s", d) } } -} - -func TestUpdatePipelineRunStatusFromTaskRuns(t *testing.T) { - - prUID := types.UID("11111111-1111-1111-1111-111111111111") - otherPrUID := types.UID("22222222-2222-2222-2222-222222222222") - - // PipelineRunConditionCheckStatus recovered by updatePipelineRunStatusFromTaskRuns - // It does not include the status, which is then retrieved via the regular reconcile - prccs2Recovered := map[string]*v1beta1.PipelineRunConditionCheckStatus{ - "pr-task-2-running-condition-check-xxyyy": { - ConditionName: "running-condition-0", - }, - } - prccs3Recovered := map[string]*v1beta1.PipelineRunConditionCheckStatus{ - "pr-task-3-successful-condition-check-xxyyy": { - ConditionName: "successful-condition-0", - }, - } - prccs4Recovered := map[string]*v1beta1.PipelineRunConditionCheckStatus{ - "pr-task-4-failed-condition-check-xxyyy": { - ConditionName: "failed-condition-0", - }, - } - - // PipelineRunConditionCheckStatus full is used to test the behaviour of updatePipelineRunStatusFromTaskRuns - // when no orphan TaskRuns are found, to check we don't alter good ones - prccs2Full := map[string]*v1beta1.PipelineRunConditionCheckStatus{ - "pr-task-2-running-condition-check-xxyyy": { - ConditionName: "running-condition-0", - Status: &v1beta1.ConditionCheckStatus{ - ConditionCheckStatusFields: 
v1beta1.ConditionCheckStatusFields{ - Check: corev1.ContainerState{ - Running: &corev1.ContainerStateRunning{}, - }, - }, - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{{Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown}}, - }, - }, - }, - } - prccs3Full := map[string]*v1beta1.PipelineRunConditionCheckStatus{ - "pr-task-3-successful-condition-check-xxyyy": { - ConditionName: "successful-condition-0", - Status: &v1beta1.ConditionCheckStatus{ - ConditionCheckStatusFields: v1beta1.ConditionCheckStatusFields{ - Check: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ExitCode: 0}, - }, - }, - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{{Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue}}, - }, - }, - }, - } - prccs4Full := map[string]*v1beta1.PipelineRunConditionCheckStatus{ - "pr-task-4-failed-condition-check-xxyyy": { - ConditionName: "failed-condition-0", - Status: &v1beta1.ConditionCheckStatus{ - ConditionCheckStatusFields: v1beta1.ConditionCheckStatusFields{ - Check: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ExitCode: 127}, - }, - }, - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{{Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse}}, - }, - }, - }, - } - - prRunningStatus := duckv1beta1.Status{ - Conditions: []apis.Condition{ - { - Type: "Succeeded", - Status: "Unknown", - Reason: "Running", - Message: "Not all Tasks in the Pipeline have finished executing", - }, - }, - } - - prStatusWithCondition := v1beta1.PipelineRunStatus{ - Status: prRunningStatus, - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{ - "pr-task-1-xxyyy": { - PipelineTaskName: "task-1", - Status: &v1beta1.TaskRunStatus{}, - }, - "pr-task-2-xxyyy": { - PipelineTaskName: "task-2", - Status: nil, - ConditionChecks: prccs2Full, - }, - "pr-task-3-xxyyy": { - PipelineTaskName: "task-3", - Status: 
&v1beta1.TaskRunStatus{}, - ConditionChecks: prccs3Full, - }, - "pr-task-4-xxyyy": { - PipelineTaskName: "task-4", - Status: nil, - ConditionChecks: prccs4Full, - }, - }, - }, - } - - prStatusWithEmptyTaskRuns := v1beta1.PipelineRunStatus{ - Status: prRunningStatus, - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{}, - }, - } - - prStatusWithOrphans := v1beta1.PipelineRunStatus{ - Status: duckv1beta1.Status{ - Conditions: []apis.Condition{ - { - Type: "Succeeded", - Status: "Unknown", - Reason: "Running", - Message: "Not all Tasks in the Pipeline have finished executing", - }, - }, - }, - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{}, - }, - } - - prStatusRecovered := v1beta1.PipelineRunStatus{ - Status: prRunningStatus, - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{ - "pr-task-1-xxyyy": { - PipelineTaskName: "task-1", - Status: &v1beta1.TaskRunStatus{}, - }, - "orphaned-taskruns-pr-task-2-xxyyy": { - PipelineTaskName: "task-2", - Status: nil, - ConditionChecks: prccs2Recovered, - }, - "pr-task-3-xxyyy": { - PipelineTaskName: "task-3", - Status: &v1beta1.TaskRunStatus{}, - ConditionChecks: prccs3Recovered, - }, - "orphaned-taskruns-pr-task-4-xxyyy": { - PipelineTaskName: "task-4", - Status: nil, - ConditionChecks: prccs4Recovered, - }, - }, - }, - } - - prStatusRecoveredSimple := v1beta1.PipelineRunStatus{ - Status: prRunningStatus, - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{ - "pr-task-1-xxyyy": { - PipelineTaskName: "task-1", - Status: &v1beta1.TaskRunStatus{}, - }, - }, - }, - } - - allTaskRuns := []*v1beta1.TaskRun{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pr-task-1-xxyyy", - Labels: map[string]string{ - pipeline.PipelineTaskLabelKey: "task-1", - }, - OwnerReferences: 
[]metav1.OwnerReference{{UID: prUID}}, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pr-task-2-running-condition-check-xxyyy", - Labels: map[string]string{ - pipeline.PipelineTaskLabelKey: "task-2", - pipeline.ConditionCheckKey: "pr-task-2-running-condition-check-xxyyy", - pipeline.ConditionNameKey: "running-condition", - }, - OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pr-task-3-xxyyy", - Labels: map[string]string{ - pipeline.PipelineTaskLabelKey: "task-3", - }, - OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pr-task-3-successful-condition-check-xxyyy", - Labels: map[string]string{ - pipeline.PipelineTaskLabelKey: "task-3", - pipeline.ConditionCheckKey: "pr-task-3-successful-condition-check-xxyyy", - pipeline.ConditionNameKey: "successful-condition", - }, - OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pr-task-4-failed-condition-check-xxyyy", - Labels: map[string]string{ - pipeline.PipelineTaskLabelKey: "task-4", - pipeline.ConditionCheckKey: "pr-task-4-failed-condition-check-xxyyy", - pipeline.ConditionNameKey: "failed-condition", - }, - OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, - }, - }, - } + if shouldHaveMinimalEmbeddedStatus(embeddedStatus) { + taskRunsStatus := make(map[string]*v1beta1.PipelineRunTaskRunStatus) + runsStatus := make(map[string]*v1beta1.PipelineRunRunStatus) - taskRunsFromAnotherPR := []*v1beta1.TaskRun{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pr-task-1-xxyyy", - Labels: map[string]string{ - pipeline.PipelineTaskLabelKey: "task-1", - }, - OwnerReferences: []metav1.OwnerReference{{UID: otherPrUID}}, - }, - }, - } + for _, cr := range reconciledRun.Status.ChildReferences { + if cr.Kind == "TaskRun" { + trStatusForPipelineRun := &v1beta1.PipelineRunTaskRunStatus{ + PipelineTaskName: cr.PipelineTaskName, + 
WhenExpressions: cr.WhenExpressions, + } - tcs := []struct { - prName string - prStatus v1beta1.PipelineRunStatus - trs []*v1beta1.TaskRun - expectedPrStatus v1beta1.PipelineRunStatus - }{ - { - prName: "no-status-no-taskruns", - prStatus: v1beta1.PipelineRunStatus{}, - trs: nil, - expectedPrStatus: v1beta1.PipelineRunStatus{}, - }, { - prName: "status-no-taskruns", - prStatus: prStatusWithCondition, - trs: nil, - expectedPrStatus: prStatusWithCondition, - }, { - prName: "status-nil-taskruns", - prStatus: prStatusWithEmptyTaskRuns, - trs: []*v1beta1.TaskRun{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pr-task-1-xxyyy", - Labels: map[string]string{ - pipeline.PipelineTaskLabelKey: "task-1", - }, - OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, - }, - }, - }, - expectedPrStatus: prStatusRecoveredSimple, - }, { - prName: "status-missing-taskruns", - prStatus: prStatusWithCondition, - trs: []*v1beta1.TaskRun{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pr-task-3-xxyyy", - Labels: map[string]string{ - pipeline.PipelineTaskLabelKey: "task-3", - }, - OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pr-task-3-successful-condition-check-xxyyy", - Labels: map[string]string{ - pipeline.PipelineTaskLabelKey: "task-3", - pipeline.ConditionCheckKey: "pr-task-3-successful-condition-check-xxyyy", - pipeline.ConditionNameKey: "successful-condition", - }, - OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, - }, - }, - }, - expectedPrStatus: prStatusWithCondition, - }, { - prName: "status-matching-taskruns-pr", - prStatus: prStatusWithCondition, - trs: allTaskRuns, - expectedPrStatus: prStatusWithCondition, - }, { - prName: "orphaned-taskruns-pr", - prStatus: prStatusWithOrphans, - trs: allTaskRuns, - expectedPrStatus: prStatusRecovered, - }, { - prName: "tr-from-another-pr", - prStatus: prStatusWithEmptyTaskRuns, - trs: taskRunsFromAnotherPR, - expectedPrStatus: prStatusWithEmptyTaskRuns, - }, - } + 
for _, cc := range cr.ConditionChecks { + if trStatusForPipelineRun.ConditionChecks == nil { + trStatusForPipelineRun.ConditionChecks = make(map[string]*v1beta1.PipelineRunConditionCheckStatus) + } + trStatusForPipelineRun.ConditionChecks[cc.ConditionCheckName] = &v1beta1.PipelineRunConditionCheckStatus{ + ConditionName: cc.ConditionName, + Status: cc.Status, + } + } - for _, tc := range tcs { - t.Run(tc.prName, func(t *testing.T) { - logger := logtesting.TestLogger(t) + tr, _ := clients.Pipeline.TektonV1beta1().TaskRuns("foo").Get(ctx, cr.Name, metav1.GetOptions{}) + if tr != nil { + trStatusForPipelineRun.Status = &tr.Status + } - pr := &v1beta1.PipelineRun{ - ObjectMeta: metav1.ObjectMeta{Name: tc.prName, UID: prUID}, - Status: tc.prStatus, - } + taskRunsStatus[cr.Name] = trStatusForPipelineRun + } else if cr.Kind == "Run" { + rStatusForPipelineRun := &v1beta1.PipelineRunRunStatus{ + PipelineTaskName: cr.PipelineTaskName, + WhenExpressions: cr.WhenExpressions, + } - updatePipelineRunStatusFromTaskRuns(logger, pr, tc.trs) - actualPrStatus := pr.Status - - // The TaskRun keys for recovered taskruns will contain a new random key, appended to the - // base name that we expect. Replace the random part so we can diff the whole structure - actualTaskRuns := actualPrStatus.PipelineRunStatusFields.TaskRuns - if actualTaskRuns != nil { - fixedTaskRuns := make(map[string]*v1beta1.PipelineRunTaskRunStatus) - re := regexp.MustCompile(`^[a-z\-]*?-task-[0-9]`) - for k, v := range actualTaskRuns { - newK := re.FindString(k) - fixedTaskRuns[newK+"-xxyyy"] = v + r, _ := clients.Pipeline.TektonV1alpha1().Runs("foo").Get(ctx, cr.Name, metav1.GetOptions{}) + if r != nil { + rStatusForPipelineRun.Status = &r.Status } - actualPrStatus.PipelineRunStatusFields.TaskRuns = fixedTaskRuns - } - if d := cmp.Diff(tc.expectedPrStatus, actualPrStatus); d != "" { - t.Errorf("expected the PipelineRun status to match %#v. 
Diff %s", tc.expectedPrStatus, diff.PrintWantGot(d)) + runsStatus[cr.Name] = rStatusForPipelineRun } - }) + } + if d := cmp.Diff(expectedTaskRunsStatus, taskRunsStatus); d != "" { + t.Fatalf("Expected PipelineRun status to match TaskRun(s) status, but got a mismatch: %s", d) + } + if d := cmp.Diff(expectedRunsStatus, runsStatus); d != "" { + t.Fatalf("Expected PipelineRun status to match Run(s) status, but got a mismatch: %s", d) + } } } @@ -7418,8 +7652,15 @@ spec: t.Errorf("Expected reason %q but was %s", v1beta1.PipelineRunReasonRunning.String(), condition.Reason) } - if len(reconciledRun.Status.TaskRuns) != 1 { - t.Errorf("Expected PipelineRun status to include the TaskRun status items that can run immediately: %v", reconciledRun.Status.TaskRuns) + if shouldHaveFullEmbeddedStatus(cms[0].Data[embeddedStatusFeatureFlag]) { + if len(reconciledRun.Status.TaskRuns) != 1 { + t.Errorf("Expected PipelineRun status to include the TaskRun status items that can run immediately: %v", reconciledRun.Status.TaskRuns) + } + } + if shouldHaveMinimalEmbeddedStatus(cms[0].Data[embeddedStatusFeatureFlag]) { + if len(reconciledRun.Status.ChildReferences) != 1 { + t.Errorf("Expected PipelineRun status to include the ChildReferences status items that can run immediately: %v", reconciledRun.Status.ChildReferences) + } } wantCloudEvents := []string{ @@ -7435,6 +7676,36 @@ spec: // this test validates taskSpec metadata is embedded into task run func TestReconcilePipeline_TaskSpecMetadata(t *testing.T) { + testCases := []struct { + name string + embeddedStatusVal string + }{ + { + name: "default embedded status", + embeddedStatusVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedStatusVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedStatusVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedStatusVal: config.MinimalEmbeddedStatus, + }, + } + + for _, tc := range testCases { + 
t.Run(tc.name, func(t *testing.T) { + runTestReconcilePipelineTaskSpecMetadata(t, tc.embeddedStatusVal) + }) + } +} + +func runTestReconcilePipelineTaskSpecMetadata(t *testing.T, embeddedStatus string) { names.TestingSeed() prs := []*v1beta1.PipelineRun{ @@ -7478,6 +7749,7 @@ spec: d := test.Data{ PipelineRuns: prs, Pipelines: ps, + ConfigMaps: getConfigMapsWithEmbeddedStatus(embeddedStatus), } prt := newPipelineRunTest(d, t) defer prt.Cancel() @@ -7525,8 +7797,15 @@ spec: t.Fatalf("Expected TaskRuns to match, but got a mismatch: %s", d) } - if len(reconciledRun.Status.TaskRuns) != 2 { - t.Errorf("Expected PipelineRun status to include both TaskRun status items that can run immediately: %v", reconciledRun.Status.TaskRuns) + if shouldHaveFullEmbeddedStatus(embeddedStatus) { + if len(reconciledRun.Status.TaskRuns) != 2 { + t.Errorf("Expected PipelineRun status to include both TaskRun status items that can run immediately: %v", reconciledRun.Status.TaskRuns) + } + } + if shouldHaveMinimalEmbeddedStatus(embeddedStatus) { + if len(reconciledRun.Status.ChildReferences) != 2 { + t.Errorf("Expected PipelineRun status ChildReferences to include both TaskRun status items that can run immediately: %v", reconciledRun.Status.ChildReferences) + } } } @@ -7957,6 +8236,36 @@ func (prt PipelineRunTest) reconcileRun(namespace, pipelineRunName string, wantE } func TestReconcile_RemotePipelineRef(t *testing.T) { + testCases := []struct { + name string + embeddedStatusVal string + }{ + { + name: "default embedded status", + embeddedStatusVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedStatusVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedStatusVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedStatusVal: config.MinimalEmbeddedStatus, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runTestReconcileRemotePipelineRef(t, tc.embeddedStatusVal) + }) 
+ } +} + +func runTestReconcileRemotePipelineRef(t *testing.T, embeddedStatus string) { names.TestingSeed() ctx := context.Background() @@ -8002,6 +8311,7 @@ func TestReconcile_RemotePipelineRef(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()}, Data: map[string]string{ "enable-tekton-oci-bundles": "true", + embeddedStatusFeatureFlag: embeddedStatus, }, }, } @@ -8089,11 +8399,22 @@ func TestReconcile_RemotePipelineRef(t *testing.T) { t.Errorf("Expected reason %q but was %s", v1beta1.PipelineRunReasonRunning.String(), condition.Reason) } - if len(reconciledRun.Status.TaskRuns) != 1 { - t.Errorf("Expected PipelineRun status to include the TaskRun status item that ran immediately: %v", reconciledRun.Status.TaskRuns) + if shouldHaveFullEmbeddedStatus(embeddedStatus) { + if len(reconciledRun.Status.TaskRuns) != 1 { + t.Errorf("Expected PipelineRun status to include the TaskRun status item that ran immediately: %v", reconciledRun.Status.TaskRuns) + } + if _, exists := reconciledRun.Status.TaskRuns["test-pipeline-run-success-unit-test-1"]; !exists { + t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.TaskRuns) + } } - if _, exists := reconciledRun.Status.TaskRuns["test-pipeline-run-success-unit-test-1"]; !exists { - t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.TaskRuns) + + if shouldHaveMinimalEmbeddedStatus(embeddedStatus) { + if len(reconciledRun.Status.ChildReferences) != 1 { + t.Errorf("Expected PipelineRun status ChildReferences to include the TaskRun status item that ran immediately: %v", reconciledRun.Status.ChildReferences) + } + if reconciledRun.Status.ChildReferences[0].Name != "test-pipeline-run-success-unit-test-1" { + t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.ChildReferences) + } } } @@ -8101,6 +8422,36 @@ func 
TestReconcile_RemotePipelineRef(t *testing.T) { // a Task and a Pipeline can be omitted by a PipelineRun and the run will still start // successfully without an error. func TestReconcile_OptionalWorkspacesOmitted(t *testing.T) { + testCases := []struct { + name string + embeddedStatusVal string + }{ + { + name: "default embedded status", + embeddedStatusVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedStatusVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedStatusVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedStatusVal: config.MinimalEmbeddedStatus, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runTestReconcileOptionalWorkspacesOmitted(t, tc.embeddedStatusVal) + }) + } +} + +func runTestReconcileOptionalWorkspacesOmitted(t *testing.T, embeddedStatus string) { names.TestingSeed() ctx := context.Background() @@ -8144,6 +8495,7 @@ func TestReconcile_OptionalWorkspacesOmitted(t *testing.T) { ServiceAccounts: []*corev1.ServiceAccount{{ ObjectMeta: metav1.ObjectMeta{Name: prs[0].Spec.ServiceAccountName, Namespace: "foo"}, }}, + ConfigMaps: getConfigMapsWithEmbeddedStatus(embeddedStatus), } prt := newPipelineRunTest(d, t) @@ -8202,11 +8554,22 @@ func TestReconcile_OptionalWorkspacesOmitted(t *testing.T) { t.Errorf("Expected reason %q but was %s", v1beta1.PipelineRunReasonRunning.String(), condition.Reason) } - if len(reconciledRun.Status.TaskRuns) != 1 { - t.Errorf("Expected PipelineRun status to include the TaskRun status item that ran immediately: %v", reconciledRun.Status.TaskRuns) + if shouldHaveFullEmbeddedStatus(embeddedStatus) { + if len(reconciledRun.Status.TaskRuns) != 1 { + t.Errorf("Expected PipelineRun status to include the TaskRun status item that ran immediately: %v", reconciledRun.Status.TaskRuns) + } + if _, exists := reconciledRun.Status.TaskRuns["test-pipeline-run-success-unit-test-1"]; !exists { + 
t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.TaskRuns) + } } - if _, exists := reconciledRun.Status.TaskRuns["test-pipeline-run-success-unit-test-1"]; !exists { - t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.TaskRuns) + + if shouldHaveMinimalEmbeddedStatus(embeddedStatus) { + if len(reconciledRun.Status.ChildReferences) != 1 { + t.Errorf("Expected PipelineRun status ChildReferences to include the TaskRun status item that ran immediately: %v", reconciledRun.Status.ChildReferences) + } + if reconciledRun.Status.ChildReferences[0].Name != "test-pipeline-run-success-unit-test-1" { + t.Errorf("Expected PipelineRun status to include TaskRun status but was %v", reconciledRun.Status.ChildReferences) + } } } @@ -8566,3 +8929,11 @@ spec: status: startTime: %s`, prName, specStatus, now.Format(time.RFC3339))) } + +func shouldHaveFullEmbeddedStatus(embeddedVal string) bool { + return embeddedVal == config.FullEmbeddedStatus || embeddedVal == config.BothEmbeddedStatus +} + +func shouldHaveMinimalEmbeddedStatus(embeddedVal string) bool { + return embeddedVal == config.MinimalEmbeddedStatus || embeddedVal == config.BothEmbeddedStatus +} diff --git a/pkg/reconciler/pipelinerun/pipelinerun_updatestatus_test.go b/pkg/reconciler/pipelinerun/pipelinerun_updatestatus_test.go new file mode 100644 index 00000000000..e2e83d00d58 --- /dev/null +++ b/pkg/reconciler/pipelinerun/pipelinerun_updatestatus_test.go @@ -0,0 +1,1589 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pipelinerun + +import ( + "context" + "fmt" + "regexp" + "sort" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/test" + "github.com/tektoncd/pipeline/test/diff" + "github.com/tektoncd/pipeline/test/names" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "knative.dev/pkg/apis" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" + logtesting "knative.dev/pkg/logging/testing" +) + +func TestUpdatePipelineRunStatusFromInformer(t *testing.T) { + testCases := []struct { + name string + embeddedStatusVal string + }{ + { + name: "default embedded status", + embeddedStatusVal: config.DefaultEmbeddedStatus, + }, + { + name: "full embedded status", + embeddedStatusVal: config.FullEmbeddedStatus, + }, + { + name: "both embedded status", + embeddedStatusVal: config.BothEmbeddedStatus, + }, + { + name: "minimal embedded status", + embeddedStatusVal: config.MinimalEmbeddedStatus, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + names.TestingSeed() + + pr := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pipeline-run", + Namespace: "foo", + Labels: map[string]string{"mylabel": "myvale"}, + }, + Spec: v1beta1.PipelineRunSpec{ + PipelineSpec: &v1beta1.PipelineSpec{ + Tasks: 
[]v1beta1.PipelineTask{ + { + Name: "unit-test-task-spec", + TaskSpec: &v1beta1.EmbeddedTask{ + TaskSpec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "mystep", + Image: "myimage"}}}, + }, + }, + }, + { + Name: "custom-task-ref", + TaskRef: &v1beta1.TaskRef{ + APIVersion: "example.dev/v0", + Kind: "Example", + Name: "some-custom-task", + }, + }, + }, + }, + }, + } + + cms := getConfigMapsWithEmbeddedStatus(tc.embeddedStatusVal) + cms[0].Data["enable-custom-tasks"] = "true" + d := test.Data{ + PipelineRuns: []*v1beta1.PipelineRun{pr}, + ConfigMaps: cms, + } + prt := newPipelineRunTest(d, t) + defer prt.Cancel() + + wantEvents := []string{ + "Normal Started", + "Normal Running Tasks Completed: 0", + } + + // Reconcile the PipelineRun. This creates a Taskrun. + reconciledRun, clients := prt.reconcileRun("foo", "test-pipeline-run", wantEvents, false) + + // Save the name of the TaskRun and Run that were created. + taskRunName := "" + runName := "" + if shouldHaveFullEmbeddedStatus(tc.embeddedStatusVal) { + if len(reconciledRun.Status.TaskRuns) != 1 { + t.Fatalf("Expected 1 TaskRun but got %d", len(reconciledRun.Status.TaskRuns)) + } + for k := range reconciledRun.Status.TaskRuns { + taskRunName = k + break + } + if len(reconciledRun.Status.Runs) != 1 { + t.Fatalf("Expected 1 Run but got %d", len(reconciledRun.Status.Runs)) + } + for k := range reconciledRun.Status.Runs { + runName = k + break + } + } + if shouldHaveMinimalEmbeddedStatus(tc.embeddedStatusVal) { + if len(reconciledRun.Status.ChildReferences) != 2 { + t.Fatalf("Expected 2 ChildReferences but got %d", len(reconciledRun.Status.ChildReferences)) + } + for _, cr := range reconciledRun.Status.ChildReferences { + if cr.Kind == "TaskRun" { + taskRunName = cr.Name + } + if cr.Kind == "Run" { + runName = cr.Name + } + } + } + + if taskRunName == "" { + t.Fatal("expected to find a TaskRun name, but didn't") + } + if runName == "" { + t.Fatal("expected to find a Run name, but 
didn't") + } + + // Add a label to the PipelineRun. This tests a scenario in issue 3126 which could prevent the reconciler + // from finding TaskRuns that are missing from the status. + reconciledRun.ObjectMeta.Labels["bah"] = "humbug" + reconciledRun, err := clients.Pipeline.TektonV1beta1().PipelineRuns("foo").Update(prt.TestAssets.Ctx, reconciledRun, metav1.UpdateOptions{}) + if err != nil { + t.Fatalf("unexpected error when updating status: %v", err) + } + + // The label update triggers another reconcile. Depending on timing, the PipelineRun passed to the reconcile may or may not + // have the updated status with the name of the created TaskRun. Clear the status because we want to test the case where the + // status does not have the TaskRun. + reconciledRun.Status = v1beta1.PipelineRunStatus{} + if _, err := clients.Pipeline.TektonV1beta1().PipelineRuns("foo").UpdateStatus(prt.TestAssets.Ctx, reconciledRun, metav1.UpdateOptions{}); err != nil { + t.Fatalf("unexpected error when updating status: %v", err) + } + + reconciledRun, _ = prt.reconcileRun("foo", "test-pipeline-run", wantEvents, false) + + // Verify that the reconciler found the existing TaskRun instead of creating a new one. 
+ if shouldHaveFullEmbeddedStatus(tc.embeddedStatusVal) { + if len(reconciledRun.Status.TaskRuns) != 1 { + t.Fatalf("Expected 1 TaskRun after label change but got %d", len(reconciledRun.Status.TaskRuns)) + } + for k := range reconciledRun.Status.TaskRuns { + if k != taskRunName { + t.Fatalf("Status has unexpected taskrun %s", k) + } + } + if len(reconciledRun.Status.Runs) != 1 { + t.Fatalf("Expected 1 Run after label change but got %d", len(reconciledRun.Status.Runs)) + } + for k := range reconciledRun.Status.Runs { + if k != runName { + t.Fatalf("Status has unexpected Run %s", k) + } + } + } + if shouldHaveMinimalEmbeddedStatus(tc.embeddedStatusVal) { + if len(reconciledRun.Status.ChildReferences) != 2 { + t.Fatalf("Expected 2 ChildReferences after label change but got %d", len(reconciledRun.Status.ChildReferences)) + } + for _, cr := range reconciledRun.Status.ChildReferences { + if cr.Kind == "TaskRun" && cr.Name != taskRunName { + t.Errorf("Status has unexpected taskrun %s", cr.Name) + } + if cr.Kind == "Run" && cr.Name != runName { + t.Errorf("Status has unexpected Run %s", cr.Name) + } + } + } + }) + } +} + +type updateStatusTaskRunsData struct { + withConditions map[string]*v1beta1.PipelineRunTaskRunStatus + missingTaskRun map[string]*v1beta1.PipelineRunTaskRunStatus + foundTaskRun map[string]*v1beta1.PipelineRunTaskRunStatus + recovered map[string]*v1beta1.PipelineRunTaskRunStatus + simple map[string]*v1beta1.PipelineRunTaskRunStatus +} + +func getUpdateStatusTaskRunsData() updateStatusTaskRunsData { + // PipelineRunConditionCheckStatus recovered by updatePipelineRunStatusFromTaskRuns + // It does not include the status, which is then retrieved via the regular reconcile + prccs2Recovered := map[string]*v1beta1.PipelineRunConditionCheckStatus{ + "pr-task-2-running-condition-check-xxyyy": { + ConditionName: "running-condition-0", + }, + } + prccs3Recovered := map[string]*v1beta1.PipelineRunConditionCheckStatus{ + "pr-task-3-successful-condition-check-xxyyy": 
{ + ConditionName: "successful-condition-0", + }, + } + prccs4Recovered := map[string]*v1beta1.PipelineRunConditionCheckStatus{ + "pr-task-4-failed-condition-check-xxyyy": { + ConditionName: "failed-condition-0", + }, + } + + // PipelineRunConditionCheckStatus full is used to test the behaviour of updatePipelineRunStatusFromTaskRuns + // when no orphan TaskRuns are found, to check we don't alter good ones + prccs2Full := map[string]*v1beta1.PipelineRunConditionCheckStatus{ + "pr-task-2-running-condition-check-xxyyy": { + ConditionName: "running-condition-0", + Status: &v1beta1.ConditionCheckStatus{ + ConditionCheckStatusFields: v1beta1.ConditionCheckStatusFields{ + Check: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + }, + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown}}, + }, + }, + }, + } + prccs3Full := map[string]*v1beta1.PipelineRunConditionCheckStatus{ + "pr-task-3-successful-condition-check-xxyyy": { + ConditionName: "successful-condition-0", + Status: &v1beta1.ConditionCheckStatus{ + ConditionCheckStatusFields: v1beta1.ConditionCheckStatusFields{ + Check: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: 0}, + }, + }, + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue}}, + }, + }, + }, + } + prccs4Full := map[string]*v1beta1.PipelineRunConditionCheckStatus{ + "pr-task-4-failed-condition-check-xxyyy": { + ConditionName: "failed-condition-0", + Status: &v1beta1.ConditionCheckStatus{ + ConditionCheckStatusFields: v1beta1.ConditionCheckStatusFields{ + Check: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: 127}, + }, + }, + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse}}, + }, + }, + }, + } + + return updateStatusTaskRunsData{ + withConditions: 
map[string]*v1beta1.PipelineRunTaskRunStatus{ + "pr-task-1-xxyyy": { + PipelineTaskName: "task-1", + Status: &v1beta1.TaskRunStatus{}, + }, + "pr-task-2-xxyyy": { + PipelineTaskName: "task-2", + Status: nil, + ConditionChecks: prccs2Full, + }, + "pr-task-3-xxyyy": { + PipelineTaskName: "task-3", + Status: &v1beta1.TaskRunStatus{}, + ConditionChecks: prccs3Full, + }, + "pr-task-4-xxyyy": { + PipelineTaskName: "task-4", + Status: nil, + ConditionChecks: prccs4Full, + }, + }, + missingTaskRun: map[string]*v1beta1.PipelineRunTaskRunStatus{ + "pr-task-1-xxyyy": { + PipelineTaskName: "task-1", + Status: &v1beta1.TaskRunStatus{}, + }, + "pr-task-2-xxyyy": { + PipelineTaskName: "task-2", + Status: nil, + ConditionChecks: prccs2Full, + }, + "pr-task-4-xxyyy": { + PipelineTaskName: "task-4", + Status: nil, + ConditionChecks: prccs4Full, + }, + }, + foundTaskRun: map[string]*v1beta1.PipelineRunTaskRunStatus{ + "pr-task-1-xxyyy": { + PipelineTaskName: "task-1", + Status: &v1beta1.TaskRunStatus{}, + }, + "pr-task-2-xxyyy": { + PipelineTaskName: "task-2", + Status: nil, + ConditionChecks: prccs2Full, + }, + "pr-task-3-xxyyy": { + PipelineTaskName: "task-3", + Status: &v1beta1.TaskRunStatus{}, + ConditionChecks: prccs3Recovered, + }, + "pr-task-4-xxyyy": { + PipelineTaskName: "task-4", + Status: nil, + ConditionChecks: prccs4Full, + }, + }, + recovered: map[string]*v1beta1.PipelineRunTaskRunStatus{ + "pr-task-1-xxyyy": { + PipelineTaskName: "task-1", + Status: &v1beta1.TaskRunStatus{}, + }, + "orphaned-taskruns-pr-task-2-xxyyy": { + PipelineTaskName: "task-2", + Status: nil, + ConditionChecks: prccs2Recovered, + }, + "pr-task-3-xxyyy": { + PipelineTaskName: "task-3", + Status: &v1beta1.TaskRunStatus{}, + ConditionChecks: prccs3Recovered, + }, + "orphaned-taskruns-pr-task-4-xxyyy": { + PipelineTaskName: "task-4", + Status: nil, + ConditionChecks: prccs4Recovered, + }, + }, + simple: map[string]*v1beta1.PipelineRunTaskRunStatus{ + "pr-task-1-xxyyy": { + PipelineTaskName: "task-1", 
+ Status: &v1beta1.TaskRunStatus{}, + }, + }, + } +} + +func TestUpdatePipelineRunStatusFromTaskRuns(t *testing.T) { + + prUID := types.UID("11111111-1111-1111-1111-111111111111") + otherPrUID := types.UID("22222222-2222-2222-2222-222222222222") + + taskRunsPRStatusData := getUpdateStatusTaskRunsData() + + prRunningStatus := duckv1beta1.Status{ + Conditions: []apis.Condition{ + { + Type: "Succeeded", + Status: "Unknown", + Reason: "Running", + Message: "Not all Tasks in the Pipeline have finished executing", + }, + }, + } + + prStatusWithCondition := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + TaskRuns: taskRunsPRStatusData.withConditions, + }, + } + + prStatusMissingTaskRun := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + TaskRuns: taskRunsPRStatusData.missingTaskRun, + }, + } + + prStatusFoundTaskRun := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + TaskRuns: taskRunsPRStatusData.foundTaskRun, + }, + } + + prStatusWithEmptyTaskRuns := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{}, + }, + } + + prStatusWithOrphans := v1beta1.PipelineRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{ + { + Type: "Succeeded", + Status: "Unknown", + Reason: "Running", + Message: "Not all Tasks in the Pipeline have finished executing", + }, + }, + }, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{}, + }, + } + + prStatusRecovered := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + TaskRuns: taskRunsPRStatusData.recovered, + }, + } + + prStatusRecoveredSimple := v1beta1.PipelineRunStatus{ + Status: 
prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + TaskRuns: taskRunsPRStatusData.simple, + }, + } + + allTaskRuns := []*v1beta1.TaskRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-1", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-2-running-condition-check-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-2", + pipeline.ConditionCheckKey: "pr-task-2-running-condition-check-xxyyy", + pipeline.ConditionNameKey: "running-condition", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-3-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-3", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-3-successful-condition-check-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-3", + pipeline.ConditionCheckKey: "pr-task-3-successful-condition-check-xxyyy", + pipeline.ConditionNameKey: "successful-condition", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-4-failed-condition-check-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-4", + pipeline.ConditionCheckKey: "pr-task-4-failed-condition-check-xxyyy", + pipeline.ConditionNameKey: "failed-condition", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + } + + taskRunsFromAnotherPR := []*v1beta1.TaskRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-1", + }, + OwnerReferences: []metav1.OwnerReference{{UID: otherPrUID}}, + }, + }, + } + + taskRunsWithNoOwner := []*v1beta1.TaskRun{ + { + ObjectMeta: 
metav1.ObjectMeta{ + Name: "pr-task-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-1", + }, + }, + }, + } + + tcs := []struct { + prName string + prStatus v1beta1.PipelineRunStatus + trs []*v1beta1.TaskRun + expectedPrStatus v1beta1.PipelineRunStatus + }{ + { + prName: "no-status-no-taskruns", + prStatus: v1beta1.PipelineRunStatus{}, + trs: nil, + expectedPrStatus: v1beta1.PipelineRunStatus{}, + }, { + prName: "status-no-taskruns", + prStatus: prStatusWithCondition, + trs: nil, + expectedPrStatus: prStatusWithCondition, + }, { + prName: "status-nil-taskruns", + prStatus: prStatusWithEmptyTaskRuns, + trs: []*v1beta1.TaskRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-1", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + }, + expectedPrStatus: prStatusRecoveredSimple, + }, { + prName: "status-missing-taskruns", + prStatus: prStatusMissingTaskRun, + trs: []*v1beta1.TaskRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-3-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-3", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-3-successful-condition-check-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-3", + pipeline.ConditionCheckKey: "pr-task-3-successful-condition-check-xxyyy", + pipeline.ConditionNameKey: "successful-condition", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + }, + expectedPrStatus: prStatusFoundTaskRun, + }, { + prName: "status-matching-taskruns-pr", + prStatus: prStatusWithCondition, + trs: allTaskRuns, + expectedPrStatus: prStatusWithCondition, + }, { + prName: "orphaned-taskruns-pr", + prStatus: prStatusWithOrphans, + trs: allTaskRuns, + expectedPrStatus: prStatusRecovered, + }, { + prName: "tr-from-another-pr", + prStatus: 
prStatusWithEmptyTaskRuns, + trs: taskRunsFromAnotherPR, + expectedPrStatus: prStatusWithEmptyTaskRuns, + }, { + prName: "tr-with-no-owner", + prStatus: prStatusWithEmptyTaskRuns, + trs: taskRunsWithNoOwner, + expectedPrStatus: prStatusWithEmptyTaskRuns, + }, + } + + for _, tc := range tcs { + t.Run(tc.prName, func(t *testing.T) { + logger := logtesting.TestLogger(t) + + pr := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: tc.prName, UID: prUID}, + Status: tc.prStatus, + } + + updatePipelineRunStatusFromTaskRuns(logger, pr, tc.trs, nil) + actualPrStatus := pr.Status + + expectedPRStatus := tc.expectedPrStatus + + // The TaskRun keys for recovered taskruns will contain a new random key, appended to the + // base name that we expect. Replace the random part so we can diff the whole structure + actualTaskRuns := actualPrStatus.PipelineRunStatusFields.TaskRuns + if actualTaskRuns != nil { + fixedTaskRuns := make(map[string]*v1beta1.PipelineRunTaskRunStatus) + re := regexp.MustCompile(`^[a-z\-]*?-task-[0-9]`) + for k, v := range actualTaskRuns { + newK := re.FindString(k) + fixedTaskRuns[newK+"-xxyyy"] = v + } + actualPrStatus.PipelineRunStatusFields.TaskRuns = fixedTaskRuns + } + + if d := cmp.Diff(expectedPRStatus, actualPrStatus); d != "" { + t.Errorf("expected the PipelineRun status to match %#v. 
Diff %s", expectedPRStatus, diff.PrintWantGot(d)) + } + }) + } +} + +type updateStatusChildRefsData struct { + withConditions []v1beta1.ChildStatusReference + missingTaskRun []v1beta1.ChildStatusReference + foundTaskRun []v1beta1.ChildStatusReference + missingRun []v1beta1.ChildStatusReference + recovered []v1beta1.ChildStatusReference + simple []v1beta1.ChildStatusReference + simpleRun []v1beta1.ChildStatusReference +} + +func getUpdateStatusChildRefsData() updateStatusChildRefsData { + // PipelineRunChildConditionCheckStatus recovered by updatePipelineRunStatusFromChildRefs + // It does not include the status, which is then retrieved via the regular reconcile + prccs2Recovered := []*v1beta1.PipelineRunChildConditionCheckStatus{{ + ConditionCheckName: "pr-task-2-running-condition-check-xxyyy", + PipelineRunConditionCheckStatus: v1beta1.PipelineRunConditionCheckStatus{ + ConditionName: "running-condition-0", + }, + }} + prccs3Recovered := []*v1beta1.PipelineRunChildConditionCheckStatus{{ + ConditionCheckName: "pr-task-3-successful-condition-check-xxyyy", + PipelineRunConditionCheckStatus: v1beta1.PipelineRunConditionCheckStatus{ + ConditionName: "successful-condition-0", + }, + }} + prccs4Recovered := []*v1beta1.PipelineRunChildConditionCheckStatus{{ + ConditionCheckName: "pr-task-4-failed-condition-check-xxyyy", + PipelineRunConditionCheckStatus: v1beta1.PipelineRunConditionCheckStatus{ + ConditionName: "failed-condition-0", + }, + }} + + // PipelineRunChildConditionCheckStatus full is used to test the behaviour of updatePipelineRunStatusFromChildRefs + // when no orphan TaskRuns are found, to check we don't alter good ones + prccs2Full := []*v1beta1.PipelineRunChildConditionCheckStatus{{ + ConditionCheckName: "pr-task-2-running-condition-check-xxyyy", + PipelineRunConditionCheckStatus: v1beta1.PipelineRunConditionCheckStatus{ + ConditionName: "running-condition-0", + Status: &v1beta1.ConditionCheckStatus{ + ConditionCheckStatusFields: 
v1beta1.ConditionCheckStatusFields{ + Check: corev1.ContainerState{ + Running: &corev1.ContainerStateRunning{}, + }, + }, + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown}}, + }, + }, + }, + }} + prccs3Full := []*v1beta1.PipelineRunChildConditionCheckStatus{{ + ConditionCheckName: "pr-task-3-successful-condition-check-xxyyy", + PipelineRunConditionCheckStatus: v1beta1.PipelineRunConditionCheckStatus{ + ConditionName: "successful-condition-0", + Status: &v1beta1.ConditionCheckStatus{ + ConditionCheckStatusFields: v1beta1.ConditionCheckStatusFields{ + Check: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: 0}, + }, + }, + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue}}, + }, + }, + }, + }} + prccs4Full := []*v1beta1.PipelineRunChildConditionCheckStatus{{ + ConditionCheckName: "pr-task-4-failed-condition-check-xxyyy", + PipelineRunConditionCheckStatus: v1beta1.PipelineRunConditionCheckStatus{ + ConditionName: "failed-condition-0", + Status: &v1beta1.ConditionCheckStatus{ + ConditionCheckStatusFields: v1beta1.ConditionCheckStatusFields{ + Check: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: 127}, + }, + }, + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse}}, + }, + }, + }, + }} + + return updateStatusChildRefsData{ + withConditions: []v1beta1.ChildStatusReference{ + childRefForPipelineTask("pr-task-1-xxyyy", "task-1", "TaskRun", "v1beta1", nil, nil), + childRefForPipelineTask("pr-task-2-xxyyy", "task-2", "TaskRun", "v1beta1", nil, prccs2Full), + childRefForPipelineTask("pr-task-3-xxyyy", "task-3", "TaskRun", "v1beta1", nil, prccs3Full), + childRefForPipelineTask("pr-task-4-xxyyy", "task-4", "TaskRun", "v1beta1", nil, prccs4Full), + childRefForPipelineTask("pr-run-6-xxyyy", "task-6", 
"Run", "v1alpha1", nil, nil), + }, + missingTaskRun: []v1beta1.ChildStatusReference{ + childRefForPipelineTask("pr-task-1-xxyyy", "task-1", "TaskRun", "v1beta1", nil, nil), + childRefForPipelineTask("pr-task-2-xxyyy", "task-2", "TaskRun", "v1beta1", nil, prccs2Full), + childRefForPipelineTask("pr-task-4-xxyyy", "task-4", "TaskRun", "v1beta1", nil, prccs4Full), + childRefForPipelineTask("pr-run-6-xxyyy", "task-6", "Run", "v1alpha1", nil, nil), + }, + foundTaskRun: []v1beta1.ChildStatusReference{ + childRefForPipelineTask("pr-task-1-xxyyy", "task-1", "TaskRun", "v1beta1", nil, nil), + childRefForPipelineTask("pr-task-2-xxyyy", "task-2", "TaskRun", "v1beta1", nil, prccs2Full), + childRefForPipelineTask("pr-task-3-xxyyy", "task-3", "TaskRun", "v1beta1", nil, prccs3Recovered), + childRefForPipelineTask("pr-task-4-xxyyy", "task-4", "TaskRun", "v1beta1", nil, prccs4Full), + childRefForPipelineTask("pr-run-6-xxyyy", "task-6", "Run", "v1alpha1", nil, nil), + }, + missingRun: []v1beta1.ChildStatusReference{ + childRefForPipelineTask("pr-task-1-xxyyy", "task-1", "TaskRun", "v1beta1", nil, nil), + childRefForPipelineTask("pr-task-2-xxyyy", "task-2", "TaskRun", "v1beta1", nil, prccs2Full), + childRefForPipelineTask("pr-task-3-xxyyy", "task-3", "TaskRun", "v1beta1", nil, prccs3Full), + childRefForPipelineTask("pr-task-4-xxyyy", "task-4", "TaskRun", "v1beta1", nil, prccs4Full), + }, + recovered: []v1beta1.ChildStatusReference{ + childRefForPipelineTask("pr-task-1-xxyyy", "task-1", "TaskRun", "v1beta1", nil, nil), + childRefForPipelineTask("orphaned-taskruns-pr-task-2-xxyyy", "task-2", "TaskRun", "v1beta1", nil, prccs2Recovered), + childRefForPipelineTask("pr-task-3-xxyyy", "task-3", "TaskRun", "v1beta1", nil, prccs3Recovered), + childRefForPipelineTask("orphaned-taskruns-pr-task-4-xxyyy", "task-4", "TaskRun", "v1beta1", nil, prccs4Recovered), + childRefForPipelineTask("pr-run-6-xxyyy", "task-6", "Run", "v1alpha1", nil, nil), + }, + simple: []v1beta1.ChildStatusReference{ + 
childRefForPipelineTask("pr-task-1-xxyyy", "task-1", "TaskRun", "v1beta1", nil, nil), + }, + simpleRun: []v1beta1.ChildStatusReference{ + childRefForPipelineTask("pr-run-6-xxyyy", "run-6", "Run", "v1alpha1", nil, nil), + }, + } +} + +func TestUpdatePipelineRunStatusFromChildRefs(t *testing.T) { + prUID := types.UID("11111111-1111-1111-1111-111111111111") + otherPrUID := types.UID("22222222-2222-2222-2222-222222222222") + + childRefsPRStatusData := getUpdateStatusChildRefsData() + + prRunningStatus := duckv1beta1.Status{ + Conditions: []apis.Condition{ + { + Type: "Succeeded", + Status: "Unknown", + Reason: "Running", + Message: "Not all Tasks in the Pipeline have finished executing", + }, + }, + } + + prStatusWithCondition := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + ChildReferences: childRefsPRStatusData.withConditions, + }, + } + + prStatusMissingTaskRun := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + ChildReferences: childRefsPRStatusData.missingTaskRun, + }, + } + + prStatusFoundTaskRun := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + ChildReferences: childRefsPRStatusData.foundTaskRun, + }, + } + + prStatusMissingRun := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + ChildReferences: childRefsPRStatusData.missingRun, + }, + } + + prStatusWithEmptyChildRefs := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{}, + } + + prStatusWithOrphans := v1beta1.PipelineRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{ + { + Type: "Succeeded", + Status: "Unknown", + Reason: "Running", + Message: "Not all Tasks in the Pipeline have finished executing", + }, + }, + }, + PipelineRunStatusFields: 
v1beta1.PipelineRunStatusFields{}, + } + + prStatusRecovered := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + ChildReferences: childRefsPRStatusData.recovered, + }, + } + + prStatusRecoveredSimple := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + ChildReferences: childRefsPRStatusData.simple, + }, + } + + prStatusRecoveredSimpleWithRun := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + ChildReferences: []v1beta1.ChildStatusReference{{ + TypeMeta: runtime.TypeMeta{ + APIVersion: "tekton.dev/v1alpha1", + Kind: "Run", + }, + Name: "pr-run-6-xxyyy", + PipelineTaskName: "task-6", + }}, + }, + } + + allTaskRuns := []*v1beta1.TaskRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-1", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-2-running-condition-check-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-2", + pipeline.ConditionCheckKey: "pr-task-2-running-condition-check-xxyyy", + pipeline.ConditionNameKey: "running-condition", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-3-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-3", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-3-successful-condition-check-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-3", + pipeline.ConditionCheckKey: "pr-task-3-successful-condition-check-xxyyy", + pipeline.ConditionNameKey: "successful-condition", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: 
metav1.ObjectMeta{ + Name: "pr-task-4-failed-condition-check-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-4", + pipeline.ConditionCheckKey: "pr-task-4-failed-condition-check-xxyyy", + pipeline.ConditionNameKey: "failed-condition", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + } + + taskRunsFromAnotherPR := []*v1beta1.TaskRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-1", + }, + OwnerReferences: []metav1.OwnerReference{{UID: otherPrUID}}, + }, + }, + } + + runsFromAnotherPR := []*v1alpha1.Run{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-1", + }, + OwnerReferences: []metav1.OwnerReference{{UID: otherPrUID}}, + }, + }, + } + + taskRunsWithNoOwner := []*v1beta1.TaskRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-1", + }, + }, + }, + } + + runsWithNoOwner := []*v1alpha1.Run{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-1", + }, + }, + }, + } + + tcs := []struct { + prName string + prStatus v1beta1.PipelineRunStatus + trs []*v1beta1.TaskRun + runs []*v1alpha1.Run + expectedPrStatus v1beta1.PipelineRunStatus + }{ + { + prName: "no-status-no-taskruns-or-runs", + prStatus: v1beta1.PipelineRunStatus{}, + trs: nil, + runs: nil, + expectedPrStatus: v1beta1.PipelineRunStatus{}, + }, { + prName: "status-no-taskruns-or-runs", + prStatus: prStatusWithCondition, + trs: nil, + runs: nil, + expectedPrStatus: prStatusWithCondition, + }, { + prName: "status-nil-taskruns", + prStatus: prStatusWithEmptyChildRefs, + trs: []*v1beta1.TaskRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-1", + }, + 
OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + }, + expectedPrStatus: prStatusRecoveredSimple, + }, { + prName: "status-nil-runs", + prStatus: prStatusWithEmptyChildRefs, + runs: []*v1alpha1.Run{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-6-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-6", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }}, + expectedPrStatus: prStatusRecoveredSimpleWithRun, + }, { + prName: "status-missing-taskruns", + prStatus: prStatusMissingTaskRun, + trs: []*v1beta1.TaskRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-3-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-3", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-3-successful-condition-check-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-3", + pipeline.ConditionCheckKey: "pr-task-3-successful-condition-check-xxyyy", + pipeline.ConditionNameKey: "successful-condition", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + }, + expectedPrStatus: prStatusFoundTaskRun, + }, { + prName: "status-missing-runs", + prStatus: prStatusMissingRun, + runs: []*v1alpha1.Run{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-6-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-6", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }}, + expectedPrStatus: prStatusWithCondition, + }, { + prName: "status-matching-taskruns-pr", + prStatus: prStatusWithCondition, + trs: allTaskRuns, + expectedPrStatus: prStatusWithCondition, + }, { + prName: "orphaned-taskruns-pr", + prStatus: prStatusWithOrphans, + trs: allTaskRuns, + runs: []*v1alpha1.Run{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-6-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-6", + }, + OwnerReferences: []metav1.OwnerReference{{UID: 
prUID}}, + }, + }}, + expectedPrStatus: prStatusRecovered, + }, { + prName: "tr-and-run-from-another-pr", + prStatus: prStatusWithEmptyChildRefs, + trs: taskRunsFromAnotherPR, + runs: runsFromAnotherPR, + expectedPrStatus: prStatusWithEmptyChildRefs, + }, { + prName: "tr-and-run-with-no-owner", + prStatus: prStatusWithEmptyChildRefs, + trs: taskRunsWithNoOwner, + runs: runsWithNoOwner, + expectedPrStatus: prStatusWithEmptyChildRefs, + }, + } + + for _, tc := range tcs { + t.Run(tc.prName, func(t *testing.T) { + logger := logtesting.TestLogger(t) + + pr := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: tc.prName, UID: prUID}, + Status: tc.prStatus, + } + + _ = updatePipelineRunStatusFromChildRefs(logger, pr, tc.trs, tc.runs) + + actualPrStatus := pr.Status + + actualChildRefs := actualPrStatus.ChildReferences + if len(actualChildRefs) != 0 { + var fixedChildRefs []v1beta1.ChildStatusReference + re := regexp.MustCompile(`^[a-z\-]*?-(task|run)-[0-9]`) + for _, cr := range actualChildRefs { + cr.Name = fmt.Sprintf("%s-xxyyy", re.FindString(cr.Name)) + fixedChildRefs = append(fixedChildRefs, cr) + } + actualPrStatus.ChildReferences = fixedChildRefs + } + + // Sort the ChildReferences to deal with annoying ordering issues. + sort.Slice(actualPrStatus.ChildReferences, func(i, j int) bool { + return actualPrStatus.ChildReferences[i].PipelineTaskName < actualPrStatus.ChildReferences[j].PipelineTaskName + }) + + if d := cmp.Diff(tc.expectedPrStatus, actualPrStatus); d != "" { + t.Errorf("expected the PipelineRun status to match %#v. 
Diff %s", tc.expectedPrStatus, diff.PrintWantGot(d)) + } + }) + } +} + +func TestUpdatePipelineRunStatusFromSlices(t *testing.T) { + prUID := types.UID("11111111-1111-1111-1111-111111111111") + + childRefsPRStatusData := getUpdateStatusChildRefsData() + taskRunsPRStatusData := getUpdateStatusTaskRunsData() + + prRunningStatus := duckv1beta1.Status{ + Conditions: []apis.Condition{ + { + Type: "Succeeded", + Status: "Unknown", + Reason: "Running", + Message: "Not all Tasks in the Pipeline have finished executing", + }, + }, + } + + prStatusWithOrphans := v1beta1.PipelineRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{ + { + Type: "Succeeded", + Status: "Unknown", + Reason: "Running", + Message: "Not all Tasks in the Pipeline have finished executing", + }, + }, + }, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{}, + } + + prStatusWithEmptyEverything := func() v1beta1.PipelineRunStatus { + return v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + Runs: map[string]*v1beta1.PipelineRunRunStatus{}, + TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{}, + }, + } + } + + allTaskRuns := []*v1beta1.TaskRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-1", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-2-running-condition-check-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-2", + pipeline.ConditionCheckKey: "pr-task-2-running-condition-check-xxyyy", + pipeline.ConditionNameKey: "running-condition", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-3-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-3", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { 
+ ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-3-successful-condition-check-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-3", + pipeline.ConditionCheckKey: "pr-task-3-successful-condition-check-xxyyy", + pipeline.ConditionNameKey: "successful-condition", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-4-failed-condition-check-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-4", + pipeline.ConditionCheckKey: "pr-task-4-failed-condition-check-xxyyy", + pipeline.ConditionNameKey: "failed-condition", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + } + + tcs := []struct { + prName string + prStatus func() v1beta1.PipelineRunStatus + trs []*v1beta1.TaskRun + runs []*v1alpha1.Run + expectedStatusTRs map[string]*v1beta1.PipelineRunTaskRunStatus + expectedStatusRuns map[string]*v1beta1.PipelineRunRunStatus + expectedStatusCRs []v1beta1.ChildStatusReference + }{ + { + prName: "status-nil-taskruns", + prStatus: prStatusWithEmptyEverything, + trs: []*v1beta1.TaskRun{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-task-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-1", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + }, + expectedStatusCRs: childRefsPRStatusData.simple, + expectedStatusTRs: taskRunsPRStatusData.simple, + }, { + prName: "status-nil-runs", + prStatus: prStatusWithEmptyEverything, + runs: []*v1alpha1.Run{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-6-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "run-6", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }}, + expectedStatusCRs: childRefsPRStatusData.simpleRun, + expectedStatusRuns: map[string]*v1beta1.PipelineRunRunStatus{ + "pr-run-6-xxyyy": { + PipelineTaskName: "run-6", + Status: &v1alpha1.RunStatus{}, + }, + }, + }, { + prName: 
"orphaned-taskruns-pr", + prStatus: func() v1beta1.PipelineRunStatus { return prStatusWithOrphans }, + trs: allTaskRuns, + runs: []*v1alpha1.Run{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-6-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "task-6", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }}, + expectedStatusTRs: taskRunsPRStatusData.recovered, + expectedStatusCRs: childRefsPRStatusData.recovered, + expectedStatusRuns: map[string]*v1beta1.PipelineRunRunStatus{ + "pr-run-6-xxyyy": { + PipelineTaskName: "task-6", + Status: &v1alpha1.RunStatus{}, + }, + }, + }, + } + + for _, tc := range tcs { + for _, embeddedVal := range valuesForEmbeddedStatus { + t.Run(fmt.Sprintf("%s with %s embedded status", tc.prName, embeddedVal), func(t *testing.T) { + ctx := context.Background() + cfg := config.NewStore(logtesting.TestLogger(t)) + cfg.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName()}, + Data: map[string]string{ + embeddedStatusFeatureFlag: embeddedVal, + }, + }) + ctx = cfg.ToContext(ctx) + logger := logtesting.TestLogger(t) + + pr := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: tc.prName, UID: prUID}, + Status: tc.prStatus(), + } + + updatePipelineRunStatusFromSlices(ctx, logger, pr, tc.trs, tc.runs) + + actualPrStatus := pr.Status + + // The TaskRun keys for recovered taskruns will contain a new random key, appended to the + // base name that we expect. 
Replace the random part so we can diff the whole structure + actualTaskRuns := actualPrStatus.PipelineRunStatusFields.TaskRuns + if actualTaskRuns != nil { + fixedTaskRuns := make(map[string]*v1beta1.PipelineRunTaskRunStatus) + re := regexp.MustCompile(`^[a-z\-]*?-task-[0-9]`) + for k, v := range actualTaskRuns { + newK := re.FindString(k) + fixedTaskRuns[newK+"-xxyyy"] = v + } + actualPrStatus.PipelineRunStatusFields.TaskRuns = fixedTaskRuns + } + + actualChildRefs := actualPrStatus.ChildReferences + if len(actualChildRefs) != 0 { + var fixedChildRefs []v1beta1.ChildStatusReference + re := regexp.MustCompile(`^[a-z\-]*?-(task|run)-[0-9]`) + for _, cr := range actualChildRefs { + cr.Name = fmt.Sprintf("%s-xxyyy", re.FindString(cr.Name)) + fixedChildRefs = append(fixedChildRefs, cr) + } + actualPrStatus.ChildReferences = fixedChildRefs + } + + // Sort the ChildReferences to deal with annoying ordering issues. + sort.Slice(actualPrStatus.ChildReferences, func(i, j int) bool { + return actualPrStatus.ChildReferences[i].PipelineTaskName < actualPrStatus.ChildReferences[j].PipelineTaskName + }) + + expectedPRStatus := prStatusFromInputs(embeddedVal, prRunningStatus, tc.expectedStatusTRs, tc.expectedStatusRuns, tc.expectedStatusCRs) + + if d := cmp.Diff(expectedPRStatus, actualPrStatus); d != "" { + t.Errorf("expected the PipelineRun status to match %#v. 
Diff %s", expectedPRStatus, diff.PrintWantGot(d)) + } + }) + } + } +} + +func TestUpdatePipelineRunStatusFromRuns(t *testing.T) { + + prUID := types.UID("11111111-1111-1111-1111-111111111111") + otherPrUID := types.UID("22222222-2222-2222-2222-222222222222") + + prRunningStatus := duckv1beta1.Status{ + Conditions: []apis.Condition{ + { + Type: "Succeeded", + Status: "Unknown", + Reason: "Running", + Message: "Not all Tasks in the Pipeline have finished executing", + }, + }, + } + + prStatusWithSomeRuns := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + Runs: map[string]*v1beta1.PipelineRunRunStatus{ + "pr-run-1-xxyyy": { + PipelineTaskName: "run-1", + Status: &v1alpha1.RunStatus{}, + }, + "pr-run-2-xxyyy": { + PipelineTaskName: "run-2", + Status: nil, + }, + }, + }, + } + + prStatusWithAllRuns := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + Runs: map[string]*v1beta1.PipelineRunRunStatus{ + "pr-run-1-xxyyy": { + PipelineTaskName: "run-1", + Status: &v1alpha1.RunStatus{}, + }, + "pr-run-2-xxyyy": { + PipelineTaskName: "run-2", + Status: nil, + }, + "pr-run-3-xxyyy": { + PipelineTaskName: "run-3", + Status: &v1alpha1.RunStatus{}, + }, + }, + }, + } + + prStatusWithEmptyRuns := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + Runs: map[string]*v1beta1.PipelineRunRunStatus{}, + }, + } + + prStatusRecoveredSimple := v1beta1.PipelineRunStatus{ + Status: prRunningStatus, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + Runs: map[string]*v1beta1.PipelineRunRunStatus{ + "pr-run-1-xxyyy": { + PipelineTaskName: "run-1", + Status: &v1alpha1.RunStatus{}, + }, + }, + }, + } + + allRuns := []*v1alpha1.Run{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "run-1", + }, + OwnerReferences: 
[]metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-2-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "run-2", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-3-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "run-3", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + } + + runsFromAnotherPR := []*v1alpha1.Run{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "run-1", + }, + OwnerReferences: []metav1.OwnerReference{{UID: otherPrUID}}, + }, + }, + } + + runsWithNoOwner := []*v1alpha1.Run{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "run-1", + }, + }, + }, + } + + tcs := []struct { + prName string + prStatus v1beta1.PipelineRunStatus + runs []*v1alpha1.Run + expectedPrStatus v1beta1.PipelineRunStatus + }{ + { + prName: "no-status-no-runs", + prStatus: v1beta1.PipelineRunStatus{}, + runs: nil, + expectedPrStatus: v1beta1.PipelineRunStatus{}, + }, { + prName: "status-no-runs", + prStatus: prStatusWithSomeRuns, + runs: nil, + expectedPrStatus: prStatusWithSomeRuns, + }, { + prName: "status-nil-runs", + prStatus: prStatusWithEmptyRuns, + runs: []*v1alpha1.Run{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-1-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "run-1", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }, + }, + expectedPrStatus: prStatusRecoveredSimple, + }, { + prName: "status-missing-runs", + prStatus: prStatusWithSomeRuns, + runs: []*v1alpha1.Run{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-run-3-xxyyy", + Labels: map[string]string{ + pipeline.PipelineTaskLabelKey: "run-3", + }, + OwnerReferences: []metav1.OwnerReference{{UID: prUID}}, + }, + }}, + 
expectedPrStatus: prStatusWithAllRuns, + }, { + prName: "status-matching-runs-pr", + prStatus: prStatusWithAllRuns, + runs: allRuns, + expectedPrStatus: prStatusWithAllRuns, + }, { + prName: "run-from-another-pr", + prStatus: prStatusWithEmptyRuns, + runs: runsFromAnotherPR, + expectedPrStatus: prStatusWithEmptyRuns, + }, { + prName: "run-with-no-owner", + prStatus: prStatusWithEmptyRuns, + runs: runsWithNoOwner, + expectedPrStatus: prStatusWithEmptyRuns, + }, + } + + for _, tc := range tcs { + t.Run(tc.prName, func(t *testing.T) { + logger := logtesting.TestLogger(t) + + pr := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: tc.prName, UID: prUID}, + Status: tc.prStatus, + } + + updatePipelineRunStatusFromRuns(logger, pr, tc.runs) + actualPrStatus := pr.Status + + if d := cmp.Diff(tc.expectedPrStatus, actualPrStatus); d != "" { + t.Errorf("expected the PipelineRun status to match %#v. Diff %s", tc.expectedPrStatus, diff.PrintWantGot(d)) + } + }) + } +} + +func childRefForPipelineTask(taskRunName, pipelineTaskName, kind, apiVersion string, whenExpressions []v1beta1.WhenExpression, + conditionChecks []*v1beta1.PipelineRunChildConditionCheckStatus) v1beta1.ChildStatusReference { + return v1beta1.ChildStatusReference{ + TypeMeta: runtime.TypeMeta{ + APIVersion: fmt.Sprintf("%s/%s", pipeline.GroupName, apiVersion), + Kind: kind, + }, + Name: taskRunName, + PipelineTaskName: pipelineTaskName, + WhenExpressions: whenExpressions, + ConditionChecks: conditionChecks, + } +} + +func prStatusFromInputs(embeddedStatus string, status duckv1beta1.Status, taskRuns map[string]*v1beta1.PipelineRunTaskRunStatus, runs map[string]*v1beta1.PipelineRunRunStatus, childRefs []v1beta1.ChildStatusReference) v1beta1.PipelineRunStatus { + prs := v1beta1.PipelineRunStatus{ + Status: status, + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{}, + Runs: map[string]*v1beta1.PipelineRunRunStatus{}, + }, + } + if 
shouldHaveFullEmbeddedStatus(embeddedStatus) { + for k, v := range taskRuns { + trStatus := *v + prs.TaskRuns[k] = &trStatus + } + for k, v := range runs { + runStatus := *v + prs.Runs[k] = &runStatus + } + } + if shouldHaveMinimalEmbeddedStatus(embeddedStatus) { + prs.ChildReferences = append(prs.ChildReferences, childRefs...) + // Sort the ChildReferences to deal with annoying ordering issues. + sort.Slice(prs.ChildReferences, func(i, j int) bool { + return prs.ChildReferences[i].PipelineTaskName < prs.ChildReferences[j].PipelineTaskName + }) + } + + return prs +} diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go index 0b08e103db5..18c14ef1602 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go @@ -502,14 +502,14 @@ func ResolvePipelineRunTask( } rprt.CustomTask = isCustomTask(ctx, rprt) if rprt.IsCustomTask() { - rprt.RunName = getRunName(pipelineRun.Status.Runs, task.Name, pipelineRun.Name) + rprt.RunName = getRunName(pipelineRun.Status.Runs, pipelineRun.Status.ChildReferences, task.Name, pipelineRun.Name) run, err := getRun(rprt.RunName) if err != nil && !errors.IsNotFound(err) { return nil, fmt.Errorf("error retrieving Run %s: %w", rprt.RunName, err) } rprt.Run = run } else { - rprt.TaskRunName = GetTaskRunName(pipelineRun.Status.TaskRuns, task.Name, pipelineRun.Name) + rprt.TaskRunName = GetTaskRunName(pipelineRun.Status.TaskRuns, pipelineRun.Status.ChildReferences, task.Name, pipelineRun.Name) // Find the Task that this PipelineTask is using var ( @@ -560,7 +560,7 @@ func ResolvePipelineRunTask( // Get all conditions that this pipelineTask will be using, if any if len(task.Conditions) > 0 { - rcc, err := resolveConditionChecks(&task, pipelineRun.Status.TaskRuns, rprt.TaskRunName, getTaskRun, getCondition, providedResources) + rcc, err := resolveConditionChecks(&task, 
pipelineRun.Status.TaskRuns, pipelineRun.Status.ChildReferences, rprt.TaskRunName, getTaskRun, getCondition, providedResources) if err != nil { return nil, err } @@ -571,7 +571,16 @@ func ResolvePipelineRunTask( } // getConditionCheckName should return a unique name for a `ConditionCheck` if one has not already been defined, and the existing one otherwise. -func getConditionCheckName(taskRunStatus map[string]*v1beta1.PipelineRunTaskRunStatus, trName, conditionRegisterName string) string { +func getConditionCheckName(taskRunStatus map[string]*v1beta1.PipelineRunTaskRunStatus, childRefs []v1beta1.ChildStatusReference, trName, conditionRegisterName string) string { + for _, cr := range childRefs { + if cr.Name == trName { + for _, cc := range cr.ConditionChecks { + if cc.ConditionName == conditionRegisterName { + return cc.ConditionCheckName + } + } + } + } trStatus, ok := taskRunStatus[trName] if ok && trStatus.ConditionChecks != nil { for k, v := range trStatus.ConditionChecks { @@ -585,7 +594,13 @@ func getConditionCheckName(taskRunStatus map[string]*v1beta1.PipelineRunTaskRunS } // GetTaskRunName should return a unique name for a `TaskRun` if one has not already been defined, and the existing one otherwise. -func GetTaskRunName(taskRunsStatus map[string]*v1beta1.PipelineRunTaskRunStatus, ptName, prName string) string { +func GetTaskRunName(taskRunsStatus map[string]*v1beta1.PipelineRunTaskRunStatus, childRefs []v1beta1.ChildStatusReference, ptName, prName string) string { + for _, cr := range childRefs { + if cr.Kind == "TaskRun" && cr.PipelineTaskName == ptName { + return cr.Name + } + } + for k, v := range taskRunsStatus { if v.PipelineTaskName == ptName { return k @@ -597,16 +612,23 @@ func GetTaskRunName(taskRunsStatus map[string]*v1beta1.PipelineRunTaskRunStatus, // getRunName should return a unique name for a `Run` if one has not already // been defined, and the existing one otherwise. 
-func getRunName(runsStatus map[string]*v1beta1.PipelineRunRunStatus, ptName, prName string) string { +func getRunName(runsStatus map[string]*v1beta1.PipelineRunRunStatus, childRefs []v1beta1.ChildStatusReference, ptName, prName string) string { + for _, cr := range childRefs { + if cr.Kind == "Run" && cr.PipelineTaskName == ptName { + return cr.Name + } + } + for k, v := range runsStatus { if v.PipelineTaskName == ptName { return k } } + return kmeta.ChildName(prName, fmt.Sprintf("-%s", ptName)) } -func resolveConditionChecks(pt *v1beta1.PipelineTask, taskRunStatus map[string]*v1beta1.PipelineRunTaskRunStatus, taskRunName string, getTaskRun resources.GetTaskRun, getCondition GetCondition, providedResources map[string]*resourcev1alpha1.PipelineResource) ([]*ResolvedConditionCheck, error) { +func resolveConditionChecks(pt *v1beta1.PipelineTask, taskRunStatus map[string]*v1beta1.PipelineRunTaskRunStatus, childRefs []v1beta1.ChildStatusReference, taskRunName string, getTaskRun resources.GetTaskRun, getCondition GetCondition, providedResources map[string]*resourcev1alpha1.PipelineResource) ([]*ResolvedConditionCheck, error) { rccs := []*ResolvedConditionCheck{} for i := range pt.Conditions { ptc := pt.Conditions[i] @@ -619,7 +641,7 @@ func resolveConditionChecks(pt *v1beta1.PipelineTask, taskRunStatus map[string]* Msg: err.Error(), } } - conditionCheckName := getConditionCheckName(taskRunStatus, taskRunName, crName) + conditionCheckName := getConditionCheckName(taskRunStatus, childRefs, taskRunName, crName) // TODO(#3133): Also handle Custom Task Runs (getRun here) cctr, err := getTaskRun(conditionCheckName) if err != nil { diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go index bf0de9b04b7..1290aac0d54 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go @@ -3444,6 
+3444,11 @@ func TestGetTaskRunName(t *testing.T) { PipelineTaskName: "task1", }, } + childRefs := []v1beta1.ChildStatusReference{{ + TypeMeta: runtime.TypeMeta{Kind: "TaskRun"}, + Name: "taskrun-for-task1", + PipelineTaskName: "task1", + }} for _, tc := range []struct { name string @@ -3478,8 +3483,12 @@ func TestGetTaskRunName(t *testing.T) { if tc.prName != "" { testPrName = tc.prName } - gotTrName := GetTaskRunName(taskRunsStatus, tc.ptName, testPrName) - if d := cmp.Diff(tc.wantTrName, gotTrName); d != "" { + trNameFromTRStatus := GetTaskRunName(taskRunsStatus, nil, tc.ptName, testPrName) + if d := cmp.Diff(tc.wantTrName, trNameFromTRStatus); d != "" { + t.Errorf("GetTaskRunName: %s", diff.PrintWantGot(d)) + } + trNameFromChildRefs := GetTaskRunName(nil, childRefs, tc.ptName, testPrName) + if d := cmp.Diff(tc.wantTrName, trNameFromChildRefs); d != "" { t.Errorf("GetTaskRunName: %s", diff.PrintWantGot(d)) } }) @@ -3493,6 +3502,11 @@ func TestGetRunName(t *testing.T) { PipelineTaskName: "task1", }, } + childRefs := []v1beta1.ChildStatusReference{{ + TypeMeta: runtime.TypeMeta{Kind: "Run"}, + Name: "run-for-task1", + PipelineTaskName: "task1", + }} for _, tc := range []struct { name string @@ -3527,8 +3541,12 @@ func TestGetRunName(t *testing.T) { if tc.prName != "" { testPrName = tc.prName } - gotTrName := getRunName(runsStatus, tc.ptName, testPrName) - if d := cmp.Diff(tc.wantTrName, gotTrName); d != "" { + rnFromRunsStatus := getRunName(runsStatus, nil, tc.ptName, testPrName) + if d := cmp.Diff(tc.wantTrName, rnFromRunsStatus); d != "" { + t.Errorf("GetTaskRunName: %s", diff.PrintWantGot(d)) + } + rnFromChildRefs := getRunName(nil, childRefs, tc.ptName, testPrName) + if d := cmp.Diff(tc.wantTrName, rnFromChildRefs); d != "" { t.Errorf("GetTaskRunName: %s", diff.PrintWantGot(d)) } }) diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go b/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go index d1dc65eecee..925e12db457 100644 --- 
a/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go @@ -26,6 +26,7 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" "knative.dev/pkg/apis" @@ -222,6 +223,71 @@ func (state PipelineRunState) GetRunsStatus(pr *v1beta1.PipelineRun) map[string] return status } +// GetChildReferences returns a slice of references, including version, kind, name, and pipeline task name, for all +// TaskRuns and Runs in the state. +func (state PipelineRunState) GetChildReferences(taskRunVersion string, runVersion string) []v1beta1.ChildStatusReference { + var childRefs []v1beta1.ChildStatusReference + + for _, rprt := range state { + if rprt.ResolvedConditionChecks == nil && ((rprt.CustomTask && rprt.Run == nil) || (!rprt.CustomTask && rprt.TaskRun == nil)) { + continue + } + + var childAPIVersion string + var childTaskKind string + var childName string + var childConditions []*v1beta1.PipelineRunChildConditionCheckStatus + + if rprt.CustomTask { + childName = rprt.RunName + childTaskKind = "Run" + + if rprt.Run != nil { + childAPIVersion = rprt.Run.APIVersion + } else { + childAPIVersion = runVersion + } + } else { + childName = rprt.TaskRunName + childTaskKind = "TaskRun" + + if rprt.TaskRun != nil { + childAPIVersion = rprt.TaskRun.APIVersion + } else { + childAPIVersion = taskRunVersion + } + if len(rprt.ResolvedConditionChecks) > 0 { + for _, c := range rprt.ResolvedConditionChecks { + condCheck := &v1beta1.PipelineRunChildConditionCheckStatus{ + PipelineRunConditionCheckStatus: v1beta1.PipelineRunConditionCheckStatus{ + ConditionName: c.ConditionRegisterName, + }, + ConditionCheckName: c.ConditionCheckName, + } + if c.ConditionCheck != nil { + condCheck.Status = c.NewConditionCheckStatus() + } + + childConditions = append(childConditions, condCheck) + } 
+ } + } + + childRefs = append(childRefs, v1beta1.ChildStatusReference{ + TypeMeta: runtime.TypeMeta{ + APIVersion: childAPIVersion, + Kind: childTaskKind, + }, + Name: childName, + PipelineTaskName: rprt.PipelineTask.Name, + WhenExpressions: rprt.PipelineTask.WhenExpressions, + ConditionChecks: childConditions, + }) + + } + return childRefs +} + // getNextTasks returns a list of tasks which should be executed next i.e. // a list of tasks from candidateTasks which aren't yet indicated in state to be running and // a list of cancelled/failed tasks from candidateTasks which haven't exhausted their retries diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go b/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go index ae29af9594d..62fb509ca46 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go @@ -22,8 +22,12 @@ import ( "testing" "time" - "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "k8s.io/apimachinery/pkg/runtime" + + "k8s.io/apimachinery/pkg/selection" + + "github.com/google/go-cmp/cmp" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" @@ -34,7 +38,6 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" "knative.dev/pkg/apis" @@ -2314,3 +2317,309 @@ func conditionCheckFromTaskRun(tr *v1beta1.TaskRun) *v1beta1.ConditionCheck { cc := v1beta1.ConditionCheck(*tr) return &cc } + +func TestPipelineRunState_GetChildReferences(t *testing.T) { + successConditionCheckName := "success-condition" + failingConditionCheckName := "fail-condition" + + successCondition := &v1alpha1.Condition{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"cond-1", + Namespace: "foo", + }, + } + failingCondition := &v1alpha1.Condition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cond-2", + Namespace: "foo", + }, + } + + successConditionCheck := v1beta1.ConditionCheck(v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: successConditionCheckName}, + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: duckv1beta1.Conditions{ + apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionTrue, + }, + }, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: int32(0)}, + }, + }}, + }, + }, + }) + failingConditionCheck := v1beta1.ConditionCheck(v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: failingConditionCheckName}, + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: duckv1beta1.Conditions{ + apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + }, + }, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: int32(127)}, + }, + }}, + }, + }, + }) + + successrcc := ResolvedConditionCheck{ + ConditionRegisterName: successCondition.Name + "-0", + ConditionCheckName: successConditionCheckName, + Condition: successCondition, + ConditionCheck: &successConditionCheck, + } + failingrcc := ResolvedConditionCheck{ + ConditionRegisterName: failingCondition.Name + "-0", + ConditionCheckName: failingConditionCheckName, + Condition: failingCondition, + ConditionCheck: &failingConditionCheck, + } + + successConditionCheckStatus := &v1beta1.PipelineRunChildConditionCheckStatus{ + PipelineRunConditionCheckStatus: v1beta1.PipelineRunConditionCheckStatus{ + ConditionName: successrcc.ConditionRegisterName, + Status: &v1beta1.ConditionCheckStatus{ + ConditionCheckStatusFields: 
v1beta1.ConditionCheckStatusFields{ + Check: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: 0}, + }, + }, + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue}}, + }, + }, + }, + ConditionCheckName: successConditionCheckName, + } + failingConditionCheckStatus := &v1beta1.PipelineRunChildConditionCheckStatus{ + PipelineRunConditionCheckStatus: v1beta1.PipelineRunConditionCheckStatus{ + ConditionName: failingrcc.ConditionRegisterName, + Status: &v1beta1.ConditionCheckStatus{ + ConditionCheckStatusFields: v1beta1.ConditionCheckStatusFields{ + Check: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: 127}, + }, + }, + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse}}, + }, + }, + }, + ConditionCheckName: failingConditionCheckName, + } + + testCases := []struct { + name string + state PipelineRunState + childRefs []v1beta1.ChildStatusReference + }{ + { + name: "no-tasks", + state: PipelineRunState{}, + childRefs: nil, + }, + { + name: "unresolved-task", + state: PipelineRunState{{ + TaskRunName: "unresolved-task-run", + PipelineTask: &v1beta1.PipelineTask{ + Name: "unresolved-task-1", + TaskRef: &v1beta1.TaskRef{ + Name: "unresolved-task", + Kind: "Task", + APIVersion: "v1beta1", + }, + }, + }}, + childRefs: nil, + }, + { + name: "unresolved-custom-task", + state: PipelineRunState{{ + RunName: "unresolved-custom-task-run", + CustomTask: true, + PipelineTask: &v1beta1.PipelineTask{ + Name: "unresolved-custom-task-1", + TaskRef: &v1beta1.TaskRef{ + APIVersion: "example.dev/v0", + Kind: "Example", + Name: "unresolved-custom-task", + }, + }, + }}, + childRefs: nil, + }, + { + name: "single-task", + state: PipelineRunState{{ + TaskRunName: "single-task-run", + PipelineTask: &v1beta1.PipelineTask{ + Name: "single-task-1", + TaskRef: &v1beta1.TaskRef{ + Name: "single-task", + 
Kind: "Task", + APIVersion: "v1beta1", + }, + WhenExpressions: []v1beta1.WhenExpression{{ + Input: "foo", + Operator: selection.In, + Values: []string{"foo", "bar"}, + }}, + }, + TaskRun: &v1beta1.TaskRun{ + TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1beta1"}, + ObjectMeta: metav1.ObjectMeta{Name: "single-task-run"}, + }, + }}, + childRefs: []v1beta1.ChildStatusReference{{ + TypeMeta: runtime.TypeMeta{ + APIVersion: "tekton.dev/v1beta1", + Kind: "TaskRun", + }, + Name: "single-task-run", + PipelineTaskName: "single-task-1", + WhenExpressions: []v1beta1.WhenExpression{{ + Input: "foo", + Operator: selection.In, + Values: []string{"foo", "bar"}, + }}, + }}, + }, + { + name: "task-with-condition-check", + state: PipelineRunState{{ + TaskRunName: "task-with-condition-check-run", + PipelineTask: &v1beta1.PipelineTask{ + Name: "task-with-condition-check-1", + TaskRef: &v1beta1.TaskRef{ + Name: "task-with-condition-check", + Kind: "Task", + APIVersion: "v1beta1", + }, + }, + ResolvedConditionChecks: TaskConditionCheckState{&successrcc, &failingrcc}, + }}, + childRefs: []v1beta1.ChildStatusReference{{ + TypeMeta: runtime.TypeMeta{ + APIVersion: "tekton.dev/v1beta1", + Kind: "TaskRun", + }, + Name: "task-with-condition-check-run", + PipelineTaskName: "task-with-condition-check-1", + ConditionChecks: []*v1beta1.PipelineRunChildConditionCheckStatus{ + successConditionCheckStatus, + failingConditionCheckStatus, + }, + }}, + }, + { + name: "single-custom-task", + state: PipelineRunState{{ + RunName: "single-custom-task-run", + CustomTask: true, + PipelineTask: &v1beta1.PipelineTask{ + Name: "single-custom-task-1", + TaskRef: &v1beta1.TaskRef{ + APIVersion: "example.dev/v0", + Kind: "Example", + Name: "single-custom-task", + }, + WhenExpressions: []v1beta1.WhenExpression{{ + Input: "foo", + Operator: selection.In, + Values: []string{"foo", "bar"}, + }}, + }, + Run: &v1alpha1.Run{ + TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1alpha1"}, + ObjectMeta: 
metav1.ObjectMeta{Name: "single-custom-task-run"}, + }, + }}, + childRefs: []v1beta1.ChildStatusReference{{ + TypeMeta: runtime.TypeMeta{ + APIVersion: "tekton.dev/v1alpha1", + Kind: "Run", + }, + Name: "single-custom-task-run", + PipelineTaskName: "single-custom-task-1", + WhenExpressions: []v1beta1.WhenExpression{{ + Input: "foo", + Operator: selection.In, + Values: []string{"foo", "bar"}, + }}, + }}, + }, + { + name: "task-and-custom-task", + state: PipelineRunState{{ + TaskRunName: "single-task-run", + PipelineTask: &v1beta1.PipelineTask{ + Name: "single-task-1", + TaskRef: &v1beta1.TaskRef{ + Name: "single-task", + Kind: "Task", + APIVersion: "v1beta1", + }, + }, + TaskRun: &v1beta1.TaskRun{ + TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1beta1"}, + ObjectMeta: metav1.ObjectMeta{Name: "single-task-run"}, + }, + }, { + RunName: "single-custom-task-run", + CustomTask: true, + PipelineTask: &v1beta1.PipelineTask{ + Name: "single-custom-task-1", + TaskRef: &v1beta1.TaskRef{ + APIVersion: "example.dev/v0", + Kind: "Example", + Name: "single-custom-task", + }, + }, + Run: &v1alpha1.Run{ + TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{Name: "single-custom-task-run"}, + }, + }}, + childRefs: []v1beta1.ChildStatusReference{{ + TypeMeta: runtime.TypeMeta{ + APIVersion: "tekton.dev/v1beta1", + Kind: "TaskRun", + }, + Name: "single-task-run", + PipelineTaskName: "single-task-1", + }, { + TypeMeta: runtime.TypeMeta{ + APIVersion: "tekton.dev/v1alpha1", + Kind: "Run", + }, + Name: "single-custom-task-run", + PipelineTaskName: "single-custom-task-1", + }}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + childRefs := tc.state.GetChildReferences(v1beta1.SchemeGroupVersion.String(), v1alpha1.SchemeGroupVersion.String()) + if d := cmp.Diff(tc.childRefs, childRefs); d != "" { + t.Errorf("Didn't get expected child references for %s: %s", tc.name, diff.PrintWantGot(d)) + } + + }) + } +} diff 
--git a/test/custom_task_test.go b/test/custom_task_test.go index 17e67d0344a..726192637d4 100644 --- a/test/custom_task_test.go +++ b/test/custom_task_test.go @@ -27,6 +27,8 @@ import ( "testing" "time" + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/test/parse" "github.com/google/go-cmp/cmp" @@ -50,7 +52,6 @@ const ( var supportedFeatureGates = map[string]string{ "enable-custom-tasks": "true", "enable-api-fields": "alpha", - "embedded-status": "full", } func TestCustomTask(t *testing.T) { @@ -60,6 +61,9 @@ func TestCustomTask(t *testing.T) { c, namespace := setup(ctx, t, requireAnyGate(supportedFeatureGates)) knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) + + embeddedStatusValue := GetEmbeddedStatusValue(ctx, t, c.KubeClient) + customTaskRawSpec := []byte(`{"field1":123,"field2":"value"}`) metadataLabel := map[string]string{"test-label": "test"} // Create a PipelineRun that runs a Custom Task. @@ -122,11 +126,25 @@ spec: } // Get the Run name. - if len(pr.Status.Runs) != 2 { - t.Fatalf("PipelineRun had unexpected .status.runs; got %d, want 2", len(pr.Status.Runs)) + var runNames []string + if embeddedStatusValue != config.MinimalEmbeddedStatus { + if len(pr.Status.Runs) != 2 { + t.Fatalf("PipelineRun had unexpected .status.runs; got %d, want 2", len(pr.Status.Runs)) + } + for rn := range pr.Status.Runs { + runNames = append(runNames, rn) + } + } else { + for _, cr := range pr.Status.ChildReferences { + if cr.Kind == "Run" { + runNames = append(runNames, cr.Name) + } + } + if len(runNames) != 2 { + t.Fatalf("PipelineRun had unexpected number of Runs in .status.childReferences; got %d, want 2", len(runNames)) + } } - - for runName := range pr.Status.Runs { + for _, runName := range runNames { // Get the Run. r, err := c.RunClient.Get(ctx, runName, metav1.GetOptions{}) if err != nil { @@ -186,13 +204,25 @@ spec: } // Get the TaskRun name. 
- if len(pr.Status.TaskRuns) != 1 { - t.Fatalf("PipelineRun had unexpected .status.taskRuns; got %d, want 1", len(pr.Status.TaskRuns)) - } var taskRunName string - for k := range pr.Status.TaskRuns { - taskRunName = k - break + + if embeddedStatusValue != config.MinimalEmbeddedStatus { + if len(pr.Status.TaskRuns) != 1 { + t.Fatalf("PipelineRun had unexpected .status.taskRuns; got %d, want 1", len(pr.Status.TaskRuns)) + } + for k := range pr.Status.TaskRuns { + taskRunName = k + break + } + } else { + for _, cr := range pr.Status.ChildReferences { + if cr.Kind == "TaskRun" { + taskRunName = cr.Name + } + } + if taskRunName == "" { + t.Fatal("PipelineRun does not have expected TaskRun in .status.childReferences") + } } // Get the TaskRun. @@ -257,6 +287,9 @@ func TestPipelineRunCustomTaskTimeout(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(context.Background(), t, c, namespace) }, t.Logf) defer tearDown(context.Background(), t, c, namespace) + + embeddedStatusValue := GetEmbeddedStatusValue(ctx, t, c.KubeClient) + pipeline := parse.MustParsePipeline(t, fmt.Sprintf(` metadata: name: %s @@ -295,36 +328,46 @@ spec: } // Get the Run name. - if len(pr.Status.Runs) != 1 { - t.Fatalf("PipelineRun had unexpected .status.runs; got %d, want 1", len(pr.Status.Runs)) - } + runName := "" - for runName := range pr.Status.Runs { - // Get the Run. - r, err := c.RunClient.Get(ctx, runName, metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to get Run %q: %v", runName, err) + if embeddedStatusValue != config.MinimalEmbeddedStatus { + if len(pr.Status.Runs) != 1 { + t.Fatalf("PipelineRun had unexpected .status.runs; got %d, want 1", len(pr.Status.Runs)) } - if r.IsDone() { - t.Fatalf("Run unexpectedly done: %v", r.Status.GetCondition(apis.ConditionSucceeded)) + for rn := range pr.Status.Runs { + runName = rn } - - // Simulate a Custom Task controller updating the Run to be started/running, - // because, a run that has not started cannot timeout. 
- r.Status = v1alpha1.RunStatus{ - RunStatusFields: v1alpha1.RunStatusFields{ - StartTime: &metav1.Time{Time: time.Now()}, - }, - Status: duckv1.Status{ - Conditions: []apis.Condition{{ - Type: apis.ConditionSucceeded, - Status: corev1.ConditionUnknown, - }}, - }, - } - if _, err := c.RunClient.UpdateStatus(ctx, r, metav1.UpdateOptions{}); err != nil { - t.Fatalf("Failed to update Run to successful: %v", err) + } else { + if len(pr.Status.ChildReferences) != 1 { + t.Fatalf("PipelineRun had unexpected .status.childReferences; got %d, want 1", len(pr.Status.ChildReferences)) } + runName = pr.Status.ChildReferences[0].Name + } + + // Get the Run. + r, err := c.RunClient.Get(ctx, runName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get Run %q: %v", runName, err) + } + if r.IsDone() { + t.Fatalf("Run unexpectedly done: %v", r.Status.GetCondition(apis.ConditionSucceeded)) + } + + // Simulate a Custom Task controller updating the Run to be started/running, + // because, a run that has not started cannot timeout. 
+ r.Status = v1alpha1.RunStatus{ + RunStatusFields: v1alpha1.RunStatusFields{ + StartTime: &metav1.Time{Time: time.Now()}, + }, + Status: duckv1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + }}, + }, + } + if _, err := c.RunClient.UpdateStatus(ctx, r, metav1.UpdateOptions{}); err != nil { + t.Fatalf("Failed to update Run to successful: %v", err) } t.Logf("Waiting for PipelineRun %s in namespace %s to be timed out", pipelineRun.Name, namespace) diff --git a/test/gate.go b/test/featureflags.go similarity index 63% rename from test/gate.go rename to test/featureflags.go index 13292e1c411..6e510339449 100644 --- a/test/gate.go +++ b/test/featureflags.go @@ -6,6 +6,8 @@ import ( "strings" "testing" + "k8s.io/client-go/kubernetes" + "github.com/tektoncd/pipeline/pkg/apis/config" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/system" @@ -32,3 +34,17 @@ func requireAnyGate(gates map[string]string) func(context.Context, *testing.T, * t.Skipf("No feature flag matching %s", strings.Join(pairs, " or ")) } } + +// GetEmbeddedStatusValue gets the current value for the "embedded-status" feature flag. +// If the flag is not set, it returns the default value. 
+func GetEmbeddedStatusValue(ctx context.Context, t *testing.T, kubeClient kubernetes.Interface) string { + featureFlagsCM, err := kubeClient.CoreV1().ConfigMaps(system.Namespace()).Get(ctx, config.GetFeatureFlagsConfigName(), metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get ConfigMap `%s`: %s", config.GetFeatureFlagsConfigName(), err) + } + val := featureFlagsCM.Data["embedded-status"] + if val == "" { + return config.DefaultEmbeddedStatus + } + return val +} diff --git a/test/retry_test.go b/test/retry_test.go index bdafedb75dd..c54d93a7f74 100644 --- a/test/retry_test.go +++ b/test/retry_test.go @@ -25,6 +25,8 @@ import ( "testing" "time" + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/test/parse" corev1 "k8s.io/api/core/v1" @@ -44,6 +46,8 @@ func TestTaskRunRetry(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) + embeddedStatus := GetEmbeddedStatusValue(ctx, t, c.KubeClient) + // Create a PipelineRun with a single TaskRun that can only fail, // configured to retry 5 times. pipelineRunName := "retry-pipeline" @@ -75,12 +79,32 @@ spec: t.Fatalf("Failed to get PipelineRun %q: %v", pipelineRunName, err) } - // PipelineRunStatus should have 1 TaskRun status, and it should be failed. - if len(pr.Status.TaskRuns) != 1 { - t.Errorf("Got %d TaskRun statuses, wanted %d", len(pr.Status.TaskRuns), numRetries) + if embeddedStatus == config.FullEmbeddedStatus || embeddedStatus == config.BothEmbeddedStatus { + // PipelineRunStatus should have 1 TaskRun status, and it should be failed. 
+ if len(pr.Status.TaskRuns) != 1 { + t.Errorf("Got %d TaskRun statuses, wanted %d", len(pr.Status.TaskRuns), numRetries) + } + for taskRunName, trs := range pr.Status.TaskRuns { + if !isFailed(t, taskRunName, trs.Status.Conditions) { + t.Errorf("TaskRun status %q is not failed", taskRunName) + } + } + } - for taskRunName, trs := range pr.Status.TaskRuns { - if !isFailed(t, taskRunName, trs.Status.Conditions) { + if embeddedStatus == config.MinimalEmbeddedStatus || embeddedStatus == config.BothEmbeddedStatus { + // PipelineRunStatus should have 1 child reference, and the TaskRun it refers to should be failed. + if len(pr.Status.ChildReferences) != 1 { + t.Fatalf("Got %d child references, wanted %d", len(pr.Status.ChildReferences), numRetries) + } + if pr.Status.ChildReferences[0].Kind != "TaskRun" { + t.Errorf("Got a child reference of kind %s, but expected TaskRun", pr.Status.ChildReferences[0].Kind) + } + taskRunName := pr.Status.ChildReferences[0].Name + + tr, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get TaskRun %q: %v", taskRunName, err) + } + if !isFailed(t, taskRunName, tr.Status.Conditions) { t.Errorf("TaskRun status %q is not failed", taskRunName) } } diff --git a/test/v1alpha1/retry_test.go b/test/v1alpha1/retry_test.go index 16bfc2dcf5d..928b51ffdd7 100644 --- a/test/v1alpha1/retry_test.go +++ b/test/v1alpha1/retry_test.go @@ -25,6 +25,10 @@ import ( "testing" "time" + "github.com/tektoncd/pipeline/test" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/test/parse" corev1 "k8s.io/api/core/v1" @@ -44,6 +48,9 @@ func TestTaskRunRetry(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) + // Get the current value of the "embedded-status" feature flag, so the assertions below can check the matching status fields. 
+ embeddedStatus := test.GetEmbeddedStatusValue(ctx, t, c.KubeClient) + // Create a PipelineRun with a single TaskRun that can only fail, // configured to retry 5 times. pipelineRunName := "retry-pipeline" @@ -75,12 +82,32 @@ spec: t.Fatalf("Failed to get PipelineRun %q: %v", pipelineRunName, err) } - // PipelineRunStatus should have 1 TaskRun status, and it should be failed. - if len(pr.Status.TaskRuns) != 1 { - t.Errorf("Got %d TaskRun statuses, wanted %d", len(pr.Status.TaskRuns), numRetries) + if embeddedStatus == config.FullEmbeddedStatus || embeddedStatus == config.BothEmbeddedStatus { + // PipelineRunStatus should have 1 TaskRun status, and it should be failed. + if len(pr.Status.TaskRuns) != 1 { + t.Errorf("Got %d TaskRun statuses, wanted %d", len(pr.Status.TaskRuns), numRetries) + } + for taskRunName, trs := range pr.Status.TaskRuns { + if !isFailed(t, taskRunName, trs.Status.Conditions) { + t.Errorf("TaskRun status %q is not failed", taskRunName) + } + } } - for taskRunName, trs := range pr.Status.TaskRuns { - if !isFailed(t, taskRunName, trs.Status.Conditions) { + if embeddedStatus == config.MinimalEmbeddedStatus || embeddedStatus == config.BothEmbeddedStatus { + // PipelineRunStatus should have 1 child reference, and the TaskRun it refers to should be failed. + if len(pr.Status.ChildReferences) != 1 { + t.Fatalf("Got %d child references, wanted %d", len(pr.Status.ChildReferences), numRetries) + } + if pr.Status.ChildReferences[0].Kind != "TaskRun" { + t.Errorf("Got a child reference of kind %s, but expected TaskRun", pr.Status.ChildReferences[0].Kind) + } + taskRunName := pr.Status.ChildReferences[0].Name + + tr, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get TaskRun %q: %v", taskRunName, err) + } + if !isFailed(t, taskRunName, tr.Status.Conditions) { t.Errorf("TaskRun status %q is not failed", taskRunName) } }