diff --git a/manifests/cluster-install/argo-server-rbac/argo-server-clusterole.yaml b/manifests/cluster-install/argo-server-rbac/argo-server-clusterole.yaml index 0882c9a7b9c9..ed22044ae989 100644 --- a/manifests/cluster-install/argo-server-rbac/argo-server-clusterole.yaml +++ b/manifests/cluster-install/argo-server-rbac/argo-server-clusterole.yaml @@ -28,7 +28,6 @@ rules: - get - list - watch - - delete - apiGroups: - "" resources: diff --git a/manifests/namespace-install/argo-server-rbac/argo-server-role.yaml b/manifests/namespace-install/argo-server-rbac/argo-server-role.yaml index 314177a3ef8e..f0b091aee49f 100644 --- a/manifests/namespace-install/argo-server-rbac/argo-server-role.yaml +++ b/manifests/namespace-install/argo-server-rbac/argo-server-role.yaml @@ -28,7 +28,6 @@ rules: - get - list - watch - - delete - apiGroups: - "" resources: diff --git a/manifests/quick-start-minimal.yaml b/manifests/quick-start-minimal.yaml index be718d5116f0..f6ca36da45b9 100644 --- a/manifests/quick-start-minimal.yaml +++ b/manifests/quick-start-minimal.yaml @@ -1314,7 +1314,6 @@ rules: - get - list - watch - - delete - apiGroups: - "" resources: diff --git a/manifests/quick-start-mysql.yaml b/manifests/quick-start-mysql.yaml index 6618b9b23d96..5935d213c9e6 100644 --- a/manifests/quick-start-mysql.yaml +++ b/manifests/quick-start-mysql.yaml @@ -1314,7 +1314,6 @@ rules: - get - list - watch - - delete - apiGroups: - "" resources: diff --git a/manifests/quick-start-postgres.yaml b/manifests/quick-start-postgres.yaml index 0f7b46b96d3c..ccd9140ec6b3 100644 --- a/manifests/quick-start-postgres.yaml +++ b/manifests/quick-start-postgres.yaml @@ -1314,7 +1314,6 @@ rules: - get - list - watch - - delete - apiGroups: - "" resources: diff --git a/server/workflow/workflow_server.go b/server/workflow/workflow_server.go index 584278e21c6f..b5b1ceaed9c4 100644 --- a/server/workflow/workflow_server.go +++ b/server/workflow/workflow_server.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "sort" - 
"sync" log "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" @@ -371,18 +370,8 @@ func (s *workflowServer) DeleteWorkflow(ctx context.Context, req *workflowpkg.Wo return &workflowpkg.WorkflowDeleteResponse{}, nil } -func errorFromChannel(errCh <-chan error) error { - select { - case err := <-errCh: - return err - default: - } - return nil -} - func (s *workflowServer) RetryWorkflow(ctx context.Context, req *workflowpkg.WorkflowRetryRequest) (*wfv1.Workflow, error) { wfClient := auth.GetWfClient(ctx) - kubeClient := auth.GetKubeClient(ctx) wf, err := s.getWorkflow(ctx, wfClient, req.Namespace, req.Name, metav1.GetOptions{}) if err != nil { @@ -394,38 +383,7 @@ func (s *workflowServer) RetryWorkflow(ctx context.Context, req *workflowpkg.Wor return nil, sutils.ToStatusError(err, codes.InvalidArgument) } - err = s.hydrator.Hydrate(wf) - if err != nil { - return nil, sutils.ToStatusError(err, codes.Internal) - } - - wf, podsToDelete, err := util.FormulateRetryWorkflow(ctx, wf, req.RestartSuccessful, req.NodeFieldSelector, req.Parameters) - if err != nil { - return nil, sutils.ToStatusError(err, codes.Internal) - } - - errCh := make(chan error, len(podsToDelete)) - var wg sync.WaitGroup - wg.Add(len(podsToDelete)) - for _, podName := range podsToDelete { - log.WithFields(log.Fields{"podDeleted": podName}).Info("Deleting pod") - go func(podName string) { - defer wg.Done() - err := kubeClient.CoreV1().Pods(wf.Namespace).Delete(ctx, podName, metav1.DeleteOptions{}) - if err != nil && !apierr.IsNotFound(err) { - errCh <- err - return - } - }(podName) - } - wg.Wait() - - err = errorFromChannel(errCh) - if err != nil { - return nil, sutils.ToStatusError(err, codes.Internal) - } - - err = s.hydrator.Dehydrate(wf) + wf, err = util.MarkWorkflowForRetry(ctx, wf, req.RestartSuccessful, req.NodeFieldSelector, req.Parameters) if err != nil { return nil, sutils.ToStatusError(err, codes.Internal) } diff --git a/server/workflowarchive/archived_workflow_server.go 
b/server/workflowarchive/archived_workflow_server.go index 511fe8d3ecc6..f8db8866d1eb 100644 --- a/server/workflowarchive/archived_workflow_server.go +++ b/server/workflowarchive/archived_workflow_server.go @@ -276,7 +276,6 @@ func (w *archivedWorkflowServer) ResubmitArchivedWorkflow(ctx context.Context, r func (w *archivedWorkflowServer) RetryArchivedWorkflow(ctx context.Context, req *workflowarchivepkg.RetryArchivedWorkflowRequest) (*wfv1.Workflow, error) { wfClient := auth.GetWfClient(ctx) - kubeClient := auth.GetKubeClient(ctx) wf, err := w.GetArchivedWorkflow(ctx, &workflowarchivepkg.GetArchivedWorkflowRequest{Uid: req.Uid}) if err != nil { @@ -285,23 +284,19 @@ func (w *archivedWorkflowServer) RetryArchivedWorkflow(ctx context.Context, req _, err = wfClient.ArgoprojV1alpha1().Workflows(req.Namespace).Get(ctx, wf.Name, metav1.GetOptions{}) if apierr.IsNotFound(err) { - - wf, podsToDelete, err := util.FormulateRetryWorkflow(ctx, wf, req.RestartSuccessful, req.NodeFieldSelector, req.Parameters) + wf.ObjectMeta.ResourceVersion = "" + wf.ObjectMeta.UID = "" + wf, err := wfClient.ArgoprojV1alpha1().Workflows(req.Namespace).Create(ctx, wf, metav1.CreateOptions{}) if err != nil { return nil, sutils.ToStatusError(err, codes.Internal) } - for _, podName := range podsToDelete { - log.WithFields(log.Fields{"podDeleted": podName}).Info("Deleting pod") - err := kubeClient.CoreV1().Pods(wf.Namespace).Delete(ctx, podName, metav1.DeleteOptions{}) - if err != nil && !apierr.IsNotFound(err) { - return nil, sutils.ToStatusError(err, codes.Internal) - } + wf, err = util.MarkWorkflowForRetry(ctx, wf, req.RestartSuccessful, req.NodeFieldSelector, req.Parameters) + if err != nil { + return nil, sutils.ToStatusError(err, codes.Internal) } - wf.ObjectMeta.ResourceVersion = "" - wf.ObjectMeta.UID = "" - result, err := wfClient.ArgoprojV1alpha1().Workflows(req.Namespace).Create(ctx, wf, metav1.CreateOptions{}) + result, err := 
wfClient.ArgoprojV1alpha1().Workflows(req.Namespace).Update(ctx, wf, metav1.UpdateOptions{}) if err != nil { return nil, sutils.ToStatusError(err, codes.Internal) } diff --git a/test/e2e/cli_test.go b/test/e2e/cli_test.go index 54dc4fd517ad..65fce8a22ba1 100644 --- a/test/e2e/cli_test.go +++ b/test/e2e/cli_test.go @@ -20,6 +20,7 @@ import ( wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo-workflows/v3/test/e2e/fixtures" + "github.com/argoproj/argo-workflows/v3/workflow/common" ) const ( @@ -857,12 +858,51 @@ func (s *CLISuite) TestWorkflowRetry() { return wf.Status.AnyActiveSuspendNode(), "suspended node" }), time.Second*90). Then(). - ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { outerStepsPodNode := status.Nodes.FindByDisplayName("steps-outer-step1") innerStepsPodNode := status.Nodes.FindByDisplayName("steps-inner-step1") assert.True(t, outerStepsPodNode.FinishedAt.Before(&retryTime)) assert.True(t, retryTime.Before(&innerStepsPodNode.FinishedAt)) + + assert.Equal(t, "Retried", metadata.GetLabels()[common.LabelKeyWorkflowRetryingStatus]) + assert.Equal(t, "true", metadata.GetAnnotations()[common.AnnotationKeyRetryRestartSuccessful]) + assert.Equal(t, "templateName=steps-inner", metadata.GetAnnotations()[common.AnnotationKeyRetryNodeFieldSelector]) + assert.Equal(t, "null", metadata.GetAnnotations()[common.AnnotationKeyRetryParameters]) + }) +} + +func (s *CLISuite) TestWorkflowRetryWithParameters() { + s.Given(). + Workflow("@testdata/retry-parameters.yaml"). + When(). + SubmitWorkflow(). + WaitForWorkflow(fixtures.ToBeFailed). + RunCli([]string{"logs", "@latest", "--follow"}, func(t *testing.T, output string, err error) { + if assert.NoError(t, err) { + assert.Contains(t, output, "hello world") + } + }). + Wait(3*time.Second). 
+ RunCli([]string{"retry", "@latest", "--node-field-selector", "templateName=main", "-p", "message1=hi", "-p", "message2=argo"}, func(t *testing.T, output string, err error) { + if assert.NoError(t, err, output) { + assert.Contains(t, output, "Name:") + assert.Contains(t, output, "Namespace:") + } + }). + Wait(3*time.Second). + WaitForWorkflow(fixtures.ToBeFailed). + RunCli([]string{"logs", "@latest", "--follow"}, func(t *testing.T, output string, err error) { + if assert.NoError(t, err) { + assert.Contains(t, output, "hi argo") + } + }). + Then(). + ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, "Retried", metadata.GetLabels()[common.LabelKeyWorkflowRetryingStatus]) + assert.Equal(t, "false", metadata.GetAnnotations()[common.AnnotationKeyRetryRestartSuccessful]) + assert.Equal(t, "templateName=main", metadata.GetAnnotations()[common.AnnotationKeyRetryNodeFieldSelector]) + assert.Equal(t, "[\"message1=hi\",\"message2=argo\"]", metadata.GetAnnotations()[common.AnnotationKeyRetryParameters]) }) } @@ -877,7 +917,11 @@ func (s *CLISuite) TestWorkflowRetryFailedWorkflow() { RunCli([]string{"retry", "-l", "workflows.argoproj.io/workflow=fail-first-pass-second-workflow", "--namespace=argo"}, func(t *testing.T, output string, err error) { assert.NoError(t, err, output) }). - WaitForWorkflow(fixtures.ToBeSucceeded) + WaitForWorkflow(fixtures.ToBeSucceeded). + Then(). 
+ ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) { + assert.Equal(t, "Retried", metadata.GetLabels()[common.LabelKeyWorkflowRetryingStatus]) + }) } func (s *CLISuite) TestWorkflowRetryNestedDag() { diff --git a/test/e2e/testdata/retry-parameters.yaml b/test/e2e/testdata/retry-parameters.yaml new file mode 100644 index 000000000000..73254ede4d10 --- /dev/null +++ b/test/e2e/testdata/retry-parameters.yaml @@ -0,0 +1,18 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: retry-parameters- +spec: + entrypoint: main + arguments: + parameters: + - name: message1 + value: "hello" + - name: message2 + value: "world" + templates: + - name: main + container: + image: argoproj/argosay:v2 + command: [sh, -c] + args: ["echo {{workflow.parameters.message1}} {{workflow.parameters.message2}}; exit 1"] \ No newline at end of file diff --git a/workflow/common/common.go b/workflow/common/common.go index b4f174263eaa..9c67e0b52a95 100644 --- a/workflow/common/common.go +++ b/workflow/common/common.go @@ -59,6 +59,15 @@ const ( // the strategy whose artifacts are being deleted AnnotationKeyArtifactGCStrategy = workflow.WorkflowFullName + "/artifact-gc-strategy" + // AnnotationKeyRetryNodeFieldSelector is the annotation that specifies the node field selector to use when retrying a node + AnnotationKeyRetryNodeFieldSelector = workflow.WorkflowFullName + "/retry-node-field-selector" + + // AnnotationKeyRetryParameters is the annotation that specifies the retry parameters to use when retrying a node + AnnotationKeyRetryParameters = workflow.WorkflowFullName + "/retry-parameters" + + // AnnotationKeyRetryRestartSuccessful is the annotation that specifies if retry succeeded node or not + AnnotationKeyRetryRestartSuccessful = workflow.WorkflowFullName + "/retry-restart-successful" + // LabelKeyControllerInstanceID is the label the controller will carry forward to workflows/pod labels // for the purposes of workflow 
segregation LabelKeyControllerInstanceID = workflow.WorkflowFullName + "/controller-instanceid" @@ -103,6 +112,12 @@ const ( // LabelKeyCronWorkflowCompleted is a label applied to the cron workflow when the configured stopping condition is achieved LabelKeyCronWorkflowCompleted = workflow.CronWorkflowFullName + "/completed" + // LabelKeyWorkflowRetryingStatus indicates if a workflow needs Retrying or not: + // * `` - does not need retrying ... yet + // * `Pending` - pending retrying + // * `Retrying` - retrying in progress + // * `Retried` - has been retried + LabelKeyWorkflowRetryingStatus = workflow.WorkflowFullName + "/workflow-retrying-status" // ExecutorArtifactBaseDir is the base directory in the init container in which artifacts will be copied to. // Each artifact will be named according to its input name (e.g: /argo/inputs/artifacts/CODE) diff --git a/workflow/controller/controller.go b/workflow/controller/controller.go index 1c8764a081a8..f0933e185ee6 100644 --- a/workflow/controller/controller.go +++ b/workflow/controller/controller.go @@ -590,6 +590,13 @@ func (wfc *WorkflowController) processNextPodCleanupItem(ctx context.Context) bo if err != nil && !apierr.IsNotFound(err) { return err } + case labelBatchDeletePodsCompleted: + // When running here, means that all pods that need to be deleted for the retry operation have been completed. 
+ workflowName := podName + err := wfc.labelWorkflowRetried(ctx, namespace, workflowName) + if err != nil { + return err + } } return nil }() @@ -602,6 +609,37 @@ func (wfc *WorkflowController) processNextPodCleanupItem(ctx context.Context) bo return true } +func (wfc *WorkflowController) labelWorkflowRetried(ctx context.Context, namespace string, workflowName string) error { + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + err := wfc.patchWorkflowLabels(ctx, namespace, workflowName, map[string]string{ + common.LabelKeyWorkflowRetryingStatus: "Retried", + }) + return err + }) + if err != nil { + return err + } + return nil +} + +func (wfc *WorkflowController) patchWorkflowLabels(ctx context.Context, namespace string, workflowName string, labels map[string]string) error { + data, err := json.Marshal(&wfv1.WorkflowTaskResult{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + }) + if err != nil { + return err + } + _, err = wfc.wfclientset.ArgoprojV1alpha1().Workflows(namespace).Patch(ctx, + workflowName, + types.MergePatchType, + data, + metav1.PatchOptions{}, + ) + return err +} + func (wfc *WorkflowController) getPodFromAPI(ctx context.Context, namespace string, podName string) (*apiv1.Pod, error) { pod, err := wfc.kubeclientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) if err != nil { diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 0e93b2a9b397..6079409c071f 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -214,6 +214,16 @@ func (woc *wfOperationCtx) operate(ctx context.Context) { return } + // Check whether it is a workflow that requires retry + if woc.shouldRetry() { + woc.log.Info("workflow retried") + err := woc.retryWorkflow(ctx) + if err != nil { + woc.log.WithError(err).Errorf("Retry workflow failed") + } + return + } + if woc.wf.Status.ArtifactRepositoryRef == nil { ref, err := woc.controller.artifactRepositories.Resolve(ctx, 
woc.execWf.Spec.ArtifactRepositoryRef, woc.wf.Namespace) if err != nil { @@ -3851,6 +3861,50 @@ func (woc *wfOperationCtx) retryStrategy(tmpl *wfv1.Template) *wfv1.RetryStrategy { return woc.execWf.Spec.RetryStrategy } +func (woc *wfOperationCtx) shouldRetry() bool { + retryingStatus, ok := woc.wf.Labels[common.LabelKeyWorkflowRetryingStatus] + if !ok || retryingStatus == "Retried" { + return false + } + return true +} + +func (woc *wfOperationCtx) IsRetried() bool { + return woc.wf.GetLabels()[common.LabelKeyWorkflowRetryingStatus] != "Pending" +} + +func (woc *wfOperationCtx) retryWorkflow(ctx context.Context) error { + if woc.IsRetried() { + return nil + } + // Parse the retry parameters from the annotations. + nodeFieldSelector := woc.wf.GetAnnotations()[common.AnnotationKeyRetryNodeFieldSelector] + parametersStr := woc.wf.GetAnnotations()[common.AnnotationKeyRetryParameters] + var parameters []string + err := json.Unmarshal([]byte(parametersStr), &parameters) + if err != nil { + return fmt.Errorf("failed to unmarshal parameters: %v", err) + } + restartSuccessful := woc.wf.GetAnnotations()[common.AnnotationKeyRetryRestartSuccessful] == "true" + + // Clean up remaining pods in the workflow + wf, podsToDelete, err := wfutil.FormulateRetryWorkflow(ctx, woc.wf, restartSuccessful, nodeFieldSelector, parameters) + if err != nil { + return fmt.Errorf("failed to FormulateRetryWorkflow: %v", err) + } + for _, podName := range podsToDelete { + woc.controller.queuePodForCleanup(wf.Namespace, podName, deletePod) + } + + // Add labelBatchDeletePodsCompleted to the queue to help determine whether the pod has been cleaned up. 
+ woc.controller.queuePodForCleanup(wf.Namespace, wf.Name, labelBatchDeletePodsCompleted) + + woc.wf = wf + woc.wf.ObjectMeta.Labels[common.LabelKeyWorkflowRetryingStatus] = "Retrying" + woc.updated = true + return nil +} + func (woc *wfOperationCtx) setExecWorkflow(ctx context.Context) error { if woc.wf.Spec.WorkflowTemplateRef != nil { // not-woc-misuse err := woc.setStoredWfSpec() diff --git a/workflow/controller/pod_cleanup_key.go b/workflow/controller/pod_cleanup_key.go index 1c050c94323c..b4d81af8af74 100644 --- a/workflow/controller/pod_cleanup_key.go +++ b/workflow/controller/pod_cleanup_key.go @@ -15,10 +15,11 @@ type ( ) const ( - deletePod podCleanupAction = "deletePod" - labelPodCompleted podCleanupAction = "labelPodCompleted" - terminateContainers podCleanupAction = "terminateContainers" - killContainers podCleanupAction = "killContainers" + deletePod podCleanupAction = "deletePod" + labelPodCompleted podCleanupAction = "labelPodCompleted" + terminateContainers podCleanupAction = "terminateContainers" + killContainers podCleanupAction = "killContainers" + labelBatchDeletePodsCompleted podCleanupAction = "labelBatchDeletePodsCompleted" ) func newPodCleanupKey(namespace string, podName string, action podCleanupAction) podCleanupKey { diff --git a/workflow/util/util.go b/workflow/util/util.go index 2f69ef282d99..70b42a52d8d3 100644 --- a/workflow/util/util.go +++ b/workflow/util/util.go @@ -827,22 +827,42 @@ func resetConnectedParentGroupNodes(oldWF *wfv1.Workflow, newWF *wfv1.Workflow, return newWF, resetParentGroupNodes } -// FormulateRetryWorkflow formulates a previous workflow to be retried, deleting all failed steps as well as the onExit node (and children) -func FormulateRetryWorkflow(ctx context.Context, wf *wfv1.Workflow, restartSuccessful bool, nodeFieldSelector string, parameters []string) (*wfv1.Workflow, []string, error) { +// MarkWorkflowForRetry mark a workflow's spec.retry and controller will retry it. 
+func MarkWorkflowForRetry(ctx context.Context, wf *wfv1.Workflow, restartSuccessful bool, nodeFieldSelector string, parameters []string) (*wfv1.Workflow, error) { switch wf.Status.Phase { case wfv1.WorkflowFailed, wfv1.WorkflowError: case wfv1.WorkflowSucceeded: if !(restartSuccessful && len(nodeFieldSelector) > 0) { - return nil, nil, errors.Errorf(errors.CodeBadRequest, "To retry a succeeded workflow, set the options restartSuccessful and nodeFieldSelector") + return nil, errors.Errorf(errors.CodeBadRequest, "To retry a succeeded workflow, set the options restartSuccessful and nodeFieldSelector") } default: - return nil, nil, errors.Errorf(errors.CodeBadRequest, "Cannot retry a workflow in phase %s", wf.Status.Phase) + return nil, errors.Errorf(errors.CodeBadRequest, "Cannot retry a workflow in phase %s", wf.Status.Phase) + } + + newWF := wf.DeepCopy() + delete(newWF.Labels, common.LabelKeyCompleted) + delete(newWF.Labels, common.LabelKeyWorkflowArchivingStatus) + + // Initialize Retry parameters + newWF.ObjectMeta.Labels[common.LabelKeyWorkflowRetryingStatus] = "Pending" + if newWF.ObjectMeta.Annotations == nil { + newWF.ObjectMeta.Annotations = make(map[string]string) + } + newWF.ObjectMeta.Annotations[common.AnnotationKeyRetryNodeFieldSelector] = nodeFieldSelector + parametersStr, err := json.Marshal(parameters) + if err != nil { + return nil, errors.Errorf(errors.CodeBadRequest, "Cannot marshal retry parameters to JSON: %s", parameters) + } + newWF.ObjectMeta.Annotations[common.AnnotationKeyRetryParameters] = string(parametersStr) + newWF.ObjectMeta.Annotations[common.AnnotationKeyRetryRestartSuccessful] = strconv.FormatBool(restartSuccessful) + return newWF, nil +} +// FormulateRetryWorkflow formulates a previous workflow to be retried, deleting all failed steps as well as the onExit node (and children) +func FormulateRetryWorkflow(ctx context.Context, wf *wfv1.Workflow, restartSuccessful bool, nodeFieldSelector string, parameters []string) 
(*wfv1.Workflow, []string, error) { newWF := wf.DeepCopy() // Delete/reset fields which indicate workflow completed - delete(newWF.Labels, common.LabelKeyCompleted) delete(newWF.Labels, common.LabelKeyWorkflowArchivingStatus) newWF.Status.Conditions.UpsertCondition(wfv1.Condition{Status: metav1.ConditionFalse, Type: wfv1.ConditionTypeCompleted}) newWF.ObjectMeta.Labels[common.LabelKeyPhase] = string(wfv1.NodeRunning) diff --git a/workflow/util/util_test.go b/workflow/util/util_test.go index 0328b37d04ab..cca2fff9491f 100644 --- a/workflow/util/util_test.go +++ b/workflow/util/util_test.go @@ -1091,7 +1091,6 @@ func TestFormulateRetryWorkflow(t *testing.T) { assert.Equal(t, wfv1.WorkflowRunning, wf.Status.Phase) assert.Equal(t, metav1.Time{}, wf.Status.FinishedAt) assert.True(t, wf.Status.StartedAt.After(createdTime.Time)) - assert.NotContains(t, wf.Labels, common.LabelKeyCompleted) assert.NotContains(t, wf.Labels, common.LabelKeyWorkflowArchivingStatus) for _, node := range wf.Status.Nodes { switch node.Phase { @@ -1297,7 +1296,7 @@ func TestFormulateRetryWorkflow(t *testing.T) { _, err := wfClient.Create(ctx, wf, metav1.CreateOptions{}) assert.NoError(t, err) _, _, err = FormulateRetryWorkflow(ctx, wf, false, "", nil) - assert.Error(t, err) + assert.Nil(t, err) }) t.Run("Fail on pending workflow", func(t *testing.T) { @@ -1314,7 +1313,7 @@ func TestFormulateRetryWorkflow(t *testing.T) { _, err := wfClient.Create(ctx, wf, metav1.CreateOptions{}) assert.NoError(t, err) _, _, err = FormulateRetryWorkflow(ctx, wf, false, "", nil) - assert.Error(t, err) + assert.Nil(t, err) }) t.Run("Fail on successful workflow without restartSuccessful and nodeFieldSelector", func(t *testing.T) { @@ -1334,7 +1333,7 @@ func TestFormulateRetryWorkflow(t *testing.T) { _, err := wfClient.Create(ctx, wf, metav1.CreateOptions{}) assert.NoError(t, err) _, _, err = FormulateRetryWorkflow(ctx, wf, false, "", nil) - assert.Error(t, err) + assert.Nil(t, err) }) t.Run("Retry successful workflow 
with restartSuccessful and nodeFieldSelector", func(t *testing.T) {