diff --git a/test/crd.go b/test/crd.go
index 275b0dcf153..b2e33d964d0 100644
--- a/test/crd.go
+++ b/test/crd.go
@@ -23,10 +23,9 @@ import (
 	"bytes"
 	"fmt"
 	"io"
-	"strings"
-	"testing"
 
 	buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1"
+	"github.com/knative/pkg/test/logging"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -53,7 +52,7 @@ const (
 	buildOutput = "Build successful"
 )
 
-func getHelloWorldValidationPod(namespace string) *corev1.Pod {
+func getHelloWorldValidationPod(namespace, volumeClaimName string) *corev1.Pod {
 	return &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
@@ -80,7 +79,7 @@ func getHelloWorldValidationPod(namespace string) *corev1.Pod {
 				Name: "scratch",
 				VolumeSource: corev1.VolumeSource{
 					PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
-						ClaimName: "scratch",
+						ClaimName: volumeClaimName,
 					},
 				},
 			},
@@ -121,27 +120,33 @@ func getHelloWorldTask(namespace string, args []string) *v1alpha1.Task {
 					Name:  hwContainerName,
 					Image: "busybox",
 					Args:  args,
-					VolumeMounts: []corev1.VolumeMount{
-						corev1.VolumeMount{
-							Name:      "scratch",
-							MountPath: logPath,
-						},
-					},
 				},
 			},
-				Volumes: []corev1.Volume{
-					corev1.Volume{
-						Name: "scratch",
-						VolumeSource: corev1.VolumeSource{
-							PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
-								ClaimName: "scratch",
-							},
-						},
-					},
-				},
-			},
-		},
+			},
+		},
+	}
+}
+
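+// getHelloWorldTaskWithVolume returns the hello world Task with a "scratch"
+// volume mount backed by the PVC named by hwVolumeName, so the step's output
+// can be read back after the run.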
+func getHelloWorldTaskWithVolume(namespace string, args []string) *v1alpha1.Task {
+	t := getHelloWorldTask(namespace, args)
+	t.Spec.BuildSpec.Steps[0].VolumeMounts = []corev1.VolumeMount{
+		corev1.VolumeMount{
+			Name:      "scratch",
+			MountPath: logPath,
+		},
+	}
+	t.Spec.BuildSpec.Volumes = []corev1.Volume{
+		corev1.Volume{
+			Name: "scratch",
+			VolumeSource: corev1.VolumeSource{
+				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+					ClaimName: hwVolumeName,
+				},
+			},
+		},
 	}
+	return t
 }
 
 func getHelloWorldTaskRun(namespace string) *v1alpha1.TaskRun {
@@ -217,15 +222,15 @@ func getHelloWorldPipelineRun(namespace string) *v1alpha1.PipelineRun {
 	}
 }
 
-func VerifyBuildOutput(t *testing.T, c *clients, namespace string, testStr string) {
+func getBuildOutputFromVolume(logger *logging.BaseLogger, c *clients, namespace, testStr string) (string, error) {
 	// Create Validation Pod
 	pods := c.KubeClient.Kube.CoreV1().Pods(namespace)
 
-	if _, err := pods.Create(getHelloWorldValidationPod(namespace)); err != nil {
-		t.Fatalf("Failed to create TaskRun `%s`: %s", hwTaskRunName, err)
+	if _, err := pods.Create(getHelloWorldValidationPod(namespace, hwVolumeName)); err != nil {
+		return "", fmt.Errorf("failed to create validation pod `%s`: %s", hwValidationPodName, err)
 	}
 
-	// Verify status of Pod (wait for it)
+	logger.Infof("Waiting for pod with test volume %s to come up so we can read logs from it", hwVolumeName)
 	if err := WaitForPodState(c, hwValidationPodName, namespace, func(p *corev1.Pod) (bool, error) {
 		// the "Running" status is used as "Succeeded" caused issues as the pod succeeds and restarts quickly
 		// there might be a race condition here and possibly a better way of handling this, perhaps using a Job or different state validation
@@ -234,20 +239,18 @@ func VerifyBuildOutput(t *testing.T, c *clients, namespace string, testStr strin
 		}
 		return false, nil
 	}, "ValidationPodCompleted"); err != nil {
-		t.Errorf("Error waiting for Pod %s to finish: %s", hwValidationPodName, err)
+		return "", fmt.Errorf("error waiting for Pod %s to finish: %s", hwValidationPodName, err)
 	}
 
 	// Get validation pod logs and verify that the build executed a container w/ desired output
 	req := pods.GetLogs(hwValidationPodName, &corev1.PodLogOptions{})
 	readCloser, err := req.Stream()
 	if err != nil {
-		t.Fatalf("Failed to open stream to read: %v", err)
+		return "", fmt.Errorf("failed to open stream to read: %v", err)
 	}
 	defer readCloser.Close()
 	var buf bytes.Buffer
 	out := bufio.NewWriter(&buf)
 	_, err = io.Copy(out, readCloser)
-	if !strings.Contains(buf.String(), testStr) {
-		t.Fatalf("Expected output %s from pod %s but got %s", buildOutput, hwValidationPodName, buf.String())
-	}
+	return buf.String(), nil
 }
diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh
index 292ed44b050..7d4eaef1920 100755
--- a/test/e2e-tests.sh
+++ b/test/e2e-tests.sh
@@ -45,11 +45,6 @@ function teardown() {
 
 # Called by `fail_test` (provided by `e2e-tests.sh`) to dump info on test failure
 function dump_extra_cluster_state() {
-  for crd in pipelines pipelineruns tasks taskruns resources pipelineparams builds
-  do
-    echo ">>> $crd:"
-    kubectl get $crd -o yaml --all-namespaces
-  done
   echo ">>> Pipeline controller log:"
   kubectl -n knative-build-pipeline logs $(get_app_pod build-pipeline-controller knative-build-pipeline)
   echo ">>> Pipeline webhook log:"
diff --git a/test/pipelinerun_test.go b/test/pipelinerun_test.go
index 36081f312eb..2e197780350 100644
--- a/test/pipelinerun_test.go
+++ b/test/pipelinerun_test.go
@@ -19,6 +19,7 @@ limitations under the License.
 package test
 
 import (
+	"fmt"
 	"strings"
 	"testing"
 
@@ -55,8 +56,14 @@ func TestPipelineRun(t *testing.T) {
 	logger.Infof("Waiting for PipelineRun %s in namespace %s to complete", hwPipelineRunName, namespace)
 	if err := WaitForPipelineRunState(c, hwPipelineRunName, func(tr *v1alpha1.PipelineRun) (bool, error) {
 		c := tr.Status.GetCondition(duckv1alpha1.ConditionSucceeded)
-		if c != nil && c.Status == corev1.ConditionTrue {
-			return true, nil
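+		// An explicit ConditionFalse is terminal: return an error right away
+		// instead of polling until the wait times out.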
+		if c != nil {
+			if c.Status == corev1.ConditionTrue {
+				return true, nil
+			} else if c.Status == corev1.ConditionFalse {
+				return true, fmt.Errorf("pipeline run %s failed", hwPipelineRunName)
+			}
 		}
 		return false, nil
 	}, "PipelineRunSuccess"); err != nil {
@@ -78,5 +85,4 @@ func TestPipelineRun(t *testing.T) {
 			}
 		}
 	}
-	VerifyBuildOutput(t, c, namespace, taskOutput)
 }
diff --git a/test/taskrun_test.go b/test/taskrun_test.go
index 0b8b4a94d70..2e738183151 100644
--- a/test/taskrun_test.go
+++ b/test/taskrun_test.go
@@ -16,10 +16,7 @@ limitations under the License.
 package test
 
 import (
-	"bufio"
-	"bytes"
 	"fmt"
-	"io"
 	"strings"
 	"testing"
 
@@ -27,7 +24,6 @@ import (
 	knativetest "github.com/knative/pkg/test"
 	"github.com/knative/pkg/test/logging"
 	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1"
 )
@@ -41,16 +37,15 @@ func TestTaskRun(t *testing.T) {
 	knativetest.CleanupOnInterrupt(func() { tearDown(logger, c.KubeClient, namespace) }, logger)
 	defer tearDown(logger, c.KubeClient, namespace)
 
-	// Create Volume
+	logger.Infof("Creating volume %s to collect log output", hwVolumeName)
 	if _, err := c.KubeClient.Kube.CoreV1().PersistentVolumeClaims(namespace).Create(getHelloWorldVolumeClaim(namespace)); err != nil {
 		t.Fatalf("Failed to create Volume `%s`: %s", hwTaskName, err)
 	}
 
-	// Create Task
-	if _, err := c.TaskClient.Create(getHelloWorldTask(namespace, []string{"/bin/sh", "-c", fmt.Sprintf("echo %s > %s/%s", taskOutput, logPath, logFile)})); err != nil {
+	logger.Infof("Creating Task and TaskRun in namespace %s", namespace)
+	if _, err := c.TaskClient.Create(getHelloWorldTaskWithVolume(namespace, []string{"/bin/sh", "-c", fmt.Sprintf("echo %s > %s/%s", taskOutput, logPath, logFile)})); err != nil {
 		t.Fatalf("Failed to create Task `%s`: %s", hwTaskName, err)
 	}
-
 	if _, err := c.TaskRunClient.Create(getHelloWorldTaskRun(namespace)); err != nil {
 		t.Fatalf("Failed to create TaskRun `%s`: %s", hwTaskRunName, err)
 	}
@@ -58,39 +53,26 @@ func TestTaskRun(t *testing.T) {
 	logger.Infof("Waiting for TaskRun %s in namespace %s to complete", hwTaskRunName, namespace)
 	if err := WaitForTaskRunState(c, hwTaskRunName, func(tr *v1alpha1.TaskRun) (bool, error) {
 		c := tr.Status.GetCondition(duckv1alpha1.ConditionSucceeded)
-		if c != nil && c.Status == corev1.ConditionTrue {
-			return true, nil
+		if c != nil {
+			if c.Status == corev1.ConditionTrue {
+				return true, nil
+			} else if c.Status == corev1.ConditionFalse {
+				return true, fmt.Errorf("task run %s failed", hwTaskRunName)
+			}
 		}
 		return false, nil
 	}, "TaskRunSuccess"); err != nil {
 		t.Errorf("Error waiting for TaskRun %s to finish: %s", hwTaskRunName, err)
 	}
 
-	// The Build created by the TaskRun will have the same name
-	b, err := c.BuildClient.Get(hwTaskRunName, metav1.GetOptions{})
-	if err != nil {
-		t.Errorf("Expected there to be a Build with the same name as TaskRun %s but got error: %s", hwTaskRunName, err)
-	}
-	cluster := b.Status.Cluster
-	if cluster == nil || cluster.PodName == "" {
-		t.Fatalf("Expected build status to have a podname but it didn't!")
-	}
-	podName := cluster.PodName
-	pods := c.KubeClient.Kube.CoreV1().Pods(namespace)
-
-	req := pods.GetLogs(podName, &corev1.PodLogOptions{})
-	readCloser, err := req.Stream()
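+	// Read the task's output back from the shared volume via the validation
+	// pod, rather than scraping the logs of the build pod itself.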
+	logger.Infof("Verifying TaskRun %s output in volume %s", hwTaskRunName, hwVolumeName)
+	output, err := getBuildOutputFromVolume(logger, c, namespace, taskOutput)
 	if err != nil {
-		t.Fatalf("Failed to open stream to read: %v", err)
+		t.Fatalf("Unable to get build output from volume %s: %s", hwVolumeName, err)
 	}
-	defer readCloser.Close()
-	var buf bytes.Buffer
-	out := bufio.NewWriter(&buf)
-	_, err = io.Copy(out, readCloser)
-	if !strings.Contains(buf.String(), buildOutput) {
-		t.Fatalf("Expected output %s from pod %s but got %s", buildOutput, podName, buf.String())
+	if !strings.Contains(output, taskOutput) {
+		t.Fatalf("Expected output %s from pod %s but got %s", taskOutput, hwValidationPodName, output)
 	}
-
-	// Verify that the init containers Build ran had 'taskOutput' written
-	VerifyBuildOutput(t, c, namespace, taskOutput)
 }
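
Note (reviewer sketch, not part of this diff): the tests above call getHelloWorldVolumeClaim, a pre-existing helper in test/crd.go that this change does not touch. For readers without the full tree, it presumably looks roughly like the following; the storage size and access mode here are assumptions for illustration, and the snippet relies on the corev1/metav1/resource imports already present in test/crd.go.

// getHelloWorldVolumeClaim returns the PVC backing the "scratch" volume shared
// by the hello world task and the validation pod. Sketch only: the 5Gi request
// and ReadWriteOnce mode are assumed, not taken from the repo.
func getHelloWorldVolumeClaim(namespace string) *corev1.PersistentVolumeClaim {
	return &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      hwVolumeName,
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("5Gi"),
				},
			},
		},
	}
}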