Only check build output in one integration test
I think it's reasonable for only one of our (eventually many) integration
tests to verify the build output, especially when it involves adding a
volume mount to the pile of things that could go wrong in the test.

Refactored the test a bit so we don't assert inside the helper (the
assertions now live in the test itself), and we output some logs before
polling; see the sketch below.
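
The new shape is roughly: the helper returns the collected output plus an
error, and the test makes every assertion. A minimal, self-contained sketch of
that pattern (fetchOutput and TestOutputAssertedInTest are illustrative
stand-ins for getBuildOutputFromVolume and TestTaskRun in the diff below,
which stream logs from a validation pod instead):

```go
package test

import (
	"fmt"
	"strings"
	"testing"
)

// fetchOutput stands in for getBuildOutputFromVolume: it returns data and an
// error rather than calling t.Fatalf itself, so the caller owns the assertions.
func fetchOutput() (string, error) {
	out := "test task output" // the real helper reads this from the validation pod's logs
	if out == "" {
		return "", fmt.Errorf("no output collected")
	}
	return out, nil
}

func TestOutputAssertedInTest(t *testing.T) {
	output, err := fetchOutput()
	if err != nil {
		t.Fatalf("Unable to get build output: %s", err)
	}
	if !strings.Contains(output, "task output") {
		t.Fatalf("Expected output to contain %q but got %q", "task output", output)
	}
}
```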

Removed dumping of CRDs in the test script because each test runs in its own
namespace and cleans up after itself, so there is never anything to dump
(see #145).

Updated condition checking so that if the Run fails, we bail immediately
instead of continuing to hope it will succeed.
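
The updated polling callback has three outcomes: done and succeeded, done and
failed, or keep waiting. A dependency-free sketch of that logic (the real
tests read the duckv1alpha1.ConditionSucceeded condition from the Run status
and compare against corev1.ConditionTrue / corev1.ConditionFalse):

```go
package main

import "fmt"

// condition stands in for the knative duck Condition the tests inspect;
// Status is one of "True", "False", or "Unknown".
type condition struct {
	Status string
}

// checkDone mirrors the polling callback in the updated tests: stop on
// success, stop with an error on failure, otherwise keep polling.
func checkDone(c *condition, runName string) (bool, error) {
	if c != nil {
		if c.Status == "True" {
			return true, nil
		} else if c.Status == "False" {
			return true, fmt.Errorf("run %s failed", runName)
		}
	}
	return false, nil // condition missing or still Unknown: keep waiting
}

func main() {
	done, err := checkDone(&condition{Status: "False"}, "hello-world-run")
	fmt.Println(done, err)
}
```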
bobcatfish committed Oct 12, 2018
1 parent 0e2ad98 commit 87d6926
Showing 4 changed files with 51 additions and 70 deletions.
58 changes: 30 additions & 28 deletions test/crd.go
@@ -23,10 +23,9 @@ import (
"bytes"
"fmt"
"io"
"strings"
"testing"

buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1"
"github.com/knative/pkg/test/logging"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -53,7 +52,7 @@ const (
buildOutput = "Build successful"
)

func getHelloWorldValidationPod(namespace string) *corev1.Pod {
func getHelloWorldValidationPod(namespace, volumeClaimName string) *corev1.Pod {
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
@@ -80,7 +79,7 @@ func getHelloWorldValidationPod(namespace string) *corev1.Pod {
Name: "scratch",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "scratch",
ClaimName: volumeClaimName,
},
},
},
@@ -121,27 +120,32 @@ func getHelloWorldTask(namespace string, args []string) *v1alpha1.Task {
Name: hwContainerName,
Image: "busybox",
Args: args,
VolumeMounts: []corev1.VolumeMount{
corev1.VolumeMount{
Name: "scratch",
MountPath: logPath,
},
},
},
},
Volumes: []corev1.Volume{
corev1.Volume{
Name: "scratch",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "scratch",
},
},
},
},
},
}
}

func getHelloWorldTaskWithVolume(namespace string, args []string) *v1alpha1.Task {
t := getHelloWorldTask(namespace, args)
t.Spec.BuildSpec.Steps[0].VolumeMounts = []corev1.VolumeMount{
corev1.VolumeMount{
Name: "scratch",
MountPath: logPath,
},
}
t.Spec.BuildSpec.Volumes = []corev1.Volume{
corev1.Volume{
Name: "scratch",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: hwVolumeName,
},
},
},
}
return t
}

func getHelloWorldTaskRun(namespace string) *v1alpha1.TaskRun {
@@ -217,15 +221,15 @@ func getHelloWorldPipelineRun(namespace string) *v1alpha1.PipelineRun {
}
}

func VerifyBuildOutput(t *testing.T, c *clients, namespace string, testStr string) {
func getBuildOutputFromVolume(logger *logging.BaseLogger, c *clients, namespace, testStr string) (string, error) {
// Create Validation Pod
pods := c.KubeClient.Kube.CoreV1().Pods(namespace)

if _, err := pods.Create(getHelloWorldValidationPod(namespace)); err != nil {
t.Fatalf("Failed to create TaskRun `%s`: %s", hwTaskRunName, err)
if _, err := pods.Create(getHelloWorldValidationPod(namespace, hwVolumeName)); err != nil {
return "", fmt.Errorf("failed to create Volume `%s`: %s", hwVolumeName, err)
}

// Verify status of Pod (wait for it)
logger.Infof("Waiting for pod with test volume %s to come up so we can read logs from it", hwVolumeName)
if err := WaitForPodState(c, hwValidationPodName, namespace, func(p *corev1.Pod) (bool, error) {
// the "Running" status is used as "Succeeded" caused issues as the pod succeeds and restarts quickly
// there might be a race condition here and possibly a better way of handling this, perhaps using a Job or different state validation
@@ -234,20 +238,18 @@ func VerifyBuildOutput(t *testing.T, c *clients, namespace string, testStr strin
}
return false, nil
}, "ValidationPodCompleted"); err != nil {
t.Errorf("Error waiting for Pod %s to finish: %s", hwValidationPodName, err)
return "", fmt.Errorf("error waiting for Pod %s to finish: %s", hwValidationPodName, err)
}

// Get validation pod logs and verify that the build executed a container w/ desired output
req := pods.GetLogs(hwValidationPodName, &corev1.PodLogOptions{})
readCloser, err := req.Stream()
if err != nil {
t.Fatalf("Failed to open stream to read: %v", err)
return "", fmt.Errorf("failed to open stream to read: %v", err)
}
defer readCloser.Close()
var buf bytes.Buffer
out := bufio.NewWriter(&buf)
_, err = io.Copy(out, readCloser)
if !strings.Contains(buf.String(), testStr) {
t.Fatalf("Expected output %s from pod %s but got %s", buildOutput, hwValidationPodName, buf.String())
}
return buf.String(), nil
}
5 changes: 0 additions & 5 deletions test/e2e-tests.sh
@@ -45,11 +45,6 @@ function teardown() {

# Called by `fail_test` (provided by `e2e-tests.sh`) to dump info on test failure
function dump_extra_cluster_state() {
for crd in pipelines pipelineruns tasks taskruns resources pipelineparams builds
do
echo ">>> $crd:"
kubectl get $crd -o yaml --all-namespaces
done
echo ">>> Pipeline controller log:"
kubectl -n knative-build-pipeline logs $(get_app_pod build-pipeline-controller knative-build-pipeline)
echo ">>> Pipeline webhook log:"
10 changes: 7 additions & 3 deletions test/pipelinerun_test.go
@@ -19,6 +19,7 @@ limitations under the License.
package test

import (
"fmt"
"strings"
"testing"

@@ -55,8 +56,12 @@ func TestPipelineRun(t *testing.T) {
logger.Infof("Waiting for PipelineRun %s in namespace %s to complete", hwPipelineRunName, namespace)
if err := WaitForPipelineRunState(c, hwPipelineRunName, func(tr *v1alpha1.PipelineRun) (bool, error) {
c := tr.Status.GetCondition(duckv1alpha1.ConditionSucceeded)
if c != nil && c.Status == corev1.ConditionTrue {
return true, nil
if c != nil {
if c.Status == corev1.ConditionTrue {
return true, nil
} else if c.Status == corev1.ConditionFalse {
return true, fmt.Errorf("pipeline run %s failed!", hwPipelineRunName)
}
}
return false, nil
}, "PipelineRunSuccess"); err != nil {
@@ -78,5 +83,4 @@ }
}
}
}
VerifyBuildOutput(t, c, namespace, taskOutput)
}
48 changes: 14 additions & 34 deletions test/taskrun_test.go
@@ -16,18 +16,14 @@ limitations under the License.
package test

import (
"bufio"
"bytes"
"fmt"
"io"
"strings"
"testing"

duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
knativetest "github.com/knative/pkg/test"
"github.com/knative/pkg/test/logging"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1"
)
@@ -41,56 +37,40 @@ func TestTaskRun(t *testing.T) {
knativetest.CleanupOnInterrupt(func() { tearDown(logger, c.KubeClient, namespace) }, logger)
defer tearDown(logger, c.KubeClient, namespace)

// Create Volume
logger.Infof("Creating volume %s to collect log output", hwVolumeName)
if _, err := c.KubeClient.Kube.CoreV1().PersistentVolumeClaims(namespace).Create(getHelloWorldVolumeClaim(namespace)); err != nil {
t.Fatalf("Failed to create Volume `%s`: %s", hwTaskName, err)
}

// Create Task
if _, err := c.TaskClient.Create(getHelloWorldTask(namespace, []string{"/bin/sh", "-c", fmt.Sprintf("echo %s > %s/%s", taskOutput, logPath, logFile)})); err != nil {
logger.Infof("Creating Task and TaskRun in namespace %s", namespace)
if _, err := c.TaskClient.Create(getHelloWorldTaskWithVolume(namespace, []string{"/bin/sh", "-c", fmt.Sprintf("echo %s > %s/%s", taskOutput, logPath, logFile)})); err != nil {
t.Fatalf("Failed to create Task `%s`: %s", hwTaskName, err)
}

if _, err := c.TaskRunClient.Create(getHelloWorldTaskRun(namespace)); err != nil {
t.Fatalf("Failed to create TaskRun `%s`: %s", hwTaskRunName, err)
}

logger.Infof("Waiting for TaskRun %s in namespace %s to complete", hwTaskRunName, namespace)
if err := WaitForTaskRunState(c, hwTaskRunName, func(tr *v1alpha1.TaskRun) (bool, error) {
c := tr.Status.GetCondition(duckv1alpha1.ConditionSucceeded)
if c != nil && c.Status == corev1.ConditionTrue {
return true, nil
if c != nil {
if c.Status == corev1.ConditionTrue {
return true, nil
} else if c.Status == corev1.ConditionFalse {
return true, fmt.Errorf("pipeline run %s failed!", hwPipelineRunName)
}
}
return false, nil
}, "TaskRunSuccess"); err != nil {
t.Errorf("Error waiting for TaskRun %s to finish: %s", hwTaskRunName, err)
}

// The Build created by the TaskRun will have the same name
b, err := c.BuildClient.Get(hwTaskRunName, metav1.GetOptions{})
if err != nil {
t.Errorf("Expected there to be a Build with the same name as TaskRun %s but got error: %s", hwTaskRunName, err)
}
cluster := b.Status.Cluster
if cluster == nil || cluster.PodName == "" {
t.Fatalf("Expected build status to have a podname but it didn't!")
}
podName := cluster.PodName
pods := c.KubeClient.Kube.CoreV1().Pods(namespace)

req := pods.GetLogs(podName, &corev1.PodLogOptions{})
readCloser, err := req.Stream()
logger.Infof("Verifying TaskRun %s output in volume %s", hwTaskRunName, hwVolumeName)
output, err := getBuildOutputFromVolume(logger, c, namespace, taskOutput)
if err != nil {
t.Fatalf("Failed to open stream to read: %v", err)
t.Fatalf("Unable to get build output from volume %s: %s", hwVolumeName, err)
}
defer readCloser.Close()
var buf bytes.Buffer
out := bufio.NewWriter(&buf)
_, err = io.Copy(out, readCloser)
if !strings.Contains(buf.String(), buildOutput) {
t.Fatalf("Expected output %s from pod %s but got %s", buildOutput, podName, buf.String())
if !strings.Contains(output, taskOutput) {
t.Fatalf("Expected output %s from pod %s but got %s", buildOutput, hwValidationPodName, output)
}

// Verify that the init containers Build ran had 'taskOutput' written
VerifyBuildOutput(t, c, namespace, taskOutput)
}
