Add additional checks to E2E tests
Signed-off-by: Marko Mudrinić <mudrinic.mare@gmail.com>
xmudrii committed Mar 28, 2019
1 parent e8851fe commit c82e631
Showing 3 changed files with 159 additions and 124 deletions.
69 changes: 44 additions & 25 deletions test/e2e/conformance_test.go
@@ -21,45 +21,52 @@ package e2e
import (
"fmt"
"testing"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestClusterConformance(t *testing.T) {
t.Parallel()

testcases := []struct {
- name              string
- provider          string
- kubernetesVersion string
- scenario          string
- configFilePath    string
+ name                  string
+ provider              string
+ kubernetesVersion     string
+ scenario              string
+ configFilePath        string
+ expectedNumberOfNodes int
}{
{
- name:              "verify k8s 1.13.5 cluster deployment on AWS",
- provider:          AWS,
- kubernetesVersion: "v1.13.5",
- scenario:          NodeConformance,
- configFilePath:    "../../test/e2e/testdata/config_aws_1.13.5.yaml",
+ name:                  "verify k8s 1.13.5 cluster deployment on AWS",
+ provider:              AWS,
+ kubernetesVersion:     "v1.13.5",
+ scenario:              NodeConformance,
+ configFilePath:        "../../test/e2e/testdata/config_aws_1.13.5.yaml",
+ expectedNumberOfNodes: 6, // 3 control planes + 3 workers
},
{
- name:              "verify k8s 1.14.0 cluster deployment on AWS",
- provider:          AWS,
- kubernetesVersion: "v1.14.0",
- scenario:          NodeConformance,
- configFilePath:    "../../test/e2e/testdata/config_aws_1.14.0.yaml",
+ name:                  "verify k8s 1.14.0 cluster deployment on AWS",
+ provider:              AWS,
+ kubernetesVersion:     "v1.14.0",
+ scenario:              NodeConformance,
+ configFilePath:        "../../test/e2e/testdata/config_aws_1.14.0.yaml",
+ expectedNumberOfNodes: 6, // 3 control planes + 3 workers
},
{
- name:              "verify k8s 1.13.5 cluster deployment on DO",
- provider:          DigitalOcean,
- kubernetesVersion: "v1.13.5",
- scenario:          NodeConformance,
- configFilePath:    "../../test/e2e/testdata/config_do_1.13.5.yaml",
+ name:                  "verify k8s 1.13.5 cluster deployment on DO",
+ provider:              DigitalOcean,
+ kubernetesVersion:     "v1.13.5",
+ scenario:              NodeConformance,
+ configFilePath:        "../../test/e2e/testdata/config_do_1.13.5.yaml",
+ expectedNumberOfNodes: 6, // 3 control planes + 3 workers
},
{
- name:              "verify k8s 1.14.0 cluster deployment on DO",
- provider:          DigitalOcean,
- kubernetesVersion: "v1.14.0",
- scenario:          NodeConformance,
- configFilePath:    "../../test/e2e/testdata/config_do_1.14.0.yaml",
+ name:                  "verify k8s 1.14.0 cluster deployment on DO",
+ provider:              DigitalOcean,
+ kubernetesVersion:     "v1.14.0",
+ scenario:              NodeConformance,
+ configFilePath:        "../../test/e2e/testdata/config_do_1.14.0.yaml",
+ expectedNumberOfNodes: 6, // 3 control planes + 3 workers
},
}

@@ -114,6 +121,18 @@ func TestClusterConformance(t *testing.T) {
t.Fatalf("creating kubeconfig failed: %v", err)
}

t.Log("waiting for nodes to become ready")
err = waitForNodesReady(client, tc.expectedNumberOfNodes)
if err != nil {
t.Fatalf("nodes are not ready: %v", err)
}

t.Log("verifying cluster version")
err = verifyVersion(client, metav1.NamespaceSystem, tc.kubernetesVersion)
if err != nil {
t.Fatalf("version mismatch: %v", err)
}

t.Log("run e2e tests")
err = clusterVerifier.Verify(tc.scenario)
if err != nil {
89 changes: 89 additions & 0 deletions test/e2e/main_test.go
@@ -19,8 +19,18 @@ limitations under the License.
package e2e

import (
"context"
"flag"
"fmt"
"strings"
"testing"
"time"

"github.com/Masterminds/semver"
"github.com/pkg/errors"

"k8s.io/apimachinery/pkg/util/wait"
dynclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// testRunIdentifier aka. the build number, a unique identifier for the test run.
@@ -52,3 +62,82 @@ func setupTearDown(p Provisioner, k Kubeone) func(t *testing.T) {
}
}
}

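// waitForNodesReady polls the cluster until the expected number of nodes is
// registered and every node reports the NodeReady condition as true.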
func waitForNodesReady(client dynclient.Client, expectedNumberOfNodes int) error {
return wait.Poll(5*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
nodes := corev1.NodeList{}
nodeListOpts := dynclient.ListOptions{}

err := client.List(context.Background(), &nodeListOpts, &nodes)
if err != nil {
return false, errors.Wrap(err, "unable to list nodes")
}

if len(nodes.Items) != expectedNumberOfNodes {
return false, nil
}

for _, n := range nodes.Items {
for _, c := range n.Status.Conditions {
if c.Type == corev1.NodeReady && c.Status != corev1.ConditionTrue {
return false, nil
}
}
}
return true, nil
})
}

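// verifyVersion checks that the kubelet on each control plane node and the
// kube-apiserver pods in the given namespace run the desired Kubernetes version.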
func verifyVersion(client dynclient.Client, namespace string, targetVersion string) error {
reqVer, err := semver.NewVersion(targetVersion)
if err != nil {
return errors.Wrap(err, "desired version is invalid")
}

nodes := corev1.NodeList{}
nodeListOpts := dynclient.ListOptions{}
_ = nodeListOpts.SetLabelSelector(fmt.Sprintf("%s=%s", labelControlPlaneNode, ""))
err = client.List(context.Background(), &nodeListOpts, &nodes)
if err != nil {
return errors.Wrap(err, "failed to list nodes")
}

// Kubelet version check
for _, n := range nodes.Items {
kubeletVer, err := semver.NewVersion(n.Status.NodeInfo.KubeletVersion)
if err != nil {
return err
}
if reqVer.Compare(kubeletVer) != 0 {
return errors.Errorf("kubelet version mismatch: expected %v, got %v", reqVer.String(), kubeletVer.String())
}
}

apiserverPods := corev1.PodList{}
podsListOpts := dynclient.ListOptions{Namespace: namespace}
_ = podsListOpts.SetLabelSelector("component=kube-apiserver")
err = client.List(context.Background(), &podsListOpts, &apiserverPods)
if err != nil {
return errors.Wrap(err, "unable to list apiserver pods")
}

for _, p := range apiserverPods.Items {
apiserverVer, err := parseContainerImageVersion(p.Spec.Containers[0].Image)
if err != nil {
return errors.Wrap(err, "unable to parse apiserver version")
}
if reqVer.Compare(apiserverVer) != 0 {
return errors.Errorf("apiserver version mismatch: expected %v, got %v", reqVer.String(), apiserverVer.String())
}
}

return nil
}

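// parseContainerImageVersion extracts the tag from an image reference of the
// form "repository:tag" and parses it as a semantic version.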
func parseContainerImageVersion(image string) (*semver.Version, error) {
ver := strings.Split(image, ":")
if len(ver) != 2 {
return nil, errors.Errorf("invalid container image format: %s", image)
}
return semver.NewVersion(ver[1])
}
125 changes: 26 additions & 99 deletions test/e2e/upgrade_test.go
@@ -43,31 +43,34 @@ func TestClusterUpgrade(t *testing.T) {
t.Parallel()

testcases := []struct {
- name              string
- provider          string
- initialVersion    string
- targetVersion     string
- initialConfigPath string
- targetConfigPath  string
- scenario          string
+ name                  string
+ provider              string
+ initialVersion        string
+ targetVersion         string
+ initialConfigPath     string
+ targetConfigPath      string
+ expectedNumberOfNodes int
+ scenario              string
}{
{
- name:              "upgrade k8s 1.13.5 cluster to 1.14.0 on AWS",
- provider:          AWS,
- initialVersion:    "v1.13.5",
- targetVersion:     "v1.14.0",
- initialConfigPath: "../../test/e2e/testdata/config_aws_1.13.5.yaml",
- targetConfigPath:  "../../test/e2e/testdata/config_aws_1.14.0.yaml",
- scenario:          NodeConformance,
+ name:                  "upgrade k8s 1.13.5 cluster to 1.14.0 on AWS",
+ provider:              AWS,
+ initialVersion:        "v1.13.5",
+ targetVersion:         "v1.14.0",
+ initialConfigPath:     "../../test/e2e/testdata/config_aws_1.13.5.yaml",
+ targetConfigPath:      "../../test/e2e/testdata/config_aws_1.14.0.yaml",
+ expectedNumberOfNodes: 6, // 3 control planes + 3 workers
+ scenario:              NodeConformance,
},
{
- name:              "upgrade k8s 1.13.5 cluster to 1.14.0 on DO",
- provider:          DigitalOcean,
- initialVersion:    "v1.13.5",
- targetVersion:     "v1.14.0",
- initialConfigPath: "../../test/e2e/testdata/config_do_1.13.5.yaml",
- targetConfigPath:  "../../test/e2e/testdata/config_do_1.14.0.yaml",
- scenario:          NodeConformance,
+ name:                  "upgrade k8s 1.13.5 cluster to 1.14.0 on DO",
+ provider:              DigitalOcean,
+ initialVersion:        "v1.13.5",
+ targetVersion:         "v1.14.0",
+ initialConfigPath:     "../../test/e2e/testdata/config_do_1.13.5.yaml",
+ targetConfigPath:      "../../test/e2e/testdata/config_do_1.14.0.yaml",
+ expectedNumberOfNodes: 6, // 3 control planes + 3 workers
+ scenario:              NodeConformance,
},
}

@@ -132,7 +135,7 @@
}

t.Log("waiting for nodes to become ready")
- err = waitForNodesReady(client)
+ err = waitForNodesReady(client, tc.expectedNumberOfNodes)
if err != nil {
t.Fatalf("nodes are not ready: %v", err)
}
@@ -156,7 +159,7 @@
}

t.Log("waiting for nodes to become ready")
- err = waitForNodesReady(client)
+ err = waitForNodesReady(client, tc.expectedNumberOfNodes)
if err != nil {
t.Fatalf("nodes are not ready: %v", err)
}
@@ -182,28 +185,6 @@
}
}

func waitForNodesReady(client dynclient.Client) error {
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
nodes := corev1.NodeList{}
nodeListOpts := dynclient.ListOptions{}
nodeListOpts.SetLabelSelector(fmt.Sprintf("%s=%s", labelControlPlaneNode, ""))

err := client.List(context.Background(), &nodeListOpts, &nodes)
if err != nil {
return false, errors.Wrap(err, "unable to list nodes")
}

for _, n := range nodes.Items {
for _, c := range n.Status.Conditions {
if c.Type == corev1.NodeReady && c.Status != corev1.ConditionTrue {
return false, nil
}
}
}
return true, nil
})
}

func waitForNodesUpgraded(client dynclient.Client, targetVersion string) error {
reqVer, err := semver.NewVersion(targetVersion)
if err != nil {
@@ -232,57 +213,3 @@ func waitForNodesUpgraded(client dynclient.Client, targetVersion string) error {
return true, nil
})
}

func verifyVersion(client dynclient.Client, namespace string, targetVersion string) error {
reqVer, err := semver.NewVersion(targetVersion)
if err != nil {
return errors.Wrap(err, "desired version is invalid")
}

nodes := corev1.NodeList{}
nodeListOpts := dynclient.ListOptions{}
_ = nodeListOpts.SetLabelSelector(fmt.Sprintf("%s=%s", labelControlPlaneNode, ""))
err = client.List(context.Background(), &nodeListOpts, &nodes)
if err != nil {
return errors.Wrap(err, "failed to list nodes")
}

// Kubelet version check
for _, n := range nodes.Items {
kubeletVer, err := semver.NewVersion(n.Status.NodeInfo.KubeletVersion)
if err != nil {
return err
}
if reqVer.Compare(kubeletVer) != 0 {
return errors.Errorf("kubelet version mismatch: expected %v, got %v", reqVer.String(), kubeletVer.String())
}
}

apiserverPods := corev1.PodList{}
podsListOpts := dynclient.ListOptions{Namespace: namespace}
_ = podsListOpts.SetLabelSelector("component=kube-apiserver")
err = client.List(context.Background(), &podsListOpts, &apiserverPods)
if err != nil {
return errors.Wrap(err, "unable to list apiserver pods")
}

for _, p := range apiserverPods.Items {
apiserverVer, err := parseContainerImageVersion(p.Spec.Containers[0].Image)
if err != nil {
return errors.Wrap(err, "unable to parse apiserver version")
}
if reqVer.Compare(apiserverVer) != 0 {
return errors.Errorf("apiserver version mismatch: expected %v, got %v", reqVer.String(), apiserverVer.String())
}
}

return nil
}

func parseContainerImageVersion(image string) (*semver.Version, error) {
ver := strings.Split(image, ":")
if len(ver) != 2 {
return nil, errors.Errorf("invalid container image format: %s", image)
}
return semver.NewVersion(ver[1])
}
