diff --git a/test/e2e/upgrade/upgrade.go b/test/e2e/upgrade/upgrade.go
index 9b3ce092bedc..41a0d8ab5360 100644
--- a/test/e2e/upgrade/upgrade.go
+++ b/test/e2e/upgrade/upgrade.go
@@ -41,6 +41,7 @@ import (
 	"github.com/openshift/origin/test/e2e/upgrade/adminack"
 	"github.com/openshift/origin/test/e2e/upgrade/dns"
 	"github.com/openshift/origin/test/e2e/upgrade/manifestdelete"
+	mc "github.com/openshift/origin/test/extended/machine_config"
 	"github.com/openshift/origin/test/extended/prometheus"
 	"github.com/openshift/origin/test/extended/util/disruption"
 	"github.com/openshift/origin/test/extended/util/operator"
@@ -625,6 +626,21 @@ func clusterUpgrade(f *framework.Framework, c configv1client.Interface, dc dynam
 		func() (error, bool) {
 			framework.Logf("Waiting on pools to be upgraded")
 			if err := wait.PollImmediate(10*time.Second, 30*time.Minute, func() (bool, error) {
+
+				nodes, err := kubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
+				if err != nil {
+					framework.Logf("error getting nodes %v", err)
+					return false, nil
+				}
+
+				allNodesReady := true
+				for _, node := range nodes.Items {
+					if !mc.IsNodeReady(node) {
+						allNodesReady = false
+						break
+					}
+				}
+
 				mcps := dc.Resource(schema.GroupVersionResource{
 					Group:    "machineconfiguration.openshift.io",
 					Version:  "v1",
@@ -641,7 +657,7 @@ func clusterUpgrade(f *framework.Framework, c configv1client.Interface, dc dynam
 					allUpdated = allUpdated && updated

 					// Invariant: when CVO reaches level, MCO is required to have rolled out control plane updates
-					if p.GetName() == "master" && requiresUpdate && errMasterUpdating == nil {
+					if p.GetName() == "master" && requiresUpdate && !allNodesReady && errMasterUpdating == nil {
 						errMasterUpdating = fmt.Errorf("the %q pool should be updated before the CVO reports available at the new version", p.GetName())
 						framework.Logf("Invariant violation detected: %s", errMasterUpdating)
 					}
diff --git a/test/extended/machine_config/helpers.go b/test/extended/machine_config/helpers.go
index 10dc13d36846..43e93857bb04 100644
--- a/test/extended/machine_config/helpers.go
+++ b/test/extended/machine_config/helpers.go
@@ -494,7 +494,7 @@ func ValidateMCNForNodeInPool(oc *exutil.CLI, clientSet *machineconfigclient.Cli
 // `GetRandomNode` gets a random node from with a given role and checks whether the node is ready. If no
 // nodes are ready, it will wait for up to 5 minutes for a node to become available.
 func GetRandomNode(oc *exutil.CLI, role string) corev1.Node {
-	if node := getRandomNode(oc, role); isNodeReady(node) {
+	if node := getRandomNode(oc, role); IsNodeReady(node) {
 		return node
 	}
@@ -503,7 +503,7 @@
 	framework.Logf("No ready nodes found with role '%s', waiting up to %s for a ready node to become available", role, waitPeriod)
 	var targetNode corev1.Node
 	o.Eventually(func() bool {
-		if node := getRandomNode(oc, role); isNodeReady(node) {
+		if node := getRandomNode(oc, role); IsNodeReady(node) {
 			targetNode = node
 			return true
 		}
@@ -549,7 +549,7 @@ func GetAllNodes(oc *exutil.CLI) ([]corev1.Node, error) {
 }

 // `isNodeReady` determines if a given node is ready
-func isNodeReady(node corev1.Node) bool {
+func IsNodeReady(node corev1.Node) bool {
 	// If the node is cordoned, it is not ready.
 	if node.Spec.Unschedulable {
 		return false
@@ -1038,7 +1038,7 @@ func GetNewReadyNodeInMachine(oc *exutil.CLI, machineName string) (corev1.Node,
 		// Check if node is in desiredStatus
 		framework.Logf("Checking if node '%v' is ready.", node.Name)
-		if isNodeReady(node) {
+		if IsNodeReady(node) {
 			framework.Logf("Node '%v' is ready.", node.Name)
 			desiredNode = node
 			err = nil
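For reference, here is a minimal sketch of what a readiness helper shaped like the now-exported `IsNodeReady` typically checks. The hunk above shows only the cordon check (`node.Spec.Unschedulable`); the `NodeReady` condition scan below is an assumption about the rest of the helper, not the actual body from `helpers.go`:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// isNodeReadySketch mirrors the likely shape of IsNodeReady: a cordoned
// node is never ready, and otherwise the node must report a NodeReady
// condition with status True. (Assumed logic -- only the Unschedulable
// check is visible in the diff.)
func isNodeReadySketch(node corev1.Node) bool {
	// If the node is cordoned, it is not ready.
	if node.Spec.Unschedulable {
		return false
	}
	// Otherwise, look for the Ready condition reported by the kubelet.
	for _, cond := range node.Status.Conditions {
		if cond.Type == corev1.NodeReady {
			return cond.Status == corev1.ConditionTrue
		}
	}
	// No Ready condition reported: treat the node as not ready.
	return false
}

func main() {
	node := corev1.Node{
		Status: corev1.NodeStatus{
			Conditions: []corev1.NodeCondition{
				{Type: corev1.NodeReady, Status: corev1.ConditionTrue},
			},
		},
	}
	fmt.Println(isNodeReadySketch(node)) // true
}
```

With a helper of that shape, the gate added in `upgrade.go` follows directly: the master-pool invariant violation is only raised while `requiresUpdate` holds and at least one node is still not ready, which avoids flagging a pool that merely has stale status after all nodes have already settled.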