diff --git a/pkg/providers/tinkerbell/assert.go b/pkg/providers/tinkerbell/assert.go
index f3a1ee1b0dd8..65324518428f 100644
--- a/pkg/providers/tinkerbell/assert.go
+++ b/pkg/providers/tinkerbell/assert.go
@@ -250,7 +250,7 @@ func AssertionsForScaleUpDown(catalogue *hardware.Catalogue, currentSpec *cluste
 	for _, nodeGroupNewSpec := range spec.Cluster.Spec.WorkerNodeGroupConfigurations {
 		if workerNodeGrpOldSpec, ok := workerNodeGroupMap[nodeGroupNewSpec.Name]; ok {
-			if nodeGroupNewSpec.Count != workerNodeGrpOldSpec.Count {
+			if *nodeGroupNewSpec.Count != *workerNodeGrpOldSpec.Count {
 				if rollingUpgrade {
 					return fmt.Errorf("cannot perform scale up or down during rolling upgrades")
 				}
diff --git a/pkg/providers/tinkerbell/assert_test.go b/pkg/providers/tinkerbell/assert_test.go
index a578e71cb57a..183ea2c539f1 100644
--- a/pkg/providers/tinkerbell/assert_test.go
+++ b/pkg/providers/tinkerbell/assert_test.go
@@ -401,6 +401,33 @@ func TestMinimumHardwareAvailableAssertionForCreate_InsufficientFailsWithoutExte
 	g.Expect(assertion(clusterSpec)).ToNot(gomega.Succeed())
 }
 
+func TestAssertionsForScaleUpDown_Success(t *testing.T) {
+	g := gomega.NewWithT(t)
+
+	catalogue := hardware.NewCatalogue()
+	clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+
+	assertion := tinkerbell.AssertionsForScaleUpDown(catalogue, clusterSpec.Spec, true)
+	newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
+}
+
+func TestAssertionsForScaleUpDown_FailsScaleUpAndRollingError(t *testing.T) {
+	g := gomega.NewWithT(t)
+
+	catalogue := hardware.NewCatalogue()
+	clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+
+	assertion := tinkerbell.AssertionsForScaleUpDown(catalogue, clusterSpec.Spec, true)
+	newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	newClusterSpec.WorkerNodeGroupConfigurations()[0].Count = ptr.Int(2)
+	g.Expect(assertion(newClusterSpec)).NotTo(gomega.Succeed())
+}
+
 func TestHardwareSatisfiesOnlyOneSelectorAssertion_MeetsOnlyOneSelector(t *testing.T) {
 	g := gomega.NewWithT(t)
 
diff --git a/pkg/providers/tinkerbell/tinkerbell_test.go b/pkg/providers/tinkerbell/tinkerbell_test.go
index 13e4ade3bacc..ae5c34eb2947 100644
--- a/pkg/providers/tinkerbell/tinkerbell_test.go
+++ b/pkg/providers/tinkerbell/tinkerbell_test.go
@@ -18,6 +18,7 @@ import (
 	"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack"
 	stackmocks "github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack/mocks"
 	"github.com/aws/eks-anywhere/pkg/types"
+	"github.com/aws/eks-anywhere/pkg/utils/ptr"
 )
 
 const (
@@ -857,3 +858,23 @@ func TestProviderGenerateDeploymentFileForSingleNodeClusterSkipLB(t *testing.T)
 	}
 	test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_single_node_skip_lb.yaml")
 }
+
+func TestTinkerbellTemplate_isScaleUpDownSuccess(t *testing.T) {
+	clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
+	mockCtrl := gomock.NewController(t)
+	docker := stackmocks.NewMockDocker(mockCtrl)
+	helm := stackmocks.NewMockHelm(mockCtrl)
+	kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
+	writer := filewritermocks.NewMockFileWriter(mockCtrl)
+	forceCleanup := false
+
+	clusterSpec := givenClusterSpec(t, clusterSpecManifest)
+	datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
+	machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
+
+	newClusterSpec := clusterSpec.DeepCopy()
+	newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(2)
+
+	provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
+	assert.True(t, provider.isScaleUpDown(clusterSpec.Cluster, newClusterSpec.Cluster), "expected scale up down true")
+}
diff --git a/pkg/providers/tinkerbell/upgrade.go b/pkg/providers/tinkerbell/upgrade.go
index a473247e89aa..19ca132c92c4 100644
--- a/pkg/providers/tinkerbell/upgrade.go
+++ b/pkg/providers/tinkerbell/upgrade.go
@@ -287,7 +287,7 @@ func (p *Provider) isScaleUpDown(oldCluster *v1alpha1.Cluster, newCluster *v1alp
 	for _, nodeGroupNewSpec := range newCluster.Spec.WorkerNodeGroupConfigurations {
 		if workerNodeGrpOldSpec, ok := workerNodeGroupMap[nodeGroupNewSpec.Name]; ok {
-			if nodeGroupNewSpec.Count != workerNodeGrpOldSpec.Count {
+			if *nodeGroupNewSpec.Count != *workerNodeGrpOldSpec.Count {
 				return true
 			}
 		}