Worker count pointer dereference fix (#3852)
* Fixing worker node group count dereference error for tinkerbell

* Fixing worker node group count dereference for nutanix
pokearu authored Oct 30, 2022
1 parent fffb8c5 commit 91f1ae1
Showing 5 changed files with 51 additions and 3 deletions.
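Note: Cluster.Spec.WorkerNodeGroupConfigurations[].Count is a *int, so comparing two Count fields with != compares pointer addresses rather than replica counts; two node groups asking for the same number of replicas can still look "different". The diffs below switch those comparisons (and the nutanix template value) to the dereferenced int. A minimal, self-contained Go sketch of the pointer-vs-value behavior the fix relies on:

package main

import "fmt"

func main() {
	oldCount := new(int)
	newCount := new(int)
	*oldCount, *newCount = 3, 3

	// Comparing the pointers themselves reports "different" because the
	// addresses differ, even though both node groups want 3 replicas.
	fmt.Println(oldCount != newCount) // true

	// Comparing the dereferenced values: the counts match.
	fmt.Println(*oldCount != *newCount) // false
}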
pkg/providers/nutanix/template.go (2 changes: 1 addition & 1 deletion)
@@ -177,7 +177,7 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupMachineSpec v1
 		"eksaSystemNamespace": constants.EksaSystemNamespace,
 		"format": format,
 		"kubernetesVersion": bundle.KubeDistro.Kubernetes.Tag,
-		"workerReplicas": workerNodeGroupConfiguration.Count,
+		"workerReplicas": *workerNodeGroupConfiguration.Count,
 		"workerPoolName": "md-0",
 		"workerSshAuthorizedKey": workerNodeGroupMachineSpec.Users[0].SshAuthorizedKeys[0],
 		"workerSshUsername": workerNodeGroupMachineSpec.Users[0].Name,
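Note: the dereference above puts the plain replica count into the template values map instead of a *int. A bare *Count panics when the pointer is nil, so the change assumes Count has already been defaulted or validated upstream. A hypothetical guard (not part of this commit) that makes that assumption explicit:

package main

import "fmt"

// workerReplicasOrDefault is a hypothetical helper: it returns the pointed-to
// count when set and a fallback instead of panicking on a nil pointer.
func workerReplicasOrDefault(count *int, fallback int) int {
	if count == nil {
		return fallback
	}
	return *count
}

func main() {
	three := 3
	var unset *int
	fmt.Println(workerReplicasOrDefault(&three, 1)) // 3
	fmt.Println(workerReplicasOrDefault(unset, 1))  // 1
}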
pkg/providers/tinkerbell/assert.go (2 changes: 1 addition & 1 deletion)
@@ -250,7 +250,7 @@ func AssertionsForScaleUpDown(catalogue *hardware.Catalogue, currentSpec *cluste
 
 	for _, nodeGroupNewSpec := range spec.Cluster.Spec.WorkerNodeGroupConfigurations {
 		if workerNodeGrpOldSpec, ok := workerNodeGroupMap[nodeGroupNewSpec.Name]; ok {
-			if nodeGroupNewSpec.Count != workerNodeGrpOldSpec.Count {
+			if *nodeGroupNewSpec.Count != *workerNodeGrpOldSpec.Count {
 				if rollingUpgrade {
 					return fmt.Errorf("cannot perform scale up or down during rolling upgrades")
 				}
pkg/providers/tinkerbell/assert_test.go (27 changes: 27 additions & 0 deletions)
@@ -401,6 +401,33 @@ func TestMinimumHardwareAvailableAssertionForCreate_InsufficientFailsWithoutExte
 	g.Expect(assertion(clusterSpec)).ToNot(gomega.Succeed())
 }
 
+func TestAssertionsForScaleUpDown_Success(t *testing.T) {
+	g := gomega.NewWithT(t)
+
+	catalogue := hardware.NewCatalogue()
+	clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+
+	assertion := tinkerbell.AssertionsForScaleUpDown(catalogue, clusterSpec.Spec, true)
+	newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
+}
+
+func TestAssertionsForScaleUpDown_FailsScaleUpAndRollingError(t *testing.T) {
+	g := gomega.NewWithT(t)
+
+	catalogue := hardware.NewCatalogue()
+	clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+
+	assertion := tinkerbell.AssertionsForScaleUpDown(catalogue, clusterSpec.Spec, true)
+	newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	newClusterSpec.WorkerNodeGroupConfigurations()[0].Count = ptr.Int(2)
+	g.Expect(assertion(newClusterSpec)).NotTo(gomega.Succeed())
+}
+
 func TestHardwareSatisfiesOnlyOneSelectorAssertion_MeetsOnlyOneSelector(t *testing.T) {
 	g := gomega.NewWithT(t)
 
pkg/providers/tinkerbell/tinkerbell_test.go (21 changes: 21 additions & 0 deletions)
@@ -18,6 +18,7 @@ import (
 	"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack"
 	stackmocks "github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack/mocks"
 	"github.com/aws/eks-anywhere/pkg/types"
+	"github.com/aws/eks-anywhere/pkg/utils/ptr"
 )
 
 const (
@@ -857,3 +858,23 @@ func TestProviderGenerateDeploymentFileForSingleNodeClusterSkipLB(t *testing.T)
 	}
 	test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_single_node_skip_lb.yaml")
 }
+
+func TestTinkerbellTemplate_isScaleUpDownSuccess(t *testing.T) {
+	clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
+	mockCtrl := gomock.NewController(t)
+	docker := stackmocks.NewMockDocker(mockCtrl)
+	helm := stackmocks.NewMockHelm(mockCtrl)
+	kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
+	writer := filewritermocks.NewMockFileWriter(mockCtrl)
+	forceCleanup := false
+
+	clusterSpec := givenClusterSpec(t, clusterSpecManifest)
+	datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
+	machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
+
+	newClusterSpec := clusterSpec.DeepCopy()
+	newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(2)
+
+	provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
+	assert.True(t, provider.isScaleUpDown(clusterSpec.Cluster, newClusterSpec.Cluster), "expected scale up down true")
+}
pkg/providers/tinkerbell/upgrade.go (2 changes: 1 addition & 1 deletion)
@@ -287,7 +287,7 @@ func (p *Provider) isScaleUpDown(oldCluster *v1alpha1.Cluster, newCluster *v1alp
 
 	for _, nodeGroupNewSpec := range newCluster.Spec.WorkerNodeGroupConfigurations {
 		if workerNodeGrpOldSpec, ok := workerNodeGroupMap[nodeGroupNewSpec.Name]; ok {
-			if nodeGroupNewSpec.Count != workerNodeGrpOldSpec.Count {
+			if *nodeGroupNewSpec.Count != *workerNodeGrpOldSpec.Count {
 				return true
 			}
 		}
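Note: isScaleUpDown here and AssertionsForScaleUpDown in assert.go now dereference Count on both sides of the comparison, which compares values correctly but still panics if either pointer is nil. If nil counts were ever possible at this point, a nil-aware helper (hypothetical, not what the commit does) would preserve the value semantics:

package main

import "fmt"

// countsDiffer is a hypothetical nil-aware comparison: two nil counts are equal,
// nil versus non-nil is a difference, and otherwise the pointed-to values decide.
func countsDiffer(a, b *int) bool {
	if a == nil || b == nil {
		return a != b
	}
	return *a != *b
}

func main() {
	two, alsoTwo, three := 2, 2, 3
	fmt.Println(countsDiffer(&two, &alsoTwo)) // false
	fmt.Println(countsDiffer(&two, &three))   // true
	fmt.Println(countsDiffer(nil, &two))      // true
}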