Adding autoscaling annotations to nutanix (aws#4314)
* Adding autoscaling annotations to nutanix

* Adding test for autoscaling template nutanix
junshun authored Dec 7, 2022
1 parent 95553fd commit a1c21f7
Showing 5 changed files with 204 additions and 0 deletions.
5 changes: 5 additions & 0 deletions pkg/providers/nutanix/config/md-template.yaml
@@ -5,6 +5,11 @@ metadata:
    cluster.x-k8s.io/cluster-name: "{{.clusterName}}"
  name: "{{.workerNodeGroupName}}"
  namespace: "{{.eksaSystemNamespace}}"
{{- if .autoscalingConfig }}
  annotations:
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "{{ .autoscalingConfig.MinCount }}"
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "{{ .autoscalingConfig.MaxCount }}"
{{- end }}
spec:
  clusterName: "{{.clusterName}}"
  replicas: {{.workerReplicas}}
1 change: 1 addition & 0 deletions pkg/providers/nutanix/template.go
@@ -73,6 +73,7 @@ func (ntb *TemplateBuilder) GenerateCAPISpecWorkers(clusterSpec *cluster.Spec, w
		values := buildTemplateMapMD(clusterSpec, ntb.workerNodeGroupMachineSpecs[workerNodeGroupConfiguration.MachineGroupRef.Name], workerNodeGroupConfiguration)
		values["workloadTemplateName"] = workloadTemplateNames[workerNodeGroupConfiguration.Name]
		values["workloadkubeadmconfigTemplateName"] = kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name]
		values["autoscalingConfig"] = workerNodeGroupConfiguration.AutoScalingConfiguration

		bytes, err := templater.Execute(defaultClusterConfigMD, values)
		if err != nil {
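Taken together, the two hunks above are the whole feature: GenerateCAPISpecWorkers now copies the worker node group's AutoScalingConfiguration into the template data map, and md-template.yaml emits the annotations block only when that value is non-empty, so clusters without an autoscalingConfiguration render exactly as before. Below is a minimal, self-contained sketch of that conditional rendering with Go's text/template; it is not the EKS-A templater package, and the struct and map keys are illustrative only.

package main

import (
	"os"
	"text/template"
)

// autoscaling mirrors the MinCount/MaxCount fields the template references;
// the real type lives in the EKS-A API package.
type autoscaling struct {
	MinCount int
	MaxCount int
}

const md = `metadata:
  name: "{{.workerNodeGroupName}}"
{{- if .autoscalingConfig }}
  annotations:
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "{{ .autoscalingConfig.MinCount }}"
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "{{ .autoscalingConfig.MaxCount }}"
{{- end }}
`

func main() {
	values := map[string]interface{}{
		"workerNodeGroupName": "eksa-unit-test",
		// Delete this key (or set it to nil) and the annotations block vanishes.
		"autoscalingConfig": &autoscaling{MinCount: 3, MaxCount: 5},
	}
	tmpl := template.Must(template.New("md").Parse(md))
	if err := tmpl.Execute(os.Stdout, values); err != nil {
		panic(err)
	}
}

Because the `{{- if }}` guard treats a missing or nil value as empty, omitting the autoscaling configuration simply drops the annotations block from the generated MachineDeployment.
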
44 changes: 44 additions & 0 deletions pkg/providers/nutanix/template_test.go
@@ -26,6 +26,9 @@ var nutanixDatacenterConfigSpec string
//go:embed testdata/machineConfig.yaml
var nutanixMachineConfigSpec string

//go:embed testdata/eksa-cluster-autoscaler.yaml
var nutanixClusterConfigSpecWithAutoscaler string

func fakemarshal(v interface{}) ([]byte, error) {
	return []byte{}, errors.New("marshalling failed")
}
@@ -102,3 +105,44 @@ func TestNewNutanixTemplateBuilderGenerateCAPISpecSecret(t *testing.T) {
	assert.Nil(t, secretSpec)
	assert.Error(t, err)
}

func TestNutanixTemplateBuilderGenerateCAPISpecForCreateWithAutoscalingConfiguration(t *testing.T) {
	clusterConf := &anywherev1.Cluster{}
	err := yaml.Unmarshal([]byte(nutanixClusterConfigSpecWithAutoscaler), clusterConf)
	require.NoError(t, err)

	dcConf := &anywherev1.NutanixDatacenterConfig{}
	err = yaml.Unmarshal([]byte(nutanixDatacenterConfigSpec), dcConf)
	require.NoError(t, err)

	machineConf := &anywherev1.NutanixMachineConfig{}
	err = yaml.Unmarshal([]byte(nutanixMachineConfigSpec), machineConf)
	require.NoError(t, err)

	workerConfs := map[string]anywherev1.NutanixMachineConfigSpec{
		"eksa-unit-test": machineConf.Spec,
	}

	t.Setenv(constants.NutanixUsernameKey, "admin")
	t.Setenv(constants.NutanixPasswordKey, "password")
	creds := GetCredsFromEnv()

	builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now)
	assert.NotNil(t, builder)

	v := version.Info{GitVersion: "v0.0.1"}
	buildSpec, err := cluster.NewSpecFromClusterConfig("testdata/eksa-cluster-autoscaler.yaml", v, cluster.WithReleasesManifest("testdata/simple_release.yaml"))
	assert.NoError(t, err)

	workloadTemplateNames := map[string]string{
		"eksa-unit-test": "eksa-unit-test",
	}
	kubeadmconfigTemplateNames := map[string]string{
		"eksa-unit-test": "eksa-unit-test",
	}
	workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames)
	assert.NoError(t, err)
	expectedWorkerSpec, err := os.ReadFile("testdata/expected_results_autoscaling_md.yaml")
	require.NoError(t, err)
	assert.Equal(t, workerSpec, expectedWorkerSpec)
}
75 changes: 75 additions & 0 deletions pkg/providers/nutanix/testdata/eksa-cluster-autoscaler.yaml
@@ -0,0 +1,75 @@
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Cluster
metadata:
  name: eksa-unit-test
  namespace: default
spec:
  kubernetesVersion: "1.19"
  controlPlaneConfiguration:
    name: eksa-unit-test
    count: 3
    endpoint:
      host: test-ip
    machineGroupRef:
      name: eksa-unit-test
      kind: NutanixMachineConfig
  workerNodeGroupConfigurations:
    - count: 3
      autoscalingConfiguration:
        minCount: 3
        maxCount: 5
      name: eksa-unit-test
      machineGroupRef:
        name: eksa-unit-test
        kind: NutanixMachineConfig
  externalEtcdConfiguration:
    name: eksa-unit-test
    count: 3
    machineGroupRef:
      name: eksa-unit-test
      kind: NutanixMachineConfig
  datacenterRef:
    kind: NutanixDatacenterConfig
    name: eksa-unit-test
  clusterNetwork:
    cni: "cilium"
    pods:
      cidrBlocks:
        - 192.168.0.0/16
    services:
      cidrBlocks:
        - 10.96.0.0/12
---
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: NutanixDatacenterConfig
metadata:
  name: eksa-unit-test
  namespace: default
spec:
  endpoint: "prism.nutanix.com"
  port: 9440
---
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: NutanixMachineConfig
metadata:
  name: eksa-unit-test
  namespace: default
spec:
  vcpusPerSocket: 1
  vcpuSockets: 4
  memorySize: 8Gi
  image:
    type: "name"
    name: "prism-image"
  cluster:
    type: "name"
    name: "prism-cluster"
  subnet:
    type: "name"
    name: "prism-subnet"
  systemDiskSize: 40Gi
  osFamily: "ubuntu"
  users:
    - name: "mySshUsername"
      sshAuthorizedKeys:
        - "mySshAuthorizedKey"
79 changes: 79 additions & 0 deletions pkg/providers/nutanix/testdata/expected_results_autoscaling_md.yaml
@@ -0,0 +1,79 @@
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  labels:
    cluster.x-k8s.io/cluster-name: "eksa-unit-test"
  name: "eksa-unit-test-eksa-unit-test"
  namespace: "eksa-system"
  annotations:
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "3"
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "5"
spec:
  clusterName: "eksa-unit-test"
  replicas: 3
  selector:
    matchLabels: {}
  template:
    metadata:
      labels:
        cluster.x-k8s.io/cluster-name: "eksa-unit-test"
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: KubeadmConfigTemplate
          name: "eksa-unit-test"
      clusterName: "eksa-unit-test"
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
        kind: NutanixMachineTemplate
        name: "eksa-unit-test"
      version: "v1.19.8-eks-1-19-4"
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixMachineTemplate
metadata:
  name: "eksa-unit-test"
  namespace: "eksa-system"
spec:
  template:
    spec:
      providerID: "nutanix://eksa-unit-test-m1"
      vcpusPerSocket: 1
      vcpuSockets: 4
      memorySize: 8Gi
      systemDiskSize: 40Gi
      image:
        type: name
        name: "prism-image"

      cluster:
        type: name
        name: "prism-cluster"
      subnet:
        - type: name
          name: "prism-subnet"
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: "eksa-unit-test"
  namespace: "eksa-system"
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          kubeletExtraArgs:
            # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
            # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
            #cgroup-driver: cgroupfs
            eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
      users:
        - name: "mySshUsername"
          lockPassword: false
          sudo: ALL=(ALL) NOPASSWD:ALL
          sshAuthorizedKeys:
            - "mySshAuthorizedKey"

---

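The rendered MachineDeployment above carries the scaling bounds as plain string annotations; downstream, the Cluster Autoscaler's cluster-api provider reads those same keys to decide how far it may scale the node group. Here is a rough sketch of parsing them, included for illustration only and assumed, not taken from Cluster Autoscaler's actual code:

package main

import (
	"fmt"
	"strconv"
)

const (
	minSizeAnnotation = "cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size"
	maxSizeAnnotation = "cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size"
)

// bounds extracts the min/max node-group sizes from a MachineDeployment's annotations.
func bounds(annotations map[string]string) (min, max int, err error) {
	if min, err = strconv.Atoi(annotations[minSizeAnnotation]); err != nil {
		return 0, 0, fmt.Errorf("parsing min size: %w", err)
	}
	if max, err = strconv.Atoi(annotations[maxSizeAnnotation]); err != nil {
		return 0, 0, fmt.Errorf("parsing max size: %w", err)
	}
	return min, max, nil
}

func main() {
	// Annotation values as rendered in expected_results_autoscaling_md.yaml above.
	md := map[string]string{
		minSizeAnnotation: "3",
		maxSizeAnnotation: "5",
	}
	min, max, err := bounds(md)
	fmt.Println(min, max, err) // 3 5 <nil>
}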