Merge pull request #407 from kon-angelo/remove-azuremc-cleanup2
Remove azuremachineclass cleanup logic
dkistner authored Dec 2, 2021
2 parents 8fe675f + a627ea9 commit 0d5bbe0
Showing 2 changed files with 12 additions and 26 deletions.
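
The substance of the change is the removal of a one-time migration step: before applying the current machineclass chart, DeployMachineClasses used to delete any leftover AzureMachineClass CRs in the worker's seed namespace. A minimal, self-contained sketch of that removed behaviour, assuming the controller-runtime client and the machine-controller-manager v1alpha1 API (the package name, import paths, and helper name here are illustrative, not the repository's own):

package worker

import (
	"context"
	"fmt"

	machinev1alpha1 "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// cleanupLegacyAzureMachineClasses mirrors the block deleted from
// DeployMachineClasses in the diff below: a single DeleteAllOf call that
// removes every AzureMachineClass CR in the given namespace.
func cleanupLegacyAzureMachineClasses(ctx context.Context, c client.Client, namespace string) error {
	if err := c.DeleteAllOf(ctx, &machinev1alpha1.AzureMachineClass{}, client.InNamespace(namespace)); err != nil {
		return fmt.Errorf("cleaning up older version of Azure machine class CRs failed: %w", err)
	}
	return nil
}

The removed block carried a TODO to drop it in a future version; this commit does exactly that, leaving the chart apply as the only remaining work in DeployMachineClasses.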
pkg/controller/worker/machines.go: 14 changes (3 additions, 11 deletions)
@@ -37,9 +37,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
-var (
-	tagRegex = regexp.MustCompile(`[<>%\\&?/ ]`)
-)
+var tagRegex = regexp.MustCompile(`[<>%\\&?/ ]`)
 
 // MachineClassKind yields the name of machine class kind used by Azure provider.
 func (w *workerDelegate) MachineClassKind() string {
@@ -64,12 +62,6 @@ func (w *workerDelegate) DeployMachineClasses(ctx context.Context) error {
 		}
 	}
 
-	// Delete any older version of AzureMachineClass CRs.
-	// TODO: Remove this clean-up in future version.
-	if err := w.Client().DeleteAllOf(ctx, &machinev1alpha1.AzureMachineClass{}, client.InNamespace(w.worker.Namespace)); err != nil {
-		return fmt.Errorf("cleaning up older version of Azure machine class CRs failed: %w", err)
-	}
-
 	return w.seedChartApplier.Apply(ctx, filepath.Join(azure.InternalChartsPath, "machineclass"), w.worker.Namespace, "machineclass", kubernetes.Values(map[string]interface{}{"machineClasses": w.machineClasses}))
 }

@@ -267,7 +259,7 @@ func (w *workerDelegate) generateMachineConfig(ctx context.Context) error {
 		}
 
 		// Availability Zones
-		var zoneCount = len(pool.Zones)
+		zoneCount := len(pool.Zones)
 		for zoneIndex, zone := range pool.Zones {
 			machineDeployment, machineClassSpec := generateMachineClassAndDeployment(&zoneInfo{
 				name: zone,
@@ -371,7 +363,7 @@ func SanitizeAzureVMTag(label string) string {
 }
 
 func (w *workerDelegate) generateWorkerPoolHash(pool extensionsv1alpha1.WorkerPool, infrastructureStatus *azureapi.InfrastructureStatus, vmoDependency *azureapi.VmoDependency) (string, error) {
-	var additionalHashData = []string{}
+	additionalHashData := []string{}
 
 	// Integrate data disks/volumes in the hash.
 	for _, dv := range pool.DataVolumes {
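
The remaining two hunks in this file are purely stylistic: var zoneCount = len(pool.Zones) and var additionalHashData = []string{} become short variable declarations. Both forms are semantically identical; the short form is the conventional choice inside Go function bodies, as in this small stand-alone sketch (names are made up):

package main

import "fmt"

func main() {
	zones := []string{"zone-a", "zone-b"}

	// Equivalent declarations: the explicit var form and the short form
	// both infer the type from the initializer. The commit switches the
	// function-local declarations to the short form.
	var zoneCountOld = len(zones)
	zoneCountNew := len(zones)

	fmt.Println(zoneCountOld, zoneCountNew)
}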
pkg/controller/worker/machines_test.go: 24 changes (9 additions, 15 deletions)
@@ -44,7 +44,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/utils/pointer"
-	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 var _ = Describe("Machines", func() {
@@ -392,25 +391,20 @@ var _ = Describe("Machines", func() {
 					MachineConfiguration: &machinev1alpha1.MachineConfiguration{},
 				},
 			}
-
 		})
 
 		It("should return the expected machine deployments for profile image types", func() {
 			workerDelegate := wrapNewWorkerDelegate(c, chartApplier, w, cluster, nil)
 
-			gomock.InOrder(
-				c.EXPECT().
-					DeleteAllOf(context.TODO(), &machinev1alpha1.AzureMachineClass{}, client.InNamespace(namespace)),
-				chartApplier.
-					EXPECT().
-					Apply(
-						ctx,
-						filepath.Join(azure.InternalChartsPath, "machineclass"),
-						namespace,
-						"machineclass",
-						kubernetes.Values(machineClasses),
-					),
-			)
+			chartApplier.
+				EXPECT().
+				Apply(
+					ctx,
+					filepath.Join(azure.InternalChartsPath, "machineclass"),
+					namespace,
+					"machineclass",
+					kubernetes.Values(machineClasses),
+				)
 
 			// Test workerDelegate.DeployMachineClasses()
 			err := workerDelegate.DeployMachineClasses(ctx)
