diff --git a/pkg/azure/access/helpers/disk.go b/pkg/azure/access/helpers/disk.go
index fc2a2b7a..78f8ac8e 100644
--- a/pkg/azure/access/helpers/disk.go
+++ b/pkg/azure/access/helpers/disk.go
@@ -6,6 +6,8 @@ package helpers
 
 import (
     "context"
+    "time"
+
     "k8s.io/klog/v2"
 
     "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
@@ -17,6 +19,9 @@ import (
 
 const (
     diskDeleteServiceLabel = "disk_delete"
+    diskCreateServiceLabel = "disk_create"
+
+    defaultDiskOperationTimeout = 10 * time.Minute
 )
 
 // DeleteDisk deletes disk for passed in resourceGroup and diskName.
@@ -38,3 +43,24 @@ func DeleteDisk(ctx context.Context, client *armcompute.DisksClient, resourceGro
     klog.Infof("Successfully deleted Disk: %s, for ResourceGroup: %s", diskName, resourceGroup)
     return
 }
+
+// CreateDisk creates a Disk given a resourceGroup and disk creation parameters.
+// NOTE: All calls to this Azure API are instrumented as prometheus metric.
+func CreateDisk(ctx context.Context, client *armcompute.DisksClient, resourceGroup, diskName string, diskCreationParams armcompute.Disk) (disk *armcompute.Disk, err error) {
+    defer instrument.AZAPIMetricRecorderFn(diskCreateServiceLabel, &err)()
+
+    createCtx, cancelFn := context.WithTimeout(ctx, defaultDiskOperationTimeout)
+    defer cancelFn()
+    poller, err := client.BeginCreateOrUpdate(createCtx, resourceGroup, diskName, diskCreationParams, nil)
+    if err != nil {
+        errors.LogAzAPIError(err, "Failed to trigger create of Disk [Name: %s, ResourceGroup: %s]", diskName, resourceGroup)
+        return
+    }
+    createResp, err := poller.PollUntilDone(createCtx, nil)
+    if err != nil {
+        errors.LogAzAPIError(err, "Polling failed while waiting for create of Disk: %s for ResourceGroup: %s", diskName, resourceGroup)
+        return
+    }
+    disk = &createResp.Disk
+    return
+}
diff --git a/pkg/azure/api/providerspec.go b/pkg/azure/api/providerspec.go
index 72f05687..ea122871 100644
--- a/pkg/azure/api/providerspec.go
+++ b/pkg/azure/api/providerspec.go
@@ -197,13 +197,15 @@ type AzureDataDisk struct {
     Name string `json:"name,omitempty"`
     // Lun specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and
     // therefore must be unique for each data disk attached to a VM.
-    Lun *int32 `json:"lun,omitempty"`
+    Lun int32 `json:"lun,omitempty"`
     // Caching specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite.
     Caching string `json:"caching,omitempty"`
     // StorageAccountType is the storage account type for a managed disk.
     StorageAccountType string `json:"storageAccountType,omitempty"`
     // DiskSizeGB is the size of an empty disk in gigabytes.
     DiskSizeGB int32 `json:"diskSizeGB,omitempty"`
+    // ImageRef optionally specifies an image source for the data disk.
+    ImageRef *AzureImageReference `json:"imageRef,omitempty"`
 }
 
 // AzureManagedDiskParameters is the parameters of a managed disk.
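The new ImageRef field above lets a data disk be created from an image instead of always being created empty. A minimal sketch of how such a disk might be specified — the name, size, and URN are example values only; the field names come from the AzureDataDisk and AzureImageReference types referenced in this change:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/gardener/machine-controller-manager-provider-azure/pkg/azure/api"
)

func main() {
	// A data disk sourced from a marketplace image rather than created empty.
	// The URN format assumed by the provider is publisher:offer:sku:version;
	// the concrete URN below is only an example.
	disk := api.AzureDataDisk{
		Name:               "data",
		Lun:                1,
		Caching:            "None",
		StorageAccountType: "StandardSSD_LRS",
		DiskSizeGB:         50,
		ImageRef: &api.AzureImageReference{
			URN: to.Ptr("Canonical:0001-com-ubuntu-server-jammy:22_04-lts-gen2:latest"),
		},
	}
	fmt.Printf("%+v\n", disk)
}
```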
diff --git a/pkg/azure/api/validation/validation.go b/pkg/azure/api/validation/validation.go
index 9c7b412f..5df94951 100644
--- a/pkg/azure/api/validation/validation.go
+++ b/pkg/azure/api/validation/validation.go
@@ -211,17 +211,14 @@ func validateDataDisks(disks []api.AzureDataDisk, fldPath *field.Path) field.Err
     luns := make(map[int32]int, len(disks))
     for _, disk := range disks {
-        if disk.Lun == nil {
-            allErrs = append(allErrs, field.Required(fldPath.Child("lun"), "must provide lun"))
+        // Lun should always start from 0 and it cannot be negative. The max value of lun will depend upon the VM type to which the disks are associated.
+        // Therefore, we will avoid any max limit check for lun and delegate that responsibility to the provider as that mapping could change over time.
+        if disk.Lun < 0 {
+            allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), disk.Lun, "lun must be a non-negative number"))
         } else {
-            // Lun should always start from 0 and it cannot be negative. The max value of lun will depend upon the VM type to which the disks are associated.
-            // Therefore, we will avoid any max limit check for lun and delegate that responsibility to the provider as that mapping could change over time.
-            if *disk.Lun < 0 {
-                allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), *disk.Lun, "lun must be a positive number"))
-            } else {
-                luns[*disk.Lun]++
-            }
+            luns[disk.Lun]++
         }
+
         if disk.DiskSizeGB <= 0 {
             allErrs = append(allErrs, field.Invalid(fldPath.Child("diskSizeGB"), disk.DiskSizeGB, "DataDisk size must be positive and greater than 0"))
         }
diff --git a/pkg/azure/api/validation/validation_test.go b/pkg/azure/api/validation/validation_test.go
index e25f8012..b82b1d78 100644
--- a/pkg/azure/api/validation/validation_test.go
+++ b/pkg/azure/api/validation/validation_test.go
@@ -277,30 +277,23 @@ func TestValidateDataDisks(t *testing.T) {
         matcher gomegatypes.GomegaMatcher
     }{
         {"should forbid empty storageAccountType",
-            []api.AzureDataDisk{{Name: "disk-1", Lun: pointer.Int32(0), StorageAccountType: "", DiskSizeGB: 10}}, 1,
+            []api.AzureDataDisk{{Name: "disk-1", Lun: 0, StorageAccountType: "", DiskSizeGB: 10}}, 1,
             ConsistOf(PointTo(MatchFields(IgnoreExtras, Fields{"Type": Equal(field.ErrorTypeRequired), "Field": Equal("providerSpec.properties.storageProfile.dataDisks.storageAccountType")}))),
         },
         {"should forbid negative diskSize and empty storageAccountType",
-            []api.AzureDataDisk{{Name: "disk-1", Lun: pointer.Int32(0), StorageAccountType: "", DiskSizeGB: -10}}, 2,
+            []api.AzureDataDisk{{Name: "disk-1", Lun: 0, StorageAccountType: "", DiskSizeGB: -10}}, 2,
             ConsistOf(
                 PointTo(MatchFields(IgnoreExtras, Fields{"Type": Equal(field.ErrorTypeRequired), "Field": Equal("providerSpec.properties.storageProfile.dataDisks.storageAccountType")})),
                 PointTo(MatchFields(IgnoreExtras, Fields{"Type": Equal(field.ErrorTypeInvalid), "Field": Equal("providerSpec.properties.storageProfile.dataDisks.diskSizeGB")})),
             ),
         },
-        {
-            "should forbid nil Lun",
-            []api.AzureDataDisk{
-                {Name: "disk-1", Lun: nil, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
-            }, 1,
-            ConsistOf(PointTo(MatchFields(IgnoreExtras, Fields{"Type": Equal(field.ErrorTypeRequired), "Field": Equal("providerSpec.properties.storageProfile.dataDisks.lun")}))),
-        },
         {"should forbid duplicate Lun",
             []api.AzureDataDisk{
-                {Name: "disk-1", Lun: pointer.Int32(0), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
-                {Name: "disk-2", Lun: pointer.Int32(1), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
-                {Name: "disk-3", Lun: pointer.Int32(0), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
-                {Name: "disk-4", Lun: pointer.Int32(2), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
-                {Name: "disk-5", Lun: pointer.Int32(1), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
+                {Name: "disk-1", Lun: 0, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
+                {Name: "disk-2", Lun: 1, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
+                {Name: "disk-3", Lun: 0, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
+                {Name: "disk-4", Lun: 2, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
+                {Name: "disk-5", Lun: 1, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
             }, 2,
             ConsistOf(
                 PointTo(MatchFields(IgnoreExtras, Fields{"Type": Equal(field.ErrorTypeInvalid), "Field": Equal("providerSpec.properties.storageProfile.dataDisks.lun")})),
@@ -309,9 +302,9 @@ func TestValidateDataDisks(t *testing.T) {
         },
         {"should succeed with non-duplicate lun, valid diskSize and non-empty storageAccountType",
             []api.AzureDataDisk{
-                {Name: "disk-1", Lun: pointer.Int32(0), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
-                {Name: "disk-2", Lun: pointer.Int32(1), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 30},
-                {Name: "disk-3", Lun: pointer.Int32(2), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 50},
+                {Name: "disk-1", Lun: 0, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
+                {Name: "disk-2", Lun: 1, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 30},
+                {Name: "disk-3", Lun: 2, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 50},
             }, 0, nil,
         },
     }
diff --git a/pkg/azure/provider/helpers/driver.go b/pkg/azure/provider/helpers/driver.go
index 2995ccde..1ed70241 100644
--- a/pkg/azure/provider/helpers/driver.go
+++ b/pkg/azure/provider/helpers/driver.go
@@ -36,6 +36,12 @@ import (
     "github.com/gardener/machine-controller-manager-provider-azure/pkg/azure/utils"
 )
 
+// DataDiskLun describes the dataDisk lun
+type DataDiskLun int32
+
+// DiskID describes the disk ID
+type DiskID *string
+
 // ExtractProviderSpecAndConnectConfig extracts api.AzureProviderSpec from mcc and access.ConnectConfig from secret.
 func ExtractProviderSpecAndConnectConfig(mcc *v1alpha1.MachineClass, secret *corev1.Secret) (api.AzureProviderSpec, access.ConnectConfig, error) {
     var (
@@ -126,7 +132,7 @@ func createDataDiskNames(providerSpec api.AzureProviderSpec, vmName string) []st
     dataDisks := providerSpec.Properties.StorageProfile.DataDisks
     diskNames := make([]string, 0, len(dataDisks))
     for _, disk := range dataDisks {
-        diskName := utils.CreateDataDiskName(vmName, disk)
+        diskName := utils.CreateDataDiskName(vmName, disk.Name, disk.Lun)
         diskNames = append(diskNames, diskName)
     }
     return diskNames
@@ -425,7 +431,7 @@ func createNICTags(tags map[string]string) map[string]*string {
 // 1. Gets the VM image. If the image does not exist then it will return an error.
 // 2. From the VM Image it checks if there is a plan.
 // 3. If there is a plan then it will check if there is an existing agreement for this plan. If an agreement does not exist then it will return an error.
-// 4. If the agreement has not been accepted yet then it will accept the agreement and update the agreement. If that fails then it will return an error.
+// 4. If the agreement has not been accepted yet, then it will accept the agreement and update the agreement. If that fails then it will return an error.
 func ProcessVMImageConfiguration(ctx context.Context, factory access.Factory, connectConfig access.ConnectConfig, providerSpec api.AzureProviderSpec, vmName string) (imgRef armcompute.ImageReference, plan *armcompute.Plan, err error) {
     imgRef = getImageReference(providerSpec)
@@ -534,12 +540,12 @@ func checkAndAcceptAgreementIfNotAccepted(ctx context.Context, factory access.Fa
 }
 
 // CreateVM gathers the VM creation parameters and invokes a call to create or update the VM.
-func CreateVM(ctx context.Context, factory access.Factory, connectConfig access.ConnectConfig, providerSpec api.AzureProviderSpec, imageRef armcompute.ImageReference, plan *armcompute.Plan, secret *corev1.Secret, nicID string, vmName string) (*armcompute.VirtualMachine, error) {
+func CreateVM(ctx context.Context, factory access.Factory, connectConfig access.ConnectConfig, providerSpec api.AzureProviderSpec, vmImageRef armcompute.ImageReference, plan *armcompute.Plan, secret *corev1.Secret, nicID string, vmName string, imageRefDisks map[DataDiskLun]DiskID) (*armcompute.VirtualMachine, error) {
     vmAccess, err := factory.GetVirtualMachinesAccess(connectConfig)
     if err != nil {
         return nil, status.WrapError(codes.Internal, fmt.Sprintf("Failed to create virtual machine access to process request: [resourceGroup: %s, vmName: %s], Err: %v", providerSpec.ResourceGroup, vmName, err), err)
     }
-    vmCreationParams, err := createVMCreationParams(providerSpec, imageRef, plan, secret, nicID, vmName)
+    vmCreationParams, err := createVMCreationParams(providerSpec, vmImageRef, plan, secret, nicID, vmName, imageRefDisks)
     if err != nil {
         return nil, status.WrapError(codes.Internal, fmt.Sprintf("Failed to create virtual machine parameters to create VM: [ResourceGroup: %s, Name: %s], Err: %v", providerSpec.ResourceGroup, vmName, err), err)
     }
@@ -552,6 +558,105 @@ func CreateVM(ctx context.Context, factory access.Factory, connectConfig access.
     return vm, nil
 }
 
+// CreateDisksWithImageRef creates the data disks whose CreationData references an image (e.g. ImageReference or GalleryImageReference).
+func CreateDisksWithImageRef(ctx context.Context, factory access.Factory, connectConfig access.ConnectConfig, providerSpec api.AzureProviderSpec, vmName string) (map[DataDiskLun]DiskID, error) {
+    disksAccess, err := factory.GetDisksAccess(connectConfig)
+    if err != nil {
+        return nil, status.WrapError(codes.Internal, fmt.Sprintf("Failed to create disk access for VM: [ResourceGroup: %s], Err: %v", providerSpec.ResourceGroup, err), err)
+    }
+
+    disks := make(map[DataDiskLun]DiskID)
+    specDataDisks := providerSpec.Properties.StorageProfile.DataDisks
+    if utils.IsSliceNilOrEmpty(specDataDisks) {
+        return disks, nil
+    }
+
+    for _, specDataDisk := range specDataDisks {
+        if !isDataDiskWithImageRef(specDataDisk) {
+            continue
+        }
+        diskName := utils.CreateDataDiskName(vmName, specDataDisk.Name, specDataDisk.Lun)
+        diskCreationParams, err := createDiskCreationParams(ctx, specDataDisk, providerSpec, factory, connectConfig)
+        if err != nil {
+            errCode := accesserrors.GetMatchingErrorCode(err)
+            return nil, status.WrapError(errCode, fmt.Sprintf("Failed to create disk creation params: [ResourceGroup: %s, Name: %s], Err: %v", providerSpec.ResourceGroup, diskName, err), err)
+        }
+        disk, err := accesshelpers.CreateDisk(ctx, disksAccess, providerSpec.ResourceGroup, diskName, diskCreationParams)
+        if err != nil {
+            errCode := accesserrors.GetMatchingErrorCode(err)
+            return nil, status.WrapError(errCode, fmt.Sprintf("Failed to create Disk: [ResourceGroup: %s, Name: %s], Err: %v", providerSpec.ResourceGroup, diskName, err), err)
+        }
+        disks[DataDiskLun(specDataDisk.Lun)] = disk.ID
+        klog.Infof("Successfully created Disk: [ResourceGroup: %s, Name: %s]", providerSpec.ResourceGroup, diskName)
+    }
+
+    return disks, nil
+}
+
+func isDataDiskWithImageRef(dataDisk api.AzureDataDisk) bool {
+    if dataDisk.ImageRef != nil {
+        return true
+    }
+    return false
+}
+
+func createDiskCreationParams(ctx context.Context, specDataDisk api.AzureDataDisk, providerSpec api.AzureProviderSpec, factory access.Factory, connectConfig access.ConnectConfig) (params armcompute.Disk, err error) {
+    params = armcompute.Disk{
+        Location: to.Ptr(providerSpec.Location),
+        Properties: &armcompute.DiskProperties{
+            CreationData: &armcompute.CreationData{
+                CreateOption: to.Ptr(armcompute.DiskCreateOptionFromImage),
+            },
+            DiskSizeGB: to.Ptr[int32](specDataDisk.DiskSizeGB),
+            OSType:     to.Ptr(armcompute.OperatingSystemTypesLinux),
+        },
+        SKU: &armcompute.DiskSKU{
+            Name: to.Ptr(armcompute.DiskStorageAccountTypes(specDataDisk.StorageAccountType)),
+        },
+        Tags:  utils.CreateResourceTags(providerSpec.Tags),
+        Zones: getZonesFromProviderSpec(providerSpec),
+    }
+    err = setDiskImageReference(ctx, &params, specDataDisk, providerSpec.Location, factory, connectConfig)
+    return
+}
+
+func setDiskImageReference(ctx context.Context, disk *armcompute.Disk, specDataDisk api.AzureDataDisk, location string, factory access.Factory, connectConfig access.ConnectConfig) error {
+    if !utils.IsEmptyString(specDataDisk.ImageRef.ID) {
+        disk.Properties.CreationData.ImageReference = &armcompute.ImageDiskReference{
+            ID: &specDataDisk.ImageRef.ID,
+        }
+    } else if !utils.IsNilOrEmptyStringPtr(specDataDisk.ImageRef.SharedGalleryImageID) {
+        disk.Properties.CreationData.GalleryImageReference = &armcompute.ImageDiskReference{
+            SharedGalleryImageID: specDataDisk.ImageRef.SharedGalleryImageID,
+        }
+    } else if !utils.IsNilOrEmptyStringPtr(specDataDisk.ImageRef.CommunityGalleryImageID) {
+        disk.Properties.CreationData.GalleryImageReference = &armcompute.ImageDiskReference{
+            CommunityGalleryImageID: specDataDisk.ImageRef.CommunityGalleryImageID,
+        }
+    } else if !utils.IsNilOrEmptyStringPtr(specDataDisk.ImageRef.URN) {
+        urnParts := strings.Split(*specDataDisk.ImageRef.URN, ":")
+        imageRef := armcompute.ImageReference{
+            Publisher: to.Ptr(urnParts[0]),
+            Offer:     to.Ptr(urnParts[1]),
+            SKU:       to.Ptr(urnParts[2]),
+            Version:   to.Ptr(urnParts[3]),
+        }
+
+        vmImagesAccess, err := factory.GetVirtualMachineImagesAccess(connectConfig)
+        if err != nil {
+            return status.WrapError(codes.Internal, fmt.Sprintf("Failed to create image access, Err: %v", err), err)
+        }
+        image, err := accesshelpers.GetVMImage(ctx, vmImagesAccess, location, imageRef)
+        if err != nil {
+            return status.WrapError(codes.Internal, fmt.Sprintf("Failed to get image, Err: %v", err), err)
+        }
+        disk.Properties.CreationData.ImageReference = &armcompute.ImageDiskReference{
+            ID: image.ID,
+        }
+    }
+    return nil
+}
+
 // LogVMCreation is a convenience method which helps to extract relevant details from the created virtual machine and logs it.
 // Today the azure create VM call is atomic only w.r.t creation of VM, OSDisk, DataDisk(s). NIC still has to be created prior to creation of the VM.
 // Therefore, this method produces a log which also prints the OSDisk, DataDisks that are created (which helps in traceability). For completeness it
@@ -578,12 +683,16 @@ func LogVMCreation(location, resourceGroup string, vm *armcompute.VirtualMachine
     klog.Infof(msgBuilder.String())
 }
 
-func createVMCreationParams(providerSpec api.AzureProviderSpec, imageRef armcompute.ImageReference, plan *armcompute.Plan, secret *corev1.Secret, nicID, vmName string) (armcompute.VirtualMachine, error) {
+func createVMCreationParams(providerSpec api.AzureProviderSpec, imageRef armcompute.ImageReference, plan *armcompute.Plan, secret *corev1.Secret, nicID, vmName string, imageRefDisks map[DataDiskLun]DiskID) (armcompute.VirtualMachine, error) {
     vmTags := utils.CreateResourceTags(providerSpec.Tags)
     sshConfiguration, err := getSSHConfiguration(providerSpec.Properties.OsProfile.LinuxConfiguration.SSH)
     if err != nil {
         return armcompute.VirtualMachine{}, err
     }
+    dataDisks, err := getDataDisks(providerSpec.Properties.StorageProfile.DataDisks, vmName, imageRefDisks)
+    if err != nil {
+        return armcompute.VirtualMachine{}, status.WrapError(codes.Internal, fmt.Sprintf("Failed to create VM creation params, Err: %v", err), err)
+    }
 
     vm := armcompute.VirtualMachine{
         Location: to.Ptr(providerSpec.Location),
@@ -613,7 +722,7 @@ func createVMCreationParams(providerSpec api.AzureProviderSpec, imageRef armcomp
             },
         },
         StorageProfile: &armcompute.StorageProfile{
-            DataDisks:      getDataDisks(providerSpec.Properties.StorageProfile.DataDisks, vmName),
+            DataDisks:      dataDisks,
             ImageReference: &imageRef,
             OSDisk: &armcompute.OSDisk{
                 CreateOption: to.Ptr(armcompute.DiskCreateOptionTypes(providerSpec.Properties.StorageProfile.OsDisk.CreateOption)),
@@ -663,21 +772,20 @@ func createVMCreationParams(providerSpec api.AzureProviderSpec, imageRef armcomp
     return vm, nil
 }
 
-func getDataDisks(specDataDisks []api.AzureDataDisk, vmName string) []*armcompute.DataDisk {
+func getDataDisks(specDataDisks []api.AzureDataDisk, vmName string, imageRefDisks map[DataDiskLun]DiskID) ([]*armcompute.DataDisk, error) {
     var dataDisks []*armcompute.DataDisk
     if utils.IsSliceNilOrEmpty(specDataDisks) {
-        return dataDisks
+        return dataDisks, nil
     }
     for _, specDataDisk := range specDataDisks {
-        dataDiskName := utils.CreateDataDiskName(vmName, specDataDisk)
+        dataDiskName := utils.CreateDataDiskName(vmName, specDataDisk.Name, specDataDisk.Lun)
         caching := armcompute.CachingTypesNone
         if !utils.IsEmptyString(specDataDisk.Caching) {
            caching = armcompute.CachingTypes(specDataDisk.Caching)
         }
         dataDisk := &armcompute.DataDisk{
-            CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesEmpty),
-            Lun:          specDataDisk.Lun,
+            Lun:          to.Ptr(specDataDisk.Lun),
             Caching:      to.Ptr(caching),
             DeleteOption: to.Ptr(armcompute.DiskDeleteOptionTypesDelete),
             DiskSizeGB:   pointer.Int32(specDataDisk.DiskSizeGB),
@@ -686,9 +794,18 @@ func getDataDisks(specDataDisks []api.AzureDataDisk, vmName string) []*armcomput
             },
             Name: to.Ptr(dataDiskName),
         }
+        if isDataDiskWithImageRef(specDataDisk) {
+            diskID := imageRefDisks[DataDiskLun(specDataDisk.Lun)]
+            if diskID == nil {
+                return nil, fmt.Errorf("could not find id of pre-created disk %s with lun %d",
+                    dataDiskName, specDataDisk.Lun)
+            }
+            dataDisk.CreateOption = to.Ptr(armcompute.DiskCreateOptionTypesAttach)
+            dataDisk.ManagedDisk.ID = diskID
+        }
         dataDisks = append(dataDisks, dataDisk)
     }
-    return dataDisks
+    return dataDisks, nil
 }
 
 func getVMIdentity(specVMIdentityID *string) *armcompute.VirtualMachineIdentity {
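The URN branch of setDiskImageReference above assumes a marketplace image URN with exactly four colon-separated parts (publisher:offer:sku:version) and indexes them directly. A self-contained sketch of that parsing — splitURN is a hypothetical helper, not part of this change, and it adds the length check that the code above omits:

```go
package main

import (
	"fmt"
	"strings"
)

// splitURN mirrors the URN parsing done in setDiskImageReference and guards
// against malformed input (the code in the diff indexes the parts without this check).
func splitURN(urn string) (publisher, offer, sku, version string, err error) {
	parts := strings.Split(urn, ":")
	if len(parts) != 4 {
		return "", "", "", "", fmt.Errorf("unexpected URN format: %q", urn)
	}
	return parts[0], parts[1], parts[2], parts[3], nil
}

func main() {
	// Example URN; the value is illustrative only.
	p, o, s, v, err := splitURN("Canonical:0001-com-ubuntu-server-jammy:22_04-lts-gen2:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(p, o, s, v)
}
```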
diff --git a/pkg/azure/provider/helpers/resourcegraphprocessor.go b/pkg/azure/provider/helpers/resourcegraphprocessor.go
index a06d82bd..854f6f52 100644
--- a/pkg/azure/provider/helpers/resourcegraphprocessor.go
+++ b/pkg/azure/provider/helpers/resourcegraphprocessor.go
@@ -120,7 +120,7 @@ func getDataDiskNameSuffixes(providerSpec api.AzureProviderSpec) sets.Set[string
     dataDisks := providerSpec.Properties.StorageProfile.DataDisks
     if dataDisks != nil {
         for _, dataDisk := range dataDisks {
-            dataDiskNameSuffixes.Insert(utils.GetDataDiskNameSuffix(dataDisk))
+            dataDiskNameSuffixes.Insert(utils.GetDataDiskNameSuffix(dataDisk.Name, dataDisk.Lun))
         }
     }
     return dataDiskNameSuffixes
diff --git a/pkg/azure/provider/provider.go b/pkg/azure/provider/provider.go
index 220a2bb7..353e775c 100644
--- a/pkg/azure/provider/provider.go
+++ b/pkg/azure/provider/provider.go
@@ -69,6 +69,7 @@ func (d defaultDriver) CreateMachine(ctx context.Context, req *driver.CreateMach
     if err != nil {
         return
     }
+
     subnet, err := helpers.GetSubnet(ctx, d.factory, connectConfig, providerSpec)
     if err != nil {
         return
@@ -79,10 +80,18 @@ func (d defaultDriver) CreateMachine(ctx context.Context, req *driver.CreateMach
         return
     }
 
-    vm, err := helpers.CreateVM(ctx, d.factory, connectConfig, providerSpec, imageReference, plan, req.Secret, nicID, vmName)
+    // Create disks that reference an image, since they cannot be created together with the VM.
+    // TODO parallelize creation with NIC?
+    imageRefDisks, err := helpers.CreateDisksWithImageRef(ctx, d.factory, connectConfig, providerSpec, vmName)
+    if err != nil {
+        return
+    }
+
+    vm, err := helpers.CreateVM(ctx, d.factory, connectConfig, providerSpec, imageReference, plan, req.Secret, nicID, vmName, imageRefDisks)
     if err != nil {
         return
     }
+
     resp = helpers.ConstructCreateMachineResponse(providerSpec.Location, vmName)
     helpers.LogVMCreation(providerSpec.Location, providerSpec.ResourceGroup, vm)
     return
diff --git a/pkg/azure/testhelp/fakes/machineresources.go b/pkg/azure/testhelp/fakes/machineresources.go
index 38425947..bc96b2c6 100644
--- a/pkg/azure/testhelp/fakes/machineresources.go
+++ b/pkg/azure/testhelp/fakes/machineresources.go
@@ -294,7 +294,7 @@ func createDataDiskResources(spec api.AzureProviderSpec, vmID *string, vmName st
     dataDisks := make(map[string]*armcompute.Disk, len(specDataDisks))
     if specDataDisks != nil {
         for _, specDataDisk := range specDataDisks {
-            diskName := utils.CreateDataDiskName(vmName, specDataDisk)
+            diskName := utils.CreateDataDiskName(vmName, specDataDisk.Name, specDataDisk.Lun)
             dataDisks[diskName] = createDiskResource(spec, diskName, vmID, nil)
         }
     }
@@ -442,8 +442,8 @@ func createDataDisks(spec api.AzureProviderSpec, vmName string, deleteOption *ar
     }
     dataDisks := make([]*armcompute.DataDisk, 0, len(specDataDisks))
     for _, disk := range specDataDisks {
-        diskName := utils.CreateDataDiskName(vmName, disk)
-        d := createDataDisk(*disk.Lun, armcompute.CachingTypes(disk.Caching), deleteOption, disk.DiskSizeGB, armcompute.StorageAccountTypes(disk.StorageAccountType), diskName)
+        diskName := utils.CreateDataDiskName(vmName, disk.Name, disk.Lun)
+        d := createDataDisk(disk.Lun, armcompute.CachingTypes(disk.Caching), deleteOption, disk.DiskSizeGB, armcompute.StorageAccountTypes(disk.StorageAccountType), diskName)
         dataDisks = append(dataDisks, d)
     }
     return dataDisks
diff --git a/pkg/azure/testhelp/providerspec.go b/pkg/azure/testhelp/providerspec.go
index 1000d440..46620d52 100644
--- a/pkg/azure/testhelp/providerspec.go
+++ b/pkg/azure/testhelp/providerspec.go
@@ -9,8 +9,6 @@ import (
     "fmt"
 
     "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
-    "k8s.io/utils/pointer"
-
     "github.com/gardener/machine-controller-manager-provider-azure/pkg/azure/api"
     "github.com/gardener/machine-controller-manager-provider-azure/pkg/azure/utils"
 )
@@ -129,7 +127,7 @@ func (b *ProviderSpecBuilder) WithDataDisks(diskName string, numDisks int) *Prov
     for i := 0; i < numDisks; i++ {
         d := api.AzureDataDisk{
             Name:               diskName,
-            Lun:                pointer.Int32(int32(i)),
+            Lun:                int32(i),
             Caching:            "None",
             StorageAccountType: StorageAccountType,
             DiskSizeGB:         20,
@@ -193,7 +191,7 @@ func (b *ProviderSpecBuilder) Build() api.AzureProviderSpec {
 func CreateDataDiskNames(vmName string, spec api.AzureProviderSpec) []string {
     var diskNames []string
     for _, specDataDisk := range spec.Properties.StorageProfile.DataDisks {
-        diskNames = append(diskNames, utils.CreateDataDiskName(vmName, specDataDisk))
+        diskNames = append(diskNames, utils.CreateDataDiskName(vmName, specDataDisk.Name, specDataDisk.Lun))
     }
     return diskNames
 }
diff --git a/pkg/azure/utils/names.go b/pkg/azure/utils/names.go
index fd4b1db0..d56a9d6c 100644
--- a/pkg/azure/utils/names.go
+++ b/pkg/azure/utils/names.go
@@ -6,8 +6,6 @@ package utils
 
 import (
     "fmt"
-
-    "github.com/gardener/machine-controller-manager-provider-azure/pkg/azure/api"
 )
 
 const (
@@ -42,22 +40,21 @@ func CreateOSDiskName(vmName string) string {
 }
 
 // CreateDataDiskName creates a name for a DataDisk using VM name and data disk name specified in the provider Spec
-func CreateDataDiskName(vmName string, dataDisk api.AzureDataDisk) string {
+func CreateDataDiskName(vmName, diskName string, lun int32) string {
     prefix := vmName
-    suffix := GetDataDiskNameSuffix(dataDisk)
+    suffix := GetDataDiskNameSuffix(diskName, lun)
     return fmt.Sprintf("%s%s", prefix, suffix)
 }
 
 // GetDataDiskNameSuffix creates the suffix based on an optional data disk name and required lun fields.
-func GetDataDiskNameSuffix(dataDisk api.AzureDataDisk) string {
-    infix := getDataDiskInfix(dataDisk)
+func GetDataDiskNameSuffix(diskName string, lun int32) string {
+    infix := getDataDiskInfix(diskName, lun)
     return fmt.Sprintf("-%s%s", infix, DataDiskSuffix)
 }
 
-func getDataDiskInfix(dataDisk api.AzureDataDisk) string {
-    name := dataDisk.Name
-    if IsEmptyString(name) {
-        return fmt.Sprintf("%d", *dataDisk.Lun)
+func getDataDiskInfix(diskName string, lun int32) string {
+    if IsEmptyString(diskName) {
+        return fmt.Sprintf("%d", lun)
     }
-    return fmt.Sprintf("%s-%d", name, *dataDisk.Lun)
+    return fmt.Sprintf("%s-%d", diskName, lun)
 }
diff --git a/pkg/azure/utils/names_test.go b/pkg/azure/utils/names_test.go
index 381a8794..e8f807b7 100644
--- a/pkg/azure/utils/names_test.go
+++ b/pkg/azure/utils/names_test.go
@@ -10,7 +10,6 @@ import (
 
     "github.com/gardener/machine-controller-manager-provider-azure/pkg/azure/api"
     . "github.com/onsi/gomega"
-    "k8s.io/utils/pointer"
 )
 
 const vmName = "shoot--test-project-z1-4567c-xj5sq"
@@ -45,10 +44,10 @@ func TestCreateDataDiskName(t *testing.T) {
         t.Run(entry.description, func(t *testing.T) {
             dataDisk := api.AzureDataDisk{
                 Name:       entry.dataDiskName,
-                Lun:        pointer.Int32(entry.lun),
+                Lun:        entry.lun,
                 DiskSizeGB: 10,
             }
-            g.Expect(CreateDataDiskName(vmName, dataDisk)).To(Equal(entry.expectedDataDiskName))
+            g.Expect(CreateDataDiskName(vmName, dataDisk.Name, dataDisk.Lun)).To(Equal(entry.expectedDataDiskName))
         })
     }
 }