Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Data disk image reference #155

Closed
wants to merge 15 commits into from
26 changes: 26 additions & 0 deletions pkg/azure/access/helpers/disk.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ package helpers

import (
"context"
"time"

"k8s.io/klog/v2"

"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
Expand All @@ -17,6 +19,9 @@ import (

const (
diskDeleteServiceLabel = "disk_delete"
diskCreateServiceLabel = "disk_create"

defaultDiskOperationTimeout = 10 * time.Minute
)

// DeleteDisk deletes disk for passed in resourceGroup and diskName.
Expand All @@ -38,3 +43,24 @@ func DeleteDisk(ctx context.Context, client *armcompute.DisksClient, resourceGro
klog.Infof("Successfully deleted Disk: %s, for ResourceGroup: %s", diskName, resourceGroup)
return
}

// CreateDisk creates a Disk given a resourceGroup and disk creation parameters.
// It triggers the creation via BeginCreateOrUpdate and then blocks — bounded by
// defaultDiskOperationTimeout — until the long-running operation completes,
// returning the created disk on success.
// NOTE: All calls to this Azure API are instrumented as prometheus metric.
func CreateDisk(ctx context.Context, client *armcompute.DisksClient, resourceGroup, diskName string, diskCreationParams armcompute.Disk) (disk *armcompute.Disk, err error) {
	// Named err is captured by the metric recorder so the outcome is instrumented.
	defer instrument.AZAPIMetricRecorderFn(diskCreateServiceLabel, &err)()

	createCtx, cancelFn := context.WithTimeout(ctx, defaultDiskOperationTimeout)
	defer cancelFn()
	poller, err := client.BeginCreateOrUpdate(createCtx, resourceGroup, diskName, diskCreationParams, nil)
	if err != nil {
		// BUG FIX: arguments were previously passed as (resourceGroup, diskName),
		// swapping the values in the "[Name: %s, ResourceGroup: %s]" message.
		errors.LogAzAPIError(err, "Failed to trigger create of Disk [Name: %s, ResourceGroup: %s]", diskName, resourceGroup)
		return
	}
	createResp, err := poller.PollUntilDone(createCtx, nil)
	if err != nil {
		errors.LogAzAPIError(err, "Polling failed while waiting for create of Disk: %s for ResourceGroup: %s", diskName, resourceGroup)
		return
	}
	disk = &createResp.Disk
	return
}
4 changes: 3 additions & 1 deletion pkg/azure/api/providerspec.go
Original file line number Diff line number Diff line change
Expand Up @@ -197,13 +197,15 @@ type AzureDataDisk struct {
Name string `json:"name,omitempty"`
// Lun specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and
// therefore must be unique for each data disk attached to a VM.
Lun *int32 `json:"lun,omitempty"`
Lun int32 `json:"lun,omitempty"`
// Caching specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite.
Caching string `json:"caching,omitempty"`
// StorageAccountType is the storage account type for a managed disk.
StorageAccountType string `json:"storageAccountType,omitempty"`
// DiskSizeGB is the size of an empty disk in gigabytes.
DiskSizeGB int32 `json:"diskSizeGB,omitempty"`
// ImageRef optionally specifies an image source
ImageRef *AzureImageReference `json:"imageRef,omitempty"`
}

// AzureManagedDiskParameters is the parameters of a managed disk.
Expand Down
15 changes: 6 additions & 9 deletions pkg/azure/api/validation/validation.go
Original file line number Diff line number Diff line change
Expand Up @@ -211,17 +211,14 @@ func validateDataDisks(disks []api.AzureDataDisk, fldPath *field.Path) field.Err

luns := make(map[int32]int, len(disks))
for _, disk := range disks {
if disk.Lun == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("lun"), "must provide lun"))
// Lun should always start from 0 and it cannot be negative. The max value of lun will depend upon the VM type to which the disks are associated.
// Therefore, we will avoid any max limit check for lun and delegate that responsibility to the provider as that mapping could change over time.
if disk.Lun < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), disk.Lun, "lun must be a positive number"))
} else {
// Lun should always start from 0 and it cannot be negative. The max value of lun will depend upon the VM type to which the disks are associated.
// Therefore, we will avoid any max limit check for lun and delegate that responsibility to the provider as that mapping could change over time.
if *disk.Lun < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), *disk.Lun, "lun must be a positive number"))
} else {
luns[*disk.Lun]++
}
luns[disk.Lun]++
}

if disk.DiskSizeGB <= 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("diskSizeGB"), disk.DiskSizeGB, "DataDisk size must be positive and greater than 0"))
}
Expand Down
27 changes: 10 additions & 17 deletions pkg/azure/api/validation/validation_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -277,30 +277,23 @@ func TestValidateDataDisks(t *testing.T) {
matcher gomegatypes.GomegaMatcher
}{
{"should forbid empty storageAccountType",
[]api.AzureDataDisk{{Name: "disk-1", Lun: pointer.Int32(0), StorageAccountType: "", DiskSizeGB: 10}}, 1,
[]api.AzureDataDisk{{Name: "disk-1", Lun: 0, StorageAccountType: "", DiskSizeGB: 10}}, 1,
ConsistOf(PointTo(MatchFields(IgnoreExtras, Fields{"Type": Equal(field.ErrorTypeRequired), "Field": Equal("providerSpec.properties.storageProfile.dataDisks.storageAccountType")}))),
},
{"should forbid negative diskSize and empty storageAccountType",
[]api.AzureDataDisk{{Name: "disk-1", Lun: pointer.Int32(0), StorageAccountType: "", DiskSizeGB: -10}}, 2,
[]api.AzureDataDisk{{Name: "disk-1", Lun: 0, StorageAccountType: "", DiskSizeGB: -10}}, 2,
ConsistOf(
PointTo(MatchFields(IgnoreExtras, Fields{"Type": Equal(field.ErrorTypeRequired), "Field": Equal("providerSpec.properties.storageProfile.dataDisks.storageAccountType")})),
PointTo(MatchFields(IgnoreExtras, Fields{"Type": Equal(field.ErrorTypeInvalid), "Field": Equal("providerSpec.properties.storageProfile.dataDisks.diskSizeGB")})),
),
},
{
"should forbid nil Lun",
[]api.AzureDataDisk{
{Name: "disk-1", Lun: nil, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
}, 1,
ConsistOf(PointTo(MatchFields(IgnoreExtras, Fields{"Type": Equal(field.ErrorTypeRequired), "Field": Equal("providerSpec.properties.storageProfile.dataDisks.lun")}))),
},
{"should forbid duplicate Lun",
[]api.AzureDataDisk{
{Name: "disk-1", Lun: pointer.Int32(0), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
{Name: "disk-2", Lun: pointer.Int32(1), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
{Name: "disk-3", Lun: pointer.Int32(0), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
{Name: "disk-4", Lun: pointer.Int32(2), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
{Name: "disk-5", Lun: pointer.Int32(1), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
{Name: "disk-1", Lun: 0, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
{Name: "disk-2", Lun: 1, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
{Name: "disk-3", Lun: 0, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
{Name: "disk-4", Lun: 2, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
{Name: "disk-5", Lun: 1, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
}, 2,
ConsistOf(
PointTo(MatchFields(IgnoreExtras, Fields{"Type": Equal(field.ErrorTypeInvalid), "Field": Equal("providerSpec.properties.storageProfile.dataDisks.lun")})),
Expand All @@ -309,9 +302,9 @@ func TestValidateDataDisks(t *testing.T) {
},
{"should succeed with non-duplicate lun, valid diskSize and non-empty storageAccountType",
[]api.AzureDataDisk{
{Name: "disk-1", Lun: pointer.Int32(0), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
{Name: "disk-2", Lun: pointer.Int32(1), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 30},
{Name: "disk-3", Lun: pointer.Int32(2), StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 50},
{Name: "disk-1", Lun: 0, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 10},
{Name: "disk-2", Lun: 1, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 30},
{Name: "disk-3", Lun: 2, StorageAccountType: "StandardSSD_LRS", DiskSizeGB: 50},
}, 0, nil,
},
}
Expand Down
141 changes: 129 additions & 12 deletions pkg/azure/provider/helpers/driver.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,12 @@ import (
"github.com/gardener/machine-controller-manager-provider-azure/pkg/azure/utils"
)

// DataDiskLun is the logical unit number (LUN) that identifies a data disk
// within a VM; it mirrors api.AzureDataDisk.Lun.
type DataDiskLun int32

// DiskID is a pointer to the Azure resource ID of a disk.
// NOTE(review): a defined pointer type is unusual in Go (methods cannot be
// declared on it and nil-ness is easy to overlook) — confirm this is intentional
// rather than using *string or a plain string directly.
type DiskID *string

// ExtractProviderSpecAndConnectConfig extracts api.AzureProviderSpec from mcc and access.ConnectConfig from secret.
func ExtractProviderSpecAndConnectConfig(mcc *v1alpha1.MachineClass, secret *corev1.Secret) (api.AzureProviderSpec, access.ConnectConfig, error) {
var (
Expand Down Expand Up @@ -126,7 +132,7 @@ func createDataDiskNames(providerSpec api.AzureProviderSpec, vmName string) []st
dataDisks := providerSpec.Properties.StorageProfile.DataDisks
diskNames := make([]string, 0, len(dataDisks))
for _, disk := range dataDisks {
diskName := utils.CreateDataDiskName(vmName, disk)
diskName := utils.CreateDataDiskName(vmName, disk.Name, disk.Lun)
diskNames = append(diskNames, diskName)
}
return diskNames
Expand Down Expand Up @@ -425,7 +431,7 @@ func createNICTags(tags map[string]string) map[string]*string {
// 1. Gets the VM image. If the image does not exist then it will return an error.
// 2. From the VM Image it checks if there is a plan.
// 3. If there is a plan then it will check if there is an existing agreement for this plan. If an agreement does not exist then it will return an error.
// 4. If the agreement has not been accepted yet then it will accept the agreement and update the agreement. If that fails then it will return an error.
// 4. If the agreement has not yet been accepted, then it will accept the agreement and update the agreement. If that fails then it will return an error.
func ProcessVMImageConfiguration(ctx context.Context, factory access.Factory, connectConfig access.ConnectConfig, providerSpec api.AzureProviderSpec, vmName string) (imgRef armcompute.ImageReference, plan *armcompute.Plan, err error) {
imgRef = getImageReference(providerSpec)

Expand Down Expand Up @@ -534,12 +540,12 @@ func checkAndAcceptAgreementIfNotAccepted(ctx context.Context, factory access.Fa
}

// CreateVM gathers the VM creation parameters and invokes a call to create or update the VM.
func CreateVM(ctx context.Context, factory access.Factory, connectConfig access.ConnectConfig, providerSpec api.AzureProviderSpec, imageRef armcompute.ImageReference, plan *armcompute.Plan, secret *corev1.Secret, nicID string, vmName string) (*armcompute.VirtualMachine, error) {
func CreateVM(ctx context.Context, factory access.Factory, connectConfig access.ConnectConfig, providerSpec api.AzureProviderSpec, vmImageRef armcompute.ImageReference, plan *armcompute.Plan, secret *corev1.Secret, nicID string, vmName string, imageRefDisks map[DataDiskLun]DiskID) (*armcompute.VirtualMachine, error) {
vmAccess, err := factory.GetVirtualMachinesAccess(connectConfig)
if err != nil {
return nil, status.WrapError(codes.Internal, fmt.Sprintf("Failed to create virtual machine access to process request: [resourceGroup: %s, vmName: %s], Err: %v", providerSpec.ResourceGroup, vmName, err), err)
}
vmCreationParams, err := createVMCreationParams(providerSpec, imageRef, plan, secret, nicID, vmName)
vmCreationParams, err := createVMCreationParams(providerSpec, vmImageRef, plan, secret, nicID, vmName, imageRefDisks)
if err != nil {
return nil, status.WrapError(codes.Internal, fmt.Sprintf("Failed to create virtual machine parameters to create VM: [ResourceGroup: %s, Name: %s], Err: %v", providerSpec.ResourceGroup, vmName, err), err)
}
Expand All @@ -552,6 +558,105 @@ func CreateVM(ctx context.Context, factory access.Factory, connectConfig access.
return vm, nil
}

// CreateDisksWithImageRef creates a disk with CreationData (e.g. ImageReference or GalleryImageReference)
// for every data disk in the provider spec that carries an ImageRef, and returns the
// created disk IDs keyed by LUN. Disks without an ImageRef are left for the VM
// creation call to provision.
func CreateDisksWithImageRef(ctx context.Context, factory access.Factory, connectConfig access.ConnectConfig, providerSpec api.AzureProviderSpec, vmName string) (map[DataDiskLun]DiskID, error) {
	disksAccess, err := factory.GetDisksAccess(connectConfig)
	if err != nil {
		return nil, status.WrapError(codes.Internal, fmt.Sprintf("Failed to create disk access for VM: [ResourceGroup: %s], Err: %v", providerSpec.ResourceGroup, err), err)
	}

	createdDisks := make(map[DataDiskLun]DiskID)
	dataDiskSpecs := providerSpec.Properties.StorageProfile.DataDisks
	if utils.IsSliceNilOrEmpty(dataDiskSpecs) {
		return createdDisks, nil
	}

	for _, dataDiskSpec := range dataDiskSpecs {
		// Only disks that reference an image need to be pre-created here.
		if !isDataDiskWithImageRef(dataDiskSpec) {
			continue
		}
		name := utils.CreateDataDiskName(vmName, dataDiskSpec.Name, dataDiskSpec.Lun)
		creationParams, err := createDiskCreationParams(ctx, dataDiskSpec, providerSpec, factory, connectConfig)
		if err != nil {
			return nil, status.WrapError(accesserrors.GetMatchingErrorCode(err), fmt.Sprintf("Failed to create disk creation params: [ResourceGroup: %s, Name: %s], Err: %v", providerSpec.ResourceGroup, name, err), err)
		}
		createdDisk, err := accesshelpers.CreateDisk(ctx, disksAccess, providerSpec.ResourceGroup, name, creationParams)
		if err != nil {
			return nil, status.WrapError(accesserrors.GetMatchingErrorCode(err), fmt.Sprintf("Failed to create Disk: [ResourceGroup: %s, Name: %s], Err: %v", providerSpec.ResourceGroup, name, err), err)
		}
		createdDisks[DataDiskLun(dataDiskSpec.Lun)] = createdDisk.ID
		klog.Infof("Successfully created Disk: [ResourceGroup: %s, Name: %s]", providerSpec.ResourceGroup, name)
	}

	return createdDisks, nil
}

// isDataDiskWithImageRef reports whether the data disk specifies an image
// reference, i.e. whether its backing disk must be pre-created from an image.
func isDataDiskWithImageRef(dataDisk api.AzureDataDisk) bool {
	// Idiom fix: collapse `if cond { return true }; return false` (staticcheck S1008)
	// into a single boolean return.
	return dataDisk.ImageRef != nil
}

// createDiskCreationParams assembles the armcompute.Disk creation parameters for a
// data disk that is to be created from an image reference, copying location, size,
// SKU, tags and zones from the provider spec and then resolving the image source.
func createDiskCreationParams(ctx context.Context, specDataDisk api.AzureDataDisk, providerSpec api.AzureProviderSpec, factory access.Factory, connectConfig access.ConnectConfig) (armcompute.Disk, error) {
	diskParams := armcompute.Disk{
		Location: to.Ptr(providerSpec.Location),
		Properties: &armcompute.DiskProperties{
			CreationData: &armcompute.CreationData{
				CreateOption: to.Ptr(armcompute.DiskCreateOptionFromImage),
			},
			DiskSizeGB: to.Ptr[int32](specDataDisk.DiskSizeGB),
			OSType:     to.Ptr(armcompute.OperatingSystemTypesLinux),
		},
		SKU: &armcompute.DiskSKU{
			Name: to.Ptr(armcompute.DiskStorageAccountTypes(specDataDisk.StorageAccountType)),
		},
		Tags:  utils.CreateResourceTags(providerSpec.Tags),
		Zones: getZonesFromProviderSpec(providerSpec),
	}
	// Fill in the image source (direct, gallery or URN based) on the creation data.
	if err := setDiskImageReference(ctx, &diskParams, specDataDisk, providerSpec.Location, factory, connectConfig); err != nil {
		return diskParams, err
	}
	return diskParams, nil
}

// setDiskImageReference populates disk.Properties.CreationData with the image
// source described by specDataDisk.ImageRef. Exactly one of four sources is used,
// checked in priority order: a direct image ID, a shared gallery image ID, a
// community gallery image ID, or a marketplace URN (publisher:offer:sku:version)
// which is resolved to an image ID via the VM images API.
func setDiskImageReference(ctx context.Context, disk *armcompute.Disk, specDataDisk api.AzureDataDisk, location string, factory access.Factory, connectConfig access.ConnectConfig) error {
	if !utils.IsEmptyString(specDataDisk.ImageRef.ID) {
		disk.Properties.CreationData.ImageReference = &armcompute.ImageDiskReference{
			ID: &specDataDisk.ImageRef.ID,
		}
	} else if !utils.IsNilOrEmptyStringPtr(specDataDisk.ImageRef.SharedGalleryImageID) {
		disk.Properties.CreationData.GalleryImageReference = &armcompute.ImageDiskReference{
			SharedGalleryImageID: specDataDisk.ImageRef.SharedGalleryImageID,
		}
	} else if !utils.IsNilOrEmptyStringPtr(specDataDisk.ImageRef.CommunityGalleryImageID) {
		disk.Properties.CreationData.GalleryImageReference = &armcompute.ImageDiskReference{
			CommunityGalleryImageID: specDataDisk.ImageRef.CommunityGalleryImageID,
		}
	} else if !utils.IsNilOrEmptyStringPtr(specDataDisk.ImageRef.URN) {
		urnParts := strings.Split(*specDataDisk.ImageRef.URN, ":")
		// ROBUSTNESS FIX: previously urnParts[0..3] were accessed unconditionally,
		// so a malformed URN caused an index-out-of-range panic.
		if len(urnParts) != 4 {
			err := fmt.Errorf("invalid image URN %q: expected format publisher:offer:sku:version", *specDataDisk.ImageRef.URN)
			return status.WrapError(codes.Internal, err.Error(), err)
		}
		imageRef := armcompute.ImageReference{
			Publisher: to.Ptr(urnParts[0]),
			Offer:     to.Ptr(urnParts[1]),
			SKU:       to.Ptr(urnParts[2]),
			Version:   to.Ptr(urnParts[3]),
		}

		vmImagesAccess, err := factory.GetVirtualMachineImagesAccess(connectConfig)
		if err != nil {
			return status.WrapError(codes.Internal, fmt.Sprintf("Failed to create image access, Err: %v", err), err)
		}
		image, err := accesshelpers.GetVMImage(ctx, vmImagesAccess, location, imageRef)
		if err != nil {
			return status.WrapError(codes.Internal, fmt.Sprintf("Failed to get image, Err: %v", err), err)
		}
		disk.Properties.CreationData.ImageReference = &armcompute.ImageDiskReference{
			ID: image.ID,
		}
	}
	return nil
}

// LogVMCreation is a convenience method which helps to extract relevant details from the created virtual machine and logs it.
// Today the azure create VM call is atomic only w.r.t creation of VM, OSDisk, DataDisk(s). NIC still has to be created prior to creation of the VM.
// Therefore, this method produces a log which also prints the OSDisk, DataDisks that are created (which helps in traceability). For completeness it
Expand All @@ -578,12 +683,16 @@ func LogVMCreation(location, resourceGroup string, vm *armcompute.VirtualMachine
klog.Infof(msgBuilder.String())
}

func createVMCreationParams(providerSpec api.AzureProviderSpec, imageRef armcompute.ImageReference, plan *armcompute.Plan, secret *corev1.Secret, nicID, vmName string) (armcompute.VirtualMachine, error) {
func createVMCreationParams(providerSpec api.AzureProviderSpec, imageRef armcompute.ImageReference, plan *armcompute.Plan, secret *corev1.Secret, nicID, vmName string, imageRefDisks map[DataDiskLun]DiskID) (armcompute.VirtualMachine, error) {
vmTags := utils.CreateResourceTags(providerSpec.Tags)
sshConfiguration, err := getSSHConfiguration(providerSpec.Properties.OsProfile.LinuxConfiguration.SSH)
if err != nil {
return armcompute.VirtualMachine{}, err
}
dataDisks, err := getDataDisks(providerSpec.Properties.StorageProfile.DataDisks, vmName, imageRefDisks)
if err != nil {
return armcompute.VirtualMachine{}, status.WrapError(codes.Internal, fmt.Sprintf("Failed to create vm creation params, Err: %v", err), err)
}

vm := armcompute.VirtualMachine{
Location: to.Ptr(providerSpec.Location),
Expand Down Expand Up @@ -613,7 +722,7 @@ func createVMCreationParams(providerSpec api.AzureProviderSpec, imageRef armcomp
},
},
StorageProfile: &armcompute.StorageProfile{
DataDisks: getDataDisks(providerSpec.Properties.StorageProfile.DataDisks, vmName),
DataDisks: dataDisks,
ImageReference: &imageRef,
OSDisk: &armcompute.OSDisk{
CreateOption: to.Ptr(armcompute.DiskCreateOptionTypes(providerSpec.Properties.StorageProfile.OsDisk.CreateOption)),
Expand Down Expand Up @@ -663,21 +772,20 @@ func createVMCreationParams(providerSpec api.AzureProviderSpec, imageRef armcomp
return vm, nil
}

func getDataDisks(specDataDisks []api.AzureDataDisk, vmName string) []*armcompute.DataDisk {
func getDataDisks(specDataDisks []api.AzureDataDisk, vmName string, imageRefDisks map[DataDiskLun]DiskID) ([]*armcompute.DataDisk, error) {
var dataDisks []*armcompute.DataDisk
if utils.IsSliceNilOrEmpty(specDataDisks) {
return dataDisks
return dataDisks, nil
}
for _, specDataDisk := range specDataDisks {
dataDiskName := utils.CreateDataDiskName(vmName, specDataDisk)
dataDiskName := utils.CreateDataDiskName(vmName, specDataDisk.Name, specDataDisk.Lun)
caching := armcompute.CachingTypesNone
if !utils.IsEmptyString(specDataDisk.Caching) {
caching = armcompute.CachingTypes(specDataDisk.Caching)
}
dataDisk := &armcompute.DataDisk{

CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesEmpty),
Lun: specDataDisk.Lun,
Lun: to.Ptr(specDataDisk.Lun),
Caching: to.Ptr(caching),
DeleteOption: to.Ptr(armcompute.DiskDeleteOptionTypesDelete),
DiskSizeGB: pointer.Int32(specDataDisk.DiskSizeGB),
Expand All @@ -686,9 +794,18 @@ func getDataDisks(specDataDisks []api.AzureDataDisk, vmName string) []*armcomput
},
Name: to.Ptr(dataDiskName),
}
if isDataDiskWithImageRef(specDataDisk) {
diskID := imageRefDisks[DataDiskLun(specDataDisk.Lun)]
if diskID == nil {
return nil, fmt.Errorf("could not find id of pre created disk %s with lun %d",
dataDiskName, specDataDisk.Lun)
}
dataDisk.CreateOption = to.Ptr(armcompute.DiskCreateOptionTypesAttach)
dataDisk.ManagedDisk.ID = diskID
}
dataDisks = append(dataDisks, dataDisk)
}
return dataDisks
return dataDisks, nil
}

func getVMIdentity(specVMIdentityID *string) *armcompute.VirtualMachineIdentity {
Expand Down
2 changes: 1 addition & 1 deletion pkg/azure/provider/helpers/resourcegraphprocessor.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ func getDataDiskNameSuffixes(providerSpec api.AzureProviderSpec) sets.Set[string
dataDisks := providerSpec.Properties.StorageProfile.DataDisks
if dataDisks != nil {
for _, dataDisk := range dataDisks {
dataDiskNameSuffixes.Insert(utils.GetDataDiskNameSuffix(dataDisk))
dataDiskNameSuffixes.Insert(utils.GetDataDiskNameSuffix(dataDisk.Name, dataDisk.Lun))
}
}
return dataDiskNameSuffixes
Expand Down
Loading