From bc3e5c4fc8d5149d148894dab498fb158f7042d6 Mon Sep 17 00:00:00 2001
From: Thomas Stringer
Date: Thu, 7 Jun 2018 15:43:11 -0400
Subject: [PATCH 1/7] add support for upgrading a vmss in the cluster

---
 pkg/armhelpers/azureclient.go                 | 47 +++++------
 pkg/armhelpers/compute.go                     | 23 ++++++
 pkg/armhelpers/interfaces.go                  |  9 +++
 .../kubernetesupgrade/upgradecluster.go       | 58 ++++++++++++++
 pkg/operations/kubernetesupgrade/upgrader.go  | 79 +++++++++++++++++++
 5 files changed, 194 insertions(+), 22 deletions(-)

diff --git a/pkg/armhelpers/azureclient.go b/pkg/armhelpers/azureclient.go
index f32d9fc2b2..0a48e08c89 100644
--- a/pkg/armhelpers/azureclient.go
+++ b/pkg/armhelpers/azureclient.go
@@ -46,17 +46,18 @@ type AzureClient struct {
 	environment    azure.Environment
 	subscriptionID string
 
-	authorizationClient           authorization.RoleAssignmentsClient
-	deploymentsClient             resources.DeploymentsClient
-	deploymentOperationsClient    resources.DeploymentOperationsClient
-	resourcesClient               resources.GroupClient
-	storageAccountsClient         storage.AccountsClient
-	interfacesClient              network.InterfacesClient
-	groupsClient                  resources.GroupsClient
-	providersClient               resources.ProvidersClient
-	virtualMachinesClient         compute.VirtualMachinesClient
-	virtualMachineScaleSetsClient compute.VirtualMachineScaleSetsClient
-	disksClient                   disk.DisksClient
+	authorizationClient             authorization.RoleAssignmentsClient
+	deploymentsClient               resources.DeploymentsClient
+	deploymentOperationsClient      resources.DeploymentOperationsClient
+	resourcesClient                 resources.GroupClient
+	storageAccountsClient           storage.AccountsClient
+	interfacesClient                network.InterfacesClient
+	groupsClient                    resources.GroupsClient
+	providersClient                 resources.ProvidersClient
+	virtualMachinesClient           compute.VirtualMachinesClient
+	virtualMachineScaleSetsClient   compute.VirtualMachineScaleSetsClient
+	virtualMachineScaleSetVMsClient compute.VirtualMachineScaleSetVMsClient
+	disksClient                     disk.DisksClient
 
 	applicationsClient      graphrbac.ApplicationsClient
 	servicePrincipalsClient graphrbac.ServicePrincipalsClient
@@ -264,17 +265,18 @@ func getClient(env azure.Environment, subscriptionID, tenantID string, armSpt *a
 		environment:    env,
 		subscriptionID: subscriptionID,
 
-		authorizationClient:           authorization.NewRoleAssignmentsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-		deploymentsClient:             resources.NewDeploymentsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-		deploymentOperationsClient:    resources.NewDeploymentOperationsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-		resourcesClient:               resources.NewGroupClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-		storageAccountsClient:         storage.NewAccountsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-		interfacesClient:              network.NewInterfacesClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-		groupsClient:                  resources.NewGroupsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-		providersClient:               resources.NewProvidersClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-		virtualMachinesClient:         compute.NewVirtualMachinesClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-		virtualMachineScaleSetsClient: compute.NewVirtualMachineScaleSetsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
-		disksClient:                   disk.NewDisksClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
+		authorizationClient:             authorization.NewRoleAssignmentsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
+		deploymentsClient:               resources.NewDeploymentsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
+		deploymentOperationsClient:      resources.NewDeploymentOperationsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
+		resourcesClient:                 resources.NewGroupClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
+		storageAccountsClient:           storage.NewAccountsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
+		interfacesClient:                network.NewInterfacesClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
+		groupsClient:                    resources.NewGroupsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
+		providersClient:                 resources.NewProvidersClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
+		virtualMachinesClient:           compute.NewVirtualMachinesClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
+		virtualMachineScaleSetsClient:   compute.NewVirtualMachineScaleSetsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
+		virtualMachineScaleSetVMsClient: compute.NewVirtualMachineScaleSetVMsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
+		disksClient:                     disk.NewDisksClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
 
 		applicationsClient:      graphrbac.NewApplicationsClientWithBaseURI(env.GraphEndpoint, tenantID),
 		servicePrincipalsClient: graphrbac.NewServicePrincipalsClientWithBaseURI(env.GraphEndpoint, tenantID),
@@ -291,6 +293,7 @@ func getClient(env azure.Environment, subscriptionID, tenantID string, armSpt *a
 	c.providersClient.Authorizer = authorizer
 	c.virtualMachinesClient.Authorizer = authorizer
 	c.virtualMachineScaleSetsClient.Authorizer = authorizer
+	c.virtualMachineScaleSetVMsClient.Authorizer = authorizer
 	c.disksClient.Authorizer = authorizer
 
 	c.deploymentsClient.PollingDelay = time.Second * 5
diff --git a/pkg/armhelpers/compute.go b/pkg/armhelpers/compute.go
index 3bc7a3479f..fde07ed472 100644
--- a/pkg/armhelpers/compute.go
+++ b/pkg/armhelpers/compute.go
@@ -23,3 +23,26 @@ func (az *AzureClient) DeleteVirtualMachine(resourceGroup, name string, cancel <
 func (az *AzureClient) ListVirtualMachineScaleSets(resourceGroup string) (compute.VirtualMachineScaleSetListResult, error) {
 	return az.virtualMachineScaleSetsClient.List(resourceGroup)
 }
+
+// ListVirtualMachineScaleSetVMs returns the list of VMs per VMSS
+func (az *AzureClient) ListVirtualMachineScaleSetVMs(resourceGroup, virtualMachineScaleSet string) (compute.VirtualMachineScaleSetVMListResult, error) {
+	return az.virtualMachineScaleSetVMsClient.List(resourceGroup, virtualMachineScaleSet, "", "", "")
+}
+
+// DeleteVirtualMachineScaleSetVM deletes a VM in a VMSS
+func (az *AzureClient) DeleteVirtualMachineScaleSetVM(resourceGroup, virtualMachineScaleSet, instanceID string, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) {
+	return az.virtualMachineScaleSetVMsClient.Delete(resourceGroup, virtualMachineScaleSet, instanceID, cancel)
+}
+
+// SetVirtualMachineScaleSetCapacity sets the VMSS capacity
+func (az *AzureClient) SetVirtualMachineScaleSetCapacity(resourceGroup, virtualMachineScaleSet string, sku compute.Sku, location string, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) {
+	return az.virtualMachineScaleSetsClient.CreateOrUpdate(
+		resourceGroup,
+		virtualMachineScaleSet,
+		compute.VirtualMachineScaleSet{
+			Location: &location,
+			Sku:      &sku,
+		},
+		cancel,
+	)
+}
diff --git a/pkg/armhelpers/interfaces.go b/pkg/armhelpers/interfaces.go
index 0f6e812914..55517ee2c8 100644
--- a/pkg/armhelpers/interfaces.go
+++ b/pkg/armhelpers/interfaces.go
@@ -44,6 +44,15 @@ type ACSEngineClient interface {
 	// ListVirtualMachineScaleSets lists the vmss resources in the resource group
 	ListVirtualMachineScaleSets(resourceGroup string) (compute.VirtualMachineScaleSetListResult, error)
 
+	// ListVirtualMachineScaleSetVMs lists the virtual machines contained in a vmss
+	ListVirtualMachineScaleSetVMs(resourceGroup, virtualMachineScaleSet string) (compute.VirtualMachineScaleSetVMListResult, error)
+
+	// DeleteVirtualMachineScaleSetVM deletes a VM in a VMSS
+	DeleteVirtualMachineScaleSetVM(resourceGroup, virtualMachineScaleSet, instanceID string, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error)
+
+	// SetVirtualMachineScaleSetCapacity sets the VMSS capacity
+	SetVirtualMachineScaleSetCapacity(resourceGroup, virtualMachineScaleSet string, sku compute.Sku, location string, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error)
+
 	//
 	// STORAGE
 
diff --git a/pkg/operations/kubernetesupgrade/upgradecluster.go b/pkg/operations/kubernetesupgrade/upgradecluster.go
index fdd3376f00..5df4f466ad 100644
--- a/pkg/operations/kubernetesupgrade/upgradecluster.go
+++ b/pkg/operations/kubernetesupgrade/upgradecluster.go
@@ -27,10 +27,20 @@ type ClusterTopology struct {
 	AgentPoolsToUpgrade map[string]bool
 	AgentPools          map[string]*AgentPoolTopology
 
+	ScaleSets []ScaleSetToUpgrade
+
 	MasterVMs         *[]compute.VirtualMachine
 	UpgradedMasterVMs *[]compute.VirtualMachine
 }
 
+// ScaleSetToUpgrade contains necessary data required to upgrade a VMSS
+type ScaleSetToUpgrade struct {
+	Name         string
+	Sku          compute.Sku
+	Location     string
+	VMsToUpgrade []string
+}
+
 // AgentPoolTopology contains agent VMs in a single pool
type AgentPoolTopology struct {
 	Identifier *string
@@ -128,6 +138,54 @@ func (uc *UpgradeCluster) getClusterNodeStatus(subscriptionID uuid.UUID, resourc
 	targetOrchestratorTypeVersion := fmt.Sprintf("%s:%s",
 		uc.DataModel.Properties.OrchestratorProfile.OrchestratorType,
 		uc.DataModel.Properties.OrchestratorProfile.OrchestratorVersion)
 
+	// Loop through all of the scale sets and see if the VMs in the scale
+	// set are at the current targetOrchestratorTypeVersion
+	//
+	// If they are not, then add them to be upgraded
+	//
+	// Subsequently loop through the VMs to be upgraded, scale up the
+	// VMSS by one, and then remove the old node
+	//
+	// The unique identifier of a scale set vm is VmssName:InstanceId
+	vmScaleSets, err := uc.Client.ListVirtualMachineScaleSets(resourceGroup)
+	if err != nil {
+		return err
+	}
+	for _, vmScaleSet := range *vmScaleSets.Value {
+		vmScaleSetVMs, err := uc.Client.ListVirtualMachineScaleSetVMs(resourceGroup, *vmScaleSet.Name)
+		if err != nil {
+			return err
+		}
+		scaleSetToUpgrade := ScaleSetToUpgrade{
+			Name:     *vmScaleSet.Name,
+			Sku:      *vmScaleSet.Sku,
+			Location: *vmScaleSet.Location,
+		}
+		for _, vm := range *vmScaleSetVMs.Value {
+			if vm.Tags == nil || (*vm.Tags)["orchestrator"] == nil {
+				uc.Logger.Infof("No tags found for scale set VM: %s skipping.\n", *vm.Name)
+				continue
+			}
+
+			scaleSetVMOrchestratorTypeAndVersion := *(*vm.Tags)["orchestrator"]
+			if scaleSetVMOrchestratorTypeAndVersion != targetOrchestratorTypeVersion {
+				// This condition is a scale set VM that is an older version and should be handled
+				uc.Logger.Infof(
+					"VM %s in VMSS %s has a current tag of %s and a desired tag of %s. Upgrading this node.\n",
+					*vm.Name,
+					*vmScaleSet.Name,
+					scaleSetVMOrchestratorTypeAndVersion,
+					targetOrchestratorTypeVersion,
+				)
+				scaleSetToUpgrade.VMsToUpgrade = append(
+					scaleSetToUpgrade.VMsToUpgrade,
+					*vm.InstanceID,
+				)
+			}
+		}
+		uc.ScaleSets = append(uc.ScaleSets, scaleSetToUpgrade)
+	}
+
 	for _, vm := range *vmListResult.Value {
 		if vm.Tags == nil || (*vm.Tags)["orchestrator"] == nil {
 			uc.Logger.Infof("No tags found for VM: %s skipping.\n", *vm.Name)
diff --git a/pkg/operations/kubernetesupgrade/upgrader.go b/pkg/operations/kubernetesupgrade/upgrader.go
index 8cb4c0b0e9..9da6357334 100644
--- a/pkg/operations/kubernetesupgrade/upgrader.go
+++ b/pkg/operations/kubernetesupgrade/upgrader.go
@@ -56,6 +56,10 @@ func (ku *Upgrader) RunUpgrade() error {
 		return err
 	}
 
+	if err := ku.upgradeAgentScaleSets(); err != nil {
+		return err
+	}
+
 	return ku.upgradeAgentPools()
 }
 
@@ -374,6 +378,81 @@ func (ku *Upgrader) upgradeAgentPools() error {
 	return nil
 }
 
+func (ku *Upgrader) upgradeAgentScaleSets() error {
+	for _, vmssToUpgrade := range ku.ClusterTopology.ScaleSets {
+		ku.logger.Infof("Upgrading VMSS %s", vmssToUpgrade.Name)
+
+		if len(vmssToUpgrade.VMsToUpgrade) == 0 {
+			ku.logger.Infof("No VMs to upgrade for VMSS %s, skipping", vmssToUpgrade.Name)
+			continue
+		}
+
+		newCapacity := *vmssToUpgrade.Sku.Capacity + 1
+		ku.logger.Infof(
+			"VMSS %s current capacity is %d and new capacity will be %d while each node is swapped",
+			vmssToUpgrade.Name,
+			*vmssToUpgrade.Sku.Capacity,
+			newCapacity,
+		)
+
+		*vmssToUpgrade.Sku.Capacity = newCapacity
+
+		for _, vmToUpgrade := range vmssToUpgrade.VMsToUpgrade {
+			success, failure := ku.Client.SetVirtualMachineScaleSetCapacity(
+				ku.ClusterTopology.ResourceGroup,
+				vmssToUpgrade.Name,
+				vmssToUpgrade.Sku,
+				vmssToUpgrade.Location,
+				make(chan struct{}),
+			)
+
+			select {
+			case <-success:
+				ku.logger.Infof("Successfully set capacity for VMSS %s", vmssToUpgrade.Name)
+			case err := <-failure:
+				ku.logger.Errorf("Failed to set capacity for VMSS %s", vmssToUpgrade.Name)
+				return err
+			}
+
+			ku.logger.Infof(
+				"Deleting VM %s in VMSS %s",
+				vmToUpgrade,
+				vmssToUpgrade.Name,
+			)
+
+			// At this point we have our buffer node that will replace the node to delete
+			// so we can just remove this current node then
+			res, failure := ku.Client.DeleteVirtualMachineScaleSetVM(
+				ku.ClusterTopology.ResourceGroup,
+				vmssToUpgrade.Name,
+				vmToUpgrade,
+				make(chan struct{}),
+			)
+
+			select {
+			case <-res:
+				ku.logger.Infof(
+					"Successfully deleted VM %s in VMSS %s",
+					vmToUpgrade,
+					vmssToUpgrade.Name,
+				)
+			case err := <-failure:
+				ku.logger.Errorf(
+					"Failed to delete VM %s in VMSS %s",
+					vmToUpgrade,
+					vmssToUpgrade.Name,
+				)
+				return err
+			}
+		}
+		ku.logger.Infof("Completed upgrading VMSS %s", vmssToUpgrade.Name)
+	}
+
+	ku.logger.Infoln("Completed upgrading all VMSS")
+
+	return nil
+}
+
 func (ku *Upgrader) generateUpgradeTemplate(upgradeContainerService *api.ContainerService, acsengineVersion string) (map[string]interface{}, map[string]interface{}, error) {
 	var err error
 	ctx := acsengine.Context{
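A note on the client contract patch 1 builds on: the armhelpers wrappers keep the older Azure SDK long-running-operation style and hand back a result channel plus an error channel instead of blocking, so every caller waits with a select over the pair, as upgradeAgentScaleSets does inline. A minimal sketch of that consumption pattern follows; the helper name waitForVMSSUpdate is illustrative and not part of this series, and the import path assumes the channel-based arm/compute package vendored at the time:

package kubernetesupgrade

import "github.com/Azure/azure-sdk-for-go/arm/compute"

// waitForVMSSUpdate blocks until either channel yields, mirroring the
// select blocks in upgradeAgentScaleSets. Under this contract a nil
// error received on errCh also indicates the operation completed.
func waitForVMSSUpdate(resCh <-chan compute.VirtualMachineScaleSet, errCh <-chan error) error {
	select {
	case <-resCh:
		return nil
	case err := <-errCh:
		return err
	}
}
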
From ffb56bf78af769d0035bf5f1cbf465c2047b219a Mon Sep 17 00:00:00 2001
From: Thomas Stringer
Date: Fri, 8 Jun 2018 15:42:35 -0400
Subject: [PATCH 2/7] add node drain prior to delete for vmss upgrade

---
 .../kubernetesupgrade/upgradecluster.go      | 23 ++++++++----
 pkg/operations/kubernetesupgrade/upgrader.go | 37 +++++++++++++++++--
 2 files changed, 50 insertions(+), 10 deletions(-)

diff --git a/pkg/operations/kubernetesupgrade/upgradecluster.go b/pkg/operations/kubernetesupgrade/upgradecluster.go
index 5df4f466ad..3b18723237 100644
--- a/pkg/operations/kubernetesupgrade/upgradecluster.go
+++ b/pkg/operations/kubernetesupgrade/upgradecluster.go
@@ -27,18 +27,24 @@ type ClusterTopology struct {
 	AgentPoolsToUpgrade map[string]bool
 	AgentPools          map[string]*AgentPoolTopology
 
-	ScaleSets []ScaleSetToUpgrade
+	AgentPoolScaleSetsToUpgrade []AgentPoolScaleSet
 
 	MasterVMs         *[]compute.VirtualMachine
 	UpgradedMasterVMs *[]compute.VirtualMachine
 }
 
-// ScaleSetToUpgrade contains necessary data required to upgrade a VMSS
-type ScaleSetToUpgrade struct {
+// AgentPoolScaleSet contains necessary data required to upgrade a VMSS
+type AgentPoolScaleSet struct {
 	Name         string
 	Sku          compute.Sku
 	Location     string
-	VMsToUpgrade []string
+	VMsToUpgrade []AgentPoolScaleSetVM
+}
+
+// AgentPoolScaleSetVM represents a VM in a VMSS
+type AgentPoolScaleSetVM struct {
+	Name       string
+	InstanceID string
 }
 
 // AgentPoolTopology contains agent VMs in a single pool
@@ -156,7 +162,7 @@ func (uc *UpgradeCluster) getClusterNodeStatus(subscriptionID uuid.UUID, resourc
 		if err != nil {
 			return err
 		}
-		scaleSetToUpgrade := ScaleSetToUpgrade{
+		scaleSetToUpgrade := AgentPoolScaleSet{
 			Name:     *vmScaleSet.Name,
 			Sku:      *vmScaleSet.Sku,
 			Location: *vmScaleSet.Location,
@@ -179,11 +185,14 @@ func (uc *UpgradeCluster) getClusterNodeStatus(subscriptionID uuid.UUID, resourc
 				)
 				scaleSetToUpgrade.VMsToUpgrade = append(
 					scaleSetToUpgrade.VMsToUpgrade,
-					*vm.InstanceID,
+					AgentPoolScaleSetVM{
+						Name:       *vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName,
+						InstanceID: *vm.InstanceID,
+					},
 				)
 			}
 		}
-		uc.ScaleSets = append(uc.ScaleSets, scaleSetToUpgrade)
+		uc.AgentPoolScaleSetsToUpgrade = append(uc.AgentPoolScaleSetsToUpgrade, scaleSetToUpgrade)
 	}
 
 	for _, vm := range *vmListResult.Value {
diff --git a/pkg/operations/kubernetesupgrade/upgrader.go b/pkg/operations/kubernetesupgrade/upgrader.go
index 9da6357334..bf3aebb8f5 100644
--- a/pkg/operations/kubernetesupgrade/upgrader.go
+++ b/pkg/operations/kubernetesupgrade/upgrader.go
@@ -10,6 +10,7 @@ import (
 	"github.com/Azure/acs-engine/pkg/armhelpers"
 	"github.com/Azure/acs-engine/pkg/armhelpers/utils"
 	"github.com/Azure/acs-engine/pkg/i18n"
+	"github.com/Azure/acs-engine/pkg/operations"
 	"github.com/sirupsen/logrus"
 	"k8s.io/api/core/v1"
 )
@@ -379,7 +380,7 @@ func (ku *Upgrader) upgradeAgentPools() error {
 }
 
 func (ku *Upgrader) upgradeAgentScaleSets() error {
-	for _, vmssToUpgrade := range ku.ClusterTopology.ScaleSets {
+	for _, vmssToUpgrade := range ku.ClusterTopology.AgentPoolScaleSetsToUpgrade {
 		ku.logger.Infof("Upgrading VMSS %s", vmssToUpgrade.Name)
 
 		if len(vmssToUpgrade.VMsToUpgrade) == 0 {
@@ -416,16 +417,46 @@ func (ku *Upgrader) upgradeAgentScaleSets() error {
 
 			ku.logger.Infof(
 				"Deleting VM %s in VMSS %s",
-				vmToUpgrade,
+				vmToUpgrade.Name,
 				vmssToUpgrade.Name,
 			)
 
+			// Before we can delete the node we should safely and responsibly drain it
+			var kubeAPIServerURL string
+			getClientTimeout := 10 * time.Second
+
+			if ku.DataModel.Properties.HostedMasterProfile != nil {
+				kubeAPIServerURL = ku.DataModel.Properties.HostedMasterProfile.FQDN
+			} else {
+				kubeAPIServerURL = ku.DataModel.Properties.MasterProfile.FQDN
+			}
+			client, err := ku.Client.GetKubernetesClient(
+				kubeAPIServerURL,
+				ku.kubeConfig,
+				interval,
+				getClientTimeout,
+			)
+			if err != nil {
+				ku.logger.Errorf("Error getting Kubernetes client: %v", err)
+				return err
+			}
+			err = operations.SafelyDrainNodeWithClient(
+				client,
+				ku.logger,
+				vmToUpgrade.Name,
+				time.Minute,
+			)
+			if err != nil {
+				ku.logger.Errorf("Error draining VM in VMSS: %v", err)
+				return err
+			}
+
 			// At this point we have our buffer node that will replace the node to delete
 			// so we can just remove this current node then
 			res, failure := ku.Client.DeleteVirtualMachineScaleSetVM(
 				ku.ClusterTopology.ResourceGroup,
 				vmssToUpgrade.Name,
-				vmToUpgrade,
+				vmToUpgrade.InstanceID,
 				make(chan struct{}),
 			)
 
From 440bfbdbd56f099a69f2910eb613b55e740b002b Mon Sep 17 00:00:00 2001
From: Thomas Stringer
Date: Sun, 10 Jun 2018 10:29:27 -0400
Subject: [PATCH 3/7] add logging for node drain

---
 pkg/operations/kubernetesupgrade/upgrader.go | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/pkg/operations/kubernetesupgrade/upgrader.go b/pkg/operations/kubernetesupgrade/upgrader.go
index bf3aebb8f5..f5b00f3256 100644
--- a/pkg/operations/kubernetesupgrade/upgrader.go
+++ b/pkg/operations/kubernetesupgrade/upgrader.go
@@ -415,12 +415,6 @@ func (ku *Upgrader) upgradeAgentScaleSets() error {
 				return err
 			}
 
-			ku.logger.Infof(
-				"Deleting VM %s in VMSS %s",
-				vmToUpgrade.Name,
-				vmssToUpgrade.Name,
-			)
-
 			// Before we can delete the node we should safely and responsibly drain it
 			var kubeAPIServerURL string
 			getClientTimeout := 10 * time.Second
@@ -440,6 +434,8 @@ func (ku *Upgrader) upgradeAgentScaleSets() error {
 				ku.logger.Errorf("Error getting Kubernetes client: %v", err)
 				return err
 			}
+
+			ku.logger.Infof("Draining node %s", vmToUpgrade.Name)
 			err = operations.SafelyDrainNodeWithClient(
 				client,
 				ku.logger,
@@ -451,6 +447,12 @@ func (ku *Upgrader) upgradeAgentScaleSets() error {
 				return err
 			}
 
+			ku.logger.Infof(
+				"Deleting VM %s in VMSS %s",
+				vmToUpgrade.Name,
+				vmssToUpgrade.Name,
+			)
+
 			// At this point we have our buffer node that will replace the node to delete
 			// so we can just remove this current node then
 			res, failure := ku.Client.DeleteVirtualMachineScaleSetVM(
From 93b98ef2676eb3593cf5c2a81f4d895e0cb680dd Mon Sep 17 00:00:00 2001
From: Thomas Stringer
Date: Wed, 13 Jun 2018 16:52:08 -0400
Subject: [PATCH 4/7] change logging to reference vm name

---
 pkg/operations/kubernetesupgrade/upgrader.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/operations/kubernetesupgrade/upgrader.go b/pkg/operations/kubernetesupgrade/upgrader.go
index f5b00f3256..0b9e620a0e 100644
--- a/pkg/operations/kubernetesupgrade/upgrader.go
+++ b/pkg/operations/kubernetesupgrade/upgrader.go
@@ -466,13 +466,13 @@ func (ku *Upgrader) upgradeAgentScaleSets() error {
 			case <-res:
 				ku.logger.Infof(
 					"Successfully deleted VM %s in VMSS %s",
-					vmToUpgrade,
+					vmToUpgrade.Name,
 					vmssToUpgrade.Name,
 				)
 			case err := <-failure:
 				ku.logger.Errorf(
 					"Failed to delete VM %s in VMSS %s",
-					vmToUpgrade,
+					vmToUpgrade.Name,
 					vmssToUpgrade.Name,
 				)
 				return err
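Taken together, patches 2 through 4 settle the per-node flow: scale the set out by one so a buffer node joins at the target version, drain the outgoing node, and only then delete it. Condensed into a single hypothetical helper for readability (upgradeNode does not exist in this series, and the armhelpers.KubernetesClient parameter type is assumed from what GetKubernetesClient returns; the real logic stays inline in upgradeAgentScaleSets):

// Sketch of the per-node swap sequence the series converges on.
func (ku *Upgrader) upgradeNode(vmss AgentPoolScaleSet, vm AgentPoolScaleSetVM, client armhelpers.KubernetesClient) error {
	// 1. Grow capacity so a replacement node comes up on the new version.
	ok, fail := ku.Client.SetVirtualMachineScaleSetCapacity(
		ku.ClusterTopology.ResourceGroup, vmss.Name, vmss.Sku, vmss.Location, make(chan struct{}))
	select {
	case <-ok:
	case err := <-fail:
		return err
	}

	// 2. Cordon and drain the outgoing node while the buffer node can absorb its pods.
	if err := operations.SafelyDrainNodeWithClient(client, ku.logger, vm.Name, time.Minute); err != nil {
		return err
	}

	// 3. Delete the old instance by its scale set instance ID.
	deleted, delFail := ku.Client.DeleteVirtualMachineScaleSetVM(
		ku.ClusterTopology.ResourceGroup, vmss.Name, vm.InstanceID, make(chan struct{}))
	select {
	case <-deleted:
		return nil
	case err := <-delFail:
		return err
	}
}
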
From 8738c949f6eb33807556eba9448f25e6d8a208f5 Mon Sep 17 00:00:00 2001
From: Thomas Stringer
Date: Wed, 13 Jun 2018 16:57:29 -0400
Subject: [PATCH 5/7] remove superfluous comment

---
 pkg/operations/kubernetesupgrade/upgradecluster.go | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/pkg/operations/kubernetesupgrade/upgradecluster.go b/pkg/operations/kubernetesupgrade/upgradecluster.go
index 3b18723237..ae5b84e83b 100644
--- a/pkg/operations/kubernetesupgrade/upgradecluster.go
+++ b/pkg/operations/kubernetesupgrade/upgradecluster.go
@@ -144,15 +144,6 @@ func (uc *UpgradeCluster) getClusterNodeStatus(subscriptionID uuid.UUID, resourc
 	targetOrchestratorTypeVersion := fmt.Sprintf("%s:%s",
 		uc.DataModel.Properties.OrchestratorProfile.OrchestratorType,
 		uc.DataModel.Properties.OrchestratorProfile.OrchestratorVersion)
 
-	// Loop through all of the scale sets and see if the VMs in the scale
-	// set are at the current targetOrchestratorTypeVersion
-	//
-	// If they are not, then add them to be upgraded
-	//
-	// Subsequently loop through the VMs to be upgraded, scale up the
-	// VMSS by one, and then remove the old node
-	//
-	// The unique identifier of a scale set vm is VmssName:InstanceId
 	vmScaleSets, err := uc.Client.ListVirtualMachineScaleSets(resourceGroup)
 	if err != nil {
 		return err
 	}
From 6cc2e4aa3ac2fbc3b8fa8a2ff345568108f74b85 Mon Sep 17 00:00:00 2001
From: Thomas Stringer
Date: Wed, 13 Jun 2018 18:26:22 -0400
Subject: [PATCH 6/7] add mock methods for scale set functionality

---
 pkg/armhelpers/mockclients.go | 106 +++++++++++++++++++++++++++++-----
 1 file changed, 91 insertions(+), 15 deletions(-)

diff --git a/pkg/armhelpers/mockclients.go b/pkg/armhelpers/mockclients.go
index 20cb200706..b4820a9f23 100644
--- a/pkg/armhelpers/mockclients.go
+++ b/pkg/armhelpers/mockclients.go
@@ -20,21 +20,24 @@ import (
 //MockACSEngineClient is an implementation of ACSEngineClient where all requests error out
 type MockACSEngineClient struct {
-	FailDeployTemplate              bool
-	FailDeployTemplateQuota         bool
-	FailDeployTemplateConflict      bool
-	FailEnsureResourceGroup         bool
-	FailListVirtualMachines         bool
-	FailListVirtualMachineScaleSets bool
-	FailGetVirtualMachine           bool
-	FailDeleteVirtualMachine        bool
-	FailGetStorageClient            bool
-	FailDeleteNetworkInterface      bool
-	FailGetKubernetesClient         bool
-	FailListProviders               bool
-	ShouldSupportVMIdentity         bool
-	FailDeleteRoleAssignment        bool
-	MockKubernetesClient            *MockKubernetesClient
+	FailDeployTemplate                    bool
+	FailDeployTemplateQuota               bool
+	FailDeployTemplateConflict            bool
+	FailEnsureResourceGroup               bool
+	FailListVirtualMachines               bool
+	FailListVirtualMachineScaleSets       bool
+	FailGetVirtualMachine                 bool
+	FailDeleteVirtualMachine              bool
+	FailDeleteVirtualMachineScaleSetVM    bool
+	FailSetVirtualMachineScaleSetCapacity bool
+	FailListVirtualMachineScaleSetVMs     bool
+	FailGetStorageClient                  bool
+	FailDeleteNetworkInterface            bool
+	FailGetKubernetesClient               bool
+	FailListProviders                     bool
+	ShouldSupportVMIdentity               bool
+	FailDeleteRoleAssignment              bool
+	MockKubernetesClient                  *MockKubernetesClient
 }
 
 //MockStorageClient mock implementation of StorageClient
@@ -344,6 +347,79 @@ func (mc *MockACSEngineClient) DeleteVirtualMachine(resourceGroup, name string,
 	return respChan, errChan
 }
 
+//DeleteVirtualMachineScaleSetVM mock
+func (mc *MockACSEngineClient) DeleteVirtualMachineScaleSetVM(resourceGroup, virtualMachineScaleSet, instanceID string, cancel <-chan struct{}) (<-chan compute.OperationStatusResponse, <-chan error) {
+	if mc.FailDeleteVirtualMachineScaleSetVM {
+		errChan := make(chan error)
+		respChan := make(chan compute.OperationStatusResponse)
+		go func() {
+			defer func() {
+				close(errChan)
+			}()
+			defer func() {
+				close(respChan)
+			}()
+			errChan <- fmt.Errorf("DeleteVirtualMachineScaleSetVM failed")
+		}()
+		return respChan, errChan
+	}
+
+	errChan := make(chan error)
+	respChan := make(chan compute.OperationStatusResponse)
+	go func() {
+		defer func() {
+			close(errChan)
+		}()
+		defer func() {
+			close(respChan)
+		}()
+		errChan <- nil
+		respChan <- compute.OperationStatusResponse{}
+	}()
+	return respChan, errChan
+}
+
+//SetVirtualMachineScaleSetCapacity mock
+func (mc *MockACSEngineClient) SetVirtualMachineScaleSetCapacity(resourceGroup, virtualMachineScaleSet string, sku compute.Sku, location string, cancel <-chan struct{}) (<-chan compute.VirtualMachineScaleSet, <-chan error) {
+	if mc.FailSetVirtualMachineScaleSetCapacity {
+		errChan := make(chan error)
+		respChan := make(chan compute.VirtualMachineScaleSet)
+		go func() {
+			defer func() {
+				close(errChan)
+			}()
+			defer func() {
+				close(respChan)
+			}()
+			errChan <- fmt.Errorf("SetVirtualMachineScaleSetCapacity failed")
+		}()
+		return respChan, errChan
+	}
+
+	errChan := make(chan error)
+	respChan := make(chan compute.VirtualMachineScaleSet)
+	go func() {
+		defer func() {
+			close(errChan)
+		}()
+		defer func() {
+			close(respChan)
+		}()
+		errChan <- nil
+		respChan <- compute.VirtualMachineScaleSet{}
+	}()
+	return respChan, errChan
+}
+
+//ListVirtualMachineScaleSetVMs mock
+func (mc *MockACSEngineClient) ListVirtualMachineScaleSetVMs(resourceGroup, virtualMachineScaleSet string) (compute.VirtualMachineScaleSetVMListResult, error) {
+	if mc.FailListVirtualMachineScaleSetVMs {
+		return compute.VirtualMachineScaleSetVMListResult{}, fmt.Errorf("ListVirtualMachineScaleSetVMs failed")
+	}
+
+	return compute.VirtualMachineScaleSetVMListResult{}, nil
+}
+
 //GetStorageClient mock
 func (mc *MockACSEngineClient) GetStorageClient(resourceGroup, accountName string) (ACSStorageClient, error) {
 	if mc.FailGetStorageClient {
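The new fail flags follow the existing mock convention, which keeps failure injection one boolean away in tests. An illustrative test (not part of this series) for the list mock:

package armhelpers

import "testing"

// TestMockListVirtualMachineScaleSetVMs checks that the fail flag forces
// an error from the mock, the way upgrade tests exercise error paths.
func TestMockListVirtualMachineScaleSetVMs(t *testing.T) {
	mc := MockACSEngineClient{FailListVirtualMachineScaleSetVMs: true}
	if _, err := mc.ListVirtualMachineScaleSetVMs("resourceGroup", "vmssName"); err == nil {
		t.Fatal("expected an error when FailListVirtualMachineScaleSetVMs is set")
	}
}
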
From 686f93357766be7275b6f0796fa512ccb13d87b3 Mon Sep 17 00:00:00 2001
From: Thomas Stringer
Date: Wed, 13 Jun 2018 18:49:41 -0400
Subject: [PATCH 7/7] add nil check for a cluster with no virtual machine
 scale sets

---
 .../kubernetesupgrade/upgradecluster.go       | 66 ++++++++++---------
 1 file changed, 34 insertions(+), 32 deletions(-)

diff --git a/pkg/operations/kubernetesupgrade/upgradecluster.go b/pkg/operations/kubernetesupgrade/upgradecluster.go
index ae5b84e83b..fc85767268 100644
--- a/pkg/operations/kubernetesupgrade/upgradecluster.go
+++ b/pkg/operations/kubernetesupgrade/upgradecluster.go
@@ -148,42 +148,44 @@ func (uc *UpgradeCluster) getClusterNodeStatus(subscriptionID uuid.UUID, resourc
 	if err != nil {
 		return err
 	}
-	for _, vmScaleSet := range *vmScaleSets.Value {
-		vmScaleSetVMs, err := uc.Client.ListVirtualMachineScaleSetVMs(resourceGroup, *vmScaleSet.Name)
-		if err != nil {
-			return err
-		}
-		scaleSetToUpgrade := AgentPoolScaleSet{
-			Name:     *vmScaleSet.Name,
-			Sku:      *vmScaleSet.Sku,
-			Location: *vmScaleSet.Location,
-		}
-		for _, vm := range *vmScaleSetVMs.Value {
-			if vm.Tags == nil || (*vm.Tags)["orchestrator"] == nil {
-				uc.Logger.Infof("No tags found for scale set VM: %s skipping.\n", *vm.Name)
-				continue
+	if vmScaleSets.Value != nil {
+		for _, vmScaleSet := range *vmScaleSets.Value {
+			vmScaleSetVMs, err := uc.Client.ListVirtualMachineScaleSetVMs(resourceGroup, *vmScaleSet.Name)
+			if err != nil {
+				return err
 			}
+			scaleSetToUpgrade := AgentPoolScaleSet{
+				Name:     *vmScaleSet.Name,
+				Sku:      *vmScaleSet.Sku,
+				Location: *vmScaleSet.Location,
+			}
+			for _, vm := range *vmScaleSetVMs.Value {
+				if vm.Tags == nil || (*vm.Tags)["orchestrator"] == nil {
+					uc.Logger.Infof("No tags found for scale set VM: %s skipping.\n", *vm.Name)
+					continue
+				}
 
-			scaleSetVMOrchestratorTypeAndVersion := *(*vm.Tags)["orchestrator"]
-			if scaleSetVMOrchestratorTypeAndVersion != targetOrchestratorTypeVersion {
-				// This condition is a scale set VM that is an older version and should be handled
-				uc.Logger.Infof(
-					"VM %s in VMSS %s has a current tag of %s and a desired tag of %s. Upgrading this node.\n",
-					*vm.Name,
-					*vmScaleSet.Name,
-					scaleSetVMOrchestratorTypeAndVersion,
-					targetOrchestratorTypeVersion,
-				)
-				scaleSetToUpgrade.VMsToUpgrade = append(
-					scaleSetToUpgrade.VMsToUpgrade,
-					AgentPoolScaleSetVM{
-						Name:       *vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName,
-						InstanceID: *vm.InstanceID,
-					},
-				)
+				scaleSetVMOrchestratorTypeAndVersion := *(*vm.Tags)["orchestrator"]
+				if scaleSetVMOrchestratorTypeAndVersion != targetOrchestratorTypeVersion {
+					// This condition is a scale set VM that is an older version and should be handled
+					uc.Logger.Infof(
+						"VM %s in VMSS %s has a current tag of %s and a desired tag of %s. Upgrading this node.\n",
+						*vm.Name,
+						*vmScaleSet.Name,
+						scaleSetVMOrchestratorTypeAndVersion,
+						targetOrchestratorTypeVersion,
+					)
+					scaleSetToUpgrade.VMsToUpgrade = append(
+						scaleSetToUpgrade.VMsToUpgrade,
+						AgentPoolScaleSetVM{
+							Name:       *vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName,
+							InstanceID: *vm.InstanceID,
+						},
+					)
+				}
 			}
+			uc.AgentPoolScaleSetsToUpgrade = append(uc.AgentPoolScaleSetsToUpgrade, scaleSetToUpgrade)
 		}
-		uc.AgentPoolScaleSetsToUpgrade = append(uc.AgentPoolScaleSetsToUpgrade, scaleSetToUpgrade)
 	}
 
 	for _, vm := range *vmListResult.Value {
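A closing note on patch 7: the SDK list results expose Value as a pointer to a slice (*[]compute.VirtualMachineScaleSet), and on a cluster with no scale sets that pointer can be nil, so ranging over *vmScaleSets.Value would panic before the loop even started. A standalone illustration of the failure mode the guard prevents (simplified types, not from this series):

package main

import "fmt"

func main() {
	var scaleSets *[]string // stand-in for the *[]compute.VirtualMachineScaleSet Value field
	if scaleSets != nil {   // without this guard, *scaleSets panics with a nil pointer dereference
		for _, s := range *scaleSets {
			fmt.Println(s)
		}
	} else {
		fmt.Println("no scale sets in the resource group; nothing to upgrade")
	}
}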