diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java index 81ed185dae5a..1a814658a036 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -289,6 +289,8 @@ public class EventTypes { //registering userdata events public static final String EVENT_REGISTER_USER_DATA = "REGISTER.USER.DATA"; + public static final String EVENT_REGISTER_CNI_CONFIG = "REGISTER.CNI.CONFIG"; + public static final String EVENT_DELETE_CNI_CONFIG = "DELETE.CNI.CONFIG"; //register for user API and secret keys public static final String EVENT_REGISTER_FOR_SECRET_API_KEY = "REGISTER.USER.KEY"; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java b/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java similarity index 82% rename from plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java rename to api/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java index 591da077aec6..c0eb78129883 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java +++ b/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java @@ -44,6 +44,8 @@ enum Event { AutoscaleRequested, ScaleUpRequested, ScaleDownRequested, + AddNodeRequested, + RemoveNodeRequested, UpgradeRequested, OperationSucceeded, OperationFailed, @@ -59,6 +61,8 @@ enum State { Stopped("All resources for the Kubernetes cluster are destroyed, Kubernetes cluster may still have ephemeral resource like persistent volumes provisioned"), Scaling("Transient state in which resources are either getting scaled up/down"), Upgrading("Transient state in which cluster is getting upgraded"), + Importing("Transient state in which additional nodes are added as worker nodes to a cluster"), + RemovingNodes("Transient state in which 
additional nodes are removed from a cluster"), Alert("State to represent Kubernetes clusters which are not in expected desired state (operationally in active control place, stopped cluster VM's etc)."), Recovering("State in which Kubernetes cluster is recovering from alert state"), Destroyed("End state of Kubernetes cluster in which all resources are destroyed, cluster will not be usable further"), @@ -96,6 +100,17 @@ enum State { s_fsm.addTransition(State.Upgrading, Event.OperationSucceeded, State.Running); s_fsm.addTransition(State.Upgrading, Event.OperationFailed, State.Alert); + s_fsm.addTransition(State.Running, Event.AddNodeRequested, State.Importing); + s_fsm.addTransition(State.Alert, Event.AddNodeRequested, State.Importing); + s_fsm.addTransition(State.Importing, Event.OperationSucceeded, State.Running); + s_fsm.addTransition(State.Importing, Event.OperationFailed, State.Running); + s_fsm.addTransition(State.Alert, Event.OperationSucceeded, State.Running); + + s_fsm.addTransition(State.Running, Event.RemoveNodeRequested, State.RemovingNodes); + s_fsm.addTransition(State.Alert, Event.RemoveNodeRequested, State.RemovingNodes); + s_fsm.addTransition(State.RemovingNodes, Event.OperationSucceeded, State.Running); + s_fsm.addTransition(State.RemovingNodes, Event.OperationFailed, State.Running); + s_fsm.addTransition(State.Alert, Event.RecoveryRequested, State.Recovering); s_fsm.addTransition(State.Recovering, Event.OperationSucceeded, State.Running); s_fsm.addTransition(State.Recovering, Event.OperationFailed, State.Alert); @@ -142,4 +157,13 @@ enum State { Long getMaxSize(); Long getSecurityGroupId(); ClusterType getClusterType(); + Long getControlServiceOfferingId(); + Long getWorkerServiceOfferingId(); + Long getEtcdServiceOfferingId(); + Long getControlTemplateId(); + Long getWorkerTemplateId(); + Long getEtcdTemplateId(); + Long getEtcdNodeCount(); + Long getCniConfigId(); + String getCniConfigDetails(); } diff --git 
a/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelper.java b/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelper.java index a13c1b3a6a89..4d6dec1f08b8 100644 --- a/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelper.java +++ b/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelper.java @@ -18,12 +18,21 @@ import org.apache.cloudstack.acl.ControlledEntity; +import java.util.Map; + import com.cloud.uservm.UserVm; import com.cloud.utils.component.Adapter; public interface KubernetesServiceHelper extends Adapter { + enum KubernetesClusterNodeType { + CONTROL, WORKER, ETCD, DEFAULT + } + ControlledEntity findByUuid(String uuid); ControlledEntity findByVmId(long vmId); void checkVmCanBeDestroyed(UserVm userVm); + boolean isValidNodeType(String nodeType); + Map getServiceOfferingNodeTypeMap(Map> serviceOfferingNodeTypeMap); + Map getTemplateNodeTypeMap(Map> templateNodeTypeMap); } diff --git a/api/src/main/java/com/cloud/network/NetworkService.java b/api/src/main/java/com/cloud/network/NetworkService.java index b8dd464b3655..36d58c737ccc 100644 --- a/api/src/main/java/com/cloud/network/NetworkService.java +++ b/api/src/main/java/com/cloud/network/NetworkService.java @@ -268,4 +268,6 @@ Network createPrivateNetwork(String networkName, String displayText, long physic InternalLoadBalancerElementService getInternalLoadBalancerElementByNetworkServiceProviderId(long networkProviderId); InternalLoadBalancerElementService getInternalLoadBalancerElementById(long providerId); List getInternalLoadBalancerElements(); + + boolean handleCksIsoOnNetworkVirtualRouter(Long virtualRouterId, boolean mount) throws ResourceUnavailableException; } diff --git a/api/src/main/java/com/cloud/server/ManagementService.java b/api/src/main/java/com/cloud/server/ManagementService.java index 18f3e901cd93..3b8fb2c5e4b9 100644 --- a/api/src/main/java/com/cloud/server/ManagementService.java +++ 
b/api/src/main/java/com/cloud/server/ManagementService.java @@ -59,8 +59,10 @@ import org.apache.cloudstack.api.command.user.ssh.DeleteSSHKeyPairCmd; import org.apache.cloudstack.api.command.user.ssh.ListSSHKeyPairsCmd; import org.apache.cloudstack.api.command.user.ssh.RegisterSSHKeyPairCmd; +import org.apache.cloudstack.api.command.user.userdata.DeleteCniConfigurationCmd; import org.apache.cloudstack.api.command.user.userdata.DeleteUserDataCmd; import org.apache.cloudstack.api.command.user.userdata.ListUserDataCmd; +import org.apache.cloudstack.api.command.user.userdata.RegisterCniConfigurationCmd; import org.apache.cloudstack.api.command.user.userdata.RegisterUserDataCmd; import org.apache.cloudstack.api.command.user.vm.GetVMPasswordCmd; import org.apache.cloudstack.api.command.user.vmgroup.UpdateVMGroupCmd; @@ -360,17 +362,23 @@ public interface ManagementService { * The api command class. * @return The list of userdatas found. */ - Pair, Integer> listUserDatas(ListUserDataCmd cmd); + Pair, Integer> listUserDatas(ListUserDataCmd cmd, boolean forCks); + + /** + * Registers a CNI configuration. + * + * @param cmd The api command class. + * @return A VO with the registered CNI configuration. + */ + UserData registerCniConfigration(RegisterCniConfigurationCmd cmd); /** * Registers a userdata. * - * @param cmd - * The api command class. + * @param cmd The api command class. * @return A VO with the registered userdata. */ UserData registerUserData(RegisterUserDataCmd cmd); - /** * Deletes a userdata. * @@ -380,6 +388,14 @@ public interface ManagementService { * The api command class. * @return True on success. False otherwise. */ boolean deleteUserData(DeleteUserDataCmd cmd); + /** + * Deletes a CNI configuration. + * + * @param cmd + * The api command class. + * @return True on success. False otherwise. + */ + boolean deleteCniConfiguration(DeleteCniConfigurationCmd cmd); /** * Search registered key pairs for the logged in user. 
* diff --git a/api/src/main/java/com/cloud/template/TemplateApiService.java b/api/src/main/java/com/cloud/template/TemplateApiService.java index 5b494c308c3c..6138f24c92b0 100644 --- a/api/src/main/java/com/cloud/template/TemplateApiService.java +++ b/api/src/main/java/com/cloud/template/TemplateApiService.java @@ -58,10 +58,23 @@ public interface TemplateApiService { VirtualMachineTemplate prepareTemplate(long templateId, long zoneId, Long storageId); + /** + * Detach ISO from VM + * @param vmId id of the VM + * @param isoId id of the ISO (when passed). If it is not passed, it will get it from user_vm table + * @param extraParams forced, isVirtualRouter + * @return true when operation succeeds, false if not + */ + boolean detachIso(long vmId, Long isoId, Boolean... extraParams); - boolean detachIso(long vmId, boolean forced); - - boolean attachIso(long isoId, long vmId, boolean forced); + /** + * Attach ISO to a VM + * @param isoId id of the ISO to attach + * @param vmId id of the VM to attach the ISO to + * @param extraParams: forced, isVirtualRouter + * @return true when operation succeeds, false if not + */ + boolean attachIso(long isoId, long vmId, Boolean... 
extraParams); /** * Deletes a template diff --git a/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java b/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java index d8872d5fe724..89953d225a06 100644 --- a/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java +++ b/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java @@ -145,6 +145,8 @@ public enum TemplateFilter { boolean isDeployAsIs(); + boolean isForCks(); + Long getUserDataId(); UserData.UserDataOverridePolicy getUserDataOverridePolicy(); diff --git a/api/src/main/java/com/cloud/user/UserData.java b/api/src/main/java/com/cloud/user/UserData.java index fa0c50473c0d..13a3c74f3679 100644 --- a/api/src/main/java/com/cloud/user/UserData.java +++ b/api/src/main/java/com/cloud/user/UserData.java @@ -29,4 +29,5 @@ public enum UserDataOverridePolicy { String getUserData(); String getParams(); + boolean isForCks(); } diff --git a/api/src/main/java/com/cloud/vm/UserVmService.java b/api/src/main/java/com/cloud/vm/UserVmService.java index 72b18b70e186..5c1c2f9a2e5c 100644 --- a/api/src/main/java/com/cloud/vm/UserVmService.java +++ b/api/src/main/java/com/cloud/vm/UserVmService.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.Map; +import com.cloud.deploy.DeploymentPlan; import org.apache.cloudstack.api.BaseCmd.HTTPMethod; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; @@ -111,7 +112,7 @@ UserVm startVirtualMachine(StartVMCmd cmd) throws StorageUnavailableException, E UserVm rebootVirtualMachine(RebootVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException, ResourceAllocationException; - void startVirtualMachine(UserVm vm) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException; + void startVirtualMachine(UserVm vm, DeploymentPlan plan) throws OperationTimedoutException, ResourceUnavailableException, 
InsufficientCapacityException; void startVirtualMachineForHA(VirtualMachine vm, Map params, DeploymentPlanner planner) throws InsufficientCapacityException, ResourceUnavailableException, diff --git a/api/src/main/java/com/cloud/vm/VmDetailConstants.java b/api/src/main/java/com/cloud/vm/VmDetailConstants.java index 29803d5271b4..040f32ee322b 100644 --- a/api/src/main/java/com/cloud/vm/VmDetailConstants.java +++ b/api/src/main/java/com/cloud/vm/VmDetailConstants.java @@ -89,6 +89,9 @@ public interface VmDetailConstants { String DEPLOY_AS_IS_CONFIGURATION = "configurationId"; String KEY_PAIR_NAMES = "keypairnames"; String CKS_CONTROL_NODE_LOGIN_USER = "controlNodeLoginUser"; + String CKS_NODE_TYPE = "node"; + String OFFERING = "offering"; + String TEMPLATE = "template"; // VMware to KVM VM migrations specific String VMWARE_TO_KVM_PREFIX = "vmware-to-kvm"; diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiCommandResourceType.java b/api/src/main/java/org/apache/cloudstack/api/ApiCommandResourceType.java index f2f52cec9697..a5bedc65d9cc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiCommandResourceType.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiCommandResourceType.java @@ -84,7 +84,7 @@ public enum ApiCommandResourceType { ObjectStore(org.apache.cloudstack.storage.object.ObjectStore.class), Bucket(org.apache.cloudstack.storage.object.Bucket.class), QuotaTariff(org.apache.cloudstack.quota.QuotaTariff.class), - KubernetesCluster(null), + KubernetesCluster(com.cloud.kubernetes.cluster.KubernetesCluster.class), KubernetesSupportedVersion(null), SharedFS(org.apache.cloudstack.storage.sharedfs.SharedFS.class); diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index a406e2d7a722..20d1d7524744 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -112,6 +112,10 @@ 
public class ApiConstants { public static final String CN = "cn"; public static final String COMMAND = "command"; public static final String CMD_EVENT_TYPE = "cmdeventtype"; + public static final String CNI_CONFIG = "cniconfig"; + public static final String CNI_CONFIG_ID = "cniconfigurationid"; + public static final String CNI_CONFIG_DETAILS = "cniconfigdetails"; + public static final String CNI_CONFIG_NAME = "cniconfigname"; public static final String COMPONENT = "component"; public static final String CPU_CORE_PER_SOCKET = "cpucorepersocket"; public static final String CPU_NUMBER = "cpunumber"; @@ -133,6 +137,7 @@ public class ApiConstants { public static final String ENCRYPT_FORMAT = "encryptformat"; public static final String ENCRYPT_ROOT = "encryptroot"; public static final String ENCRYPTION_SUPPORTED = "encryptionsupported"; + public static final String ETCD_IPS = "etcdips"; public static final String MIN_IOPS = "miniops"; public static final String MAX_IOPS = "maxiops"; public static final String HYPERVISOR_SNAPSHOT_RESERVE = "hypervisorsnapshotreserve"; @@ -320,6 +325,7 @@ public class ApiConstants { public static final String LBID = "lbruleid"; public static final String LB_PROVIDER = "lbprovider"; public static final String MAC_ADDRESS = "macaddress"; + public static final String MANUAL_UPGRADE = "manualupgrade"; public static final String MAX = "max"; public static final String MAX_SNAPS = "maxsnaps"; public static final String MAX_CPU_NUMBER = "maxcpunumber"; @@ -330,6 +336,7 @@ public class ApiConstants { public static final String MIGRATIONS = "migrations"; public static final String MEMORY = "memory"; public static final String MODE = "mode"; + public static final String MOUNT_CKS_ISO_ON_VR = "mountcksisoonvr"; public static final String MULTI_ARCH = "ismultiarch"; public static final String NSX_MODE = "nsxmode"; public static final String NETWORK_MODE = "networkmode"; @@ -346,6 +353,7 @@ public class ApiConstants { public static final String 
NIC_PACKED_VIRTQUEUES_ENABLED = "nicpackedvirtqueuesenabled"; public static final String NEW_START_IP = "newstartip"; public static final String NEW_END_IP = "newendip"; + public static final String KUBERNETES_NODE_VERSION = "kubernetesnodeversion"; public static final String NUM_RETRIES = "numretries"; public static final String OFFER_HA = "offerha"; public static final String OS_DISTRIBUTION = "osdistribution"; @@ -543,6 +551,12 @@ public class ApiConstants { public static final String VLAN = "vlan"; public static final String VLAN_RANGE = "vlanrange"; + public static final String WORKER_SERVICE_OFFERING_ID = "workerofferingid"; + public static final String WORKER_SERVICE_OFFERING_NAME = "workerofferingname"; + public static final String CONTROL_SERVICE_OFFERING_ID = "controlofferingid"; + public static final String CONTROL_SERVICE_OFFERING_NAME = "controlofferingname"; + public static final String ETCD_SERVICE_OFFERING_ID = "etcdofferingid"; + public static final String ETCD_SERVICE_OFFERING_NAME = "etcdofferingname"; public static final String REMOVE_VLAN = "removevlan"; public static final String VLAN_ID = "vlanid"; public static final String ISOLATED_PVLAN = "isolatedpvlan"; @@ -893,6 +907,7 @@ public class ApiConstants { public static final String SPLIT_CONNECTIONS = "splitconnections"; public static final String FOR_VPC = "forvpc"; public static final String FOR_NSX = "fornsx"; + public static final String FOR_CKS = "forcks"; public static final String NSX_SUPPORT_LB = "nsxsupportlb"; public static final String NSX_SUPPORTS_INTERNAL_LB = "nsxsupportsinternallb"; public static final String FOR_TUNGSTEN = "fortungsten"; @@ -1102,6 +1117,10 @@ public class ApiConstants { public static final String MASTER_NODES = "masternodes"; public static final String NODE_IDS = "nodeids"; public static final String CONTROL_NODES = "controlnodes"; + public static final String ETCD_NODES = "etcdnodes"; + public static final String EXTERNAL_NODES = "externalnodes"; + public 
static final String IS_EXTERNAL_NODE = "isexternalnode"; + public static final String IS_ETCD_NODE = "isetcdnode"; public static final String MIN_SEMANTIC_VERSION = "minimumsemanticversion"; public static final String MIN_KUBERNETES_VERSION_ID = "minimumkubernetesversionid"; public static final String NODE_ROOT_DISK_SIZE = "noderootdisksize"; @@ -1110,6 +1129,8 @@ public class ApiConstants { public static final String AUTOSCALING_ENABLED = "autoscalingenabled"; public static final String MIN_SIZE = "minsize"; public static final String MAX_SIZE = "maxsize"; + public static final String NODE_TYPE_OFFERING_MAP = "nodeofferings"; + public static final String NODE_TYPE_TEMPLATE_MAP = "nodetemplates"; public static final String BOOT_TYPE = "boottype"; public static final String BOOT_MODE = "bootmode"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DetachIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DetachIsoCmd.java index 292e1c6f099b..78f1a4bcdee6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DetachIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DetachIsoCmd.java @@ -104,7 +104,7 @@ public ApiCommandResourceType getApiResourceType() { @Override public void execute() { - boolean result = _templateService.detachIso(virtualMachineId, isForced()); + boolean result = _templateService.detachIso(virtualMachineId, null, isForced()); if (result) { UserVm userVm = _entityMgr.findById(UserVm.class, virtualMachineId); UserVmResponse response = _responseGenerator.createUserVmResponse(getResponseView(), "virtualmachine", userVm).get(0); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java index 8fa1a5d53eb7..330224a60552 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java @@ -99,6 +99,11 @@ public class GetUploadParamsForTemplateCmd extends AbstractGetUploadParamsCmd { description = "(VMware only) true if VM deployments should preserve all the configurations defined for this template", since = "4.15.1") private Boolean deployAsIs; + @Parameter(name=ApiConstants.FOR_CKS, + type = CommandType.BOOLEAN, + description = "if true, the templates would be available for deploying CKS clusters", since = "4.21.0") + protected Boolean forCks; + public String getDisplayText() { return StringUtils.isBlank(displayText) ? getName() : displayText; } @@ -168,6 +173,10 @@ public boolean isDeployAsIs() { Boolean.TRUE.equals(deployAsIs); } + public boolean isForCks() { + return Boolean.TRUE.equals(forCks); + } + public CPU.CPUArch getArch() { return CPU.CPUArch.fromType(arch); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java index bff65ef70a92..8bd0c05401f0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java @@ -106,6 +106,11 @@ public class ListTemplatesCmd extends BaseListTaggedResourcesCmd implements User since = "4.19.0") private Boolean isVnf; + @Parameter(name = ApiConstants.FOR_CKS, type = CommandType.BOOLEAN, + description = "list templates that can be used to deploy CKS clusters", + since = "4.21.0") + private Boolean forCks; + @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, description = "the CPU arch of the template. 
Valid options are: x86_64, aarch64", since = "4.20") @@ -198,6 +203,8 @@ public Boolean getVnf() { return isVnf; } + public Boolean getForCks() { return forCks; } + public CPU.CPUArch getArch() { if (StringUtils.isBlank(arch)) { return null; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java index 1f968b869b99..b7c14a6d3c89 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java @@ -168,6 +168,11 @@ public class RegisterTemplateCmd extends BaseCmd implements UserCmd { description = "(VMware only) true if VM deployments should preserve all the configurations defined for this template", since = "4.15.1") protected Boolean deployAsIs; + @Parameter(name=ApiConstants.FOR_CKS, + type = CommandType.BOOLEAN, + description = "if true, the templates would be available for deploying CKS clusters", since = "4.20.0") + protected Boolean forCks; + @Parameter(name = ApiConstants.TEMPLATE_TYPE, type = CommandType.STRING, description = "the type of the template. 
Valid options are: USER/VNF (for all users) and SYSTEM/ROUTING/BUILTIN (for admins only).", since = "4.19.0") @@ -295,6 +300,10 @@ public boolean isDeployAsIs() { Boolean.TRUE.equals(deployAsIs); } + public boolean isForCks() { + return Boolean.TRUE.equals(forCks); + } + public String getTemplateType() { return templateType; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/UpdateTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/UpdateTemplateCmd.java index dbbd771293a4..20849d1ba6c5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/UpdateTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/UpdateTemplateCmd.java @@ -46,6 +46,11 @@ public class UpdateTemplateCmd extends BaseUpdateTemplateOrIsoCmd implements Use @Parameter(name = ApiConstants.TEMPLATE_TAG, type = CommandType.STRING, description = "the tag for this template.", since = "4.20.0") private String templateTag; + @Parameter(name = ApiConstants.FOR_CKS, type = CommandType.BOOLEAN, + description = "indicates that the template can be used for deployment of CKS clusters", + since = "4.21.0") + private Boolean forCks; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -63,6 +68,10 @@ public String getTemplateTag() { return templateTag; } + public Boolean getForCks() { + return forCks; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/BaseRegisterUserDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/BaseRegisterUserDataCmd.java new file mode 100644 index 000000000000..32c1eaf42f8b --- /dev/null +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/BaseRegisterUserDataCmd.java @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.userdata; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.network.NetworkModel; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ProjectResponse; +import org.apache.commons.lang3.StringUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public abstract class BaseRegisterUserDataCmd extends BaseCmd { + + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, description = "Name of the userdata") + private String name; + + //Owner information + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional account for the userdata. 
Must be used with domainId.") + private String accountName; + + @Parameter(name = ApiConstants.DOMAIN_ID, + type = CommandType.UUID, + entityType = DomainResponse.class, + description = "an optional domainId for the userdata. If the account parameter is used, domainId must also be used.") + private Long domainId; + + @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "an optional project for the userdata") + private Long projectId; + + @Parameter(name = ApiConstants.PARAMS, type = CommandType.STRING, description = "comma separated list of variables declared in userdata content") + private String params; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public String getName() { + return name; + } + + public String getAccountName() { + return accountName; + } + + public Long getDomainId() { + return domainId; + } + + public Long getProjectId() { + return projectId; + } + + public String getParams() { + checkForVRMetadataFileNames(params); + return params; + } + + public void checkForVRMetadataFileNames(String params) { + if (StringUtils.isNotEmpty(params)) { + List keyValuePairs = new ArrayList<>(Arrays.asList(params.split(","))); + keyValuePairs.retainAll(NetworkModel.metadataFileNames); + if (!keyValuePairs.isEmpty()) { + throw new InvalidParameterValueException(String.format("Params passed here have a few virtual router metadata file names %s", keyValuePairs)); + } + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/DeleteCniConfigurationCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/DeleteCniConfigurationCmd.java new file mode 100644 index 000000000000..286ad1f9d2e1 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/DeleteCniConfigurationCmd.java @@ -0,0 +1,74 @@ +// 
Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.userdata; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; + +import com.cloud.user.Account; +import com.cloud.user.UserData; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + + +@APICommand(name = "deleteCniConfiguration", description = "Deletes a CNI Configuration", responseObject = SuccessResponse.class, entityType = {UserData.class}, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.21.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class DeleteCniConfigurationCmd extends DeleteUserDataCmd { + + public static final Logger logger = LogManager.getLogger(DeleteCniConfigurationCmd.class.getName()); + + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + 
/////////////////////////////////////////////////////
+
+    @Override
+    public void execute() {
+        boolean result = _mgr.deleteCniConfiguration(this);
+        if (result) {
+            SuccessResponse response = new SuccessResponse(getCommandName());
+            response.setSuccess(result);
+            setResponseObject(response);
+        } else {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete CNI configuration");
+        }
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        Account account = CallContext.current().getCallingAccount();
+        Long domainId = this.getDomainId();
+        String accountName = this.getAccountName();
+        if ((account == null || _accountService.isAdmin(account.getId())) && (domainId != null && accountName != null)) {
+            Account userAccount = _responseGenerator.findAccountByNameDomain(accountName, domainId);
+            if (userAccount != null) {
+                return userAccount.getId();
+            }
+        }
+
+        if (account != null) {
+            return account.getId();
+        }
+
+        return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked
+    }
+}
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/ListCniConfigurationCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/ListCniConfigurationCmd.java
new file mode 100644
index 000000000000..86b02bd18d0e
--- /dev/null
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/ListCniConfigurationCmd.java
@@ -0,0 +1,59 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.user.userdata;
+
+import com.cloud.user.UserData;
+import com.cloud.utils.Pair;
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.response.ListResponse;
+import org.apache.cloudstack.api.response.UserDataResponse;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.ArrayList;
+import java.util.List;
+
+@APICommand(name = "listCniConfiguration", description = "List userdata for CNI plugins", responseObject = UserDataResponse.class, entityType = {UserData.class},
+        requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.21.0",
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class ListCniConfigurationCmd extends ListUserDataCmd {
+    public static final Logger logger = LogManager.getLogger(ListCniConfigurationCmd.class.getName());
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public void execute() {
+        Pair<List<UserData>, Integer> resultList = _mgr.listUserDatas(this, true);
+        List<UserDataResponse> responses = new ArrayList<>();
+        for (UserData result : resultList.first()) {
+            UserDataResponse r = _responseGenerator.createUserDataResponse(result);
+            r.setObjectName(ApiConstants.CNI_CONFIG);
+            responses.add(r);
+        }
+
+        ListResponse<UserDataResponse> response = new
ListResponse<>(); + response.setResponses(responses, resultList.second()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } + + +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/ListUserDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/ListUserDataCmd.java index 64ab3ec3d70e..16bf1e5c1e44 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/ListUserDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/ListUserDataCmd.java @@ -61,7 +61,7 @@ public String getName() { @Override public void execute() { - Pair, Integer> resultList = _mgr.listUserDatas(this); + Pair, Integer> resultList = _mgr.listUserDatas(this, false); List responses = new ArrayList<>(); for (UserData result : resultList.first()) { UserDataResponse r = _responseGenerator.createUserDataResponse(result); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterCniConfigurationCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterCniConfigurationCmd.java new file mode 100644 index 000000000000..87ad87dc1a0b --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterCniConfigurationCmd.java @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.user.userdata;
+
+import com.cloud.user.UserData;
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.response.SuccessResponse;
+import org.apache.cloudstack.api.response.UserDataResponse;
+import org.apache.cloudstack.context.CallContext;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+@APICommand(name = "registerCniConfiguration",
+        description = "Register a CNI Configuration to be used with CKS cluster",
+        since = "4.21.0",
+        responseObject = SuccessResponse.class,
+        requestHasSensitiveInfo = false,
+        responseHasSensitiveInfo = false,
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class RegisterCniConfigurationCmd extends BaseRegisterUserDataCmd {
+    public static final Logger logger = LogManager.getLogger(RegisterCniConfigurationCmd.class.getName());
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+
+    @Parameter(name = ApiConstants.CNI_CONFIG, type = CommandType.STRING, description = "CNI Configuration content to be registered as User data", length = 1048576)
+    private String cniConfig;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+
///////////////////////////////////////////////////// + + public String getCniConfig() { + return cniConfig; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() { + UserData result = _mgr.registerCniConfigration(this); + UserDataResponse response = _responseGenerator.createUserDataResponse(result); + response.setResponseName(getCommandName()); + response.setObjectName(ApiConstants.CNI_CONFIG); + setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + Long accountId = _accountService.finalyzeAccountId(getAccountName(), getDomainId(), getProjectId(), true); + if (accountId == null) { + return CallContext.current().getCallingAccount().getId(); + } + + return accountId; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java index 41d865d678c8..e2160d1418b4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java @@ -16,30 +16,20 @@ // under the License. 
package org.apache.cloudstack.api.command.user.userdata; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.api.response.DomainResponse; -import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.UserDataResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.commons.lang3.StringUtils; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; -import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; -import com.cloud.network.NetworkModel; import com.cloud.user.UserData; @APICommand(name = "registerUserData", @@ -49,89 +39,28 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) -public class RegisterUserDataCmd extends BaseCmd { +public class RegisterUserDataCmd extends BaseRegisterUserDataCmd { ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, description = "Name of the userdata") - private String name; - - //Owner information - @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional account for the userdata. 
Must be used with domainId.") - private String accountName; - - @Parameter(name = ApiConstants.DOMAIN_ID, - type = CommandType.UUID, - entityType = DomainResponse.class, - description = "an optional domainId for the userdata. If the account parameter is used, domainId must also be used.") - private Long domainId; - - @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "an optional project for the userdata") - private Long projectId; - - @Parameter(name = ApiConstants.USER_DATA, - type = CommandType.STRING, - required = true, - description = "Base64 encoded userdata content. " + - "Using HTTP GET (via querystring), you can send up to 4KB of data after base64 encoding. " + - "Using HTTP POST (via POST body), you can send up to 1MB of data after base64 encoding. " + - "You also need to change vm.userdata.max.length value", - length = 1048576) - private String userData; - - @Parameter(name = ApiConstants.PARAMS, type = CommandType.STRING, description = "comma separated list of variables declared in userdata content") - private String params; - - - ///////////////////////////////////////////////////// - /////////////////// Accessors /////////////////////// - ///////////////////////////////////////////////////// - - public String getName() { - return name; - } - - public String getAccountName() { - return accountName; - } + @Parameter(name = ApiConstants.USER_DATA, type = CommandType.STRING, required = true, description = "Userdata content", length = 1048576) + protected String userData; - public Long getDomainId() { - return domainId; - } - - public Long getProjectId() { - return projectId; - } public String getUserData() { return userData; } - public String getParams() { - checkForVRMetadataFileNames(params); - return params; - } - - public void checkForVRMetadataFileNames(String params) { - if (StringUtils.isNotEmpty(params)) { - List keyValuePairs = new ArrayList<>(Arrays.asList(params.split(","))); - 
keyValuePairs.retainAll(NetworkModel.metadataFileNames); - if (!keyValuePairs.isEmpty()) { - throw new InvalidParameterValueException(String.format("Params passed here have a few virtual router metadata file names %s", keyValuePairs)); - } - } - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalyzeAccountId(getAccountName(), getDomainId(), getProjectId(), true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/KubernetesUserVmResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/KubernetesUserVmResponse.java new file mode 100644 index 000000000000..cef5cdae2f45 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/KubernetesUserVmResponse.java @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.response; + +import com.cloud.network.router.VirtualRouter; +import com.cloud.serializer.Param; +import com.cloud.uservm.UserVm; +import com.cloud.vm.VirtualMachine; +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.EntityReference; + +@EntityReference(value = {VirtualMachine.class, UserVm.class, VirtualRouter.class}) +public class KubernetesUserVmResponse extends UserVmResponse { + @SerializedName(ApiConstants.IS_EXTERNAL_NODE) + @Param(description = "If the VM is an externally added node") + private boolean isExternalNode; + + @SerializedName(ApiConstants.IS_ETCD_NODE) + @Param(description = "If the VM is an etcd node") + private boolean isEtcdNode; + + @SerializedName(ApiConstants.KUBERNETES_NODE_VERSION) + @Param(description = "Kubernetes version of the node") + private String nodeVersion; + + + public void setExternalNode(boolean externalNode) { + isExternalNode = externalNode; + } + + public void setEtcdNode(boolean etcdNode) { + isEtcdNode = etcdNode; + } + + public void setNodeVersion(String nodeVersion) { this.nodeVersion = nodeVersion;} +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java index 98e96091d8c7..dac3c0554a3b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java @@ -208,6 +208,11 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements since = "4.15") private Boolean deployAsIs; + @SerializedName(ApiConstants.FOR_CKS) + @Param(description = "If true it indicates that the template can be used for CKS cluster deployments", + since = "4.20") + private Boolean forCks; + @SerializedName(ApiConstants.DEPLOY_AS_IS_DETAILS) @Param(description = "VMware only: additional key/value 
details tied with deploy-as-is template", since = "4.15") @@ -453,6 +458,10 @@ public void setDeployAsIs(Boolean deployAsIs) { this.deployAsIs = deployAsIs; } + public void setForCks(Boolean forCks) { + this.forCks = forCks; + } + public void setParentTemplateId(String parentTemplateId) { this.parentTemplateId = parentTemplateId; } diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/userdata/ListUserDataCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/userdata/ListUserDataCmdTest.java index 3f47a078445c..8b7db2924629 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/user/userdata/ListUserDataCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/userdata/ListUserDataCmdTest.java @@ -68,7 +68,7 @@ public void testListSuccess() { Pair, Integer> result = new Pair, Integer>(userDataList, 1); UserDataResponse userDataResponse = Mockito.mock(UserDataResponse.class); - Mockito.when(_mgr.listUserDatas(cmd)).thenReturn(result); + Mockito.when(_mgr.listUserDatas(cmd, false)).thenReturn(result); Mockito.when(_responseGenerator.createUserDataResponse(userData)).thenReturn(userDataResponse); cmd.execute(); @@ -82,7 +82,7 @@ public void testEmptyList() { List userDataList = new ArrayList(); Pair, Integer> result = new Pair, Integer>(userDataList, 0); - Mockito.when(_mgr.listUserDatas(cmd)).thenReturn(result); + Mockito.when(_mgr.listUserDatas(cmd, false)).thenReturn(result); cmd.execute(); diff --git a/core/src/main/java/com/cloud/agent/api/HandleCksIsoCommand.java b/core/src/main/java/com/cloud/agent/api/HandleCksIsoCommand.java new file mode 100644 index 000000000000..16942bb05d44 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/HandleCksIsoCommand.java @@ -0,0 +1,34 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +package com.cloud.agent.api; + +import com.cloud.agent.api.routing.NetworkElementCommand; + +public class HandleCksIsoCommand extends NetworkElementCommand { + + private boolean mountCksIso; + + public HandleCksIsoCommand(boolean mountCksIso) { + this.mountCksIso = mountCksIso; + } + + public boolean isMountCksIso() { + return mountCksIso; + } +} diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java index f9ea3e05e97f..7bfbf786e9b4 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java @@ -82,5 +82,8 @@ public class VRScripts { public static final String VR_UPDATE_INTERFACE_CONFIG = "update_interface_config.sh"; public static final String ROUTER_FILESYSTEM_WRITABLE_CHECK = "filesystem_writable_check.py"; + + // CKS ISO mount + public static final String CKS_ISO_MOUNT_SERVE = "cks_iso.sh"; public static final String MANAGE_SERVICE = "manage_service.sh"; } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java index 4afac9b43cb3..bd632632ae89 100644 --- 
a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java @@ -34,6 +34,7 @@ import javax.naming.ConfigurationException; +import com.cloud.agent.api.HandleCksIsoCommand; import org.apache.cloudstack.agent.routing.ManageServiceCommand; import com.cloud.agent.api.routing.UpdateNetworkCommand; import com.cloud.agent.api.to.IpAddressTO; @@ -145,6 +146,10 @@ public Answer executeRequest(final NetworkElementCommand cmd) { return execute((UpdateNetworkCommand) cmd); } + if (cmd instanceof HandleCksIsoCommand) { + return execute((HandleCksIsoCommand) cmd); + } + if (cmd instanceof ManageServiceCommand) { return execute((ManageServiceCommand) cmd); } @@ -176,6 +181,13 @@ public Answer executeRequest(final NetworkElementCommand cmd) { } } + protected Answer execute(final HandleCksIsoCommand cmd) { + String routerIp = getRouterSshControlIp(cmd); + logger.info("Attempting to mount CKS ISO on Virtual Router"); + ExecutionResult result = _vrDeployer.executeInVR(routerIp, VRScripts.CKS_ISO_MOUNT_SERVE, String.valueOf(cmd.isMountCksIso())); + return new Answer(cmd, result.isSuccess(), result.getDetails()); + } + private Answer execute(final SetupKeyStoreCommand cmd) { final String args = String.format("/usr/local/cloud/systemvm/conf/agent.properties " + "/usr/local/cloud/systemvm/conf/%s " + diff --git a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDao.java b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDao.java index b80ccd9cd1b9..e5c3a661bcd2 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDao.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDao.java @@ -73,5 +73,7 @@ public interface FirewallRulesDao extends GenericDao { void loadDestinationCidrs(FirewallRuleVO rule); + FirewallRuleVO findByNetworkIdAndPorts(long networkId, int startPort, int endPort); + List 
listRoutingIngressFirewallRules(long networkId); } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDaoImpl.java index c8bd7e2147ea..feed641df770 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDaoImpl.java @@ -48,6 +48,7 @@ public class FirewallRulesDaoImpl extends GenericDaoBase i protected final SearchBuilder NotRevokedSearch; protected final SearchBuilder ReleaseSearch; protected SearchBuilder VmSearch; + protected SearchBuilder FirewallByPortsAndNetwork; protected final SearchBuilder SystemRuleSearch; protected final GenericSearchBuilder RulesByIpCount; protected final SearchBuilder RoutingFirewallRulesSearch; @@ -106,6 +107,12 @@ protected FirewallRulesDaoImpl() { RulesByIpCount.and("state", RulesByIpCount.entity().getState(), Op.EQ); RulesByIpCount.done(); + FirewallByPortsAndNetwork = createSearchBuilder(); + FirewallByPortsAndNetwork.and("networkId", FirewallByPortsAndNetwork.entity().getNetworkId(), Op.EQ); + FirewallByPortsAndNetwork.and("sourcePortStart", FirewallByPortsAndNetwork.entity().getSourcePortStart(), Op.EQ); + FirewallByPortsAndNetwork.and("sourcePortEnd", FirewallByPortsAndNetwork.entity().getSourcePortEnd(), Op.EQ); + FirewallByPortsAndNetwork.done(); + RoutingFirewallRulesSearch = createSearchBuilder(); RoutingFirewallRulesSearch.and("networkId", RoutingFirewallRulesSearch.entity().getNetworkId(), Op.EQ); RoutingFirewallRulesSearch.and("purpose", RoutingFirewallRulesSearch.entity().getPurpose(), Op.EQ); @@ -391,6 +398,16 @@ public void loadDestinationCidrs(FirewallRuleVO rule){ rule.setDestinationCidrsList(destCidrs); } + @Override + public FirewallRuleVO findByNetworkIdAndPorts(long networkId, int startPort, int endPort) { + SearchCriteria sc = FirewallByPortsAndNetwork.create(); + sc.setParameters("networkId", networkId); + 
sc.setParameters("sourcePortStart", startPort); + sc.setParameters("sourcePortEnd", endPort); + + return findOneBy(sc); + } + @Override public List listRoutingIngressFirewallRules(long networkId) { SearchCriteria sc = RoutingFirewallRulesSearch.create(); diff --git a/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDao.java b/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDao.java index 8cd114b7fc4f..a737f1b9a205 100644 --- a/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDao.java +++ b/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDao.java @@ -47,5 +47,7 @@ public interface PortForwardingRulesDao extends GenericDao listByNetworkAndDestIpAddr(String ip4Address, long networkId); + + PortForwardingRuleVO findByNetworkAndPorts(long networkId, int startPort, int endPort); int expungeByVmList(List vmIds, Long batchSize); } diff --git a/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java index 1b3df06e1a2d..637f47731b47 100644 --- a/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java @@ -58,6 +58,8 @@ protected PortForwardingRulesDaoImpl() { AllFieldsSearch.and("vmId", AllFieldsSearch.entity().getVirtualMachineId(), Op.EQ); AllFieldsSearch.and("purpose", AllFieldsSearch.entity().getPurpose(), Op.EQ); AllFieldsSearch.and("dstIp", AllFieldsSearch.entity().getDestinationIpAddress(), Op.EQ); + AllFieldsSearch.and("sourcePortStart", AllFieldsSearch.entity().getSourcePortStart(), Op.EQ); + AllFieldsSearch.and("sourcePortEnd", AllFieldsSearch.entity().getSourcePortEnd(), Op.EQ); AllFieldsSearch.done(); ApplicationSearch = createSearchBuilder(); @@ -175,6 +177,15 @@ public PortForwardingRuleVO findByIdAndIp(long id, 
String secondaryIp) { return findOneBy(sc); } + @Override + public PortForwardingRuleVO findByNetworkAndPorts(long networkId, int startPort, int endPort) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("networkId", networkId); + sc.setParameters("sourcePortStart", startPort); + sc.setParameters("sourcePortEnd", endPort); + return findOneBy(sc); + } + @Override public int expungeByVmList(List vmIds, Long batchSize) { if (CollectionUtils.isEmpty(vmIds)) { diff --git a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java index 10d08601515b..93f6a4640195 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java @@ -162,6 +162,9 @@ public class VMTemplateVO implements VirtualMachineTemplate { @Column(name = "deploy_as_is") private boolean deployAsIs; + @Column(name = "for_cks") + private boolean forCks; + @Column(name = "user_data_id") private Long userDataId; @@ -664,6 +667,14 @@ public void setDeployAsIs(boolean deployAsIs) { this.deployAsIs = deployAsIs; } + public boolean isForCks() { + return forCks; + } + + public void setForCks(boolean forCks) { + this.forCks = forCks; + } + @Override public Long getUserDataId() { return userDataId; diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42010to42100.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42010to42100.java index 06a68ec3d8b2..eb557da58d83 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42010to42100.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42010to42100.java @@ -21,6 +21,13 @@ import java.io.InputStream; import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; public class 
Upgrade42010to42100 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate {
     private SystemVmTemplateRegistration systemVmTemplateRegistration;
 
@@ -53,6 +60,7 @@ public InputStream[] getPrepareScripts() {
 
     @Override
     public void performDataMigration(Connection conn) {
+        updateKubernetesClusterNodeVersions(conn);
     }
 
     @Override
@@ -80,4 +88,93 @@ public void updateSystemVmTemplates(Connection conn) {
             throw new CloudRuntimeException("Failed to find / register SystemVM template(s)");
         }
     }
+
+    private void updateKubernetesClusterNodeVersions(Connection conn) {
+        //get list of all non removed kubernetes clusters
+        try {
+            Map<Long, String> clusterAndVersion = getKubernetesClusterIdsAndVersion(conn);
+            updateKubernetesNodeVersions(conn, clusterAndVersion);
+        } catch (Exception e) {
+            String errMsg = "Failed to update kubernetes cluster nodes version";
+            logger.error(errMsg, e);
+            throw new CloudRuntimeException(errMsg, e);
+        }
+    }
+
+    private Map<Long, String> getKubernetesClusterIdsAndVersion(Connection conn) {
+        String listKubernetesClusters = "SELECT c.id, v.semantic_version FROM `cloud`.`kubernetes_cluster` c JOIN `cloud`.`kubernetes_supported_version` v ON (c.kubernetes_version_id = v.id) WHERE c.removed is NULL;";
+        Map<Long, String> clusterAndVersion = new HashMap<>();
+        try {
+            PreparedStatement pstmt = conn.prepareStatement(listKubernetesClusters);
+            ResultSet rs = pstmt.executeQuery();
+            while (rs.next()) {
+                clusterAndVersion.put(rs.getLong(1), rs.getString(2));
+            }
+            rs.close();
+            pstmt.close();
+        } catch (SQLException e) {
+            String errMsg = String.format("Failed to get all the kubernetes cluster ids due to: %s", e.getMessage());
+            logger.error(errMsg, e);
+            throw new CloudRuntimeException(errMsg, e);
+        }
+        return clusterAndVersion;
+    }
+
+    private void updateKubernetesNodeVersions(Connection conn, Map<Long, String> clusterAndVersion) {
+        List<Long> kubernetesClusterVmIds;
+        for (Map.Entry<Long, String> clusterVersionEntry : clusterAndVersion.entrySet()) {
+            try {
+                Long cksClusterId = clusterVersionEntry.getKey();
+
String cksVersion = clusterVersionEntry.getValue();
+                logger.debug(String.format("Adding CKS version %s to existing CKS cluster %s nodes", cksVersion, cksClusterId));
+                kubernetesClusterVmIds = getKubernetesClusterVmMapIds(conn, cksClusterId);
+                updateKubernetesNodeVersion(conn, kubernetesClusterVmIds, cksClusterId, cksVersion);
+            } catch (Exception e) {
+                String errMsg = String.format("Failed to update the node version for kubernetes cluster nodes for the" +
+                        " kubernetes cluster with id: %s," +
+                        " due to: %s", clusterVersionEntry.getKey(), e.getMessage());
+                logger.error(errMsg, e);
+                throw new CloudRuntimeException(errMsg, e);
+            }
+        }
+    }
+
+    private List<Long> getKubernetesClusterVmMapIds(Connection conn, Long cksClusterId) {
+        List<Long> kubernetesClusterVmIds = new ArrayList<>();
+        String getKubernetesClustersVmMap = "SELECT id FROM `cloud`.`kubernetes_cluster_vm_map` WHERE cluster_id = ?;";
+        try {
+            PreparedStatement pstmt = conn.prepareStatement(getKubernetesClustersVmMap);
+            pstmt.setLong(1, cksClusterId);
+            ResultSet rs = pstmt.executeQuery();
+            while (rs.next()) {
+                kubernetesClusterVmIds.add(rs.getLong(1));
+            }
+            rs.close();
+            pstmt.close();
+        } catch (SQLException e) {
+            String errMsg = String.format("Failed to get the kubernetes cluster vm map IDs for kubernetes cluster with id: %s," +
+                    " due to: %s", cksClusterId, e.getMessage());
+            logger.error(errMsg, e);
+            throw new CloudRuntimeException(errMsg, e);
+        }
+        return kubernetesClusterVmIds;
+    }
+
+    private void updateKubernetesNodeVersion(Connection conn, List<Long> kubernetesClusterVmIds, Long cksClusterId, String cksVersion) {
+        String updateKubernetesNodeVersion = "UPDATE `cloud`.`kubernetes_cluster_vm_map` set kubernetes_node_version = ?
WHERE id = ?;"; + for (Long nodeVmId : kubernetesClusterVmIds) { + try { + PreparedStatement pstmt = conn.prepareStatement(updateKubernetesNodeVersion); + pstmt.setString(1, cksVersion); + pstmt.setLong(2, nodeVmId); + pstmt.executeUpdate(); + pstmt.close(); + } catch (Exception e) { + String errMsg = String.format("Failed to update the node version for kubernetes cluster nodes for the" + + " kubernetes cluster with id: %s," + + " due to: %s", cksClusterId, e.getMessage()); + logger.error(errMsg, e); + throw new CloudRuntimeException(errMsg, e); + } + } + } } diff --git a/engine/schema/src/main/java/com/cloud/user/UserDataVO.java b/engine/schema/src/main/java/com/cloud/user/UserDataVO.java index a8e48ad22b1a..e8864976069d 100644 --- a/engine/schema/src/main/java/com/cloud/user/UserDataVO.java +++ b/engine/schema/src/main/java/com/cloud/user/UserDataVO.java @@ -65,6 +65,9 @@ public UserDataVO() { @Column(name = GenericDao.REMOVED_COLUMN) private Date removed; + @Column(name = "for_cks") + private boolean forCks; + @Override public long getDomainId() { return domainId; @@ -105,6 +108,11 @@ public String getParams() { return params; } + @Override + public boolean isForCks() { + return forCks; + } + public void setAccountId(long accountId) { this.accountId = accountId; } @@ -132,4 +140,6 @@ public void setRemoved(Date removed) { public Date getRemoved() { return removed; } + + public void setForCks(boolean forCks) { this.forCks = forCks; } } diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql b/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql index 47e7bebbee4f..92c2432344a6 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42010to42100.sql @@ -30,4 +30,38 @@ INSERT INTO `cloud`.`role_permissions` (uuid, role_id, rule, permission, sort_or SELECT uuid(), role_id, 'quotaCreditsList', permission, sort_order FROM 
`cloud`.`role_permissions` rp WHERE rp.rule = 'quotaStatement' -AND NOT EXISTS(SELECT 1 FROM cloud.role_permissions rp_ WHERE rp.role_id = rp_.role_id AND rp_.rule = 'quotaCreditsList'); + AND NOT EXISTS(SELECT 1 FROM cloud.role_permissions rp_ WHERE rp.role_id = rp_.role_id AND rp_.rule = 'quotaCreditsList'); + +----------------------------------------------------------- +-- CKS Enhancements: +----------------------------------------------------------- +-- Add for_cks column to the vm_template table +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.vm_template','for_cks', 'int(1) unsigned DEFAULT "0" COMMENT "if true, the template can be used for CKS cluster deployment"'); + +-- Add support for different node types service offerings on CKS clusters +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','control_service_offering_id', 'bigint unsigned COMMENT "service offering ID for Control Node(s)"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','worker_service_offering_id', 'bigint unsigned COMMENT "service offering ID for Worker Node(s)"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','etcd_service_offering_id', 'bigint unsigned COMMENT "service offering ID for etcd Nodes"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','etcd_node_count', 'bigint unsigned COMMENT "number of etcd nodes to be deployed for the Kubernetes cluster"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','control_template_id', 'bigint unsigned COMMENT "template id to be used for Control Node(s)"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','worker_template_id', 'bigint unsigned COMMENT "template id to be used for Worker Node(s)"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','etcd_template_id', 'bigint unsigned COMMENT "template id to be used for etcd Nodes"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','cni_config_id', 'bigint unsigned COMMENT 
"userdata id representing the associated cni configuration"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','cni_config_details', 'varchar(4096) DEFAULT NULL COMMENT "userdata details representing the values required for the cni configuration associated"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster_vm_map','etcd_node', 'tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT "indicates if the VM is an etcd node"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster_vm_map','external_node', 'tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT "indicates if the node was imported into the Kubernetes cluster"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster_vm_map','manual_upgrade', 'tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT "indicates if the node is marked for manual upgrade and excluded from the Kubernetes cluster upgrade operation"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster_vm_map','kubernetes_node_version', 'varchar(40) COMMENT "version of k8s the cluster node is on"'); + +ALTER TABLE `cloud`.`kubernetes_cluster` ADD CONSTRAINT `fk_cluster__control_service_offering_id` FOREIGN KEY `fk_cluster__control_service_offering_id`(`control_service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE; +ALTER TABLE `cloud`.`kubernetes_cluster` ADD CONSTRAINT `fk_cluster__worker_service_offering_id` FOREIGN KEY `fk_cluster__worker_service_offering_id`(`worker_service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE; +ALTER TABLE `cloud`.`kubernetes_cluster` ADD CONSTRAINT `fk_cluster__etcd_service_offering_id` FOREIGN KEY `fk_cluster__etcd_service_offering_id`(`etcd_service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE; +ALTER TABLE `cloud`.`kubernetes_cluster` ADD CONSTRAINT `fk_cluster__control_template_id` FOREIGN KEY `fk_cluster__control_template_id`(`control_template_id`) REFERENCES `vm_template`(`id`) ON DELETE CASCADE; +ALTER 
TABLE `cloud`.`kubernetes_cluster` ADD CONSTRAINT `fk_cluster__worker_template_id` FOREIGN KEY `fk_cluster__worker_template_id`(`worker_template_id`) REFERENCES `vm_template`(`id`) ON DELETE CASCADE; +ALTER TABLE `cloud`.`kubernetes_cluster` ADD CONSTRAINT `fk_cluster__etcd_template_id` FOREIGN KEY `fk_cluster__etcd_template_id`(`etcd_template_id`) REFERENCES `vm_template`(`id`) ON DELETE CASCADE; + +-- Add for_cks column to the user_data table to represent CNI Configuration stored as userdata +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.user_data','for_cks', 'int(1) unsigned DEFAULT "0" COMMENT "if true, the userdata represent CNI configuration meant for CKS use only"'); +----------------------------------------------------------- +-- END - CKS Enhancements +----------------------------------------------------------- diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.template_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.template_view.sql index 339e43860d88..93aa72ad0669 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.template_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.template_view.sql @@ -100,6 +100,7 @@ SELECT IFNULL(`data_center`.`id`, 0)) AS `temp_zone_pair`, `vm_template`.`direct_download` AS `direct_download`, `vm_template`.`deploy_as_is` AS `deploy_as_is`, + `vm_template`.`for_cks` AS `for_cks`, `user_data`.`id` AS `user_data_id`, `user_data`.`uuid` AS `user_data_uuid`, `user_data`.`name` AS `user_data_name`, diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index a3b7d0c9ecc1..0dbe4fd72462 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -433,6 
+433,11 @@ public boolean isDeployAsIs() { return this.imageVO.isDeployAsIs(); } + @Override + public boolean isForCks() { + return imageVO.isForCks(); + } + public void setInstallPath(String installPath) { this.installPath = installPath; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterEventTypes.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterEventTypes.java index a947e4273be0..486a093e4ad4 100755 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterEventTypes.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterEventTypes.java @@ -23,4 +23,6 @@ public class KubernetesClusterEventTypes { public static final String EVENT_KUBERNETES_CLUSTER_STOP = "KUBERNETES.CLUSTER.STOP"; public static final String EVENT_KUBERNETES_CLUSTER_SCALE = "KUBERNETES.CLUSTER.SCALE"; public static final String EVENT_KUBERNETES_CLUSTER_UPGRADE = "KUBERNETES.CLUSTER.UPGRADE"; + public static final String EVENT_KUBERNETES_CLUSTER_NODES_ADD = "KUBERNETES.CLUSTER.NODES.ADD"; + public static final String EVENT_KUBERNETES_CLUSTER_NODES_REMOVE = "KUBERNETES.CLUSTER.NODES.REMOVE"; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 477eb257deef..36a8215ddf4b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -16,13 +16,19 @@ // under the License. 
package com.cloud.kubernetes.cluster; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.CONTROL; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.ETCD; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.WORKER; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.DEFAULT; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; import static com.cloud.vm.UserVmManager.AllowUserExpungeRecoverVm; +import java.lang.reflect.InvocationTargetException; import java.net.MalformedURLException; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Date; import java.util.EnumSet; import java.util.HashMap; @@ -36,26 +42,53 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.bgp.BGPService; +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.dao.DedicatedResourceDao; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.host.Host; +import com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType; +import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterRemoveWorker; +import com.cloud.network.dao.NsxProviderDao; +import com.cloud.network.element.NsxProviderVO; +import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterAddWorker; +import com.cloud.network.rules.PortForwardingRuleVO; +import com.cloud.network.rules.dao.PortForwardingRulesDao; +import com.cloud.template.TemplateApiService; +import com.cloud.user.UserDataVO; +import com.cloud.user.dao.AccountDao; +import com.cloud.user.dao.UserDataDao; import 
com.cloud.uservm.UserVm; +import com.cloud.vm.NicVO; import com.cloud.vm.UserVmService; +import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.UserVmDetailsDao; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker; +import org.apache.cloudstack.affinity.AffinityGroupVO; +import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiConstants.VMDetails; +import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.ResponseObject.ResponseView; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.AddNodesToKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.AddVirtualMachinesToKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.CreateKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.DeleteKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.GetKubernetesClusterConfigCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.ListKubernetesClustersCmd; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.RemoveNodesFromKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.RemoveVirtualMachinesFromKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.ScaleKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.StartKubernetesClusterCmd; @@ -63,6 +96,7 @@ import org.apache.cloudstack.api.command.user.kubernetes.cluster.UpgradeKubernetesClusterCmd; import 
org.apache.cloudstack.api.response.KubernetesClusterConfigResponse; import org.apache.cloudstack.api.response.KubernetesClusterResponse; +import org.apache.cloudstack.api.response.KubernetesUserVmResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.RemoveVirtualMachinesFromKubernetesClusterResponse; import org.apache.cloudstack.api.response.UserVmResponse; @@ -71,9 +105,11 @@ import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.commons.beanutils.BeanUtils; import org.apache.cloudstack.network.RoutedIpv4Manager; import org.apache.commons.codec.binary.Base64; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; import com.cloud.api.ApiDBUtils; @@ -99,7 +135,6 @@ import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; -import com.cloud.host.Host.Type; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; @@ -193,6 +228,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne protected StateMachine2 _stateMachine = KubernetesCluster.State.getStateMachine(); + protected final static List CLUSTER_NODES_TYPES_LIST = Arrays.asList(WORKER.name(), CONTROL.name(), ETCD.name()); + ScheduledExecutorService _gcExecutor; ScheduledExecutorService _stateScanner; @@ -215,18 +252,28 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne @Inject protected HostDao hostDao; @Inject + protected AffinityGroupDao affinityGroupDao; + @Inject protected ServiceOfferingDao serviceOfferingDao; @Inject + protected UserDataDao userDataDao; + @Inject 
protected VMTemplateDao templateDao; @Inject protected TemplateJoinDao templateJoinDao; @Inject + protected DedicatedResourceDao dedicatedResourceDao; + @Inject + protected AccountDao accountDao; + @Inject protected AccountService accountService; @Inject protected AccountManager accountManager; @Inject protected UserDao userDao; @Inject + protected UserVmDetailsDao userVmDetailsDao; + @Inject protected VMInstanceDao vmInstanceDao; @Inject protected UserVmJoinDao userVmJoinDao; @@ -262,11 +309,20 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne public SecurityGroupService securityGroupService; @Inject public NetworkHelper networkHelper; - + @Inject + private NsxProviderDao nsxProviderDao; + @Inject + private NicDao nicDao; @Inject private UserVmService userVmService; @Inject + private TemplateApiService templateService; + @Inject + private PortForwardingRulesDao pfRuleDao; + @Inject RoutedIpv4Manager routedIpv4Manager; + @Inject + private BGPService bgpService; private void logMessage(final Level logLevel, final String message, final Exception e) { if (logLevel == Level.WARN) { @@ -369,16 +425,32 @@ private IpAddress getSourceNatIp(Network network) { return null; } - public VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType) { + public VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType, Map templateNodeTypeMap, KubernetesClusterNodeType nodeType) { VMTemplateVO template = templateDao.findSystemVMReadyTemplate(dataCenter.getId(), hypervisorType); if (DataCenter.Type.Edge.equals(dataCenter.getType()) && template != null && !template.isDirectDownload()) { logger.debug(String.format("Template %s can not be used for edge zone %s", template, dataCenter)); template = templateDao.findRoutingTemplate(hypervisorType, networkHelper.getHypervisorRouterTemplateConfigMap().get(hypervisorType).valueIn(dataCenter.getId())); } - if 
(template == null) { - throw new CloudRuntimeException("Not able to find the System or Routing template in ready state for the zone " + dataCenter.getUuid()); + switch (nodeType) { + case CONTROL: + case ETCD: + case WORKER: + VMTemplateVO nodeTemplate = Objects.nonNull(templateNodeTypeMap) ? templateDao.findById(templateNodeTypeMap.getOrDefault(nodeType.name(), 0L)) : template; + template = Objects.nonNull(nodeTemplate) ? nodeTemplate : template; + if (Objects.isNull(template)) { + throwDefaultCksTemplateNotFound(dataCenter.getUuid()); + } + return template; + default: + if (Objects.isNull(template)) { + throwDefaultCksTemplateNotFound(dataCenter.getUuid()); + } + return template; } - return template; + } + + public void throwDefaultCksTemplateNotFound(String datacenterId) { + throw new CloudRuntimeException("Not able to find the System or Routing template in ready state for the zone " + datacenterId); } protected void validateIsolatedNetworkIpRules(long ipId, FirewallRule.Purpose purpose, Network network, int clusterTotalNodeCount) { @@ -453,7 +525,7 @@ private void validateNetwork(Network network, int clusterTotalNodeCount) { validateIsolatedNetwork(network, clusterTotalNodeCount); } - private boolean validateServiceOffering(final ServiceOffering serviceOffering, final KubernetesSupportedVersion version) { + protected void validateServiceOffering(final ServiceOffering serviceOffering, final KubernetesSupportedVersion version) throws InvalidParameterValueException { if (serviceOffering.isDynamic()) { throw new InvalidParameterValueException(String.format("Custom service offerings are not supported for creating clusters, service offering ID: %s", serviceOffering.getUuid())); } @@ -466,7 +538,6 @@ private boolean validateServiceOffering(final ServiceOffering serviceOffering, f if (serviceOffering.getRamSize() < version.getMinimumRamSize()) { throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, 
associated Kubernetes version ID: %s needs minimum %d MB RAM", serviceOffering.getUuid(), version.getUuid(), version.getMinimumRamSize())); } - return true; } private void validateDockerRegistryParams(final String dockerRegistryUserName, @@ -494,16 +565,46 @@ private void validateDockerRegistryParams(final String dockerRegistryUserName, } } - private DeployDestination plan(final long nodesCount, final DataCenter zone, final ServiceOffering offering) throws InsufficientServerCapacityException { + public Long getExplicitAffinityGroup(Long domainId) { + AffinityGroupVO groupVO = affinityGroupDao.findDomainLevelGroupByType(domainId, "ExplicitDedication"); + if (Objects.nonNull(groupVO)) { + return groupVO.getId(); + } + return null; + } + + private DeployDestination plan(final long nodesCount, final DataCenter zone, final ServiceOffering offering, + final Long domainId, final Long accountId, Hypervisor.HypervisorType hypervisorType) throws InsufficientServerCapacityException { final int cpu_requested = offering.getCpu() * offering.getSpeed(); final long ram_requested = offering.getRamSize() * 1024L * 1024L; - List hosts = resourceManager.listAllHostsInOneZoneByType(Type.Routing, zone.getId()); + boolean useDedicatedHosts = false; + Long group = getExplicitAffinityGroup(domainId); + List hosts = new ArrayList<>(); + if (Objects.nonNull(group)) { + List dedicatedHosts = new ArrayList<>(); + if (Objects.nonNull(accountId)) { + dedicatedHosts = dedicatedResourceDao.listByAccountId(accountId); + } else if (Objects.nonNull(domainId)) { + dedicatedHosts = dedicatedResourceDao.listByDomainId(domainId); + } + for (DedicatedResourceVO dedicatedHost : dedicatedHosts) { + hosts.add(hostDao.findById(dedicatedHost.getHostId())); + useDedicatedHosts = true; + } + } + if (hosts.isEmpty()) { + hosts = resourceManager.listAllHostsInOneZoneByType(Host.Type.Routing, zone.getId()); + } + if (hypervisorType != null) { + hosts = hosts.stream().filter(x -> x.getHypervisorType() == 
hypervisorType).collect(Collectors.toList()); + } final Map> hosts_with_resevered_capacity = new ConcurrentHashMap>(); for (HostVO h : hosts) { hosts_with_resevered_capacity.put(h.getUuid(), new Pair(h, 0)); } boolean suitable_host_found = false; Cluster planCluster = null; + HostVO suitableHost = null; for (int i = 1; i <= nodesCount; i++) { suitable_host_found = false; for (Map.Entry> hostEntry : hosts_with_resevered_capacity.entrySet()) { @@ -527,6 +628,7 @@ private DeployDestination plan(final long nodesCount, final DataCenter zone, fin logger.debug("Found host {} to have enough capacity, CPU={} RAM={}", hostVO, cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved)); hostEntry.setValue(new Pair(hostVO, reserved)); suitable_host_found = true; + suitableHost = hostVO; planCluster = cluster; break; } @@ -542,6 +644,10 @@ private DeployDestination plan(final long nodesCount, final DataCenter zone, fin if (logger.isInfoEnabled()) { logger.info("Suitable hosts found in datacenter: {}, creating deployment destination", zone); } + if (useDedicatedHosts) { + planCluster = clusterDao.findById(suitableHost.getClusterId()); + return new DeployDestination(zone, null, planCluster, suitableHost); + } return new DeployDestination(zone, null, planCluster, null); } String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering: %s", @@ -550,6 +656,33 @@ private DeployDestination plan(final long nodesCount, final DataCenter zone, fin throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId()); } + protected void setNodeTypeServiceOfferingResponse(KubernetesClusterResponse response, + KubernetesClusterNodeType nodeType, + Long offeringId) { + if (offeringId == null) { + return; + } + ServiceOfferingVO offering = serviceOfferingDao.findById(offeringId); + if (offering != null) { + setServiceOfferingResponseForNodeType(response, offering, nodeType); + } + } + + protected void 
setServiceOfferingResponseForNodeType(KubernetesClusterResponse response, + ServiceOfferingVO offering, + KubernetesClusterNodeType nodeType) { + if (CONTROL == nodeType) { + response.setControlOfferingId(offering.getUuid()); + response.setControlOfferingName(offering.getName()); + } else if (WORKER == nodeType) { + response.setWorkerOfferingId(offering.getUuid()); + response.setWorkerOfferingName(offering.getName()); + } else if (ETCD == nodeType) { + response.setEtcdOfferingId(offering.getUuid()); + response.setEtcdOfferingName(offering.getName()); + } + } + @Override public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetesClusterId) { KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId); @@ -573,6 +706,20 @@ public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetes response.setServiceOfferingId(offering.getUuid()); response.setServiceOfferingName(offering.getName()); } + + Long cniConfigId = kubernetesCluster.getCniConfigId(); + if (Objects.nonNull(cniConfigId)) { + UserDataVO cniConfig = userDataDao.findById(cniConfigId); + response.setCniConfigId(cniConfig.getUuid()); + response.setCniConfigName(cniConfig.getName()); + } + setNodeTypeServiceOfferingResponse(response, WORKER, kubernetesCluster.getWorkerServiceOfferingId()); + setNodeTypeServiceOfferingResponse(response, CONTROL, kubernetesCluster.getControlServiceOfferingId()); + setNodeTypeServiceOfferingResponse(response, ETCD, kubernetesCluster.getEtcdServiceOfferingId()); + + if (kubernetesCluster.getEtcdNodeCount() != null) { + response.setEtcdNodes(kubernetesCluster.getEtcdNodeCount()); + } KubernetesSupportedVersionVO version = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); if (version != null) { response.setKubernetesVersionId(version.getUuid()); @@ -608,7 +755,7 @@ public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetes } } - List vmResponses = new 
ArrayList(); + List vmResponses = new ArrayList<>(); List vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); ResponseView respView = ResponseView.Restricted; Account caller = CallContext.current().getCallingAccount(); @@ -622,9 +769,31 @@ public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetes if (userVM != null) { UserVmResponse vmResponse = ApiDBUtils.newUserVmResponse(respView, responseName, userVM, EnumSet.of(VMDetails.nics), caller); - vmResponses.add(vmResponse); + KubernetesUserVmResponse kubernetesUserVmResponse = new KubernetesUserVmResponse(); + try { + BeanUtils.copyProperties(kubernetesUserVmResponse, vmResponse); + } catch (IllegalAccessException | InvocationTargetException e) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to generate Kubernetes cluster VM response"); + } + kubernetesUserVmResponse.setExternalNode(vmMapVO.isExternalNode()); + kubernetesUserVmResponse.setEtcdNode(vmMapVO.isEtcdNode()); + kubernetesUserVmResponse.setNodeVersion(vmMapVO.getNodeVersion()); + vmResponses.add(kubernetesUserVmResponse); + } + } + List etcdNodeIds = vmList.stream().filter(KubernetesClusterVmMapVO::isEtcdNode).map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); + List etcdIpIds = new ArrayList<>(); + Map etcdIps = new HashMap<>(); + int etcdNodeSshPort = KubernetesClusterService.KubernetesEtcdNodeStartPort.value(); + etcdNodeIds.forEach(id -> { + etcdIpIds.addAll(pfRuleDao.listByVm(id).stream().filter(rule -> rule.getSourcePortStart() == etcdNodeSshPort) + .map(PortForwardingRuleVO::getSourceIpAddressId).collect(Collectors.toList())); + }); + etcdIpIds.forEach(id -> { + IPAddressVO ipAddress = ipAddressDao.findById(id); + etcdIps.put(ipAddress.getUuid(), ipAddress.getAddress().addr()); + }); + response.setEtcdIps(etcdIps); } response.setHasAnnotation(annotationDao.hasAnnotations(kubernetesCluster.getUuid(), AnnotationService.EntityType.KUBERNETES_CLUSTER.name(), 
accountService.isRootAdmin(caller.getId()))); @@ -634,6 +803,9 @@ public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetes response.setMaxSize(kubernetesCluster.getMaxSize()); response.setClusterType(kubernetesCluster.getClusterType()); response.setCreated(kubernetesCluster.getCreated()); + + + return response; } @@ -722,7 +894,9 @@ public boolean isCommandSupported(KubernetesCluster cluster, String cmdName) { BaseCmd.getCommandNameByClass(ScaleKubernetesClusterCmd.class), BaseCmd.getCommandNameByClass(StartKubernetesClusterCmd.class), BaseCmd.getCommandNameByClass(StopKubernetesClusterCmd.class), - BaseCmd.getCommandNameByClass(UpgradeKubernetesClusterCmd.class) + BaseCmd.getCommandNameByClass(UpgradeKubernetesClusterCmd.class), + BaseCmd.getCommandNameByClass(AddNodesToKubernetesClusterCmd.class), + BaseCmd.getCommandNameByClass(RemoveNodesFromKubernetesClusterCmd.class) ).contains(cmdName); case ExternalManaged: return Arrays.asList( @@ -742,7 +916,6 @@ private void validateManagedKubernetesClusterCreateParameters(final CreateKubern final String name = cmd.getName(); final Long zoneId = cmd.getZoneId(); final Long kubernetesVersionId = cmd.getKubernetesVersionId(); - final Long serviceOfferingId = cmd.getServiceOfferingId(); final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId()); final Long networkId = cmd.getNetworkId(); final String sshKeyPair = cmd.getSSHKeyPairName(); @@ -753,6 +926,8 @@ private void validateManagedKubernetesClusterCreateParameters(final CreateKubern final String dockerRegistryUrl = cmd.getDockerRegistryUrl(); final Long nodeRootDiskSize = cmd.getNodeRootDiskSize(); final String externalLoadBalancerIpAddress = cmd.getExternalLoadBalancerIpAddress(); + final Map serviceOfferingNodeTypeMap = cmd.getServiceOfferingNodeTypeMap(); + final Long defaultServiceOfferingId = cmd.getServiceOfferingId(); if (name == null || name.isEmpty()) { throw new InvalidParameterValueException("Invalid name for the 
Kubernetes cluster name: " + name); @@ -810,10 +985,7 @@ private void validateManagedKubernetesClusterCreateParameters(final CreateKubern throw new InvalidParameterValueException(String.format("ISO associated with version ID: %s is not in Ready state for datacenter ID: %s", clusterKubernetesVersion.getUuid(), zone.getUuid())); } - ServiceOffering serviceOffering = serviceOfferingDao.findById(serviceOfferingId); - if (serviceOffering == null) { - throw new InvalidParameterValueException("No service offering with ID: " + serviceOfferingId); - } + validateServiceOfferingsForNodeTypes(serviceOfferingNodeTypeMap, defaultServiceOfferingId, cmd.getEtcdNodes(), clusterKubernetesVersion); validateSshKeyPairForKubernetesCreateParameters(sshKeyPair, owner); @@ -821,15 +993,15 @@ private void validateManagedKubernetesClusterCreateParameters(final CreateKubern throw new InvalidParameterValueException(String.format("Invalid value for %s", ApiConstants.NODE_ROOT_DISK_SIZE)); } - if (!validateServiceOffering(serviceOffering, clusterKubernetesVersion)) { - throw new InvalidParameterValueException("Given service offering ID: %s is not suitable for Kubernetes cluster"); - } - validateDockerRegistryParams(dockerRegistryUserName, dockerRegistryPassword, dockerRegistryUrl); Network network = validateAndGetNetworkForKubernetesCreateParameters(networkId); if (StringUtils.isNotEmpty(externalLoadBalancerIpAddress)) { + NsxProviderVO nsxProviderVO = nsxProviderDao.findByZoneId(zone.getId()); + if (Objects.nonNull(nsxProviderVO)) { + throw new InvalidParameterValueException("External load balancer IP address is not supported on NSX-enabled zones"); + } if (!NetUtils.isValidIp4(externalLoadBalancerIpAddress) && !NetUtils.isValidIp6(externalLoadBalancerIpAddress)) { throw new InvalidParameterValueException("Invalid external load balancer IP address"); } @@ -846,8 +1018,40 @@ private void validateManagedKubernetesClusterCreateParameters(final CreateKubern } } + protected void 
validateServiceOfferingsForNodeTypes(Map map, + Long defaultServiceOfferingId, + Long etcdNodes, + KubernetesSupportedVersion clusterKubernetesVersion) { + for (String key : CLUSTER_NODES_TYPES_LIST) { + validateServiceOfferingForNode(map, defaultServiceOfferingId, key, etcdNodes, clusterKubernetesVersion); + } + } + + protected void validateServiceOfferingForNode(Map map, + Long defaultServiceOfferingId, + String key, Long etcdNodes, + KubernetesSupportedVersion clusterKubernetesVersion) { + if (ETCD.name().equalsIgnoreCase(key) && (etcdNodes == null || etcdNodes == 0)) { + return; + } + Long serviceOfferingId = map.getOrDefault(key, defaultServiceOfferingId); + ServiceOffering serviceOffering = serviceOfferingId != null ? serviceOfferingDao.findById(serviceOfferingId) : null; + if (serviceOffering == null) { + throw new InvalidParameterValueException("When serviceofferingid is not specified, " + + "service offerings for each node type must be specified in the nodeofferings parameter."); + } + try { + validateServiceOffering(serviceOffering, clusterKubernetesVersion); + } catch (InvalidParameterValueException e) { + String msg = String.format("Given service offering ID: %s for %s nodes is not suitable for the Kubernetes cluster version %s - %s", + serviceOffering, key, clusterKubernetesVersion, e.getMessage()); + logger.error(msg); + throw new InvalidParameterValueException(msg); + } + } + private Network getKubernetesClusterNetworkIfMissing(final String clusterName, final DataCenter zone, final Account owner, final int controlNodesCount, - final int nodesCount, final String externalLoadBalancerIpAddress, final Long networkId) throws CloudRuntimeException { + final int nodesCount, final String externalLoadBalancerIpAddress, final Long networkId, final Long asNumber) throws CloudRuntimeException { Network network = null; if (networkId != null) { network = networkDao.findById(networkId); @@ -880,6 +1084,9 @@ private Network getKubernetesClusterNetworkIfMissing(final 
String clusterName, f network = networkService.createGuestNetwork(networkOffering.getId(), clusterName + "-network", owner.getAccountName() + "-network", owner, physicalNetwork, zone.getId(), ControlledEntity.ACLType.Account); + if (!networkOffering.isForVpc() && NetworkOffering.RoutingMode.Dynamic == networkOffering.getRoutingMode()) { + bgpService.allocateASNumber(zone.getId(), asNumber, network.getId(), null); + } } catch (ConcurrentOperationException | InsufficientCapacityException | ResourceAllocationException e) { logAndThrow(Level.ERROR, String.format("Unable to create network for the Kubernetes cluster: %s", clusterName)); } finally { @@ -952,12 +1159,13 @@ protected void validateKubernetesClusterScaleSize(final KubernetesClusterVO kube private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd cmd) { final Long kubernetesClusterId = cmd.getId(); - final Long serviceOfferingId = cmd.getServiceOfferingId(); final Long clusterSize = cmd.getClusterSize(); final List nodeIds = cmd.getNodeIds(); final Boolean isAutoscalingEnabled = cmd.isAutoscalingEnabled(); final Long minSize = cmd.getMinSize(); final Long maxSize = cmd.getMaxSize(); + final Long defaultServiceOfferingId = cmd.getServiceOfferingId(); + final Map serviceOfferingNodeTypeMap = cmd.getServiceOfferingNodeTypeMap(); if (kubernetesClusterId == null || kubernetesClusterId < 1L) { throw new InvalidParameterValueException("Invalid Kubernetes cluster ID"); @@ -973,7 +1181,8 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster : %s", kubernetesCluster.getName())); } - if (serviceOfferingId == null && clusterSize == null && nodeIds == null && isAutoscalingEnabled == null) { + if (defaultServiceOfferingId == null && isAnyNodeOfferingEmpty(serviceOfferingNodeTypeMap) + && clusterSize == null && nodeIds == null && isAutoscalingEnabled == null) { throw new 
InvalidParameterValueException(String.format("Kubernetes cluster %s cannot be scaled, either service offering or cluster size or nodeids to be removed or autoscaling must be passed", kubernetesCluster.getName())); } @@ -1020,8 +1229,9 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd } } + Long workerOfferingId = serviceOfferingNodeTypeMap != null ? serviceOfferingNodeTypeMap.getOrDefault(WORKER.name(), null) : null; if (nodeIds != null) { - if (clusterSize != null || serviceOfferingId != null) { + if (clusterSize != null || defaultServiceOfferingId != null || workerOfferingId != null) { throw new InvalidParameterValueException("nodeids can not be passed along with clustersize or service offering"); } List nodes = kubernetesClusterVmMapDao.listByClusterIdAndVmIdsIn(kubernetesCluster.getId(), nodeIds); @@ -1041,37 +1251,53 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd } } - ServiceOffering serviceOffering = null; - if (serviceOfferingId != null) { - serviceOffering = serviceOfferingDao.findById(serviceOfferingId); - if (serviceOffering == null) { - throw new InvalidParameterValueException("Failed to find service offering ID: " + serviceOfferingId); - } else { - if (serviceOffering.isDynamic()) { - throw new InvalidParameterValueException(String.format("Custom service offerings are not supported for Kubernetes clusters. 
Kubernetes cluster : %s, service offering : %s", kubernetesCluster.getName(), serviceOffering.getName())); - } - if (serviceOffering.getCpu() < MIN_KUBERNETES_CLUSTER_NODE_CPU || serviceOffering.getRamSize() < MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) { - throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled with service offering : %s, Kubernetes cluster template(CoreOS) needs minimum %d vCPUs and %d MB RAM", - kubernetesCluster.getName(), serviceOffering.getName(), MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE)); - } - if (serviceOffering.getCpu() < clusterVersion.getMinimumCpu()) { - throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled with service offering : %s, associated Kubernetes version : %s needs minimum %d vCPUs", - kubernetesCluster.getName(), serviceOffering.getName(), clusterVersion.getName(), clusterVersion.getMinimumCpu())); + validateServiceOfferingsForNodeTypesScale(serviceOfferingNodeTypeMap, defaultServiceOfferingId, kubernetesCluster, clusterVersion); + + validateKubernetesClusterScaleSize(kubernetesCluster, clusterSize, maxClusterSize, zone); + } + + protected void validateServiceOfferingsForNodeTypesScale(Map map, Long defaultServiceOfferingId, KubernetesClusterVO kubernetesCluster, KubernetesSupportedVersion clusterVersion) { + for (String key : CLUSTER_NODES_TYPES_LIST) { + Long serviceOfferingId = map.getOrDefault(key, defaultServiceOfferingId); + if (serviceOfferingId != null) { + ServiceOffering serviceOffering = serviceOfferingDao.findById(serviceOfferingId); + if (serviceOffering == null) { + throw new InvalidParameterValueException("Failed to find service offering ID: " + serviceOfferingId); } - if (serviceOffering.getRamSize() < clusterVersion.getMinimumRamSize()) { - throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled with service offering : %s, associated Kubernetes version : %s 
needs minimum %d MB RAM", - kubernetesCluster.getName(), serviceOffering.getName(), clusterVersion.getName(), clusterVersion.getMinimumRamSize())); + checkServiceOfferingForNodesScale(serviceOffering, kubernetesCluster, clusterVersion); + final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + if (KubernetesCluster.State.Running.equals(kubernetesCluster.getState()) && (serviceOffering.getRamSize() < existingServiceOffering.getRamSize() || + serviceOffering.getCpu() * serviceOffering.getSpeed() < existingServiceOffering.getCpu() * existingServiceOffering.getSpeed())) { + logAndThrow(Level.WARN, String.format("Kubernetes cluster cannot be scaled down for service offering. Service offering : %s offers lesser resources as compared to service offering : %s of Kubernetes cluster : %s", + serviceOffering.getName(), existingServiceOffering.getName(), kubernetesCluster.getName())); } } - final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); - if (KubernetesCluster.State.Running.equals(kubernetesCluster.getState()) && (serviceOffering.getRamSize() < existingServiceOffering.getRamSize() || - serviceOffering.getCpu() * serviceOffering.getSpeed() < existingServiceOffering.getCpu() * existingServiceOffering.getSpeed())) { - logAndThrow(Level.WARN, String.format("Kubernetes cluster cannot be scaled down for service offering. 
Service offering : %s offers lesser resources as compared to service offering : %s of Kubernetes cluster : %s", - serviceOffering.getName(), existingServiceOffering.getName(), kubernetesCluster.getName())); - } } + } - validateKubernetesClusterScaleSize(kubernetesCluster, clusterSize, maxClusterSize, zone); + protected void checkServiceOfferingForNodesScale(ServiceOffering serviceOffering, KubernetesClusterVO kubernetesCluster, KubernetesSupportedVersion clusterVersion) { + if (serviceOffering.isDynamic()) { + throw new InvalidParameterValueException(String.format("Custom service offerings are not supported for Kubernetes clusters. Kubernetes cluster : %s, service offering : %s", kubernetesCluster.getName(), serviceOffering.getName())); + } + if (serviceOffering.getCpu() < MIN_KUBERNETES_CLUSTER_NODE_CPU || serviceOffering.getRamSize() < MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) { + throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled with service offering : %s, Kubernetes cluster template(CoreOS) needs minimum %d vCPUs and %d MB RAM", + kubernetesCluster.getName(), serviceOffering.getName(), MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE)); + } + if (serviceOffering.getCpu() < clusterVersion.getMinimumCpu()) { + throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled with service offering : %s, associated Kubernetes version : %s needs minimum %d vCPUs", + kubernetesCluster.getName(), serviceOffering.getName(), clusterVersion.getName(), clusterVersion.getMinimumCpu())); + } + if (serviceOffering.getRamSize() < clusterVersion.getMinimumRamSize()) { + throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled with service offering : %s, associated Kubernetes version : %s needs minimum %d MB RAM", + kubernetesCluster.getName(), serviceOffering.getName(), clusterVersion.getName(), clusterVersion.getMinimumRamSize())); + } + } + + 
protected boolean isAnyNodeOfferingEmpty(Map map) { + if (MapUtils.isEmpty(map)) { + return true; + } + return map.values().stream().anyMatch(Objects::isNull); } private void validateKubernetesClusterUpgradeParameters(UpgradeKubernetesClusterCmd cmd) { @@ -1165,6 +1391,7 @@ public KubernetesCluster createUnmanagedKubernetesCluster(CreateKubernetesCluste final long controlNodeCount = cmd.getControlNodes(); final long clusterSize = Objects.requireNonNullElse(cmd.getClusterSize(), 0L); final ServiceOffering serviceOffering = serviceOfferingDao.findById(cmd.getServiceOfferingId()); + Map nodeTypeOfferingMap = cmd.getServiceOfferingNodeTypeMap(); final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId()); final KubernetesSupportedVersion clusterKubernetesVersion = kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()); @@ -1217,49 +1444,81 @@ public KubernetesCluster createManagedKubernetesCluster(CreateKubernetesClusterC final DataCenter zone = dataCenterDao.findById(cmd.getZoneId()); final long controlNodeCount = cmd.getControlNodes(); final long clusterSize = cmd.getClusterSize(); - final long totalNodeCount = controlNodeCount + clusterSize; - final ServiceOffering serviceOffering = serviceOfferingDao.findById(cmd.getServiceOfferingId()); + final long etcdNodes = cmd.getEtcdNodes(); + final Map nodeTypeCount = Map.of(WORKER.name(), clusterSize, + CONTROL.name(), controlNodeCount, ETCD.name(), etcdNodes); final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId()); final KubernetesSupportedVersion clusterKubernetesVersion = kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()); - - DeployDestination deployDestination = null; - try { - deployDestination = plan(totalNodeCount, zone, serviceOffering); - } catch (InsufficientCapacityException e) { - logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to insufficient capacity for %d nodes cluster in zone : %s with service 
offering : %s", totalNodeCount, zone.getName(), serviceOffering.getName())); - } - if (deployDestination == null || deployDestination.getCluster() == null) { - logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to error while finding suitable deployment plan for cluster in zone : %s", zone.getName())); + final Hypervisor.HypervisorType hypervisor = cmd.getHypervisorType(); + final Long asNumber = cmd.getAsNumber(); + + Map serviceOfferingNodeTypeMap = cmd.getServiceOfferingNodeTypeMap(); + Long defaultServiceOfferingId = cmd.getServiceOfferingId(); + String accountName = cmd.getAccountName(); + Long domainId = cmd.getDomainId(); + Long accountId = null; + if (Objects.nonNull(accountName) && Objects.nonNull(domainId)) { + Account account = accountDao.findActiveAccount(accountName, domainId); + if (Objects.nonNull(account)) { + accountId = account.getId(); + } } + Hypervisor.HypervisorType hypervisorType = getHypervisorTypeAndValidateNodeDeployments(serviceOfferingNodeTypeMap, defaultServiceOfferingId, nodeTypeCount, zone, domainId, accountId, hypervisor); SecurityGroup securityGroup = null; if (zone.isSecurityGroupEnabled()) { securityGroup = getOrCreateSecurityGroupForAccount(owner); } - final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)controlNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId()); - final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(zone, deployDestination.getCluster().getHypervisorType()); - final long cores = serviceOffering.getCpu() * (controlNodeCount + clusterSize); - final long memory = serviceOffering.getRamSize() * (controlNodeCount + clusterSize); - + Map templateNodeTypeMap = cmd.getTemplateNodeTypeMap(); + final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, DEFAULT); + final VMTemplateVO controlNodeTemplate = getKubernetesServiceTemplate(zone, hypervisorType, 
templateNodeTypeMap, CONTROL); + final VMTemplateVO workerNodeTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, WORKER); + final VMTemplateVO etcdNodeTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, ETCD); + final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)controlNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId(), asNumber); final SecurityGroup finalSecurityGroup = securityGroup; final KubernetesClusterVO cluster = Transaction.execute(new TransactionCallback() { @Override public KubernetesClusterVO doInTransaction(TransactionStatus status) { + Pair capacityPair = calculateClusterCapacity(serviceOfferingNodeTypeMap, nodeTypeCount, defaultServiceOfferingId); + final long cores = capacityPair.first(); + final long memory = capacityPair.second(); + KubernetesClusterVO newCluster = new KubernetesClusterVO(cmd.getName(), cmd.getDisplayName(), zone.getId(), clusterKubernetesVersion.getId(), - serviceOffering.getId(), finalTemplate.getId(), defaultNetwork.getId(), owner.getDomainId(), - owner.getAccountId(), controlNodeCount, clusterSize, KubernetesCluster.State.Created, cmd.getSSHKeyPairName(), cores, memory, + defaultServiceOfferingId, Objects.nonNull(finalTemplate) ? 
finalTemplate.getId() : null, + defaultNetwork.getId(), owner.getDomainId(), owner.getAccountId(), controlNodeCount, clusterSize, + KubernetesCluster.State.Created, cmd.getSSHKeyPairName(), cores, memory, cmd.getNodeRootDiskSize(), "", KubernetesCluster.ClusterType.CloudManaged); + newCluster.setCniConfigId(cmd.getCniConfigId()); + String cniConfigDetails = null; + if (MapUtils.isNotEmpty(cmd.getCniConfigDetails())) { + cniConfigDetails = cmd.getCniConfigDetails().toString(); + } + newCluster.setCniConfigDetails(cniConfigDetails); + if (serviceOfferingNodeTypeMap.containsKey(WORKER.name())) { + newCluster.setWorkerServiceOfferingId(serviceOfferingNodeTypeMap.get(WORKER.name())); + } + if (serviceOfferingNodeTypeMap.containsKey(CONTROL.name())) { + newCluster.setControlServiceOfferingId(serviceOfferingNodeTypeMap.get(CONTROL.name())); + } + if (etcdNodes > 0) { + newCluster.setEtcdTemplateId(etcdNodeTemplate.getId()); + newCluster.setEtcdNodeCount(etcdNodes); + if (serviceOfferingNodeTypeMap.containsKey(ETCD.name())) { + newCluster.setEtcdServiceOfferingId(serviceOfferingNodeTypeMap.get(ETCD.name())); + } + } + newCluster.setWorkerTemplateId(workerNodeTemplate.getId()); + newCluster.setControlTemplateId(controlNodeTemplate.getId()); if (zone.isSecurityGroupEnabled()) { newCluster.setSecurityGroupId(finalSecurityGroup.getId()); } kubernetesClusterDao.persist(newCluster); + addKubernetesClusterDetails(newCluster, defaultNetwork, cmd); return newCluster; } }); - addKubernetesClusterDetails(cluster, defaultNetwork, cmd); - if (logger.isInfoEnabled()) { logger.info("Kubernetes cluster {} has been created", cluster); } @@ -1267,6 +1526,59 @@ public KubernetesClusterVO doInTransaction(TransactionStatus status) { return cluster; } + protected Pair calculateClusterCapacity(Map map, Map nodeTypeCount, Long defaultServiceOfferingId) { + long cores = 0L; + long memory = 0L; + for (String key : CLUSTER_NODES_TYPES_LIST) { + if (nodeTypeCount.getOrDefault(key, 0L) == 0) { + 
continue; + } + Long serviceOfferingId = map.getOrDefault(key, defaultServiceOfferingId); + ServiceOffering serviceOffering = serviceOfferingDao.findById(serviceOfferingId); + Long nodes = nodeTypeCount.get(key); + cores = cores + (serviceOffering.getCpu() * nodes); + memory = memory + (serviceOffering.getRamSize() * nodes); + } + return new Pair<>(cores, memory); + } + + protected Hypervisor.HypervisorType getHypervisorTypeAndValidateNodeDeployments(Map serviceOfferingNodeTypeMap, + Long defaultServiceOfferingId, + Map nodeTypeCount, + DataCenter zone, Long domainId, Long accountId, + Hypervisor.HypervisorType hypervisorType) { + Hypervisor.HypervisorType deploymentHypervisor = null; + for (String nodeType : CLUSTER_NODES_TYPES_LIST) { + if (!nodeTypeCount.containsKey(nodeType)) { + continue; + } + Long serviceOfferingId = serviceOfferingNodeTypeMap.getOrDefault(nodeType, defaultServiceOfferingId); + ServiceOffering serviceOffering = serviceOfferingDao.findById(serviceOfferingId); + Long nodes = nodeTypeCount.getOrDefault(nodeType, defaultServiceOfferingId); + try { + if (nodeType.equalsIgnoreCase(ETCD.name()) && + (!serviceOfferingNodeTypeMap.containsKey(ETCD.name()) || nodes == 0)) { + continue; + } + DeployDestination deployDestination = plan(nodes, zone, serviceOffering, domainId, accountId, hypervisorType); + if (deployDestination.getCluster() == null) { + logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to error while finding suitable deployment plan for cluster in zone : %s", zone.getName())); + } + if (deploymentHypervisor == null) { + deploymentHypervisor = deployDestination.getCluster().getHypervisorType(); + if (hypervisorType != deploymentHypervisor) { + String msg = String.format("The hypervisor type planned for the CKS cluster deployment %s is different " + + "from the selected hypervisor %s", deployDestination.getCluster().getHypervisorType(), hypervisorType); + logger.warn(msg); + } + } + } catch 
(InsufficientCapacityException e) { + logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to insufficient capacity for %d nodes cluster in zone : %s with service offering : %s", nodes, zone.getName(), serviceOffering.getName())); + } + } + return deploymentHypervisor; + } + private SecurityGroup getOrCreateSecurityGroupForAccount(Account owner) { String securityGroupName = String.format("%s-%s", KubernetesClusterActionWorker.CKS_CLUSTER_SECURITY_GROUP_NAME, owner.getUuid()); String securityGroupDesc = String.format("%s and account %s", KubernetesClusterActionWorker.CKS_SECURITY_GROUP_DESCRIPTION, owner.getName()); @@ -1293,7 +1605,7 @@ private SecurityGroup getOrCreateSecurityGroupForAccount(Account owner) { @Override @ActionEvent(eventType = KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_CREATE, eventDescription = "creating Kubernetes cluster", async = true) - public void startKubernetesCluster(CreateKubernetesClusterCmd cmd) throws CloudRuntimeException { + public void startKubernetesCluster(CreateKubernetesClusterCmd cmd) throws CloudRuntimeException, ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { final Long id = cmd.getEntityId(); if (KubernetesCluster.ClusterType.valueOf(cmd.getClusterType()) != KubernetesCluster.ClusterType.CloudManaged) { return; @@ -1302,7 +1614,8 @@ public void startKubernetesCluster(CreateKubernetesClusterCmd cmd) throws CloudR if (kubernetesCluster == null) { throw new InvalidParameterValueException("Failed to find Kubernetes cluster with given ID"); } - if (!startKubernetesCluster(kubernetesCluster, true)) { + Account account = accountService.getAccount(kubernetesCluster.getAccountId()); + if (!startKubernetesCluster(kubernetesCluster.getId(), kubernetesCluster.getDomainId(), account.getAccountName(), cmd.getAsNumber(), true)) { throw new CloudRuntimeException(String.format("Failed to start created Kubernetes cluster: %s", kubernetesCluster.getName())); } @@ 
-1311,7 +1624,7 @@ public void startKubernetesCluster(CreateKubernetesClusterCmd cmd) throws CloudR @Override @ActionEvent(eventType = KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_START, eventDescription = "starting Kubernetes cluster", async = true) - public void startKubernetesCluster(StartKubernetesClusterCmd cmd) throws CloudRuntimeException { + public void startKubernetesCluster(StartKubernetesClusterCmd cmd) throws CloudRuntimeException, ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { final Long id = cmd.getId(); if (id == null || id < 1L) { throw new InvalidParameterValueException("Invalid Kubernetes cluster ID provided"); @@ -1324,7 +1637,8 @@ public void startKubernetesCluster(StartKubernetesClusterCmd cmd) throws CloudRu throw new InvalidParameterValueException(String.format("Start kubernetes cluster is not supported for " + "an externally managed cluster (%s)", kubernetesCluster.getName())); } - if (!startKubernetesCluster(kubernetesCluster, false)) { + Account account = accountService.getAccount(kubernetesCluster.getAccountId()); + if (!startKubernetesCluster(kubernetesCluster.getId(), kubernetesCluster.getDomainId(), account.getAccountName(), null, false)) { throw new CloudRuntimeException(String.format("Failed to start Kubernetes cluster: %s", kubernetesCluster.getName())); } @@ -1336,15 +1650,24 @@ public void startKubernetesCluster(StartKubernetesClusterCmd cmd) throws CloudRu * are provisioned from scratch. Second kind of start, happens on Stopped Kubernetes cluster, in which all resources * are provisioned (like volumes, nics, networks etc). It just that VM's are not in running state. So just * start the VM's (which can possibly implicitly start the network also). 
- * @param kubernetesCluster + * + * @param kubernetesClusterId + * @param domainId + * @param accountName * @param onCreate * @return * @throws CloudRuntimeException */ - public boolean startKubernetesCluster(KubernetesClusterVO kubernetesCluster, boolean onCreate) throws CloudRuntimeException { + @Override + public boolean startKubernetesCluster(long kubernetesClusterId, Long domainId, String accountName, Long asNumber, boolean onCreate) + throws CloudRuntimeException, ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { if (!KubernetesServiceEnabled.value()) { logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); } + final KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId); + if (kubernetesCluster == null) { + throw new InvalidParameterValueException("Failed to find Kubernetes cluster with given ID"); + } if (kubernetesCluster.getRemoved() != null) { throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s is already deleted", kubernetesCluster.getName())); @@ -1365,6 +1688,13 @@ public boolean startKubernetesCluster(KubernetesClusterVO kubernetesCluster, boo if (zone == null) { logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster %s", kubernetesCluster)); } + Long accountId = null; + if (Objects.nonNull(accountName) && Objects.nonNull(domainId)) { + Account account = accountDao.findActiveAccount(accountName, domainId); + if (Objects.nonNull(account)) { + accountId = account.getId(); + } + } KubernetesClusterStartWorker startWorker = new KubernetesClusterStartWorker(kubernetesCluster, this); startWorker = ComponentContext.inject(startWorker); @@ -1372,10 +1702,10 @@ public boolean startKubernetesCluster(KubernetesClusterVO kubernetesCluster, boo // Start for Kubernetes cluster in 'Created' state String[] keys = getServiceUserKeys(kubernetesCluster); startWorker.setKeys(keys); - return 
startWorker.startKubernetesClusterOnCreate(); + return startWorker.startKubernetesClusterOnCreate(domainId, accountId, asNumber); } else { // Start for Kubernetes cluster in 'Stopped' state. Resources are already provisioned, just need to be started - return startWorker.startStoppedKubernetesCluster(); + return startWorker.startStoppedKubernetesCluster(domainId, accountId); } } @@ -1591,28 +1921,47 @@ public boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws Clou logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); } validateKubernetesClusterScaleParameters(cmd); - KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(cmd.getId()); - final Long clusterSize = cmd.getClusterSize(); - if (clusterSize != null) { - CallContext.current().setEventDetails(String.format("Kubernetes cluster ID: %s scaling from size: %d to %d", - kubernetesCluster.getUuid(), kubernetesCluster.getNodeCount(), clusterSize)); - } + Map nodeToOfferingMap = createNodeTypeToServiceOfferingMap(cmd.getServiceOfferingNodeTypeMap(), cmd.getServiceOfferingId(), kubernetesCluster); + String[] keys = getServiceUserKeys(kubernetesCluster); KubernetesClusterScaleWorker scaleWorker = new KubernetesClusterScaleWorker(kubernetesClusterDao.findById(cmd.getId()), - serviceOfferingDao.findById(cmd.getServiceOfferingId()), - clusterSize, - cmd.getNodeIds(), - cmd.isAutoscalingEnabled(), - cmd.getMinSize(), - cmd.getMaxSize(), - this); + nodeToOfferingMap, + cmd.getClusterSize(), + cmd.getNodeIds(), + cmd.isAutoscalingEnabled(), + cmd.getMinSize(), + cmd.getMaxSize(), + this); scaleWorker.setKeys(keys); scaleWorker = ComponentContext.inject(scaleWorker); return scaleWorker.scaleCluster(); } + /** + * Creates a map for the requested node type service offering + * For the node type ALL: Every node is scaled to the same offering + */ + protected Map createNodeTypeToServiceOfferingMap(Map idsMapping, + Long serviceOfferingId, KubernetesClusterVO kubernetesCluster) { + Map 
map = new HashMap<>(); + if (MapUtils.isEmpty(idsMapping)) { + ServiceOfferingVO offering = serviceOfferingId != null ? + serviceOfferingDao.findById(serviceOfferingId) : + serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + map.put(DEFAULT.name(), offering); + return map; + } + for (String key : CLUSTER_NODES_TYPES_LIST) { + if (!idsMapping.containsKey(key)) { + continue; + } + map.put(key, serviceOfferingDao.findById(idsMapping.get(key))); + } + return map; + } + @Override @ActionEvent(eventType = KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_UPGRADE, eventDescription = "upgrading Kubernetes cluster", async = true) @@ -1680,6 +2029,77 @@ public boolean addVmsToCluster(AddVirtualMachinesToKubernetesClusterCmd cmd) { return true; } + @Override + public boolean addNodesToKubernetesCluster(AddNodesToKubernetesClusterCmd cmd) { + if (!KubernetesServiceEnabled.value()) { + logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); + } + KubernetesClusterVO kubernetesCluster = validateCluster(cmd.getClusterId()); + long networkId = kubernetesCluster.getNetworkId(); + NetworkVO networkVO = networkDao.findById(networkId); + List validNodeIds = validateNodes(cmd.getNodeIds(), networkId, networkVO.getName(), kubernetesCluster, false); + if (validNodeIds.isEmpty()) { + throw new CloudRuntimeException("No valid nodes found to be added to the Kubernetes cluster"); + } + KubernetesClusterAddWorker addWorker = new KubernetesClusterAddWorker(kubernetesCluster, KubernetesClusterManagerImpl.this); + addWorker = ComponentContext.inject(addWorker); + return addWorker.addNodesToCluster(validNodeIds, cmd.isMountCksIsoOnVr(), cmd.isManualUpgrade()); + } + + @Override + public boolean removeNodesFromKubernetesCluster(RemoveNodesFromKubernetesClusterCmd cmd) throws Exception { + if (!KubernetesServiceEnabled.value()) { + logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); + } + KubernetesClusterVO kubernetesCluster = 
validateCluster(cmd.getClusterId()); + List validNodeIds = validateNodes(cmd.getNodeIds(), null, null, kubernetesCluster, true); + if (validNodeIds.isEmpty()) { + throw new CloudRuntimeException("No valid nodes found to be removed from the Kubernetes cluster"); + } + KubernetesClusterRemoveWorker removeWorker = new KubernetesClusterRemoveWorker(kubernetesCluster, KubernetesClusterManagerImpl.this); + removeWorker = ComponentContext.inject(removeWorker); + return removeWorker.removeNodesFromCluster(validNodeIds); + } + + private KubernetesClusterVO validateCluster(long clusterId) { + KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(clusterId); + if (kubernetesCluster == null) { + throw new InvalidParameterValueException("Invalid Kubernetes cluster ID specified"); + } + return kubernetesCluster; + } + + private List validateNodes(List nodeIds, Long networkId, String networkName, KubernetesCluster cluster, boolean removeNodes) { + List validNodeIds = new ArrayList<>(nodeIds); + for (Long id : nodeIds) { + VMInstanceVO node = vmInstanceDao.findById(id); + if (Objects.isNull(node)) { + logger.error(String.format("Failed to find node (physical or virtual machine) with ID: %s", id)); + validNodeIds.remove(id); + } else if (!removeNodes) { + VMTemplateVO template = templateDao.findById(node.getTemplateId()); + if (Objects.isNull(template)) { + logger.error((String.format("Failed to find template with ID: %s", id))); + validNodeIds.remove(id); + } else if (!template.isForCks()) { + logger.error(String.format("Node: %s is deployed with a template that is not marked to be used for CKS", node.getId())); + validNodeIds.remove(id); + } + NicVO nicVO = nicDao.findDefaultNicForVM(id); + if (networkId != nicVO.getNetworkId()) { + logger.error(String.format("Node: %s does not have its default NIC in the kubernetes cluster network: %s", node.getId(), networkName)); + validNodeIds.remove(id); + } + List clusterVmMapVO = 
kubernetesClusterVmMapDao.listByClusterIdAndVmIdsIn(cluster.getId(), Collections.singletonList(id)); + if (Objects.nonNull(clusterVmMapVO) && !clusterVmMapVO.isEmpty()) { + logger.warn(String.format("Node: %s is already part of the cluster %s", node.getId(), cluster.getName())); + validNodeIds.remove(id); + } + } + } + return validNodeIds; + } + @Override public List removeVmsFromCluster(RemoveVirtualMachinesFromKubernetesClusterCmd cmd) { if (!KubernetesServiceEnabled.value()) { @@ -1754,6 +2174,8 @@ public List> getCommands() { cmdList.add(UpgradeKubernetesClusterCmd.class); cmdList.add(AddVirtualMachinesToKubernetesClusterCmd.class); cmdList.add(RemoveVirtualMachinesFromKubernetesClusterCmd.class); + cmdList.add(AddNodesToKubernetesClusterCmd.class); + cmdList.add(RemoveNodesFromKubernetesClusterCmd.class); return cmdList; } @@ -2052,8 +2474,14 @@ public ConfigKey[] getConfigKeys() { KubernetesClusterScaleTimeout, KubernetesClusterUpgradeTimeout, KubernetesClusterUpgradeRetries, + KubernetesClusterAddNodeTimeout, KubernetesClusterExperimentalFeaturesEnabled, - KubernetesMaxClusterSize + KubernetesMaxClusterSize, + KubernetesControlNodeInstallAttemptWait, + KubernetesControlNodeInstallReattempts, + KubernetesWorkerNodeInstallAttemptWait, + KubernetesWorkerNodeInstallReattempts, + KubernetesEtcdNodeStartPort }; } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java index 9d86c564de48..ad7aef3250b6 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java @@ -16,11 +16,16 @@ // under the License. 
package com.cloud.kubernetes.cluster; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.ResourceUnavailableException; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.AddNodesToKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.AddVirtualMachinesToKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.CreateKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.DeleteKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.GetKubernetesClusterConfigCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.ListKubernetesClustersCmd; +import org.apache.cloudstack.api.command.user.kubernetes.cluster.RemoveNodesFromKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.RemoveVirtualMachinesFromKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.ScaleKubernetesClusterCmd; import org.apache.cloudstack.api.command.user.kubernetes.cluster.StartKubernetesClusterCmd; @@ -80,6 +85,18 @@ public interface KubernetesClusterService extends PluggableService, Configurable "The number of retries if fail to upgrade kubernetes cluster due to some reasons (e.g. 
drain node, etcdserver leader changed)", true, KubernetesServiceEnabled.key()); + static final ConfigKey KubernetesClusterAddNodeTimeout = new ConfigKey("Advanced", Long.class, + "cloud.kubernetes.cluster.add.node.timeout", + "3600", + "Timeout interval (in seconds) in which an external node(VM / baremetal host) addition to a cluster should be completed", + true, + KubernetesServiceEnabled.key()); + static final ConfigKey KubernetesClusterRemoveNodeTimeout = new ConfigKey("Advanced", Long.class, + "cloud.kubernetes.cluster.remove.node.timeout", + "900", + "Timeout interval (in seconds) in which an external node(VM / baremetal host) removal from a cluster should be completed", + true, + KubernetesServiceEnabled.key()); static final ConfigKey KubernetesClusterExperimentalFeaturesEnabled = new ConfigKey("Advanced", Boolean.class, "cloud.kubernetes.cluster.experimental.features.enabled", "false", @@ -93,6 +110,36 @@ public interface KubernetesClusterService extends PluggableService, Configurable true, ConfigKey.Scope.Account, KubernetesServiceEnabled.key()); + static final ConfigKey KubernetesControlNodeInstallAttemptWait = new ConfigKey("Advanced", Long.class, + "cloud.kubernetes.control.node.install.attempt.wait.duration", + "15", + "Time in seconds for the installation process to wait before it re-attempts", + true, + KubernetesServiceEnabled.key()); + static final ConfigKey KubernetesControlNodeInstallReattempts = new ConfigKey("Advanced", Long.class, + "cloud.kubernetes.control.node.install.reattempt.count", + "100", + "Number of times the offline installation of K8S will be re-attempted", + true, + KubernetesServiceEnabled.key()); + static final ConfigKey KubernetesWorkerNodeInstallAttemptWait = new ConfigKey("Advanced", Long.class, + "cloud.kubernetes.worker.node.install.attempt.wait.duration", + "30", + "Time in seconds for the installation process to wait before it re-attempts", + true, + KubernetesServiceEnabled.key()); + static final ConfigKey 
KubernetesWorkerNodeInstallReattempts = new ConfigKey("Advanced", Long.class, + "cloud.kubernetes.worker.node.install.reattempt.count", + "40", + "Number of times the offline installation of K8S will be re-attempted", + true, + KubernetesServiceEnabled.key()); + static final ConfigKey KubernetesEtcdNodeStartPort = new ConfigKey("Advanced", Integer.class, + "cloud.kubernetes.etcd.node.start.port", + "50000", + "Start port for Port forwarding rules for etcd nodes", + true, + KubernetesServiceEnabled.key()); KubernetesCluster findById(final Long id); @@ -100,9 +147,11 @@ public interface KubernetesClusterService extends PluggableService, Configurable KubernetesCluster createManagedKubernetesCluster(CreateKubernetesClusterCmd cmd) throws CloudRuntimeException; - void startKubernetesCluster(CreateKubernetesClusterCmd cmd) throws CloudRuntimeException; + void startKubernetesCluster(CreateKubernetesClusterCmd cmd) throws CloudRuntimeException, ManagementServerException, ResourceUnavailableException, InsufficientCapacityException; + + void startKubernetesCluster(StartKubernetesClusterCmd cmd) throws CloudRuntimeException, ManagementServerException, ResourceUnavailableException, InsufficientCapacityException; - void startKubernetesCluster(StartKubernetesClusterCmd cmd) throws CloudRuntimeException; + boolean startKubernetesCluster(long kubernetesClusterId, Long domainId, String accountName, Long asNumber, boolean onCreate) throws CloudRuntimeException, ManagementServerException, ResourceUnavailableException, InsufficientCapacityException; boolean stopKubernetesCluster(StopKubernetesClusterCmd cmd) throws CloudRuntimeException; @@ -122,6 +171,10 @@ public interface KubernetesClusterService extends PluggableService, Configurable boolean addVmsToCluster(AddVirtualMachinesToKubernetesClusterCmd cmd); + boolean addNodesToKubernetesCluster(AddNodesToKubernetesClusterCmd cmd); + + boolean removeNodesFromKubernetesCluster(RemoveNodesFromKubernetesClusterCmd cmd) throws Exception; + 
List removeVmsFromCluster(RemoveVirtualMachinesFromKubernetesClusterCmd cmd); boolean isDirectAccess(Network network); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java index 01268f421110..0992a64bff32 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java @@ -118,6 +118,33 @@ public class KubernetesClusterVO implements KubernetesCluster { @Column(name = "cluster_type") private ClusterType clusterType; + @Column(name = "control_service_offering_id") + private Long controlServiceOfferingId; + + @Column(name = "worker_service_offering_id") + private Long workerServiceOfferingId; + + @Column(name = "etcd_service_offering_id") + private Long etcdServiceOfferingId; + + @Column(name = "etcd_node_count") + private Long etcdNodeCount; + + @Column(name = "control_template_id") + private Long controlTemplateId; + + @Column(name = "worker_template_id") + private Long workerTemplateId; + + @Column(name = "etcd_template_id") + private Long etcdTemplateId; + + @Column(name = "cni_config_id", nullable = true) + private Long cniConfigId = null; + + @Column(name = "cni_config_details", updatable = true, length = 4096) + private String cniConfigDetails; + @Override public long getId() { return id; @@ -237,7 +264,7 @@ public void setNodeCount(long nodeCount) { @Override public long getTotalNodeCount() { - return this.controlNodeCount + this.nodeCount; + return this.controlNodeCount + this.nodeCount + this.getEtcdNodeCount(); } @Override @@ -414,4 +441,77 @@ public String toString() { public Class getEntityType() { return KubernetesCluster.class; } + + public Long getControlServiceOfferingId() { + return controlServiceOfferingId; 
+ } + + public void setControlServiceOfferingId(Long controlServiceOfferingId) { + this.controlServiceOfferingId = controlServiceOfferingId; + } + + public Long getWorkerServiceOfferingId() { + return workerServiceOfferingId; + } + + public void setWorkerServiceOfferingId(Long workerServiceOfferingId) { + this.workerServiceOfferingId = workerServiceOfferingId; + } + + public Long getEtcdServiceOfferingId() { + return etcdServiceOfferingId; + } + + public void setEtcdServiceOfferingId(Long etcdServiceOfferingId) { + this.etcdServiceOfferingId = etcdServiceOfferingId; + } + + public Long getEtcdNodeCount() { + return etcdNodeCount != null ? etcdNodeCount : 0L; + } + + public void setEtcdNodeCount(Long etcdNodeCount) { + this.etcdNodeCount = etcdNodeCount; + } + + public Long getEtcdTemplateId() { + return etcdTemplateId; + } + + public void setEtcdTemplateId(Long etcdTemplateId) { + this.etcdTemplateId = etcdTemplateId; + } + + public Long getWorkerTemplateId() { + return workerTemplateId; + } + + public void setWorkerTemplateId(Long workerTemplateId) { + this.workerTemplateId = workerTemplateId; + } + + public Long getControlTemplateId() { + return controlTemplateId; + } + + public void setControlTemplateId(Long controlTemplateId) { + this.controlTemplateId = controlTemplateId; + } + + public Long getCniConfigId() { + return cniConfigId; + } + + public void setCniConfigId(Long cniConfigId) { + this.cniConfigId = cniConfigId; + } + + public String getCniConfigDetails() { + return cniConfigDetails; + } + + public void setCniConfigDetails(String cniConfigDetails) { + this.cniConfigDetails = cniConfigDetails; + } + } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java index f6126f01be5b..6c45c63e16d8 100644 --- 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java @@ -42,6 +42,18 @@ public class KubernetesClusterVmMapVO implements KubernetesClusterVmMap { @Column(name = "control_node") boolean controlNode; + @Column(name = "etcd_node") + boolean etcdNode; + + @Column(name = "external_node") + boolean externalNode; + + @Column(name = "manual_upgrade") + boolean manualUpgrade; + + @Column(name = "kubernetes_node_version") + String nodeVersion; + public KubernetesClusterVmMapVO() { } @@ -83,4 +95,36 @@ public boolean isControlNode() { public void setControlNode(boolean controlNode) { this.controlNode = controlNode; } + + public boolean isEtcdNode() { + return etcdNode; + } + + public void setEtcdNode(boolean etcdNode) { + this.etcdNode = etcdNode; + } + + public boolean isExternalNode() { + return externalNode; + } + + public void setExternalNode(boolean externalNode) { + this.externalNode = externalNode; + } + + public boolean isManualUpgrade() { + return manualUpgrade; + } + + public void setManualUpgrade(boolean manualUpgrade) { + this.manualUpgrade = manualUpgrade; + } + + public String getNodeVersion() { + return nodeVersion; + } + + public void setNodeVersion(String nodeVersion) { + this.nodeVersion = nodeVersion; + } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelperImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelperImpl.java index bf49c2abb8d3..98dddd310c42 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelperImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelperImpl.java @@ -18,10 +18,18 @@ import java.lang.reflect.Field; 
import java.lang.reflect.Modifier; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; import javax.inject.Inject; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.offering.ServiceOffering; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.vm.VmDetailConstants; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.framework.config.ConfigKey; @@ -37,6 +45,8 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.UserVmManager; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.apache.commons.lang3.ObjectUtils; @@ -50,6 +60,10 @@ public class KubernetesServiceHelperImpl extends AdapterBase implements Kubernet private KubernetesClusterDao kubernetesClusterDao; @Inject private KubernetesClusterVmMapDao kubernetesClusterVmMapDao; + @Inject + protected ServiceOfferingDao serviceOfferingDao; + @Inject + protected VMTemplateDao vmTemplateDao; protected void setEventTypeEntityDetails(Class eventTypeDefinedClass, Class entityClass) { Field[] declaredFields = eventTypeDefinedClass.getDeclaredFields(); @@ -106,6 +120,127 @@ public void checkVmCanBeDestroyed(UserVm userVm) { throw new CloudRuntimeException(msg); } + @Override + public boolean isValidNodeType(String nodeType) { + if (StringUtils.isBlank(nodeType)) { + return false; + } + try { + KubernetesClusterNodeType.valueOf(nodeType.toUpperCase()); + return true; + } catch (IllegalArgumentException e) { + return false; + } + } + + @Override + public Map getServiceOfferingNodeTypeMap(Map> serviceOfferingNodeTypeMap) { + Map mapping = new HashMap<>(); + if (MapUtils.isNotEmpty(serviceOfferingNodeTypeMap)) { + for (Map entry : 
serviceOfferingNodeTypeMap.values()) { + processNodeTypeOfferingEntryAndAddToMappingIfValid(entry, mapping); + } + } + return mapping; + } + + protected void checkNodeTypeOfferingEntryCompleteness(String nodeTypeStr, String serviceOfferingUuid) { + if (StringUtils.isAnyEmpty(nodeTypeStr, serviceOfferingUuid)) { + String error = String.format("Incomplete Node Type to Service Offering ID mapping: '%s' -> '%s'", nodeTypeStr, serviceOfferingUuid); + logger.error(error); + throw new InvalidParameterValueException(error); + } + } + + protected void checkNodeTypeOfferingEntryValues(String nodeTypeStr, ServiceOffering serviceOffering, String serviceOfferingUuid) { + if (!isValidNodeType(nodeTypeStr)) { + String error = String.format("The provided value '%s' for Node Type is invalid", nodeTypeStr); + logger.error(error); + throw new InvalidParameterValueException(error); + } + if (serviceOffering == null) { + String error = String.format("Cannot find a service offering with ID %s", serviceOfferingUuid); + logger.error(error); + throw new InvalidParameterValueException(error); + } + } + + protected void addNodeTypeOfferingEntry(String nodeTypeStr, String serviceOfferingUuid, ServiceOffering serviceOffering, Map mapping) { + if (logger.isDebugEnabled()) { + logger.debug(String.format("Node Type: '%s' should use Service Offering ID: '%s'", nodeTypeStr, serviceOfferingUuid)); + } + KubernetesClusterNodeType nodeType = KubernetesClusterNodeType.valueOf(nodeTypeStr.toUpperCase()); + mapping.put(nodeType.name(), serviceOffering.getId()); + } + + protected void processNodeTypeOfferingEntryAndAddToMappingIfValid(Map entry, Map mapping) { + if (MapUtils.isEmpty(entry)) { + return; + } + String nodeTypeStr = entry.get(VmDetailConstants.CKS_NODE_TYPE); + String serviceOfferingUuid = entry.get(VmDetailConstants.OFFERING); + checkNodeTypeOfferingEntryCompleteness(nodeTypeStr, serviceOfferingUuid); + + ServiceOffering serviceOffering = 
serviceOfferingDao.findByUuid(serviceOfferingUuid); + checkNodeTypeOfferingEntryValues(nodeTypeStr, serviceOffering, serviceOfferingUuid); + + addNodeTypeOfferingEntry(nodeTypeStr, serviceOfferingUuid, serviceOffering, mapping); + } + + protected void checkNodeTypeTemplateEntryCompleteness(String nodeTypeStr, String templateUuid) { + if (StringUtils.isAnyEmpty(nodeTypeStr, templateUuid)) { + String error = String.format("Incomplete Node Type to template ID mapping: '%s' -> '%s'", nodeTypeStr, templateUuid); + logger.error(error); + throw new InvalidParameterValueException(error); + } + } + + protected void checkNodeTypeTemplateEntryValues(String nodeTypeStr, VMTemplateVO template, String templateUuid) { + if (!isValidNodeType(nodeTypeStr)) { + String error = String.format("The provided value '%s' for Node Type is invalid", nodeTypeStr); + logger.error(error); + throw new InvalidParameterValueException(error); + } + if (template == null) { + String error = String.format("Cannot find a template with ID %s", templateUuid); + logger.error(error); + throw new InvalidParameterValueException(error); + } + } + + protected void addNodeTypeTemplateEntry(String nodeTypeStr, String templateUuid, VMTemplateVO template, Map mapping) { + if (logger.isDebugEnabled()) { + logger.debug(String.format("Node Type: '%s' should use template ID: '%s'", nodeTypeStr, templateUuid)); + } + KubernetesClusterNodeType nodeType = KubernetesClusterNodeType.valueOf(nodeTypeStr.toUpperCase()); + mapping.put(nodeType.name(), template.getId()); + } + + protected void processNodeTypeTemplateEntryAndAddToMappingIfValid(Map entry, Map mapping) { + if (MapUtils.isEmpty(entry)) { + return; + } + String nodeTypeStr = entry.get(VmDetailConstants.CKS_NODE_TYPE); + String templateUuid = entry.get(VmDetailConstants.TEMPLATE); + checkNodeTypeTemplateEntryCompleteness(nodeTypeStr, templateUuid); + + VMTemplateVO template = vmTemplateDao.findByUuid(templateUuid); + 
checkNodeTypeTemplateEntryValues(nodeTypeStr, template, templateUuid); + + addNodeTypeTemplateEntry(nodeTypeStr, templateUuid, template, mapping); + } + + @Override + public Map getTemplateNodeTypeMap(Map> templateNodeTypeMap) { + Map mapping = new HashMap<>(); + if (MapUtils.isNotEmpty(templateNodeTypeMap)) { + for (Map entry : templateNodeTypeMap.values()) { + processNodeTypeTemplateEntryAndAddToMappingIfValid(entry, mapping); + } + } + return mapping; + } + @Override public String getConfigComponentName() { return KubernetesServiceHelper.class.getSimpleName(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 076bd105728c..eba2b8535ab8 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -14,19 +14,23 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
- package com.cloud.kubernetes.cluster.actionworkers; import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.io.IOException; +import java.lang.reflect.Field; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import javax.inject.Inject; @@ -34,13 +38,40 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType; +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.network.dao.NetworkVO; +import com.cloud.offering.ServiceOffering; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil; +import com.cloud.network.firewall.FirewallService; +import com.cloud.network.rules.FirewallRule; +import com.cloud.network.rules.PortForwardingRuleVO; +import com.cloud.network.rules.RulesService; +import com.cloud.network.rules.dao.PortForwardingRulesDao; +import com.cloud.user.SSHKeyPairVO; +import com.cloud.user.dao.UserDataDao; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.net.Ip; +import com.cloud.vm.Nic; +import com.cloud.vm.NicVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.NicDao; +import com.cloud.vm.UserVmManager; +import org.apache.cloudstack.affinity.AffinityGroupVO; +import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.ApiConstants; +import 
org.apache.cloudstack.api.command.user.firewall.CreateFirewallRuleCmd; import org.apache.cloudstack.ca.CAManager; import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.userdata.UserDataManager; +import org.apache.commons.codec.binary.Base64; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; @@ -93,11 +124,14 @@ import com.cloud.vm.UserVmDetailVO; import com.cloud.vm.UserVmService; import com.cloud.vm.UserVmVO; -import com.cloud.vm.VirtualMachine; import com.cloud.vm.VmDetailConstants; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.UserVmDetailsDao; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.CONTROL; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.ETCD; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.WORKER; + public class KubernetesClusterActionWorker { @@ -105,6 +139,8 @@ public class KubernetesClusterActionWorker { public static final int CLUSTER_API_PORT = 6443; public static final int DEFAULT_SSH_PORT = 22; public static final int CLUSTER_NODES_DEFAULT_START_SSH_PORT = 2222; + public static final int ETCD_NODE_CLIENT_REQUEST_PORT = 2379; + public static final int ETCD_NODE_PEER_COMM_PORT = 2380; public static final int CLUSTER_NODES_DEFAULT_SSH_PORT_SG = DEFAULT_SSH_PORT; public static final String CKS_CLUSTER_SECURITY_GROUP_NAME = "CKSSecurityGroup"; @@ -112,6 +148,8 @@ public class KubernetesClusterActionWorker { protected Logger logger = LogManager.getLogger(getClass()); + protected final static List CLUSTER_NODES_TYPES_LIST = Arrays.asList(WORKER, CONTROL, ETCD); + protected 
StateMachine2 _stateMachine = KubernetesCluster.State.getStateMachine(); @Inject @@ -149,6 +187,12 @@ public class KubernetesClusterActionWorker { @Inject protected UserVmService userVmService; @Inject + protected UserDataManager userDataManager; + @Inject + protected UserDataDao userDataDao; + @Inject + protected UserVmManager userVmManager; + @Inject protected VlanDao vlanDao; @Inject protected LaunchPermissionDao launchPermissionDao; @@ -156,6 +200,16 @@ public class KubernetesClusterActionWorker { public ProjectService projectService; @Inject public VpcService vpcService; + @Inject + public PortForwardingRulesDao portForwardingRulesDao; + @Inject + protected RulesService rulesService; + @Inject + protected FirewallService firewallService; + @Inject + private NicDao nicDao; + @Inject + protected AffinityGroupDao affinityGroupDao; protected KubernetesClusterDao kubernetesClusterDao; protected KubernetesClusterVmMapDao kubernetesClusterVmMapDao; @@ -165,6 +219,9 @@ public class KubernetesClusterActionWorker { protected KubernetesCluster kubernetesCluster; protected Account owner; protected VirtualMachineTemplate clusterTemplate; + protected VirtualMachineTemplate controlNodeTemplate; + protected VirtualMachineTemplate workerNodeTemplate; + protected VirtualMachineTemplate etcdTemplate; protected File sshKeyFile; protected String publicIpAddress; protected int sshPort; @@ -173,6 +230,8 @@ public class KubernetesClusterActionWorker { protected final String deploySecretsScriptFilename = "deploy-cloudstack-secret"; protected final String deployProviderScriptFilename = "deploy-provider"; protected final String autoscaleScriptFilename = "autoscale-kube-cluster"; + protected final String validateNodeScript = "validate-cks-node"; + protected final String removeNodeFromClusterScript = "remove-node-from-cluster"; protected final String scriptPath = "/opt/bin/"; protected File deploySecretsScriptFile; protected File deployProviderScriptFile; @@ -196,7 +255,10 @@ protected 
void init() { DataCenterVO dataCenterVO = dataCenterDao.findById(zoneId); VMTemplateVO template = templateDao.findById(templateId); Hypervisor.HypervisorType type = template.getHypervisorType(); - this.clusterTemplate = manager.getKubernetesServiceTemplate(dataCenterVO, type); + this.clusterTemplate = manager.getKubernetesServiceTemplate(dataCenterVO, type, null, KubernetesClusterNodeType.DEFAULT); + this.controlNodeTemplate = templateDao.findById(this.kubernetesCluster.getControlTemplateId()); + this.workerNodeTemplate = templateDao.findById(this.kubernetesCluster.getWorkerTemplateId()); + this.etcdTemplate = templateDao.findById(this.kubernetesCluster.getEtcdTemplateId()); this.sshKeyFile = getManagementServerSshPublicKeyFile(); } @@ -269,7 +331,7 @@ protected void logTransitStateDetachIsoAndThrow(final Level logLevel, final Stri } protected void deleteTemplateLaunchPermission() { - if (clusterTemplate != null && owner != null) { + if (isDefaultTemplateUsed() && owner != null) { logger.info("Revoking launch permission for systemVM template"); launchPermissionDao.removePermissions(clusterTemplate.getId(), Collections.singletonList(owner.getId())); } @@ -308,11 +370,20 @@ protected File getManagementServerSshPublicKeyFile() { return new File(keyFile); } - protected KubernetesClusterVmMapVO addKubernetesClusterVm(final long kubernetesClusterId, final long vmId, boolean isControlNode) { + protected KubernetesClusterVmMapVO addKubernetesClusterVm(final long kubernetesClusterId, final long vmId, + boolean isControlNode, boolean isExternalNode, + boolean isEtcdNode, boolean markForManualUpgrade) { + KubernetesSupportedVersion kubernetesVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); return Transaction.execute(new TransactionCallback() { @Override public KubernetesClusterVmMapVO doInTransaction(TransactionStatus status) { KubernetesClusterVmMapVO newClusterVmMap = new KubernetesClusterVmMapVO(kubernetesClusterId, vmId, 
isControlNode); + newClusterVmMap.setExternalNode(isExternalNode); + newClusterVmMap.setManualUpgrade(markForManualUpgrade); + newClusterVmMap.setEtcdNode(isEtcdNode); + if (!isEtcdNode) { + newClusterVmMap.setNodeVersion(kubernetesVersion.getSemanticVersion()); + } kubernetesClusterVmMapDao.persist(newClusterVmMap); return newClusterVmMap; } @@ -323,6 +394,7 @@ private UserVm fetchControlVmIfMissing(final UserVm controlVm) { if (controlVm != null) { return controlVm; } + Long etcdNodeCount = kubernetesCluster.getEtcdNodeCount(); List clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); if (CollectionUtils.isEmpty(clusterVMs)) { logger.warn(String.format("Unable to retrieve VMs for Kubernetes cluster : %s", kubernetesCluster.getName())); @@ -333,7 +405,8 @@ private UserVm fetchControlVmIfMissing(final UserVm controlVm) { vmIds.add(vmMap.getVmId()); } Collections.sort(vmIds); - return userVmDao.findById(vmIds.get(0)); + int controlNodeIndex = Objects.nonNull(etcdNodeCount) ? 
etcdNodeCount.intValue() : 0; + return userVmDao.findById(vmIds.get(controlNodeIndex)); } protected String getControlVmPrivateIp() { @@ -367,7 +440,23 @@ protected IpAddress getVpcTierKubernetesPublicIp(Network network) { return address; } - protected IpAddress acquireVpcTierKubernetesPublicIp(Network network) throws + protected IpAddress getPublicIp(Network network) throws ManagementServerException { + if (network.getVpcId() != null) { + IpAddress publicIp = getVpcTierKubernetesPublicIp(network); + if (publicIp == null) { + throw new ManagementServerException(String.format("No public IP addresses found for VPC tier : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); + } + return publicIp; + } + IpAddress publicIp = getNetworkSourceNatIp(network); + if (publicIp == null) { + throw new ManagementServerException(String.format("No source NAT IP addresses found for network : %s, Kubernetes cluster : %s", + network.getName(), kubernetesCluster.getName())); + } + return publicIp; + } + + protected IpAddress acquireVpcTierKubernetesPublicIp(Network network, boolean forEtcd) throws InsufficientAddressCapacityException, ResourceAllocationException, ResourceUnavailableException { IpAddress ip = networkService.allocateIP(owner, kubernetesCluster.getZoneId(), network.getId(), null, null); if (ip == null) { @@ -375,7 +464,19 @@ protected IpAddress acquireVpcTierKubernetesPublicIp(Network network) throws } ip = vpcService.associateIPToVpc(ip.getId(), network.getVpcId()); ip = ipAddressManager.associateIPToGuestNetwork(ip.getId(), network.getId(), false); - kubernetesClusterDetailsDao.addDetail(kubernetesCluster.getId(), ApiConstants.PUBLIC_IP_ID, ip.getUuid(), false); + if (!forEtcd) { + kubernetesClusterDetailsDao.addDetail(kubernetesCluster.getId(), ApiConstants.PUBLIC_IP_ID, ip.getUuid(), false); + } + return ip; + } + + protected IpAddress acquirePublicIpForIsolatedNetwork(Network network) throws + InsufficientAddressCapacityException, 
ResourceAllocationException, ResourceUnavailableException { + IpAddress ip = networkService.allocateIP(owner, kubernetesCluster.getZoneId(), network.getId(), null, null); + if (ip == null) { + return null; + } + ip = networkService.associateIPToNetwork(ip.getId(), network.getId()); return ip; } @@ -407,7 +508,7 @@ protected Pair getKubernetesClusterServerIpSshPortForVpcTier(Ne return new Pair<>(address.getAddress().addr(), port); } if (acquireNewPublicIpForVpcTierIfNeeded) { - address = acquireVpcTierKubernetesPublicIp(network); + address = acquireVpcTierKubernetesPublicIp(network, false); if (address != null) { return new Pair<>(address.getAddress().addr(), port); } @@ -500,7 +601,7 @@ protected void detachIsoKubernetesVMs(List clusterVMs) { CallContext vmContext = CallContext.register(CallContext.current(), ApiCommandResourceType.VirtualMachine); vmContext.putContextParameter(VirtualMachine.class, vm.getUuid()); try { - result = templateService.detachIso(vm.getId(), true); + result = templateService.detachIso(vm.getId(), null, true); } catch (CloudRuntimeException ex) { logger.warn("Failed to detach binaries ISO from VM: {} in the Kubernetes cluster: {} ", vm, kubernetesCluster, ex); } finally { @@ -612,12 +713,16 @@ protected void copyScripts(String nodeAddress, final int sshPort) { copyScriptFile(nodeAddress, sshPort, autoscaleScriptFile, autoscaleScriptFilename); } - protected void copyScriptFile(String nodeAddress, final int sshPort, File file, String desitnation) { + protected void copyScriptFile(String nodeAddress, final int sshPort, File file, String destination) { try { + if (Objects.isNull(sshKeyFile)) { + sshKeyFile = getManagementServerSshPublicKeyFile(); + } SshHelper.scpTo(nodeAddress, sshPort, getControlNodeLoginUser(), sshKeyFile, null, - "~/", file.getAbsolutePath(), "0755"); - String cmdStr = String.format("sudo mv ~/%s %s/%s", file.getName(), scriptPath, desitnation); - SshHelper.sshExecute(publicIpAddress, sshPort, getControlNodeLoginUser(), 
sshKeyFile, null, + "~/", file.getAbsolutePath(), "0755", 20000, 30 * 60 * 1000); + // Ensure destination dir scriptPath exists and copy file to destination + String cmdStr = String.format("sudo mkdir -p %s ; sudo mv ~/%s %s/%s", scriptPath, file.getName(), scriptPath, destination); + SshHelper.sshExecute(nodeAddress, sshPort, getControlNodeLoginUser(), sshKeyFile, null, cmdStr, 10000, 10000, 10 * 60 * 1000); } catch (Exception e) { throw new CloudRuntimeException(e); @@ -635,20 +740,30 @@ protected boolean taintControlNodes() { String command = String.format("sudo /opt/bin/kubectl annotate node %s cluster-autoscaler.kubernetes.io/scale-down-disabled=true ; ", name); commands.append(command); } - try { - File pkFile = getManagementServerSshPublicKeyFile(); - Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); - publicIpAddress = publicIpSshPort.first(); - sshPort = publicIpSshPort.second(); + int retryCounter = 0; + while (retryCounter < 3) { + retryCounter++; + try { + File pkFile = getManagementServerSshPublicKeyFile(); + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + sshPort = publicIpSshPort.second(); - Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, getControlNodeLoginUser(), - pkFile, null, commands.toString(), 10000, 10000, 60000); - return result.first(); - } catch (Exception e) { - String msg = String.format("Failed to taint control nodes on : %s : %s", kubernetesCluster.getName(), e.getMessage()); - logMessage(Level.ERROR, msg, e); - return false; + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, getControlNodeLoginUser(), + pkFile, null, commands.toString(), 10000, 10000, 60000); + return result.first(); + } catch (Exception e) { + String msg = String.format("Failed to taint control nodes on : %s : %s", kubernetesCluster.getName(), e.getMessage()); + logMessage(Level.ERROR, msg, e); + } + try { + Thread.sleep(5 * 1000L); + } catch 
(InterruptedException ie) { + logger.error(String.format("Error while attempting to taint nodes on Kubernetes cluster: %s", kubernetesCluster.getName()), ie); + } + retryCounter++; } + return false; } protected boolean deployProvider() { @@ -698,4 +813,247 @@ protected boolean deployProvider() { public void setKeys(String[] keys) { this.keys = keys; } + + protected ServiceOffering getServiceOfferingForNodeTypeOnCluster(KubernetesClusterNodeType nodeType, + KubernetesCluster cluster) { + Long offeringId = null; + Long defaultOfferingId = cluster.getServiceOfferingId(); + Long controlOfferingId = cluster.getControlServiceOfferingId(); + Long workerOfferingId = cluster.getWorkerServiceOfferingId(); + Long etcdOfferingId = cluster.getEtcdServiceOfferingId(); + if (KubernetesClusterNodeType.CONTROL == nodeType) { + offeringId = controlOfferingId != null ? controlOfferingId : defaultOfferingId; + } else if (KubernetesClusterNodeType.WORKER == nodeType) { + offeringId = workerOfferingId != null ? workerOfferingId : defaultOfferingId; + } else if (KubernetesClusterNodeType.ETCD == nodeType && cluster.getEtcdNodeCount() != null && cluster.getEtcdNodeCount() > 0) { + offeringId = etcdOfferingId != null ? 
etcdOfferingId : defaultOfferingId; + } + + if (offeringId == null) { + String msg = String.format("Cannot find a service offering for the %s nodes on the Kubernetes cluster %s", nodeType.name(), cluster.getName()); + logger.error(msg); + throw new CloudRuntimeException(msg); + } + return serviceOfferingDao.findById(offeringId); + } + + protected boolean isDefaultTemplateUsed() { + if (Arrays.asList(kubernetesCluster.getControlTemplateId(), kubernetesCluster.getWorkerTemplateId(), kubernetesCluster.getEtcdTemplateId()).contains(kubernetesCluster.getTemplateId())) { + return true; + } + return false; + } + + protected void provisionPublicIpPortForwardingRule(IpAddress publicIp, Network network, Account account, + final long vmId, final int sourcePort, final int destPort) throws NetworkRuleConflictException, ResourceUnavailableException { + final long publicIpId = publicIp.getId(); + final long networkId = network.getId(); + final long accountId = account.getId(); + final long domainId = account.getDomainId(); + Nic vmNic = networkModel.getNicInNetwork(vmId, networkId); + final Ip vmIp = new Ip(vmNic.getIPv4Address()); + PortForwardingRuleVO pfRule = Transaction.execute((TransactionCallbackWithException) status -> { + PortForwardingRuleVO newRule = + new PortForwardingRuleVO(null, publicIpId, + sourcePort, sourcePort, + vmIp, + destPort, destPort, + "tcp", networkId, accountId, domainId, vmId); + newRule.setDisplay(true); + newRule.setState(FirewallRule.State.Add); + newRule = portForwardingRulesDao.persist(newRule); + return newRule; + }); + rulesService.applyPortForwardingRules(publicIp.getId(), account); + if (logger.isInfoEnabled()) { + logger.info(String.format("Provisioned SSH port forwarding rule: %s from port %d to %d on %s to the VM IP : %s in Kubernetes cluster : %s", pfRule.getUuid(), sourcePort, destPort, publicIp.getAddress().addr(), vmIp.toString(), kubernetesCluster.getName())); + } + } + + public String getKubernetesNodeConfig(final String joinIp, 
final boolean ejectIso, final boolean mountCksIsoOnVR) throws IOException { + String k8sNodeConfig = readResourceFile("/conf/k8s-node.yml"); + final String sshPubKey = "{{ k8s.ssh.pub.key }}"; + final String joinIpKey = "{{ k8s_control_node.join_ip }}"; + final String clusterTokenKey = "{{ k8s_control_node.cluster.token }}"; + final String ejectIsoKey = "{{ k8s.eject.iso }}"; + final String routerIpKey = "{{ k8s.vr.iso.mounted.ip }}"; + final String installWaitTime = "{{ k8s.install.wait.time }}"; + final String installReattemptsCount = "{{ k8s.install.reattempts.count }}"; + + final Long waitTime = KubernetesClusterService.KubernetesWorkerNodeInstallAttemptWait.value(); + final Long reattempts = KubernetesClusterService.KubernetesWorkerNodeInstallReattempts.value(); + String routerIp = ""; + if (mountCksIsoOnVR) { + NicVO routerNicOnNetwork = getVirtualRouterNicOnKubernetesClusterNetwork(kubernetesCluster); + if (Objects.nonNull(routerNicOnNetwork)) { + routerIp = routerNicOnNetwork.getIPv4Address(); + } + } + String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\""; + String sshKeyPair = kubernetesCluster.getKeyPair(); + if (StringUtils.isNotEmpty(sshKeyPair)) { + SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair); + if (sshkp != null) { + pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; + } + } + k8sNodeConfig = k8sNodeConfig.replace(sshPubKey, pubKey); + k8sNodeConfig = k8sNodeConfig.replace(joinIpKey, joinIp); + k8sNodeConfig = k8sNodeConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); + k8sNodeConfig = k8sNodeConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); + k8sNodeConfig = k8sNodeConfig.replace(routerIpKey, routerIp); + k8sNodeConfig = k8sNodeConfig.replace(installWaitTime, String.valueOf(waitTime)); + k8sNodeConfig = k8sNodeConfig.replace(installReattemptsCount, String.valueOf(reattempts)); + + k8sNodeConfig = 
updateKubeConfigWithRegistryDetails(k8sNodeConfig); + + return k8sNodeConfig; + } + + protected String updateKubeConfigWithRegistryDetails(String k8sConfig) { + /* genarate /etc/containerd/config.toml file on the nodes only if Kubernetes cluster is created to + * use docker private registry */ + String registryUsername = null; + String registryPassword = null; + String registryUrl = null; + + List details = kubernetesClusterDetailsDao.listDetails(kubernetesCluster.getId()); + for (KubernetesClusterDetailsVO detail : details) { + if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_USER_NAME)) { + registryUsername = detail.getValue(); + } + if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_PASSWORD)) { + registryPassword = detail.getValue(); + } + if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_URL)) { + registryUrl = detail.getValue(); + } + } + + if (StringUtils.isNoneEmpty(registryUsername, registryPassword, registryUrl)) { + // Update runcmd in the cloud-init configuration to run a script that updates the containerd config with provided registry details + String runCmd = "- bash -x /opt/bin/setup-containerd"; + + String registryEp = registryUrl.split("://")[1]; + k8sConfig = k8sConfig.replace("- containerd config default > /etc/containerd/config.toml", runCmd); + final String registryUrlKey = "{{registry.url}}"; + final String registryUrlEpKey = "{{registry.url.endpoint}}"; + final String registryAuthKey = "{{registry.token}}"; + final String registryUname = "{{registry.username}}"; + final String registryPsswd = "{{registry.password}}"; + + final String usernamePasswordKey = registryUsername + ":" + registryPassword; + String base64Auth = Base64.encodeBase64String(usernamePasswordKey.getBytes(com.cloud.utils.StringUtils.getPreferredCharset())); + k8sConfig = k8sConfig.replace(registryUrlKey, registryUrl); + k8sConfig = k8sConfig.replace(registryUrlEpKey, registryEp); + k8sConfig = k8sConfig.replace(registryUname, registryUsername); + 
k8sConfig = k8sConfig.replace(registryPsswd, registryPassword); + k8sConfig = k8sConfig.replace(registryAuthKey, base64Auth); + } + return k8sConfig; + } + + public Map addFirewallRulesForNodes(IpAddress publicIp, int size) throws ManagementServerException { + Map vmIdPortMap = new HashMap<>(); + CallContext.register(CallContext.current(), null); + try { + List clusterVmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); + List externalNodes = clusterVmList.stream().filter(KubernetesClusterVmMapVO::isExternalNode).collect(Collectors.toList()); + int endPort = (CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVmList.size() - externalNodes.size() - kubernetesCluster.getEtcdNodeCount().intValue() - 1); + provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); + if (logger.isInfoEnabled()) { + logger.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster : %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName())); + } + if (!externalNodes.isEmpty()) { + AtomicInteger additionalNodes = new AtomicInteger(1); + externalNodes.forEach(externalNode -> { + int port = endPort + additionalNodes.get(); + try { + provisionFirewallRules(publicIp, owner, port, port); + vmIdPortMap.put(externalNode.getVmId(), port); + } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { + throw new CloudRuntimeException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + additionalNodes.addAndGet(1); + }); + } + } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), 
e); + } finally { + CallContext.unregister(); + } + return vmIdPortMap; + } + + protected void provisionFirewallRules(final IpAddress publicIp, final Account account, int startPort, int endPort) throws NoSuchFieldException, + IllegalAccessException, ResourceUnavailableException, NetworkRuleConflictException { + List sourceCidrList = new ArrayList(); + sourceCidrList.add("0.0.0.0/0"); + + CreateFirewallRuleCmd rule = new CreateFirewallRuleCmd(); + rule = ComponentContext.inject(rule); + + Field addressField = rule.getClass().getDeclaredField("ipAddressId"); + addressField.setAccessible(true); + addressField.set(rule, publicIp.getId()); + + Field protocolField = rule.getClass().getDeclaredField("protocol"); + protocolField.setAccessible(true); + protocolField.set(rule, "TCP"); + + Field startPortField = rule.getClass().getDeclaredField("publicStartPort"); + startPortField.setAccessible(true); + startPortField.set(rule, startPort); + + Field endPortField = rule.getClass().getDeclaredField("publicEndPort"); + endPortField.setAccessible(true); + endPortField.set(rule, endPort); + + Field cidrField = rule.getClass().getDeclaredField("cidrlist"); + cidrField.setAccessible(true); + cidrField.set(rule, sourceCidrList); + + firewallService.createIngressFirewallRule(rule); + firewallService.applyIngressFwRules(publicIp.getId(), account); + } + + protected NicVO getVirtualRouterNicOnKubernetesClusterNetwork(KubernetesCluster kubernetesCluster) { + long networkId = kubernetesCluster.getNetworkId(); + NetworkVO kubernetesClusterNetwork = networkDao.findById(networkId); + if (kubernetesClusterNetwork == null) { + logAndThrow(Level.ERROR, String.format("Cannot find network %s set on Kubernetes Cluster %s", networkId, kubernetesCluster.getName())); + } + NicVO routerNicOnNetwork = nicDao.findByNetworkIdAndType(networkId, VirtualMachine.Type.DomainRouter); + if (routerNicOnNetwork == null) { + logAndThrow(Level.ERROR, String.format("Cannot find a Virtual Router on Kubernetes Cluster 
%s network %s", kubernetesCluster.getName(), kubernetesClusterNetwork.getName())); + } + return routerNicOnNetwork; + } + + protected Map getVmPortMap() { + List clusterVmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); + List externalNodes = clusterVmList.stream().filter(KubernetesClusterVmMapVO::isExternalNode).collect(Collectors.toList()); + Map vmIdPortMap = new HashMap<>(); + int defaultNodesCount = clusterVmList.size() - externalNodes.size(); + AtomicInteger i = new AtomicInteger(0); + externalNodes.forEach(node -> { + vmIdPortMap.put(node.getVmId(), CLUSTER_NODES_DEFAULT_START_SSH_PORT + defaultNodesCount + i.get()); + i.addAndGet(1); + }); + return vmIdPortMap; + } + + public Long getExplicitAffinityGroup(Long domainId, Long accountId) { + AffinityGroupVO groupVO = null; + if (Objects.nonNull(accountId)) { + groupVO = affinityGroupDao.findByAccountAndType(accountId, "ExplicitDedication"); + } + if (Objects.isNull(groupVO)) { + groupVO = affinityGroupDao.findDomainLevelGroupByType(domainId, "ExplicitDedication"); + } + if (Objects.nonNull(groupVO)) { + return groupVO.getId(); + } + return null; + } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterAddWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterAddWorker.java new file mode 100644 index 000000000000..8b694adf1cca --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterAddWorker.java @@ -0,0 +1,326 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.kubernetes.cluster.actionworkers; + +import com.cloud.event.ActionEventUtils; +import com.cloud.event.EventVO; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes; +import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.kubernetes.cluster.KubernetesClusterVO; +import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil; +import com.cloud.network.IpAddress; +import com.cloud.network.Network; +import com.cloud.network.dao.FirewallRulesDao; +import com.cloud.network.rules.FirewallRuleVO; +import com.cloud.network.rules.PortForwardingRuleVO; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.user.Account; +import com.cloud.uservm.UserVm; +import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.ssh.SshHelper; +import com.cloud.vm.UserVmVO; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.BaseCmd; +import 
org.apache.cloudstack.api.command.user.vm.RebootVMCmd; +import org.apache.cloudstack.context.CallContext; +import org.apache.commons.codec.binary.Base64; +import org.apache.logging.log4j.Level; + +import javax.inject.Inject; +import java.io.File; +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +public class KubernetesClusterAddWorker extends KubernetesClusterActionWorker { + + @Inject + private FirewallRulesDao firewallRulesDao; + private long addNodeTimeoutTime; + + List finalNodeIds = new ArrayList<>(); + + public KubernetesClusterAddWorker(KubernetesCluster kubernetesCluster, KubernetesClusterManagerImpl clusterManager) { + super(kubernetesCluster, clusterManager); + } + + public boolean addNodesToCluster(List nodeIds, boolean mountCksIsoOnVr, boolean manualUpgrade) throws CloudRuntimeException { + try { + init(); + addNodeTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterAddNodeTimeout.value() * 1000; + Long networkId = kubernetesCluster.getNetworkId(); + Network network = networkDao.findById(networkId); + if (Objects.isNull(network)) { + throw new CloudRuntimeException(String.format("Failed to find network with id: %s", networkId)); + } + templateDao.findById(kubernetesCluster.getTemplateId()); + IpAddress publicIp = null; + try { + publicIp = getPublicIp(network); + } catch (ManagementServerException e) { + throw new CloudRuntimeException(String.format("Failed to retrieve public IP for the network: %s ", network.getName())); + } + attachCksIsoForNodesAdditionToCluster(nodeIds, kubernetesCluster.getId(), mountCksIsoOnVr); + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.AddNodeRequested); + String controlNodeGuestIp = getControlVmPrivateIp(); + Ternary nodesAddedAndMemory = importNodeToCluster(nodeIds, network, publicIp, controlNodeGuestIp, mountCksIsoOnVr); + int nodesAdded = 
nodesAddedAndMemory.first(); + updateKubernetesCluster(kubernetesCluster.getId(), nodesAddedAndMemory, manualUpgrade); + if (nodeIds.size() != nodesAdded) { + String msg = String.format("Not every node was added to the CKS cluster %s, nodes added: %s out of %s", kubernetesCluster.getUuid(), nodesAdded, nodeIds.size()); + logger.info(msg); + detachCksIsoFromNodesAddedToCluster(nodeIds, kubernetesCluster.getId(), mountCksIsoOnVr); + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), + EventVO.LEVEL_ERROR, KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_NODES_ADD, + msg, kubernetesCluster.getId(), ApiCommandResourceType.KubernetesCluster.toString(), 0); + return false; + } + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + KubernetesClusterUtil.validateKubernetesClusterReadyNodesCount(kubernetesCluster, publicIpSshPort.first(), publicIpSshPort.second(), + getControlNodeLoginUser(), sshKeyFile, addNodeTimeoutTime, 15000); + detachCksIsoFromNodesAddedToCluster(nodeIds, kubernetesCluster.getId(), mountCksIsoOnVr); + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); + String description = String.format("Successfully added %s nodes to Kubernetes Cluster %s", nodesAdded, kubernetesCluster.getUuid()); + ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), + EventVO.LEVEL_INFO, KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_NODES_ADD, + description, kubernetesCluster.getId(), ApiCommandResourceType.KubernetesCluster.toString(), 0); + return true; + } catch (Exception e) { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + throw new CloudRuntimeException(e); + } + } + + private void detachCksIsoFromNodesAddedToCluster(List nodeIds, long 
kubernetesClusterId, boolean mountCksIsoOnVr) { + if (mountCksIsoOnVr) { + detachIsoOnVirtualRouter(kubernetesClusterId); + } else { + logger.info("Detaching CKS ISO from the nodes"); + List vms = nodeIds.stream().map(nodeId -> userVmDao.findById(nodeId)).collect(Collectors.toList()); + detachIsoKubernetesVMs(vms); + } + } + + public void detachIsoOnVirtualRouter(Long kubernetesClusterId) { + KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId); + Long virtualRouterId = getVirtualRouterNicOnKubernetesClusterNetwork(kubernetesCluster).getInstanceId(); + long isoId = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()).getIsoId(); + + try { + networkService.handleCksIsoOnNetworkVirtualRouter(virtualRouterId, false); + } catch (ResourceUnavailableException e) { + String err = String.format("Error trying to handle ISO %s on virtual router %s", isoId, virtualRouterId); + logger.error(err); + throw new CloudRuntimeException(err); + } + + try { + templateService.detachIso(virtualRouterId, isoId, true, true); + } catch (CloudRuntimeException e) { + String err = String.format("Error trying to detach ISO %s from virtual router %s", isoId, virtualRouterId); + logger.error(err, e); + } + } + + public void attachCksIsoForNodesAdditionToCluster(List nodeIds, Long kubernetesClusterId, boolean mountCksIsoOnVr) { + if (mountCksIsoOnVr) { + attachAndServeIsoOnVirtualRouter(kubernetesClusterId); + } else { + logger.info("Attaching CKS ISO to the nodes"); + List vms = nodeIds.stream().map(nodeId -> userVmDao.findById(nodeId)).collect(Collectors.toList()); + attachIsoKubernetesVMs(vms); + } + } + + public void attachAndServeIsoOnVirtualRouter(Long kubernetesClusterId) { + KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId); + Long virtualRouterId = getVirtualRouterNicOnKubernetesClusterNetwork(kubernetesCluster).getInstanceId(); + long isoId = 
kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()).getIsoId(); + + try { + templateService.attachIso(isoId, virtualRouterId, true, true); + } catch (CloudRuntimeException e) { + String err = String.format("Error trying to attach ISO %s to virtual router %s", isoId, virtualRouterId); + logger.error(err); + throw new CloudRuntimeException(err); + } + + try { + networkService.handleCksIsoOnNetworkVirtualRouter(virtualRouterId, true); + } catch (ResourceUnavailableException e) { + String err = String.format("Error trying to handle ISO %s on virtual router %s", isoId, virtualRouterId); + logger.error(err); + throw new CloudRuntimeException(err); + } + } + + private Ternary importNodeToCluster(List nodeIds, Network network, IpAddress publicIp, + String controlNodeGuestIp, boolean mountCksIsoOnVr) { + int nodeIndex = 0; + Long additionalMemory = 0L; + Long additionalCores = 0L; + for (Long nodeId : nodeIds) { + UserVmVO vm = userVmDao.findById(nodeId); + String k8sControlNodeConfig = null; + try { + k8sControlNodeConfig = getKubernetesNodeConfig(controlNodeGuestIp, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType()), mountCksIsoOnVr); + } catch (IOException e) { + logAndThrow(Level.ERROR, "Failed to read Kubernetes control node configuration file", e); + } + if (Objects.isNull(k8sControlNodeConfig)) { + logAndThrow(Level.ERROR, "Error generating worker node configuration"); + } + String base64UserData = Base64.encodeBase64String(k8sControlNodeConfig.getBytes(com.cloud.utils.StringUtils.getPreferredCharset())); + + Pair result = validateAndSetupNode(network, publicIp, owner, nodeId, nodeIndex, base64UserData); + if (Boolean.TRUE.equals(result.first())) { + ServiceOfferingVO offeringVO = serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId()); + additionalMemory += offeringVO.getRamSize(); + additionalCores += offeringVO.getCpu(); + String msg = String.format("VM %s added as a node on the Kubernetes Cluster 
%s", vm.getUuid(), kubernetesCluster.getUuid()); + ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), + EventVO.LEVEL_INFO, KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_NODES_ADD, + msg, vm.getId(), ApiCommandResourceType.VirtualMachine.toString(), 0); + } + if (Boolean.FALSE.equals(result.first())) { + logger.error(String.format("Failed to add node %s [%s] to Kubernetes cluster : %s", vm.getName(), vm.getUuid(), kubernetesCluster.getName())); + } + if (System.currentTimeMillis() > addNodeTimeoutTime) { + logger.error(String.format("Failed to add node %s to Kubernetes cluster : %s", nodeId, kubernetesCluster.getName())); + } + nodeIndex = result.second(); + } + return new Ternary<>(nodeIndex, additionalMemory, additionalCores); + } + + private Pair validateAndSetupNode(Network network, IpAddress publicIp, Account account, + Long nodeId, int nodeIndex, String base64UserData) { + int startSshPortNumber = KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT + (int) kubernetesCluster.getTotalNodeCount() - kubernetesCluster.getEtcdNodeCount().intValue(); + int sshStartPort = startSshPortNumber + nodeIndex; + try { + if (Objects.isNull(network.getVpcId())) { + provisionFirewallRules(publicIp, owner, sshStartPort, sshStartPort); + } + provisionPublicIpPortForwardingRule(publicIp, network, account, nodeId, sshStartPort, DEFAULT_SSH_PORT); + boolean isCompatible = validateNodeCompatibility(publicIp, nodeId, sshStartPort); + if (!isCompatible) { + revertNetworkRules(network, nodeId, sshStartPort); + return new Pair<>(false, nodeIndex); + } + + userVmManager.updateVirtualMachine(nodeId, null, null, null, null, + null, null, base64UserData, null, null, null, + BaseCmd.HTTPMethod.POST, null, null, null, null, null); + + RebootVMCmd rebootVMCmd = new RebootVMCmd(); + Field idField = rebootVMCmd.getClass().getDeclaredField("id"); + idField.setAccessible(true); + 
idField.set(rebootVMCmd, nodeId); + userVmService.rebootVirtualMachine(rebootVMCmd); + finalNodeIds.add(nodeId); + } catch (ResourceUnavailableException | NetworkRuleConflictException | NoSuchFieldException | + InsufficientCapacityException | IllegalAccessException e) { + logger.error(String.format("Failed to activate API port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName())); + // remove added Firewall and PF rules + revertNetworkRules(network, nodeId, sshStartPort); + return new Pair<>( false, nodeIndex); + } catch (Exception e) { + String errMsg = String.format("Unexpected exception while trying to add the external node %s to the Kubernetes cluster %s: %s", + nodeId, kubernetesCluster.getName(), e.getMessage()); + logger.error(errMsg, e); + revertNetworkRules(network, nodeId, sshStartPort); + throw new CloudRuntimeException(e); + } + return new Pair<>(true, ++nodeIndex); + } + + private void updateKubernetesCluster(long clusterId, Ternary additionalNodesDetails, boolean manualUpgrade) { + int additionalNodeCount = additionalNodesDetails.first(); + KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(clusterId); + kubernetesClusterVO.setNodeCount(kubernetesClusterVO.getNodeCount() + additionalNodeCount); + kubernetesClusterVO.setMemory(kubernetesClusterVO.getMemory() + additionalNodesDetails.second()); + kubernetesClusterVO.setCores(kubernetesClusterVO.getCores() + additionalNodesDetails.third()); + kubernetesClusterDao.update(clusterId, kubernetesClusterVO); + kubernetesCluster = kubernetesClusterVO; + + finalNodeIds.forEach(id -> addKubernetesClusterVm(clusterId, id, false, true, false, manualUpgrade)); + } + + + private boolean validateNodeCompatibility(IpAddress publicIp, long nodeId, int nodeSshPort) throws CloudRuntimeException { + File pkFile = getManagementServerSshPublicKeyFile(); + try { + File validateNodeScriptFile = retrieveScriptFile(validateNodeScript); + Thread.sleep(15*1000); + 
copyScriptFile(publicIp.getAddress().addr(), nodeSshPort, validateNodeScriptFile, validateNodeScript); + String command = String.format("%s%s", scriptPath, validateNodeScript); + Pair result = SshHelper.sshExecute(publicIp.getAddress().addr(), nodeSshPort, getControlNodeLoginUser(), + pkFile, null, command, 10000, 10000, 10 * 60 * 1000); + if (Boolean.FALSE.equals(result.first())) { + logger.error(String.format("Node with ID: %s cannot be added as a worker node as it does not have " + + "the following dependencies: %s ", nodeId, result.second())); + return false; + } + } catch (Exception e) { + logger.error(String.format("Failed to validate node with ID: %s", nodeId), e); + return false; + } + UserVmVO userVm = userVmDao.findById(nodeId); + cleanupCloudInitSemFolder(userVm, publicIp, pkFile, nodeSshPort); + return true; + } + + private void cleanupCloudInitSemFolder(UserVm userVm, IpAddress publicIp, File pkFile, int nodeSshPort) { + try { + String command = String.format("sudo rm -rf /var/lib/cloud/instances/%s/sem/*", userVm.getUuid()); + Pair result = SshHelper.sshExecute(publicIp.getAddress().addr(), nodeSshPort, getControlNodeLoginUser(), + pkFile, null, command, 10000, 10000, 10 * 60 * 1000); + if (Boolean.FALSE.equals(result.first())) { + logger.error(String.format("Failed to cleanup previous applied userdata on node: %s; This may hamper to addition of the node to the cluster ", userVm.getName())); + } + } catch (Exception e) { + logger.error(String.format("Failed to cleanup previous applied userdata on node: %s; This may hamper to addition of the node to the cluster ", userVm.getName()), e); + } + } + + private void revertNetworkRules(Network network, long vmId, int port) { + logger.debug(String.format("Reverting network rules for VM ID %s on network %s", vmId, network.getName())); + FirewallRuleVO ruleVO = firewallRulesDao.findByNetworkIdAndPorts(network.getId(), port, port); + if (Objects.isNull(network.getVpcId())) { + 
logger.debug(String.format("Removing firewall rule %s", ruleVO.getId())); + firewallService.revokeIngressFirewallRule(ruleVO.getId(), true); + } + List pfRules = portForwardingRulesDao.listByVm(vmId); + for (PortForwardingRuleVO pfRule : pfRules) { + logger.debug(String.format("Removing port forwarding rule %s", pfRule.getId())); + rulesService.revokePortForwardingRule(pfRule.getId(), true); + } + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java index fc80c3001810..3d1b7c67b526 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java @@ -23,6 +23,10 @@ import javax.inject.Inject; +import com.cloud.bgp.BGPService; +import com.cloud.dc.ASNumberVO; +import com.cloud.dc.DataCenter; +import com.cloud.dc.dao.ASNumberDao; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -63,6 +67,10 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod protected AccountManager accountManager; @Inject private AnnotationDao annotationDao; + @Inject + private ASNumberDao asNumberDao; + @Inject + private BGPService bgpService; private List clusterVMs; @@ -131,6 +139,7 @@ private void destroyKubernetesClusterNetwork() throws ManagementServerException Account owner = accountManager.getAccount(network.getAccountId()); User callerUser = accountManager.getActiveUser(CallContext.current().getCallingUserId()); ReservationContext context = new ReservationContextImpl(null, null, 
callerUser, owner); + releaseASNumber(kubernetesCluster.getZoneId(), kubernetesCluster.getNetworkId()); boolean networkDestroyed = networkMgr.destroyNetwork(kubernetesCluster.getNetworkId(), context, true); if (!networkDestroyed) { String msg = String.format("Failed to destroy network: %s as part of Kubernetes cluster: %s cleanup", network, kubernetesCluster); @@ -143,6 +152,15 @@ private void destroyKubernetesClusterNetwork() throws ManagementServerException } } + private void releaseASNumber(Long zoneId, long networkId) { + DataCenter zone = dataCenterDao.findById(zoneId); + ASNumberVO asNumber = asNumberDao.findByZoneAndNetworkId(zone.getId(), networkId); + if (asNumber != null) { + logger.debug(String.format("Releasing AS number %s from network %s", asNumber.getAsNumber(), networkId)); + bgpService.releaseASNumber(zone.getId(), asNumber.getAsNumber(), true); + } + } + protected void deleteKubernetesClusterIsolatedNetworkRules(Network network, List removedVmIds) throws ManagementServerException { IpAddress publicIp = getNetworkSourceNatIp(network); if (publicIp == null) { @@ -157,7 +175,7 @@ protected void deleteKubernetesClusterIsolatedNetworkRules(Network network, List if (firewallRule == null) { logMessage(Level.WARN, "Firewall rule for API access can't be removed", null); } - firewallRule = removeSshFirewallRule(publicIp); + firewallRule = removeSshFirewallRule(publicIp, network.getId()); if (firewallRule == null) { logMessage(Level.WARN, "Firewall rule for SSH access can't be removed", null); } @@ -252,6 +270,12 @@ public boolean destroy() throws CloudRuntimeException { } if (cleanupNetwork) { // if network has additional VM, cannot proceed with cluster destroy NetworkVO network = networkDao.findById(kubernetesCluster.getNetworkId()); + List externalNodes = clusterVMs.stream().filter(KubernetesClusterVmMapVO::isExternalNode).collect(Collectors.toList()); + if (!externalNodes.isEmpty()) { + String errMsg = String.format("Failed to delete kubernetes cluster 
%s as there are %s external node(s) present. Please remove the external node(s) from the cluster (and network) or delete them before deleting the cluster.", kubernetesCluster.getName(), externalNodes.size()); + logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } if (network != null) { List networkVMs = vmInstanceDao.listNonRemovedVmsByTypeAndNetwork(network.getId(), VirtualMachine.Type.User); if (networkVMs.size() > clusterVMs.size()) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterRemoveWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterRemoveWorker.java new file mode 100644 index 000000000000..c76609686a6c --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterRemoveWorker.java @@ -0,0 +1,182 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.kubernetes.cluster.actionworkers; + +import com.cloud.event.ActionEventUtils; +import com.cloud.event.EventVO; +import com.cloud.exception.ManagementServerException; +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes; +import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import com.cloud.kubernetes.cluster.KubernetesClusterVO; +import com.cloud.network.IpAddress; +import com.cloud.network.Network; +import com.cloud.network.dao.FirewallRulesDao; +import com.cloud.network.rules.FirewallRuleVO; +import com.cloud.network.rules.PortForwardingRuleVO; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.ssh.SshHelper; +import com.cloud.vm.UserVmVO; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.context.CallContext; + +import javax.inject.Inject; +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +public class KubernetesClusterRemoveWorker extends KubernetesClusterActionWorker { + + @Inject + private FirewallRulesDao firewallRulesDao; + + private long removeNodeTimeoutTime; + + public KubernetesClusterRemoveWorker(KubernetesCluster kubernetesCluster, KubernetesClusterManagerImpl clusterManager) { + super(kubernetesCluster, clusterManager); + } + + public boolean removeNodesFromCluster(List nodeIds) { + init(); + removeNodeTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterRemoveNodeTimeout.value() * 1000; + Long networkId = kubernetesCluster.getNetworkId(); + Network network = networkDao.findById(networkId); + if (Objects.isNull(network)) { + throw new CloudRuntimeException(String.format("Failed to find network with id: %s", networkId)); + } + IpAddress 
publicIp = null; + try { + publicIp = getPublicIp(network); + } catch (ManagementServerException e) { + throw new CloudRuntimeException(String.format("Failed to retrieve public IP for the network: %s ", network.getName())); + } + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.RemoveNodeRequested); + boolean result = removeNodesFromCluster(nodeIds, network, publicIp); + if (!result) { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } else { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); + } + String description = String.format("Successfully removed %s nodes from the Kubernetes Cluster %s", nodeIds.size(), kubernetesCluster.getUuid()); + ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), + EventVO.LEVEL_INFO, KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_NODES_REMOVE, + description, kubernetesCluster.getId(), ApiCommandResourceType.KubernetesCluster.toString(), 0); + return result; + } + + private boolean removeNodesFromCluster(List nodeIds, Network network, IpAddress publicIp) { + boolean result = true; + List removedNodeIds = new ArrayList<>(); + long removedMemory = 0L; + long removedCores = 0L; + for (Long nodeId : nodeIds) { + UserVmVO vm = userVmDao.findById(nodeId); + if (vm == null) { + logger.debug(String.format("Couldn't find a VM with ID %s, skipping removal from Kubernetes cluster", nodeId)); + continue; + } + try { + removeNodeVmFromCluster(nodeId, vm.getDisplayName(), publicIp.getAddress().addr()); + result &= removeNodePortForwardingRules(nodeId, network, vm); + if (System.currentTimeMillis() > removeNodeTimeoutTime) { + logger.error(String.format("Removal of node %s from Kubernetes cluster %s timed out", vm.getName(), kubernetesCluster.getName())); + result = false; + continue; + } + ServiceOfferingVO offeringVO = serviceOfferingDao.findById(vm.getId(), 
vm.getServiceOfferingId()); + removedNodeIds.add(nodeId); + removedMemory += offeringVO.getRamSize(); + removedCores += offeringVO.getCpu(); + String description = String.format("Successfully removed the node %s from Kubernetes cluster %s", vm.getUuid(), kubernetesCluster.getUuid()); + logger.info(description); + ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), + EventVO.LEVEL_INFO, KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_NODES_REMOVE, + description, vm.getId(), ApiCommandResourceType.VirtualMachine.toString(), 0); + } catch (Exception e) { + String err = String.format("Error trying to remove node %s from Kubernetes Cluster %s: %s", vm.getUuid(), kubernetesCluster.getUuid(), e.getMessage()); + logger.error(err, e); + result = false; + } + } + updateKubernetesCluster(kubernetesCluster.getId(), removedNodeIds, removedMemory, removedCores); + return result; + } + + protected boolean removeNodePortForwardingRules(Long nodeId, Network network, UserVmVO vm) { + List pfRules = portForwardingRulesDao.listByVm(nodeId); + boolean result = true; + for (PortForwardingRuleVO pfRule : pfRules) { + try { + result &= rulesService.revokePortForwardingRule(pfRule.getId(), true); + if (Objects.isNull(network.getVpcId())) { + FirewallRuleVO ruleVO = firewallRulesDao.findByNetworkIdAndPorts(network.getId(), pfRule.getSourcePortStart(), pfRule.getSourcePortEnd()); + result &= firewallService.revokeIngressFirewallRule(ruleVO.getId(), true); + } + } catch (Exception e) { + String err = String.format("Failed to cleanup network rules for node %s, due to: %s", vm.getName(), e.getMessage()); + logger.error(err, e); + } + } + return result; + } + + private void removeNodeVmFromCluster(Long nodeId, String nodeName, String publicIp) throws Exception { + File removeNodeScriptFile = retrieveScriptFile(removeNodeFromClusterScript); + copyScriptFile(publicIp, CLUSTER_NODES_DEFAULT_START_SSH_PORT, 
removeNodeScriptFile, removeNodeFromClusterScript); + File pkFile = getManagementServerSshPublicKeyFile(); + String command = String.format("%s%s %s %s %s", scriptPath, removeNodeFromClusterScript, nodeName, "control", "remove"); + Pair result = SshHelper.sshExecute(publicIp, CLUSTER_NODES_DEFAULT_START_SSH_PORT, getControlNodeLoginUser(), + pkFile, null, command, 10000, 10000, 10 * 60 * 1000); + if (Boolean.FALSE.equals(result.first())) { + logger.error(String.format("Node: %s failed to be gracefully drained as a worker node from cluster %s ", nodeName, kubernetesCluster.getName())); + } + List nodePfRules = portForwardingRulesDao.listByVm(nodeId); + Optional nodeSshPort = nodePfRules.stream().filter(rule -> rule.getDestinationPortStart() == DEFAULT_SSH_PORT + && rule.getVirtualMachineId() == nodeId && rule.getSourcePortStart() >= CLUSTER_NODES_DEFAULT_START_SSH_PORT).findFirst(); + if (nodeSshPort.isPresent()) { + copyScriptFile(publicIp, nodeSshPort.get().getSourcePortStart(), removeNodeScriptFile, removeNodeFromClusterScript); + command = String.format("sudo %s%s %s %s %s", scriptPath, removeNodeFromClusterScript, nodeName, "worker", "remove"); + result = SshHelper.sshExecute(publicIp, nodeSshPort.get().getSourcePortStart(), getControlNodeLoginUser(), + pkFile, null, command, 10000, 10000, 10 * 60 * 1000); + if (Boolean.FALSE.equals(result.first())) { + logger.error(String.format("Failed to reset node: %s from cluster %s ", nodeName, kubernetesCluster.getName())); + } + command = String.format("%s%s %s %s %s", scriptPath, removeNodeFromClusterScript, nodeName, "control", "delete"); + result = SshHelper.sshExecute(publicIp, CLUSTER_NODES_DEFAULT_START_SSH_PORT, getControlNodeLoginUser(), + pkFile, null, command, 10000, 10000, 10 * 60 * 1000); + if (Boolean.FALSE.equals(result.first())) { + logger.error(String.format("Node: %s failed to be gracefully delete node from cluster %s ", nodeName, kubernetesCluster.getName())); + } + + } + } + + private void 
updateKubernetesCluster(long clusterId, List nodesRemoved, long deallocatedRam, long deallocatedCores) { + KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(clusterId); + kubernetesClusterVO.setNodeCount(kubernetesClusterVO.getNodeCount() - nodesRemoved.size()); + kubernetesClusterVO.setMemory(kubernetesClusterVO.getMemory() - deallocatedRam); + kubernetesClusterVO.setCores(kubernetesClusterVO.getCores() - deallocatedCores); + kubernetesClusterDao.update(clusterId, kubernetesClusterVO); + + nodesRemoved.forEach(id -> kubernetesClusterVmMapDao.removeByClusterIdAndVmIdsIn(clusterId, nodesRemoved)); + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index 8c983149d02d..b84559797f6c 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -17,23 +17,37 @@ package com.cloud.kubernetes.cluster.actionworkers; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.CONTROL; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.ETCD; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.WORKER; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; +import static com.cloud.utils.db.Transaction.execute; import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import 
java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; import javax.inject.Inject; +import com.cloud.deploy.DataCenterDeployment; +import com.cloud.deploy.DeploymentPlan; +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.dao.DedicatedResourceDao; +import com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType; +import com.cloud.network.rules.RulesService; +import com.cloud.network.rules.dao.PortForwardingRulesDao; import com.cloud.network.rules.FirewallManager; import com.cloud.offering.NetworkOffering; import com.cloud.offerings.dao.NetworkOfferingDao; -import org.apache.cloudstack.api.ApiConstants; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.net.Ip; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.command.user.firewall.CreateFirewallRuleCmd; import org.apache.cloudstack.api.command.user.network.CreateNetworkACLCmd; @@ -64,23 +78,18 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.kubernetes.cluster.KubernetesCluster; -import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO; import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; import com.cloud.kubernetes.cluster.KubernetesClusterVO; -import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil; import com.cloud.network.IpAddress; import com.cloud.network.Network; import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.dao.LoadBalancerDao; import com.cloud.network.dao.LoadBalancerVO; -import com.cloud.network.firewall.FirewallService; import com.cloud.network.lb.LoadBalancingRulesService; import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.FirewallRuleVO; import com.cloud.network.rules.LoadBalancer; import com.cloud.network.rules.PortForwardingRuleVO; -import com.cloud.network.rules.RulesService; -import com.cloud.network.rules.dao.PortForwardingRulesDao; import com.cloud.network.vpc.NetworkACL; import 
com.cloud.network.vpc.NetworkACLItem; import com.cloud.network.vpc.NetworkACLItemDao; @@ -94,15 +103,12 @@ import com.cloud.storage.dao.LaunchPermissionDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; -import com.cloud.user.SSHKeyPairVO; import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; import com.cloud.utils.component.ComponentContext; -import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallback; import com.cloud.utils.db.TransactionCallbackWithException; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.Nic; @@ -127,8 +133,6 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu @Inject protected FirewallRulesDao firewallRulesDao; @Inject - protected FirewallService firewallService; - @Inject protected NetworkACLService networkACLService; @Inject protected NetworkACLItemDao networkACLItemDao; @@ -143,6 +147,8 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu @Inject protected ResourceManager resourceManager; @Inject + protected DedicatedResourceDao dedicatedResourceDao; + @Inject protected LoadBalancerDao loadBalancerDao; @Inject protected VMInstanceDao vmInstanceDao; @@ -168,81 +174,37 @@ protected void init() { kubernetesClusterNodeNamePrefix = getKubernetesClusterNodeNamePrefix(); } - private String getKubernetesNodeConfig(final String joinIp, final boolean ejectIso) throws IOException { - String k8sNodeConfig = readResourceFile("/conf/k8s-node.yml"); - final String sshPubKey = "{{ k8s.ssh.pub.key }}"; - final String joinIpKey = "{{ k8s_control_node.join_ip }}"; - final String clusterTokenKey = "{{ k8s_control_node.cluster.token }}"; - final String ejectIsoKey = "{{ k8s.eject.iso }}"; - - String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\""; - String sshKeyPair = 
kubernetesCluster.getKeyPair(); - if (StringUtils.isNotEmpty(sshKeyPair)) { - SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair); - if (sshkp != null) { - pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; - } - } - k8sNodeConfig = k8sNodeConfig.replace(sshPubKey, pubKey); - k8sNodeConfig = k8sNodeConfig.replace(joinIpKey, joinIp); - k8sNodeConfig = k8sNodeConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); - k8sNodeConfig = k8sNodeConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); - k8sNodeConfig = updateKubeConfigWithRegistryDetails(k8sNodeConfig); - - return k8sNodeConfig; - } - - protected String updateKubeConfigWithRegistryDetails(String k8sConfig) { - /* genarate /etc/containerd/config.toml file on the nodes only if Kubernetes cluster is created to - * use docker private registry */ - String registryUsername = null; - String registryPassword = null; - String registryUrl = null; - - List details = kubernetesClusterDetailsDao.listDetails(kubernetesCluster.getId()); - for (KubernetesClusterDetailsVO detail : details) { - if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_USER_NAME)) { - registryUsername = detail.getValue(); - } - if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_PASSWORD)) { - registryPassword = detail.getValue(); + protected DeployDestination plan(final long nodesCount, final DataCenter zone, final ServiceOffering offering, + final Long domainId, final Long accountId, final Hypervisor.HypervisorType hypervisorType) throws InsufficientServerCapacityException { + final int cpu_requested = offering.getCpu() * offering.getSpeed(); + final long ram_requested = offering.getRamSize() * 1024L * 1024L; + boolean useDedicatedHosts = false; + List hosts = new ArrayList<>(); + Long group = getExplicitAffinityGroup(domainId, accountId); + if (Objects.nonNull(group)) { + List dedicatedHosts = new ArrayList<>(); + if (Objects.nonNull(accountId)) { + 
dedicatedHosts = dedicatedResourceDao.listByAccountId(accountId); + } else if (Objects.nonNull(domainId)) { + dedicatedHosts = dedicatedResourceDao.listByDomainId(domainId); } - if (detail.getName().equals(ApiConstants.DOCKER_REGISTRY_URL)) { - registryUrl = detail.getValue(); + for (DedicatedResourceVO dedicatedHost : dedicatedHosts) { + hosts.add(hostDao.findById(dedicatedHost.getHostId())); + useDedicatedHosts = true; } } - - if (StringUtils.isNoneEmpty(registryUsername, registryPassword, registryUrl)) { - // Update runcmd in the cloud-init configuration to run a script that updates the containerd config with provided registry details - String runCmd = "- bash -x /opt/bin/setup-containerd"; - - String registryEp = registryUrl.split("://")[1]; - k8sConfig = k8sConfig.replace("- containerd config default > /etc/containerd/config.toml", runCmd); - final String registryUrlKey = "{{registry.url}}"; - final String registryUrlEpKey = "{{registry.url.endpoint}}"; - final String registryAuthKey = "{{registry.token}}"; - final String registryUname = "{{registry.username}}"; - final String registryPsswd = "{{registry.password}}"; - - final String usernamePasswordKey = registryUsername + ":" + registryPassword; - String base64Auth = Base64.encodeBase64String(usernamePasswordKey.getBytes(com.cloud.utils.StringUtils.getPreferredCharset())); - k8sConfig = k8sConfig.replace(registryUrlKey, registryUrl); - k8sConfig = k8sConfig.replace(registryUrlEpKey, registryEp); - k8sConfig = k8sConfig.replace(registryUname, registryUsername); - k8sConfig = k8sConfig.replace(registryPsswd, registryPassword); - k8sConfig = k8sConfig.replace(registryAuthKey, base64Auth); + if (hosts.isEmpty()) { + hosts = resourceManager.listAllHostsInOneZoneByType(Host.Type.Routing, zone.getId()); + } + if (hypervisorType != null) { + hosts = hosts.stream().filter(x -> x.getHypervisorType() == hypervisorType).collect(Collectors.toList()); } - return k8sConfig; - } - protected DeployDestination plan(final long 
nodesCount, final DataCenter zone, final ServiceOffering offering) throws InsufficientServerCapacityException { - final int cpu_requested = offering.getCpu() * offering.getSpeed(); - final long ram_requested = offering.getRamSize() * 1024L * 1024L; - List hosts = resourceManager.listAllHostsInOneZoneByType(Host.Type.Routing, zone.getId()); final Map> hosts_with_resevered_capacity = new ConcurrentHashMap>(); for (HostVO h : hosts) { hosts_with_resevered_capacity.put(h.getUuid(), new Pair(h, 0)); } boolean suitable_host_found = false; + HostVO suitableHost = null; for (int i = 1; i <= nodesCount; i++) { suitable_host_found = false; for (Map.Entry> hostEntry : hosts_with_resevered_capacity.entrySet()) { @@ -269,6 +231,7 @@ protected DeployDestination plan(final long nodesCount, final DataCenter zone, f logger.debug("Found host {} with enough capacity: CPU={} RAM={}", h.getName(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved)); hostEntry.setValue(new Pair(h, reserved)); suitable_host_found = true; + suitableHost = h; break; } } @@ -284,6 +247,9 @@ protected DeployDestination plan(final long nodesCount, final DataCenter zone, f if (logger.isInfoEnabled()) { logger.info("Suitable hosts found in datacenter: {}, creating deployment destination", zone); } + if (useDedicatedHosts) { + return new DeployDestination(zone, null, null, suitableHost); + } return new DeployDestination(zone, null, null, null); } String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering: %s and hypervisor: %s", @@ -293,13 +259,35 @@ protected DeployDestination plan(final long nodesCount, final DataCenter zone, f throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId()); } - protected DeployDestination plan() throws InsufficientServerCapacityException { - ServiceOffering offering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + /** + * Plan Kubernetes Cluster 
Deployment + * @return a map of DeployDestination per node type + */ + protected Map planKubernetesCluster(Long domainId, Long accountId, Hypervisor.HypervisorType hypervisorType) throws InsufficientServerCapacityException { + Map destinationMap = new HashMap<>(); DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); if (logger.isDebugEnabled()) { logger.debug("Checking deployment destination for Kubernetes cluster: {} in zone: {}", kubernetesCluster, zone); } - return plan(kubernetesCluster.getTotalNodeCount(), zone, offering); + long controlNodeCount = kubernetesCluster.getControlNodeCount(); + long clusterSize = kubernetesCluster.getNodeCount(); + long etcdNodes = kubernetesCluster.getEtcdNodeCount(); + Map nodeTypeCount = Map.of(WORKER.name(), clusterSize, + CONTROL.name(), controlNodeCount, ETCD.name(), etcdNodes); + + for (KubernetesClusterNodeType nodeType : CLUSTER_NODES_TYPES_LIST) { + Long nodes = nodeTypeCount.getOrDefault(nodeType.name(), kubernetesCluster.getServiceOfferingId()); + if (nodes == null || nodes == 0) { + continue; + } + ServiceOffering nodeOffering = getServiceOfferingForNodeTypeOnCluster(nodeType, kubernetesCluster); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Checking deployment destination for %s nodes on Kubernetes cluster : %s in zone : %s", nodeType.name(), kubernetesCluster.getName(), zone.getName())); + } + DeployDestination planForNodeType = plan(nodes, zone, nodeOffering, domainId, accountId, hypervisorType); + destinationMap.put(nodeType.name(), planForNodeType); + } + return destinationMap; } protected void resizeNodeVolume(final UserVm vm) throws ManagementServerException { @@ -322,14 +310,33 @@ protected void resizeNodeVolume(final UserVm vm) throws ManagementServerExceptio } } - protected void startKubernetesVM(final UserVm vm) throws ManagementServerException { + protected void startKubernetesVM(final UserVm vm, final Long domainId, final Long accountId, KubernetesClusterNodeType 
nodeType) throws ManagementServerException { CallContext vmContext = null; if (!ApiCommandResourceType.VirtualMachine.equals(CallContext.current().getEventResourceType())); { vmContext = CallContext.register(CallContext.current(), ApiCommandResourceType.VirtualMachine); vmContext.setEventResourceId(vm.getId()); } + DeploymentPlan plan = null; + if (Objects.nonNull(domainId) && !listDedicatedHostsInDomain(domainId).isEmpty()) { + DeployDestination dest = null; + try { + Map destinationMap = planKubernetesCluster(domainId, accountId, vm.getHypervisorType()); + dest = destinationMap.get(nodeType.name()); + } catch (InsufficientCapacityException e) { + logTransitStateAndThrow(Level.ERROR, String.format("Provisioning the cluster failed due to insufficient capacity in the Kubernetes cluster: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); + } + if (dest != null) { + plan = new DataCenterDeployment( + Objects.nonNull(dest.getDataCenter()) ? dest.getDataCenter().getId() : 0, + Objects.nonNull(dest.getPod()) ? dest.getPod().getId() : null, + Objects.nonNull(dest.getCluster()) ? dest.getCluster().getId() : null, + Objects.nonNull(dest.getHost()) ? 
dest.getHost().getId() : null, + null, + null); + } + } try { - userVmManager.startVirtualMachine(vm); + userVmManager.startVirtualMachine(vm, plan); } catch (OperationTimedoutException | ResourceUnavailableException | InsufficientCapacityException ex) { throw new ManagementServerException(String.format("Failed to start VM in the Kubernetes cluster : %s", kubernetesCluster.getName()), ex); } finally { @@ -344,19 +351,20 @@ protected void startKubernetesVM(final UserVm vm) throws ManagementServerExcepti } } - protected List provisionKubernetesClusterNodeVms(final long nodeCount, final int offset, final String publicIpAddress) throws ManagementServerException, + protected List provisionKubernetesClusterNodeVms(final long nodeCount, final int offset, + final String controlIpAddress, final Long domainId, final Long accountId) throws ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { List nodes = new ArrayList<>(); for (int i = offset + 1; i <= nodeCount; i++) { CallContext vmContext = CallContext.register(CallContext.current(), ApiCommandResourceType.VirtualMachine); try { - UserVm vm = createKubernetesNode(publicIpAddress); + UserVm vm = createKubernetesNode(controlIpAddress, domainId, accountId); vmContext.setEventResourceId(vm.getId()); - addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId(), false); + addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId(), false, false, false, false); if (kubernetesCluster.getNodeRootDiskSize() > 0) { resizeNodeVolume(vm); } - startKubernetesVM(vm); + startKubernetesVM(vm, domainId, accountId, WORKER); vm = userVmDao.findById(vm.getId()); if (vm == null) { throw new ManagementServerException(String.format("Failed to provision worker VM for Kubernetes cluster : %s", kubernetesCluster.getName())); @@ -370,16 +378,16 @@ protected List provisionKubernetesClusterNodeVms(final long nodeCount, f return nodes; } - protected List provisionKubernetesClusterNodeVms(final long nodeCount, final 
String publicIpAddress) throws ManagementServerException, + protected List provisionKubernetesClusterNodeVms(final long nodeCount, final String controlIpAddress, final Long domainId, final Long accountId) throws ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { - return provisionKubernetesClusterNodeVms(nodeCount, 0, publicIpAddress); + return provisionKubernetesClusterNodeVms(nodeCount, 0, controlIpAddress, domainId, accountId); } - protected UserVm createKubernetesNode(String joinIp) throws ManagementServerException, + protected UserVm createKubernetesNode(String joinIp, Long domainId, Long accountId) throws ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { UserVm nodeVm = null; DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); - ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + ServiceOffering serviceOffering = getServiceOfferingForNodeTypeOnCluster(WORKER, kubernetesCluster); List networkIds = new ArrayList(); networkIds.add(kubernetesCluster.getNetworkId()); Account owner = accountDao.findById(kubernetesCluster.getAccountId()); @@ -396,7 +404,7 @@ protected UserVm createKubernetesNode(String joinIp) throws ManagementServerExce String hostName = String.format("%s-node-%s", kubernetesClusterNodeNamePrefix, suffix); String k8sNodeConfig = null; try { - k8sNodeConfig = getKubernetesNodeConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); + k8sNodeConfig = getKubernetesNodeConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType()), false); } catch (IOException e) { logAndThrow(Level.ERROR, "Failed to read Kubernetes node configuration file", e); } @@ -406,18 +414,21 @@ protected UserVm createKubernetesNode(String joinIp) throws ManagementServerExce if (StringUtils.isNotBlank(kubernetesCluster.getKeyPair())) { 
keypairs.add(kubernetesCluster.getKeyPair()); } + Long affinityGroupId = getExplicitAffinityGroup(domainId, accountId); if (kubernetesCluster.getSecurityGroupId() != null && networkModel.checkSecurityGroupSupportForNetwork(owner, zone, networkIds, List.of(kubernetesCluster.getSecurityGroupId()))) { List securityGroupIds = new ArrayList<>(); securityGroupIds.add(kubernetesCluster.getSecurityGroupId()); - nodeVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, securityGroupIds, owner, + nodeVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, workerNodeTemplate, networkIds, securityGroupIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, null, null, keypairs, - null, addrs, null, null, null, customParameterMap, null, null, null, + null, addrs, null, null, Objects.nonNull(affinityGroupId) ? + Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, null, UserVmManager.CKS_NODE); } else { - nodeVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, + nodeVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, workerNodeTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, null, null, keypairs, - null, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null); + null, addrs, null, null, Objects.nonNull(affinityGroupId) ? 
+ Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null); } if (logger.isInfoEnabled()) { logger.info("Created node VM : {}, {} in the Kubernetes cluster : {}", hostName, nodeVm, kubernetesCluster.getName()); @@ -455,7 +466,7 @@ protected void provisionPublicIpPortForwardingRule(IpAddress publicIp, Network n final long domainId = account.getDomainId(); Nic vmNic = networkModel.getNicInNetwork(vmId, networkId); final Ip vmIp = new Ip(vmNic.getIPv4Address()); - PortForwardingRuleVO pfRule = Transaction.execute((TransactionCallbackWithException) status -> { + PortForwardingRuleVO pfRule = execute((TransactionCallbackWithException) status -> { PortForwardingRuleVO newRule = new PortForwardingRuleVO(null, publicIpId, sourcePort, sourcePort, @@ -487,11 +498,18 @@ protected void provisionPublicIpPortForwardingRule(IpAddress publicIp, Network n * @throws NetworkRuleConflictException */ protected void provisionSshPortForwardingRules(IpAddress publicIp, Network network, Account account, - List clusterVMIds) throws ResourceUnavailableException, + List clusterVMIds, Map vmIdPortMap) throws ResourceUnavailableException, NetworkRuleConflictException { if (!CollectionUtils.isEmpty(clusterVMIds)) { - for (int i = 0; i < clusterVMIds.size(); ++i) { - provisionPublicIpPortForwardingRule(publicIp, network, account, clusterVMIds.get(i), CLUSTER_NODES_DEFAULT_START_SSH_PORT + i, DEFAULT_SSH_PORT); + int defaultNodesCount = clusterVMIds.size() - vmIdPortMap.size(); + int sourcePort = CLUSTER_NODES_DEFAULT_START_SSH_PORT; + for (int i = 0; i < defaultNodesCount; ++i) { + sourcePort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + i; + provisionPublicIpPortForwardingRule(publicIp, network, account, clusterVMIds.get(i), sourcePort, DEFAULT_SSH_PORT); + } + for (int i = defaultNodesCount; i < clusterVMIds.size(); ++i) { + sourcePort += 1; + provisionPublicIpPortForwardingRule(publicIp, network, account, clusterVMIds.get(i), 
sourcePort, DEFAULT_SSH_PORT); } } } @@ -511,14 +529,15 @@ protected FirewallRule removeApiFirewallRule(final IpAddress publicIp) { return rule; } - protected FirewallRule removeSshFirewallRule(final IpAddress publicIp) { + protected FirewallRule removeSshFirewallRule(final IpAddress publicIp, final long networkId) { FirewallRule rule = null; List firewallRules = firewallRulesDao.listByIpAndPurposeAndNotRevoked(publicIp.getId(), FirewallRule.Purpose.Firewall); for (FirewallRuleVO firewallRule : firewallRules) { - if (firewallRule.getSourcePortStart() == CLUSTER_NODES_DEFAULT_START_SSH_PORT) { + PortForwardingRuleVO pfRule = portForwardingRulesDao.findByNetworkAndPorts(networkId, firewallRule.getSourcePortStart(), firewallRule.getSourcePortEnd()); + if (firewallRule.getSourcePortStart() == CLUSTER_NODES_DEFAULT_START_SSH_PORT || (Objects.nonNull(pfRule) && pfRule.getDestinationPortStart() == DEFAULT_SSH_PORT) ) { rule = firewallRule; firewallService.revokeIngressFwRule(firewallRule.getId(), true); - logger.debug("The SSH firewall rule [%s] with the id [%s] was revoked",firewallRule.getName(),firewallRule.getId()); + logger.debug("The SSH firewall rule {} with the id {} was revoked", firewallRule.getName(), firewallRule.getId()); break; } } @@ -537,7 +556,7 @@ protected void removePortForwardingRules(final IpAddress publicIp, final Network logger.trace("Marking PF rule {} with Revoke state", pfRule); pfRule.setState(FirewallRule.State.Revoke); revokedRules.add(pfRule); - logger.debug("The Port forwarding rule [%s] with the id [%s] was removed.", pfRule.getName(), pfRule.getId()); + logger.debug("The Port forwarding rule {} with the id {} was removed.", pfRule.getName(), pfRule.getId()); break; } } @@ -630,22 +649,11 @@ protected void provisionLoadBalancerRule(final IpAddress publicIp, final Network lbService.assignToLoadBalancer(lb.getId(), null, vmIdIpMap, false); } - protected void createFirewallRules(IpAddress publicIp, List clusterVMIds, boolean apiRule) throws 
ManagementServerException { + protected Map createFirewallRules(IpAddress publicIp, List clusterVMIds, boolean apiRule) throws ManagementServerException { // Firewall rule for SSH access on each node VM - CallContext.register(CallContext.current(), null); - try { - int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1; - provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); - if (logger.isInfoEnabled()) { - logger.info("Provisioned firewall rule to open up port {} to {} on {} for Kubernetes cluster: {}", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster); - } - } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { - throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); - } finally { - CallContext.unregister(); - } + Map vmIdPortMap = addFirewallRulesForNodes(publicIp, clusterVMIds.size()); if (!apiRule) { - return; + return vmIdPortMap; } // Firewall rule for API access for control node VMs CallContext.register(CallContext.current(), null); @@ -659,6 +667,7 @@ protected void createFirewallRules(IpAddress publicIp, List clusterVMIds, } finally { CallContext.unregister(); } + return vmIdPortMap; } /** @@ -673,11 +682,11 @@ protected void createFirewallRules(IpAddress publicIp, List clusterVMIds, * @throws ManagementServerException */ protected void setupKubernetesClusterIsolatedNetworkRules(IpAddress publicIp, Network network, List clusterVMIds, boolean apiRule) throws ManagementServerException { - createFirewallRules(publicIp, clusterVMIds, apiRule); + Map vmIdPortMap = createFirewallRules(publicIp, clusterVMIds, apiRule); // Port forwarding rule for SSH access on each node VM try { - provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds); + 
provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, vmIdPortMap); } catch (ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); } @@ -769,7 +778,8 @@ protected void setupKubernetesClusterVpcTierRules(IpAddress publicIp, Network ne // Add port forwarding rule for SSH access on each node VM try { - provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds); + Map vmIdPortMap = getVmPortMap(); + provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, vmIdPortMap); } catch (ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); } @@ -790,8 +800,27 @@ protected String getKubernetesClusterNodeNamePrefix() { return prefix; } + protected String getEtcdNodeNameForCluster() { + String prefix = kubernetesCluster.getName(); + if (!NetUtils.verifyDomainNameLabel(prefix, true)) { + prefix = prefix.replaceAll("[^a-zA-Z0-9-]", ""); + if (prefix.isEmpty()) { + prefix = kubernetesCluster.getUuid(); + } + } + prefix = prefix + "-etcd" ; + if (prefix.length() > 40) { + prefix = prefix.substring(0, 40); + } + return prefix; + } + protected KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, final Long memory, final Long size, - final Long serviceOfferingId, final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) { + final Long serviceOfferingId, final Boolean autoscaleEnabled, + final Long minSize, final Long maxSize, + final KubernetesClusterNodeType nodeType, + final boolean updateNodeOffering, + final boolean updateClusterOffering) { return Transaction.execute((TransactionCallback) status -> { KubernetesClusterVO updatedCluster = 
kubernetesClusterDao.createForUpdate(kubernetesCluster.getId()); @@ -804,7 +833,16 @@ protected KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, fin if (size != null) { updatedCluster.setNodeCount(size); } - if (serviceOfferingId != null) { + if (updateNodeOffering && serviceOfferingId != null && nodeType != null) { + if (WORKER == nodeType) { + updatedCluster.setWorkerServiceOfferingId(serviceOfferingId); + } else if (CONTROL == nodeType) { + updatedCluster.setControlServiceOfferingId(serviceOfferingId); + } else if (ETCD == nodeType) { + updatedCluster.setEtcdServiceOfferingId(serviceOfferingId); + } + } + if (updateClusterOffering && serviceOfferingId != null) { updatedCluster.setServiceOfferingId(serviceOfferingId); } if (autoscaleEnabled != null) { @@ -817,7 +855,7 @@ protected KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, fin } private KubernetesClusterVO updateKubernetesClusterEntry(final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) throws CloudRuntimeException { - KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(null, null, null, null, autoscaleEnabled, minSize, maxSize); + KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(null, null, null, null, autoscaleEnabled, minSize, maxSize, null, false, false); if (kubernetesClusterVO == null) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to update Kubernetes cluster", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); @@ -880,4 +918,8 @@ protected boolean autoscaleCluster(boolean enable, Long minSize, Long maxSize) { updateLoginUserDetails(null); } } + + protected List listDedicatedHostsInDomain(Long domainId) { + return dedicatedResourceDao.listByDomainId(domainId); + } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index de85e6231f2d..54294e9c3d84 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -19,13 +19,18 @@ import java.io.File; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import javax.inject.Inject; +import com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.storage.VMTemplateVO; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.InternalIdentity; import org.apache.cloudstack.context.CallContext; @@ -60,12 +65,17 @@ import com.cloud.vm.dao.VMInstanceDao; import org.apache.logging.log4j.Level; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.CONTROL; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.DEFAULT; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.ETCD; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.WORKER; + public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModifierActionWorker { @Inject protected VMInstanceDao vmInstanceDao; - private ServiceOffering serviceOffering; + private Map serviceOfferingNodeTypeMap; private Long clusterSize; private List nodeIds; private KubernetesCluster.State originalState; @@ -75,8 +85,12 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif private Boolean 
isAutoscalingEnabled; private long scaleTimeoutTime; + protected KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) { + super(kubernetesCluster, clusterManager); + } + public KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster, - final ServiceOffering serviceOffering, + final Map serviceOfferingNodeTypeMap, final Long clusterSize, final List nodeIds, final Boolean isAutoscalingEnabled, @@ -84,7 +98,7 @@ public KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster, final Long maxSize, final KubernetesClusterManagerImpl clusterManager) { super(kubernetesCluster, clusterManager); - this.serviceOffering = serviceOffering; + this.serviceOfferingNodeTypeMap = serviceOfferingNodeTypeMap; this.nodeIds = nodeIds; this.isAutoscalingEnabled = isAutoscalingEnabled; this.minSize = minSize; @@ -123,7 +137,7 @@ private void scaleKubernetesClusterIsolatedNetworkRules(final List cluster } // Remove existing SSH firewall rules - FirewallRule firewallRule = removeSshFirewallRule(publicIp); + FirewallRule firewallRule = removeSshFirewallRule(publicIp, network.getId()); if (firewallRule == null) { throw new ManagementServerException("Firewall rule for node SSH access can't be provisioned"); } @@ -148,7 +162,8 @@ private void scaleKubernetesClusterVpcTierRules(final List clusterVMIds) t } // Add port forwarding rule for SSH access on each node VM try { - provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds); + Map vmIdPortMap = getVmPortMap(); + provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, vmIdPortMap); } catch (ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); } @@ -176,15 +191,19 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds) t 
scaleKubernetesClusterIsolatedNetworkRules(clusterVMIds); } - private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, final ServiceOffering newServiceOffering) throws CloudRuntimeException { + private KubernetesClusterVO updateKubernetesClusterEntryForNodeType(final Long newWorkerSize, final KubernetesClusterNodeType nodeType, + final ServiceOffering newServiceOffering, + final boolean updateNodeOffering, boolean updateClusterOffering) throws CloudRuntimeException { final ServiceOffering serviceOffering = newServiceOffering == null ? serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()) : newServiceOffering; final Long serviceOfferingId = newServiceOffering == null ? null : serviceOffering.getId(); - final long size = newSize == null ? kubernetesCluster.getTotalNodeCount() : (newSize + kubernetesCluster.getControlNodeCount()); - final long cores = serviceOffering.getCpu() * size; - final long memory = serviceOffering.getRamSize() * size; - KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId, - kubernetesCluster.getAutoscalingEnabled(), kubernetesCluster.getMinSize(), kubernetesCluster.getMaxSize()); + + Pair clusterCountAndCapacity = calculateNewClusterCountAndCapacity(newWorkerSize, nodeType, serviceOffering); + long cores = clusterCountAndCapacity.first(); + long memory = clusterCountAndCapacity.second(); + + KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newWorkerSize, serviceOfferingId, + kubernetesCluster.getAutoscalingEnabled(), kubernetesCluster.getMinSize(), kubernetesCluster.getMaxSize(), nodeType, updateNodeOffering, updateClusterOffering); if (kubernetesClusterVO == null) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to update Kubernetes cluster", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); @@ -192,6 +211,55 @@ 
private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, fin return kubernetesClusterVO; } + protected Pair calculateNewClusterCountAndCapacity(Long newWorkerSize, KubernetesClusterNodeType nodeType, ServiceOffering serviceOffering) { + long cores; + long memory; + long totalClusterSize = newWorkerSize == null ? kubernetesCluster.getTotalNodeCount() : (newWorkerSize + kubernetesCluster.getControlNodeCount() + kubernetesCluster.getEtcdNodeCount()); + + if (nodeType == DEFAULT) { + cores = serviceOffering.getCpu() * totalClusterSize; + memory = serviceOffering.getRamSize() * totalClusterSize; + } else { + long nodeCount = getNodeCountForType(nodeType, kubernetesCluster); + Long existingOfferingId = getExistingOfferingIdForNodeType(nodeType, kubernetesCluster); + ServiceOfferingVO previousOffering = serviceOfferingDao.findById(existingOfferingId); + Pair previousNodesCapacity = calculateNodesCapacity(previousOffering, nodeCount); + if (WORKER == nodeType) { + nodeCount = newWorkerSize == null ? 
kubernetesCluster.getNodeCount() : newWorkerSize; + } + Pair newNodesCapacity = calculateNodesCapacity(serviceOffering, nodeCount); + Pair newClusterCapacity = calculateClusterNewCapacity(kubernetesCluster, previousNodesCapacity, newNodesCapacity); + cores = newClusterCapacity.first(); + memory = newClusterCapacity.second(); + } + return new Pair<>(cores, memory); + } + + private long getNodeCountForType(KubernetesClusterNodeType nodeType, KubernetesCluster kubernetesCluster) { + if (WORKER == nodeType) { + return kubernetesCluster.getNodeCount(); + } else if (CONTROL == nodeType) { + return kubernetesCluster.getControlNodeCount(); + } else if (ETCD == nodeType) { + return kubernetesCluster.getEtcdNodeCount(); + } + return kubernetesCluster.getTotalNodeCount(); + } + + protected Pair calculateClusterNewCapacity(KubernetesCluster kubernetesCluster, + Pair previousNodeTypeCapacity, + Pair newNodeTypeCapacity) { + long previousCores = kubernetesCluster.getCores(); + long previousMemory = kubernetesCluster.getMemory(); + long newCores = previousCores - previousNodeTypeCapacity.first() + newNodeTypeCapacity.first(); + long newMemory = previousMemory - previousNodeTypeCapacity.second() + newNodeTypeCapacity.second(); + return new Pair<>(newCores, newMemory); + } + + protected Pair calculateNodesCapacity(ServiceOffering offering, long nodeCount) { + return new Pair<>(offering.getCpu() * nodeCount, offering.getRamSize() * nodeCount); + } + private boolean removeKubernetesClusterNode(final String ipAddress, final int port, final UserVm userVm, final int retries, final int waitDuration) { File pkFile = getManagementServerSshPublicKeyFile(); int retryCounter = 0; @@ -266,11 +334,12 @@ private void validateKubernetesClusterScaleSizeParameters() throws CloudRuntimeE } if (newVmRequiredCount > 0) { final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); + VMTemplateVO clusterTemplate = templateDao.findById(kubernetesCluster.getTemplateId()); try { if 
(originalState.equals(KubernetesCluster.State.Running)) { - plan(newVmRequiredCount, zone, clusterServiceOffering); + plan(newVmRequiredCount, zone, clusterServiceOffering, kubernetesCluster.getDomainId(), kubernetesCluster.getAccountId(), clusterTemplate.getHypervisorType()); } else { - plan(kubernetesCluster.getTotalNodeCount() + newVmRequiredCount, zone, clusterServiceOffering); + plan(kubernetesCluster.getTotalNodeCount() + newVmRequiredCount, zone, clusterServiceOffering, kubernetesCluster.getDomainId(), kubernetesCluster.getAccountId(), clusterTemplate.getHypervisorType()); } } catch (InsufficientCapacityException e) { logTransitStateToFailedIfNeededAndThrow(Level.WARN, String.format("Scaling failed for Kubernetes cluster : %s in zone : %s, insufficient capacity", kubernetesCluster.getName(), zone.getName())); @@ -282,17 +351,18 @@ private void validateKubernetesClusterScaleSizeParameters() throws CloudRuntimeE } } - private void scaleKubernetesClusterOffering() throws CloudRuntimeException { + private void scaleKubernetesClusterOffering(KubernetesClusterNodeType nodeType, ServiceOffering serviceOffering, + boolean updateNodeOffering, boolean updateClusterOffering) throws CloudRuntimeException { validateKubernetesClusterScaleOfferingParameters(); if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested); } if (KubernetesCluster.State.Created.equals(originalState)) { - kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering); + kubernetesCluster = updateKubernetesClusterEntryForNodeType(null, nodeType, serviceOffering, updateNodeOffering, updateClusterOffering); return; } - final long size = kubernetesCluster.getTotalNodeCount(); - List vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); + final long size = getNodeCountForType(nodeType, kubernetesCluster); + List vmList = 
kubernetesClusterVmMapDao.listByClusterIdAndVmType(kubernetesCluster.getId(), nodeType); final long tobeScaledVMCount = Math.min(vmList.size(), size); for (long i = 0; i < tobeScaledVMCount; i++) { KubernetesClusterVmMapVO vmMapVO = vmList.get((int) i); @@ -310,7 +380,7 @@ private void scaleKubernetesClusterOffering() throws CloudRuntimeException { logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster : %s failed, scaling action timed out", kubernetesCluster.getName()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } } - kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering); + kubernetesCluster = updateKubernetesClusterEntryForNodeType(null, nodeType, serviceOffering, updateNodeOffering, updateClusterOffering); } private void removeNodesFromCluster(List vmMaps) throws CloudRuntimeException { @@ -346,7 +416,10 @@ private void removeNodesFromCluster(List vmMaps) throw // Scale network rules to update firewall rule try { - List clusterVMIds = getKubernetesClusterVMMaps().stream().map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); + List clusterVMIds = getKubernetesClusterVMMaps() + .stream() + .filter(x -> !x.isEtcdNode()) + .map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); scaleKubernetesClusterNetworkRules(clusterVMIds); } catch (ManagementServerException e) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes " + @@ -361,10 +434,13 @@ private void scaleDownKubernetesClusterSize() throws CloudRuntimeException { } List vmList; if (this.nodeIds != null) { - vmList = getKubernetesClusterVMMapsForNodes(this.nodeIds); + vmList = getKubernetesClusterVMMapsForNodes(this.nodeIds).stream().filter(vm -> !vm.isExternalNode()).collect(Collectors.toList()); } else { vmList = getKubernetesClusterVMMaps(); - vmList = vmList.subList((int) (kubernetesCluster.getControlNodeCount() + clusterSize), vmList.size()); + vmList = vmList.stream() + .filter(vm 
-> !vm.isExternalNode() && !vm.isControlNode() && !vm.isEtcdNode()) + .collect(Collectors.toList()); + vmList = vmList.subList((int) (kubernetesCluster.getControlNodeCount() + clusterSize - 1), vmList.size()); } Collections.reverse(vmList); removeNodesFromCluster(vmList); @@ -375,16 +451,20 @@ private void scaleUpKubernetesClusterSize(final long newVmCount) throws CloudRun stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested); } List clusterVMs = new ArrayList<>(); - LaunchPermissionVO launchPermission = new LaunchPermissionVO(clusterTemplate.getId(), owner.getId()); - launchPermissionDao.persist(launchPermission); + if (isDefaultTemplateUsed()) { + LaunchPermissionVO launchPermission = new LaunchPermissionVO(clusterTemplate.getId(), owner.getId()); + launchPermissionDao.persist(launchPermission); + } try { - clusterVMs = provisionKubernetesClusterNodeVms((int)(newVmCount + kubernetesCluster.getNodeCount()), (int)kubernetesCluster.getNodeCount(), publicIpAddress); + clusterVMs = provisionKubernetesClusterNodeVms((int)(newVmCount + kubernetesCluster.getNodeCount()), (int)kubernetesCluster.getNodeCount(), publicIpAddress, kubernetesCluster.getDomainId(), kubernetesCluster.getAccountId()); updateLoginUserDetails(clusterVMs.stream().map(InternalIdentity::getId).collect(Collectors.toList())); } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to provision node VM in the cluster", kubernetesCluster.getName()), e); } try { - List clusterVMIds = getKubernetesClusterVMMaps().stream().map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); + List externalNodeIds = getKubernetesClusterVMMaps().stream().filter(KubernetesClusterVmMapVO::isExternalNode).map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); + List clusterVMIds = 
getKubernetesClusterVMMaps().stream().filter(vm -> !vm.isExternalNode() && !vm.isEtcdNode()).map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); + clusterVMIds.addAll(externalNodeIds); scaleKubernetesClusterNetworkRules(clusterVMIds); } catch (ManagementServerException e) { logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to update network rules", kubernetesCluster.getName()), e); @@ -401,7 +481,7 @@ private void scaleUpKubernetesClusterSize(final long newVmCount) throws CloudRun } } - private void scaleKubernetesClusterSize() throws CloudRuntimeException { + private void scaleKubernetesClusterSize(KubernetesClusterNodeType nodeType) throws CloudRuntimeException { validateKubernetesClusterScaleSizeParameters(); final long originalClusterSize = kubernetesCluster.getNodeCount(); final long newVmRequiredCount = clusterSize - originalClusterSize; @@ -409,7 +489,7 @@ private void scaleKubernetesClusterSize() throws CloudRuntimeException { if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { stateTransitTo(kubernetesCluster.getId(), newVmRequiredCount > 0 ? 
KubernetesCluster.Event.ScaleUpRequested : KubernetesCluster.Event.ScaleDownRequested); } - kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering); + kubernetesCluster = updateKubernetesClusterEntryForNodeType(null, nodeType, serviceOfferingNodeTypeMap.get(nodeType.name()), false, false); return; } Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); @@ -423,7 +503,9 @@ private void scaleKubernetesClusterSize() throws CloudRuntimeException { } else { // upscale, same node count handled above scaleUpKubernetesClusterSize(newVmRequiredCount); } - kubernetesCluster = updateKubernetesClusterEntry(clusterSize, null); + boolean updateNodeOffering = serviceOfferingNodeTypeMap.containsKey(nodeType.name()); + ServiceOffering nodeOffering = serviceOfferingNodeTypeMap.getOrDefault(nodeType.name(), null); + kubernetesCluster = updateKubernetesClusterEntryForNodeType(clusterSize, nodeType, nodeOffering, updateNodeOffering, false); } private boolean isAutoscalingChanged() { @@ -446,37 +528,86 @@ public boolean scaleCluster() throws CloudRuntimeException { } scaleTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterScaleTimeout.value() * 1000; final long originalClusterSize = kubernetesCluster.getNodeCount(); - final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); - if (existingServiceOffering == null) { - logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster)); + if (serviceOfferingNodeTypeMap.containsKey(DEFAULT.name())) { + final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + if (existingServiceOffering == null) { + logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster : %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster.getName())); + } } + 
final boolean autoscalingChanged = isAutoscalingChanged(); - final boolean serviceOfferingScalingNeeded = serviceOffering != null && serviceOffering.getId() != existingServiceOffering.getId(); + boolean hasDefaultOffering = serviceOfferingNodeTypeMap.containsKey(DEFAULT.name()); + Long existingDefaultOfferingId = kubernetesCluster.getServiceOfferingId(); + ServiceOffering defaultServiceOffering = serviceOfferingNodeTypeMap.getOrDefault(DEFAULT.name(), null); + + for (KubernetesClusterNodeType nodeType : Arrays.asList(CONTROL, ETCD, WORKER)) { + boolean isWorkerNodeOrAllNodes = WORKER == nodeType; + final long newVMRequired = (!isWorkerNodeOrAllNodes || clusterSize == null) ? 0 : clusterSize - originalClusterSize; + if (!hasDefaultOffering && !serviceOfferingNodeTypeMap.containsKey(nodeType.name()) && newVMRequired == 0) { + continue; + } - if (autoscalingChanged) { - boolean autoScaled = autoscaleCluster(this.isAutoscalingEnabled, minSize, maxSize); - if (autoScaled && serviceOfferingScalingNeeded) { - scaleKubernetesClusterOffering(); + boolean serviceOfferingScalingNeeded = isServiceOfferingScalingNeededForNodeType(nodeType, serviceOfferingNodeTypeMap, kubernetesCluster, existingDefaultOfferingId); + ServiceOffering serviceOffering = serviceOfferingNodeTypeMap.getOrDefault(nodeType.name(), defaultServiceOffering); + boolean updateNodeOffering = serviceOfferingNodeTypeMap.containsKey(nodeType.name()); + boolean updateClusterOffering = isWorkerNodeOrAllNodes && hasDefaultOffering; + if (isWorkerNodeOrAllNodes && autoscalingChanged) { + boolean autoScaled = autoscaleCluster(this.isAutoscalingEnabled, minSize, maxSize); + if (autoScaled && serviceOfferingScalingNeeded) { + scaleKubernetesClusterOffering(nodeType, serviceOffering, updateNodeOffering, updateClusterOffering); + } + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); + return autoScaled; } - stateTransitTo(kubernetesCluster.getId(), 
KubernetesCluster.Event.OperationSucceeded); - return autoScaled; - } - final boolean clusterSizeScalingNeeded = clusterSize != null && clusterSize != originalClusterSize; - final long newVMRequired = clusterSize == null ? 0 : clusterSize - originalClusterSize; - if (serviceOfferingScalingNeeded && clusterSizeScalingNeeded) { - if (newVMRequired > 0) { - scaleKubernetesClusterOffering(); - scaleKubernetesClusterSize(); - } else { - scaleKubernetesClusterSize(); - scaleKubernetesClusterOffering(); + final boolean clusterSizeScalingNeeded = isWorkerNodeOrAllNodes && clusterSize != null && clusterSize != originalClusterSize; + if (serviceOfferingScalingNeeded && clusterSizeScalingNeeded) { + if (newVMRequired > 0) { + scaleKubernetesClusterOffering(nodeType, serviceOffering, updateNodeOffering, updateClusterOffering); + scaleKubernetesClusterSize(nodeType); + } else { + scaleKubernetesClusterSize(nodeType); + scaleKubernetesClusterOffering(nodeType, serviceOffering, updateNodeOffering, updateClusterOffering); + } + } else if (serviceOfferingScalingNeeded) { + scaleKubernetesClusterOffering(nodeType, serviceOffering, updateNodeOffering, updateClusterOffering); + } else if (clusterSizeScalingNeeded) { + scaleKubernetesClusterSize(nodeType); } - } else if (serviceOfferingScalingNeeded) { - scaleKubernetesClusterOffering(); - } else if (clusterSizeScalingNeeded) { - scaleKubernetesClusterSize(); } + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); return true; } + + protected boolean isServiceOfferingScalingNeededForNodeType(KubernetesClusterNodeType nodeType, + Map map, KubernetesCluster kubernetesCluster, + Long existingDefaultOfferingId) { + Long existingOfferingId = map.containsKey(DEFAULT.name()) ? 
+ existingDefaultOfferingId : + getExistingOfferingIdForNodeType(nodeType, kubernetesCluster); + if (existingOfferingId == null) { + logAndThrow(Level.ERROR, String.format("The Kubernetes cluster %s does not have a global service offering set", kubernetesCluster.getName())); + } + ServiceOffering existingOffering = serviceOfferingDao.findById(existingOfferingId); + if (existingOffering == null) { + logAndThrow(Level.ERROR, String.format("Cannot find the global service offering with ID %s set on the Kubernetes cluster %s", existingOfferingId, kubernetesCluster.getName())); + } + ServiceOffering newOffering = map.containsKey(DEFAULT.name()) ? map.get(DEFAULT.name()) : map.get(nodeType.name()); + return newOffering != null && newOffering.getId() != existingOffering.getId(); + } + + protected Long getExistingOfferingIdForNodeType(KubernetesClusterNodeType nodeType, KubernetesCluster kubernetesCluster) { + Long offeringId = null; + if (WORKER == nodeType) { + offeringId = kubernetesCluster.getWorkerServiceOfferingId(); + } else if (CONTROL == nodeType) { + offeringId = kubernetesCluster.getControlServiceOfferingId(); + } else if (ETCD == nodeType) { + offeringId = kubernetesCluster.getEtcdServiceOfferingId(); + } + if (offeringId == null) { + offeringId = kubernetesCluster.getServiceOfferingId(); + } + return offeringId; + } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index a2384a2e0feb..48b0c12ecd2f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -24,11 +24,20 @@ import java.net.UnknownHostException; import 
java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.stream.Collectors; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.kubernetes.cluster.KubernetesServiceHelper; +import com.cloud.network.vpc.NetworkACL; +import com.cloud.storage.VMTemplateVO; +import com.cloud.user.UserDataVO; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.InternalIdentity; import org.apache.cloudstack.framework.ca.Certificate; @@ -75,6 +84,10 @@ import com.cloud.vm.VmDetailConstants; import org.apache.logging.log4j.Level; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.CONTROL; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.ETCD; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.WORKER; + public class KubernetesClusterStartWorker extends KubernetesClusterResourceModifierActionWorker { private KubernetesSupportedVersion kubernetesClusterVersion; @@ -128,9 +141,9 @@ private boolean isKubernetesVersionSupportsHA() { return haSupported; } - private String getKubernetesControlNodeConfig(final String controlNodeIp, final String serverIp, - final String hostName, final boolean haSupported, - final boolean ejectIso) throws IOException { + private Pair getKubernetesControlNodeConfig(final String controlNodeIp, final String serverIp, + final List etcdIps, final String hostName, final boolean haSupported, + final boolean ejectIso, final boolean externalCni) throws IOException { String k8sControlNodeConfig = readResourceFile("/conf/k8s-control-node.yml"); final String apiServerCert = "{{ k8s_control_node.apiserver.crt }}"; final String apiServerKey = "{{ 
k8s_control_node.apiserver.key }}"; @@ -139,18 +152,33 @@ private String getKubernetesControlNodeConfig(final String controlNodeIp, final final String clusterToken = "{{ k8s_control_node.cluster.token }}"; final String clusterInitArgsKey = "{{ k8s_control_node.cluster.initargs }}"; final String ejectIsoKey = "{{ k8s.eject.iso }}"; + final String installWaitTime = "{{ k8s.install.wait.time }}"; + final String installReattemptsCount = "{{ k8s.install.reattempts.count }}"; + final String externalEtcdNodes = "{{ etcd.unstacked_etcd }}"; + final String etcdEndpointList = "{{ etcd.etcd_endpoint_list }}"; + final String k8sServerIp = "{{ k8s_control.server_ip }}"; + final String k8sApiPort = "{{ k8s.api_server_port }}"; + final String certSans = "{{ k8s_control.server_ips }}"; + final String k8sCertificate = "{{ k8s_control.certificate_key }}"; + final String externalCniPlugin = "{{ k8s.external.cni.plugin }}"; final List addresses = new ArrayList<>(); addresses.add(controlNodeIp); if (!serverIp.equals(controlNodeIp)) { addresses.add(serverIp); } + + boolean externalEtcd = !etcdIps.isEmpty(); final Certificate certificate = caManager.issueCertificate(null, Arrays.asList(hostName, "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local"), addresses, 3650, null); final String tlsClientCert = CertUtils.x509CertificateToPem(certificate.getClientCertificate()); final String tlsPrivateKey = CertUtils.privateKeyToPem(certificate.getPrivateKey()); final String tlsCaCert = CertUtils.x509CertificatesToPem(certificate.getCaCertificates()); + final Long waitTime = KubernetesClusterService.KubernetesControlNodeInstallAttemptWait.value(); + final Long reattempts = KubernetesClusterService.KubernetesControlNodeInstallReattempts.value(); + String endpointList = getEtcdEndpointList(etcdIps); + k8sControlNodeConfig = k8sControlNodeConfig.replace(apiServerCert, tlsClientCert.replace("\n", "\n ")); 
k8sControlNodeConfig = k8sControlNodeConfig.replace(apiServerKey, tlsPrivateKey.replace("\n", "\n ")); k8sControlNodeConfig = k8sControlNodeConfig.replace(caCert, tlsCaCert.replace("\n", "\n ")); @@ -162,29 +190,39 @@ private String getKubernetesControlNodeConfig(final String controlNodeIp, final pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; } } + k8sControlNodeConfig = k8sControlNodeConfig.replace(installWaitTime, String.valueOf(waitTime)); + k8sControlNodeConfig = k8sControlNodeConfig.replace(installReattemptsCount, String.valueOf(reattempts)); k8sControlNodeConfig = k8sControlNodeConfig.replace(sshPubKey, pubKey); k8sControlNodeConfig = k8sControlNodeConfig.replace(clusterToken, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); + k8sControlNodeConfig = k8sControlNodeConfig.replace(externalEtcdNodes, String.valueOf(externalEtcd)); String initArgs = ""; if (haSupported) { initArgs = String.format("--control-plane-endpoint %s:%d --upload-certs --certificate-key %s ", - serverIp, + controlNodeIp, CLUSTER_API_PORT, KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster)); } - initArgs += String.format("--apiserver-cert-extra-sans=%s", serverIp); + initArgs += String.format("--apiserver-cert-extra-sans=%s", controlNodeIp); initArgs += String.format(" --kubernetes-version=%s", getKubernetesClusterVersion().getSemanticVersion()); k8sControlNodeConfig = k8sControlNodeConfig.replace(clusterInitArgsKey, initArgs); k8sControlNodeConfig = k8sControlNodeConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); + k8sControlNodeConfig = k8sControlNodeConfig.replace(etcdEndpointList, endpointList); + k8sControlNodeConfig = k8sControlNodeConfig.replace(k8sServerIp, controlNodeIp); + k8sControlNodeConfig = k8sControlNodeConfig.replace(k8sApiPort, String.valueOf(CLUSTER_API_PORT)); + k8sControlNodeConfig = k8sControlNodeConfig.replace(certSans, String.format("- %s", serverIp)); + k8sControlNodeConfig = k8sControlNodeConfig.replace(k8sCertificate, 
KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster)); + k8sControlNodeConfig = k8sControlNodeConfig.replace(externalCniPlugin, String.valueOf(externalCni)); + k8sControlNodeConfig = updateKubeConfigWithRegistryDetails(k8sControlNodeConfig); - return k8sControlNodeConfig; + return new Pair<>(k8sControlNodeConfig, controlNodeIp); } - private UserVm createKubernetesControlNode(final Network network, String serverIp) throws ManagementServerException, + private Pair createKubernetesControlNode(final Network network, String serverIp, List etcdIps, Long domainId, Long accountId, Long asNumber) throws ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { UserVm controlVm = null; DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); - ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + ServiceOffering serviceOffering = getServiceOfferingForNodeTypeOnCluster(CONTROL, kubernetesCluster); List networkIds = new ArrayList(); networkIds.add(kubernetesCluster.getNetworkId()); Pair> ipAddresses = getKubernetesControlNodeIpAddresses(zone, network, owner); @@ -205,36 +243,61 @@ private UserVm createKubernetesControlNode(final Network network, String serverI String suffix = Long.toHexString(System.currentTimeMillis()); String hostName = String.format("%s-control-%s", kubernetesClusterNodeNamePrefix, suffix); boolean haSupported = isKubernetesVersionSupportsHA(); - String k8sControlNodeConfig = null; + Long userDataId = kubernetesCluster.getCniConfigId(); + Pair k8sControlNodeConfigAndControlIp = new Pair<>(null, null); try { - k8sControlNodeConfig = getKubernetesControlNodeConfig(controlNodeIp, serverIp, hostName, haSupported, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); + k8sControlNodeConfigAndControlIp = getKubernetesControlNodeConfig(controlNodeIp, serverIp, etcdIps, hostName, haSupported, 
Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType()), Objects.nonNull(userDataId)); } catch (IOException e) { logAndThrow(Level.ERROR, "Failed to read Kubernetes control node configuration file", e); } + String k8sControlNodeConfig = k8sControlNodeConfigAndControlIp.first(); String base64UserData = Base64.encodeBase64String(k8sControlNodeConfig.getBytes(com.cloud.utils.StringUtils.getPreferredCharset())); + if (Objects.nonNull(userDataId)) { + logger.info("concatenating userdata"); + UserDataVO cniConfigVo = userDataDao.findById(userDataId); + String cniConfig = new String(Base64.decodeBase64(cniConfigVo.getUserData())); + if (Objects.nonNull(asNumber)) { + cniConfig = substituteASNumber(cniConfig, asNumber); + } + cniConfig = Base64.encodeBase64String(cniConfig.getBytes(com.cloud.utils.StringUtils.getPreferredCharset())); + base64UserData = userDataManager.concatenateUserData(base64UserData, cniConfig, null); + } + List keypairs = new ArrayList(); if (StringUtils.isNotBlank(kubernetesCluster.getKeyPair())) { keypairs.add(kubernetesCluster.getKeyPair()); } + + Long affinityGroupId = getExplicitAffinityGroup(domainId, accountId); + String userDataDetails = kubernetesCluster.getCniConfigDetails(); if (kubernetesCluster.getSecurityGroupId() != null && networkModel.checkSecurityGroupSupportForNetwork(owner, zone, networkIds, List.of(kubernetesCluster.getSecurityGroupId()))) { List securityGroupIds = new ArrayList<>(); securityGroupIds.add(kubernetesCluster.getSecurityGroupId()); - controlVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, securityGroupIds, owner, - hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, null, null, keypairs, - requestedIps, addrs, null, null, null, customParameterMap, null, null, null, + controlVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, controlNodeTemplate, 
networkIds, securityGroupIds, owner, + hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, userDataId, userDataDetails, keypairs, + requestedIps, addrs, null, null, Objects.nonNull(affinityGroupId) ? + Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, null, UserVmManager.CKS_NODE); } else { - controlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, + controlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, controlNodeTemplate, networkIds, owner, hostName, hostName, null, null, null, - Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, null, null, keypairs, - requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null); + Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, userDataId, userDataDetails, keypairs, + requestedIps, addrs, null, null, Objects.nonNull(affinityGroupId) ? 
+ Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null); } if (logger.isInfoEnabled()) { logger.info("Created control VM: {}, {} in the Kubernetes cluster: {}", controlVm, hostName, kubernetesCluster); } - return controlVm; + return new Pair<>(controlVm, k8sControlNodeConfigAndControlIp.second()); + } + + private String substituteASNumber(String cniConfig, Long asNumber) { + final String asNumberKey = "{{ AS_NUMBER }}"; + cniConfig = cniConfig.replace(asNumberKey, String.valueOf(asNumber)); + return cniConfig; + } private String getKubernetesAdditionalControlNodeConfig(final String joinIp, final boolean ejectIso) throws IOException { @@ -244,6 +307,11 @@ private String getKubernetesAdditionalControlNodeConfig(final String joinIp, fin final String sshPubKey = "{{ k8s.ssh.pub.key }}"; final String clusterHACertificateKey = "{{ k8s_control_node.cluster.ha.certificate.key }}"; final String ejectIsoKey = "{{ k8s.eject.iso }}"; + final String installWaitTime = "{{ k8s.install.wait.time }}"; + final String installReattemptsCount = "{{ k8s.install.reattempts.count }}"; + + final Long waitTime = KubernetesClusterService.KubernetesControlNodeInstallAttemptWait.value(); + final Long reattempts = KubernetesClusterService.KubernetesControlNodeInstallReattempts.value(); String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\""; String sshKeyPair = kubernetesCluster.getKeyPair(); @@ -253,6 +321,8 @@ private String getKubernetesAdditionalControlNodeConfig(final String joinIp, fin pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; } } + k8sControlNodeConfig = k8sControlNodeConfig.replace(installWaitTime, String.valueOf(waitTime)); + k8sControlNodeConfig = k8sControlNodeConfig.replace(installReattemptsCount, String.valueOf(reattempts)); k8sControlNodeConfig = k8sControlNodeConfig.replace(sshPubKey, pubKey); k8sControlNodeConfig = k8sControlNodeConfig.replace(joinIpKey, joinIp); 
k8sControlNodeConfig = k8sControlNodeConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); @@ -263,11 +333,84 @@ private String getKubernetesAdditionalControlNodeConfig(final String joinIp, fin return k8sControlNodeConfig; } - private UserVm createKubernetesAdditionalControlNode(final String joinIp, final int additionalControlNodeInstance) throws ManagementServerException, + private String getInitialEtcdClusterDetails(List ipAddresses, List hostnames) { + String initialCluster = "%s=http://%s:2380"; + StringBuilder clusterInfo = new StringBuilder(); + for (int i = 0; i < ipAddresses.size(); i++) { + clusterInfo.append(String.format(initialCluster, hostnames.get(i), ipAddresses.get(i))); + if (i < ipAddresses.size()-1) { + clusterInfo.append(","); + } + } + return clusterInfo.toString(); + } + + /** + * + * @param ipAddresses list of etcd node guest IPs + * @return a formatted list of etcd endpoints adhering to YAML syntax + */ + private String getEtcdEndpointList(List ipAddresses) { + StringBuilder endpoints = new StringBuilder(); + for (int i = 0; i < ipAddresses.size(); i++) { + endpoints.append(String.format("- http://%s:2379", ipAddresses.get(i).getIp4Address())); + if (i < ipAddresses.size()-1) { + endpoints.append("\n "); + } + } + return endpoints.toString(); + } + + + private List getEtcdNodeHostnames() { + List hostnames = new ArrayList<>(); + for (int etcdNodeIndex = 1; etcdNodeIndex <= kubernetesCluster.getEtcdNodeCount(); etcdNodeIndex++) { + String suffix = Long.toHexString(System.currentTimeMillis()); + hostnames.add(String.format("%s-%s-%s", getEtcdNodeNameForCluster(), etcdNodeIndex, suffix)); + } + return hostnames; + } + + private String getEtcdNodeConfig(final List ipAddresses, final List hostnames, final int etcdNodeIndex, + final boolean ejectIso) throws IOException { + String k8sEtcdNodeConfig = readResourceFile("/conf/etcd-node.yml"); + final String sshPubKey = "{{ k8s.ssh.pub.key }}"; + final String 
ejectIsoKey = "{{ k8s.eject.iso }}"; + final String installWaitTime = "{{ k8s.install.wait.time }}"; + final String installReattemptsCount = "{{ k8s.install.reattempts.count }}"; + final String etcdNodeName = "{{ etcd.node_name }}"; + final String etcdNodeIp = "{{ etcd.node_ip }}"; + final String etcdInitialClusterNodes = "{{ etcd.initial_cluster_nodes }}"; + + final Long waitTime = KubernetesClusterService.KubernetesControlNodeInstallAttemptWait.value(); + final Long reattempts = KubernetesClusterService.KubernetesControlNodeInstallReattempts.value(); + String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\""; + String sshKeyPair = kubernetesCluster.getKeyPair(); + if (StringUtils.isNotEmpty(sshKeyPair)) { + SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair); + if (sshkp != null) { + pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; + } + } + String initialClusterDetails = getInitialEtcdClusterDetails(ipAddresses, hostnames); + + k8sEtcdNodeConfig = k8sEtcdNodeConfig.replace(installWaitTime, String.valueOf(waitTime)); + k8sEtcdNodeConfig = k8sEtcdNodeConfig.replace(installReattemptsCount, String.valueOf(reattempts)); + k8sEtcdNodeConfig = k8sEtcdNodeConfig.replace(sshPubKey, pubKey); + k8sEtcdNodeConfig = k8sEtcdNodeConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); + k8sEtcdNodeConfig = k8sEtcdNodeConfig.replace(etcdNodeName, hostnames.get(etcdNodeIndex)); + k8sEtcdNodeConfig = k8sEtcdNodeConfig.replace(etcdNodeIp, ipAddresses.get(etcdNodeIndex)); + k8sEtcdNodeConfig = k8sEtcdNodeConfig.replace(etcdInitialClusterNodes, initialClusterDetails); + + return k8sEtcdNodeConfig; + } + + private UserVm createKubernetesAdditionalControlNode(final String joinIp, final int additionalControlNodeInstance, + final Long domainId, final Long accountId) throws ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { UserVm additionalControlVm = null; DataCenter zone = 
dataCenterDao.findById(kubernetesCluster.getZoneId()); - ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); + ServiceOffering serviceOffering = getServiceOfferingForNodeTypeOnCluster(CONTROL, kubernetesCluster); List networkIds = new ArrayList(); networkIds.add(kubernetesCluster.getNetworkId()); Network.IpAddresses addrs = new Network.IpAddresses(null, null); @@ -293,20 +436,24 @@ private UserVm createKubernetesAdditionalControlNode(final String joinIp, final if (StringUtils.isNotBlank(kubernetesCluster.getKeyPair())) { keypairs.add(kubernetesCluster.getKeyPair()); } + + Long affinityGroupId = getExplicitAffinityGroup(domainId, accountId); if (kubernetesCluster.getSecurityGroupId() != null && networkModel.checkSecurityGroupSupportForNetwork(owner, zone, networkIds, List.of(kubernetesCluster.getSecurityGroupId()))) { List securityGroupIds = new ArrayList<>(); securityGroupIds.add(kubernetesCluster.getSecurityGroupId()); - additionalControlVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, securityGroupIds, owner, + additionalControlVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, controlNodeTemplate, networkIds, securityGroupIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, null, null, keypairs, - null, addrs, null, null, null, customParameterMap, null, null, null, + null, addrs, null, null, Objects.nonNull(affinityGroupId) ? 
+ Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, null, UserVmManager.CKS_NODE); } else { - additionalControlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, + additionalControlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, controlNodeTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, null, null, keypairs, - null, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null); + null, addrs, null, null, Objects.nonNull(affinityGroupId) ? + Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null); } if (logger.isInfoEnabled()) { @@ -315,15 +462,62 @@ private UserVm createKubernetesAdditionalControlNode(final String joinIp, final return additionalControlVm; } - private UserVm provisionKubernetesClusterControlVm(final Network network, final String publicIpAddress) throws + private UserVm createEtcdNode(List requestedIps, List etcdNodeHostnames, int etcdNodeIndex, Long domainId, Long accountId) throws ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException { + UserVm etcdNode = null; + DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); + ServiceOffering serviceOffering = getServiceOfferingForNodeTypeOnCluster(ETCD, kubernetesCluster); + List networkIds = Collections.singletonList(kubernetesCluster.getNetworkId()); + Network.IpAddresses addrs = new Network.IpAddresses(null, null); + List guestIps = requestedIps.stream().map(Network.IpAddresses::getIp4Address).collect(Collectors.toList()); + String k8sControlNodeConfig = null; + try { + k8sControlNodeConfig = getEtcdNodeConfig(guestIps, etcdNodeHostnames, etcdNodeIndex, 
Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); + } catch (IOException e) { + logAndThrow(Level.ERROR, "Failed to read Kubernetes etcd node configuration file", e); + } + + String base64UserData = Base64.encodeBase64String(k8sControlNodeConfig.getBytes(com.cloud.utils.StringUtils.getPreferredCharset())); + List keypairs = new ArrayList(); + if (StringUtils.isNotBlank(kubernetesCluster.getKeyPair())) { + keypairs.add(kubernetesCluster.getKeyPair()); + } + Long affinityGroupId = getExplicitAffinityGroup(domainId, accountId); + String hostName = etcdNodeHostnames.get(etcdNodeIndex); + Map customParameterMap = new HashMap(); + if (zone.isSecurityGroupEnabled()) { + List securityGroupIds = new ArrayList<>(); + securityGroupIds.add(kubernetesCluster.getSecurityGroupId()); + etcdNode = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, etcdTemplate, networkIds, securityGroupIds, owner, + hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, null, null, keypairs, + Map.of(kubernetesCluster.getNetworkId(), requestedIps.get(etcdNodeIndex)), addrs, null, null, Objects.nonNull(affinityGroupId) ? + Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, + null, true, null, null); + } else { + etcdNode = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, etcdTemplate, networkIds, owner, + hostName, hostName, null, null, null, + Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, null, null, keypairs, + Map.of(kubernetesCluster.getNetworkId(), requestedIps.get(etcdNodeIndex)), addrs, null, null, Objects.nonNull(affinityGroupId) ? 
+ Collections.singletonList(affinityGroupId) : null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null); + } + + if (logger.isInfoEnabled()) { + logger.info(String.format("Created etcd node VM ID : %s, %s in the Kubernetes cluster : %s", etcdNode.getUuid(), hostName, kubernetesCluster.getName())); + } + return etcdNode; + } + + private Pair provisionKubernetesClusterControlVm(final Network network, final String publicIpAddress, final List etcdIps, + final Long domainId, final Long accountId, Long asNumber) throws ManagementServerException, InsufficientCapacityException, ResourceUnavailableException { UserVm k8sControlVM = null; - k8sControlVM = createKubernetesControlNode(network, publicIpAddress); - addKubernetesClusterVm(kubernetesCluster.getId(), k8sControlVM.getId(), true); + Pair k8sControlVMAndControlIP; + k8sControlVMAndControlIP = createKubernetesControlNode(network, publicIpAddress, etcdIps, domainId, accountId, asNumber); + k8sControlVM = k8sControlVMAndControlIP.first(); + addKubernetesClusterVm(kubernetesCluster.getId(), k8sControlVM.getId(), true, false, false, false); if (kubernetesCluster.getNodeRootDiskSize() > 0) { resizeNodeVolume(k8sControlVM); } - startKubernetesVM(k8sControlVM); + startKubernetesVM(k8sControlVM, domainId, accountId, CONTROL); k8sControlVM = userVmDao.findById(k8sControlVM.getId()); if (k8sControlVM == null) { throw new ManagementServerException(String.format("Failed to provision control VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); @@ -331,21 +525,22 @@ private UserVm provisionKubernetesClusterControlVm(final Network network, final if (logger.isInfoEnabled()) { logger.info("Provisioned the control VM: {} in to the Kubernetes cluster: {}", k8sControlVM, kubernetesCluster); } - return k8sControlVM; + return new Pair<>(k8sControlVM, k8sControlVMAndControlIP.second()); } - private List provisionKubernetesClusterAdditionalControlVms(final String publicIpAddress) throws + private List 
provisionKubernetesClusterAdditionalControlVms(final String controlIpAddress, final Long domainId, + final Long accountId) throws InsufficientCapacityException, ManagementServerException, ResourceUnavailableException { List additionalControlVms = new ArrayList<>(); if (kubernetesCluster.getControlNodeCount() > 1) { for (int i = 1; i < kubernetesCluster.getControlNodeCount(); i++) { UserVm vm = null; - vm = createKubernetesAdditionalControlNode(publicIpAddress, i); - addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId(), true); + vm = createKubernetesAdditionalControlNode(controlIpAddress, i, domainId, accountId); + addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId(), true, false, false, false); if (kubernetesCluster.getNodeRootDiskSize() > 0) { resizeNodeVolume(vm); } - startKubernetesVM(vm); + startKubernetesVM(vm, domainId, accountId, CONTROL); vm = userVmDao.findById(vm.getId()); if (vm == null) { throw new ManagementServerException(String.format("Failed to provision additional control VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); @@ -359,6 +554,35 @@ private List provisionKubernetesClusterAdditionalControlVms(final String return additionalControlVms; } + private Pair, List> provisionEtcdCluster(final Network network, final Long domainId, final Long accountId) + throws InsufficientCapacityException, ResourceUnavailableException, ManagementServerException { + List etcdNodeVms = new ArrayList<>(); + List etcdNodeGuestIps = getEtcdNodeGuestIps(network, kubernetesCluster.getEtcdNodeCount()); + List etcdHostnames = getEtcdNodeHostnames(); + for (int i = 0; i < kubernetesCluster.getEtcdNodeCount(); i++) { + UserVm vm = createEtcdNode(etcdNodeGuestIps, etcdHostnames, i, domainId, accountId); + addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId(), false, false, true, true); + startKubernetesVM(vm, domainId, accountId, ETCD); + vm = userVmDao.findById(vm.getId()); + if (vm == null) { + throw new 
ManagementServerException(String.format("Failed to provision etcd node VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); + } + etcdNodeVms.add(vm); + if (logger.isInfoEnabled()) { + logger.info(String.format("Provisioned etcd node VM : %s in the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); + } + } + return new Pair<>(etcdNodeVms, etcdNodeGuestIps); + } + + private List getEtcdNodeGuestIps(final Network network, final long etcdNodeCount) { + List guestIps = new ArrayList<>(); + for (int i = 1; i <= etcdNodeCount; i++) { + guestIps.add(new Network.IpAddresses(ipAddressManager.acquireGuestIpAddress(network, null), null)); + } + return guestIps; + } + private Network startKubernetesClusterNetwork(final DeployDestination destination) throws ManagementServerException { final ReservationContext context = new ReservationContextImpl(null, null, null, owner); Network network = networkDao.findById(kubernetesCluster.getNetworkId()); @@ -406,7 +630,40 @@ protected void setupKubernetesClusterNetworkRules(Network network, List setupKubernetesClusterIsolatedNetworkRules(publicIp, network, clusterVMIds, true); } - private void startKubernetesClusterVMs() { + protected void setupKubernetesEtcdNetworkRules(List etcdVms, Network network) throws ManagementServerException, ResourceUnavailableException { + if (!Network.GuestType.Isolated.equals(network.getGuestType())) { + if (logger.isDebugEnabled()) { + logger.debug(String.format("Network : %s for Kubernetes cluster : %s is not an isolated network, therefore, no need for network rules", network.getName(), kubernetesCluster.getName())); + } + } + List etcdVmIds = etcdVms.stream().map(UserVm::getId).collect(Collectors.toList()); + Integer startPort = KubernetesClusterService.KubernetesEtcdNodeStartPort.value(); + IpAddress publicIp = ipAddressDao.findByIpAndDcId(kubernetesCluster.getZoneId(), publicIpAddress); + for (int i = 0; i < etcdVmIds.size(); i++) { + int 
etcdStartPort = startPort + i; + try { + if (Objects.isNull(network.getVpcId())) { + provisionFirewallRules(publicIp, owner, etcdStartPort, etcdStartPort); + } else if (network.getNetworkACLId() != NetworkACL.DEFAULT_ALLOW) { + try { + provisionVpcTierAllowPortACLRule(network, ETCD_NODE_CLIENT_REQUEST_PORT, ETCD_NODE_CLIENT_REQUEST_PORT); + if (logger.isInfoEnabled()) { + logger.info(String.format("Provisioned ACL rule to open up port %d on %s for etcd nodes for Kubernetes cluster %s", + ETCD_NODE_CLIENT_REQUEST_PORT, publicIpAddress, kubernetesCluster.getName())); + } + } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | InvalidParameterValueException | PermissionDeniedException e) { + throw new ManagementServerException(String.format("Failed to provision ACL rules for etcd client access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + } + } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | + NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to provision firewall rules for etcd nodes for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + provisionPublicIpPortForwardingRule(publicIp, network, owner, etcdVmIds.get(i), etcdStartPort, DEFAULT_SSH_PORT); + } + } + + private void startKubernetesClusterVMs(Long domainId, Long accountId) { List clusterVms = getKubernetesClusterVMs(); for (final UserVm vm : clusterVms) { if (vm == null) { @@ -414,7 +671,9 @@ private void startKubernetesClusterVMs() { } try { resizeNodeVolume(vm); - startKubernetesVM(vm); + KubernetesClusterVmMapVO map = kubernetesClusterVmMapDao.findByVmId(vm.getId()); + KubernetesServiceHelper.KubernetesClusterNodeType nodeType = getNodeTypeFromClusterVMMapRecord(map); + startKubernetesVM(vm, domainId, accountId, nodeType); } catch (ManagementServerException ex) { logger.warn("Failed to start VM: {} in Kubernetes cluster: {} due to {}", vm, 
kubernetesCluster, ex); // don't bail out here. proceed further to stop the reset of the VM's @@ -428,6 +687,16 @@ private void startKubernetesClusterVMs() { } } + private KubernetesServiceHelper.KubernetesClusterNodeType getNodeTypeFromClusterVMMapRecord(KubernetesClusterVmMapVO map) { + if (map.isControlNode()) { + return CONTROL; + } else if (map.isEtcdNode()) { + return ETCD; + } else { + return WORKER; + } + } + private boolean isKubernetesClusterKubeConfigAvailable(final long timeoutTime) { if (StringUtils.isEmpty(publicIpAddress)) { KubernetesClusterDetailsVO kubeConfigDetail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), "kubeConfigData"); @@ -468,7 +737,7 @@ private void updateKubernetesClusterEntryEndpoint() { kubernetesClusterDao.update(kubernetesCluster.getId(), kubernetesClusterVO); } - public boolean startKubernetesClusterOnCreate() { + public boolean startKubernetesClusterOnCreate(Long domainId, Long accountId, Long asNumber) throws ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { init(); if (logger.isInfoEnabled()) { logger.info("Starting Kubernetes cluster: {}", kubernetesCluster); @@ -477,7 +746,9 @@ public boolean startKubernetesClusterOnCreate() { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested); DeployDestination dest = null; try { - dest = plan(); + VMTemplateVO clusterTemplate = templateDao.findById(kubernetesCluster.getTemplateId()); + Map destinationMap = planKubernetesCluster(domainId, accountId, clusterTemplate.getHypervisorType()); + dest = destinationMap.get(WORKER.name()); } catch (InsufficientCapacityException e) { logTransitStateAndThrow(Level.ERROR, String.format("Provisioning the cluster failed due to insufficient capacity in the Kubernetes cluster: %s", kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } @@ -499,16 +770,28 @@ public boolean startKubernetesClusterOnCreate() { 
logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster : %s as no public IP found for the cluster" , kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); } // Allow account creating the kubernetes cluster to access systemVM template - LaunchPermissionVO launchPermission = new LaunchPermissionVO(clusterTemplate.getId(), owner.getId()); - launchPermissionDao.persist(launchPermission); + if (isDefaultTemplateUsed()) { + LaunchPermissionVO launchPermission = new LaunchPermissionVO(kubernetesCluster.getTemplateId(), owner.getId()); + launchPermissionDao.persist(launchPermission); + } + + List etcdVms = new ArrayList<>(); + List etcdGuestNodeIps = new ArrayList<>(); + if (kubernetesCluster.getEtcdNodeCount() > 0) { + Pair, List> etcdNodesAndIps = provisionEtcdCluster(network, domainId, accountId); + etcdVms = etcdNodesAndIps.first(); + etcdGuestNodeIps = etcdNodesAndIps.second(); + } List clusterVMs = new ArrayList<>(); + Pair k8sControlVMAndIp = new Pair<>(null, null); UserVm k8sControlVM = null; try { - k8sControlVM = provisionKubernetesClusterControlVm(network, publicIpAddress); + k8sControlVMAndIp = provisionKubernetesClusterControlVm(network, publicIpAddress, etcdGuestNodeIps, domainId, accountId, asNumber); } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { logTransitStateAndThrow(Level.ERROR, String.format("Provisioning the control VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } + k8sControlVM = k8sControlVMAndIp.first(); clusterVMs.add(k8sControlVM); if (StringUtils.isEmpty(publicIpAddress)) { publicIpSshPort = getKubernetesClusterServerIpSshPort(k8sControlVM); @@ -518,13 +801,13 @@ public boolean startKubernetesClusterOnCreate() { } } try { - List additionalControlVMs = 
provisionKubernetesClusterAdditionalControlVms(publicIpAddress); + List additionalControlVMs = provisionKubernetesClusterAdditionalControlVms(k8sControlVMAndIp.second(), domainId, accountId); clusterVMs.addAll(additionalControlVMs); } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { logTransitStateAndThrow(Level.ERROR, String.format("Provisioning additional control VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } try { - List nodeVMs = provisionKubernetesClusterNodeVms(kubernetesCluster.getNodeCount(), publicIpAddress); + List nodeVMs = provisionKubernetesClusterNodeVms(kubernetesCluster.getNodeCount(), k8sControlVMAndIp.second(), domainId, accountId); clusterVMs.addAll(nodeVMs); } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { logTransitStateAndThrow(Level.ERROR, String.format("Provisioning node VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); @@ -537,6 +820,12 @@ public boolean startKubernetesClusterOnCreate() { } catch (ManagementServerException e) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster : %s, unable to setup network rules", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } + try { + setupKubernetesEtcdNetworkRules(etcdVms, network); + } catch (ManagementServerException e) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster : %s, unable to setup network rules for etcd nodes", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); + } + attachIsoKubernetesVMs(etcdVms); attachIsoKubernetesVMs(clusterVMs); if 
(!KubernetesClusterUtil.isKubernetesClusterControlVmRunning(kubernetesCluster, publicIpAddress, publicIpSshPort.second(), startTimeoutTime)) { String msg = String.format("Failed to setup Kubernetes cluster : %s is not in usable state as the system is unable to access control node VMs of the cluster", kubernetesCluster.getName()); @@ -574,14 +863,16 @@ public boolean startKubernetesClusterOnCreate() { return true; } - public boolean startStoppedKubernetesCluster() throws CloudRuntimeException { + + + public boolean startStoppedKubernetesCluster(Long domainId, Long accountId) throws CloudRuntimeException { init(); if (logger.isInfoEnabled()) { logger.info("Starting Kubernetes cluster: {}", kubernetesCluster); } final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000; stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested); - startKubernetesClusterVMs(); + startKubernetesClusterVMs(domainId, accountId); try { InetAddress address = InetAddress.getByName(new URL(kubernetesCluster.getEndpoint()).getHost()); } catch (MalformedURLException | UnknownHostException ex) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index ab3121f207b7..4c2725fc2a2f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -20,7 +20,10 @@ import java.io.File; import java.util.ArrayList; import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO; import 
org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Level; @@ -40,7 +43,7 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorker { - private List clusterVMs = new ArrayList<>(); + protected List clusterVMs = new ArrayList<>(); private KubernetesSupportedVersion upgradeVersion; private final String upgradeScriptFilename = "upgrade-kubernetes.sh"; private File upgradeScriptFile; @@ -65,12 +68,12 @@ private Pair runInstallScriptOnVM(final UserVm vm, final int in String nodeAddress = (index > 0 && sshPort == 22) ? vm.getPrivateIpAddress() : publicIpAddress; SshHelper.scpTo(nodeAddress, nodeSshPort, getControlNodeLoginUser(), sshKeyFile, null, "~/", upgradeScriptFile.getAbsolutePath(), "0755"); - String cmdStr = String.format("sudo ./%s %s %s %s %s", + String cmdStr = String.format("sudo ./%s %s %s %s %s %s", upgradeScriptFile.getName(), upgradeVersion.getSemanticVersion(), index == 0 ? "true" : "false", KubernetesVersionManagerImpl.compareSemanticVersions(upgradeVersion.getSemanticVersion(), "1.15.0") < 0 ? 
"true" : "false", - Hypervisor.HypervisorType.VMware.equals(vm.getHypervisorType())); + Hypervisor.HypervisorType.VMware.equals(vm.getHypervisorType()), Objects.isNull(kubernetesCluster.getCniConfigId())); return SshHelper.sshExecute(nodeAddress, nodeSshPort, getControlNodeLoginUser(), sshKeyFile, null, cmdStr, 10000, 10000, 10 * 60 * 1000); @@ -144,7 +147,7 @@ private void upgradeKubernetesClusterNodes() { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to get control Kubernetes node on VM : %s in ready state", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } } - if (!KubernetesClusterUtil.clusterNodeVersionMatches(upgradeVersion.getSemanticVersion(), publicIpAddress, sshPort, getControlNodeLoginUser(), getManagementServerSshPublicKeyFile(), hostName, upgradeTimeoutTime, 15000)) { + if (!KubernetesClusterUtil.clusterNodeVersionMatches(upgradeVersion.getSemanticVersion(), publicIpAddress, sshPort, getControlNodeLoginUser(), getManagementServerSshPublicKeyFile(), hostName, upgradeTimeoutTime, 15000, vm.getId(), kubernetesClusterVmMapDao)) { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to get Kubernetes node on VM : %s upgraded to version %s", kubernetesCluster.getName(), vm.getDisplayName(), upgradeVersion.getSemanticVersion()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } if (logger.isInfoEnabled()) { @@ -169,6 +172,7 @@ public boolean upgradeCluster() throws CloudRuntimeException { if (CollectionUtils.isEmpty(clusterVMs)) { logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster: %s, unable to retrieve VMs for cluster", kubernetesCluster)); } + filterOutManualUpgradeNodesFromClusterUpgrade(); retrieveScriptFiles(); stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.UpgradeRequested); 
attachIsoKubernetesVMs(clusterVMs, upgradeVersion); @@ -184,4 +188,14 @@ public boolean upgradeCluster() throws CloudRuntimeException { } return updated; } + + protected void filterOutManualUpgradeNodesFromClusterUpgrade() { + if (CollectionUtils.isEmpty(clusterVMs)) { + return; + } + clusterVMs = clusterVMs.stream().filter(x -> { + KubernetesClusterVmMapVO mapVO = kubernetesClusterVmMapDao.getClusterMapFromVmId(x.getId()); + return mapVO != null && !mapVO.isManualUpgrade(); + }).collect(Collectors.toList()); + } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java index eaeccd09f801..71b90ef2e6a0 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java @@ -16,6 +16,7 @@ // under the License. 
package com.cloud.kubernetes.cluster.dao; +import com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType; import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO; import com.cloud.utils.db.GenericDao; @@ -31,5 +32,7 @@ public interface KubernetesClusterVmMapDao extends GenericDao listByClusterIdAndVmType(long clusterId, KubernetesClusterNodeType nodeType); + KubernetesClusterVmMapVO findByVmId(long vmId); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java index 5e465848e1c1..9607783b50e4 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java @@ -18,6 +18,7 @@ import java.util.List; +import com.cloud.kubernetes.cluster.KubernetesServiceHelper; import org.springframework.stereotype.Component; import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO; @@ -26,6 +27,9 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.CONTROL; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.ETCD; + @Component public class KubernetesClusterVmMapDaoImpl extends GenericDaoBase implements KubernetesClusterVmMapDao { @@ -37,6 +41,8 @@ public KubernetesClusterVmMapDaoImpl() { clusterIdSearch = createSearchBuilder(); clusterIdSearch.and("clusterId", clusterIdSearch.entity().getClusterId(), SearchCriteria.Op.EQ); clusterIdSearch.and("vmIdsIN", clusterIdSearch.entity().getVmId(), SearchCriteria.Op.IN); + clusterIdSearch.and("controlNode", clusterIdSearch.entity().isControlNode(), 
SearchCriteria.Op.EQ); + clusterIdSearch.and("etcdNode", clusterIdSearch.entity().isEtcdNode(), SearchCriteria.Op.EQ); clusterIdSearch.done(); vmIdSearch = createSearchBuilder(); @@ -82,6 +88,23 @@ public int removeByClusterId(long clusterId) { return remove(sc); } + @Override + public List listByClusterIdAndVmType(long clusterId, KubernetesServiceHelper.KubernetesClusterNodeType nodeType) { + SearchCriteria sc = clusterIdSearch.create(); + sc.setParameters("clusterId", clusterId); + if (CONTROL == nodeType) { + sc.setParameters("controlNode", true); + sc.setParameters("etcdNode", false); + } else if (ETCD == nodeType) { + sc.setParameters("controlNode", false); + sc.setParameters("etcdNode", true); + } else { + sc.setParameters("controlNode", false); + sc.setParameters("etcdNode", false); + } + return listBy(sc); + } + @Override public KubernetesClusterVmMapVO findByVmId(long vmId) { SearchBuilder sb = createSearchBuilder(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java index 74e8b0c9b23e..2c991c944586 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java @@ -31,6 +31,8 @@ import javax.net.ssl.SSLContext; import javax.net.ssl.TrustManager; +import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO; +import com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDao; import org.apache.cloudstack.utils.security.SSLUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Logger; @@ -215,10 +217,10 @@ public static int getKubernetesClusterReadyNodesCount(final KubernetesCluster ku final int port, final String user, final File sshKeyFile) throws 
Exception { Pair result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, - "sudo /opt/bin/kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l", + "sudo /opt/bin/kubectl get nodes | grep -w 'Ready' | wc -l", 10000, 10000, 20000); - if (result.first()) { - return Integer.parseInt(result.second().trim().replace("\"", "")); + if (Boolean.TRUE.equals(result.first())) { + return Integer.parseInt(result.second().trim().replace("\"", "")) + kubernetesCluster.getEtcdNodeCount().intValue(); } else { if (LOGGER.isDebugEnabled()) { LOGGER.debug(String.format("Failed to retrieve ready nodes for Kubernetes cluster %s. Output: %s", kubernetesCluster, result.second())); @@ -331,7 +333,7 @@ public static boolean clusterNodeVersionMatches(final String version, final String ipAddress, final int port, final String user, final File sshKeyFile, final String hostName, - final long timeoutTime, final long waitDuration) { + final long timeoutTime, final long waitDuration, final long vmId, KubernetesClusterVmMapDao vmMapDao) { int retry = 10; while (System.currentTimeMillis() < timeoutTime && retry-- > 0) { if (LOGGER.isDebugEnabled()) { @@ -343,7 +345,13 @@ public static boolean clusterNodeVersionMatches(final String version, user, sshKeyFile, null, String.format(CLUSTER_NODE_VERSION_COMMAND, hostName.toLowerCase()), 10000, 10000, 20000); - if (clusterNodeVersionMatches(result, version)) { + Pair clusterVersionMatchesAndValue = clusterNodeVersionMatches(result, version); + if (Boolean.TRUE.equals(clusterVersionMatchesAndValue.first())) { + KubernetesClusterVmMapVO vmMapVO = vmMapDao.getClusterMapFromVmId(vmId); + String newNodeVersion = clusterVersionMatchesAndValue.second(); + LOGGER.debug(String.format("Updating node %s Kubernetes version to %s", hostName, newNodeVersion)); + vmMapVO.setNodeVersion(newNodeVersion); + vmMapDao.update(vmMapVO.getId(), vmMapVO); return true; } } catch (Exception e) { @@ -360,11 +368,11 @@ public static boolean 
clusterNodeVersionMatches(final String version, return false; } - protected static boolean clusterNodeVersionMatches(final Pair result, final String version) { + protected static Pair clusterNodeVersionMatches(final Pair result, final String version) { if (result == null || Boolean.FALSE.equals(result.first()) || StringUtils.isBlank(result.second())) { - return false; + return new Pair<>(false, null); } String response = result.second(); - return response.contains(String.format("v%s", version)); + return new Pair<>(response.contains(String.format("v%s", version)), response); } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/AddNodesToKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/AddNodesToKubernetesClusterCmd.java new file mode 100644 index 000000000000..09cbefe0bf8e --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/AddNodesToKubernetesClusterCmd.java @@ -0,0 +1,136 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.kubernetes.cluster; + +import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes; +import com.cloud.kubernetes.cluster.KubernetesClusterService; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.KubernetesClusterResponse; +import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.commons.lang3.BooleanUtils; + +import javax.inject.Inject; + +import java.util.List; + +@APICommand(name = "addNodesToKubernetesCluster", + description = "Add nodes as workers to an existing CKS cluster. ", + responseObject = KubernetesClusterResponse.class, + since = "4.21.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class AddNodesToKubernetesClusterCmd extends BaseAsyncCmd { + + @Inject + public KubernetesClusterService kubernetesClusterService; + + @Parameter(name = ApiConstants.NODE_IDS, + type = CommandType.LIST, + collectionType = CommandType.UUID, + entityType= UserVmResponse.class, + description = "comma separated list of (external) node (physical or virtual machines) IDs that need to be " + + "added as worker nodes to an existing managed Kubernetes cluster (CKS)", + required = true, + since = "4.21.0") + private List nodeIds; + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, required = true, + entityType = KubernetesClusterResponse.class, + description = "the ID of the Kubernetes cluster", since = "4.21.0") + private Long clusterId; + + @Parameter(name = ApiConstants.MOUNT_CKS_ISO_ON_VR, type =
CommandType.BOOLEAN, + description = "(optional) Vmware only, uses the CKS cluster network VR to mount the CKS ISO", + since = "4.21.0") + private Boolean mountCksIsoOnVr; + + @Parameter(name = ApiConstants.MANUAL_UPGRADE, type = CommandType.BOOLEAN, + description = "(optional) indicates if the node is marked for manual upgrade and excluded from the Kubernetes cluster upgrade operation", + since = "4.21.0") + private Boolean manualUpgrade; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public List getNodeIds() { + return nodeIds; + } + + public Long getClusterId() { + return clusterId; + } + + public boolean isMountCksIsoOnVr() { + return BooleanUtils.isTrue(mountCksIsoOnVr); + } + + public boolean isManualUpgrade() { + return BooleanUtils.isTrue(manualUpgrade); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getEventType() { + return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_NODES_ADD; + } + + @Override + public String getEventDescription() { + return String.format("Adding %s nodes to the Kubernetes cluster with ID: %s", nodeIds.size(), clusterId); + } + + @Override + public void execute() { + try { + kubernetesClusterService.addNodesToKubernetesCluster(this); + final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getClusterId()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (Exception e) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to add nodes to cluster ID %s due to: %s", + getClusterId(), e.getLocalizedMessage()), e); + } + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + 
@Override + public ApiCommandResourceType getApiResourceType() { + return ApiCommandResourceType.KubernetesCluster; + } + + @Override + public Long getApiResourceId() { + return getClusterId(); + } + +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java index 721cb47867b0..10db364cef3f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java @@ -16,9 +16,26 @@ // under the License. package org.apache.cloudstack.api.command.user.kubernetes.cluster; +import java.security.InvalidParameterException; +import java.util.Map; +import java.util.Objects; + import javax.inject.Inject; +import com.cloud.dc.ASNumberVO; +import com.cloud.dc.dao.ASNumberDao; +import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.kubernetes.cluster.KubernetesServiceHelper; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.offering.NetworkOffering; +import com.cloud.offerings.NetworkOfferingVO; +import com.cloud.offerings.dao.NetworkOfferingDao; +import com.cloud.utils.Pair; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -36,8 +53,11 @@ import org.apache.cloudstack.api.response.NetworkResponse; import 
org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.ServiceOfferingResponse; +import org.apache.cloudstack.api.response.UserDataResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import org.apache.commons.lang3.StringUtils; import com.cloud.kubernetes.cluster.KubernetesCluster; @@ -58,6 +78,16 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd { @Inject public KubernetesClusterService kubernetesClusterService; + @Inject + protected KubernetesServiceHelper kubernetesClusterHelper; + @Inject + private ConfigurationDao configurationDao; + @Inject + private NetworkDao networkDao; + @Inject + private NetworkOfferingDao networkOfferingDao; + @Inject + private ASNumberDao asNumberDao; ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -81,7 +111,23 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd { @ACL(accessType = AccessType.UseEntry) @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class, description = "the ID of the service offering for the virtual machines in the cluster.") - private Long serviceOfferingId; + protected Long serviceOfferingId; + + @ACL(accessType = AccessType.UseEntry) + @Parameter(name = ApiConstants.NODE_TYPE_OFFERING_MAP, type = CommandType.MAP, + description = "(Optional) Node Type to Service Offering ID mapping. If provided, it overrides the serviceofferingid parameter") + protected Map> serviceOfferingNodeTypeMap; + + @ACL(accessType = AccessType.UseEntry) + @Parameter(name = ApiConstants.NODE_TYPE_TEMPLATE_MAP, type = CommandType.MAP, + description = "(Optional) Node Type to Template ID mapping. 
If provided, it overrides the default template: System VM template") + protected Map> templateNodeTypeMap; + + @ACL(accessType = AccessType.UseEntry) + @Parameter(name = ApiConstants.ETCD_NODES, type = CommandType.LONG, + description = "(Optional) Number of Kubernetes cluster etcd nodes, default is 0. " + + "In case the number is greater than 0, etcd nodes are separate from master nodes and are provisioned accordingly") + protected Long etcdNodes; @ACL(accessType = AccessType.UseEntry) @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional account for the" + @@ -90,7 +136,8 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd { @ACL(accessType = AccessType.UseEntry) @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, - description = "an optional domainId for the virtual machine. If the account parameter is used, domainId must also be used.") + description = "an optional domainId for the virtual machine. If the account parameter is used, domainId must also be used. " + + "Hosts dedicated to the specified domain will be used for deploying the cluster") private Long domainId; @ACL(accessType = AccessType.UseEntry) @@ -144,6 +191,21 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd { @Parameter(name = ApiConstants.CLUSTER_TYPE, type = CommandType.STRING, description = "type of the cluster: CloudManaged, ExternalManaged. The default value is CloudManaged.", since="4.19.0") private String clusterType; + @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, description = "the hypervisor on which the CKS cluster is to be deployed.
This is required if the zone in which the CKS cluster is being deployed has clusters with different hypervisor types.") + private String hypervisor; + + @Parameter(name = ApiConstants.CNI_CONFIG_ID, type = CommandType.UUID, entityType = UserDataResponse.class, description = "the ID of the CNI configuration registered as userdata", since = "4.21.0") + private Long cniConfigId; + + @Parameter(name = ApiConstants.CNI_CONFIG_DETAILS, type = CommandType.MAP, + description = "used to specify the parameter values for the variables in userdata. " + + "Example: cniconfigdetails[0].key=accesskey&cniconfigdetails[0].value=s389ddssaa&" + + "cniconfigdetails[1].key=secretkey&cniconfigdetails[1].value=8dshfsss", since = "4.21.0") + private Map cniConfigDetails; + + @Parameter(name=ApiConstants.AS_NUMBER, type=CommandType.LONG, description="the AS Number of the network") + private Long asNumber; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -202,6 +264,10 @@ public Long getControlNodes() { return controlNodes; } + public long getEtcdNodes() { + return etcdNodes == null ? 0 : etcdNodes; + } + public String getExternalLoadBalancerIpAddress() { return externalLoadBalancerIpAddress; } @@ -240,6 +306,67 @@ public String getClusterType() { return clusterType; } + public Map getServiceOfferingNodeTypeMap() { + return kubernetesClusterHelper.getServiceOfferingNodeTypeMap(serviceOfferingNodeTypeMap); + } + + public Map getTemplateNodeTypeMap() { + return kubernetesClusterHelper.getTemplateNodeTypeMap(templateNodeTypeMap); + } + + public Hypervisor.HypervisorType getHypervisorType() { + return hypervisor == null ?
null : Hypervisor.HypervisorType.getType(hypervisor); + } + + private Pair getKubernetesNetworkOffering(Long networkId) { + if (Objects.isNull(networkId)) { + ConfigurationVO configurationVO = configurationDao.findByName(KubernetesClusterService.KubernetesClusterNetworkOffering.key()); + String offeringName = configurationVO.getValue(); + return new Pair<>(networkOfferingDao.findByUniqueName(offeringName), null); + } else { + NetworkVO networkVO = networkDao.findById(getNetworkId()); + if (networkVO == null) { + throw new InvalidParameterException(String.format("Failed to find network with id: %s", getNetworkId())); + } + NetworkOfferingVO offeringVO = networkOfferingDao.findById(networkVO.getNetworkOfferingId()); + return new Pair<>(offeringVO, networkVO); + } + } + + public Long getAsNumber() { + Pair offeringAndNetwork = getKubernetesNetworkOffering(getNetworkId()); + NetworkOfferingVO offering = offeringAndNetwork.first(); + NetworkVO networkVO = offeringAndNetwork.second(); + + if (offering == null) { + throw new CloudRuntimeException("Failed to find kubernetes network offering"); + } + ASNumberVO asNumberVO = null; + if (Objects.isNull(getNetworkId()) && !offering.isForVpc()) { + if (Boolean.TRUE.equals(NetworkOffering.RoutingMode.Dynamic.equals(offering.getRoutingMode()) && offering.isSpecifyAsNumber()) && asNumber == null) { + throw new InvalidParameterException("AsNumber must be specified as network offering has specifyasnumber set"); + } + } else if (Objects.nonNull(networkVO)) { + if (offering.isForVpc()) { + asNumberVO = asNumberDao.findByZoneAndVpcId(getZoneId(), networkVO.getVpcId()); + } else { + asNumberVO = asNumberDao.findByZoneAndNetworkId(getZoneId(), getNetworkId()); + } + } + if (Objects.nonNull(asNumberVO)) { + return asNumberVO.getAsNumber(); + } + return asNumber; + } + + public Map getCniConfigDetails() { + return convertDetailsToMap(cniConfigDetails); + } + + public Long getCniConfigId() { + return cniConfigId; + } + 
///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -290,7 +417,7 @@ public void execute() { KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getEntityId()); response.setResponseName(getCommandName()); setResponseObject(response); - } catch (CloudRuntimeException e) { + } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/RemoveNodesFromKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/RemoveNodesFromKubernetesClusterCmd.java new file mode 100644 index 000000000000..fd089ede9ecb --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/RemoveNodesFromKubernetesClusterCmd.java @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.kubernetes.cluster; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes; +import com.cloud.kubernetes.cluster.KubernetesClusterService; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.KubernetesClusterResponse; +import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import javax.inject.Inject; +import java.util.List; + +@APICommand(name = "removeNodesFromKubernetesCluster", + description = "Removes external nodes from a CKS cluster. 
", + responseObject = KubernetesClusterResponse.class, + since = "4.21.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class RemoveNodesFromKubernetesClusterCmd extends BaseAsyncCmd { + + @Inject + public KubernetesClusterService kubernetesClusterService; + + protected static final Logger LOGGER = LogManager.getLogger(RemoveNodesFromKubernetesClusterCmd.class); + + @Parameter(name = ApiConstants.NODE_IDS, + type = CommandType.LIST, + collectionType = CommandType.UUID, + entityType= UserVmResponse.class, + description = "comma separated list of node (physical or virtual machines) IDs that need to be" + + "removed from the Kubernetes cluster (CKS)", + required = true, + since = "4.21.0") + private List nodeIds; + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, required = true, + entityType = KubernetesClusterResponse.class, + description = "the ID of the Kubernetes cluster", since = "4.21.0") + private Long clusterId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public List getNodeIds() { + return nodeIds; + } + + public Long getClusterId() { + return clusterId; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getEventType() { + return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_NODES_REMOVE; + } + + @Override + public String getEventDescription() { + return String.format("Removing %s nodes from the Kubernetes Cluster with ID: %s", nodeIds.size(), clusterId); + } + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + try { + if 
(!kubernetesClusterService.removeNodesFromKubernetesCluster(this)) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to remove node(s) from Kubernetes cluster ID: %d", getClusterId())); + } + final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getClusterId()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (Exception e) { + String err = String.format("Failed to remove node(s) from Kubernetes cluster ID: %d due to: %s", getClusterId(), e.getMessage()); + LOGGER.error(err, e); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, err); + } + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + @Override + public ApiCommandResourceType getApiResourceType() { + return ApiCommandResourceType.KubernetesCluster; + } + + @Override + public Long getApiResourceId() { + return getClusterId(); + } +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java index 59c2bebf961d..9afd4b8c3240 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java @@ -17,9 +17,11 @@ package org.apache.cloudstack.api.command.user.kubernetes.cluster; import java.util.List; +import java.util.Map; import javax.inject.Inject; +import com.cloud.kubernetes.cluster.KubernetesServiceHelper; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.api.ACL; @@ -54,6 
+56,8 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd { @Inject public KubernetesClusterService kubernetesClusterService; + @Inject + protected KubernetesServiceHelper kubernetesClusterHelper; ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -68,6 +72,11 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd { description = "the ID of the service offering for the virtual machines in the cluster.") private Long serviceOfferingId; + @ACL(accessType = SecurityChecker.AccessType.UseEntry) + @Parameter(name = ApiConstants.NODE_TYPE_OFFERING_MAP, type = CommandType.MAP, + description = "(Optional) Node Type to Service Offering ID mapping. If provided, it overrides the serviceofferingid parameter") + protected Map> serviceOfferingNodeTypeMap; + @Parameter(name=ApiConstants.SIZE, type = CommandType.LONG, description = "number of Kubernetes cluster nodes") private Long clusterSize; @@ -103,6 +112,10 @@ public Long getServiceOfferingId() { return serviceOfferingId; } + public Map getServiceOfferingNodeTypeMap() { + return kubernetesClusterHelper.getServiceOfferingNodeTypeMap(this.serviceOfferingNodeTypeMap); + } + public Long getClusterSize() { return clusterSize; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java index bfe00ca27b28..50e8202b8b0a 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java @@ -18,6 +18,9 @@ import javax.inject.Inject; +import 
com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.ResourceUnavailableException; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -102,7 +105,8 @@ public void execute() throws ServerApiException, ConcurrentOperationException { final KubernetesClusterResponse response = kubernetesClusterService.createKubernetesClusterResponse(getId()); response.setResponseName(getCommandName()); setResponseObject(response); - } catch (CloudRuntimeException ex) { + } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | + InsufficientCapacityException ex) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java index 8074aef9effa..b811f4f9dcbd 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java @@ -18,6 +18,7 @@ import java.util.Date; import java.util.List; +import java.util.Map; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponseWithAnnotations; @@ -58,6 +59,34 @@ public class KubernetesClusterResponse extends BaseResponseWithAnnotations imple @Param(description = "the name of the service offering of the Kubernetes cluster") private String serviceOfferingName; + @SerializedName(ApiConstants.WORKER_SERVICE_OFFERING_ID) + @Param(description = "the ID of the service offering of the worker nodes on the Kubernetes cluster") + private String 
workerOfferingId; + + @SerializedName(ApiConstants.WORKER_SERVICE_OFFERING_NAME) + @Param(description = "the name of the service offering of the worker nodes on the Kubernetes cluster") + private String workerOfferingName; + + @SerializedName(ApiConstants.CONTROL_SERVICE_OFFERING_ID) + @Param(description = "the ID of the service offering of the control nodes on the Kubernetes cluster") + private String controlOfferingId; + + @SerializedName(ApiConstants.CONTROL_SERVICE_OFFERING_NAME) + @Param(description = "the name of the service offering of the control nodes on the Kubernetes cluster") + private String controlOfferingName; + + @SerializedName(ApiConstants.ETCD_SERVICE_OFFERING_ID) + @Param(description = "the ID of the service offering of the etcd nodes on the Kubernetes cluster") + private String etcdOfferingId; + + @SerializedName(ApiConstants.ETCD_SERVICE_OFFERING_NAME) + @Param(description = "the name of the service offering of the etcd nodes on the Kubernetes cluster") + private String etcdOfferingName; + + @SerializedName(ApiConstants.ETCD_NODES) + @Param(description = "the number of the etcd nodes on the Kubernetes cluster") + private Long etcdNodes; + @SerializedName(ApiConstants.TEMPLATE_ID) @Param(description = "the ID of the template of the Kubernetes cluster") private String templateId; @@ -106,6 +135,14 @@ public class KubernetesClusterResponse extends BaseResponseWithAnnotations imple @Param(description = "keypair details") private String keypair; + @SerializedName(ApiConstants.CNI_CONFIG_ID) + @Param(description = "ID of CNI Configuration associated with the cluster") + private String cniConfigId; + + @SerializedName(ApiConstants.CNI_CONFIG_NAME) + @Param(description = "Name of CNI Configuration associated with the cluster") + private String cniConfigName; + @Deprecated(since = "4.16") @SerializedName(ApiConstants.MASTER_NODES) @Param(description = "the master nodes count for the Kubernetes cluster. 
This parameter is deprecated, please use 'controlnodes' parameter.") @@ -141,7 +178,7 @@ public class KubernetesClusterResponse extends BaseResponseWithAnnotations imple @SerializedName(ApiConstants.VIRTUAL_MACHINES) @Param(description = "the list of virtualmachine associated with this Kubernetes cluster") - private List virtualMachines; + private List virtualMachines; @SerializedName(ApiConstants.IP_ADDRESS) @Param(description = "Public IP Address of the cluster") @@ -151,6 +188,10 @@ public class KubernetesClusterResponse extends BaseResponseWithAnnotations imple @Param(description = "Public IP Address ID of the cluster") private String ipAddressId; + @SerializedName(ApiConstants.ETCD_IPS) + @Param(description = "Public IP Addresses of the etcd nodes") + private Map etcdIps; + @SerializedName(ApiConstants.AUTOSCALING_ENABLED) @Param(description = "Whether autoscaling is enabled for the cluster") private boolean isAutoscalingEnabled; @@ -367,11 +408,67 @@ public void setServiceOfferingName(String serviceOfferingName) { this.serviceOfferingName = serviceOfferingName; } - public void setVirtualMachines(List virtualMachines) { + public String getWorkerOfferingId() { + return workerOfferingId; + } + + public void setWorkerOfferingId(String workerOfferingId) { + this.workerOfferingId = workerOfferingId; + } + + public String getWorkerOfferingName() { + return workerOfferingName; + } + + public void setWorkerOfferingName(String workerOfferingName) { + this.workerOfferingName = workerOfferingName; + } + + public String getControlOfferingId() { + return controlOfferingId; + } + + public void setControlOfferingId(String controlOfferingId) { + this.controlOfferingId = controlOfferingId; + } + + public String getControlOfferingName() { + return controlOfferingName; + } + + public void setControlOfferingName(String controlOfferingName) { + this.controlOfferingName = controlOfferingName; + } + + public String getEtcdOfferingId() { + return etcdOfferingId; + } + + public void 
setEtcdOfferingId(String etcdOfferingId) { + this.etcdOfferingId = etcdOfferingId; + } + + public String getEtcdOfferingName() { + return etcdOfferingName; + } + + public void setEtcdOfferingName(String etcdOfferingName) { + this.etcdOfferingName = etcdOfferingName; + } + + public Long getEtcdNodes() { + return etcdNodes; + } + + public void setEtcdNodes(Long etcdNodes) { + this.etcdNodes = etcdNodes; + } + + public void setVirtualMachines(List virtualMachines) { this.virtualMachines = virtualMachines; } - public List getVirtualMachines() { + public List getVirtualMachines() { return virtualMachines; } @@ -383,6 +480,10 @@ public void setIpAddressId(String ipAddressId) { this.ipAddressId = ipAddressId; } + public void setEtcdIps(Map etcdIps) { + this.etcdIps = etcdIps; + } + public void setAutoscalingEnabled(boolean isAutoscalingEnabled) { this.isAutoscalingEnabled = isAutoscalingEnabled; } @@ -406,4 +507,12 @@ public KubernetesCluster.ClusterType getClusterType() { public void setClusterType(KubernetesCluster.ClusterType clusterType) { this.clusterType = clusterType; } + + public void setCniConfigId(String cniConfigId) { + this.cniConfigId = cniConfigId; + } + + public void setCniConfigName(String cniConfigName) { + this.cniConfigName = cniConfigName; + } } diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/etcd-node.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/etcd-node.yml new file mode 100644 index 000000000000..c380151f49d2 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/etcd-node.yml @@ -0,0 +1,134 @@ +#cloud-config +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +--- +users: + - name: cloud + sudo: ALL=(ALL) NOPASSWD:ALL + shell: /bin/bash + ssh_authorized_keys: + {{ k8s.ssh.pub.key }} + +write_files: + - path: /opt/bin/setup-etcd-node + permissions: '0700' + owner: root:root + content: | + #!/bin/bash -e + + if [[ -f "/home/cloud/success" ]]; then + echo "Already provisioned!" + exit 0 + fi + + ISO_MOUNT_DIR=/mnt/etcddisk + BINARIES_DIR=${ISO_MOUNT_DIR}/ + ATTEMPT_ONLINE_INSTALL=false + setup_complete=false + + OFFLINE_INSTALL_ATTEMPT_SLEEP={{ k8s.install.wait.time }} + MAX_OFFLINE_INSTALL_ATTEMPTS={{ k8s.install.reattempts.count }} + if [[ -z $OFFLINE_INSTALL_ATTEMPT_SLEEP || $OFFLINE_INSTALL_ATTEMPT_SLEEP -eq 0 ]]; then + OFFLINE_INSTALL_ATTEMPT_SLEEP=15 + fi + if [[ -z $MAX_OFFLINE_INSTALL_ATTEMPTS || $MAX_OFFLINE_INSTALL_ATTEMPTS -eq 0 ]]; then + MAX_OFFLINE_INSTALL_ATTEMPTS=100 + fi + offline_attempts=1 + MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3 + EJECT_ISO_FROM_OS={{ k8s.eject.iso }} + crucial_cmd_attempts=1 + iso_drive_path="" + while true; do + if (( "$offline_attempts" > "$MAX_OFFLINE_INSTALL_ATTEMPTS" )); then + echo "Warning: Offline install timed out!" + break + fi + set +e + output=`blkid -o device -t TYPE=iso9660` + set -e + if [ "$output" != "" ]; then + while read -r line; do + if [ ! -d "${ISO_MOUNT_DIR}" ]; then + mkdir "${ISO_MOUNT_DIR}" + fi + retval=0 + set +e + mount -o ro "${line}" "${ISO_MOUNT_DIR}" + retval=$? 
+ set -e + if [ $retval -eq 0 ]; then + if [ -d "$BINARIES_DIR" ]; then + iso_drive_path="${line}" + break + else + umount "${line}" && rmdir "${ISO_MOUNT_DIR}" + fi + fi + done <<< "$output" + fi + if [ -d "$BINARIES_DIR" ]; then + break + fi + echo "Waiting for Binaries directory $BINARIES_DIR to be available, sleeping for $OFFLINE_INSTALL_ATTEMPT_SLEEP seconds, attempt: $offline_attempts" + sleep $OFFLINE_INSTALL_ATTEMPT_SLEEP + offline_attempts=$[$offline_attempts + 1] + done + + if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then + export PATH=$PATH:/opt/bin + fi + + if [ -d "$BINARIES_DIR" ]; then + ### Binaries available offline ### + echo "Installing binaries from ${BINARIES_DIR}" + mkdir -p /opt/bin/ + tar -zxf ${BINARIES_DIR}/etcd/etcd-linux-amd64.tar.gz -C /opt/bin/ + mv /opt/bin/etcd*/etcd* /opt/bin/ + sudo rm -rf /opt/bin/etcd-* + fi + + - path: /etc/systemd/system/etcd.service + permissions: '0755' + owner: root:root + content: | + [Unit] + Description=etcd + + [Service] + Type=exec + ExecStart=/opt/bin/etcd \ + --name {{ etcd.node_name }} \ + --initial-advertise-peer-urls http://{{ etcd.node_ip }}:2380 \ + --listen-peer-urls http://{{ etcd.node_ip }}:2380 \ + --advertise-client-urls http://{{ etcd.node_ip }}:2379 \ + --listen-client-urls http://{{ etcd.node_ip }}:2379,http://127.0.0.1:2379 \ + --initial-cluster-token etcd-cluster-1 \ + --initial-cluster {{ etcd.initial_cluster_nodes }} \ + --initial-cluster-state new + Restart=on-failure + RestartSec=5 + + [Install] + WantedBy=multi-user.target + +runcmd: + - chown -R cloud:cloud /home/cloud/.ssh + - /opt/bin/setup-etcd-node + - systemctl daemon-reload + - systemctl enable --now etcd diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml index 6819723b3f04..38f217f403c4 100644 --- 
a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml @@ -40,8 +40,8 @@ write_files: sysctl net.ipv4.conf.default.arp_ignore=0 sysctl net.ipv4.conf.all.arp_announce=0 sysctl net.ipv4.conf.all.arp_ignore=0 - sysctl net.ipv4.conf.eth0.arp_announce=0 - sysctl net.ipv4.conf.eth0.arp_ignore=0 + sysctl net.ipv4.conf.eth0.arp_announce=0 || sysctl net.ipv4.conf.ens35.arp_announce=0 || true + sysctl net.ipv4.conf.eth0.arp_ignore=0 || sysctl net.ipv4.conf.ens35.arp_ignore=0 || true sed -i "s/net.ipv4.conf.default.arp_announce =.*$/net.ipv4.conf.default.arp_announce = 0/" /etc/sysctl.conf sed -i "s/net.ipv4.conf.default.arp_ignore =.*$/net.ipv4.conf.default.arp_ignore = 0/" /etc/sysctl.conf sed -i "s/net.ipv4.conf.all.arp_announce =.*$/net.ipv4.conf.all.arp_announce = 0/" /etc/sysctl.conf @@ -53,8 +53,14 @@ write_files: ATTEMPT_ONLINE_INSTALL=false setup_complete=false - OFFLINE_INSTALL_ATTEMPT_SLEEP=15 - MAX_OFFLINE_INSTALL_ATTEMPTS=100 + OFFLINE_INSTALL_ATTEMPT_SLEEP={{ k8s.install.wait.time }} + MAX_OFFLINE_INSTALL_ATTEMPTS={{ k8s.install.reattempts.count }} + if [[ -z $OFFLINE_INSTALL_ATTEMPT_SLEEP || $OFFLINE_INSTALL_ATTEMPT_SLEEP -eq 0 ]]; then + OFFLINE_INSTALL_ATTEMPT_SLEEP=15 + fi + if [[ -z $MAX_OFFLINE_INSTALL_ATTEMPTS || $MAX_OFFLINE_INSTALL_ATTEMPTS -eq 0 ]]; then + MAX_OFFLINE_INSTALL_ATTEMPTS=100 + fi offline_attempts=1 MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3 EJECT_ISO_FROM_OS={{ k8s.eject.iso }} diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml index 90be8957d440..217ed814f6a7 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml @@ -1,3 +1,4 @@ +## template: jinja 
#cloud-config # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -60,8 +61,8 @@ write_files: sysctl net.ipv4.conf.default.arp_ignore=0 sysctl net.ipv4.conf.all.arp_announce=0 sysctl net.ipv4.conf.all.arp_ignore=0 - sysctl net.ipv4.conf.eth0.arp_announce=0 - sysctl net.ipv4.conf.eth0.arp_ignore=0 + sysctl net.ipv4.conf.eth0.arp_announce=0 || sysctl net.ipv4.conf.ens35.arp_announce=0 || true + sysctl net.ipv4.conf.eth0.arp_ignore=0 || sysctl net.ipv4.conf.ens35.arp_ignore=0 || true sed -i "s/net.ipv4.conf.default.arp_announce =.*$/net.ipv4.conf.default.arp_announce = 0/" /etc/sysctl.conf sed -i "s/net.ipv4.conf.default.arp_ignore =.*$/net.ipv4.conf.default.arp_ignore = 0/" /etc/sysctl.conf sed -i "s/net.ipv4.conf.all.arp_announce =.*$/net.ipv4.conf.all.arp_announce = 0/" /etc/sysctl.conf @@ -73,8 +74,14 @@ write_files: ATTEMPT_ONLINE_INSTALL=false setup_complete=false - OFFLINE_INSTALL_ATTEMPT_SLEEP=15 - MAX_OFFLINE_INSTALL_ATTEMPTS=100 + OFFLINE_INSTALL_ATTEMPT_SLEEP={{ k8s.install.wait.time }} + MAX_OFFLINE_INSTALL_ATTEMPTS={{ k8s.install.reattempts.count }} + if [[ -z $OFFLINE_INSTALL_ATTEMPT_SLEEP || $OFFLINE_INSTALL_ATTEMPT_SLEEP -eq 0 ]]; then + OFFLINE_INSTALL_ATTEMPT_SLEEP=15 + fi + if [[ -z $MAX_OFFLINE_INSTALL_ATTEMPTS || $MAX_OFFLINE_INSTALL_ATTEMPTS -eq 0 ]]; then + MAX_OFFLINE_INSTALL_ATTEMPTS=100 + fi offline_attempts=1 MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3 EJECT_ISO_FROM_OS={{ k8s.eject.iso }} @@ -230,6 +237,34 @@ write_files: done fi + - path: /etc/kubernetes/kubeadm-config.yaml + permissions: '0644' + owner: root:root + content: | + apiVersion: kubeadm.k8s.io/v1beta3 + kind: ClusterConfiguration + apiServer: + certSANs: + {{ k8s_control.server_ips }} + controlPlaneEndpoint: {{ k8s_control.server_ip }}:{{ k8s.api_server_port }} + etcd: + external: + endpoints: + {{ etcd.etcd_endpoint_list }} + --- + apiVersion: kubeadm.k8s.io/v1beta3 + kind: InitConfiguration + bootstrapTokens: + - 
token: "{{ k8s_control_node.cluster.token }}" + ttl: "0" + nodeRegistration: + criSocket: /run/containerd/containerd.sock + localAPIEndpoint: + advertiseAddress: {{ k8s_control.server_ip }} + bindPort: {{ k8s.api_server_port }} + certificateKey: {{ k8s_control.certificate_key }} + + - path: /opt/bin/deploy-kube-system permissions: '0700' owner: root:root @@ -245,6 +280,8 @@ write_files: export PATH=$PATH:/opt/bin fi + EXTERNAL_ETCD_NODES={{ etcd.unstacked_etcd }} + EXTERNAL_CNI_PLUGIN={{ k8s.external.cni.plugin }} MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3 crucial_cmd_attempts=1 while true; do @@ -254,7 +291,11 @@ write_files: fi retval=0 set +e - kubeadm init --token {{ k8s_control_node.cluster.token }} --token-ttl 0 {{ k8s_control_node.cluster.initargs }} --cri-socket /run/containerd/containerd.sock + if [[ ${EXTERNAL_ETCD_NODES} == true ]]; then + kubeadm init --config /etc/kubernetes/kubeadm-config.yaml --upload-certs + else + kubeadm init --token {{ k8s_control_node.cluster.token }} --token-ttl 0 {{ k8s_control_node.cluster.initargs }} --cri-socket /run/containerd/containerd.sock + fi retval=$? set -e if [ $retval -eq 0 ]; then @@ -282,7 +323,9 @@ write_files: if [ -d "$K8S_CONFIG_SCRIPTS_COPY_DIR" ]; then ### Network, dashboard configs available offline ### echo "Offline configs are available!" 
- /opt/bin/kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/network.yaml + if [[ ${EXTERNAL_CNI_PLUGIN} == false ]]; then + /opt/bin/kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/network.yaml + fi /opt/bin/kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/dashboard.yaml rm -rf "${K8S_CONFIG_SCRIPTS_COPY_DIR}" else @@ -297,6 +340,7 @@ write_files: sudo touch /home/cloud/success echo "true" > /home/cloud/success +{% if registry is defined %} - path: /opt/bin/setup-containerd permissions: '0755' owner: root:root @@ -314,6 +358,7 @@ write_files: echo "Restarting containerd service" systemctl daemon-reload systemctl restart containerd +{% endif %} - path: /etc/systemd/system/deploy-kube-system.service permissions: '0755' diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml index ac80e8576ffe..fd9617e69dec 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml @@ -40,8 +40,8 @@ write_files: sysctl net.ipv4.conf.default.arp_ignore=0 sysctl net.ipv4.conf.all.arp_announce=0 sysctl net.ipv4.conf.all.arp_ignore=0 - sysctl net.ipv4.conf.eth0.arp_announce=0 - sysctl net.ipv4.conf.eth0.arp_ignore=0 + sysctl net.ipv4.conf.eth0.arp_announce=0 || sysctl net.ipv4.conf.ens35.arp_announce=0 || true + sysctl net.ipv4.conf.eth0.arp_ignore=0 || sysctl net.ipv4.conf.ens35.arp_ignore=0 || true sed -i "s/net.ipv4.conf.default.arp_announce =.*$/net.ipv4.conf.default.arp_announce = 0/" /etc/sysctl.conf sed -i "s/net.ipv4.conf.default.arp_ignore =.*$/net.ipv4.conf.default.arp_ignore = 0/" /etc/sysctl.conf sed -i "s/net.ipv4.conf.all.arp_announce =.*$/net.ipv4.conf.all.arp_announce = 0/" /etc/sysctl.conf @@ -53,8 +53,14 @@ write_files: ATTEMPT_ONLINE_INSTALL=false setup_complete=false - OFFLINE_INSTALL_ATTEMPT_SLEEP=30 - MAX_OFFLINE_INSTALL_ATTEMPTS=40 + 
OFFLINE_INSTALL_ATTEMPT_SLEEP={{ k8s.install.wait.time }} + MAX_OFFLINE_INSTALL_ATTEMPTS={{ k8s.install.reattempts.count }} + if [[ -z $OFFLINE_INSTALL_ATTEMPT_SLEEP || $OFFLINE_INSTALL_ATTEMPT_SLEEP -eq 0 ]]; then + OFFLINE_INSTALL_ATTEMPT_SLEEP=30 + fi + if [[ -z $MAX_OFFLINE_INSTALL_ATTEMPTS || $MAX_OFFLINE_INSTALL_ATTEMPTS -eq 0 ]]; then + MAX_OFFLINE_INSTALL_ATTEMPTS=40 + fi offline_attempts=1 MAX_SETUP_CRUCIAL_CMD_ATTEMPTS=3 EJECT_ISO_FROM_OS={{ k8s.eject.iso }} @@ -87,6 +93,21 @@ write_files: fi fi done <<< "$output" + else + ### Download from VR ### + ROUTER_IP="{{ k8s.vr.iso.mounted.ip }}" + if [ "$ROUTER_IP" != "" ]; then + echo "Downloading CKS binaries from the VR $ROUTER_IP" + if [ ! -d "${ISO_MOUNT_DIR}" ]; then + mkdir "${ISO_MOUNT_DIR}" + fi + ### Download from ROUTER_IP/cks-iso into ISO_MOUNT_DIR + AUX_DOWNLOAD_DIR=/aux-dwnld + mkdir -p $AUX_DOWNLOAD_DIR + wget -r -R "index.html*" $ROUTER_IP/cks-iso -P $AUX_DOWNLOAD_DIR || echo 'Cannot download some files from virtual router' + mv $AUX_DOWNLOAD_DIR/$ROUTER_IP/cks-iso/* $ISO_MOUNT_DIR + rm -rf $AUX_DOWNLOAD_DIR + fi fi if [ -d "$BINARIES_DIR" ]; then break diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/remove-node-from-cluster b/plugins/integrations/kubernetes-service/src/main/resources/script/remove-node-from-cluster new file mode 100644 index 000000000000..f852a0fd4ddd --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/remove-node-from-cluster @@ -0,0 +1,43 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +export PATH=$PATH:/opt/bin +node_name=$1 +node_type=$2 +operation=$3 + +if [ $operation == "remove" ]; then + if [ $node_type == "control" ]; then + # get the specific node + kubectl get nodes $node_name >/dev/null 2>&1 + if [[ $(echo $?) -eq 1 ]]; then + echo "No node with name $node_name present in the cluster, exiting..." + exit 0 + else + # Drain the node + kubectl drain $node_name --delete-local-data --force --ignore-daemonsets + fi + else + kubeadm reset -f + fi +else + sudo mkdir -p /home/cloud/.kube + sudo cp /root/.kube/config /home/cloud/.kube/ + sudo chown -R cloud:cloud /home/cloud/.kube + kubectl delete node $node_name +fi diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh index 480b002ef179..a947d508436e 100755 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh @@ -18,7 +18,7 @@ # Version 1.14 and below needs extra flags with kubeadm upgrade node if [ $# -lt 4 ]; then - echo "Invalid input. Valid usage: ./upgrade-kubernetes.sh UPGRADE_VERSION IS_CONTROL_NODE IS_OLD_VERSION IS_EJECT_ISO" + echo "Invalid input. 
Valid usage: ./upgrade-kubernetes.sh UPGRADE_VERSION IS_CONTROL_NODE IS_OLD_VERSION IS_EJECT_ISO IS_EXTERNAL_CNI" echo "eg: ./upgrade-kubernetes.sh 1.16.3 true false false false" exit 1 fi @@ -35,6 +35,10 @@ EJECT_ISO_FROM_OS=false if [ $# -gt 3 ]; then EJECT_ISO_FROM_OS="${4}" fi +EXTERNAL_CNI=false +if [ $# -gt 4 ]; then + EXTERNAL_CNI="${5}" +fi export PATH=$PATH:/opt/bin if [[ "$PATH" != *:/usr/sbin && "$PATH" != *:/usr/sbin:* ]]; then @@ -144,7 +148,9 @@ if [ -d "$BINARIES_DIR" ]; then systemctl restart kubelet if [ "${IS_MAIN_CONTROL}" == 'true' ]; then - /opt/bin/kubectl apply -f ${BINARIES_DIR}/network.yaml + if [[ ${EXTERNAL_CNI} == false ]]; then + /opt/bin/kubectl apply -f ${BINARIES_DIR}/network.yaml + fi /opt/bin/kubectl apply -f ${BINARIES_DIR}/dashboard.yaml fi diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/validate-cks-node b/plugins/integrations/kubernetes-service/src/main/resources/script/validate-cks-node new file mode 100644 index 000000000000..e28614e05afe --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/validate-cks-node @@ -0,0 +1,45 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +OS=`awk -F= '/^NAME/{print $2}' /etc/os-release` +REQUIRED_PACKAGES=(cloud-init cloud-guest-utils conntrack apt-transport-https ca-certificates curl gnupg gnupg-agent \ + software-properties-common gnupg lsb-release python3-json-pointer python3-jsonschema cloud-init containerd.io) +declare -a MISSING_PACKAGES +if [[ $OS == *"Ubuntu"* || $OS == *"Debian"* ]]; then + for package in ${REQUIRED_PACKAGES[@]}; do + dpkg -s $package >/dev/null 2>&1 + if [ $? -eq 1 ]; then + MISSING_PACKAGES+=("$package") + fi + done +else + for package in ${REQUIRED_PACKAGES[@]}; do + rpm -qa | grep $package >/dev/null 2>&1 + if [ $? -eq 1 ]; then + MISSING_PACKAGES[${#MISSING_PACKAGES[@]}]=$package + fi + done +fi + +echo ${#MISSING_PACKAGES[@]} +if (( ${#MISSING_PACKAGES[@]} )); then + echo "Following packages are missing in the node template: ${MISSING_PACKAGES[@]}" + exit 1 +else + echo 0 +fi diff --git a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterHelperImplTest.java b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterHelperImplTest.java new file mode 100644 index 000000000000..298f1dfbcd61 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterHelperImplTest.java @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.kubernetes.cluster; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.vm.VmDetailConstants; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.CONTROL; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.ETCD; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.WORKER; + +@RunWith(MockitoJUnitRunner.class) +public class KubernetesClusterHelperImplTest { + + @Mock + private ServiceOfferingDao serviceOfferingDao; + @Mock + private ServiceOfferingVO workerServiceOffering; + @Mock + private ServiceOfferingVO controlServiceOffering; + @Mock + private ServiceOfferingVO etcdServiceOffering; + + private static final String workerNodesOfferingId = UUID.randomUUID().toString(); + private static final String controlNodesOfferingId = UUID.randomUUID().toString(); + private static final String etcdNodesOfferingId = UUID.randomUUID().toString(); + private static final Long workerOfferingId = 1L; + private static final Long controlOfferingId = 2L; + private static final Long etcdOfferingId = 3L; + + private final 
KubernetesServiceHelperImpl helper = new KubernetesServiceHelperImpl(); + + @Before + public void setUp() { + helper.serviceOfferingDao = serviceOfferingDao; + Mockito.when(serviceOfferingDao.findByUuid(workerNodesOfferingId)).thenReturn(workerServiceOffering); + Mockito.when(serviceOfferingDao.findByUuid(controlNodesOfferingId)).thenReturn(controlServiceOffering); + Mockito.when(serviceOfferingDao.findByUuid(etcdNodesOfferingId)).thenReturn(etcdServiceOffering); + Mockito.when(workerServiceOffering.getId()).thenReturn(workerOfferingId); + Mockito.when(controlServiceOffering.getId()).thenReturn(controlOfferingId); + Mockito.when(etcdServiceOffering.getId()).thenReturn(etcdOfferingId); + } + + @Test + public void testIsValidNodeTypeEmptyNodeType() { + Assert.assertFalse(helper.isValidNodeType(null)); + } + + @Test + public void testIsValidNodeTypeInvalidNodeType() { + String nodeType = "invalidNodeType"; + Assert.assertFalse(helper.isValidNodeType(nodeType)); + } + + @Test + public void testIsValidNodeTypeValidNodeTypeLowercase() { + String nodeType = KubernetesServiceHelper.KubernetesClusterNodeType.WORKER.name().toLowerCase(); + Assert.assertTrue(helper.isValidNodeType(nodeType)); + } + + private Map createMapEntry(KubernetesServiceHelper.KubernetesClusterNodeType nodeType, + String nodeTypeOfferingUuid) { + Map map = new HashMap<>(); + map.put(VmDetailConstants.CKS_NODE_TYPE, nodeType.name().toLowerCase()); + map.put(VmDetailConstants.OFFERING, nodeTypeOfferingUuid); + return map; + } + + @Test + public void testNodeOfferingMap() { + Map> serviceOfferingNodeTypeMap = new HashMap<>(); + Map firstMap = createMapEntry(WORKER, workerNodesOfferingId); + Map secondMap = createMapEntry(CONTROL, controlNodesOfferingId); + serviceOfferingNodeTypeMap.put("map1", firstMap); + serviceOfferingNodeTypeMap.put("map2", secondMap); + Map map = helper.getServiceOfferingNodeTypeMap(serviceOfferingNodeTypeMap); + Assert.assertNotNull(map); + Assert.assertEquals(2, map.size()); + 
Assert.assertTrue(map.containsKey(WORKER.name()) && map.containsKey(CONTROL.name())); + Assert.assertEquals(workerOfferingId, map.get(WORKER.name())); + Assert.assertEquals(controlOfferingId, map.get(CONTROL.name())); + } + + @Test + public void testNodeOfferingMapNullMap() { + Map map = helper.getServiceOfferingNodeTypeMap(null); + Assert.assertTrue(map.isEmpty()); + } + + @Test + public void testNodeOfferingMapEtcdNodes() { + Map> serviceOfferingNodeTypeMap = new HashMap<>(); + Map firstMap = createMapEntry(ETCD, etcdNodesOfferingId); + serviceOfferingNodeTypeMap.put("map1", firstMap); + Map map = helper.getServiceOfferingNodeTypeMap(serviceOfferingNodeTypeMap); + Assert.assertNotNull(map); + Assert.assertEquals(1, map.size()); + Assert.assertTrue(map.containsKey(ETCD.name())); + Assert.assertEquals(etcdOfferingId, map.get(ETCD.name())); + } + + @Test(expected = InvalidParameterValueException.class) + public void testCheckNodeTypeOfferingEntryCompletenessInvalidParameters() { + helper.checkNodeTypeOfferingEntryCompleteness(WORKER.name(), null); + } + + @Test(expected = InvalidParameterValueException.class) + public void testCheckNodeTypeOfferingEntryValuesInvalidNodeType() { + String invalidNodeType = "invalidNodeTypeName"; + helper.checkNodeTypeOfferingEntryValues(invalidNodeType, workerServiceOffering, workerNodesOfferingId); + } + + @Test(expected = InvalidParameterValueException.class) + public void testCheckNodeTypeOfferingEntryValuesEmptyOffering() { + String nodeType = WORKER.name(); + helper.checkNodeTypeOfferingEntryValues(nodeType, null, workerNodesOfferingId); + } +} diff --git a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImplTest.java b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImplTest.java index a6d46ffc9aa1..a9cb7096b8ae 100644 --- 
a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImplTest.java +++ b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImplTest.java @@ -27,16 +27,21 @@ import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterActionWorker; import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao; import com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDao; +import com.cloud.kubernetes.version.KubernetesSupportedVersion; import com.cloud.network.Network; import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.FirewallRuleVO; import com.cloud.network.vpc.NetworkACL; +import com.cloud.offering.ServiceOffering; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.User; +import com.cloud.utils.Pair; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.api.BaseCmd; @@ -44,6 +49,7 @@ import org.apache.cloudstack.api.command.user.kubernetes.cluster.RemoveVirtualMachinesFromKubernetesClusterCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.commons.collections.MapUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -59,7 +65,14 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; + +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.CONTROL; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.DEFAULT; +import static 
com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.ETCD; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.WORKER; @RunWith(MockitoJUnitRunner.class) public class KubernetesClusterManagerImplTest { @@ -85,6 +98,9 @@ public class KubernetesClusterManagerImplTest { @Mock private AccountManager accountManager; + @Mock + private ServiceOfferingDao serviceOfferingDao; + @Spy @InjectMocks KubernetesClusterManagerImpl kubernetesClusterManager; @@ -292,4 +308,117 @@ public void removeVmsFromCluster() { Mockito.when(kubernetesClusterDao.findById(Mockito.anyLong())).thenReturn(cluster); Assert.assertTrue(kubernetesClusterManager.removeVmsFromCluster(cmd).size() > 0); } + + @Test + public void testValidateServiceOfferingNodeType() { + Map map = new HashMap<>(); + map.put(WORKER.name(), 1L); + map.put(CONTROL.name(), 2L); + ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class); + Mockito.when(serviceOfferingDao.findById(1L)).thenReturn(serviceOffering); + Mockito.when(serviceOffering.isDynamic()).thenReturn(false); + Mockito.when(serviceOffering.getCpu()).thenReturn(2); + Mockito.when(serviceOffering.getRamSize()).thenReturn(2048); + KubernetesSupportedVersion version = Mockito.mock(KubernetesSupportedVersion.class); + Mockito.when(version.getMinimumCpu()).thenReturn(2); + Mockito.when(version.getMinimumRamSize()).thenReturn(2048); + kubernetesClusterManager.validateServiceOfferingForNode(map, 1L, WORKER.name(), null, version); + Mockito.verify(kubernetesClusterManager).validateServiceOffering(serviceOffering, version); + } + + @Test(expected = InvalidParameterValueException.class) + public void testValidateServiceOfferingNodeTypeInvalidOffering() { + Map map = new HashMap<>(); + map.put(WORKER.name(), 1L); + map.put(CONTROL.name(), 2L); + ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class); + 
Mockito.when(serviceOfferingDao.findById(1L)).thenReturn(serviceOffering); + Mockito.when(serviceOffering.isDynamic()).thenReturn(true); + kubernetesClusterManager.validateServiceOfferingForNode(map, 1L, WORKER.name(), null, null); + } + + @Test + public void testClusterCapacity() { + long workerOfferingId = 1L; + long controlOfferingId = 2L; + long workerCount = 2L; + long controlCount = 2L; + + int workerOfferingCpus = 4; + int workerOfferingMemory = 4096; + int controlOfferingCpus = 2; + int controlOfferingMemory = 2048; + + Map map = Map.of(WORKER.name(), workerOfferingId, CONTROL.name(), controlOfferingId); + Map nodeCount = Map.of(WORKER.name(), workerCount, CONTROL.name(), controlCount); + + ServiceOfferingVO workerOffering = Mockito.mock(ServiceOfferingVO.class); + Mockito.when(serviceOfferingDao.findById(workerOfferingId)).thenReturn(workerOffering); + ServiceOfferingVO controlOffering = Mockito.mock(ServiceOfferingVO.class); + Mockito.when(serviceOfferingDao.findById(controlOfferingId)).thenReturn(controlOffering); + Mockito.when(workerOffering.getCpu()).thenReturn(workerOfferingCpus); + Mockito.when(workerOffering.getRamSize()).thenReturn(workerOfferingMemory); + Mockito.when(controlOffering.getCpu()).thenReturn(controlOfferingCpus); + Mockito.when(controlOffering.getRamSize()).thenReturn(controlOfferingMemory); + + Pair pair = kubernetesClusterManager.calculateClusterCapacity(map, nodeCount, 1L); + Long expectedCpu = (workerOfferingCpus * workerCount) + (controlOfferingCpus * controlCount); + Long expectedMemory = (workerOfferingMemory * workerCount) + (controlOfferingMemory * controlCount); + Assert.assertEquals(expectedCpu, pair.first()); + Assert.assertEquals(expectedMemory, pair.second()); + } + + @Test + public void testIsAnyNodeOfferingEmptyNullMap() { + Assert.assertTrue(kubernetesClusterManager.isAnyNodeOfferingEmpty(null)); + } + + @Test + public void testIsAnyNodeOfferingEmptyNullValue() { + Map map = new HashMap<>(); + map.put(WORKER.name(), 
1L); + map.put(CONTROL.name(), null); + map.put(ETCD.name(), 2L); + Assert.assertTrue(kubernetesClusterManager.isAnyNodeOfferingEmpty(map)); + } + + @Test + public void testIsAnyNodeOfferingEmpty() { + Map map = new HashMap<>(); + map.put(WORKER.name(), 1L); + map.put(CONTROL.name(), 2L); + Assert.assertFalse(kubernetesClusterManager.isAnyNodeOfferingEmpty(map)); + } + + @Test + public void testCreateNodeTypeToServiceOfferingMapNullMap() { + KubernetesClusterVO clusterVO = Mockito.mock(KubernetesClusterVO.class); + Mockito.when(clusterVO.getServiceOfferingId()).thenReturn(1L); + ServiceOfferingVO offering = Mockito.mock(ServiceOfferingVO.class); + Mockito.when(serviceOfferingDao.findById(1L)).thenReturn(offering); + Map mapping = kubernetesClusterManager.createNodeTypeToServiceOfferingMap(new HashMap<>(), null, clusterVO); + Assert.assertFalse(MapUtils.isEmpty(mapping)); + Assert.assertTrue(mapping.containsKey(DEFAULT.name())); + Assert.assertEquals(offering, mapping.get(DEFAULT.name())); + } + + @Test + public void testCreateNodeTypeToServiceOfferingMap() { + Map idsMap = new HashMap<>(); + long workerOfferingId = 1L; + long controlOfferingId = 2L; + idsMap.put(WORKER.name(), workerOfferingId); + idsMap.put(CONTROL.name(), controlOfferingId); + + ServiceOfferingVO workerOffering = Mockito.mock(ServiceOfferingVO.class); + Mockito.when(serviceOfferingDao.findById(workerOfferingId)).thenReturn(workerOffering); + ServiceOfferingVO controlOffering = Mockito.mock(ServiceOfferingVO.class); + Mockito.when(serviceOfferingDao.findById(controlOfferingId)).thenReturn(controlOffering); + + Map mapping = kubernetesClusterManager.createNodeTypeToServiceOfferingMap(idsMap, null, null); + Assert.assertEquals(2, mapping.size()); + Assert.assertTrue(mapping.containsKey(WORKER.name()) && mapping.containsKey(CONTROL.name())); + Assert.assertEquals(workerOffering, mapping.get(WORKER.name())); + Assert.assertEquals(controlOffering, mapping.get(CONTROL.name())); + } } diff --git 
a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorkerTest.java b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorkerTest.java new file mode 100644 index 000000000000..e6fb45fd7176 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorkerTest.java @@ -0,0 +1,130 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.kubernetes.cluster.actionworkers; + +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; +import com.cloud.offering.ServiceOffering; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.utils.Pair; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Map; + +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.DEFAULT; +import static com.cloud.kubernetes.cluster.KubernetesServiceHelper.KubernetesClusterNodeType.CONTROL; + +@RunWith(MockitoJUnitRunner.class) +public class KubernetesClusterScaleWorkerTest { + + @Mock + private KubernetesCluster kubernetesCluster; + @Mock + private KubernetesClusterManagerImpl clusterManager; + @Mock + private ServiceOfferingDao serviceOfferingDao; + + private KubernetesClusterScaleWorker worker; + + private static final Long defaultOfferingId = 1L; + + @Before + public void setUp() { + worker = new KubernetesClusterScaleWorker(kubernetesCluster, clusterManager); + worker.serviceOfferingDao = serviceOfferingDao; + } + + @Test + public void testIsServiceOfferingScalingNeededForNodeTypeAllNodesSameOffering() { + ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class); + Map map = Map.of(DEFAULT.name(), serviceOffering); + Mockito.when(serviceOfferingDao.findById(defaultOfferingId)).thenReturn(serviceOffering); + Assert.assertFalse(worker.isServiceOfferingScalingNeededForNodeType(DEFAULT, map, kubernetesCluster, defaultOfferingId)); + } + + @Test + public void testIsServiceOfferingScalingNeededForNodeTypeAllNodesDifferentOffering() { + ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class); + 
Mockito.when(serviceOffering.getId()).thenReturn(defaultOfferingId); + ServiceOfferingVO newOffering = Mockito.mock(ServiceOfferingVO.class); + Mockito.when(newOffering.getId()).thenReturn(4L); + Map map = Map.of(DEFAULT.name(), newOffering); + Mockito.when(serviceOfferingDao.findById(defaultOfferingId)).thenReturn(serviceOffering); + Assert.assertTrue(worker.isServiceOfferingScalingNeededForNodeType(DEFAULT, map, kubernetesCluster, defaultOfferingId)); + } + + @Test + public void testCalculateNewClusterCountAndCapacityAllNodesScaleSize() { + long controlNodes = 3L; + long etcdNodes = 2L; + Mockito.when(kubernetesCluster.getControlNodeCount()).thenReturn(controlNodes); + Mockito.when(kubernetesCluster.getEtcdNodeCount()).thenReturn(etcdNodes); + + ServiceOffering newOffering = Mockito.mock(ServiceOffering.class); + int newCores = 4; + int newMemory = 4096; + Mockito.when(newOffering.getCpu()).thenReturn(newCores); + Mockito.when(newOffering.getRamSize()).thenReturn(newMemory); + + long newWorkerSize = 4L; + Pair newClusterCapacity = worker.calculateNewClusterCountAndCapacity(newWorkerSize, DEFAULT, newOffering); + + long expectedCores = (newCores * newWorkerSize) + (newCores * controlNodes) + (newCores * etcdNodes); + long expectedMemory = (newMemory * newWorkerSize) + (newMemory * controlNodes) + (newMemory * etcdNodes); + Assert.assertEquals(expectedCores, newClusterCapacity.first().longValue()); + Assert.assertEquals(expectedMemory, newClusterCapacity.second().longValue()); + } + + @Test + public void testCalculateNewClusterCountAndCapacityNodeTypeScaleControlOffering() { + long controlNodes = 2L; + Mockito.when(kubernetesCluster.getControlNodeCount()).thenReturn(controlNodes); + + ServiceOfferingVO existingOffering = Mockito.mock(ServiceOfferingVO.class); + int existingCores = 2; + int existingMemory = 2048; + Mockito.when(existingOffering.getCpu()).thenReturn(existingCores); + Mockito.when(existingOffering.getRamSize()).thenReturn(existingMemory); + int 
remainingClusterCpu = 8; + int remainingClusterMemory = 12288; + Mockito.when(kubernetesCluster.getCores()).thenReturn(remainingClusterCpu + (controlNodes * existingCores)); + Mockito.when(kubernetesCluster.getMemory()).thenReturn(remainingClusterMemory + (controlNodes * existingMemory)); + + Mockito.when(kubernetesCluster.getControlServiceOfferingId()).thenReturn(1L); + Mockito.when(serviceOfferingDao.findById(1L)).thenReturn(existingOffering); + + ServiceOfferingVO newOffering = Mockito.mock(ServiceOfferingVO.class); + int newCores = 4; + int newMemory = 2048; + Mockito.when(newOffering.getCpu()).thenReturn(newCores); + Mockito.when(newOffering.getRamSize()).thenReturn(newMemory); + + Pair newClusterCapacity = worker.calculateNewClusterCountAndCapacity(null, CONTROL, newOffering); + + long expectedCores = remainingClusterCpu + (controlNodes * newCores); + long expectedMemory = remainingClusterMemory + (controlNodes * newMemory); + Assert.assertEquals(expectedCores, newClusterCapacity.first().longValue()); + Assert.assertEquals(expectedMemory, newClusterCapacity.second().longValue()); + } +} diff --git a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorkerTest.java b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorkerTest.java new file mode 100644 index 000000000000..ff8f875d06e1 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorkerTest.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.kubernetes.cluster.actionworkers; + +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; +import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO; +import com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDao; +import com.cloud.kubernetes.version.KubernetesSupportedVersion; +import com.cloud.uservm.UserVm; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +@RunWith(MockitoJUnitRunner.class) +public class KubernetesClusterUpgradeWorkerTest { + + @Mock + private KubernetesCluster kubernetesCluster; + @Mock + private KubernetesSupportedVersion kubernetesSupportedVersion; + @Mock + private KubernetesClusterManagerImpl clusterManager; + @Mock + private KubernetesClusterVmMapDao kubernetesClusterVmMapDao; + + private KubernetesClusterUpgradeWorker worker; + + @Before + public void setUp() { + String[] keys = {}; + worker = new KubernetesClusterUpgradeWorker(kubernetesCluster, kubernetesSupportedVersion, clusterManager, keys); + worker.kubernetesClusterVmMapDao = kubernetesClusterVmMapDao; + } + + @Test + public void 
testFilterOutManualUpgradeNodesFromClusterUpgrade() { + long controlNodeId = 1L; + long workerNode1Id = 2L; + long workerNode2Id = 3L; + UserVm controlNode = Mockito.mock(UserVm.class); + Mockito.when(controlNode.getId()).thenReturn(controlNodeId); + UserVm workerNode1 = Mockito.mock(UserVm.class); + Mockito.when(workerNode1.getId()).thenReturn(workerNode1Id); + UserVm workerNode2 = Mockito.mock(UserVm.class); + Mockito.when(workerNode2.getId()).thenReturn(workerNode2Id); + KubernetesClusterVmMapVO controlNodeMap = Mockito.mock(KubernetesClusterVmMapVO.class); + KubernetesClusterVmMapVO workerNode1Map = Mockito.mock(KubernetesClusterVmMapVO.class); + KubernetesClusterVmMapVO workerNode2Map = Mockito.mock(KubernetesClusterVmMapVO.class); + Mockito.when(workerNode2Map.isManualUpgrade()).thenReturn(true); + Mockito.when(kubernetesClusterVmMapDao.getClusterMapFromVmId(controlNodeId)).thenReturn(controlNodeMap); + Mockito.when(kubernetesClusterVmMapDao.getClusterMapFromVmId(workerNode1Id)).thenReturn(workerNode1Map); + Mockito.when(kubernetesClusterVmMapDao.getClusterMapFromVmId(workerNode2Id)).thenReturn(workerNode2Map); + worker.clusterVMs = Arrays.asList(controlNode, workerNode1, workerNode2); + worker.filterOutManualUpgradeNodesFromClusterUpgrade(); + Assert.assertEquals(2, worker.clusterVMs.size()); + List ids = worker.clusterVMs.stream().map(UserVm::getId).collect(Collectors.toList()); + Assert.assertTrue(ids.contains(controlNodeId) && ids.contains(workerNode1Id)); + Assert.assertFalse(ids.contains(workerNode2Id)); + } +} diff --git a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtilTest.java b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtilTest.java index 31363dbd1a1d..329f9b0e42ad 100644 --- a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtilTest.java +++ 
b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtilTest.java @@ -27,14 +27,14 @@ public class KubernetesClusterUtilTest { private void executeThrowAndTestVersionMatch() { Pair resultPair = null; - boolean result = KubernetesClusterUtil.clusterNodeVersionMatches(resultPair, "1.24.0"); - Assert.assertFalse(result); + Pair result = KubernetesClusterUtil.clusterNodeVersionMatches(resultPair, "1.24.0"); + Assert.assertFalse(result.first()); } private void executeAndTestVersionMatch(boolean status, String response, boolean expectedResult) { Pair resultPair = new Pair<>(status, response); - boolean result = KubernetesClusterUtil.clusterNodeVersionMatches(resultPair, "1.24.0"); - Assert.assertEquals(expectedResult, result); + Pair result = KubernetesClusterUtil.clusterNodeVersionMatches(resultPair, "1.24.0"); + Assert.assertEquals(expectedResult, result.first()); } @Test diff --git a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxApiClient.java b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxApiClient.java index 1ba1cc0fcc3b..b7631f781430 100644 --- a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxApiClient.java +++ b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxApiClient.java @@ -474,7 +474,6 @@ public void deleteSegment(long zoneId, long domainId, long accountId, Long vpcId } } - protected void removeSegment(String segmentName, long zoneId) { logger.debug(String.format("Removing the segment with ID %s", segmentName)); Segments segmentService = (Segments) nsxService.apply(Segments.class); diff --git a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxElement.java b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxElement.java index e1b37a8d6533..85d203f41257 100644 --- 
a/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxElement.java +++ b/plugins/network-elements/nsx/src/main/java/org/apache/cloudstack/service/NsxElement.java @@ -94,6 +94,7 @@ import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallback; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.net.NetUtils; import com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; import com.cloud.vm.VMInstanceVO; @@ -121,6 +122,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -861,17 +863,17 @@ protected NsxNetworkRule.NsxRuleAction transformActionValue(NetworkACLItem.Actio * Replace 0.0.0.0/0 to ANY on each occurrence */ protected List transformCidrListValues(List sourceCidrList) { - List list = new ArrayList<>(); + Set set = new HashSet<>(); if (org.apache.commons.collections.CollectionUtils.isNotEmpty(sourceCidrList)) { for (String cidr : sourceCidrList) { - if (cidr.equals("0.0.0.0/0")) { - list.add("ANY"); + if (cidr.equals(NetUtils.ALL_IP4_CIDRS) || cidr.equals(NetUtils.ALL_IP6_CIDRS)) { + set.add("ANY"); } else { - list.add(cidr); + set.add(cidr); } } } - return list; + return set.stream().sorted().collect(Collectors.toList()); } @Override diff --git a/plugins/storage/sharedfs/storagevm/src/main/java/org/apache/cloudstack/storage/sharedfs/lifecycle/StorageVmSharedFSLifeCycle.java b/plugins/storage/sharedfs/storagevm/src/main/java/org/apache/cloudstack/storage/sharedfs/lifecycle/StorageVmSharedFSLifeCycle.java index 31159e7d3d95..a78164f603dc 100644 --- a/plugins/storage/sharedfs/storagevm/src/main/java/org/apache/cloudstack/storage/sharedfs/lifecycle/StorageVmSharedFSLifeCycle.java +++ b/plugins/storage/sharedfs/storagevm/src/main/java/org/apache/cloudstack/storage/sharedfs/lifecycle/StorageVmSharedFSLifeCycle.java @@ -199,7 +199,7 @@ private UserVm 
deploySharedFSVM(Long zoneId, Account owner, List networkId customParameterMap, null, null, null, null, true, UserVmManager.SHAREDFSVM, null); vmContext.setEventResourceId(vm.getId()); - userVmService.startVirtualMachine(vm); + userVmService.startVirtualMachine(vm, null); } catch (InsufficientCapacityException ex) { if (vm != null) { expungeVm(vm.getId()); @@ -243,7 +243,7 @@ public Pair deploySharedFS(SharedFS sharedFS, Long networkId, Long d @Override public void startSharedFS(SharedFS sharedFS) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException { UserVmVO vm = userVmDao.findById(sharedFS.getVmId()); - userVmService.startVirtualMachine(vm); + userVmService.startVirtualMachine(vm, null); } @Override diff --git a/scripts/util/create-kubernetes-binaries-iso.sh b/scripts/util/create-kubernetes-binaries-iso.sh index d5fb014f220b..265c0ce5ce2d 100755 --- a/scripts/util/create-kubernetes-binaries-iso.sh +++ b/scripts/util/create-kubernetes-binaries-iso.sh @@ -19,8 +19,8 @@ set -e if [ $# -lt 6 ]; then - echo "Invalid input. Valid usage: ./create-kubernetes-binaries-iso.sh OUTPUT_PATH KUBERNETES_VERSION CNI_VERSION CRICTL_VERSION WEAVENET_NETWORK_YAML_CONFIG DASHBOARD_YAML_CONFIG BUILD_NAME" - echo "eg: ./create-kubernetes-binaries-iso.sh ./ 1.11.4 0.7.1 1.11.1 https://github.com/weaveworks/weave/releases/download/latest_release/weave-daemonset-k8s-1.11.yaml https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.0/src/deploy/recommended/kubernetes-dashboard.yaml setup-v1.11.4" + echo "Invalid input. 
Valid usage: ./create-kubernetes-binaries-iso.sh OUTPUT_PATH KUBERNETES_VERSION CNI_VERSION CRICTL_VERSION WEAVENET_NETWORK_YAML_CONFIG DASHBOARD_YAML_CONFIG ETCD_VER BUILD_NAME" + echo "eg: ./create-kubernetes-binaries-iso.sh ./ 1.11.4 0.7.1 1.11.1 https://github.com/weaveworks/weave/releases/download/latest_release/weave-daemonset-k8s-1.11.yaml https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.0/src/deploy/recommended/kubernetes-dashboard.yaml 3.5.1 setup-v1.11.4" exit 1 fi @@ -31,7 +31,7 @@ start_dir="$PWD" iso_dir="/tmp/iso" working_dir="${iso_dir}/" mkdir -p "${working_dir}" -build_name="${7}.iso" +build_name="${8}.iso" [ -z "${build_name}" ] && build_name="setup-${RELEASE}.iso" CNI_VERSION="v${3}" @@ -148,6 +148,12 @@ chmod ${kubeadm_file_permissions} "${working_dir}/k8s/kubeadm" echo "Updating imagePullPolicy to IfNotPresent in yaml files..." sed -i "s/imagePullPolicy:.*/imagePullPolicy: IfNotPresent/g" ${working_dir}/*.yaml +# Install etcd dependencies +etcd_dir="${working_dir}/etcd" +mkdir -p "${etcd_dir}" +ETCD_VER=v${7} +wget -q --show-progress "https://github.com/etcd-io/etcd/releases/download/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz" -O ${etcd_dir}/etcd-linux-amd64.tar.gz + mkisofs -o "${output_dir}/${build_name}" -J -R -l "${iso_dir}" rm -rf "${iso_dir}" diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index 810f0abd7e00..794f7aa96e52 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -5411,9 +5411,13 @@ public ASNumberResponse createASNumberResponse(ASNumber asn) { response.setZoneName(zone.getName()); response.setAsNumber(asn.getAsNumber()); ASNumberRangeVO range = asNumberRangeDao.findById(asn.getAsNumberRangeId()); - response.setAsNumberRangeId(range.getUuid()); - String rangeText = String.format("%s-%s", range.getStartASNumber(), range.getEndASNumber()); - 
response.setAsNumberRange(rangeText); + if (Objects.nonNull(range)) { + response.setAsNumberRangeId(range.getUuid()); + String rangeText = String.format("%s-%s", range.getStartASNumber(), range.getEndASNumber()); + response.setAsNumberRange(rangeText); + } else { + logger.info("AS number range is null for AS number: " + asn.getAsNumber()); + } response.setAllocated(asn.getAllocatedTime()); response.setAllocationState(asn.isAllocated() ? "Allocated" : "Free"); if (asn.getVpcId() != null) { diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index c60502fd5c86..f9f04dbf81c1 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -4543,12 +4543,13 @@ private Pair, Integer> searchForTemplatesInternal(ListTempl } } Boolean isVnf = cmd.getVnf(); + Boolean forCks = cmd.getForCks(); return searchForTemplatesInternal(id, cmd.getTemplateName(), cmd.getKeyword(), templateFilter, false, null, cmd.getPageSizeVal(), cmd.getStartIndex(), cmd.getZoneId(), cmd.getStoragePoolId(), cmd.getImageStoreId(), hypervisorType, showDomr, cmd.listInReadyState(), permittedAccounts, caller, listProjectResourcesCriteria, tags, showRemovedTmpl, cmd.getIds(), parentTemplateId, cmd.getShowUnique(), - templateType, isVnf, cmd.getArch()); + templateType, isVnf, cmd.getArch(), forCks); } private Pair, Integer> searchForTemplatesInternal(Long templateId, String name, String keyword, @@ -4557,7 +4558,7 @@ private Pair, Integer> searchForTemplatesInternal(Long temp boolean showDomr, boolean onlyReady, List permittedAccounts, Account caller, ListProjectResourcesCriteria listProjectResourcesCriteria, Map tags, boolean showRemovedTmpl, List ids, Long parentTemplateId, Boolean showUnique, String templateType, - Boolean isVnf, CPU.CPUArch arch) { + Boolean isVnf, CPU.CPUArch arch, Boolean forCks) { // check if zone is configured, if not, just
return empty list List hypers = null; @@ -4743,7 +4744,7 @@ else if (!template.isPublicTemplate() && caller.getType() != Account.Type.ADMIN) applyPublicTemplateSharingRestrictions(sc, caller); return templateChecks(isIso, hypers, tags, name, keyword, hyperType, onlyReady, bootable, zoneId, showDomr, caller, - showRemovedTmpl, parentTemplateId, showUnique, templateType, isVnf, searchFilter, sc); + showRemovedTmpl, parentTemplateId, showUnique, templateType, isVnf, forCks, searchFilter, sc); } /** @@ -4797,7 +4798,7 @@ protected boolean checkIfDomainSharesTemplates(Long domainId) { private Pair, Integer> templateChecks(boolean isIso, List hypers, Map tags, String name, String keyword, HypervisorType hyperType, boolean onlyReady, Boolean bootable, Long zoneId, boolean showDomr, Account caller, - boolean showRemovedTmpl, Long parentTemplateId, Boolean showUnique, String templateType, Boolean isVnf, + boolean showRemovedTmpl, Long parentTemplateId, Boolean showUnique, String templateType, Boolean isVnf, Boolean forCks, Filter searchFilter, SearchCriteria sc) { if (!isIso) { // add hypervisor criteria for template case @@ -4895,6 +4896,10 @@ private Pair, Integer> templateChecks(boolean isIso, List, Integer> searchForIsosInternal(ListIsosCmd cm return searchForTemplatesInternal(cmd.getId(), cmd.getIsoName(), cmd.getKeyword(), isoFilter, true, cmd.isBootable(), cmd.getPageSizeVal(), cmd.getStartIndex(), cmd.getZoneId(), cmd.getStoragePoolId(), cmd.getImageStoreId(), hypervisorType, true, cmd.listInReadyState(), permittedAccounts, caller, listProjectResourcesCriteria, - tags, showRemovedISO, null, null, cmd.getShowUnique(), null, null, cmd.getArch()); + tags, showRemovedISO, null, null, cmd.getShowUnique(), null, null, cmd.getArch(), null); } @Override diff --git a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index 0bdf5040c82f..a399d1014873 100644 --- 
a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -318,6 +318,7 @@ public TemplateResponse newTemplateResponse(EnumSet templateResponse.setDetails(details); setDeployAsIsDetails(template, templateResponse); + templateResponse.setForCks(template.isForCks()); } // update tag information diff --git a/server/src/main/java/com/cloud/api/query/vo/TemplateJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/TemplateJoinVO.java index cd1496f65b1a..5e27f4b1415e 100644 --- a/server/src/main/java/com/cloud/api/query/vo/TemplateJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/TemplateJoinVO.java @@ -244,6 +244,9 @@ public class TemplateJoinVO extends BaseViewWithTagInformationVO implements Cont @Column(name = "deploy_as_is") private boolean deployAsIs; + @Column(name = "for_cks") + private boolean forCks; + @Column(name = "user_data_id") private Long userDataId; @@ -522,6 +525,10 @@ public boolean isDeployAsIs() { return deployAsIs; } + public boolean isForCks() { + return forCks; + } + public Object getParentTemplateId() { return parentTemplateId; } diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index d7e2160ef35b..9d63f4dbc3f1 100644 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -316,6 +316,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements ConfigurationManager, ConfigurationService, Configurable { public static final String PERACCOUNT = "peraccount"; public static final String PERZONE = "perzone"; + public static final String CLUSTER_NODES_DEFAULT_START_SSH_PORT = "2222"; @Inject EntityManager _entityMgr; @@ -657,6 +658,17 @@ protected void validateIpAddressRelatedConfigValues(final String configName, 
fin } } + protected void validateConflictingConfigValue(final String configName, final String value) { + if (configName.equals("cloud.kubernetes.etcd.node.start.port")) { + if (value.equals(CLUSTER_NODES_DEFAULT_START_SSH_PORT)) { + String errorMessage = "This range is reserved for Kubernetes cluster nodes. " + + "Please choose a value in a higher range that does not conflict with a deployed Kubernetes cluster"; + logger.error(errorMessage); + throw new InvalidParameterValueException(errorMessage); + } + } + } + @Override public boolean start() { @@ -970,6 +982,9 @@ public Configuration updateConfiguration(final UpdateCfgCmd cmd) throws InvalidP category = config.getCategory(); } + validateIpAddressRelatedConfigValues(name, value); + validateConflictingConfigValue(name, value); + if (CATEGORY_SYSTEM.equals(category) && !_accountMgr.isRootAdmin(caller.getId())) { logger.warn("Only Root Admin is allowed to edit the configuration " + name); throw new CloudRuntimeException("Only Root Admin is allowed to edit this configuration."); diff --git a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java index da1f0272297e..7401ff67b32a 100644 --- a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java @@ -42,6 +42,7 @@ import com.cloud.bgp.BGPService; import com.cloud.dc.VlanDetailsVO; +import com.cloud.dc.dao.ASNumberDao; import com.cloud.dc.dao.VlanDetailsDao; import com.cloud.network.dao.NsxProviderDao; import com.cloud.network.dao.PublicIpQuarantineDao; @@ -421,6 +422,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C RoutedIpv4Manager routedIpv4Manager; @Inject private BGPService bgpService; + @Inject + private ASNumberDao asNumberDao; List internalLoadBalancerElementServices = new ArrayList<>(); Map internalLoadBalancerElementServiceMap = new HashMap<>(); @@ -6271,6 +6274,27 @@ public
List getInternalLoadBalancerElements( return new ArrayList<>(this.internalLoadBalancerElementServiceMap.values()); } + @Override + public boolean handleCksIsoOnNetworkVirtualRouter(Long virtualRouterId, boolean mount) throws ResourceUnavailableException { + DomainRouterVO router = routerDao.findById(virtualRouterId); + if (router == null) { + String err = String.format("Cannot find VR with ID %s", virtualRouterId); + logger.error(err); + throw new CloudRuntimeException(err); + } + Commands commands = new Commands(Command.OnError.Stop); + commandSetupHelper.createHandleCksIsoCommand(router, mount, commands); + if (!networkHelper.sendCommandsToRouter(router, commands)) { + throw new CloudRuntimeException(String.format("Unable to send commands to virtual router: %s", router.getHostId())); + } + Answer answer = commands.getAnswer("handleCksIso"); + if (answer == null || !answer.getResult()) { + logger.error(String.format("Could not handle the CKS ISO properly: %s", answer == null ? "no answer received" : answer.getDetails())); + return false; + } + return true; + } + /** * Retrieves the active quarantine for the given public IP address. It can find by the ID of the quarantine or the address of the public IP. * @throws CloudRuntimeException if it does not find an active quarantine for the given public IP.
diff --git a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java index 18ce55aa328b..5a53c1016cf6 100644 --- a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java +++ b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java @@ -28,6 +28,7 @@ import javax.inject.Inject; +import com.cloud.agent.api.HandleCksIsoCommand; import com.cloud.network.rules.PortForwardingRuleVO; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -1425,6 +1426,13 @@ public void setupUpdateNetworkCommands(final VirtualRouter router, final Set bgpPeers, final VirtualRouter router, final Commands cmds, final Network network) { List bgpPeerTOs = new ArrayList<>(); diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 76d2943e18c8..90a6985df8e2 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -519,9 +519,13 @@ import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; import org.apache.cloudstack.api.command.user.template.UpdateTemplateCmd; import org.apache.cloudstack.api.command.user.template.UpdateTemplatePermissionsCmd; +import org.apache.cloudstack.api.command.user.userdata.BaseRegisterUserDataCmd; +import org.apache.cloudstack.api.command.user.userdata.DeleteCniConfigurationCmd; import org.apache.cloudstack.api.command.user.userdata.DeleteUserDataCmd; import org.apache.cloudstack.api.command.user.userdata.LinkUserDataToTemplateCmd; +import org.apache.cloudstack.api.command.user.userdata.ListCniConfigurationCmd; import org.apache.cloudstack.api.command.user.userdata.ListUserDataCmd; +import org.apache.cloudstack.api.command.user.userdata.RegisterCniConfigurationCmd; import 
org.apache.cloudstack.api.command.user.userdata.RegisterUserDataCmd; import org.apache.cloudstack.api.command.user.vm.AddIpToVmNicCmd; import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; @@ -4030,6 +4034,9 @@ public List> getCommands() { cmdList.add(DeleteUserDataCmd.class); cmdList.add(ListUserDataCmd.class); cmdList.add(LinkUserDataToTemplateCmd.class); + cmdList.add(RegisterCniConfigurationCmd.class); + cmdList.add(ListCniConfigurationCmd.class); + cmdList.add(DeleteCniConfigurationCmd.class); //object store APIs cmdList.add(AddObjectStoragePoolCmd.class); @@ -4829,7 +4836,13 @@ public boolean deleteUserData(final DeleteUserDataCmd cmd) { } @Override - public Pair, Integer> listUserDatas(final ListUserDataCmd cmd) { + @ActionEvent(eventType = EventTypes.EVENT_DELETE_CNI_CONFIG, eventDescription = "CNI Configuration deletion") + public boolean deleteCniConfiguration(DeleteCniConfigurationCmd cmd) { + return deleteUserData(cmd); + } + + @Override + public Pair, Integer> listUserDatas(final ListUserDataCmd cmd, final boolean forCks) { final Long id = cmd.getId(); final String name = cmd.getName(); final String keyword = cmd.getKeyword(); @@ -4849,6 +4862,8 @@ public Pair, Integer> listUserDatas(final ListUserDataC sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ); sb.and("keyword", sb.entity().getName(), SearchCriteria.Op.LIKE); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ); + sb.and("forCks", sb.entity().isForCks(), SearchCriteria.Op.EQ); final SearchCriteria sc = sb.create(); _accountMgr.buildACLSearchCriteria(sc, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria); @@ -4864,24 +4879,41 @@ public Pair, Integer> listUserDatas(final ListUserDataC sc.setParameters("keyword", "%" + keyword + "%"); } + sc.setParameters("forCks", forCks); + final Pair, Integer> result = userDataDao.searchAndCount(sc, searchFilter); return new Pair<>(result.first(), 
result.second()); } + @Override + @ActionEvent(eventType = EventTypes.EVENT_REGISTER_CNI_CONFIG, eventDescription = "registering CNI configuration", async = true) + public UserData registerCniConfigration(RegisterCniConfigurationCmd cmd) { + final Account owner = getOwner(cmd); + checkForUserDataByName(cmd, owner); + final String name = cmd.getName(); + + String userdata = cmd.getCniConfig(); + final String params = cmd.getParams(); + + userdata = userDataManager.validateUserData(userdata, cmd.getHttpMethod()); + + return createAndSaveUserData(name, userdata, params, owner, true); + } + @Override @ActionEvent(eventType = EventTypes.EVENT_REGISTER_USER_DATA, eventDescription = "registering userdata", async = true) public UserData registerUserData(final RegisterUserDataCmd cmd) { final Account owner = getOwner(cmd); checkForUserDataByName(cmd, owner); - checkForUserData(cmd, owner); - final String name = cmd.getName(); + String userdata = cmd.getUserData(); + checkForUserData(cmd, owner); final String params = cmd.getParams(); userdata = userDataManager.validateUserData(userdata, cmd.getHttpMethod()); - return createAndSaveUserData(name, userdata, params, owner); + return createAndSaveUserData(name, userdata, params, owner, false); } /** @@ -4901,7 +4933,7 @@ private void checkForUserData(final RegisterUserDataCmd cmd, final Account owner * @param owner * @throws InvalidParameterValueException */ - private void checkForUserDataByName(final RegisterUserDataCmd cmd, final Account owner) throws InvalidParameterValueException { + private void checkForUserDataByName(final BaseRegisterUserDataCmd cmd, final Account owner) throws InvalidParameterValueException { final UserDataVO userData = userDataDao.findByName(owner.getAccountId(), owner.getDomainId(), cmd.getName()); if (userData != null) { throw new InvalidParameterValueException(String.format("A userdata with name %s already exists for this account.", cmd.getName())); @@ -4970,7 +5002,7 @@ protected Account
getOwner(final RegisterSSHKeyPairCmd cmd) { * @param cmd * @return Account */ - protected Account getOwner(final RegisterUserDataCmd cmd) { + protected Account getOwner(final BaseRegisterUserDataCmd cmd) { final Account caller = getCaller(); return _accountMgr.finalizeOwner(caller, cmd.getAccountName(), cmd.getDomainId(), cmd.getProjectId()); } @@ -4983,7 +5015,7 @@ protected Account getCaller() { return caller; } - private SSHKeyPair createAndSaveSSHKeyPair(final String name, final String fingerprint, final String publicKey, final String privateKey, final Account owner) { + private SSHKeyPair createAndSaveSSHKeyPair(final String name, final String fingerprint, final String publicKey, final String privateKey, final Account owner) { final SSHKeyPairVO newPair = new SSHKeyPairVO(); newPair.setAccountId(owner.getAccountId()); @@ -4998,7 +5030,7 @@ private SSHKeyPair createAndSaveSSHKeyPair(final String name, final String finge return newPair; } - private UserData createAndSaveUserData(final String name, final String userdata, final String params, final Account owner) { + private UserData createAndSaveUserData(final String name, final String userdata, final String params, final Account owner, final boolean isForCks) { final UserDataVO userDataVO = new UserDataVO(); userDataVO.setAccountId(owner.getAccountId()); @@ -5006,6 +5038,7 @@ private UserData createAndSaveUserData(final String name, final String userdata, userDataVO.setName(name); userDataVO.setUserData(userdata); userDataVO.setParams(params); + userDataVO.setForCks(isForCks); userDataDao.persist(userDataVO); diff --git a/server/src/main/java/com/cloud/storage/TemplateProfile.java b/server/src/main/java/com/cloud/storage/TemplateProfile.java index 49fc6836d73d..c2a3c5062214 100644 --- a/server/src/main/java/com/cloud/storage/TemplateProfile.java +++ b/server/src/main/java/com/cloud/storage/TemplateProfile.java @@ -55,6 +55,7 @@ public class TemplateProfile { TemplateType templateType; Boolean directDownload; 
Boolean deployAsIs; + Boolean forCks; Long size; public TemplateProfile(Long templateId, Long userId, String name, String displayText, CPU.CPUArch arch, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, @@ -342,6 +343,14 @@ public boolean isDeployAsIs() { return this.deployAsIs; } + public Boolean isForCks() { + return forCks; + } + + public void setForCks(Boolean forCks) { + this.forCks = forCks; + } + public CPU.CPUArch getArch() { return arch; } diff --git a/server/src/main/java/com/cloud/storage/upload/params/TemplateUploadParams.java b/server/src/main/java/com/cloud/storage/upload/params/TemplateUploadParams.java index d11edce14c2a..769aa3dc1f2a 100644 --- a/server/src/main/java/com/cloud/storage/upload/params/TemplateUploadParams.java +++ b/server/src/main/java/com/cloud/storage/upload/params/TemplateUploadParams.java @@ -30,10 +30,10 @@ public TemplateUploadParams(long userId, String name, String displayText, CPU.CP Long zoneId, Hypervisor.HypervisorType hypervisorType, String chksum, String templateTag, long templateOwnerId, Map details, Boolean sshkeyEnabled, - Boolean isDynamicallyScalable, Boolean isRoutingType, boolean deployAsIs) { + Boolean isDynamicallyScalable, Boolean isRoutingType, boolean deployAsIs, boolean forCks) { super(userId, name, displayText, arch, bits, passwordEnabled, requiresHVM, isPublic, featured, isExtractable, format, guestOSId, zoneId, hypervisorType, chksum, templateTag, templateOwnerId, details, - sshkeyEnabled, isDynamicallyScalable, isRoutingType, deployAsIs); + sshkeyEnabled, isDynamicallyScalable, isRoutingType, deployAsIs, forCks); setBootable(true); } } diff --git a/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java b/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java index 3bf3e77fe1d4..c3499d75c3bc 100644 --- a/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java +++ 
b/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java @@ -46,6 +46,7 @@ public abstract class UploadParamsBase implements UploadParams { private boolean isDynamicallyScalable; private boolean isRoutingType; private boolean deployAsIs; + private boolean forCks; private CPU.CPUArch arch; UploadParamsBase(long userId, String name, String displayText, CPU.CPUArch arch, @@ -55,7 +56,7 @@ public abstract class UploadParamsBase implements UploadParams { Long zoneId, Hypervisor.HypervisorType hypervisorType, String checksum, String templateTag, long templateOwnerId, Map details, boolean sshkeyEnabled, - boolean isDynamicallyScalable, boolean isRoutingType, boolean deployAsIs) { + boolean isDynamicallyScalable, boolean isRoutingType, boolean deployAsIs, boolean forCks) { this.userId = userId; this.name = name; this.displayText = displayText; @@ -232,6 +233,10 @@ void setBootable(boolean bootable) { this.bootable = bootable; } + void setForCks(boolean forCks) { + this.forCks = forCks; + } + void setBits(Integer bits) { this.bits = bits; } diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java index fbf70a8eaade..8f0d7cf88ed6 100644 --- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java @@ -246,6 +246,7 @@ public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocatio Long templateSize = performDirectDownloadUrlValidation(cmd.getFormat(), hypervisor, url, cmd.getZoneIds(), followRedirects); profile.setSize(templateSize); + profile.setForCks(cmd.isForCks()); } profile.setUrl(url); // Check that the resource limit for secondary storage won't be exceeded diff --git a/server/src/main/java/com/cloud/template/TemplateAdapter.java b/server/src/main/java/com/cloud/template/TemplateAdapter.java index 27ff563655dd..32a8db515aad 100644 --- 
a/server/src/main/java/com/cloud/template/TemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/TemplateAdapter.java @@ -79,6 +79,6 @@ TemplateProfile prepare(boolean isIso, Long userId, String name, String displayT TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, CPU.CPUArch arch, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable, - TemplateType templateType, boolean directDownload, boolean deployAsIs) throws ResourceAllocationException; + TemplateType templateType, boolean directDownload, boolean deployAsIs, boolean forCks) throws ResourceAllocationException; } diff --git a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java index b5be09376fc5..bf0f4f7d8b90 100644 --- a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java @@ -135,14 +135,14 @@ public TemplateProfile prepare(boolean isIso, Long userId, String name, String d Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String accountName, Long domainId, String chksum, Boolean bootable, Map details, boolean directDownload, boolean deployAsIs) throws ResourceAllocationException { return prepare(isIso, userId, name, displayText, arch, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, format, guestOSId, zoneId, - hypervisorType, chksum, bootable, null, null, details, false, null, false, TemplateType.USER, directDownload, deployAsIs); + hypervisorType, chksum, bootable, null, null, 
details, false, null, false, TemplateType.USER, directDownload, deployAsIs, false); } @Override public TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, CPU.CPUArch arch, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneIdList, HypervisorType hypervisorType, String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshkeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable, - TemplateType templateType, boolean directDownload, boolean deployAsIs) throws ResourceAllocationException { + TemplateType templateType, boolean directDownload, boolean deployAsIs, boolean forCks) throws ResourceAllocationException { //Long accountId = null; // parameters verification @@ -263,9 +263,11 @@ public TemplateProfile prepare(boolean isIso, long userId, String name, String d Long id = _tmpltDao.getNextInSequence(Long.class, "id"); CallContext.current().setEventDetails("Id: " + id + " name: " + name); - return new TemplateProfile(id, userId, name, displayText, arch, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, imgfmt, guestOSId, zoneIdList, + TemplateProfile profile = new TemplateProfile(id, userId, name, displayText, arch, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, imgfmt, guestOSId, zoneIdList, hypervisorType, templateOwner.getAccountName(), templateOwner.getDomainId(), templateOwner.getAccountId(), chksum, bootable, templateTag, details, sshkeyEnabled, null, isDynamicallyScalable, templateType, directDownload, deployAsIs); + profile.setForCks(forCks); + return profile; } @@ -310,7 +312,7 @@ public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocatio return prepare(false, CallContext.current().getCallingUserId(), cmd.getTemplateName(), cmd.getDisplayText(), cmd.getArch(), cmd.getBits(), 
cmd.isPasswordEnabled(), cmd.getRequiresHvm(), cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), zoneId, hypervisorType, cmd.getChecksum(), true, cmd.getTemplateTag(), owner, details, cmd.isSshKeyEnabled(), null, cmd.isDynamicallyScalable(), templateType, - cmd.isDirectDownload(), cmd.isDeployAsIs()); + cmd.isDirectDownload(), cmd.isDeployAsIs(), cmd.isForCks()); } @@ -343,7 +345,7 @@ private TemplateProfile prepareUploadParamsInternal(UploadParams params) throws params.isExtractable(), params.getFormat(), params.getGuestOSId(), zoneList, params.getHypervisorType(), params.getChecksum(), params.isBootable(), params.getTemplateTag(), owner, params.getDetails(), params.isSshKeyEnabled(), params.getImageStoreUuid(), - params.isDynamicallyScalable(), params.isRoutingType() ? TemplateType.ROUTING : TemplateType.USER, params.isDirectDownload(), params.isDeployAsIs()); + params.isDynamicallyScalable(), params.isRoutingType() ? TemplateType.ROUTING : TemplateType.USER, params.isDirectDownload(), params.isDeployAsIs(), false); } private Long getDefaultDeployAsIsGuestOsId() { @@ -364,7 +366,7 @@ public TemplateProfile prepare(GetUploadParamsForTemplateCmd cmd) throws Resourc BooleanUtils.toBoolean(cmd.isFeatured()), BooleanUtils.toBoolean(cmd.isExtractable()), cmd.getFormat(), osTypeId, cmd.getZoneId(), HypervisorType.getType(cmd.getHypervisor()), cmd.getChecksum(), cmd.getTemplateTag(), cmd.getEntityOwnerId(), cmd.getDetails(), BooleanUtils.toBoolean(cmd.isSshKeyEnabled()), - BooleanUtils.toBoolean(cmd.isDynamicallyScalable()), BooleanUtils.toBoolean(cmd.isRoutingType()), cmd.isDeployAsIs()); + BooleanUtils.toBoolean(cmd.isDynamicallyScalable()), BooleanUtils.toBoolean(cmd.isRoutingType()), cmd.isDeployAsIs(), cmd.isForCks()); return prepareUploadParamsInternal(params); } @@ -395,7 +397,7 @@ public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationExce return prepare(true, 
CallContext.current().getCallingUserId(), cmd.getIsoName(), cmd.getDisplayText(), cmd.getArch(), 64, cmd.isPasswordEnabled(), true, cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), ImageFormat.ISO.toString(), cmd.getOsTypeId(), zoneList, HypervisorType.None, cmd.getChecksum(), cmd.isBootable(), null, - owner, null, false, cmd.getImageStoreUuid(), cmd.isDynamicallyScalable(), TemplateType.USER, cmd.isDirectDownload(), false); + owner, null, false, cmd.getImageStoreUuid(), cmd.isDynamicallyScalable(), TemplateType.USER, cmd.isDirectDownload(), false, false); } protected VMTemplateVO persistTemplate(TemplateProfile profile, VirtualMachineTemplate.State initialState) { @@ -406,6 +408,7 @@ protected VMTemplateVO persistTemplate(TemplateProfile profile, VirtualMachineTe profile.getDisplayText(), profile.isPasswordEnabled(), profile.getGuestOsId(), profile.isBootable(), profile.getHypervisorType(), profile.getTemplateTag(), profile.getDetails(), profile.isSshKeyEnabled(), profile.IsDynamicallyScalable(), profile.isDirectDownload(), profile.isDeployAsIs(), profile.getArch()); template.setState(initialState); + template.setForCks(profile.isForCks()); if (profile.isDirectDownload()) { template.setSize(profile.getSize()); diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index 6073b4f0bb7e..d2adb4ba9b8c 100755 --- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -35,6 +35,7 @@ import javax.naming.ConfigurationException; import com.cloud.cpu.CPU; +import com.cloud.vm.VirtualMachine; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; @@ -1139,35 +1140,33 @@ public boolean templateIsDeleteable(long templateId) { @Override @ActionEvent(eventType = EventTypes.EVENT_ISO_DETACH, 
eventDescription = "detaching ISO", async = true) - public boolean detachIso(long vmId, boolean forced) { + public boolean detachIso(long vmId, Long isoParamId, Boolean... extraParams) { Account caller = CallContext.current().getCallingAccount(); Long userId = CallContext.current().getCallingUserId(); - // Verify input parameters - UserVmVO vmInstanceCheck = _userVmDao.findById(vmId); - if (vmInstanceCheck == null) { - throw new InvalidParameterValueException("Unable to find a virtual machine with id " + vmId); - } + boolean forced = extraParams != null && extraParams.length > 0 ? extraParams[0] : false; + boolean isVirtualRouter = extraParams != null && extraParams.length > 1 ? extraParams[1] : false; - UserVm userVM = _userVmDao.findById(vmId); - if (userVM == null) { + // Verify input parameters + VirtualMachine virtualMachine = !isVirtualRouter ? _userVmDao.findById(vmId) : _vmInstanceDao.findById(vmId); + if (virtualMachine == null || (isVirtualRouter && virtualMachine.getType() != VirtualMachine.Type.DomainRouter)) { throw new InvalidParameterValueException("Please specify a valid VM."); } - _accountMgr.checkAccess(caller, null, true, userVM); + _accountMgr.checkAccess(caller, null, true, virtualMachine); - Long isoId = userVM.getIsoId(); + Long isoId = !isVirtualRouter ? 
((UserVm) virtualMachine).getIsoId() : isoParamId; if (isoId == null) { throw new InvalidParameterValueException("The specified VM has no ISO attached to it."); } - CallContext.current().setEventDetails("Vm Id: " + userVM.getUuid() + " ISO Id: " + isoId); + CallContext.current().setEventDetails("Vm Id: " + virtualMachine.getUuid() + " ISO Id: " + isoId); - State vmState = userVM.getState(); + State vmState = virtualMachine.getState(); if (vmState != State.Running && vmState != State.Stopped) { throw new InvalidParameterValueException("Please specify a VM that is either Stopped or Running."); } - boolean result = attachISOToVM(vmId, userId, isoId, false, forced); // attach=false + boolean result = attachISOToVM(vmId, userId, isoId, false, forced, isVirtualRouter); // attach=false // => detach if (result) { return result; @@ -1178,16 +1177,28 @@ public boolean detachIso(long vmId, boolean forced) { @Override @ActionEvent(eventType = EventTypes.EVENT_ISO_ATTACH, eventDescription = "attaching ISO", async = true) - public boolean attachIso(long isoId, long vmId, boolean forced) { + public boolean attachIso(long isoId, long vmId, Boolean... extraParams) { Account caller = CallContext.current().getCallingAccount(); Long userId = CallContext.current().getCallingUserId(); + boolean forced = extraParams != null && extraParams.length > 0 ? extraParams[0] : false; + boolean isVirtualRouter = extraParams != null && extraParams.length > 1 ? 
extraParams[1] : false; + // Verify input parameters - UserVmVO vm = _userVmDao.findById(vmId); + VirtualMachine vm = _userVmDao.findById(vmId); if (vm == null) { - throw new InvalidParameterValueException("Unable to find a virtual machine with id " + vmId); + if (isVirtualRouter) { + vm = _vmInstanceDao.findById(vmId); + if (vm == null) { + throw new InvalidParameterValueException("Unable to find a virtual machine with id " + vmId); + } else if (vm.getType() != VirtualMachine.Type.DomainRouter) { + throw new InvalidParameterValueException("Unable to find a virtual router with id " + vmId); + } + } else { + throw new InvalidParameterValueException("Unable to find a virtual machine with id " + vmId); + } } - if (UserVmManager.SHAREDFSVM.equals(vm.getUserVmType())) { + if (vm instanceof UserVm && UserVmManager.SHAREDFSVM.equals(((UserVm) vm).getUserVmType())) { throw new InvalidParameterValueException("Operation not supported on Shared FileSystem Instance"); } @@ -1224,7 +1235,7 @@ public boolean attachIso(long isoId, long vmId, boolean forced) { if (VMWARE_TOOLS_ISO.equals(iso.getUniqueName()) && vm.getHypervisorType() != Hypervisor.HypervisorType.VMware) { throw new InvalidParameterValueException("Cannot attach VMware tools drivers to incompatible hypervisor " + vm.getHypervisorType()); } - boolean result = attachISOToVM(vmId, userId, isoId, true, forced); + boolean result = attachISOToVM(vmId, userId, isoId, true, forced, isVirtualRouter); if (result) { return result; } else { @@ -1263,10 +1274,10 @@ public TemplateInfo prepareIso(long isoId, long dcId, Long hostId, Long poolId) } } - private boolean attachISOToVM(long vmId, long isoId, boolean attach, boolean forced) { - UserVmVO vm = _userVmDao.findById(vmId); + private boolean attachISOToVM(long vmId, long isoId, boolean attach, boolean forced, boolean isVirtualRouter) { + VirtualMachine vm = !isVirtualRouter ? 
_userVmDao.findById(vmId) : _vmInstanceDao.findById(vmId); - if (vm == null) { + if (vm == null || (isVirtualRouter && vm.getType() != VirtualMachine.Type.DomainRouter)) { return false; } else if (vm.getState() != State.Running) { return true; @@ -1305,16 +1316,16 @@ private boolean attachISOToVM(long vmId, long isoId, boolean attach, boolean for return (a != null && a.getResult()); } - private boolean attachISOToVM(long vmId, long userId, long isoId, boolean attach, boolean forced) { + private boolean attachISOToVM(long vmId, long userId, long isoId, boolean attach, boolean forced, boolean isVirtualRouter) { UserVmVO vm = _userVmDao.findById(vmId); VMTemplateVO iso = _tmpltDao.findById(isoId); - boolean success = attachISOToVM(vmId, isoId, attach, forced); - if (success && attach) { + boolean success = attachISOToVM(vmId, isoId, attach, forced, isVirtualRouter); + if (success && attach && !isVirtualRouter) { vm.setIsoId(iso.getId()); _userVmDao.update(vmId, vm); } - if (success && !attach) { + if (success && !attach && !isVirtualRouter) { vm.setIsoId(null); _userVmDao.update(vmId, vm); } @@ -2119,6 +2130,7 @@ private VMTemplateVO updateTemplateOrIso(BaseUpdateTemplateOrIsoCmd cmd) { Map details = cmd.getDetails(); Account account = CallContext.current().getCallingAccount(); boolean cleanupDetails = cmd.isCleanupDetails(); + Boolean forCks = cmd instanceof UpdateTemplateCmd ? ((UpdateTemplateCmd) cmd).getForCks() : null; CPU.CPUArch arch = cmd.getCPUArch(); // verify that template exists @@ -2168,6 +2180,7 @@ private VMTemplateVO updateTemplateOrIso(BaseUpdateTemplateOrIsoCmd cmd) { isRoutingTemplate == null && templateType == null && templateTag == null && + forCks == null && arch == null && (! 
cleanupDetails && details == null) //update details in every case except this one ); @@ -2272,6 +2285,9 @@ else if (details != null && !details.isEmpty()) { template.setDetails(details); _tmpltDao.saveDetails(template); } + if (forCks != null) { + template.setForCks(forCks); + } _tmpltDao.update(id, template); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index bdd2ce94f3e6..35bb34d06e26 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -55,6 +55,7 @@ import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.ParserConfigurationException; +import com.cloud.deploy.DeploymentPlan; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -3289,8 +3290,8 @@ public UserVm startVirtualMachine(StartVMCmd cmd) throws ExecutionException, Con @Override @ActionEvent(eventType = EventTypes.EVENT_VM_START, eventDescription = "starting Vm", async = true) - public void startVirtualMachine(UserVm vm) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException { - _itMgr.advanceStart(vm.getUuid(), null, null); + public void startVirtualMachine(UserVm vm, DeploymentPlan plan) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException { + _itMgr.advanceStart(vm.getUuid(), null, plan, null); } @Override diff --git a/server/src/test/java/com/cloud/server/ManagementServerImplTest.java b/server/src/test/java/com/cloud/server/ManagementServerImplTest.java index b26cd455cfbb..df94a7d2157e 100644 --- a/server/src/test/java/com/cloud/server/ManagementServerImplTest.java +++ b/server/src/test/java/com/cloud/server/ManagementServerImplTest.java @@ -479,7 +479,7 @@ public void testListUserDataById() { Pair, Integer> result = new 
Pair(userDataList, 1); when(_userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result); - Pair, Integer> userdataResultList = spy.listUserDatas(cmd); + Pair, Integer> userdataResultList = spy.listUserDatas(cmd, false); Assert.assertEquals(userdataResultList.first().get(0), userDataList.get(0)); } @@ -512,7 +512,7 @@ public void testListUserDataByName() { Pair, Integer> result = new Pair(userDataList, 1); when(_userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result); - Pair, Integer> userdataResultList = spy.listUserDatas(cmd); + Pair, Integer> userdataResultList = spy.listUserDatas(cmd, false); Assert.assertEquals(userdataResultList.first().get(0), userDataList.get(0)); } @@ -545,7 +545,7 @@ public void testListUserDataByKeyword() { Pair, Integer> result = new Pair(userDataList, 1); when(_userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result); - Pair, Integer> userdataResultList = spy.listUserDatas(cmd); + Pair, Integer> userdataResultList = spy.listUserDatas(cmd, false); Assert.assertEquals(userdataResultList.first().get(0), userDataList.get(0)); } diff --git a/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java b/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java index 7f4344f30e42..7533767c00bc 100644 --- a/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java +++ b/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java @@ -1122,6 +1122,11 @@ public List getInternalLoadBalancerElements( return null; } + @Override + public boolean handleCksIsoOnNetworkVirtualRouter(Long virtualRouterId, boolean mount) { + return false; + } + @Override public void expungeLbVmRefs(List vmIds, Long batchSize) { } diff --git a/systemvm/debian/opt/cloud/bin/cks_iso.sh b/systemvm/debian/opt/cloud/bin/cks_iso.sh new file mode 100644 index 000000000000..7cbcbc040d59 --- /dev/null +++ 
b/systemvm/debian/opt/cloud/bin/cks_iso.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +BASE_DIR=/var/www/html +CKS_ISO_DIR=$BASE_DIR/cks-iso +if [ "$1" == "true" ] +then + mkdir -p $CKS_ISO_DIR + echo "Options +Indexes" > $BASE_DIR/.htaccess + echo "Mounting CKS ISO into $CKS_ISO_DIR" + mount /dev/cdrom $CKS_ISO_DIR +else + echo "Unmounting CKS ISO from $CKS_ISO_DIR" + umount $CKS_ISO_DIR + echo "Options -Indexes" > $BASE_DIR/.htaccess + rm -rf $CKS_ISO_DIR +fi +echo "Restarting apache2 service" +service apache2 restart diff --git a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py index 46259dcb356c..ce9493c5e695 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py @@ -110,7 +110,12 @@ def configure_server(self): if gn.get_dns() and device: sline = "dhcp-option=tag:interface-%s-%s,6" % (device, idx) dns_list = [x for x in gn.get_dns() if x] - if self.config.is_dhcp() and not self.config.use_extdns(): + if (self.config.is_vpc() or self.config.is_router()) and ('is_vr_guest_gateway' in gn.data and gn.data['is_vr_guest_gateway']): + if gateway in dns_list: + dns_list.remove(gateway) + if 
gn.data['router_guest_ip'] != ip: + dns_list.insert(0, ip) + elif self.config.is_dhcp() and not self.config.use_extdns(): guest_ip = self.config.address().get_guest_ip() if guest_ip and guest_ip in dns_list and ip not in dns_list: # Replace the default guest IP in VR with the ip in additional IP ranges, if shared network has multiple IP ranges. @@ -142,9 +147,9 @@ def configure_server(self): else: listen_address.append(ip) # Add localized "data-server" records in /etc/hosts for VPC routers - if self.config.is_vpc() or self.config.is_router(): + if (self.config.is_vpc() or self.config.is_router()) and ('is_vr_guest_gateway' not in gn.data or (not gn.data['is_vr_guest_gateway'])): self.add_host(gateway, "%s data-server" % CsHelper.get_hostname()) - elif self.config.is_dhcp(): + elif self.config.is_dhcp() or (self.config.is_vpc() or self.config.is_router() and gn.data['is_vr_guest_gateway']) : self.add_host(ip, "%s data-server" % CsHelper.get_hostname()) idx += 1 diff --git a/systemvm/debian/opt/cloud/bin/cs/CsGuestNetwork.py b/systemvm/debian/opt/cloud/bin/cs/CsGuestNetwork.py index 615c61d98e30..fe8737208c4b 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsGuestNetwork.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsGuestNetwork.py @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. from merge import DataBag -from . 
import CsHelper class CsGuestNetwork: diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index 20f1cb3224ae..a77829bc255b 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -35,7 +35,9 @@ destroyVirtualMachine, deleteNetwork, addVirtualMachinesToKubernetesCluster, - removeVirtualMachinesFromKubernetesCluster) + removeVirtualMachinesFromKubernetesCluster, + addNodesToKubernetesCluster, + removeNodesFromKubernetesCluster) from marvin.cloudstackException import CloudstackAPIException from marvin.codes import PASS, FAILED from marvin.lib.base import (Template, @@ -49,28 +51,84 @@ VPC, NetworkACLList, NetworkACL, - VirtualMachine) + VirtualMachine, + PublicIPAddress, + FireWallRule, + NATRule) from marvin.lib.utils import (cleanup_resources, validateList, random_gen) from marvin.lib.common import (get_zone, get_domain, - get_template) + get_template, + get_test_template) from marvin.sshClient import SshClient from nose.plugins.attrib import attr from marvin.lib.decoratorGenerators import skipTestIf from kubernetes import client, config -import time, io, yaml +import time, io, yaml, random _multiprocess_shared_ = True k8s_cluster = None +k8s_cluster_node_offerings = None VPC_DATA = { "cidr": "10.1.0.0/22", "tier1_gateway": "10.1.1.1", "tier_netmask": "255.255.255.0" } +RAND_SUFFIX = random_gen() +NODES_TEMPLATE = { + "kvm": { + "name": "cks-u2204-kvm-" + RAND_SUFFIX, + "displaytext": "cks-u2204-kvm-" + RAND_SUFFIX, + "format": "qcow2", + "hypervisor": "kvm", + "ostype": "Ubuntu 22.04 LTS", + "url": "https://download.cloudstack.org/testing/custom_templates/ubuntu/22.04/cks-ubuntu-2204-kvm.qcow2.bz2", + "requireshvm": "True", + "ispublic": "True", + "isextractable": "True", + "forcks": "True" + }, + "xenserver": { + "name": "cks-u2204-hyperv-" + RAND_SUFFIX, + "displaytext": "cks-u2204-hyperv-" + RAND_SUFFIX, + "format": "vhd", + 
"hypervisor": "xenserver", + "ostype": "Ubuntu 22.04 LTS", + "url": "https://download.cloudstack.org/testing/custom_templates/ubuntu/22.04/cks-ubuntu-2204-hyperv.vhd.zip", + "requireshvm": "True", + "ispublic": "True", + "isextractable": "True", + "forcks": "True" + }, + "hyperv": { + "name": "cks-u2204-hyperv-" + RAND_SUFFIX, + "displaytext": "cks-u2204-hyperv-" + RAND_SUFFIX, + "format": "vhd", + "hypervisor": "hyperv", + "ostype": "Ubuntu 22.04 LTS", + "url": "https://download.cloudstack.org/testing/custom_templates/ubuntu/22.04/cks-ubuntu-2204-hyperv.vhd.zip", + "requireshvm": "True", + "ispublic": "True", + "isextractable": "True", + "forcks": "True" + }, + "vmware": { + "name": "cks-u2204-vmware-" + RAND_SUFFIX, + "displaytext": "cks-u2204-vmware-" + RAND_SUFFIX, + "format": "ova", + "hypervisor": "vmware", + "ostype": "Ubuntu 22.04 LTS", + "url": "https://download.cloudstack.org/testing/custom_templates/ubuntu/22.04/cks-ubuntu-2204-vmware.ova", + "requireshvm": "True", + "ispublic": "True", + "isextractable": "True", + "forcks": "True" + } +} class TestKubernetesCluster(cloudstackTestCase): @@ -84,6 +142,7 @@ def setUpClass(cls): cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__ cls.hypervisorNotSupported = False + cls.hypervisorIsNotVmware = cls.hypervisor.lower() != "vmware" if cls.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]: cls.hypervisorNotSupported = True cls.setup_failed = False @@ -129,13 +188,40 @@ def setUpClass(cls): (cls.services["cks_kubernetes_versions"][cls.k8s_version_to]["semanticversion"], cls.services["cks_kubernetes_versions"][cls.k8s_version_to]["url"], e)) if cls.setup_failed == False: + cls.nodes_template = None + cls.mgmtSshKey = None + if cls.hypervisor.lower() == "vmware": + cls.nodes_template = get_test_template(cls.apiclient, + cls.zone.id, + cls.hypervisor, + NODES_TEMPLATE) + cls.nodes_template.update(cls.apiclient, forcks=True) + cls.mgmtSshKey = cls.getMgmtSshKey() cks_offering_data = 
cls.services["cks_service_offering"] cks_offering_data["name"] = 'CKS-Instance-' + random_gen() cls.cks_service_offering = ServiceOffering.create( cls.apiclient, cks_offering_data ) + cks_offering_data["name"] = 'CKS-Worker-Offering-' + random_gen() + cls.cks_worker_nodes_offering = ServiceOffering.create( + cls.apiclient, + cks_offering_data + ) + cks_offering_data["name"] = 'CKS-Control-Offering-' + random_gen() + cls.cks_control_nodes_offering = ServiceOffering.create( + cls.apiclient, + cks_offering_data + ) + cks_offering_data["name"] = 'CKS-Etcd-Offering-' + random_gen() + cls.cks_etcd_nodes_offering = ServiceOffering.create( + cls.apiclient, + cks_offering_data + ) cls._cleanup.append(cls.cks_service_offering) + cls._cleanup.append(cls.cks_worker_nodes_offering) + cls._cleanup.append(cls.cks_control_nodes_offering) + cls._cleanup.append(cls.cks_etcd_nodes_offering) cls.domain = get_domain(cls.apiclient) cls.account = Account.create( cls.apiclient, @@ -204,6 +290,19 @@ def updateVmwareSettings(cls, tearDown): name="vmware.create.full.clone", value=value) + @classmethod + def getMgmtSshKey(cls): + """Get the management server SSH public key""" + sshClient = SshClient( + cls.mgtSvrDetails["mgtSvrIp"], + 22, + cls.mgtSvrDetails["user"], + cls.mgtSvrDetails["passwd"] + ) + command = "cat /var/cloudstack/management/.ssh/id_rsa.pub" + response = sshClient.execute(command) + return str(response[0]) + @classmethod def restartServer(cls): """Restart management server""" @@ -644,6 +743,151 @@ def test_11_test_unmanaged_cluster_lifecycle(self): self.deleteKubernetesClusterAndVerify(cluster.id) return + @attr(tags=["advanced", "smoke"], required_hardware="true") + def test_12_test_deploy_cluster_different_offerings_per_node_type(self): + """Test creating a CKS cluster with different offerings per node type + + # Validate the following on Kubernetes cluster creation: + # - Use a service offering for control nodes + # - Use a service offering for worker nodes + """ + if 
self.setup_failed == True: + self.fail("Setup incomplete") + cluster = self.getValidKubernetesCluster(worker_offering=self.cks_worker_nodes_offering, + control_offering=self.cks_control_nodes_offering) + self.assertEqual( + cluster.workerofferingid, + self.cks_worker_nodes_offering.id, + "Check Worker Nodes Offering {}, {}".format(cluster.workerofferingid, self.cks_worker_nodes_offering.id) + ) + self.assertEqual( + cluster.controlofferingid, + self.cks_control_nodes_offering.id, + "Check Control Nodes Offering {}, {}".format(cluster.workerofferingid, self.cks_worker_nodes_offering.id) + ) + self.assertEqual( + cluster.etcdnodes, + 0, + "No Etcd Nodes expected but got {}".format(cluster.etcdnodes) + ) + self.debug("Deleting Kubernetes cluster with ID: %s" % cluster.id) + self.deleteKubernetesClusterAndVerify(cluster.id) + return + + @attr(tags=["advanced", "smoke"], required_hardware="true") + @skipTestIf("hypervisorIsNotVmware") + def test_13_test_add_external_nodes_to_cluster(self): + """Test adding and removing external nodes to CKS clusters + + # Validate the following: + # - Deploy Kubernetes Cluster + # - Deploy VM on the same network as the Kubernetes cluster with the worker nodes offering and CKS ready template + # - Add external node to the Kubernetes Cluster + # - Remove external node from the Kubernetes Cluster + """ + if self.setup_failed == True: + self.fail("Setup incomplete") + cluster = self.getValidKubernetesCluster(worker_offering=self.cks_worker_nodes_offering, + control_offering=self.cks_control_nodes_offering) + self.assertEqual( + cluster.size, + 1, + "Expected 1 worker node but got {}".format(cluster.size) + ) + self.services["virtual_machine"]["template"] = self.nodes_template.id + external_node = VirtualMachine.create(self.apiclient, + self.services["virtual_machine"], + zoneid=self.zone.id, + accountid=self.account.name, + domainid=self.account.domainid, + rootdiskcontroller="osdefault", + rootdisksize=8, + 
serviceofferingid=self.cks_worker_nodes_offering.id, + networkids=cluster.networkid) + + # Acquire public IP and create Port Forwarding Rule and Firewall rule for SSH access + free_ip_addresses = PublicIPAddress.list( + self.apiclient, + domainid=self.account.domainid, + account=self.account.name, + forvirtualnetwork=True, + state='Free' + ) + random.shuffle(free_ip_addresses) + external_node_ip = free_ip_addresses[0] + external_node_ipaddress = PublicIPAddress.create( + self.apiclient, + zoneid=self.zone.id, + networkid=cluster.networkid, + ipaddress=external_node_ip.ipaddress + ) + self.debug("Creating Firewall rule for VM ID: %s" % external_node.id) + fw_rule = FireWallRule.create( + self.apiclient, + ipaddressid=external_node_ip.id, + protocol='TCP', + cidrlist=['0.0.0.0/0'], + startport=22, + endport=22 + ) + pf_rule = { + "privateport": 22, + "publicport": 22, + "protocol": "TCP" + } + nat_rule = NATRule.create( + self.apiclient, + external_node, + pf_rule, + ipaddressid=external_node_ip.id + ) + + # Add the management server SSH key to the authorized hosts on the external node + node_ssh_client = SshClient( + external_node_ip.ipaddress, + 22, + 'cloud', + 'cloud', + retries=30, + delay=10 + ) + node_ssh_client.execute("echo '" + self.mgmtSshKey + "' > ~/.ssh/authorized_keys") + # Remove acquired public IP address and rules + nat_rule.delete(self.apiclient) + fw_rule.delete(self.apiclient) + external_node_ipaddress.delete(self.apiclient) + + self.addExternalNodesToKubernetesCluster(cluster.id, [external_node.id]) + cluster = self.listKubernetesCluster(cluster.id) + self.assertEqual( + cluster.size, + 2, + "Expected 2 worker nodes but got {}".format(cluster.size) + ) + self.removeExternalNodesFromKubernetesCluster(cluster.id, [external_node.id]) + cluster = self.listKubernetesCluster(cluster.id) + self.assertEqual( + cluster.size, + 1, + "Expected 1 worker node but got {}".format(cluster.size) + ) + VirtualMachine.delete(external_node, self.apiclient, 
expunge=True) + self.debug("Deleting Kubernetes cluster with ID: %s" % cluster.id) + self.deleteKubernetesClusterAndVerify(cluster.id) + return + + def addExternalNodesToKubernetesCluster(self, cluster_id, vm_list): + cmd = addNodesToKubernetesCluster.addNodesToKubernetesClusterCmd() + cmd.id = cluster_id + cmd.nodeids = vm_list + return self.apiclient.addNodesToKubernetesCluster(cmd) + + def removeExternalNodesFromKubernetesCluster(self, cluster_id, vm_list): + cmd = removeNodesFromKubernetesCluster.removeNodesFromKubernetesClusterCmd() + cmd.id = cluster_id + cmd.nodeids = vm_list + return self.apiclient.removeNodesFromKubernetesCluster(cmd) + def addVirtualMachinesToKubernetesCluster(self, cluster_id, vm_list): cmd = addVirtualMachinesToKubernetesCluster.addVirtualMachinesToKubernetesClusterCmd() cmd.id = cluster_id @@ -658,8 +902,8 @@ def removeVirtualMachinesFromKubernetesCluster(self, cluster_id, vm_list): return self.apiclient.removeVirtualMachinesFromKubernetesCluster(cmd) - - def createKubernetesCluster(self, name, version_id, size=1, control_nodes=1, cluster_type='CloudManaged'): + def createKubernetesCluster(self, name, version_id, size=1, control_nodes=1, etcd_nodes=0, cluster_type='CloudManaged', + workers_offering=None, control_offering=None, etcd_offering=None): createKubernetesClusterCmd = createKubernetesCluster.createKubernetesClusterCmd() createKubernetesClusterCmd.name = name createKubernetesClusterCmd.description = name + "-description" @@ -672,6 +916,22 @@ def createKubernetesCluster(self, name, version_id, size=1, control_nodes=1, clu createKubernetesClusterCmd.account = self.account.name createKubernetesClusterCmd.domainid = self.domain.id createKubernetesClusterCmd.clustertype = cluster_type + if workers_offering: + createKubernetesClusterCmd.nodeofferings.append({ + "node": "WORKER", + "offering": workers_offering.id + }) + if control_offering: + createKubernetesClusterCmd.nodeofferings.append({ + "node": "CONTROL", + "offering": 
control_offering.id + }) + if etcd_nodes > 0 and etcd_offering: + createKubernetesClusterCmd.etcdnodes = etcd_nodes + createKubernetesClusterCmd.nodeofferings.append({ + "node": "ETCD", + "offering": etcd_offering.id + }) if self.default_network: createKubernetesClusterCmd.networkid = self.default_network.id clusterResponse = self.apiclient.createKubernetesCluster(createKubernetesClusterCmd) @@ -735,7 +995,8 @@ def waitForAutoscalerPodInRunningState(self, cluster_id, retries=5, interval=60) retries = retries - 1 return False - def getValidKubernetesCluster(self, size=1, control_nodes=1, version={}): + def getValidKubernetesCluster(self, size=1, control_nodes=1, version={}, etcd_nodes=0, + worker_offering=None, control_offering=None, etcd_offering=None): cluster = k8s_cluster # Does a cluster already exist ? @@ -743,7 +1004,9 @@ def getValidKubernetesCluster(self, size=1, control_nodes=1, version={}): if not version: version = self.kubernetes_version_v2 self.debug("No existing cluster available, k8s_cluster: %s" % cluster) - return self.createNewKubernetesCluster(version, size, control_nodes) + return self.createNewKubernetesCluster(version, size, control_nodes, etcd_nodes=etcd_nodes, + worker_offering=worker_offering, control_offering=control_offering, + etcd_offering=etcd_offering) # Is the existing cluster what is needed ? valid = cluster.size == size and cluster.controlnodes == control_nodes @@ -759,7 +1022,9 @@ def getValidKubernetesCluster(self, size=1, control_nodes=1, version={}): if cluster == None: # Looks like the cluster disappeared ! 
self.debug("Existing cluster, k8s_cluster ID: %s not returned by list API" % cluster_id) - return self.createNewKubernetesCluster(version, size, control_nodes) + return self.createNewKubernetesCluster(version, size, control_nodes, etcd_nodes=etcd_nodes, + worker_offering=worker_offering, control_offering=control_offering, + etcd_offering=etcd_offering) if valid: try: @@ -775,13 +1040,18 @@ def getValidKubernetesCluster(self, size=1, control_nodes=1, version={}): self.deleteKubernetesClusterAndVerify(cluster.id, False, True) self.debug("No valid cluster, need to deploy a new one") - return self.createNewKubernetesCluster(version, size, control_nodes) + return self.createNewKubernetesCluster(version, size, control_nodes, etcd_nodes=etcd_nodes, + worker_offering=worker_offering, control_offering=control_offering, + etcd_offering=etcd_offering) - def createNewKubernetesCluster(self, version, size, control_nodes) : + def createNewKubernetesCluster(self, version, size, control_nodes, etcd_nodes=0, + worker_offering=None, control_offering=None, etcd_offering=None): name = 'testcluster-' + random_gen() self.debug("Creating for Kubernetes cluster with name %s" % name) try: - cluster = self.createKubernetesCluster(name, version.id, size, control_nodes) + cluster = self.createKubernetesCluster(name, version.id, size, control_nodes, etcd_nodes=etcd_nodes, + workers_offering=worker_offering, control_offering=control_offering, + etcd_offering=etcd_offering) self.verifyKubernetesCluster(cluster, name, version.id, size, control_nodes) except Exception as ex: cluster = self.listKubernetesCluster(cluster_name = name) diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 8d28749a637b..fc7cfb18164f 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -256,6 +256,8 @@ 'deleteASNRange': 'AS Number Range', 'listASNumbers': 'AS Number', 'releaseASNumber': 'AS Number', + 'addNodesToKubernetesCluster': 'Kubernetes Service', + 
'removeNodesFromKubernetesCluster': 'Kubernetes Service' } diff --git a/tools/appliance/cks/ubuntu/22.04/cks-ubuntu-2204.json b/tools/appliance/cks/ubuntu/22.04/cks-ubuntu-2204.json new file mode 100644 index 000000000000..c7ee09f03547 --- /dev/null +++ b/tools/appliance/cks/ubuntu/22.04/cks-ubuntu-2204.json @@ -0,0 +1,54 @@ +{ + "_license": "Apache License 2.0", + "builders": [ + { + "accelerator": "kvm", + "boot_command": [ + "clinux /casper/vmlinuz --- autoinstall ds='nocloud-net;seedfrom=http://{{ .HTTPIP }}:{{ .HTTPPort }}/'", + "", + "initrd /casper/initrd", + "", + "boot", + "" + ], + "vm_name": "cks-ubuntu-2204", + "iso_checksum": "sha256:5e38b55d57d94ff029719342357325ed3bda38fa80054f9330dc789cd2d43931", + "iso_url": "https://old-releases.ubuntu.com/releases/jammy/ubuntu-22.04.2-live-server-amd64.iso", + "shutdown_command": "sudo shutdown -P now", + "net_device": "virtio-net", + "output_directory": "../dist", + "format": "qcow2", + "headless": true, + "http_directory": "http", + "ssh_password": "cloud", + "ssh_timeout": "30m", + "ssh_username": "cloud", + "type": "qemu", + "disk_interface": "virtio", + "disk_size": "5000M", + "qemuargs": [ + [ + "-m", + "2048M" + ], + [ + "-smp", + "1" + ] + ] + } + ], + "description": "CloudStack SystemVM template", + "provisioners": [ + { + "execute_command": "echo 'cloud' | sudo -u root -S bash {{.Path}}", + "scripts": [ + "scripts/apt_upgrade.sh", + "scripts/configure_networking.sh", + "scripts/configure-cloud-init.sh", + "scripts/cleanup.sh" + ], + "type": "shell" + } + ] +} diff --git a/tools/appliance/cks/ubuntu/22.04/http/meta-data b/tools/appliance/cks/ubuntu/22.04/http/meta-data new file mode 100644 index 000000000000..13a83393a912 --- /dev/null +++ b/tools/appliance/cks/ubuntu/22.04/http/meta-data @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/tools/appliance/cks/ubuntu/22.04/http/user-data b/tools/appliance/cks/ubuntu/22.04/http/user-data new file mode 100644 index 000000000000..15a7f8f32354 --- /dev/null +++ b/tools/appliance/cks/ubuntu/22.04/http/user-data @@ -0,0 +1,103 @@ +#cloud-config +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +autoinstall: + version: 1 + # Disable ssh server during installation, otherwise packer tries to connect and exceed max attempts + early-commands: + - systemctl stop ssh + # Configure the locale + locale: en_US + keyboard: + layout: us + refresh-installer: + update: yes + channel: stable + # Create a single-partition with no swap space. Kubernetes + # really dislikes the idea of anyone else managing memory. + # For more information on how partitioning is configured, + # please refer to https://curtin.readthedocs.io/en/latest/topics/storage.html. + storage: + swap: + size: 0 + grub: + replace_linux_default: false + config: + - type: disk + id: disk-0 + size: smallest + grub_device: true + preserve: false + ptable: msdos + wipe: superblock + - type: partition + id: partition-0 + device: disk-0 + size: -1 + number: 1 + preserve: false + flag: boot + - type: format + id: format-0 + volume: partition-0 + fstype: ext4 + preserve: false + - type: mount + id: mount-0 + device: format-0 + path: / + updates: 'all' + ssh: + install-server: true + allow-pw: true + # Customize the list of packages installed. + packages: + - open-vm-tools + - openssh-server + - cloud-init + - wget + - tasksel + # Create the default user. + # Ensures the "cloud" user doesn't require a password to use sudo. + user-data: + disable_root: false + timezone: UTC + users: + - name: cloud + # openssl passwd -6 -stdin <<< cloud + passwd: $6$pAFEBhaCDzN4ZmrO$kMmUuxhPMx447lJ8Mtas8n6uqkojh94nQ7I8poI6Kl4vRGeZKE57utub1cudS1fGyG8HUxK9YHIygd7vCpRFN0 + groups: [adm, cdrom, dip, plugdev, lxd, sudo] + lock-passwd: false + sudo: ALL=(ALL) NOPASSWD:ALL + shell: /bin/bash + + # This command runs after all other steps; it: + # 1. Disables swapfiles + # 2. Removes the existing swapfile + # 3. Removes the swapfile entry from /etc/fstab + # 4. Removes snapd, https://bugs.launchpad.net/subiquity/+bug/1946609 + # 5. Cleans up any packages that are no longer required + # 6. 
Removes the cached list of packages + late-commands: + - curtin in-target --target=/target -- swapoff -a + - curtin in-target --target=/target -- rm -f /swap.img + - curtin in-target --target=/target -- sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab + - chroot /target apt-get purge -y snapd + - curtin in-target --target=/target -- apt-get purge --auto-remove -y + - curtin in-target --target=/target -- apt-get clean + - curtin in-target --target=/target -- rm -rf /var/lib/apt/lists/* diff --git a/tools/appliance/cks/ubuntu/22.04/scripts/apt_upgrade.sh b/tools/appliance/cks/ubuntu/22.04/scripts/apt_upgrade.sh new file mode 100644 index 000000000000..22d25d628efa --- /dev/null +++ b/tools/appliance/cks/ubuntu/22.04/scripts/apt_upgrade.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +set -e +set -x + +function apt_upgrade() { + DEBIAN_FRONTEND=noninteractive + DEBIAN_PRIORITY=critical + + rm -fv /root/*.iso + apt-get -q -y update + + apt-get -q -y upgrade + apt-get -q -y dist-upgrade + + apt-get -y autoremove --purge + apt-get autoclean + apt-get clean +} + +return 2>/dev/null || apt_upgrade diff --git a/tools/appliance/cks/ubuntu/22.04/scripts/cleanup.sh b/tools/appliance/cks/ubuntu/22.04/scripts/cleanup.sh new file mode 100644 index 000000000000..ab0ceb628611 --- /dev/null +++ b/tools/appliance/cks/ubuntu/22.04/scripts/cleanup.sh @@ -0,0 +1,80 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +set -e + +function cleanup_apt() { + export DEBIAN_FRONTEND=noninteractive + apt-get -y remove --purge dictionaries-common busybox \ + task-english task-ssh-server tasksel tasksel-data laptop-detect wamerican sharutils \ + nano util-linux-locales krb5-locales + + apt-get -y autoremove --purge + apt-get autoclean + apt-get clean +} + +# Removing leftover leases and persistent rules +function cleanup_dhcp() { + rm -f /var/lib/dhcp/* +} + +# Make sure Udev doesn't block our network +function cleanup_dev() { + echo "cleaning up udev rules" + rm -f /etc/udev/rules.d/70-persistent-net.rules + rm -rf /dev/.udev/ + rm -f /lib/udev/rules.d/75-persistent-net-generator.rules +} + +function cleanup_misc() { + # Scripts + rm -fr /home/cloud/cloud_scripts* + rm -f /usr/share/cloud/cloud-scripts.tar + rm -f /root/.rnd + rm -f /var/www/html/index.html + # Logs + rm -f /var/log/*.log + rm -f /var/log/apache2/* + rm -f /var/log/messages + rm -f /var/log/syslog + rm -f /var/log/messages + rm -fr /var/log/apt + rm -fr /var/log/installer + # Docs and data files + rm -fr /var/lib/apt/* + rm -fr /var/cache/apt/* + rm -fr /var/cache/debconf/*old + rm -fr /usr/share/doc + rm -fr /usr/share/man + rm -fr /usr/share/info + rm -fr /usr/share/lintian + rm -fr /usr/share/apache2/icons + find /usr/share/locale -type f | grep -v en_US | xargs rm -fr + find /usr/share/zoneinfo -type f | grep -v UTC | xargs rm -fr + rm -fr /tmp/* +} + +function cleanup() { + cleanup_apt + cleanup_dhcp + cleanup_dev + cleanup_misc +} + +return 2>/dev/null || cleanup diff --git a/tools/appliance/cks/ubuntu/22.04/scripts/configure-cloud-init.sh b/tools/appliance/cks/ubuntu/22.04/scripts/configure-cloud-init.sh new file mode 100644 index 000000000000..80b661f30f55 --- /dev/null +++ b/tools/appliance/cks/ubuntu/22.04/scripts/configure-cloud-init.sh @@ -0,0 +1,49 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +function install_packages() { + apt-get install -y qemu-guest-agent rsyslog logrotate cron net-tools ifupdown cloud-guest-utils conntrack apt-transport-https ca-certificates curl \ + gnupg gnupg-agent software-properties-common gnupg lsb-release + apt-get install -y python3-json-pointer python3-jsonschema cloud-init resolvconf + + sudo mkdir -p /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + apt update + apt install containerd.io + + systemctl start containerd + systemctl enable containerd +} + +function configure_services() { + install_packages + + systemctl daemon-reload +cat <<EOF > /etc/cloud/cloud.cfg.d/cloudstack.cfg +datasource_list: ['CloudStack'] +datasource: + CloudStack: + max_wait: 120 + timeout: 50 +EOF +} + +configure_services diff --git a/tools/appliance/cks/ubuntu/22.04/scripts/configure_networking.sh b/tools/appliance/cks/ubuntu/22.04/scripts/configure_networking.sh new file mode 100644 index 000000000000..a5e4179a4416 --- /dev/null +++ 
b/tools/appliance/cks/ubuntu/22.04/scripts/configure_networking.sh @@ -0,0 +1,73 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +set -e +set -x + +HOSTNAME=cksnode + +function configure_resolv_conf() { + grep 8.8.8.8 /etc/resolv.conf && grep 8.8.4.4 /etc/resolv.conf && return + + cat > /etc/resolv.conf << EOF +nameserver 8.8.8.8 +nameserver 8.8.4.4 +EOF +} + +# Delete entry in /etc/hosts derived from dhcp +function delete_dhcp_ip() { + result=$(grep 127.0.1.1 /etc/hosts || true) + [ "${result}" == "" ] && return + + sed -i '/127.0.1.1/d' /etc/hosts +} + +function configure_hostname() { + sed -i "s/root@\(.*\)$/root@$HOSTNAME/g" /etc/ssh/ssh_host_*.pub + + echo "$HOSTNAME" > /etc/hostname + hostname $HOSTNAME +} + +function configure_interfaces() { + cat > /etc/network/interfaces << EOF +source /etc/network/interfaces.d/* + +# The loopback network interface +auto lo +iface lo inet loopback + +# The primary network interface +auto ens35 +iface ens35 inet dhcp + +EOF + +echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf +sysctl -p /etc/sysctl.conf +} + +function configure_networking() { + configure_interfaces + configure_resolv_conf + delete_dhcp_ip + configure_hostname +} + +return 2>/dev/null || 
configure_networking diff --git a/tools/appliance/cks/ubuntu/22.04/scripts/setup_template.sh b/tools/appliance/cks/ubuntu/22.04/scripts/setup_template.sh new file mode 100644 index 000000000000..be8577cf4e85 --- /dev/null +++ b/tools/appliance/cks/ubuntu/22.04/scripts/setup_template.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +function create_user() { + username=$1 + password=$2 + + # Create the user with the specified username + sudo useradd -m -s /bin/bash $username + + # Set the user's password + echo "$username:$password" | sudo chpasswd + + echo "User '$username' has been created with the password '$password'" +} + +sudo mkdir -p /opt/bin +create_user cloud password + +echo $SSHKEY +if [[ ! 
-z "$SSHKEY" ]]; then + mkdir -p /home/cloud/.ssh/ + mkdir .ssh + echo $SSHKEY > ~/.ssh/authorized_keys +else + echo "Please place Management server public key in the variables" + exit 1 +fi diff --git a/tools/appliance/cks/ubuntu/build.sh b/tools/appliance/cks/ubuntu/build.sh new file mode 100755 index 000000000000..0d9d8ea4e49b --- /dev/null +++ b/tools/appliance/cks/ubuntu/build.sh @@ -0,0 +1,346 @@ +#!/bin/bash -l +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# build script which wraps around packer and virtualbox to create the CKS template + +function usage() { + cat <<USAGE +Usage: ./build.sh [ubuntu version] +USAGE +} + +# NOTE(review): the usage heredoc, appliance variables and log() helper below were +# reconstructed from text garbled during extraction (the span between "cat <" and ">&2" +# was lost) - verify against the original commit before relying on the exact wording. +appliance="${1:-cks-ubuntu-2204}" +appliance_build_name=${appliance} + +# logging support +function log() { + local level=${1?} + shift + echo "[$(date '+%F %T')] ${level}: $@" >&2 +} + +function error() { + log ERROR $@ + exit 1 +} + +# cleanup code support +declare -a on_exit_items + +function on_exit() { + for (( i=${#on_exit_items[@]}-1 ; i>=0 ; i-- )) ; do + sleep 2 + log DEBUG "on_exit: ${on_exit_items[i]}" + eval ${on_exit_items[i]} + done +} + +function add_on_exit() { + local n=${#on_exit_items[*]} + on_exit_items[${n}]="$*" + if [ ${n} -eq 0 ]; then + log DEBUG "Setting trap" + trap on_exit EXIT + fi +} + +# retry code support +function retry() { + local times=$1 + shift + local count=0 + while [ ${count} -lt ${times} ]; do + "$@" && break + count=$(( $count + 1 )) + sleep ${count} + done + + if [ ${count} -eq ${times} ]; then + error "Failed ${times} times: $@" + fi +} + +### +### Script logic +### + +function prepare() { + log INFO "preparing for build" + rm -rf dist *.ova *.vhd *.vdi *.qcow* *.bz2 *.vmdk *.ovf +} + +function packer_build() { + log INFO "building new image with packer" + #cd ${appliance_build_name} && packer build template.json && cd .. + cd 22.04 && packer build ${appliance_build_name}.json && cd ..
+} + +function stage_vmx() { + cat << VMXFILE > "${1}.vmx" +.encoding = "UTF-8" +displayname = "${1}" +annotation = "${1}" +guestos = "otherlinux-64" +virtualHW.version = "11" +config.version = "8" +numvcpus = "1" +cpuid.coresPerSocket = "1" +memsize = "256" +pciBridge0.present = "TRUE" +pciBridge4.present = "TRUE" +pciBridge4.virtualDev = "pcieRootPort" +pciBridge4.functions = "8" +pciBridge5.present = "TRUE" +pciBridge5.virtualDev = "pcieRootPort" +pciBridge5.functions = "8" +pciBridge6.present = "TRUE" +pciBridge6.virtualDev = "pcieRootPort" +pciBridge6.functions = "8" +pciBridge7.present = "TRUE" +pciBridge7.virtualDev = "pcieRootPort" +pciBridge7.functions = "8" +vmci0.present = "TRUE" +floppy0.present = "FALSE" +ide0:0.clientDevice = "FALSE" +ide0:0.present = "TRUE" +ide0:0.deviceType = "atapi-cdrom" +ide0:0.autodetect = "TRUE" +ide0:0.startConnected = "FALSE" +mks.enable3d = "false" +svga.autodetect = "false" +svga.vramSize = "4194304" +scsi0:0.present = "TRUE" +scsi0:0.deviceType = "disk" +scsi0:0.fileName = "$2" +scsi0:0.mode = "persistent" +scsi0:0.writeThrough = "false" +scsi0.virtualDev = "lsilogic" +scsi0.present = "TRUE" +vmci0.unrestricted = "false" +vcpu.hotadd = "false" +vcpu.hotremove = "false" +firmware = "bios" +mem.hotadd = "false" +VMXFILE +} + +function xen_server_export() { + log INFO "creating xen server export" + set +e + which faketime >/dev/null 2>&1 && which vhd-util >/dev/null 2>&1 + local result=$? 
+ set -e + if [ ${result} == 0 ]; then + qemu-img convert -f qcow2 -O raw "dist/${appliance}" img.raw + vhd-util convert -s 0 -t 1 -i img.raw -o stagefixed.vhd + faketime '2010-01-01' vhd-util convert -s 1 -t 2 -i stagefixed.vhd -o "${appliance_build_name}-xen.vhd" + rm -f *.bak + bzip2 "${appliance_build_name}-xen.vhd" + mv "${appliance_build_name}-xen.vhd.bz2" dist/ + log INFO "${appliance} exported for XenServer: dist/${appliance_build_name}-xen.vhd.bz2" + else + log WARN "** Skipping ${appliance_build_name} export for XenServer: faketime or vhd-util command is missing. **" + log WARN "** faketime source code is available from https://github.com/wolfcw/libfaketime **" + fi +} + +function ovm_export() { + log INFO "creating OVM export" + qemu-img convert -f qcow2 -O raw "dist/${appliance}" "dist/${appliance_build_name}-ovm.raw" + cd dist && bzip2 "${appliance_build_name}-ovm.raw" && cd .. + log INFO "${appliance} exported for OracleVM: dist/${appliance_build_name}-ovm.raw.bz2" +} + +function kvm_export() { + log INFO "creating kvm export" + set +e + qemu-img convert -o compat=0.10 -f qcow2 -c -O qcow2 "dist/${appliance}" "dist/${appliance_build_name}-kvm.qcow2" + local qemuresult=$? + cd dist && bzip2 "${appliance_build_name}-kvm.qcow2" && cd .. + log INFO "${appliance} exported for KVM: dist/${appliance_build_name}-kvm.qcow2.bz2" +} + +function vmware_export() { + log INFO "creating vmware export" + qemu-img convert -f qcow2 -O vmdk "dist/${appliance}" "dist/${appliance_build_name}-vmware.vmdk" + + if ! 
ovftool_loc="$(type -p "ovftool")" || [ -z "$ovftool_loc" ]; then + log INFO "ovftool not found, skipping ova generation for VMware" + return + fi + + log INFO "ovftool found, using it to export ova file" + CDIR=$PWD + cd dist + chmod 666 ${appliance_build_name}-vmware.vmdk + stage_vmx ${appliance_build_name}-vmware ${appliance_build_name}-vmware.vmdk + ovftool ${appliance_build_name}-vmware.vmx ${appliance_build_name}-vmware.ova + rm -f *vmx *vmdk + cd $CDIR + log INFO "${appliance} exported for VMWare: dist/${appliance_build_name}-vmware.ova" +} + +function hyperv_export() { + log INFO "creating hyperv export" + qemu-img convert -f qcow2 -O vpc "dist/${appliance}" "dist/${appliance_build_name}-hyperv.vhd" + CDIR=$PWD + cd dist + zip "${appliance_build_name}-hyperv.vhd.zip" "${appliance_build_name}-hyperv.vhd" + rm -f *vhd + cd $CDIR + log INFO "${appliance} exported for HyperV: dist/${appliance_build_name}-hyperv.vhd.zip" +} + +### +### Main invocation +### + +function main() { + prepare + + packer_build + + # process the disk at dist + kvm_export + ovm_export + xen_server_export + vmware_export + hyperv_export + rm -f "dist/${appliance}" + cd dist && chmod +r * && cd .. + cd dist && md5sum * > md5sum.txt && cd .. + cd dist && sha512sum * > sha512sum.txt && cd .. 
+ add_on_exit log INFO "BUILD SUCCESSFUL" +} + +# we only run main() if not source-d +return 2>/dev/null || main diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 428d0eb90f18..3af09a1caf71 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -52,6 +52,8 @@ "label.acquiring.ip": "Acquiring IP", "label.associated.resource": "Associated resource", "label.action": "Action", +"label.action.add.nodes.to.kubernetes.cluster": "Add nodes to Kubernetes cluster", +"label.action.remove.nodes.from.kubernetes.cluster": "Remove nodes from Kubernetes cluster", "label.action.attach.disk": "Attach disk", "label.action.attach.iso": "Attach ISO", "label.action.bulk.delete.egress.firewall.rules": "Bulk delete egress firewall rules", @@ -271,6 +273,7 @@ "label.add.list.name": "ACL List name", "label.add.logical.router": "Add Logical Router to this Network", "label.add.more": "Add more", +"label.add.nodes": "Add Nodes to Kubernetes Cluster", "label.add.netscaler.device": "Add Netscaler device", "label.add.network": "Add Network", "label.add.network.acl": "Add Network ACL", @@ -502,9 +505,17 @@ "label.cisco.nexus1000v.password": "Nexus 1000v password", "label.cisco.nexus1000v.username": "Nexus 1000v username", "label.cks.cluster.autoscalingenabled": "Enable auto scaling on this cluster", +"label.cks.cluster.control.nodes.offeringid": "Service Offering for Control Nodes", +"label.cks.cluster.control.nodes.templateid": "Template for Control Nodes", +"label.cks.cluster.etcd.nodes": "Etcd Nodes", +"label.cks.cluster.etcd.nodes.offeringid": "Service Offering for etcd Nodes", +"label.cks.cluster.etcd.nodes.templateid": "Template for etcd Nodes", "label.cks.cluster.maxsize": "Maximum cluster size (Worker nodes)", "label.cks.cluster.minsize": "Minimum cluster size (Worker nodes)", +"label.cks.cluster.node.manual.upgrade": "Mark nodes for manual upgrade", "label.cks.cluster.size": "Cluster size (Worker nodes)", 
+"label.cks.cluster.worker.nodes.offeringid": "Service Offering for Worker Nodes", +"label.cks.cluster.worker.nodes.templateid": "Template for Worker Nodes", "label.cleanup": "Clean up", "label.clear": "Clear", "label.clear.list": "Clear list", @@ -998,6 +1009,7 @@ "label.fix.errors": "Fix errors", "label.fixed": "Fixed offering", "label.for": "for", +"label.forcks": "For CKS", "label.forbidden": "Forbidden", "label.forced": "Force", "label.force.ms.to.import.vm.files": "Force MS to export OVF from VMware to temporary storage", @@ -1270,11 +1282,13 @@ "label.kubernetes": "Kubernetes", "label.kubernetes.access.details": "The kubernetes nodes can be accessed via ssh using:
ssh -i [ssh_key] -p [port_number] cloud@[public_ip_address]<br/><br/>where,<br/>ssh_key: points to the ssh private key file corresponding to the key that was associated while creating the Kubernetes cluster. If no ssh key was provided during Kubernetes cluster creation, use the ssh private key of the management server.<br/>
port_number: can be obtained from the Port Forwarding Tab (Public Port column)", "label.kubernetes.cluster": "Kubernetes cluster", +"label.kubernetes.cluster.add.nodes.to.cluster": "Add nodes to Kubernetes cluster", "label.kubernetes.cluster.create": "Create Kubernetes cluster", "label.kubernetes.cluster.delete": "Delete Kubernetes cluster", "label.kubernetes.cluster.scale": "Scale Kubernetes cluster", "label.kubernetes.cluster.start": "Start Kubernetes cluster", "label.kubernetes.cluster.stop": "Stop Kubernetes cluster", +"label.kubernetes.cluster.remove.nodes.from.cluster": "Remove nodes from Kubernetes cluster", "label.kubernetes.cluster.upgrade": "Upgrade Kubernetes cluster", "label.kubernetes.dashboard": "Kubernetes dashboard UI", "label.kubernetes.dashboard.create.token": "Create token for Kubernetes dashboard", @@ -1477,6 +1491,7 @@ "label.monitor.url": "URL Path", "label.monthly": "Monthly", "label.more.access.dashboard.ui": "More about accessing dashboard UI", +"label.mount.cks.iso.on.vr": "Use CKS packages from Virtual Router", "label.mount.sharedfs": "Mount Shared FileSystem via NFS", "label.move.down.row": "Move down one row", "label.move.to.bottom": "Move to bottom", @@ -1560,6 +1575,7 @@ "label.no.items": "No available Items", "label.no.matching.offering": "No matching offering found", "label.no.matching.network": "No matching Networks found", +"label.node.version": "Node version", "label.no.usage.records": "No usage records found", "label.noderootdisksize": "Node root disk size (in GB)", "label.nodiskcache": "No disk cache", @@ -1858,7 +1874,8 @@ "label.region": "Region", "label.register.oauth": "Register OAuth", "label.register.template": "Register Template", -"label.register.user.data": "Register a userdata", +"label.register.user.data": "Register User Data", +"label.register.cni.config": "Register CNI Configuration", "label.reinstall.vm": "Reinstall Instance", "label.reject": "Reject", "label.related": "Related", @@ -1876,6 +1893,7 @@ 
"label.remove": "Remove", "label.remove.annotation": "Remove comment", "label.remove.bgp.peer": "Remove BGP peer", +"label.remove.cni.configuration": "Remove CNI configuration", "label.remove.egress.rule": "Remove egress rule", "label.remove.interface.route.table": "Remove Tungsten interface route table", "label.remove.ip.range": "Remove IP range", @@ -1885,6 +1903,7 @@ "label.remove.logical.router": "Remove logical router", "label.remove.network.offering": "Remove Network offering", "label.remove.network.route.table": "Remove Tungsten Fabric Network routing table", +"label.remove.nodes": "Remove nodes from Kubernetes cluster", "label.remove.pf": "Remove port forwarding rule", "label.remove.policy": "Remove policy", "label.remove.project.account": "Remove Account from project", @@ -2062,6 +2081,9 @@ "label.service.lb.netscaler.servicepackages": "Netscaler service packages", "label.service.lb.netscaler.servicepackages.description": "Service package description", "label.service.offering": "Service offering", +"label.service.offering.controlnodes": "Compute offering for Control Nodes", +"label.service.offering.etcdnodes": "Compute offering for etcd Nodes", +"label.service.offering.workernodes": "Compute offering for Worker Nodes", "label.service.staticnat.associatepublicip": "Associate public IP", "label.service.staticnat.elasticipcheckbox": "Elastic IP", "label.servicegroupuuid": "Service Group", @@ -2537,6 +2559,8 @@ "label.vnmc": "VNMC", "label.volgroup": "Volume group", "label.volume": "Volume", +"label.vms.empty": "No VMs available to be added to the Kubernetes cluster", +"label.vms.remove.empty": "No external VMs present in the Kubernetes cluster to be removed", "label.volume.empty": "No data volumes attached to this Instance", "label.volume.encryption.support": "Volume Encryption Supported", "label.volume.metrics": "Volume Metrics", @@ -2621,11 +2645,15 @@ "label.bucket.delete": "Delete Bucket", "label.quotagb": "Quota in GB", "label.encryption": "Encryption", 
+"label.etcdnodes": "Number of etcd nodes", "label.versioning": "Versioning", "label.objectlocking": "Object Lock", "label.bucket.policy": "Bucket Policy", "label.usersecretkey": "Secret Key", "label.create.bucket": "Create Bucket", +"label.cniconfiguration": "CNI Configuration", +"label.cniconfigname": "Associated CNI Configuration", +"label.cniconfigparams": "CNI Configuration parameters", "message.acquire.ip.failed": "Failed to acquire IP.", "message.action.acquire.ip": "Please confirm that you want to acquire new IP.", "message.action.cancel.maintenance": "Your host has been successfully canceled for maintenance. This process can take up to several minutes.", @@ -2803,6 +2831,8 @@ "message.adding.host": "Adding host", "message.adding.netscaler.device": "Adding Netscaler device", "message.adding.netscaler.provider": "Adding Netscaler provider", +"message.adding.nodes.to.cluster": "Adding nodes to Kubernetes cluster", +"message.removing.nodes.from.cluster": "Removing nodes from Kubernetes cluster", "message.advanced.security.group": "Choose this if you wish to use security groups to provide guest Instance isolation.", "message.allowed": "Allowed", "message.alert.show.all.stats.data": "This may return a lot of data depending on VM statistics and retention settings", @@ -2996,6 +3026,7 @@ "message.desc.reset.ssh.key.pair": "Please specify a ssh key pair that you would like to add to this Instance.", "message.desc.secondary.storage": "Each zone must have at least one NFS or secondary storage server. We will add the first one now. Secondary storage stores Instance Templates, ISO images, and Instance disk volume Snapshots. This server must be available to all hosts in the zone.

Provide the IP address and exported path.", "message.desc.register.user.data": "Please fill in the following data to register a User data.", +"message.desc.register.cni.config": "Please fill in the following data to register CNI Configuration as user data.", "message.desc.registered.user.data": "Registered a User Data.", "message.desc.zone": "A zone is the largest organizational unit in CloudStack, and it typically corresponds to a single datacenter. Zones provide physical isolation and redundancy. A zone consists of one or more pods (each of which contains hosts and primary storage servers) and a secondary storage server which is shared by all pods in the zone.", "message.desc.zone.edge": "A zone is the largest organizational unit in CloudStack, and it typically corresponds to a single datacenter. Zones provide physical isolation and redundancy. An edge zone consists of one or more hosts (each of which provides local storage as primary storage servers). Only shared and L2 Networks can be deployed in such zones and functionalities that require secondary storages are not supported.", @@ -3252,10 +3283,12 @@ "message.iso.arch": "Please select an ISO architecture", "message.iso.desc": "Disc image containing data or bootable media for OS.", "message.kubeconfig.cluster.not.available": "Kubernetes cluster kubeconfig not available currently.", +"message.kubernetes.cluster.add.nodes": "Please confirm that you want to add the following nodes to the cluster", "message.kubernetes.cluster.delete": "Please confirm that you want to destroy the cluster.", "message.kubernetes.cluster.scale": "Please select desired cluster configuration.", "message.kubernetes.cluster.start": "Please confirm that you want to start the cluster.", "message.kubernetes.cluster.stop": "Please confirm that you want to stop the cluster.", +"message.kubernetes.cluster.remove.nodes": "Please confirm that you want to remove the following nodes from the cluster", "message.kubernetes.cluster.upgrade": "Please 
select new Kubernetes version.", "message.kubernetes.version.delete": "Please confirm that you want to delete this Kubernetes version.", "message.l2.network.unsupported.for.nsx": "L2 networks aren't supported for NSX enabled zones", @@ -3331,6 +3364,7 @@ "message.password.reset.success": "Password has been reset successfully. Please login using your new credentials.", "message.path": "Path : ", "message.path.description": "NFS: exported path from the server. VMFS: /datacenter name/datastore name. SharedMountPoint: path where primary storage is mounted, such as /mnt/primary.", +"message.please.confirm.remove.cni.configuration": "Please confirm that you want to remove this CNI Configuration", "message.please.confirm.remove.ssh.key.pair": "Please confirm that you want to remove this SSH key pair.", "message.please.confirm.remove.user.data": "Please confirm that you want to remove this Userdata", "message.please.enter.valid.value": "Please enter a valid value.", @@ -3455,6 +3489,8 @@ "message.success.add.network.acl": "Successfully added Network ACL list", "message.success.add.network.static.route": "Successfully added Network Static Route", "message.success.add.network.permissions": "Successfully added Network permissions", +"message.success.add.nodes.to.cluster": "Successfully added nodes to Kubernetes cluster", +"message.success.remove.nodes.from.cluster": "Successfully removed nodes from Kubernetes cluster", "message.success.add.physical.network": "Successfully added Physical Network", "message.success.add.object.storage": "Successfully added Object Storage", "message.success.add.policy.rule": "Successfully added Policy rule", diff --git a/ui/src/components/view/DetailsTab.vue b/ui/src/components/view/DetailsTab.vue index 3622f87e67d8..d76da5f0d452 100644 --- a/ui/src/components/view/DetailsTab.vue +++ b/ui/src/components/view/DetailsTab.vue @@ -44,7 +44,9 @@