diff --git a/Makefile b/Makefile index e00e4935..2b3f89af 100644 --- a/Makefile +++ b/Makefile @@ -284,11 +284,11 @@ dev-creds-apply: dev-$(DEV_PROVIDER)-creds .PHONY: dev-provider-apply dev-provider-apply: envsubst - @NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/$(DEV_PROVIDER)-deployment.yaml | $(KUBECTL) apply -f - + @NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/$(DEV_PROVIDER)-managedcluster.yaml | $(KUBECTL) apply -f - .PHONY: dev-provider-delete dev-provider-delete: envsubst - @NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/$(DEV_PROVIDER)-deployment.yaml | $(KUBECTL) delete -f - + @NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/$(DEV_PROVIDER)-managedcluster.yaml | $(KUBECTL) delete -f - .PHONY: cli-install cli-install: clusterawsadm clusterctl diff --git a/README.md b/README.md index bfbc0990..4f200cd9 100644 --- a/README.md +++ b/README.md @@ -110,14 +110,14 @@ For details about the `Template system` in HMC, see [Templates system](docs/temp If you want to deploy hostded control plate template, make sure to check additional notes on [Hosted control plane](docs/aws/hosted-control-plane.md). -2. Create the file with the `Deployment` configuration: +2. Create the file with the `ManagedCluster` configuration: > Substitute the parameters enclosed in angle brackets with the corresponding values.\ > Enable the `dryRun` flag if required. For details, see [Dry run](#dry-run). ```yaml apiVersion: hmc.mirantis.com/v1alpha1 -kind: Deployment +kind: ManagedCluster metadata: name: namespace: @@ -128,42 +128,42 @@ spec: ``` -3. Create the `Deployment` object: +3. Create the `ManagedCluster` object: -`kubectl create -f deployment.yaml` +`kubectl create -f managedcluster.yaml` -4. Check the status of the newly created `Deployment` object: +4. Check the status of the newly created `ManagedCluster` object: -`kubectl -n get deployment.hmc -o=yaml` +`kubectl -n get managedcluster -o=yaml` 5. Wait for infrastructure to be provisioned and the cluster to be deployed (the provisioning starts only when `spec.dryRun` is disabled): - `kubectl -n get cluster -o=yaml` + `kubectl -n get cluster -o=yaml` > You may also watch the process with the `clusterctl describe` command (requires the `clusterctl` CLI to be installed): > ``` -> clusterctl describe cluster -n --show-conditions all +> clusterctl describe cluster -n --show-conditions all > ``` 6. Retrieve the `kubeconfig` of your managed cluster: ``` -kubectl get secret -n hmc-system -kubeconfig -o=jsonpath={.data.value} | base64 -d > kubeconfig +kubectl get secret -n hmc-system -kubeconfig -o=jsonpath={.data.value} | base64 -d > kubeconfig ``` ### Dry run -HMC `Deployment` supports two modes: with and without (default) `dryRun`. +HMC `ManagedCluster` supports two modes: with and without (default) `dryRun`. -If no configuration (`spec.config`) provided, the `Deployment` object will be populated with defaults +If no configuration (`spec.config`) provided, the `ManagedCluster` object will be populated with defaults (default configuration can be found in the corresponding `Template` status) and automatically marked as `dryRun`. 
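To illustrate the defaulting just described, here is a minimal Go sketch — not part of this change — of creating a `ManagedCluster` programmatically with a controller-runtime client instead of `kubectl`. It assumes the `AddToScheme` helper that kubebuilder generates next to the `v1alpha1` types renamed in this diff; the object name and namespace follow the README example, and the template name is an illustrative placeholder:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	hmc "github.com/Mirantis/hmc/api/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme is the kubebuilder-generated companion of the
	// SchemeBuilder.Register call shown in managedcluster_types.go
	// (assumption: standard kubebuilder project layout).
	if err := hmc.AddToScheme(scheme); err != nil {
		panic(err)
	}

	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// spec.config is left empty, so the defaulting webhook described above is
	// expected to copy the Template's default configuration and set
	// spec.dryRun to true on admission.
	mc := &hmc.ManagedCluster{
		ObjectMeta: metav1.ObjectMeta{Name: "aws-standalone", Namespace: "aws"},
		Spec:       hmc.ManagedClusterSpec{Template: "<template-name>"},
	}
	if err := c.Create(context.Background(), mc); err != nil {
		panic(err)
	}
	fmt.Println("dryRun after defaulting:", mc.Spec.DryRun)
}
```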
-Here is an example of the `Deployment` object with default configuration: +Here is an example of the `ManagedCluster` object with default configuration: ```yaml apiVersion: hmc.mirantis.com/v1alpha1 -kind: Deployment +kind: ManagedCluster metadata: name: namespace: @@ -198,11 +198,11 @@ spec: After you adjust your configuration and ensure that it passes validation (`TemplateReady` condition from `status.conditions`), remove the `spec.dryRun` flag to proceed with the deployment. -Here is an example of a `Deployment` object that passed the validation: +Here is an example of a `ManagedCluster` object that passed the validation: ```yaml apiVersion: hmc.mirantis.com/v1alpha1 -kind: Deployment +kind: ManagedCluster metadata: name: aws-standalone namespace: aws @@ -232,7 +232,7 @@ spec: status: "True" type: HelmChartReady - lastTransitionTime: "2024-07-22T09:25:49Z" - message: Deployment is ready + message: ManagedCluster is ready reason: Succeeded status: "True" type: Ready @@ -245,7 +245,7 @@ spec: `kubectl delete management.hmc hmc -n hmc-system` -> Note: make sure you have no HMC Deployment objects left in the cluster prior to Management deletion +> Note: make sure you have no HMC ManagedCluster objects left in the cluster prior to Management deletion 2. Remove the `hmc` Helm release: diff --git a/api/v1alpha1/deployment_types.go b/api/v1alpha1/managedcluster_types.go similarity index 79% rename from api/v1alpha1/deployment_types.go rename to api/v1alpha1/managedcluster_types.go index 93a69fe1..e95be1d9 100644 --- a/api/v1alpha1/deployment_types.go +++ b/api/v1alpha1/managedcluster_types.go @@ -22,7 +22,7 @@ import ( ) const ( - DeploymentFinalizer = "hmc.mirantis.com/deployment" + ManagedClusterFinalizer = "hmc.mirantis.com/managed-cluster" FluxHelmChartNameKey = "helm.toolkit.fluxcd.io/name" HMCManagedLabelKey = "hmc.mirantis.com/managed" @@ -30,13 +30,16 @@ const ( ) const ( + // ManagedClusterKind is the string representation of a ManagedCluster. + ManagedClusterKind = "ManagedCluster" + // TemplateReadyCondition indicates the referenced Template exists and valid. TemplateReadyCondition = "TemplateReady" // HelmChartReadyCondition indicates the corresponding HelmChart is valid and ready. HelmChartReadyCondition = "HelmChartReady" // HelmReleaseReadyCondition indicates the corresponding HelmRelease is ready and fully reconciled. HelmReleaseReadyCondition = "HelmReleaseReady" - // ReadyCondition indicates the Deployment is ready and fully reconciled. + // ReadyCondition indicates the ManagedCluster is ready and fully reconciled. ReadyCondition string = "Ready" ) @@ -54,8 +57,8 @@ const ( ProgressingReason string = "Progressing" ) -// DeploymentSpec defines the desired state of Deployment -type DeploymentSpec struct { +// ManagedClusterSpec defines the desired state of ManagedCluster +type ManagedClusterSpec struct { // DryRun specifies whether the template should be applied after validation or only validated. // +optional DryRun bool `json:"dryRun,omitempty"` @@ -70,12 +73,12 @@ type DeploymentSpec struct { Config *apiextensionsv1.JSON `json:"config,omitempty"` } -// DeploymentStatus defines the observed state of Deployment -type DeploymentStatus struct { +// ManagedClusterStatus defines the observed state of ManagedCluster +type ManagedClusterStatus struct { // ObservedGeneration is the last observed generation. 
// +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` - // Conditions contains details for the current state of the Deployment + // Conditions contains details for the current state of the ManagedCluster Conditions []metav1.Condition `json:"conditions,omitempty"` } @@ -86,27 +89,27 @@ type DeploymentStatus struct { // +kubebuilder:printcolumn:name="status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="Status",priority=0 // +kubebuilder:printcolumn:name="dryRun",type="string",JSONPath=".spec.dryRun",description="Dry Run",priority=1 -// Deployment is the Schema for the deployments API -type Deployment struct { +// ManagedCluster is the Schema for the managedclusters API +type ManagedCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec DeploymentSpec `json:"spec,omitempty"` - Status DeploymentStatus `json:"status,omitempty"` + Spec ManagedClusterSpec `json:"spec,omitempty"` + Status ManagedClusterStatus `json:"status,omitempty"` } -func (in *Deployment) HelmValues() (values map[string]interface{}, err error) { +func (in *ManagedCluster) HelmValues() (values map[string]interface{}, err error) { if in.Spec.Config != nil { err = yaml.Unmarshal(in.Spec.Config.Raw, &values) } return values, err } -func (in *Deployment) GetConditions() *[]metav1.Condition { +func (in *ManagedCluster) GetConditions() *[]metav1.Condition { return &in.Status.Conditions } -func (in *Deployment) InitConditions() { +func (in *ManagedCluster) InitConditions() { apimeta.SetStatusCondition(in.GetConditions(), metav1.Condition{ Type: TemplateReadyCondition, Status: metav1.ConditionUnknown, @@ -131,19 +134,19 @@ func (in *Deployment) InitConditions() { Type: ReadyCondition, Status: metav1.ConditionUnknown, Reason: ProgressingReason, - Message: "Deployment is not yet ready", + Message: "ManagedCluster is not yet ready", }) } //+kubebuilder:object:root=true -// DeploymentList contains a list of Deployment -type DeploymentList struct { +// ManagedClusterList contains a list of ManagedCluster +type ManagedClusterList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []Deployment `json:"items"` + Items []ManagedCluster `json:"items"` } func init() { - SchemeBuilder.Register(&Deployment{}, &DeploymentList{}) + SchemeBuilder.Register(&ManagedCluster{}, &ManagedClusterList{}) } diff --git a/api/v1alpha1/template_types.go b/api/v1alpha1/template_types.go index c8d41e1a..64eb8da1 100644 --- a/api/v1alpha1/template_types.go +++ b/api/v1alpha1/template_types.go @@ -29,8 +29,6 @@ const ( ManagementKind = "Management" // TemplateKind is the string representation of a Template. TemplateKind = "Template" - // DeploymentKind is the string representation of a Deployment. - DeploymentKind = "Deployment" // ChartAnnotationType is an annotation containing the type of Template. ChartAnnotationType = "hmc.mirantis.com/type" @@ -47,7 +45,7 @@ const ( type TemplateType string const ( - // TemplateTypeDeployment is the type used for creating HMC Deployment objects + // TemplateTypeDeployment is the type used for creating HMC ManagedCluster objects TemplateTypeDeployment TemplateType = "deployment" // TemplateTypeProvider is the type used for adding CAPI providers in the HMC Management object. 
TemplateTypeProvider TemplateType = "provider" @@ -92,7 +90,7 @@ type TemplateStatus struct { // +optional Description string `json:"description,omitempty"` // Config demonstrates available parameters for template customization, - // that can be used when creating Deployment objects. + // that can be used when creating ManagedCluster objects. // +optional Config *apiextensionsv1.JSON `json:"config,omitempty"` // ChartRef is a reference to a source controller resource containing the diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 7f6c6c00..27106101 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -78,7 +78,27 @@ func (in *Core) DeepCopy() *Core { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Deployment) DeepCopyInto(out *Deployment) { +func (in *HelmSpec) DeepCopyInto(out *HelmSpec) { + *out = *in + if in.ChartRef != nil { + in, out := &in.ChartRef, &out.ChartRef + *out = new(v2.CrossNamespaceSourceReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmSpec. +func (in *HelmSpec) DeepCopy() *HelmSpec { + if in == nil { + return nil + } + out := new(HelmSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedCluster) DeepCopyInto(out *ManagedCluster) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -86,18 +106,18 @@ func (in *Deployment) DeepCopyInto(out *Deployment) { in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. -func (in *Deployment) DeepCopy() *Deployment { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedCluster. +func (in *ManagedCluster) DeepCopy() *ManagedCluster { if in == nil { return nil } - out := new(Deployment) + out := new(ManagedCluster) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Deployment) DeepCopyObject() runtime.Object { +func (in *ManagedCluster) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -105,31 +125,31 @@ func (in *Deployment) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { +func (in *ManagedClusterList) DeepCopyInto(out *ManagedClusterList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]Deployment, len(*in)) + *out = make([]ManagedCluster, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. -func (in *DeploymentList) DeepCopy() *DeploymentList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterList. 
+func (in *ManagedClusterList) DeepCopy() *ManagedClusterList { if in == nil { return nil } - out := new(DeploymentList) + out := new(ManagedClusterList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DeploymentList) DeepCopyObject() runtime.Object { +func (in *ManagedClusterList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -137,7 +157,7 @@ func (in *DeploymentList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { +func (in *ManagedClusterSpec) DeepCopyInto(out *ManagedClusterSpec) { *out = *in if in.Config != nil { in, out := &in.Config, &out.Config @@ -146,18 +166,18 @@ func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. -func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSpec. +func (in *ManagedClusterSpec) DeepCopy() *ManagedClusterSpec { if in == nil { return nil } - out := new(DeploymentSpec) + out := new(ManagedClusterSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { +func (in *ManagedClusterStatus) DeepCopyInto(out *ManagedClusterStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions @@ -168,32 +188,12 @@ func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. -func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterStatus. +func (in *ManagedClusterStatus) DeepCopy() *ManagedClusterStatus { if in == nil { return nil } - out := new(DeploymentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HelmSpec) DeepCopyInto(out *HelmSpec) { - *out = *in - if in.ChartRef != nil { - in, out := &in.ChartRef, &out.ChartRef - *out = new(v2.CrossNamespaceSourceReference) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmSpec. 
-func (in *HelmSpec) DeepCopy() *HelmSpec { - if in == nil { - return nil - } - out := new(HelmSpec) + out := new(ManagedClusterStatus) in.DeepCopyInto(out) return out } diff --git a/cmd/main.go b/cmd/main.go index d1bc2944..401105ac 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -166,13 +166,13 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "Template") os.Exit(1) } - if err = (&controller.DeploymentReconciler{ + if err = (&controller.ManagedClusterReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Config: mgr.GetConfig(), DynamicClient: dc, }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Deployment") + setupLog.Error(err, "unable to create controller", "controller", "ManagedCluster") os.Exit(1) } if err = (&controller.ManagementReconciler{ @@ -218,8 +218,8 @@ func main() { } if enableWebhook { - if err := (&hmcwebhook.DeploymentValidator{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "Deployment") + if err := (&hmcwebhook.ManagedClusterValidator{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "ManagedCluster") os.Exit(1) } if err := (&hmcwebhook.ManagementValidator{}).SetupWebhookWithManager(mgr); err != nil { diff --git a/config/dev/aws-deployment.yaml b/config/dev/aws-managedcluster.yaml similarity index 100% rename from config/dev/aws-deployment.yaml rename to config/dev/aws-managedcluster.yaml diff --git a/config/dev/azure-deployment.yaml b/config/dev/azure-managedcluster.yaml similarity index 100% rename from config/dev/azure-deployment.yaml rename to config/dev/azure-managedcluster.yaml diff --git a/docs/aws/cluster-parameters.md b/docs/aws/cluster-parameters.md index 1b6792f5..35bde3e8 100644 --- a/docs/aws/cluster-parameters.md +++ b/docs/aws/cluster-parameters.md @@ -54,20 +54,20 @@ To access the nodes using the SSH protocol, several things should be configured: ### SSH keys Only one SSH key is supported and it should be added in AWS prior to creating -the `Deployment` object. The name of the key should then be placed under `.spec.config.sshKeyName`. +the `ManagedCluster` object. The name of the key should then be placed under `.spec.config.sshKeyName`. The same SSH key will be used for all machines and a bastion host. To enable bastion you should add `.spec.config.bastion.enabled` option in the -`Deployment` object to `true`. +`ManagedCluster` object to `true`. Full list of the bastion configuration options could be fould in [CAPA docs](https://cluster-api-aws.sigs.k8s.io/crd/#infrastructure.cluster.x-k8s.io/v1beta1.Bastion). -The resulting `Deployment` can look like this: +The resulting `ManagedCluster` can look like this: ```yaml apiVersion: hmc.mirantis.com/v1alpha1 -kind: Deployment +kind: ManagedCluster metadata: name: cluster-1 spec: diff --git a/docs/aws/hosted-control-plane.md b/docs/aws/hosted-control-plane.md index 58b0be22..d492d716 100644 --- a/docs/aws/hosted-control-plane.md +++ b/docs/aws/hosted-control-plane.md @@ -19,7 +19,7 @@ reused with a management cluster. If you deployed your AWS Kubernetes cluster using Cluster API Provider AWS (CAPA) you can obtain all the necessary data with the commands below or use the template found below in the -[HMC Deployment manifest generation](#hmc-deployment-manifest-generation) section. +[HMC ManagedCluster manifest generation](#hmc-managed-cluster-manifest-generation) section. 
**VPC ID** @@ -54,13 +54,13 @@ If you want to use different VPCs/regions for your management or managed cluster you should setup additional connectivity rules like [VPC peering](https://docs.aws.amazon.com/whitepapers/latest/building-scalable-secure-multi-vpc-network-infrastructure/vpc-peering.html). -## HMC Deployment manifest +## HMC ManagedCluster manifest -With all the collected data your `Deployment` manifest will look similar to this: +With all the collected data your `ManagedCluster` manifest will look similar to this: ```yaml apiVersion: hmc.mirantis.com/v1alpha1 - kind: Deployment + kind: ManagedCluster metadata: name: aws-hosted-cp spec: @@ -81,13 +81,13 @@ With all the collected data your `Deployment` manifest will look similar to this > [!NOTE] > In this example we're using the `us-west-1` region, but you should use the region of your VPC. -## HMC Deployment manifest generation +## HMC ManagedCluster manifest generation -Grab the following `Deployment` manifest template and save it to a file named `deployment.yaml.tpl`: +Grab the following `ManagedCluster` manifest template and save it to a file named `managedcluster.yaml.tpl`: ```yaml apiVersion: hmc.mirantis.com/v1alpha1 -kind: Deployment +kind: ManagedCluster metadata: name: aws-hosted-cp spec: @@ -104,8 +104,8 @@ spec: - "{{.status.networkStatus.securityGroups.node.id}}" ``` -Then run the following command to create the `deployment.yaml`: +Then run the following command to create the `managedcluster.yaml`: ``` -kubectl get awscluster cluster -o go-template="$(cat deployment.yaml.tpl)" > deployment.yaml +kubectl get awscluster cluster -o go-template="$(cat managedcluster.yaml.tpl)" > managedcluster.yaml ``` diff --git a/docs/azure/cluster-parameters.md b/docs/azure/cluster-parameters.md index d0bbf46d..72ea90f6 100644 --- a/docs/azure/cluster-parameters.md +++ b/docs/azure/cluster-parameters.md @@ -74,12 +74,12 @@ spec: type: ServicePrincipal ``` -These objects then should be referenced in the `Deployment` object in the +These objects then should be referenced in the `ManagedCluster` object in the `.spec.config.clusterIdentity` field. Subscription ID which was used to create service principal should be the same that will be used in the `.spec.config.subscriptionID` field of the -`Deployment` object. +`ManagedCluster` object. ### Cloud controller manager note @@ -90,7 +90,7 @@ Because of a limitation (k0sproject/k0smotron#692) it's not currently possible to automatically pass credentials to all nodes. To mitigate that you should pass cluster identity data once again in the -following fields of a `Deployment` object: +following fields of a `ManagedCluster` object: - `.spec.config.tenantID` - value of the `tenant` field of a service principal - `.spec.config.clientID` - value of the `appId` field of a service principal diff --git a/docs/azure/hosted-control-plane.md b/docs/azure/hosted-control-plane.md index 14329e74..ce39a96c 100644 --- a/docs/azure/hosted-control-plane.md +++ b/docs/azure/hosted-control-plane.md @@ -12,7 +12,7 @@ reside in the management cluster. ## Pre-existing resources Certain resources will not be created automatically in a hosted control plane -scenario thus they should be created in advance and provided in the `Deployment` +scenario thus they should be created in advance and provided in the `ManagedCluster` object. You can reuse these resources with management cluster as described below. 
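As background on how values provided this way reach the Helm release, here is a hedged, self-contained Go sketch — not part of this change — showing that the free-form `.spec.config` is stored as `apiextensionsv1.JSON` and read back through the `HelmValues()` helper renamed in this diff. The Azure-style field names and the template name are illustrative placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

	hmc "github.com/Mirantis/hmc/api/v1alpha1"
)

func main() {
	// Build the same kind of configuration the docs describe, as raw JSON.
	raw, err := json.Marshal(map[string]interface{}{
		"location":       "westus",
		"subscriptionID": "<subscription-id>",
		"tenantID":       "<tenant-id>",
		"clientID":       "<app-id>",
		"clientSecret":   "<password>",
	})
	if err != nil {
		panic(err)
	}

	mc := hmc.ManagedCluster{
		Spec: hmc.ManagedClusterSpec{
			Template: "<template-name>",
			Config:   &apiextensionsv1.JSON{Raw: raw},
		},
	}

	// HelmValues unmarshals spec.config into the values map that the
	// controller later feeds to the dry-run Helm install used for validation.
	values, err := mc.HelmValues()
	if err != nil {
		panic(err)
	}
	fmt.Println(values["location"]) // "westus"
}
```

The controller performs this same unmarshalling in `validateReleaseWithValues` before rendering the chart.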
@@ -63,13 +63,13 @@ kubectl get azurecluster -o go-template='{{(index .spec.networkSp -## HMC Deployment manifest +## HMC ManagedCluster manifest -With all the collected data your `Deployment` manifest will look similar to this: +With all the collected data your `ManagedCluster` manifest will look similar to this: ```yaml apiVersion: hmc.mirantis.com/v1alpha1 -kind: Deployment +kind: ManagedCluster metadata: name: azure-hosted-cp spec: @@ -92,11 +92,11 @@ spec: clientSecret: "u_RANDOM" ``` -To simplify creation of the deployment object you can use the template below: +To simplify creation of the ManagedCluster object you can use the template below: ```yaml apiVersion: hmc.mirantis.com/v1alpha1 -kind: Deployment +kind: ManagedCluster metadata: name: azure-hosted-cp spec: @@ -127,7 +127,7 @@ kubectl get azurecluster -o go-template="$(cat templat ## Cluster creation -After applying `Deployment` object you require to manually set the status of the +After applying `ManagedCluster` object you require to manually set the status of the `AzureCluster` object due to current limitations (see k0sproject/k0smotron#668). To do so you need to execute the following command: @@ -151,7 +151,7 @@ To place finalizer you can execute the following command: kubectl patch azurecluster --type=merge --patch 'metadata: {finalizers: [manual]}' ``` -When finalizer is placed you can remove the `Deployment` as usual. Check that +When finalizer is placed you can remove the `ManagedCluster` as usual. Check that all `AzureMachines` objects are deleted successfully and remove finalizer you've placed to finish cluster deletion. diff --git a/docs/azure/machine-parameters.md b/docs/azure/machine-parameters.md index 9d3e3122..6a5bf383 100644 --- a/docs/azure/machine-parameters.md +++ b/docs/azure/machine-parameters.md @@ -5,7 +5,7 @@ SSH public key can be passed to `.spec.config.sshPublicKey` (in case of hosted CP) parameter or `.spec.config.controlPlane.sshPublicKey` and `.spec.config.worker.sshPublicKey` parameters (in case of standalone CP) -of the `Deployment` object. +of the `ManagedCluster` object. It should be encoded in **base64** format. diff --git a/docs/dev.md b/docs/dev.md index 7db78297..e66b38c6 100644 --- a/docs/dev.md +++ b/docs/dev.md @@ -45,9 +45,9 @@ another provider change `DEV_PROVIDER` variable with the name of provider before running make (e.g. `export DEV_PROVIDER=azure`). 1. Configure your cluster parameters in provider specific file - (for example `config/dev/aws-deployment.yaml` in case of AWS): + (for example `config/dev/aws-managedcluster.yaml` in case of AWS): - * Configure the `name` of the deployment + * Configure the `name` of the ManagedCluster * Change instance type or size for control plane and worker machines * Specify the number of control plane and worker machines, etc @@ -66,7 +66,7 @@ running make (e.g. `export DEV_PROVIDER=azure`). ``` export KUBECONFIG=~/.kube/config -./bin/clusterctl describe cluster -n hmc-system --show-conditions all +./bin/clusterctl describe cluster -n hmc-system --show-conditions all ``` > [!NOTE] @@ -80,5 +80,5 @@ export KUBECONFIG=~/.kube/config 7. 
Retrieve the `kubeconfig` of your managed cluster: ``` -kubectl --kubeconfig ~/.kube/config get secret -n hmc-system -kubeconfig -o=jsonpath={.data.value} | base64 -d > kubeconfig +kubectl --kubeconfig ~/.kube/config get secret -n hmc-system -kubeconfig -o=jsonpath={.data.value} | base64 -d > kubeconfig ``` diff --git a/docs/templates/main.md b/docs/templates/main.md index 8e3b175e..b81bb72c 100644 --- a/docs/templates/main.md +++ b/docs/templates/main.md @@ -6,7 +6,7 @@ and use them for deployment. ## Custom deployment Templates > At the moment all `Templates` should reside in the `hmc-system` namespace. But they can be referenced -> by `Deployments` from any namespace. +> by `ManagedClusters` from any namespace. Here are the instructions on how to bring your own Template to HMC: diff --git a/internal/controller/deployment_controller.go b/internal/controller/deployment_controller.go index 1767a5bf..8537c388 100644 --- a/internal/controller/deployment_controller.go +++ b/internal/controller/deployment_controller.go @@ -50,8 +50,8 @@ import ( "github.com/Mirantis/hmc/internal/telemetry" ) -// DeploymentReconciler reconciles a Deployment object -type DeploymentReconciler struct { +// ManagedClusterReconciler reconciles a ManagedCluster object +type ManagedClusterReconciler struct { client.Client Scheme *runtime.Scheme Config *rest.Config @@ -60,47 +60,47 @@ type DeploymentReconciler struct { // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. -func (r *DeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - l := log.FromContext(ctx).WithValues("DeploymentController", req.NamespacedName) - l.Info("Reconciling Deployment") - deployment := &hmc.Deployment{} - if err := r.Get(ctx, req.NamespacedName, deployment); err != nil { +func (r *ManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + l := log.FromContext(ctx).WithValues("ManagedClusterController", req.NamespacedName) + l.Info("Reconciling ManagedCluster") + managedCluster := &hmc.ManagedCluster{} + if err := r.Get(ctx, req.NamespacedName, managedCluster); err != nil { if apierrors.IsNotFound(err) { - l.Info("Deployment not found, ignoring since object must be deleted") + l.Info("ManagedCluster not found, ignoring since object must be deleted") return ctrl.Result{}, nil } - l.Error(err, "Failed to get Deployment") + l.Error(err, "Failed to get ManagedCluster") return ctrl.Result{}, err } - if !deployment.DeletionTimestamp.IsZero() { - l.Info("Deleting Deployment") - return r.Delete(ctx, l, deployment) + if !managedCluster.DeletionTimestamp.IsZero() { + l.Info("Deleting ManagedCluster") + return r.Delete(ctx, l, managedCluster) } - if deployment.Status.ObservedGeneration == 0 { + if managedCluster.Status.ObservedGeneration == 0 { mgmt := &hmc.Management{} mgmtRef := types.NamespacedName{Namespace: hmc.ManagementNamespace, Name: hmc.ManagementName} if err := r.Get(ctx, mgmtRef, mgmt); err != nil { l.Error(err, "Failed to get Management object") return ctrl.Result{}, err } - if err := telemetry.TrackDeploymentCreate(string(mgmt.UID), string(deployment.UID), deployment.Spec.Template, deployment.Spec.DryRun); err != nil { - l.Error(err, "Failed to track Deployment creation") + if err := telemetry.TrackManagedClusterCreate(string(mgmt.UID), string(managedCluster.UID), managedCluster.Spec.Template, managedCluster.Spec.DryRun); err != nil { + l.Error(err, "Failed to 
track ManagedCluster creation") } } - return r.Update(ctx, l, deployment) + return r.Update(ctx, l, managedCluster) } -func (r *DeploymentReconciler) setStatusFromClusterStatus(ctx context.Context, l logr.Logger, deployment *hmc.Deployment) (bool, error) { +func (r *ManagedClusterReconciler) setStatusFromClusterStatus(ctx context.Context, l logr.Logger, managedCluster *hmc.ManagedCluster) (bool, error) { resourceId := schema.GroupVersionResource{ Group: "cluster.x-k8s.io", Version: "v1beta1", Resource: "clusters", } - list, err := r.DynamicClient.Resource(resourceId).Namespace(deployment.Namespace).List(ctx, metav1.ListOptions{ - LabelSelector: labels.SelectorFromSet(map[string]string{hmc.FluxHelmChartNameKey: deployment.Name}).String(), + list, err := r.DynamicClient.Resource(resourceId).Namespace(managedCluster.Namespace).List(ctx, metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{hmc.FluxHelmChartNameKey: managedCluster.Name}).String(), }) if apierrors.IsNotFound(err) || len(list.Items) == 0 { @@ -109,31 +109,31 @@ func (r *DeploymentReconciler) setStatusFromClusterStatus(ctx context.Context, l } if err != nil { - return true, fmt.Errorf("failed to get cluster information for deployment %s in namespace: %s: %w", - deployment.Namespace, deployment.Name, err) + return true, fmt.Errorf("failed to get cluster information for managedCluster %s in namespace: %s: %w", + managedCluster.Namespace, managedCluster.Name, err) } conditions, found, err := unstructured.NestedSlice(list.Items[0].Object, "status", "conditions") if err != nil { - return true, fmt.Errorf("failed to get cluster information for deployment %s in namespace: %s: %w", - deployment.Namespace, deployment.Name, err) + return true, fmt.Errorf("failed to get cluster information for managedCluster %s in namespace: %s: %w", + managedCluster.Namespace, managedCluster.Name, err) } if !found { - return true, fmt.Errorf("failed to get cluster information for deployment %s in namespace: %s: status.conditions not found", - deployment.Namespace, deployment.Name) + return true, fmt.Errorf("failed to get cluster information for managedCluster %s in namespace: %s: status.conditions not found", + managedCluster.Namespace, managedCluster.Name) } allConditionsComplete := true for _, condition := range conditions { conditionMap, ok := condition.(map[string]interface{}) if !ok { - return true, fmt.Errorf("failed to cast condition to map[string]interface{} for deployment: %s in namespace: %s: %w", - deployment.Namespace, deployment.Name, err) + return true, fmt.Errorf("failed to cast condition to map[string]interface{} for managedCluster: %s in namespace: %s: %w", + managedCluster.Namespace, managedCluster.Name, err) } var metaCondition metav1.Condition if err := runtime.DefaultUnstructuredConverter.FromUnstructured(conditionMap, &metaCondition); err != nil { - return true, fmt.Errorf("failed to convert unstructured conditions to metav1.Condition for deployment %s in namespace: %s: %w", - deployment.Namespace, deployment.Name, err) + return true, fmt.Errorf("failed to convert unstructured conditions to metav1.Condition for managedCluster %s in namespace: %s: %w", + managedCluster.Namespace, managedCluster.Name, err) } if metaCondition.Status != "True" { @@ -143,38 +143,38 @@ func (r *DeploymentReconciler) setStatusFromClusterStatus(ctx context.Context, l if metaCondition.Reason == "" && metaCondition.Status == "True" { metaCondition.Reason = "Succeeded" } - apimeta.SetStatusCondition(deployment.GetConditions(), 
metaCondition) + apimeta.SetStatusCondition(managedCluster.GetConditions(), metaCondition) } return !allConditionsComplete, nil } -func (r *DeploymentReconciler) Update(ctx context.Context, l logr.Logger, deployment *hmc.Deployment) (result ctrl.Result, err error) { - finalizersUpdated := controllerutil.AddFinalizer(deployment, hmc.DeploymentFinalizer) +func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, managedCluster *hmc.ManagedCluster) (result ctrl.Result, err error) { + finalizersUpdated := controllerutil.AddFinalizer(managedCluster, hmc.ManagedClusterFinalizer) if finalizersUpdated { - if err := r.Client.Update(ctx, deployment); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to update deployment %s/%s: %w", deployment.Namespace, deployment.Name, err) + if err := r.Client.Update(ctx, managedCluster); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update managedCluster %s/%s: %w", managedCluster.Namespace, managedCluster.Name, err) } return ctrl.Result{}, nil } - if len(deployment.Status.Conditions) == 0 { - deployment.InitConditions() + if len(managedCluster.Status.Conditions) == 0 { + managedCluster.InitConditions() } defer func() { - err = errors.Join(err, r.updateStatus(ctx, deployment)) + err = errors.Join(err, r.updateStatus(ctx, managedCluster)) }() template := &hmc.Template{} - templateRef := types.NamespacedName{Name: deployment.Spec.Template, Namespace: hmc.TemplatesNamespace} + templateRef := types.NamespacedName{Name: managedCluster.Spec.Template, Namespace: hmc.TemplatesNamespace} if err := r.Get(ctx, templateRef, template); err != nil { l.Error(err, "Failed to get Template") errMsg := fmt.Sprintf("failed to get provided template: %s", err) if apierrors.IsNotFound(err) { errMsg = "provided template is not found" } - apimeta.SetStatusCondition(deployment.GetConditions(), metav1.Condition{ + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ Type: hmc.TemplateReadyCondition, Status: metav1.ConditionFalse, Reason: hmc.FailedReason, @@ -184,8 +184,8 @@ func (r *DeploymentReconciler) Update(ctx context.Context, l logr.Logger, deploy } templateType := template.Status.Type if templateType != hmc.TemplateTypeDeployment { - errMsg := "only templates of 'deployment' type are supported" - apimeta.SetStatusCondition(deployment.GetConditions(), metav1.Condition{ + errMsg := "only templates of 'managedCluster' type are supported" + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ Type: hmc.TemplateReadyCondition, Status: metav1.ConditionFalse, Reason: hmc.FailedReason, @@ -195,7 +195,7 @@ func (r *DeploymentReconciler) Update(ctx context.Context, l logr.Logger, deploy } if !template.Status.Valid { errMsg := "provided template is not marked as valid" - apimeta.SetStatusCondition(deployment.GetConditions(), metav1.Condition{ + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ Type: hmc.TemplateReadyCondition, Status: metav1.ConditionFalse, Reason: hmc.FailedReason, @@ -203,7 +203,7 @@ func (r *DeploymentReconciler) Update(ctx context.Context, l logr.Logger, deploy }) return ctrl.Result{}, errors.New(errMsg) } - apimeta.SetStatusCondition(deployment.GetConditions(), metav1.Condition{ + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ Type: hmc.TemplateReadyCondition, Status: metav1.ConditionTrue, Reason: hmc.SucceededReason, @@ -211,7 +211,7 @@ func (r *DeploymentReconciler) Update(ctx context.Context, l logr.Logger, deploy }) source, 
err := r.getSource(ctx, template.Status.ChartRef) if err != nil { - apimeta.SetStatusCondition(deployment.GetConditions(), metav1.Condition{ + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ Type: hmc.HelmChartReadyCondition, Status: metav1.ConditionFalse, Reason: hmc.FailedReason, @@ -222,7 +222,7 @@ func (r *DeploymentReconciler) Update(ctx context.Context, l logr.Logger, deploy l.Info("Downloading Helm chart") hcChart, err := helm.DownloadChartFromArtifact(ctx, source.GetArtifact()) if err != nil { - apimeta.SetStatusCondition(deployment.GetConditions(), metav1.Condition{ + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ Type: hmc.HelmChartReadyCondition, Status: metav1.ConditionFalse, Reason: hmc.FailedReason, @@ -234,14 +234,14 @@ func (r *DeploymentReconciler) Update(ctx context.Context, l logr.Logger, deploy l.Info("Initializing Helm client") getter := helm.NewMemoryRESTClientGetter(r.Config, r.RESTMapper()) actionConfig := new(action.Configuration) - err = actionConfig.Init(getter, deployment.Namespace, "secret", l.Info) + err = actionConfig.Init(getter, managedCluster.Namespace, "secret", l.Info) if err != nil { return ctrl.Result{}, err } l.Info("Validating Helm chart with provided values") - if err := r.validateReleaseWithValues(ctx, actionConfig, deployment, hcChart); err != nil { - apimeta.SetStatusCondition(deployment.GetConditions(), metav1.Condition{ + if err := r.validateReleaseWithValues(ctx, actionConfig, managedCluster, hcChart); err != nil { + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ Type: hmc.HelmChartReadyCondition, Status: metav1.ConditionFalse, Reason: hmc.FailedReason, @@ -250,25 +250,25 @@ func (r *DeploymentReconciler) Update(ctx context.Context, l logr.Logger, deploy return ctrl.Result{}, err } - apimeta.SetStatusCondition(deployment.GetConditions(), metav1.Condition{ + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ Type: hmc.HelmChartReadyCondition, Status: metav1.ConditionTrue, Reason: hmc.SucceededReason, Message: "Helm chart is valid", }) - if !deployment.Spec.DryRun { + if !managedCluster.Spec.DryRun { ownerRef := &metav1.OwnerReference{ APIVersion: hmc.GroupVersion.String(), - Kind: hmc.DeploymentKind, - Name: deployment.Name, - UID: deployment.UID, + Kind: hmc.ManagedClusterKind, + Name: managedCluster.Name, + UID: managedCluster.UID, } - hr, _, err := helm.ReconcileHelmRelease(ctx, r.Client, deployment.Name, deployment.Namespace, deployment.Spec.Config, + hr, _, err := helm.ReconcileHelmRelease(ctx, r.Client, managedCluster.Name, managedCluster.Namespace, managedCluster.Spec.Config, ownerRef, template.Status.ChartRef, defaultReconcileInterval, nil) if err != nil { - apimeta.SetStatusCondition(deployment.GetConditions(), metav1.Condition{ + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ Type: hmc.HelmReleaseReadyCondition, Status: metav1.ConditionFalse, Reason: hmc.FailedReason, @@ -279,7 +279,7 @@ func (r *DeploymentReconciler) Update(ctx context.Context, l logr.Logger, deploy hrReadyCondition := fluxconditions.Get(hr, fluxmeta.ReadyCondition) if hrReadyCondition != nil { - apimeta.SetStatusCondition(deployment.GetConditions(), metav1.Condition{ + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ Type: hmc.HelmReleaseReadyCondition, Status: hrReadyCondition.Status, Reason: hrReadyCondition.Reason, @@ -287,7 +287,7 @@ func (r *DeploymentReconciler) Update(ctx context.Context, 
l logr.Logger, deploy }) } - requeue, err := r.setStatusFromClusterStatus(ctx, l, deployment) + requeue, err := r.setStatusFromClusterStatus(ctx, l, managedCluster) if err != nil { if requeue { return ctrl.Result{RequeueAfter: 10 * time.Second}, err @@ -307,14 +307,14 @@ func (r *DeploymentReconciler) Update(ctx context.Context, l logr.Logger, deploy return ctrl.Result{}, nil } -func (r *DeploymentReconciler) validateReleaseWithValues(ctx context.Context, actionConfig *action.Configuration, deployment *hmc.Deployment, hcChart *chart.Chart) error { +func (r *ManagedClusterReconciler) validateReleaseWithValues(ctx context.Context, actionConfig *action.Configuration, managedCluster *hmc.ManagedCluster, hcChart *chart.Chart) error { install := action.NewInstall(actionConfig) install.DryRun = true - install.ReleaseName = deployment.Name - install.Namespace = deployment.Namespace + install.ReleaseName = managedCluster.Name + install.Namespace = managedCluster.Namespace install.ClientOnly = true - vals, err := deployment.HelmValues() + vals, err := managedCluster.HelmValues() if err != nil { return err } @@ -325,11 +325,11 @@ func (r *DeploymentReconciler) validateReleaseWithValues(ctx context.Context, ac return nil } -func (r *DeploymentReconciler) updateStatus(ctx context.Context, deployment *hmc.Deployment) error { - deployment.Status.ObservedGeneration = deployment.Generation +func (r *ManagedClusterReconciler) updateStatus(ctx context.Context, managedCluster *hmc.ManagedCluster) error { + managedCluster.Status.ObservedGeneration = managedCluster.Generation warnings := "" errs := "" - for _, condition := range deployment.Status.Conditions { + for _, condition := range managedCluster.Status.Conditions { if condition.Type == hmc.ReadyCondition { continue } @@ -344,7 +344,7 @@ func (r *DeploymentReconciler) updateStatus(ctx context.Context, deployment *hmc Type: hmc.ReadyCondition, Status: metav1.ConditionTrue, Reason: hmc.SucceededReason, - Message: "Deployment is ready", + Message: "ManagedCluster is ready", } if warnings != "" { condition.Status = metav1.ConditionUnknown @@ -356,14 +356,14 @@ func (r *DeploymentReconciler) updateStatus(ctx context.Context, deployment *hmc condition.Reason = hmc.FailedReason condition.Message = errs } - apimeta.SetStatusCondition(deployment.GetConditions(), condition) - if err := r.Status().Update(ctx, deployment); err != nil { - return fmt.Errorf("failed to update status for deployment %s/%s: %w", deployment.Namespace, deployment.Name, err) + apimeta.SetStatusCondition(managedCluster.GetConditions(), condition) + if err := r.Status().Update(ctx, managedCluster); err != nil { + return fmt.Errorf("failed to update status for managedCluster %s/%s: %w", managedCluster.Namespace, managedCluster.Name, err) } return nil } -func (r *DeploymentReconciler) getSource(ctx context.Context, ref *hcv2.CrossNamespaceSourceReference) (sourcev1.Source, error) { +func (r *ManagedClusterReconciler) getSource(ctx context.Context, ref *hcv2.CrossNamespaceSourceReference) (sourcev1.Source, error) { if ref == nil { return nil, fmt.Errorf("helm chart source is not provided") } @@ -375,27 +375,27 @@ func (r *DeploymentReconciler) getSource(ctx context.Context, ref *hcv2.CrossNam return &hc, nil } -func (r *DeploymentReconciler) Delete(ctx context.Context, l logr.Logger, deployment *hmc.Deployment) (ctrl.Result, error) { +func (r *ManagedClusterReconciler) Delete(ctx context.Context, l logr.Logger, managedCluster *hmc.ManagedCluster) (ctrl.Result, error) { hr := &hcv2.HelmRelease{} err 
:= r.Get(ctx, types.NamespacedName{ - Name: deployment.Name, - Namespace: deployment.Namespace, + Name: managedCluster.Name, + Namespace: managedCluster.Namespace, }, hr) if err != nil { if apierrors.IsNotFound(err) { - l.Info("Removing Finalizer", "finalizer", hmc.DeploymentFinalizer) - finalizersUpdated := controllerutil.RemoveFinalizer(deployment, hmc.DeploymentFinalizer) + l.Info("Removing Finalizer", "finalizer", hmc.ManagedClusterFinalizer) + finalizersUpdated := controllerutil.RemoveFinalizer(managedCluster, hmc.ManagedClusterFinalizer) if finalizersUpdated { - if err := r.Client.Update(ctx, deployment); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to update deployment %s/%s: %w", deployment.Namespace, deployment.Name, err) + if err := r.Client.Update(ctx, managedCluster); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update managedCluster %s/%s: %w", managedCluster.Namespace, managedCluster.Name, err) } } - l.Info("Deployment deleted") + l.Info("ManagedCluster deleted") return ctrl.Result{}, nil } return ctrl.Result{}, err } - err = helm.DeleteHelmRelease(ctx, r.Client, deployment.Name, deployment.Namespace) + err = helm.DeleteHelmRelease(ctx, r.Client, managedCluster.Name, managedCluster.Namespace) if err != nil { return ctrl.Result{}, err } @@ -404,23 +404,23 @@ func (r *DeploymentReconciler) Delete(ctx context.Context, l logr.Logger, deploy } // SetupWithManager sets up the controller with the Manager. -func (r *DeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *ManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&hmc.Deployment{}). + For(&hmc.ManagedCluster{}). Watches(&hcv2.HelmRelease{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []ctrl.Request { - deployment := hmc.Deployment{} - deploymentRef := types.NamespacedName{ + managedCluster := hmc.ManagedCluster{} + managedClusterRef := types.NamespacedName{ Namespace: o.GetNamespace(), Name: o.GetName(), } - err := r.Client.Get(ctx, deploymentRef, &deployment) + err := r.Client.Get(ctx, managedClusterRef, &managedCluster) if err != nil { return []ctrl.Request{} } return []reconcile.Request{ { - NamespacedName: deploymentRef, + NamespacedName: managedClusterRef, }, } }), diff --git a/internal/controller/deployment_controller_test.go b/internal/controller/deployment_controller_test.go index ca5bed90..5ad9c34c 100644 --- a/internal/controller/deployment_controller_test.go +++ b/internal/controller/deployment_controller_test.go @@ -32,18 +32,18 @@ import ( hmc "github.com/Mirantis/hmc/api/v1alpha1" ) -var _ = Describe("Deployment Controller", func() { +var _ = Describe("ManagedCluster Controller", func() { Context("When reconciling a resource", func() { - const deploymentName = "test-deployment" + const managedClusterName = "test-managed-cluster" const templateName = "test-template" ctx := context.Background() typeNamespacedName := types.NamespacedName{ - Name: deploymentName, + Name: managedClusterName, Namespace: "default", } - deployment := &hmc.Deployment{} + managedCluster := &hmc.ManagedCluster{} template := &hmc.Template{} management := &hmc.Management{} namespace := &v1.Namespace{} @@ -104,36 +104,36 @@ var _ = Describe("Deployment Controller", func() { } Expect(k8sClient.Create(ctx, management)).To(Succeed()) } - By("creating the custom resource for the Kind Deployment") - err = k8sClient.Get(ctx, typeNamespacedName, deployment) + By("creating the custom resource for the Kind 
ManagedCluster") + err = k8sClient.Get(ctx, typeNamespacedName, managedCluster) if err != nil && errors.IsNotFound(err) { - deployment = &hmc.Deployment{ + managedCluster = &hmc.ManagedCluster{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, + Name: managedClusterName, Namespace: "default", }, - Spec: hmc.DeploymentSpec{ + Spec: hmc.ManagedClusterSpec{ Template: templateName, }, } - Expect(k8sClient.Create(ctx, deployment)).To(Succeed()) + Expect(k8sClient.Create(ctx, managedCluster)).To(Succeed()) } }) AfterEach(func() { By("Cleanup") - controllerReconciler := &DeploymentReconciler{ + controllerReconciler := &ManagedClusterReconciler{ Client: k8sClient, Scheme: k8sClient.Scheme(), } - Expect(k8sClient.Delete(ctx, deployment)).To(Succeed()) - // Running reconcile to remove the finalizer and delete the Deployment + Expect(k8sClient.Delete(ctx, managedCluster)).To(Succeed()) + // Running reconcile to remove the finalizer and delete the ManagedCluster _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: typeNamespacedName}) Expect(err).NotTo(HaveOccurred()) - Eventually(k8sClient.Get(ctx, typeNamespacedName, deployment), 1*time.Minute, 5*time.Second).Should(HaveOccurred()) + Eventually(k8sClient.Get(ctx, typeNamespacedName, managedCluster), 1*time.Minute, 5*time.Second).Should(HaveOccurred()) Expect(k8sClient.Delete(ctx, template)).To(Succeed()) Expect(k8sClient.Delete(ctx, management)).To(Succeed()) @@ -141,7 +141,7 @@ var _ = Describe("Deployment Controller", func() { }) It("should successfully reconcile the resource", func() { By("Reconciling the created resource") - controllerReconciler := &DeploymentReconciler{ + controllerReconciler := &ManagedClusterReconciler{ Client: k8sClient, Scheme: k8sClient.Scheme(), Config: &rest.Config{}, diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 9a83782b..1866af19 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -130,7 +130,7 @@ var _ = BeforeSuite(func() { }) Expect(err).NotTo(HaveOccurred()) - err = (&hmcwebhook.DeploymentValidator{}).SetupWebhookWithManager(mgr) + err = (&hmcwebhook.ManagedClusterValidator{}).SetupWebhookWithManager(mgr) Expect(err).NotTo(HaveOccurred()) err = (&hmcwebhook.ManagementValidator{}).SetupWebhookWithManager(mgr) diff --git a/internal/telemetry/event.go b/internal/telemetry/event.go index 38939776..a3f5fba3 100644 --- a/internal/telemetry/event.go +++ b/internal/telemetry/event.go @@ -21,24 +21,24 @@ import ( ) const ( - deploymentCreateEvent = "deployment-create" - deploymentHeartbeatEvent = "deployment-heartbeat" + managedClusterCreateEvent = "managed-cluster-create" + managedClusterHeartbeatEvent = "managed-cluster-heartbeat" ) -func TrackDeploymentCreate(id, deploymentID, template string, dryRun bool) error { +func TrackManagedClusterCreate(id, managedClusterID, template string, dryRun bool) error { props := map[string]interface{}{ - "hmcVersion": build.Version, - "deploymentID": deploymentID, - "template": template, - "dryRun": dryRun, + "hmcVersion": build.Version, + "managedClusterID": managedClusterID, + "template": template, + "dryRun": dryRun, } - return TrackEvent(deploymentCreateEvent, id, props) + return TrackEvent(managedClusterCreateEvent, id, props) } -func TrackDeploymentHeartbeat(id, deploymentID, clusterID, template, templateHelmChartVersion, infrastructureProvider, bootstrapProvider, controlPlaneProvider string) error { +func TrackManagedClusterHeartbeat(id, managedClusterID, clusterID, 
template, templateHelmChartVersion, infrastructureProvider, bootstrapProvider, controlPlaneProvider string) error { props := map[string]interface{}{ "hmcVersion": build.Version, - "deploymentID": deploymentID, + "managedClusterID": managedClusterID, "clusterID": clusterID, "template": template, "templateHelmChartVersion": templateHelmChartVersion, @@ -46,7 +46,7 @@ func TrackDeploymentHeartbeat(id, deploymentID, clusterID, template, templateHel "bootstrapProvider": bootstrapProvider, "controlPlaneProvider": controlPlaneProvider, } - return TrackEvent(deploymentHeartbeatEvent, id, props) + return TrackEvent(managedClusterHeartbeatEvent, id, props) } func TrackEvent(name, id string, properties map[string]interface{}) error { diff --git a/internal/telemetry/tracker.go b/internal/telemetry/tracker.go index 66ad34ed..d2fb022d 100644 --- a/internal/telemetry/tracker.go +++ b/internal/telemetry/tracker.go @@ -50,8 +50,8 @@ func (t *Tracker) Start(ctx context.Context) error { func (t *Tracker) Tick(ctx context.Context) { l := log.FromContext(ctx).WithName("telemetry tracker") - logger := l.WithValues("event", deploymentHeartbeatEvent) - err := t.trackDeploymentHeartbeat(ctx) + logger := l.WithValues("event", managedClusterHeartbeatEvent) + err := t.trackManagedClusterHeartbeat(ctx) if err != nil { logger.Error(err, "failed to track an event") } else { @@ -59,7 +59,7 @@ func (t *Tracker) Tick(ctx context.Context) { } } -func (t *Tracker) trackDeploymentHeartbeat(ctx context.Context) error { +func (t *Tracker) trackManagedClusterHeartbeat(ctx context.Context) error { mgmt := &v1alpha1.Management{} mgmtRef := types.NamespacedName{Namespace: v1alpha1.ManagementNamespace, Name: v1alpha1.ManagementName} err := t.Get(ctx, mgmtRef, mgmt) @@ -78,28 +78,28 @@ func (t *Tracker) trackDeploymentHeartbeat(ctx context.Context) error { } var errs error - deployments := &v1alpha1.DeploymentList{} - err = t.List(ctx, deployments, &crclient.ListOptions{}) + managedClusters := &v1alpha1.ManagedClusterList{} + err = t.List(ctx, managedClusters, &crclient.ListOptions{}) if err != nil { return err } - for _, deployment := range deployments.Items { - template := templates[deployment.Spec.Template] + for _, managedCluster := range managedClusters.Items { + template := templates[managedCluster.Spec.Template] // TODO: get k0s cluster ID once it's exposed in k0smotron API clusterID := "" - err = TrackDeploymentHeartbeat( + err = TrackManagedClusterHeartbeat( string(mgmt.UID), - string(deployment.UID), + string(managedCluster.UID), clusterID, - deployment.Spec.Template, + managedCluster.Spec.Template, template.Spec.Helm.ChartVersion, strings.Join(template.Status.Providers.InfrastructureProviders, ","), strings.Join(template.Status.Providers.BootstrapProviders, ","), strings.Join(template.Status.Providers.ControlPlaneProviders, ","), ) if err != nil { - errs = errors.Join(errs, fmt.Errorf("failed to track the heartbeat of the deployment %s/%s", deployment.Namespace, deployment.Name)) + errs = errors.Join(errs, fmt.Errorf("failed to track the heartbeat of the managedcluster %s/%s", managedCluster.Namespace, managedCluster.Name)) continue } } diff --git a/internal/webhook/deployment_webhook.go b/internal/webhook/managedcluster_webhook.go similarity index 65% rename from internal/webhook/deployment_webhook.go rename to internal/webhook/managedcluster_webhook.go index 1b6cbba6..38b71b91 100644 --- a/internal/webhook/deployment_webhook.go +++ b/internal/webhook/managedcluster_webhook.go @@ -33,79 +33,79 @@ import ( 
"github.com/Mirantis/hmc/internal/utils" ) -type DeploymentValidator struct { +type ManagedClusterValidator struct { client.Client } -var InvalidDeploymentErr = errors.New("the deployment is invalid") +var InvalidManagedClusterErr = errors.New("the ManagedCluster is invalid") -func (v *DeploymentValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { +func (v *ManagedClusterValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { v.Client = mgr.GetClient() return ctrl.NewWebhookManagedBy(mgr). - For(&v1alpha1.Deployment{}). + For(&v1alpha1.ManagedCluster{}). WithValidator(v). WithDefaulter(v). Complete() } var ( - _ webhook.CustomValidator = &DeploymentValidator{} - _ webhook.CustomDefaulter = &DeploymentValidator{} + _ webhook.CustomValidator = &ManagedClusterValidator{} + _ webhook.CustomDefaulter = &ManagedClusterValidator{} ) // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. -func (v *DeploymentValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { - deployment, ok := obj.(*v1alpha1.Deployment) +func (v *ManagedClusterValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + managedCluster, ok := obj.(*v1alpha1.ManagedCluster) if !ok { - return nil, apierrors.NewBadRequest(fmt.Sprintf("expected Deployment but got a %T", obj)) + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected ManagedCluster but got a %T", obj)) } - template, err := v.getDeploymentTemplate(ctx, deployment.Spec.Template) + template, err := v.getManagedClusterTemplate(ctx, managedCluster.Spec.Template) if err != nil { - return nil, fmt.Errorf("%s: %v", InvalidDeploymentErr, err) + return nil, fmt.Errorf("%s: %v", InvalidManagedClusterErr, err) } err = v.isTemplateValid(ctx, template) if err != nil { - return nil, fmt.Errorf("%s: %v", InvalidDeploymentErr, err) + return nil, fmt.Errorf("%s: %v", InvalidManagedClusterErr, err) } return nil, nil } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (v *DeploymentValidator) ValidateUpdate(ctx context.Context, _ runtime.Object, newObj runtime.Object) (admission.Warnings, error) { - newDeployment, ok := newObj.(*v1alpha1.Deployment) +func (v *ManagedClusterValidator) ValidateUpdate(ctx context.Context, _ runtime.Object, newObj runtime.Object) (admission.Warnings, error) { + newManagedCluster, ok := newObj.(*v1alpha1.ManagedCluster) if !ok { - return nil, apierrors.NewBadRequest(fmt.Sprintf("expected Deployment but got a %T", newObj)) + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected ManagedCluster but got a %T", newObj)) } - template, err := v.getDeploymentTemplate(ctx, newDeployment.Spec.Template) + template, err := v.getManagedClusterTemplate(ctx, newManagedCluster.Spec.Template) if err != nil { - return nil, fmt.Errorf("%s: %v", InvalidDeploymentErr, err) + return nil, fmt.Errorf("%s: %v", InvalidManagedClusterErr, err) } err = v.isTemplateValid(ctx, template) if err != nil { - return nil, fmt.Errorf("%s: %v", InvalidDeploymentErr, err) + return nil, fmt.Errorf("%s: %v", InvalidManagedClusterErr, err) } return nil, nil } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. 
-func (*DeploymentValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
+func (*ManagedClusterValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
 	return nil, nil
 }
 
 // Default implements webhook.Defaulter so a webhook will be registered for the type.
-func (v *DeploymentValidator) Default(ctx context.Context, obj runtime.Object) error {
-	deployment, ok := obj.(*v1alpha1.Deployment)
+func (v *ManagedClusterValidator) Default(ctx context.Context, obj runtime.Object) error {
+	managedCluster, ok := obj.(*v1alpha1.ManagedCluster)
 	if !ok {
-		return apierrors.NewBadRequest(fmt.Sprintf("expected Deployment but got a %T", obj))
+		return apierrors.NewBadRequest(fmt.Sprintf("expected ManagedCluster but got a %T", obj))
 	}
 
 	// Only apply defaults when there's no configuration provided
-	if deployment.Spec.Config != nil {
+	if managedCluster.Spec.Config != nil {
 		return nil
 	}
-	template, err := v.getDeploymentTemplate(ctx, deployment.Spec.Template)
+	template, err := v.getManagedClusterTemplate(ctx, managedCluster.Spec.Template)
 	if err != nil {
-		return fmt.Errorf("could not get template for the deployment: %s", err)
+		return fmt.Errorf("could not get template for the managedcluster: %s", err)
 	}
 	err = v.isTemplateValid(ctx, template)
 	if err != nil {
@@ -114,12 +114,12 @@ func (v *DeploymentValidator) Default(ctx context.Context, obj runtime.Object) e
 	if template.Status.Config == nil {
 		return nil
 	}
-	deployment.Spec.DryRun = true
-	deployment.Spec.Config = &apiextensionsv1.JSON{Raw: template.Status.Config.Raw}
+	managedCluster.Spec.DryRun = true
+	managedCluster.Spec.Config = &apiextensionsv1.JSON{Raw: template.Status.Config.Raw}
 	return nil
 }
 
-func (v *DeploymentValidator) getDeploymentTemplate(ctx context.Context, templateName string) (*v1alpha1.Template, error) {
+func (v *ManagedClusterValidator) getManagedClusterTemplate(ctx context.Context, templateName string) (*v1alpha1.Template, error) {
 	template := &v1alpha1.Template{}
 	templateRef := types.NamespacedName{Name: templateName, Namespace: v1alpha1.TemplatesNamespace}
 	if err := v.Get(ctx, templateRef, template); err != nil {
@@ -128,7 +128,7 @@ func (v *DeploymentValidator) getDeploymentTemplat
 	return template, nil
 }
 
-func (v *DeploymentValidator) isTemplateValid(ctx context.Context, template *v1alpha1.Template) error {
+func (v *ManagedClusterValidator) isTemplateValid(ctx context.Context, template *v1alpha1.Template) error {
 	if template.Status.Type != v1alpha1.TemplateTypeDeployment {
 		return fmt.Errorf("the template should be of the deployment type. Current: %s", template.Status.Type)
 	}
@@ -142,7 +142,7 @@ func (v *DeploymentValidator) isTemplateValid(ctx context.Context, template *v1a
 	return nil
 }
 
-func (v *DeploymentValidator) verifyProviders(ctx context.Context, template *v1alpha1.Template) error {
+func (v *ManagedClusterValidator) verifyProviders(ctx context.Context, template *v1alpha1.Template) error {
 	requiredProviders := template.Status.Providers
 	management := &v1alpha1.Management{}
 	managementRef := types.NamespacedName{Name: v1alpha1.ManagementName, Namespace: v1alpha1.ManagementNamespace}
diff --git a/internal/webhook/deployment_webhook_test.go b/internal/webhook/managedcluster_webhook_test.go
similarity index 66%
rename from internal/webhook/deployment_webhook_test.go
rename to internal/webhook/managedcluster_webhook_test.go
index 3262aad9..8bcf9e5f 100644
--- a/internal/webhook/deployment_webhook_test.go
+++ b/internal/webhook/managedcluster_webhook_test.go
@@ -25,7 +25,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 
 	"github.com/Mirantis/hmc/api/v1alpha1"
-	"github.com/Mirantis/hmc/test/objects/deployment"
+	"github.com/Mirantis/hmc/test/objects/managedcluster"
 	"github.com/Mirantis/hmc/test/objects/management"
 	"github.com/Mirantis/hmc/test/objects/template"
 	"github.com/Mirantis/hmc/test/scheme"
@@ -44,19 +44,19 @@ var (
 	createAndUpdateTests = []struct {
 		name            string
-		deployment      *v1alpha1.Deployment
+		managedCluster  *v1alpha1.ManagedCluster
 		existingObjects []runtime.Object
 		err             string
 		warnings        admission.Warnings
 	}{
 		{
-			name:       "should fail if the template is unset",
-			deployment: deployment.NewDeployment(),
-			err:        "the deployment is invalid: templates.hmc.mirantis.com \"\" not found",
+			name:           "should fail if the template is unset",
+			managedCluster: managedcluster.NewManagedCluster(),
+			err:            "the ManagedCluster is invalid: templates.hmc.mirantis.com \"\" not found",
 		},
 		{
-			name:       "should fail if the template is not found in hmc-system namespace",
-			deployment: deployment.NewDeployment(deployment.WithTemplate(testTemplateName)),
+			name:           "should fail if the template is not found in hmc-system namespace",
+			managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)),
 			existingObjects: []runtime.Object{
 				mgmt,
 				template.NewTemplate(
@@ -64,20 +64,20 @@ var (
 					template.WithNamespace("default"),
 				),
 			},
-			err: fmt.Sprintf("the deployment is invalid: templates.hmc.mirantis.com \"%s\" not found", testTemplateName),
+			err: fmt.Sprintf("the ManagedCluster is invalid: templates.hmc.mirantis.com \"%s\" not found", testTemplateName),
 		},
 		{
-			name:       "should fail if the template was found but is invalid (type is unset)",
-			deployment: deployment.NewDeployment(deployment.WithTemplate(testTemplateName)),
+			name:           "should fail if the template was found but is invalid (type is unset)",
+			managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)),
 			existingObjects: []runtime.Object{
 				mgmt,
 				template.NewTemplate(template.WithName(testTemplateName)),
 			},
-			err: "the deployment is invalid: the template should be of the deployment type. Current: ",
+			err: "the ManagedCluster is invalid: the template should be of the deployment type. Current: ",
 		},
 		{
-			name:       "should fail if the template was found but is invalid (some validation error)",
-			deployment: deployment.NewDeployment(deployment.WithTemplate(testTemplateName)),
+			name:           "should fail if the template was found but is invalid (some validation error)",
+			managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)),
 			existingObjects: []runtime.Object{
 				mgmt,
 				template.NewTemplate(
@@ -89,11 +89,11 @@ var (
 					}),
 				),
 			},
-			err: "the deployment is invalid: the template is not valid: validation error example",
+			err: "the ManagedCluster is invalid: the template is not valid: validation error example",
 		},
 		{
-			name:       "should fail if one or more requested providers are not available yet",
-			deployment: deployment.NewDeployment(deployment.WithTemplate(testTemplateName)),
+			name:           "should fail if one or more requested providers are not available yet",
+			managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)),
 			existingObjects: []runtime.Object{
 				management.NewManagement(
 					management.WithAvailableProviders(v1alpha1.Providers{
@@ -112,11 +112,11 @@ var (
 					template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}),
 				),
 			},
-			err: "the deployment is invalid: providers verification failed: one or more required control plane providers are not deployed yet: [k0s]\none or more required infrastructure providers are not deployed yet: [azure]",
+			err: "the ManagedCluster is invalid: providers verification failed: one or more required control plane providers are not deployed yet: [k0s]\none or more required infrastructure providers are not deployed yet: [azure]",
 		},
 		{
-			name:       "should succeed",
-			deployment: deployment.NewDeployment(deployment.WithTemplate(testTemplateName)),
+			name:           "should succeed",
+			managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)),
 			existingObjects: []runtime.Object{
 				mgmt,
 				template.NewTemplate(
@@ -134,15 +134,15 @@ var (
 	}
 )
 
-func TestDeploymentValidateCreate(t *testing.T) {
+func TestManagedClusterValidateCreate(t *testing.T) {
 	g := NewWithT(t)
 
 	ctx := context.Background()
 	for _, tt := range createAndUpdateTests {
 		t.Run(tt.name, func(t *testing.T) {
 			c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build()
-			validator := &DeploymentValidator{Client: c}
-			warn, err := validator.ValidateCreate(ctx, tt.deployment)
+			validator := &ManagedClusterValidator{Client: c}
+			warn, err := validator.ValidateCreate(ctx, tt.managedCluster)
 			if tt.err != "" {
 				g.Expect(err).To(HaveOccurred())
 				if err.Error() != tt.err {
@@ -160,15 +160,15 @@ func TestDeploymentValidateCreate(t *testing.T) {
 	}
 }
 
-func TestDeploymentValidateUpdate(t *testing.T) {
+func TestManagedClusterValidateUpdate(t *testing.T) {
 	g := NewWithT(t)
 
 	ctx := context.Background()
 	for _, tt := range createAndUpdateTests {
 		t.Run(tt.name, func(t *testing.T) {
 			c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build()
-			validator := &DeploymentValidator{Client: c}
-			warn, err := validator.ValidateUpdate(ctx, deployment.NewDeployment(), tt.deployment)
+			validator := &ManagedClusterValidator{Client: c}
+			warn, err := validator.ValidateUpdate(ctx, managedcluster.NewManagedCluster(), tt.managedCluster)
 			if tt.err != "" {
 				g.Expect(err).To(HaveOccurred())
 				if err.Error() != tt.err {
@@ -186,29 +186,29 @@ func TestDeploymentValidateUpdate(t *testing.T) {
 	}
 }
 
-func TestDeploymentDefault(t *testing.T) {
+func TestManagedClusterDefault(t *testing.T) {
 	g := NewWithT(t)
 
 	ctx := context.Background()
 
-	deploymentConfig := `{"foo":"bar"}`
+	managedClusterConfig := `{"foo":"bar"}`
 
 	tests := []struct {
 		name            string
-		input           *v1alpha1.Deployment
-		output          *v1alpha1.Deployment
+		input           *v1alpha1.ManagedCluster
+		output          *v1alpha1.ManagedCluster
 		existingObjects []runtime.Object
 		err             string
 	}{
 		{
 			name:   "should not set defaults if the config is provided",
-			input:  deployment.NewDeployment(deployment.WithConfig(deploymentConfig)),
-			output: deployment.NewDeployment(deployment.WithConfig(deploymentConfig)),
+			input:  managedcluster.NewManagedCluster(managedcluster.WithConfig(managedClusterConfig)),
+			output: managedcluster.NewManagedCluster(managedcluster.WithConfig(managedClusterConfig)),
 		},
 		{
 			name:   "should not set defaults: template is invalid",
-			input:  deployment.NewDeployment(deployment.WithTemplate(testTemplateName)),
-			output: deployment.NewDeployment(deployment.WithTemplate(testTemplateName)),
+			input:  managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)),
+			output: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)),
 			existingObjects: []runtime.Object{
 				mgmt,
 				template.NewTemplate(
@@ -224,8 +224,8 @@ func TestDeploymentDefault(t *testing.T) {
 		},
 		{
 			name:   "should not set defaults: config in template status is unset",
-			input:  deployment.NewDeployment(deployment.WithTemplate(testTemplateName)),
-			output: deployment.NewDeployment(deployment.WithTemplate(testTemplateName)),
+			input:  managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)),
+			output: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)),
 			existingObjects: []runtime.Object{
 				mgmt,
 				template.NewTemplate(
@@ -237,11 +237,11 @@ func TestDeploymentDefault(t *testing.T) {
 		},
 		{
 			name:  "should set defaults",
-			input: deployment.NewDeployment(deployment.WithTemplate(testTemplateName)),
-			output: deployment.NewDeployment(
-				deployment.WithTemplate(testTemplateName),
-				deployment.WithConfig(deploymentConfig),
-				deployment.WithDryRun(true),
+			input: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)),
+			output: managedcluster.NewManagedCluster(
+				managedcluster.WithTemplate(testTemplateName),
+				managedcluster.WithConfig(managedClusterConfig),
+				managedcluster.WithDryRun(true),
 			),
 			existingObjects: []runtime.Object{
 				mgmt,
@@ -249,7 +249,7 @@ func TestDeploymentDefault(t *testing.T) {
 					template.WithName(testTemplateName),
 					template.WithTypeStatus(v1alpha1.TemplateTypeDeployment),
 					template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}),
-					template.WithConfigStatus(deploymentConfig),
+					template.WithConfigStatus(managedClusterConfig),
 				),
 			},
 		},
@@ -258,7 +258,7 @@ func TestDeploymentDefault(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build()
-			validator := &DeploymentValidator{Client: c}
+			validator := &ManagedClusterValidator{Client: c}
 			err := validator.Default(ctx, tt.input)
 			if tt.err != "" {
 				g.Expect(err).To(HaveOccurred())
diff --git a/internal/webhook/management_webhook.go b/internal/webhook/management_webhook.go
index 7ae6bdc8..47f6b238 100644
--- a/internal/webhook/management_webhook.go
+++ b/internal/webhook/management_webhook.go
@@ -63,13 +63,13 @@ func (*ManagementValidator) ValidateUpdate(_ context.Context, _ runtime.Object,
 // ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
 func (v *ManagementValidator) ValidateDelete(ctx context.Context, _ runtime.Object) (admission.Warnings, error) {
-	deployments := &v1alpha1.DeploymentList{}
-	err := v.Client.List(ctx, deployments, client.Limit(1))
+	managedClusters := &v1alpha1.ManagedClusterList{}
+	err := v.Client.List(ctx, managedClusters, client.Limit(1))
 	if err != nil {
 		return nil, err
 	}
-	if len(deployments.Items) > 0 {
-		return admission.Warnings{"The Management object can't be removed if Deployment objects still exist"}, ManagementDeletionForbidden
+	if len(managedClusters.Items) > 0 {
+		return admission.Warnings{"The Management object can't be removed if ManagedCluster objects still exist"}, ManagementDeletionForbidden
 	}
 	return nil, nil
 }
diff --git a/internal/webhook/management_webhook_test.go b/internal/webhook/management_webhook_test.go
index 9044c512..a8bd1803 100644
--- a/internal/webhook/management_webhook_test.go
+++ b/internal/webhook/management_webhook_test.go
@@ -24,7 +24,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 
 	"github.com/Mirantis/hmc/api/v1alpha1"
-	"github.com/Mirantis/hmc/test/objects/deployment"
+	"github.com/Mirantis/hmc/test/objects/managedcluster"
 	"github.com/Mirantis/hmc/test/objects/management"
 	"github.com/Mirantis/hmc/test/scheme"
 )
@@ -42,10 +42,10 @@ func TestManagementValidateDelete(t *testing.T) {
 		warnings        admission.Warnings
 	}{
 		{
-			name:            "should fail if Deployment objects exist",
+			name:            "should fail if ManagedCluster objects exist",
 			management:      management.NewManagement(),
-			existingObjects: []runtime.Object{deployment.NewDeployment()},
-			warnings:        admission.Warnings{"The Management object can't be removed if Deployment objects still exist"},
+			existingObjects: []runtime.Object{managedcluster.NewManagedCluster()},
+			warnings:        admission.Warnings{"The Management object can't be removed if ManagedCluster objects still exist"},
 			err:             "management deletion is forbidden",
 		},
 		{
diff --git a/templates/hmc/templates/crds/hmc.mirantis.com_deployments.yaml b/templates/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml
similarity index 94%
rename from templates/hmc/templates/crds/hmc.mirantis.com_deployments.yaml
rename to templates/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml
index 80676e49..d98619a4 100644
--- a/templates/hmc/templates/crds/hmc.mirantis.com_deployments.yaml
+++ b/templates/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml
@@ -4,17 +4,17 @@ kind: CustomResourceDefinition
 metadata:
   annotations:
     controller-gen.kubebuilder.io/version: v0.14.0
-  name: deployments.hmc.mirantis.com
+  name: managedclusters.hmc.mirantis.com
 spec:
   group: hmc.mirantis.com
   names:
-    kind: Deployment
-    listKind: DeploymentList
-    plural: deployments
+    kind: ManagedCluster
+    listKind: ManagedClusterList
+    plural: managedclusters
     shortNames:
     - hmc-deploy
    - deploy
-    singular: deployment
+    singular: managedcluster
   scope: Namespaced
   versions:
  - additionalPrinterColumns:
@@ -34,7 +34,7 @@ spec:
     name: v1alpha1
     schema:
       openAPIV3Schema:
-        description: Deployment is the Schema for the deployments API
+        description: ManagedCluster is the Schema for the managedclusters API
         properties:
           apiVersion:
             description: |-
@@ -54,7 +54,7 @@ spec:
           metadata:
             type: object
           spec:
-            description: DeploymentSpec defines the desired state of Deployment
+            description: ManagedClusterSpec defines the desired state of ManagedCluster
             properties:
               config:
                 description: |-
@@ -75,11 +75,11 @@ spec:
             - template
             type: object
           status:
-            description: DeploymentStatus defines the observed state of Deployment
+            description: ManagedClusterStatus defines the observed state of ManagedCluster
             properties:
               conditions:
                 description: Conditions contains details for the current state of
-                  the Deployment
+                  the ManagedCluster
                 items:
                   description: "Condition contains details for one aspect of the current
                     state of this API Resource.\n---\nThis struct is intended for
diff --git a/templates/hmc/templates/crds/hmc.mirantis.com_templates.yaml b/templates/hmc/templates/crds/hmc.mirantis.com_templates.yaml
index f8216a89..91f46125 100644
--- a/templates/hmc/templates/crds/hmc.mirantis.com_templates.yaml
+++ b/templates/hmc/templates/crds/hmc.mirantis.com_templates.yaml
@@ -180,7 +180,7 @@ spec:
               config:
                 description: |-
                   Config demonstrates available parameters for template customization,
-                  that can be used when creating Deployment objects.
+                  that can be used when creating ManagedCluster objects.
                 x-kubernetes-preserve-unknown-fields: true
               description:
                 description: Description contains information about the template.
diff --git a/templates/hmc/templates/rbac/roles.yaml b/templates/hmc/templates/rbac/roles.yaml
index 18259557..68b0cd57 100644
--- a/templates/hmc/templates/rbac/roles.yaml
+++ b/templates/hmc/templates/rbac/roles.yaml
@@ -27,7 +27,7 @@ rules:
 - apiGroups:
   - hmc.mirantis.com
   resources:
-  - deployments
+  - managedclusters
   verbs:
   - create
   - delete
@@ -39,13 +39,13 @@ rules:
 - apiGroups:
   - hmc.mirantis.com
   resources:
-  - deployments/finalizers
+  - managedclusters/finalizers
   verbs:
   - update
 - apiGroups:
   - hmc.mirantis.com
   resources:
-  - deployments/status
+  - managedclusters/status
   verbs:
   - get
   - patch
diff --git a/templates/hmc/templates/webhooks.yaml b/templates/hmc/templates/webhooks.yaml
index c92fbe48..418d4f06 100644
--- a/templates/hmc/templates/webhooks.yaml
+++ b/templates/hmc/templates/webhooks.yaml
@@ -13,10 +13,10 @@ webhooks:
       service:
         name: {{ include "hmc.webhook.serviceName" . }}
         namespace: {{ include "hmc.webhook.serviceNamespace" . }}
-      path: /mutate-hmc-mirantis-com-v1alpha1-deployment
+      path: /mutate-hmc-mirantis-com-v1alpha1-managedcluster
   failurePolicy: Fail
   matchPolicy: Equivalent
-  name: mutation.deployment.hmc.mirantis.com
+  name: mutation.managedcluster.hmc.mirantis.com
   rules:
     - apiGroups:
         - hmc.mirantis.com
@@ -26,7 +26,7 @@ webhooks:
         - CREATE
         - UPDATE
       resources:
-        - deployments
+        - managedclusters
   sideEffects: None
 - admissionReviewVersions:
     - v1
@@ -87,10 +87,10 @@ webhooks:
       service:
         name: {{ include "hmc.webhook.serviceName" . }}
         namespace: {{ include "hmc.webhook.serviceNamespace" . }}
-      path: /validate-hmc-mirantis-com-v1alpha1-deployment
+      path: /validate-hmc-mirantis-com-v1alpha1-managedcluster
   failurePolicy: Fail
   matchPolicy: Equivalent
-  name: validation.deployment.hmc.mirantis.com
+  name: validation.managedcluster.hmc.mirantis.com
   rules:
     - apiGroups:
         - hmc.mirantis.com
@@ -101,7 +101,7 @@ webhooks:
         - UPDATE
         - DELETE
       resources:
-        - deployments
+        - managedclusters
   sideEffects: None
 - admissionReviewVersions:
     - v1
diff --git a/test/objects/deployment/deployment.go b/test/objects/managedcluster/managedcluster.go
similarity index 75%
rename from test/objects/deployment/deployment.go
rename to test/objects/managedcluster/managedcluster.go
index ae365cc8..b0897e1e 100644
--- a/test/objects/deployment/deployment.go
+++ b/test/objects/managedcluster/managedcluster.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package deployment
+package managedcluster
 
 import (
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
@@ -22,14 +22,14 @@ import (
 )
 
 const (
-	DefaultName      = "deployment"
+	DefaultName      = "managedcluster"
 	DefaultNamespace = "default"
 )
 
-type Opt func(deployment *v1alpha1.Deployment)
+type Opt func(managedCluster *v1alpha1.ManagedCluster)
 
-func NewDeployment(opts ...Opt) *v1alpha1.Deployment {
-	p := &v1alpha1.Deployment{
+func NewManagedCluster(opts ...Opt) *v1alpha1.ManagedCluster {
+	p := &v1alpha1.ManagedCluster{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      DefaultName,
 			Namespace: DefaultNamespace,
@@ -43,31 +43,31 @@ func NewDeployment(opts ...Opt) *v1alpha1.Deployment {
 }
 
 func WithName(name string) Opt {
-	return func(p *v1alpha1.Deployment) {
+	return func(p *v1alpha1.ManagedCluster) {
 		p.Name = name
 	}
 }
 
 func WithNamespace(namespace string) Opt {
-	return func(p *v1alpha1.Deployment) {
+	return func(p *v1alpha1.ManagedCluster) {
 		p.Namespace = namespace
 	}
 }
 
 func WithDryRun(dryRun bool) Opt {
-	return func(p *v1alpha1.Deployment) {
+	return func(p *v1alpha1.ManagedCluster) {
 		p.Spec.DryRun = dryRun
 	}
 }
 
 func WithTemplate(templateName string) Opt {
-	return func(p *v1alpha1.Deployment) {
+	return func(p *v1alpha1.ManagedCluster) {
 		p.Spec.Template = templateName
 	}
 }
 
 func WithConfig(config string) Opt {
-	return func(p *v1alpha1.Deployment) {
+	return func(p *v1alpha1.ManagedCluster) {
 		p.Spec.Config = &apiextensionsv1.JSON{
 			Raw: []byte(config),
 		}
diff --git a/test/utils/utils.go b/test/utils/utils.go
index e9f2c139..4e59dca0 100644
--- a/test/utils/utils.go
+++ b/test/utils/utils.go
@@ -91,7 +91,7 @@ func InstallCertManager() error {
 	}
 	// Wait for cert-manager-webhook to be ready, which can take time if cert-manager
 	// was re-installed after uninstalling on a cluster.
 	cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook",
 		"--for", "condition=Available",
 		"--namespace", "cert-manager",
 		"--timeout", "5m",