diff --git a/Taskfile.yaml b/Taskfile.yaml index e957b88..7eb5480 100644 --- a/Taskfile.yaml +++ b/Taskfile.yaml @@ -7,9 +7,9 @@ includes: excludes: [] # put task names in here which are overwritten in this file vars: NESTED_MODULES: api - API_DIRS: '{{.ROOT_DIR}}/api/provider/v1alpha1/... {{.ROOT_DIR}}/api/clusters/v1alpha1/...' + API_DIRS: '{{.ROOT_DIR}}/api/...' MANIFEST_OUT: '{{.ROOT_DIR}}/api/crds/manifests' - CODE_DIRS: '{{.ROOT_DIR}}/cmd/... {{.ROOT_DIR}}/internal/... {{.ROOT_DIR}}/api/provider/v1alpha1/... {{.ROOT_DIR}}/api/clusters/v1alpha1/...' + CODE_DIRS: '{{.ROOT_DIR}}/cmd/... {{.ROOT_DIR}}/internal/... {{.ROOT_DIR}}/api/...' COMPONENTS: 'openmcp-operator' REPO_URL: 'https://github.com/openmcp-project/openmcp-operator' GENERATE_DOCS_INDEX: "true" diff --git a/api/clusters/v1alpha1/accessrequest_types.go b/api/clusters/v1alpha1/accessrequest_types.go index 27a947e..69de0b2 100644 --- a/api/clusters/v1alpha1/accessrequest_types.go +++ b/api/clusters/v1alpha1/accessrequest_types.go @@ -7,14 +7,14 @@ import ( type AccessRequestSpec struct { // ClusterRef is the reference to the Cluster for which access is requested. - // Exactly one of clusterRef or requestRef must be set. + // If set, requestRef will be ignored. // This value is immutable. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="clusterRef is immutable" // +optional ClusterRef *NamespacedObjectReference `json:"clusterRef,omitempty"` // RequestRef is the reference to the ClusterRequest for whose Cluster access is requested. - // Exactly one of clusterRef or requestRef must be set. + // Is ignored if clusterRef is set. // This value is immutable. 
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="requestRef is immutable" // +optional diff --git a/api/clusters/v1alpha1/clusterrequest_types.go b/api/clusters/v1alpha1/clusterrequest_types.go index 7407900..a7b2943 100644 --- a/api/clusters/v1alpha1/clusterrequest_types.go +++ b/api/clusters/v1alpha1/clusterrequest_types.go @@ -11,7 +11,7 @@ type ClusterRequestSpec struct { Purpose string `json:"purpose"` } -// +kubebuilder:validation:XValidation:rule="!has(oldSelf.clusterRef) || has(self.clusterRef)", message="clusterRef may not be removed once set" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.cluster) || has(self.cluster)", message="cluster may not be removed once set" type ClusterRequestStatus struct { CommonStatus `json:",inline"` @@ -23,8 +23,8 @@ type ClusterRequestStatus struct { // Cluster is the reference to the Cluster that was returned as a result of a granted request. // Note that this information needs to be recoverable in case this status is lost, e.g. by adding a back reference in form of a finalizer to the Cluster resource. 
// +optional - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="clusterRef is immutable" - Cluster *NamespacedObjectReference `json:"clusterRef,omitempty"` + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="cluster is immutable" + Cluster *NamespacedObjectReference `json:"cluster,omitempty"` } type RequestPhase string @@ -49,6 +49,8 @@ func (p RequestPhase) IsPending() bool { // +kubebuilder:selectablefield:JSONPath=".status.phase" // +kubebuilder:printcolumn:JSONPath=".spec.purpose",name="Purpose",type=string // +kubebuilder:printcolumn:JSONPath=".status.phase",name="Phase",type=string +// +kubebuilder:printcolumn:JSONPath=".status.cluster.name",name="Cluster",type=string +// +kubebuilder:printcolumn:JSONPath=".status.cluster.namespace",name="Cluster-NS",type=string // ClusterRequest is the Schema for the clusters API type ClusterRequest struct { diff --git a/api/clusters/v1alpha1/constants.go b/api/clusters/v1alpha1/constants.go index 1ef2c2f..b58fe4e 100644 --- a/api/clusters/v1alpha1/constants.go +++ b/api/clusters/v1alpha1/constants.go @@ -56,29 +56,24 @@ const ( ) const ( - // ClusterLabel can be used on CRDs to indicate onto which cluster they should be deployed. - ClusterLabel = "openmcp.cloud/cluster" - // OperationAnnotation is used to trigger specific operations on resources. - OperationAnnotation = "openmcp.cloud/operation" - // OperationAnnotationValueIgnore is used to ignore the resource. - OperationAnnotationValueIgnore = "ignore" - // OperationAnnotationValueReconcile is used to trigger a reconcile on the resource. - OperationAnnotationValueReconcile = "reconcile" - // K8sVersionAnnotation can be used to display the k8s version of the cluster. - K8sVersionAnnotation = "clusters.openmcp.cloud/k8sversion" + K8sVersionAnnotation = GroupName + "/k8sversion" // ProviderInfoAnnotation can be used to display provider-specific information about the cluster. 
- ProviderInfoAnnotation = "clusters.openmcp.cloud/providerinfo" + ProviderInfoAnnotation = GroupName + "/providerinfo" // ProfileNameAnnotation can be used to display the actual name (not the hash) of the cluster profile. - ProfileNameAnnotation = "clusters.openmcp.cloud/profile" + ProfileNameAnnotation = GroupName + "/profile" // EnvironmentAnnotation can be used to display the environment of the cluster. - EnvironmentAnnotation = "clusters.openmcp.cloud/environment" + EnvironmentAnnotation = GroupName + "/environment" // ProviderAnnotation can be used to display the provider of the cluster. - ProviderAnnotation = "clusters.openmcp.cloud/provider" + ProviderAnnotation = GroupName + "/provider" // DeleteWithoutRequestsLabel marks that the corresponding cluster can be deleted if the scheduler removes the last request pointing to it. // Its value must be "true" for the label to take effect. - DeleteWithoutRequestsLabel = "clusters.openmcp.cloud/delete-without-requests" + DeleteWithoutRequestsLabel = GroupName + "/delete-without-requests" + // ProviderLabel is used to indicate the provider that is responsible for an AccessRequest. + ProviderLabel = "provider." + GroupName + // ProfileLabel is used to make the profile information easily accessible for the ClusterProviders. + ProfileLabel = "profile." + GroupName ) const ( diff --git a/api/clusters/v1alpha1/constants/reasons.go b/api/clusters/v1alpha1/constants/reasons.go index 6b9978d..a585f5c 100644 --- a/api/clusters/v1alpha1/constants/reasons.go +++ b/api/clusters/v1alpha1/constants/reasons.go @@ -5,6 +5,8 @@ const ( ReasonOnboardingClusterInteractionProblem = "OnboardingClusterInteractionProblem" // ReasonPlatformClusterInteractionProblem is used when the platform cluster cannot be reached. ReasonPlatformClusterInteractionProblem = "PlatformClusterInteractionProblem" + // ReasonInvalidReference means that a reference points to a non-existing or otherwise invalid object. 
+ ReasonInvalidReference = "InvalidReference" // ReasonConfigurationProblem indicates that something is configured incorrectly. ReasonConfigurationProblem = "ConfigurationProblem" // ReasonInternalError indicates that something went wrong internally. diff --git a/api/clusters/v1alpha1/groupversion_info.go b/api/clusters/v1alpha1/groupversion_info.go index 43dcb74..c00a92b 100644 --- a/api/clusters/v1alpha1/groupversion_info.go +++ b/api/clusters/v1alpha1/groupversion_info.go @@ -5,9 +5,11 @@ package v1alpha1 import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/scheme" + + apiconst "github.com/openmcp-project/openmcp-operator/api/constants" ) -const GroupName = "clusters.openmcp.cloud" +const GroupName = "clusters." + apiconst.OpenMCPGroupName var ( // GroupVersion is group version used to register these objects diff --git a/api/constants/constants.go b/api/constants/constants.go new file mode 100644 index 0000000..15834ea --- /dev/null +++ b/api/constants/constants.go @@ -0,0 +1,16 @@ +package constants + +const ( + // OpenMCPGroupName is the base API group name for OpenMCP. + OpenMCPGroupName = "openmcp.cloud" + + // ClusterLabel can be used on CRDs to indicate onto which cluster they should be deployed. + ClusterLabel = OpenMCPGroupName + "/cluster" + + // OperationAnnotation is used to trigger specific operations on resources. + OperationAnnotation = OpenMCPGroupName + "/operation" + // OperationAnnotationValueIgnore is used to ignore the resource. + OperationAnnotationValueIgnore = "ignore" + // OperationAnnotationValueReconcile is used to trigger a reconcile on the resource. 
+ OperationAnnotationValueReconcile = "reconcile" +) diff --git a/api/crds/manifests/clusters.openmcp.cloud_accessrequests.yaml b/api/crds/manifests/clusters.openmcp.cloud_accessrequests.yaml index 3b99002..04f82d5 100644 --- a/api/crds/manifests/clusters.openmcp.cloud_accessrequests.yaml +++ b/api/crds/manifests/clusters.openmcp.cloud_accessrequests.yaml @@ -50,7 +50,7 @@ spec: clusterRef: description: |- ClusterRef is the reference to the Cluster for which access is requested. - Exactly one of clusterRef or requestRef must be set. + If set, requestRef will be ignored. This value is immutable. properties: name: @@ -135,7 +135,7 @@ spec: requestRef: description: |- RequestRef is the reference to the ClusterRequest for whose Cluster access is requested. - Exactly one of clusterRef or requestRef must be set. + Is ignored if clusterRef is set. This value is immutable. properties: name: diff --git a/api/crds/manifests/clusters.openmcp.cloud_clusterrequests.yaml b/api/crds/manifests/clusters.openmcp.cloud_clusterrequests.yaml index 4ccecd8..e722406 100644 --- a/api/crds/manifests/clusters.openmcp.cloud_clusterrequests.yaml +++ b/api/crds/manifests/clusters.openmcp.cloud_clusterrequests.yaml @@ -26,6 +26,12 @@ spec: - jsonPath: .status.phase name: Phase type: string + - jsonPath: .status.cluster.name + name: Cluster + type: string + - jsonPath: .status.cluster.namespace + name: Cluster-NS + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -62,7 +68,7 @@ spec: rule: self == oldSelf status: properties: - clusterRef: + cluster: description: |- Cluster is the reference to the Cluster that was returned as a result of a granted request. Note that this information needs to be recoverable in case this status is lost, e.g. by adding a back reference in form of a finalizer to the Cluster resource. 
@@ -79,7 +85,7 @@ spec: - namespace type: object x-kubernetes-validations: - - message: clusterRef is immutable + - message: cluster is immutable rule: self == oldSelf conditions: description: Conditions contains the conditions. @@ -147,8 +153,8 @@ spec: - phase type: object x-kubernetes-validations: - - message: clusterRef may not be removed once set - rule: '!has(oldSelf.clusterRef) || has(self.clusterRef)' + - message: cluster may not be removed once set + rule: '!has(oldSelf.cluster) || has(self.cluster)' type: object selectableFields: - jsonPath: .spec.purpose diff --git a/cmd/openmcp-operator/app/init.go b/cmd/openmcp-operator/app/init.go index 6608f83..69770fa 100644 --- a/cmd/openmcp-operator/app/init.go +++ b/cmd/openmcp-operator/app/init.go @@ -10,6 +10,7 @@ import ( "sigs.k8s.io/yaml" clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" + apiconst "github.com/openmcp-project/openmcp-operator/api/constants" "github.com/openmcp-project/openmcp-operator/api/crds" "github.com/openmcp-project/openmcp-operator/api/install" ) @@ -75,12 +76,12 @@ func (o *InitOptions) Run(ctx context.Context) error { log.Info("Environment", "value", o.Environment) // apply CRDs - crdManager := crdutil.NewCRDManager(clustersv1alpha1.ClusterLabel, crds.CRDs) + crdManager := crdutil.NewCRDManager(apiconst.ClusterLabel, crds.CRDs) crdManager.AddCRDLabelToClusterMapping(clustersv1alpha1.PURPOSE_ONBOARDING, o.Clusters.Onboarding) crdManager.AddCRDLabelToClusterMapping(clustersv1alpha1.PURPOSE_PLATFORM, o.Clusters.Platform) - if err := crdManager.CreateOrUpdateCRDs(ctx, nil); err != nil { + if err := crdManager.CreateOrUpdateCRDs(ctx, &log); err != nil { return fmt.Errorf("error creating/updating CRDs: %w", err) } diff --git a/cmd/openmcp-operator/app/run.go b/cmd/openmcp-operator/app/run.go index 343d3da..6d2e70a 100644 --- a/cmd/openmcp-operator/app/run.go +++ b/cmd/openmcp-operator/app/run.go @@ -25,6 +25,7 @@ import ( 
"github.com/openmcp-project/openmcp-operator/api/install" "github.com/openmcp-project/openmcp-operator/api/provider/v1alpha1" + "github.com/openmcp-project/openmcp-operator/internal/controllers/accessrequest" "github.com/openmcp-project/openmcp-operator/internal/controllers/provider" "github.com/openmcp-project/openmcp-operator/internal/controllers/scheduler" ) @@ -33,6 +34,7 @@ var setupLog logging.Logger var allControllers = []string{ strings.ToLower(scheduler.ControllerName), strings.ToLower(provider.ControllerName), + strings.ToLower(accessrequest.ControllerName), } func NewRunCommand(so *SharedOptions) *cobra.Command { @@ -299,6 +301,13 @@ func (o *RunOptions) Run(ctx context.Context) error { } } + // setup accessrequest controller + if slices.Contains(o.Controllers, strings.ToLower(accessrequest.ControllerName)) { + if err := accessrequest.NewAccessRequestReconciler(o.Clusters.Platform, o.Config.AccessRequest).SetupWithManager(mgr); err != nil { + return fmt.Errorf("unable to setup accessrequest controller: %w", err) + } + } + // setup deployment controller if slices.Contains(o.Controllers, strings.ToLower(provider.ControllerName)) { utilruntime.Must(clientgoscheme.AddToScheme(mgr.GetScheme())) diff --git a/docs/README.md b/docs/README.md index 05b077e..998425d 100644 --- a/docs/README.md +++ b/docs/README.md @@ -3,6 +3,7 @@ ## Controller +- [AccessRequest Controller](controller/accessrequest.md) - [Cluster Scheduler](controller/scheduler.md) ## Resources diff --git a/docs/controller/accessrequest.md b/docs/controller/accessrequest.md new file mode 100644 index 0000000..a82de82 --- /dev/null +++ b/docs/controller/accessrequest.md @@ -0,0 +1,37 @@ +# AccessRequest Controller + +The _AccessRequest Controller_ is responsible for labelling `AccessRequest` resources with the name of the ClusterProvider that is responsible for them. 
It also adds a label for the corresponding `ClusterProfile` and adds the cluster reference to the spec, if only the request reference is specified. + +This is needed because the information about which ClusterProvider is responsible for answering the `AccessRequest` is contained in the referenced `ClusterProfile`. Depending on `AccessRequest`'s spec, a `Cluster` and potentially also a `ClusterRequest` must be fetched before the `ClusterProfile` is known, which then has to be fetched too. If multiple ClusterProviders are running in the cluster, all of them would need to fetch these resources, only for all but one of them to notice that they are not responsible and don't have to do anything. + +To increase performance and simplify reconciliation logic in the individual ClusterProviders, this central AccessRequest controller takes over the task of figuring out the ClusterProfile and the responsible ClusterProvider and it adds these as labels to the `AccessRequest` resource. It reacts only to resources which do not yet have both of these labels set, so it should reconcile each `AccessRequest` only once (excluding repeated reconciliations due to errors). + +The added labels are: +```yaml +provider.clusters.openmcp.cloud: +profile.clusters.openmcp.cloud: +``` + +ClusterProviders should only reconcile `AccessRequest` resources where both labels are set and the value of the provider label matches their own provider name. Resources where either label is missing or the value of the provider label does not match their own provider name must be ignored. + +Note that if a reconciled `AccessRequest` already has one of the labels set, but its value differs from the expected one, the controller will log an error, but not update the resource in any way, to not accidentally move the responsibility from one provider to another.
This also means that `AccessRequest` resources that have only one of the labels set, and that one to a wrong value, will not be handled - this controller won't update the resource and the ClusterProvider should not pick it up because one of the labels is missing. It is therefore strongly recommended to not set the labels when creating a new `AccessRequest` resource. + +In addition to the labels, the controller also sets `spec.clusterRef`, if only `spec.requestRef` is specified. + +After an `AccessRequest` has been prepared this way, the ClusterProviders can easily infer which one is responsible, which `Cluster` resource this request belongs to, and which `ClusterProfile` is used by the `Cluster`, directly from the labels and spec of the `AccessRequest` resource. + +## Configuration + +The AccessRequest controller is run as long as `accessrequest` is included in the `--controllers` flag. It is included by default. + +The entire configuration for the AccessRequest controller is optional. +```yaml +accessRequest: # optional + selector: # optional + matchLabels: <...> # optional + matchExpressions: <...> # optional +``` + +The following fields can be specified inside the `accessRequest` node: +- `selector` _(optional)_ + - A standard k8s label selector, as it is also used in Deployments, for example. If specified, only `AccessRequest` resources matching the selector are reconciled by the controller. This can be used to distribute resources between multiple instances of the AccessRequest controller watching the same cluster. diff --git a/internal/config/config.go b/internal/config/config.go index 61ffdeb..cbc114d 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -26,6 +26,9 @@ type Config struct { // Scheduler is the configuration for the cluster scheduler. Scheduler *SchedulerConfig `json:"scheduler,omitempty"` + + // AccessRequest is the configuration for the access request controller. 
+ AccessRequest *AccessRequestConfig `json:"accessRequest,omitempty"` } // Dump is used for logging and debugging purposes. diff --git a/internal/config/config_accessrequest.go b/internal/config/config_accessrequest.go new file mode 100644 index 0000000..6f4337f --- /dev/null +++ b/internal/config/config_accessrequest.go @@ -0,0 +1,24 @@ +package config + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/validation/field" +) + +type AccessRequestConfig struct { + // If set, only AccessRequests that match the selector will be reconciled. + Selector *Selector `json:"selector,omitempty"` +} + +func (c *AccessRequestConfig) Validate(fldPath *field.Path) error { + return c.Selector.Validate(fldPath.Child("selector")) +} + +func (c *AccessRequestConfig) Complete(fldPath *field.Path) error { + if err := c.Selector.Complete(fldPath.Child("selector")); err != nil { + return fmt.Errorf("error completing selector: %w", err) + } + + return nil +} diff --git a/internal/config/config_scheduler.go b/internal/config/config_scheduler.go index 0f6165a..6bba750 100644 --- a/internal/config/config_scheduler.go +++ b/internal/config/config_scheduler.go @@ -2,7 +2,6 @@ package config import ( "fmt" - "maps" "slices" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,12 +23,11 @@ type SchedulerConfig struct { // Defaults to "Balanced". Strategy Strategy `json:"strategy"` - // +optional - Selectors *SchedulerSelectors `json:"selectors,omitempty"` - // Note that CompletedSelectors.Clusters holds the global cluster selector. + // Note that the cluster selector specified here holds the global cluster selector. // During Complete(), the local selector is merged with the global one (or set to the global one if nil). // This means that always the local completed selector should be used, unless the task is not tied to a specific ClusterDefinition. 
- CompletedSelectors CompletedSchedulerSelectors `json:"-"` + // +optional + Selectors SchedulerSelectors `json:"selectors"` PurposeMappings map[string]*ClusterDefinition `json:"purposeMappings"` } @@ -55,9 +53,8 @@ type ClusterDefinition struct { // Must be equal to or greater than 0 otherwise, with 0 meaning "unlimited". TenancyCount int `json:"tenancyCount,omitempty"` - Template ClusterTemplate `json:"template"` - Selector *metav1.LabelSelector `json:"selector,omitempty"` - CompletedSelector labels.Selector `json:"-"` + Template ClusterTemplate `json:"template"` + Selector *Selector `json:"selector,omitempty"` } type ClusterTemplate struct { @@ -66,12 +63,8 @@ type ClusterTemplate struct { } type SchedulerSelectors struct { - Clusters *metav1.LabelSelector `json:"clusters,omitempty"` - Requests *metav1.LabelSelector `json:"requests,omitempty"` -} -type CompletedSchedulerSelectors struct { - Clusters labels.Selector - Requests labels.Selector + Clusters *Selector `json:"clusters,omitempty"` + Requests *Selector `json:"requests,omitempty"` } func (c *SchedulerConfig) Default(_ *field.Path) error { @@ -102,21 +95,17 @@ func (c *SchedulerConfig) Validate(fldPath *field.Path) error { // validate label selectors var cls labels.Selector - if c.Selectors != nil { - if c.Selectors.Clusters != nil { - var err error - cls, err = metav1.LabelSelectorAsSelector(c.Selectors.Clusters) - if err != nil { - errs = append(errs, field.Invalid(fldPath.Child("selectors").Child("clusters"), c.Selectors.Clusters, err.Error())) - } - } - if c.Selectors.Requests != nil { - _, err := metav1.LabelSelectorAsSelector(c.Selectors.Requests) - if err != nil { - errs = append(errs, field.Invalid(fldPath.Child("selectors").Child("requests"), c.Selectors.Requests, err.Error())) - } + if c.Selectors.Clusters != nil && c.Selectors.Clusters.LabelSelector != nil { + var err error + cls, err = metav1.LabelSelectorAsSelector(c.Selectors.Clusters.LabelSelector) + if err != nil { + errs = append(errs, 
field.Invalid(fldPath.Child("selectors").Child("clusters"), c.Selectors.Clusters, err.Error())) } } + err := c.Selectors.Requests.Validate(fldPath.Child("selectors").Child("requests")) + if err != nil { + errs = append(errs, err.(*field.Error)) + } // validate purpose mappings validTenancies := []string{string(clustersv1alpha1.TENANCY_EXCLUSIVE), string(clustersv1alpha1.TENANCY_SHARED)} @@ -150,9 +139,9 @@ func (c *SchedulerConfig) Validate(fldPath *field.Path) error { errs = append(errs, field.Invalid(pPath.Child("template").Child("metadata").Child("labels"), definition.Template.Labels, "labels do not match specified global cluster selector")) } var lcls labels.Selector - if definition.Selector != nil { + if definition.Selector != nil && definition.Selector.LabelSelector != nil { var err error - lcls, err = metav1.LabelSelectorAsSelector(definition.Selector) + lcls, err = metav1.LabelSelectorAsSelector(definition.Selector.LabelSelector) if err != nil { errs = append(errs, field.Invalid(pPath.Child("selector"), definition.Selector, err.Error())) } @@ -165,57 +154,19 @@ func (c *SchedulerConfig) Validate(fldPath *field.Path) error { } func (c *SchedulerConfig) Complete(fldPath *field.Path) error { - if c.Selectors != nil { - if c.Selectors.Clusters != nil { - var err error - c.CompletedSelectors.Clusters, err = metav1.LabelSelectorAsSelector(c.Selectors.Clusters) - if err != nil { - return field.Invalid(fldPath.Child("selectors").Child("clusters"), c.Selectors.Clusters, err.Error()) - } - } - if c.Selectors.Requests != nil { - var err error - c.CompletedSelectors.Requests, err = metav1.LabelSelectorAsSelector(c.Selectors.Requests) - if err != nil { - return field.Invalid(fldPath.Child("selectors").Child("requests"), c.Selectors.Requests, err.Error()) - } - } + if err := c.Selectors.Clusters.Complete(fldPath.Child("selectors").Child("clusters")); err != nil { + return err } - if c.CompletedSelectors.Clusters == nil { - c.CompletedSelectors.Clusters = 
labels.Everything() - } - if c.CompletedSelectors.Requests == nil { - c.CompletedSelectors.Requests = labels.Everything() + if err := c.Selectors.Requests.Complete(fldPath.Child("selectors").Child("requests")); err != nil { + return err } for purpose, definition := range c.PurposeMappings { pPath := fldPath.Child("purposeMappings").Key(purpose) - if definition.Selector != nil { - var combinedSelector *metav1.LabelSelector - if c.Selectors.Clusters == nil { - combinedSelector = definition.Selector - } else if definition.Selector == nil { - combinedSelector = c.Selectors.Clusters - } else { - combinedSelector = c.Selectors.Clusters.DeepCopy() - if combinedSelector.MatchLabels == nil { - combinedSelector.MatchLabels = definition.Selector.MatchLabels - } else if definition.Selector.MatchLabels != nil { - maps.Insert(combinedSelector.MatchLabels, maps.All(definition.Selector.MatchLabels)) - } - if combinedSelector.MatchExpressions == nil { - combinedSelector.MatchExpressions = definition.Selector.MatchExpressions - } else if definition.Selector.MatchExpressions != nil { - combinedSelector.MatchExpressions = append(combinedSelector.MatchExpressions, definition.Selector.MatchExpressions...) 
- } - } - var err error - definition.CompletedSelector, err = metav1.LabelSelectorAsSelector(combinedSelector) - if err != nil { - return field.Invalid(pPath.Child("selector"), combinedSelector, fmt.Sprintf("the combination of the global and local selector is invalid: %s", err.Error())) - } - } else { - definition.CompletedSelector = c.CompletedSelectors.Clusters + var err error + definition.Selector, err = definition.Selector.Combine(c.Selectors.Clusters) + if err != nil { + return field.Invalid(pPath.Child("selector"), definition.Selector, fmt.Sprintf("the combination of the global and local selector is invalid: %s", err.Error())) } } diff --git a/internal/config/selector.go b/internal/config/selector.go new file mode 100644 index 0000000..d4ff1b4 --- /dev/null +++ b/internal/config/selector.go @@ -0,0 +1,110 @@ +package config + +import ( + "maps" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +type Selector struct { + *metav1.LabelSelector `json:",inline"` + completed labels.Selector `json:"-"` +} + +func (s *Selector) Validate(fldPath *field.Path) error { + if s == nil || s.LabelSelector == nil { + return nil + } + _, err := metav1.LabelSelectorAsSelector(s.LabelSelector) + if err != nil { + return field.Invalid(fldPath, s, err.Error()) + } + return nil +} + +func (s *Selector) Complete(fldPath *field.Path) error { + if s == nil { + return nil + } + if s.LabelSelector == nil { + s.completed = labels.Everything() + return nil + } + var err error + s.completed, err = metav1.LabelSelectorAsSelector(s.LabelSelector) + if err != nil { + return field.Invalid(fldPath, s, err.Error()) + } + return nil +} + +// Completed returns the labels.Selector version of the selector. +// Returns a selector that matches everything if the selector is nil, empty, or has not been completed yet. 
+func (s *Selector) Completed() labels.Selector { + if s == nil || s.completed == nil { + return labels.Everything() + } + return s.completed +} + +// Combine returns a new selector that is a combination of the two selectors. +// Neither the original nor the other selector is modified. +// For the MatchLabels field, entries from other overwrite entries from the receiver object in case of key collisions. +// Note that the requirements of both selectors are ANDed. This can lead to selectors that cannot be satisfied. +// The returned selector is completed, even if neither the receiver nor the other one was. +func (s *Selector) Combine(other *Selector) (*Selector, error) { + if s == nil || s.LabelSelector == nil { + if other == nil || other.LabelSelector == nil { + return nil, nil + } + res := other.DeepCopy() + var err error + if res.completed == nil { + err = res.Complete(nil) + } + return res, err + } + res := s.DeepCopy() + if other == nil || other.LabelSelector == nil { + var err error + if res.completed == nil { + err = res.Complete(nil) + } + return res, err + } + + if len(res.MatchLabels) == 0 { + if len(other.MatchLabels) > 0 { + res.MatchLabels = make(map[string]string, len(other.MatchLabels)) + maps.Copy(res.MatchLabels, other.MatchLabels) + } + } else if len(other.MatchLabels) > 0 { + maps.Insert(res.MatchLabels, maps.All(other.MatchLabels)) + } + + if len(res.MatchExpressions) == 0 { + if len(other.MatchExpressions) > 0 { + res.MatchExpressions = make([]metav1.LabelSelectorRequirement, len(other.MatchExpressions)) + copy(res.MatchExpressions, other.MatchExpressions) + } + } else if len(other.MatchExpressions) > 0 { + for _, ome := range other.MatchExpressions { + res.MatchExpressions = append(res.MatchExpressions, *ome.DeepCopy()) + } + } + + err := res.Complete(nil) + return res, err +} + +func (s *Selector) DeepCopy() *Selector { + if s == nil { + return nil + } + return &Selector{ + LabelSelector: s.LabelSelector.DeepCopy(), + completed: s.completed, + 
} +} diff --git a/internal/controllers/accessrequest/controller.go b/internal/controllers/accessrequest/controller.go new file mode 100644 index 0000000..329ba65 --- /dev/null +++ b/internal/controllers/accessrequest/controller.go @@ -0,0 +1,203 @@ +package accessrequest + +import ( + "context" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/openmcp-project/controller-utils/pkg/clusters" + ctrlutils "github.com/openmcp-project/controller-utils/pkg/controller" + errutils "github.com/openmcp-project/controller-utils/pkg/errors" + "github.com/openmcp-project/controller-utils/pkg/logging" + + clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" + cconst "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1/constants" + apiconst "github.com/openmcp-project/openmcp-operator/api/constants" + "github.com/openmcp-project/openmcp-operator/internal/config" +) + +const ControllerName = "AccessRequest" + +func NewAccessRequestReconciler(platformCluster *clusters.Cluster, cfg *config.AccessRequestConfig) *AccessRequestReconciler { + if cfg == nil { + cfg = &config.AccessRequestConfig{} + } + return &AccessRequestReconciler{ + PlatformCluster: platformCluster, + Config: cfg, + } +} + +type AccessRequestReconciler struct { + PlatformCluster *clusters.Cluster + Config *config.AccessRequestConfig +} + +var _ reconcile.Reconciler = &AccessRequestReconciler{} + +type ReconcileResult = ctrlutils.ReconcileResult[*clustersv1alpha1.AccessRequest, clustersv1alpha1.ConditionStatus] + +func (r *AccessRequestReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + log := logging.FromContextOrPanic(ctx).WithName(ControllerName) + ctx = logging.NewContext(ctx, log) + log.Info("Starting reconcile") + rr := 
r.reconcile(ctx, req) + // status update + return ctrlutils.NewStatusUpdaterBuilder[*clustersv1alpha1.AccessRequest, clustersv1alpha1.RequestPhase, clustersv1alpha1.ConditionStatus](). + WithNestedStruct("CommonStatus"). + WithFieldOverride(ctrlutils.STATUS_FIELD_PHASE, "Phase"). + WithoutFields(ctrlutils.STATUS_FIELD_CONDITIONS). + WithPhaseUpdateFunc(func(obj *clustersv1alpha1.AccessRequest, rr ReconcileResult) (clustersv1alpha1.RequestPhase, error) { + return clustersv1alpha1.REQUEST_PENDING, nil + }). + Build(). + UpdateStatus(ctx, r.PlatformCluster.Client(), rr) +} + +func (r *AccessRequestReconciler) reconcile(ctx context.Context, req reconcile.Request) ReconcileResult { + log := logging.FromContextOrPanic(ctx) + // get AccessRequest resource + ar := &clustersv1alpha1.AccessRequest{} + if err := r.PlatformCluster.Client().Get(ctx, req.NamespacedName, ar); err != nil { + if apierrors.IsNotFound(err) { + log.Info("Resource not found") + return ReconcileResult{} + } + return ReconcileResult{ReconcileError: errutils.WithReason(fmt.Errorf("unable to get resource '%s' from cluster: %w", req.String(), err), cconst.ReasonPlatformClusterInteractionProblem)} + } + + // handle operation annotation + if ar.GetAnnotations() != nil { + op, ok := ar.GetAnnotations()[apiconst.OperationAnnotation] + if ok { + switch op { + case apiconst.OperationAnnotationValueIgnore: + log.Info("Ignoring resource due to ignore operation annotation") + return ReconcileResult{} + case apiconst.OperationAnnotationValueReconcile: + log.Debug("Removing reconcile operation annotation from resource") + if err := ctrlutils.EnsureAnnotation(ctx, r.PlatformCluster.Client(), ar, apiconst.OperationAnnotation, "", true, ctrlutils.DELETE); err != nil { + return ReconcileResult{ReconcileError: errutils.WithReason(fmt.Errorf("error removing operation annotation: %w", err), cconst.ReasonPlatformClusterInteractionProblem)} + } + } + } + } + + rr := ReconcileResult{ + Object: ar, + } + + // get Cluster that 
the request refers to + c := &clustersv1alpha1.Cluster{} + if ar.Spec.ClusterRef != nil { + c.SetName(ar.Spec.ClusterRef.Name) + c.SetNamespace(ar.Spec.ClusterRef.Namespace) + log.Debug("Cluster is referenced in AccessRequest", "clusterName", c.Name, "clusterNamespace", c.Namespace) + } else if ar.Spec.RequestRef != nil { + // fetch request to lookup the cluster reference + cr := &clustersv1alpha1.ClusterRequest{} + cr.SetName(ar.Spec.RequestRef.Name) + cr.SetNamespace(ar.Spec.RequestRef.Namespace) + log.Debug("ClusterRequest is referenced in AccessRequest", "clusterRequestName", cr.Name, "clusterRequestNamespace", cr.Namespace) + if err := r.PlatformCluster.Client().Get(ctx, client.ObjectKeyFromObject(cr), cr); err != nil { + if apierrors.IsNotFound(err) { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("ClusterRequest '%s/%s' not found", cr.Namespace, cr.Name), cconst.ReasonInvalidReference) + return rr + } + rr.ReconcileError = errutils.WithReason(fmt.Errorf("unable to get ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err), cconst.ReasonPlatformClusterInteractionProblem) + return rr + } + if cr.Status.Phase != clustersv1alpha1.REQUEST_GRANTED { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("ClusterRequest '%s/%s' is not granted", cr.Namespace, cr.Name), cconst.ReasonInvalidReference) + return rr + } + if cr.Status.Cluster == nil { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("ClusterRequest '%s/%s' is granted but does not reference a cluster", cr.Namespace, cr.Name), cconst.ReasonInternalError) + return rr + } + c.SetName(cr.Status.Cluster.Name) + c.SetNamespace(cr.Status.Cluster.Namespace) + log.Debug("Retrieved Cluster reference from ClusterRequest", "clusterName", c.Name, "clusterNamespace", c.Namespace) + } else { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("invalid AccessRequest resource '%s/%s': neither clusterRef nor requestRef is set", ar.Namespace, ar.Name), cconst.ReasonConfigurationProblem) + return rr + } + + // 
fetch Cluster resource + if err := r.PlatformCluster.Client().Get(ctx, client.ObjectKeyFromObject(c), c); err != nil { + if apierrors.IsNotFound(err) { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("referenced Cluster '%s/%s' not found", c.Namespace, c.Name), cconst.ReasonInvalidReference) + return rr + } + rr.ReconcileError = errutils.WithReason(fmt.Errorf("unable to get Cluster '%s/%s': %w", c.Namespace, c.Name, err), cconst.ReasonPlatformClusterInteractionProblem) + return rr + } + + // fetch ClusterProfile resource + cp := &clustersv1alpha1.ClusterProfile{} + cp.SetName(c.Spec.Profile) + log.Debug("Fetching ClusterProfile", "profileName", cp.Name) + if err := r.PlatformCluster.Client().Get(ctx, client.ObjectKeyFromObject(cp), cp); err != nil { + if apierrors.IsNotFound(err) { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("ClusterProfile '%s' not found", cp.Name), cconst.ReasonInvalidReference) + return rr + } + rr.ReconcileError = errutils.WithReason(fmt.Errorf("unable to get ClusterProfile '%s': %w", cp.Name, err), cconst.ReasonPlatformClusterInteractionProblem) + return rr + } + + log.Info("Identified responsible ClusterProvider", "providerName", cp.Spec.ProviderRef.Name, "profileName", cp.Name) + arOld := ar.DeepCopy() + if err := ctrlutils.EnsureLabel(ctx, nil, ar, clustersv1alpha1.ProviderLabel, cp.Spec.ProviderRef.Name, false); err != nil { + if e, ok := err.(*ctrlutils.MetadataEntryAlreadyExistsError); ok { + log.Error(err, "label already set on resource with a different value", "label", e.Key, "resource", req.String(), "actualValue", e.ActualValue, "desiredValue", e.DesiredValue) + return rr + } + } + if err := ctrlutils.EnsureLabel(ctx, nil, ar, clustersv1alpha1.ProfileLabel, cp.Name, false); err != nil { + if e, ok := err.(*ctrlutils.MetadataEntryAlreadyExistsError); ok { + log.Error(err, "label already set on resource with a different value", "label", e.Key, "resource", req.String(), "actualValue", e.ActualValue, "desiredValue", e.DesiredValue) + return rr
+ } + } + + // set cluster reference, if only the request reference is set + if ar.Spec.ClusterRef == nil { + ar.Spec.ClusterRef = &clustersv1alpha1.NamespacedObjectReference{} + ar.Spec.ClusterRef.Name = c.Name + ar.Spec.ClusterRef.Namespace = c.Namespace + } + + if err := r.PlatformCluster.Client().Patch(ctx, ar, client.MergeFrom(arOld)); err != nil { + rr.ReconcileError = errutils.WithReason(fmt.Errorf("error patching resource '%s': %w", req.String(), err), cconst.ReasonPlatformClusterInteractionProblem) + return rr + } + + return rr +} + +// SetupWithManager sets up the controller with the Manager. +func (r *AccessRequestReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + // watch AccessRequest resources + For(&clustersv1alpha1.AccessRequest{}). + WithEventFilter(predicate.And( + // ignore resources that already have the provider AND profile label set + predicate.Not(predicate.And( + ctrlutils.HasLabelPredicate(clustersv1alpha1.ProviderLabel, ""), + ctrlutils.HasLabelPredicate(clustersv1alpha1.ProfileLabel, ""), + )), + ctrlutils.LabelSelectorPredicate(r.Config.Selector.Completed()), + predicate.Or( + ctrlutils.GotAnnotationPredicate(apiconst.OperationAnnotation, apiconst.OperationAnnotationValueReconcile), + ctrlutils.LostAnnotationPredicate(apiconst.OperationAnnotation, apiconst.OperationAnnotationValueIgnore), + ), + predicate.Not(ctrlutils.HasAnnotationPredicate(apiconst.OperationAnnotation, apiconst.OperationAnnotationValueIgnore)), + )). + Complete(r) +} diff --git a/internal/controllers/accessrequest/controller_test.go b/internal/controllers/accessrequest/controller_test.go new file mode 100644 index 0000000..0011868 --- /dev/null +++ b/internal/controllers/accessrequest/controller_test.go @@ -0,0 +1,148 @@ +package accessrequest_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/openmcp-project/controller-utils/pkg/clusters" + ctrlutils "github.com/openmcp-project/controller-utils/pkg/controller" + testutils "github.com/openmcp-project/controller-utils/pkg/testing" + + clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" + cconst "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1/constants" + "github.com/openmcp-project/openmcp-operator/api/install" + "github.com/openmcp-project/openmcp-operator/internal/controllers/accessrequest" +) + +var scheme = install.InstallOperatorAPIs(runtime.NewScheme()) + +func arReconciler(c client.Client) reconcile.Reconciler { + return accessrequest.NewAccessRequestReconciler(clusters.NewTestClusterFromClient("platform", c), nil) +} + +var _ = Describe("AccessRequest Controller", func() { + + It("should add the correct labels to the AccessRequest if a Cluster is referenced directly", func() { + env := testutils.NewEnvironmentBuilder().WithFakeClient(scheme).WithInitObjectPath("testdata", "test-01").WithReconcilerConstructor(arReconciler).Build() + ar := &clustersv1alpha1.AccessRequest{} + Expect(env.Client().Get(env.Ctx, ctrlutils.ObjectKey("mc-access", "bar"), ar)).To(Succeed()) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProviderLabel)) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProfileLabel)) + env.ShouldReconcile(testutils.RequestFromObject(ar)) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar.Labels).To(HaveKeyWithValue(clustersv1alpha1.ProviderLabel, "asdf")) + Expect(ar.Labels).To(HaveKeyWithValue(clustersv1alpha1.ProfileLabel, "default")) + }) + + It("should add the correct labels and cluster reference to the AccessRequest if a Cluster is referenced via a ClusterRequest", func() { + env := 
testutils.NewEnvironmentBuilder().WithFakeClient(scheme).WithInitObjectPath("testdata", "test-01").WithReconcilerConstructor(arReconciler).Build() + ar := &clustersv1alpha1.AccessRequest{} + Expect(env.Client().Get(env.Ctx, ctrlutils.ObjectKey("mcr-access", "bar"), ar)).To(Succeed()) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProviderLabel)) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProfileLabel)) + Expect(ar.Spec.ClusterRef).To(BeNil()) + env.ShouldReconcile(testutils.RequestFromObject(ar)) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar.Labels).To(HaveKeyWithValue(clustersv1alpha1.ProviderLabel, "asdf")) + Expect(ar.Labels).To(HaveKeyWithValue(clustersv1alpha1.ProfileLabel, "default")) + Expect(ar.Spec.ClusterRef).ToNot(BeNil()) + Expect(ar.Spec.ClusterRef.Name).To(Equal("my-cluster")) + Expect(ar.Spec.ClusterRef.Namespace).To(Equal("foo")) + }) + + It("should fail if the AccessRequest references a ClusterRequest which is not Granted", func() { + env := testutils.NewEnvironmentBuilder().WithFakeClient(scheme).WithInitObjectPath("testdata", "test-02").WithReconcilerConstructor(arReconciler).Build() + ar := &clustersv1alpha1.AccessRequest{} + Expect(env.Client().Get(env.Ctx, ctrlutils.ObjectKey("mcr-access", "bar"), ar)).To(Succeed()) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProviderLabel)) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProfileLabel)) + Expect(ar.Spec.ClusterRef).To(BeNil()) + env.ShouldNotReconcile(testutils.RequestFromObject(ar)) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar.Status.Message).To(ContainSubstring("not granted")) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProviderLabel)) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProfileLabel)) + Expect(ar.Spec.ClusterRef).To(BeNil()) + }) + + It("should fail if the AccessRequest references an unknown Cluster or ClusterRequest", func() { + env := 
testutils.NewEnvironmentBuilder().WithFakeClient(scheme).WithInitObjectPath("testdata", "test-03").WithReconcilerConstructor(arReconciler).Build() + + ar := &clustersv1alpha1.AccessRequest{} + Expect(env.Client().Get(env.Ctx, ctrlutils.ObjectKey("mc-access", "bar"), ar)).To(Succeed()) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProviderLabel)) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProfileLabel)) + env.ShouldNotReconcile(testutils.RequestFromObject(ar)) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar.Status.Reason).To(Equal(cconst.ReasonInvalidReference)) + Expect(ar.Status.Message).To(ContainSubstring("not found")) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProviderLabel)) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProfileLabel)) + + ar = &clustersv1alpha1.AccessRequest{} + Expect(env.Client().Get(env.Ctx, ctrlutils.ObjectKey("mcr-access", "bar"), ar)).To(Succeed()) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProviderLabel)) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProfileLabel)) + Expect(ar.Spec.ClusterRef).To(BeNil()) + env.ShouldNotReconcile(testutils.RequestFromObject(ar)) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar.Status.Reason).To(Equal(cconst.ReasonInvalidReference)) + Expect(ar.Status.Message).To(ContainSubstring("not found")) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProviderLabel)) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProfileLabel)) + Expect(ar.Spec.ClusterRef).To(BeNil()) + }) + + It("should add the respective other label if either provider or profile label is already set", func() { + env := testutils.NewEnvironmentBuilder().WithFakeClient(scheme).WithInitObjectPath("testdata", "test-04").WithReconcilerConstructor(arReconciler).Build() + + ar := &clustersv1alpha1.AccessRequest{} + Expect(env.Client().Get(env.Ctx, ctrlutils.ObjectKey("mc-access-provider", "bar"), 
ar)).To(Succeed()) + Expect(ar.Labels).To(HaveKey(clustersv1alpha1.ProviderLabel)) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProfileLabel)) + env.ShouldReconcile(testutils.RequestFromObject(ar)) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar.Labels).To(HaveKeyWithValue(clustersv1alpha1.ProviderLabel, "asdf")) + Expect(ar.Labels).To(HaveKeyWithValue(clustersv1alpha1.ProfileLabel, "default")) + + ar = &clustersv1alpha1.AccessRequest{} + Expect(env.Client().Get(env.Ctx, ctrlutils.ObjectKey("mc-access-profile", "bar"), ar)).To(Succeed()) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProviderLabel)) + Expect(ar.Labels).To(HaveKey(clustersv1alpha1.ProfileLabel)) + env.ShouldReconcile(testutils.RequestFromObject(ar)) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar.Labels).To(HaveKeyWithValue(clustersv1alpha1.ProviderLabel, "asdf")) + Expect(ar.Labels).To(HaveKeyWithValue(clustersv1alpha1.ProfileLabel, "default")) + }) + + It("should not overwrite either label if already set to a different value", func() { + env := testutils.NewEnvironmentBuilder().WithFakeClient(scheme).WithInitObjectPath("testdata", "test-05").WithReconcilerConstructor(arReconciler).Build() + + ar := &clustersv1alpha1.AccessRequest{} + Expect(env.Client().Get(env.Ctx, ctrlutils.ObjectKey("mcr-access-provider", "bar"), ar)).To(Succeed()) + Expect(ar.Labels).To(HaveKey(clustersv1alpha1.ProviderLabel)) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProfileLabel)) + Expect(ar.Spec.ClusterRef).To(BeNil()) + env.ShouldReconcile(testutils.RequestFromObject(ar)) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar.Labels).ToNot(HaveKeyWithValue(clustersv1alpha1.ProviderLabel, "asdf")) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProfileLabel)) + Expect(ar.Spec.ClusterRef).To(BeNil()) + + ar = &clustersv1alpha1.AccessRequest{} + 
Expect(env.Client().Get(env.Ctx, ctrlutils.ObjectKey("mcr-access-profile", "bar"), ar)).To(Succeed()) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProviderLabel)) + Expect(ar.Labels).To(HaveKey(clustersv1alpha1.ProfileLabel)) + Expect(ar.Spec.ClusterRef).To(BeNil()) + env.ShouldReconcile(testutils.RequestFromObject(ar)) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar.Labels).ToNot(HaveKey(clustersv1alpha1.ProviderLabel)) + Expect(ar.Labels).ToNot(HaveKeyWithValue(clustersv1alpha1.ProfileLabel, "default")) + Expect(ar.Spec.ClusterRef).To(BeNil()) + }) + +}) diff --git a/internal/controllers/accessrequest/suite_test.go b/internal/controllers/accessrequest/suite_test.go new file mode 100644 index 0000000..2ecd65b --- /dev/null +++ b/internal/controllers/accessrequest/suite_test.go @@ -0,0 +1,14 @@ +package accessrequest_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestAccessRequest(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "AccessRequest Controller Test Suite") +} diff --git a/internal/controllers/accessrequest/testdata/test-01/accessrequest-direct.yaml b/internal/controllers/accessrequest/testdata/test-01/accessrequest-direct.yaml new file mode 100644 index 0000000..32cc6c3 --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-01/accessrequest-direct.yaml @@ -0,0 +1,17 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: AccessRequest +metadata: + name: mc-access + namespace: bar +spec: + clusterRef: + name: my-cluster + namespace: foo + permissions: + - rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" diff --git a/internal/controllers/accessrequest/testdata/test-01/accessrequest-indirect.yaml b/internal/controllers/accessrequest/testdata/test-01/accessrequest-indirect.yaml new file mode 100644 index 0000000..aae21d4 --- /dev/null +++ 
b/internal/controllers/accessrequest/testdata/test-01/accessrequest-indirect.yaml @@ -0,0 +1,17 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: AccessRequest +metadata: + name: mcr-access + namespace: bar +spec: + requestRef: + name: my-cluster + namespace: foo + permissions: + - rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" diff --git a/internal/controllers/accessrequest/testdata/test-01/cluster.yaml b/internal/controllers/accessrequest/testdata/test-01/cluster.yaml new file mode 100644 index 0000000..929b89e --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-01/cluster.yaml @@ -0,0 +1,13 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: Cluster +metadata: + name: my-cluster + namespace: foo +spec: + profile: default + kubernetes: + version: "1.32" + purposes: + - test + tenancy: Exclusive + diff --git a/internal/controllers/accessrequest/testdata/test-01/clusterprofile.yaml b/internal/controllers/accessrequest/testdata/test-01/clusterprofile.yaml new file mode 100644 index 0000000..78ab0e5 --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-01/clusterprofile.yaml @@ -0,0 +1,18 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: ClusterProfile +metadata: + generation: 1 + name: default +spec: + environment: default + providerConfigRef: + name: foobar + providerRef: + name: asdf + supportedVersions: + - version: 1.32.2 + - version: 1.31.7 + - deprecated: true + version: 1.31.6 + - deprecated: true + version: 1.31.5 diff --git a/internal/controllers/accessrequest/testdata/test-01/clusterrequest.yaml b/internal/controllers/accessrequest/testdata/test-01/clusterrequest.yaml new file mode 100644 index 0000000..3184973 --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-01/clusterrequest.yaml @@ -0,0 +1,12 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: ClusterRequest +metadata: + name: my-cluster + namespace: foo +spec: + purpose: test +status: + phase: Granted + cluster: + 
name: my-cluster + namespace: foo diff --git a/internal/controllers/accessrequest/testdata/test-02/accessrequest-indirect.yaml b/internal/controllers/accessrequest/testdata/test-02/accessrequest-indirect.yaml new file mode 100644 index 0000000..aae21d4 --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-02/accessrequest-indirect.yaml @@ -0,0 +1,17 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: AccessRequest +metadata: + name: mcr-access + namespace: bar +spec: + requestRef: + name: my-cluster + namespace: foo + permissions: + - rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" diff --git a/internal/controllers/accessrequest/testdata/test-02/clusterrequest.yaml b/internal/controllers/accessrequest/testdata/test-02/clusterrequest.yaml new file mode 100644 index 0000000..0aff6ed --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-02/clusterrequest.yaml @@ -0,0 +1,9 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: ClusterRequest +metadata: + name: my-cluster + namespace: foo +spec: + purpose: test +status: + phase: Denied diff --git a/internal/controllers/accessrequest/testdata/test-03/accessrequest-direct.yaml b/internal/controllers/accessrequest/testdata/test-03/accessrequest-direct.yaml new file mode 100644 index 0000000..32cc6c3 --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-03/accessrequest-direct.yaml @@ -0,0 +1,17 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: AccessRequest +metadata: + name: mc-access + namespace: bar +spec: + clusterRef: + name: my-cluster + namespace: foo + permissions: + - rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" diff --git a/internal/controllers/accessrequest/testdata/test-03/accessrequest-indirect.yaml b/internal/controllers/accessrequest/testdata/test-03/accessrequest-indirect.yaml new file mode 100644 index 0000000..aae21d4 --- /dev/null +++ 
b/internal/controllers/accessrequest/testdata/test-03/accessrequest-indirect.yaml @@ -0,0 +1,17 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: AccessRequest +metadata: + name: mcr-access + namespace: bar +spec: + requestRef: + name: my-cluster + namespace: foo + permissions: + - rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" diff --git a/internal/controllers/accessrequest/testdata/test-04/accessrequest-profile.yaml b/internal/controllers/accessrequest/testdata/test-04/accessrequest-profile.yaml new file mode 100644 index 0000000..b4d4a52 --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-04/accessrequest-profile.yaml @@ -0,0 +1,19 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: AccessRequest +metadata: + name: mc-access-profile + namespace: bar + labels: + profile.clusters.openmcp.cloud: default +spec: + clusterRef: + name: my-cluster + namespace: foo + permissions: + - rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" diff --git a/internal/controllers/accessrequest/testdata/test-04/accessrequest-provider.yaml b/internal/controllers/accessrequest/testdata/test-04/accessrequest-provider.yaml new file mode 100644 index 0000000..e216ad7 --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-04/accessrequest-provider.yaml @@ -0,0 +1,19 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: AccessRequest +metadata: + name: mc-access-provider + namespace: bar + labels: + provider.clusters.openmcp.cloud: asdf +spec: + clusterRef: + name: my-cluster + namespace: foo + permissions: + - rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" diff --git a/internal/controllers/accessrequest/testdata/test-04/cluster.yaml b/internal/controllers/accessrequest/testdata/test-04/cluster.yaml new file mode 100644 index 0000000..929b89e --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-04/cluster.yaml @@ -0,0 +1,13 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: 
Cluster +metadata: + name: my-cluster + namespace: foo +spec: + profile: default + kubernetes: + version: "1.32" + purposes: + - test + tenancy: Exclusive + diff --git a/internal/controllers/accessrequest/testdata/test-04/clusterprofile.yaml b/internal/controllers/accessrequest/testdata/test-04/clusterprofile.yaml new file mode 100644 index 0000000..78ab0e5 --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-04/clusterprofile.yaml @@ -0,0 +1,18 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: ClusterProfile +metadata: + generation: 1 + name: default +spec: + environment: default + providerConfigRef: + name: foobar + providerRef: + name: asdf + supportedVersions: + - version: 1.32.2 + - version: 1.31.7 + - deprecated: true + version: 1.31.6 + - deprecated: true + version: 1.31.5 diff --git a/internal/controllers/accessrequest/testdata/test-05/accessrequest-profile.yaml b/internal/controllers/accessrequest/testdata/test-05/accessrequest-profile.yaml new file mode 100644 index 0000000..59d272f --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-05/accessrequest-profile.yaml @@ -0,0 +1,19 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: AccessRequest +metadata: + name: mcr-access-profile + namespace: bar + labels: + profile.clusters.openmcp.cloud: wrong +spec: + requestRef: + name: my-cluster + namespace: foo + permissions: + - rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" diff --git a/internal/controllers/accessrequest/testdata/test-05/accessrequest-provider.yaml b/internal/controllers/accessrequest/testdata/test-05/accessrequest-provider.yaml new file mode 100644 index 0000000..91c6e99 --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-05/accessrequest-provider.yaml @@ -0,0 +1,19 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: AccessRequest +metadata: + name: mcr-access-provider + namespace: bar + labels: + provider.clusters.openmcp.cloud: wrong +spec: + requestRef: + name: 
my-cluster + namespace: foo + permissions: + - rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" diff --git a/internal/controllers/accessrequest/testdata/test-05/cluster.yaml b/internal/controllers/accessrequest/testdata/test-05/cluster.yaml new file mode 100644 index 0000000..929b89e --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-05/cluster.yaml @@ -0,0 +1,13 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: Cluster +metadata: + name: my-cluster + namespace: foo +spec: + profile: default + kubernetes: + version: "1.32" + purposes: + - test + tenancy: Exclusive + diff --git a/internal/controllers/accessrequest/testdata/test-05/clusterprofile.yaml b/internal/controllers/accessrequest/testdata/test-05/clusterprofile.yaml new file mode 100644 index 0000000..78ab0e5 --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-05/clusterprofile.yaml @@ -0,0 +1,18 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: ClusterProfile +metadata: + generation: 1 + name: default +spec: + environment: default + providerConfigRef: + name: foobar + providerRef: + name: asdf + supportedVersions: + - version: 1.32.2 + - version: 1.31.7 + - deprecated: true + version: 1.31.6 + - deprecated: true + version: 1.31.5 diff --git a/internal/controllers/accessrequest/testdata/test-05/clusterrequest.yaml b/internal/controllers/accessrequest/testdata/test-05/clusterrequest.yaml new file mode 100644 index 0000000..3184973 --- /dev/null +++ b/internal/controllers/accessrequest/testdata/test-05/clusterrequest.yaml @@ -0,0 +1,12 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: ClusterRequest +metadata: + name: my-cluster + namespace: foo +spec: + purpose: test +status: + phase: Granted + cluster: + name: my-cluster + namespace: foo diff --git a/internal/controllers/scheduler/controller.go b/internal/controllers/scheduler/controller.go index 7d2edeb..8fe108c 100644 --- a/internal/controllers/scheduler/controller.go +++ 
b/internal/controllers/scheduler/controller.go @@ -23,6 +23,7 @@ import ( clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" cconst "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1/constants" + apiconst "github.com/openmcp-project/openmcp-operator/api/constants" "github.com/openmcp-project/openmcp-operator/internal/config" ) @@ -57,7 +58,7 @@ func (r *ClusterScheduler) Reconcile(ctx context.Context, req reconcile.Request) log := logging.FromContextOrPanic(ctx).WithName(ControllerName) ctx = logging.NewContext(ctx, log) log.Info("Starting reconcile") - rr := r.reconcile(ctx, log, req) + rr := r.reconcile(ctx, req) // status update return ctrlutils.NewStatusUpdaterBuilder[*clustersv1alpha1.ClusterRequest, clustersv1alpha1.RequestPhase, clustersv1alpha1.ConditionStatus](). WithNestedStruct("CommonStatus"). @@ -73,7 +74,8 @@ func (r *ClusterScheduler) Reconcile(ctx context.Context, req reconcile.Request) UpdateStatus(ctx, r.PlatformCluster.Client(), rr) } -func (r *ClusterScheduler) reconcile(ctx context.Context, log logging.Logger, req reconcile.Request) ReconcileResult { +func (r *ClusterScheduler) reconcile(ctx context.Context, req reconcile.Request) ReconcileResult { + log := logging.FromContextOrPanic(ctx) // get ClusterRequest resource cr := &clustersv1alpha1.ClusterRequest{} if err := r.PlatformCluster.Client().Get(ctx, req.NamespacedName, cr); err != nil { @@ -86,15 +88,15 @@ func (r *ClusterScheduler) reconcile(ctx context.Context, log logging.Logger, re // handle operation annotation if cr.GetAnnotations() != nil { - op, ok := cr.GetAnnotations()[clustersv1alpha1.OperationAnnotation] + op, ok := cr.GetAnnotations()[apiconst.OperationAnnotation] if ok { switch op { - case clustersv1alpha1.OperationAnnotationValueIgnore: + case apiconst.OperationAnnotationValueIgnore: log.Info("Ignoring resource due to ignore operation annotation") return ReconcileResult{} - case 
clustersv1alpha1.OperationAnnotationValueReconcile: + case apiconst.OperationAnnotationValueReconcile: log.Debug("Removing reconcile operation annotation from resource") - if err := ctrlutils.EnsureAnnotation(ctx, r.PlatformCluster.Client(), cr, clustersv1alpha1.OperationAnnotation, "", true, ctrlutils.DELETE); err != nil { + if err := ctrlutils.EnsureAnnotation(ctx, r.PlatformCluster.Client(), cr, apiconst.OperationAnnotation, "", true, ctrlutils.DELETE); err != nil { return ReconcileResult{ReconcileError: errutils.WithReason(fmt.Errorf("error removing operation annotation: %w", err), cconst.ReasonPlatformClusterInteractionProblem)} } } @@ -258,7 +260,7 @@ func (r *ClusterScheduler) handleDelete(ctx context.Context, req reconcile.Reque // fetch all clusters and filter for the ones that have a finalizer from this request fin := cr.FinalizerForCluster() clusterList := &clustersv1alpha1.ClusterList{} - if err := r.PlatformCluster.Client().List(ctx, clusterList, client.MatchingLabelsSelector{Selector: r.Config.CompletedSelectors.Clusters}); err != nil { + if err := r.PlatformCluster.Client().List(ctx, clusterList, client.MatchingLabelsSelector{Selector: r.Config.Selectors.Clusters.Completed()}); err != nil { rr.ReconcileError = errutils.WithReason(fmt.Errorf("error listing Clusters: %w", err), cconst.ReasonPlatformClusterInteractionProblem) return rr } @@ -319,16 +321,14 @@ func (r *ClusterScheduler) SetupWithManager(mgr ctrl.Manager) error { // watch ClusterRequest resources For(&clustersv1alpha1.ClusterRequest{}). 
WithEventFilter(predicate.And( - ctrlutils.LabelSelectorPredicate(r.Config.CompletedSelectors.Requests), + ctrlutils.LabelSelectorPredicate(r.Config.Selectors.Requests.Completed()), predicate.Or( predicate.GenerationChangedPredicate{}, ctrlutils.DeletionTimestampChangedPredicate{}, - ctrlutils.GotAnnotationPredicate(clustersv1alpha1.OperationAnnotation, clustersv1alpha1.OperationAnnotationValueReconcile), - ctrlutils.LostAnnotationPredicate(clustersv1alpha1.OperationAnnotation, clustersv1alpha1.OperationAnnotationValueIgnore), - ), - predicate.Not( - ctrlutils.HasAnnotationPredicate(clustersv1alpha1.OperationAnnotation, clustersv1alpha1.OperationAnnotationValueIgnore), + ctrlutils.GotAnnotationPredicate(apiconst.OperationAnnotation, apiconst.OperationAnnotationValueReconcile), + ctrlutils.LostAnnotationPredicate(apiconst.OperationAnnotation, apiconst.OperationAnnotationValueIgnore), ), + predicate.Not(ctrlutils.HasAnnotationPredicate(apiconst.OperationAnnotation, apiconst.OperationAnnotationValueIgnore)), )). Complete(r) } @@ -346,7 +346,7 @@ func (r *ClusterScheduler) fetchRelevantClusters(ctx context.Context, cr *cluste namespace = cDef.Template.Namespace } clusterList := &clustersv1alpha1.ClusterList{} - if err := r.PlatformCluster.Client().List(ctx, clusterList, client.InNamespace(namespace), client.MatchingLabelsSelector{Selector: cDef.CompletedSelector}); err != nil { + if err := r.PlatformCluster.Client().List(ctx, clusterList, client.InNamespace(namespace), client.MatchingLabelsSelector{Selector: cDef.Selector.Completed()}); err != nil { return nil, errutils.WithReason(fmt.Errorf("error listing Clusters: %w", err), cconst.ReasonPlatformClusterInteractionProblem) } clusters := make([]*clustersv1alpha1.Cluster, len(clusterList.Items))