From 861ad868bb09abd83ae6c58b087d9663aa5ee15f Mon Sep 17 00:00:00 2001 From: Johannes Aubart Date: Fri, 26 Sep 2025 13:04:16 +0200 Subject: [PATCH 01/12] add improved clusteraccess library --- lib/clusteraccess/advanced/clusteraccess.go | 716 ++++++++++++++++++++ lib/go.mod | 1 + lib/go.sum | 2 + 3 files changed, 719 insertions(+) create mode 100644 lib/clusteraccess/advanced/clusteraccess.go diff --git a/lib/clusteraccess/advanced/clusteraccess.go b/lib/clusteraccess/advanced/clusteraccess.go new file mode 100644 index 0000000..10d5a71 --- /dev/null +++ b/lib/clusteraccess/advanced/clusteraccess.go @@ -0,0 +1,716 @@ +//nolint:revive +package advanced + +import ( + "context" + "fmt" + "maps" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/openmcp-project/controller-utils/pkg/clusters" + maputils "github.com/openmcp-project/controller-utils/pkg/collections/maps" + ctrlutils "github.com/openmcp-project/controller-utils/pkg/controller" + "github.com/openmcp-project/controller-utils/pkg/logging" + + clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" + commonapi "github.com/openmcp-project/openmcp-operator/api/common" + openmcpconst "github.com/openmcp-project/openmcp-operator/api/constants" + libutils "github.com/openmcp-project/openmcp-operator/lib/utils" +) + +////////////////// +/// INTERFACES /// +////////////////// + +// Reconciler is an interface for reconciling access k8s clusters based on the openMCP 'Cluster' API. +// It can create ClusterRequests and/or AccessRequests for an amount of clusters. +type Reconciler interface { + // Register registers a cluster to be managed by the reconciler. + Register(reg ClusterRegistration) Reconciler + // WithRetryInterval sets the retry interval. + WithRetryInterval(interval time.Duration) Reconciler + // WithManagedLabels allows to overwrite the managed-by and managed-purpose labels that are set on the created resources. + // Note that the implementation might depend on these labels to identify the resources it created, + // so changing them might lead to unexpected behavior. They also must be unique within the context of this reconciler. + // Use with caution. + WithManagedLabels(managedBy, managedPurpose string) Reconciler + + // Access returns an internal Cluster object granting access to the cluster for the specified request with the specified id. + // Will fail if the cluster is not registered or no AccessRequest is registered for the cluster, or if some other error occurs. + Access(ctx context.Context, request reconcile.Request, id string) (*clusters.Cluster, error) + // AccessRequest fetches the AccessRequest object for the cluster for the specified request with the specified id. + // Will fail if the cluster is not registered or no AccessRequest is registered for the cluster, or if some other error occurs. + AccessRequest(ctx context.Context, request reconcile.Request, id string) (*clustersv1alpha1.AccessRequest, error) + // ClusterRequest fetches the ClusterRequest object for the cluster for the specified request with the specified id. + // Will fail if the cluster is not registered or no ClusterRequest is registered for the cluster, or if some other error occurs. 
+ ClusterRequest(ctx context.Context, request reconcile.Request, id string) (*clustersv1alpha1.ClusterRequest, error) + // Cluster fetches the external Cluster object for the cluster for the specified request with the specified id. + // Will fail if the cluster is not registered or no Cluster can be determined, or if some other error occurs. + Cluster(ctx context.Context, request reconcile.Request, id string) (*clustersv1alpha1.Cluster, error) + + // Reconcile creates the ClusterRequests and/or AccessRequests for the registered clusters. + // This function should be called during all reconciliations of the reconciled object. + // ctx is the context for the reconciliation. + // request is the object that is being reconciled + // It returns a reconcile.Result and an error if the reconciliation failed. + // The reconcile.Result may contain a RequeueAfter value to indicate that the reconciliation should be retried after a certain duration. + // The duration is set by the WithRetryInterval method. + Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) + // ReconcileDelete deletes the ClusterRequests and/or AccessRequests for the registered clusters. + // This function should be called during the deletion of the reconciled object. + // ctx is the context for the reconciliation. + // request is the object that is being reconciled + // It returns a reconcile.Result and an error if the reconciliation failed. + // The reconcile.Result may contain a RequeueAfter value to indicate that the reconciliation should be retried after a certain duration. + // The duration is set by the WithRetryInterval method. + ReconcileDelete(ctx context.Context, request reconcile.Request) (reconcile.Result, error) +} + +type ClusterRegistration interface { + // ID is the unique identifier for the cluster. + ID() string + // Suffix is the suffix to be used for the names of the created resources. + // It must be unique within the context of the reconciler. + // If empty, the ID will be used as suffix. + Suffix() string + // AccessRequestTokenConfig is the token configuration for the AccessRequest to be created for the cluster. + // Might be nil if no AccessRequest should be created or if OIDC access is used. + // Only one of AccessRequestTokenConfig and AccessRequestOIDCConfig should be non-nil. + AccessRequestTokenConfig() *clustersv1alpha1.TokenConfig + // AccessRequestOIDCConfig is the OIDC configuration for the AccessRequest to be created for the cluster. + // Might be nil if no AccessRequest should be created or if token-based access is used. + // Only one of AccessRequestTokenConfig and AccessRequestOIDCConfig should be non-nil. + AccessRequestOIDCConfig() *clustersv1alpha1.OIDCConfig + // ClusterRequestSpec is the spec for the ClusterRequest to be created for the cluster. + // It is nil if no ClusterRequest should be created. + // Only one of ClusterRequestSpec, ClusterReference and ClusterRequestReference should be non-nil. + ClusterRequestSpec() *clustersv1alpha1.ClusterRequestSpec + // ClusterReference returns name and namespace of an existing Cluster resource. + // It is nil if a new ClusterRequest should be created or if the Cluster is referenced by an existing ClusterRequest. + // Only one of ClusterRequestSpec, ClusterReference and ClusterRequestReference should be non-nil. + ClusterReference() *commonapi.ObjectReference + // ClusterRequestReference returns name and namespace of the ClusterRequest resource. 
+ // It is nil if a new ClusterRequest should be created or if the Cluster is referenced directly. + // Only one of ClusterRequestSpec, ClusterReference and ClusterRequestReference should be non-nil. + ClusterRequestReference() *commonapi.ObjectReference + // Scheme is the scheme for the Kubernetes client of the cluster. + // If nil, the default scheme will be used. + Scheme() *runtime.Scheme + // NamespaceGenerator returns a function that generates the namespace on the Platform cluster to use for the created requests. + // The function must take the name and namespace of the reconciled object as parameters and return the namespace or an error. + // The generated namespace must be unique within the context of the reconciler. + // If nil, the StableMCPNamespace function will be used. + NamespaceGenerator() func(requestName, requestNamespace string) (string, error) +} + +type ClusterRegistrationBuilder interface { + // WithTokenAccess enables an AccessRequest for token-based access to be created for the cluster. + // It is mutually exclusive to WithOIDCAccess, calling this method after WithOIDCAccess will override the previous call. + // Passing in a nil cfg will disable AccessRequest creation, if it was token-based before, and have no effect if it was OIDC-based before. + WithTokenAccess(cfg *clustersv1alpha1.TokenConfig) ClusterRegistrationBuilder + // WithOIDCAccess enables an AccessRequest for OIDC-based access to be created for the cluster. + // It is mutually exclusive to WithTokenAccess, calling this method after WithTokenAccess will override the previous call. + // Passing in a nil cfg will disable AccessRequest creation, if it was OIDC-based before, and have no effect if it was token-based before. + WithOIDCAccess(cfg *clustersv1alpha1.OIDCConfig) ClusterRegistrationBuilder + // WithScheme sets the scheme for the Kubernetes client of the cluster. + // If not set or set to nil, the default scheme will be used. + WithScheme(scheme *runtime.Scheme) ClusterRegistrationBuilder + // WithNamespaceGenerator sets the function that generates the namespace on the Platform cluster to use for the created requests. + // If set to nil, the StableMCPNamespace function will be used. + WithNamespaceGenerator(f func(requestName, requestNamespace string) (string, error)) ClusterRegistrationBuilder + // Build builds the ClusterRegistration object. + Build() ClusterRegistration +} + +/////////////////////////////////////////// +/// IMPLEMENTATION: ClusterRegistration /// +/////////////////////////////////////////// + +type clusterRegistrationImpl struct { + id string + suffix string + scheme *runtime.Scheme + accessTokenConfig *clustersv1alpha1.TokenConfig + accessOIDCConfig *clustersv1alpha1.OIDCConfig + clusterRequestSpec *clustersv1alpha1.ClusterRequestSpec + clusterRef *commonapi.ObjectReference + clusterRequestRef *commonapi.ObjectReference + namespaceGenerator func(requestName, requestNamespace string) (string, error) +} + +var _ ClusterRegistration = &clusterRegistrationImpl{} + +// AccessRequestTokenConfig implements ClusterRegistration. +func (c *clusterRegistrationImpl) AccessRequestTokenConfig() *clustersv1alpha1.TokenConfig { + return c.accessTokenConfig.DeepCopy() +} + +// AccessRequestOIDCConfig implements ClusterRegistration. +func (c *clusterRegistrationImpl) AccessRequestOIDCConfig() *clustersv1alpha1.OIDCConfig { + return c.accessOIDCConfig.DeepCopy() +} + +// ClusterRequestSpec implements ClusterRegistration. 
+func (c *clusterRegistrationImpl) ClusterRequestSpec() *clustersv1alpha1.ClusterRequestSpec { + return c.clusterRequestSpec.DeepCopy() +} + +// ClusterReference implements ClusterRegistration. +func (c *clusterRegistrationImpl) ClusterReference() *commonapi.ObjectReference { + return c.clusterRef.DeepCopy() +} + +// ClusterRequestReference implements ClusterRegistration. +func (c *clusterRegistrationImpl) ClusterRequestReference() *commonapi.ObjectReference { + return c.clusterRequestRef.DeepCopy() +} + +// ID implements ClusterRegistration. +func (c *clusterRegistrationImpl) ID() string { + return c.id +} + +// Suffix implements ClusterRegistration. +func (c *clusterRegistrationImpl) Suffix() string { + if c.suffix != "" { + return c.suffix + } + return c.id +} + +// Scheme implements ClusterRegistration. +func (c *clusterRegistrationImpl) Scheme() *runtime.Scheme { + return c.scheme +} + +// NamespaceGenerator implements ClusterRegistration. +func (c *clusterRegistrationImpl) NamespaceGenerator() func(requestName, requestNamespace string) (string, error) { + if c.namespaceGenerator != nil { + return c.namespaceGenerator + } + return libutils.StableMCPNamespace +} + +////////////////////////////////////////////////// +/// IMPLEMENTATION: ClusterRegistrationBuilder /// +////////////////////////////////////////////////// + +type clusterRegistrationBuilderImpl struct { + clusterRegistrationImpl +} + +var _ ClusterRegistrationBuilder = &clusterRegistrationBuilderImpl{} + +// Build implements ClusterRegistrationBuilder. +func (c *clusterRegistrationBuilderImpl) Build() ClusterRegistration { + return &c.clusterRegistrationImpl +} + +// WithOIDCAccess implements ClusterRegistrationBuilder. +func (c *clusterRegistrationBuilderImpl) WithOIDCAccess(cfg *clustersv1alpha1.OIDCConfig) ClusterRegistrationBuilder { + c.accessOIDCConfig = cfg.DeepCopy() + return c +} + +// WithTokenAccess implements ClusterRegistrationBuilder. +func (c *clusterRegistrationBuilderImpl) WithTokenAccess(cfg *clustersv1alpha1.TokenConfig) ClusterRegistrationBuilder { + c.accessTokenConfig = cfg.DeepCopy() + return c +} + +// WithScheme implements ClusterRegistrationBuilder. +func (c *clusterRegistrationBuilderImpl) WithScheme(scheme *runtime.Scheme) ClusterRegistrationBuilder { + c.scheme = scheme + return c +} + +// WithNamespaceGenerator implements ClusterRegistrationBuilder. +func (c *clusterRegistrationBuilderImpl) WithNamespaceGenerator(f func(requestName, requestNamespace string) (string, error)) ClusterRegistrationBuilder { + c.namespaceGenerator = f + return c +} + +// NewCluster instructs the Reconciler to create and manage a new ClusterRequest. +func NewCluster(id, suffix string, purpose string) ClusterRegistrationBuilder { + return &clusterRegistrationBuilderImpl{ + clusterRegistrationImpl: clusterRegistrationImpl{ + id: id, + suffix: suffix, + clusterRequestSpec: &clustersv1alpha1.ClusterRequestSpec{ + Purpose: purpose, + }, + }, + } +} + +// ExistingCluster instructs the Reconciler to use an existing Cluster resource. +func ExistingCluster(id, suffix, name, namespace string) ClusterRegistrationBuilder { + return &clusterRegistrationBuilderImpl{ + clusterRegistrationImpl: clusterRegistrationImpl{ + id: id, + suffix: suffix, + clusterRef: &commonapi.ObjectReference{ + Name: name, + Namespace: namespace, + }, + }, + } +} + +// ExistingClusterRequest instructs the Reconciler to use an existing Cluster resource that is referenced by the given ClusterRequest. 
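+//
+// For illustration, a registration built on top of an existing ClusterRequest might
+// look like this (names, namespace, and token config are hypothetical placeholders):
+//
+//	reg := ExistingClusterRequest("workload", "wl", "my-cluster-request", "my-namespace").
+//		WithTokenAccess(&clustersv1alpha1.TokenConfig{}).
+//		Build()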
+func ExistingClusterRequest(id, suffix, name, namespace string) ClusterRegistrationBuilder { + return &clusterRegistrationBuilderImpl{ + clusterRegistrationImpl: clusterRegistrationImpl{ + id: id, + suffix: suffix, + clusterRequestRef: &commonapi.ObjectReference{ + Name: name, + Namespace: namespace, + }, + }, + } +} + +////////////////////////////////// +/// IMPLEMENTATION: Reconciler /// +////////////////////////////////// + +type reconcilerImpl struct { + controllerName string + platformClusterClient client.Client + + interval time.Duration + registrations map[string]ClusterRegistration + managedBy string + managedPurpose string +} + +var _ Reconciler = &reconcilerImpl{} + +// Access implements Reconciler. +func (r *reconcilerImpl) Access(ctx context.Context, request reconcile.Request, id string) (*clusters.Cluster, error) { + reg, ok := r.registrations[id] + if !ok { + return nil, fmt.Errorf("no registration found for request '%s' with id '%s'", request.String(), id) + } + ar, err := r.AccessRequest(ctx, request, id) + if err != nil { + return nil, fmt.Errorf("unable to fetch AccessRequest for request '%s' with id '%s': %w", request.String(), id, err) + } + if !ar.Status.IsGranted() { + return nil, fmt.Errorf("AccessRequest '%s/%s' for request '%s' with id '%s' is not granted", ar.Namespace, ar.Name, request.String(), id) + } + access, err := AccessFromAccessRequest(ctx, r.platformClusterClient, id, reg.Scheme(), ar) + if err != nil { + return nil, fmt.Errorf("unable to get access for request '%s' with id '%s': %w", request.String(), id, err) + } + return access, nil +} + +// AccessRequest implements Reconciler. +func (r *reconcilerImpl) AccessRequest(ctx context.Context, request reconcile.Request, id string) (*clustersv1alpha1.AccessRequest, error) { + reg, ok := r.registrations[id] + if !ok { + return nil, fmt.Errorf("no registration found for request '%s' with id '%s'", request.String(), id) + } + if reg.AccessRequestOIDCConfig() == nil && reg.AccessRequestTokenConfig() == nil { + return nil, fmt.Errorf("no AccessRequest configured for request '%s' with id '%s'", request.String(), id) + } + suffix := reg.Suffix() + if suffix == "" { + suffix = reg.ID() + } + namespace, err := reg.NamespaceGenerator()(request.Name, request.Namespace) + if err != nil { + return nil, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s/%s': %w", request.Namespace, request.Name, err) + } + ar := &clustersv1alpha1.AccessRequest{} + ar.Name = StableRequestName(r.controllerName, request, suffix) + ar.Namespace = namespace + if err := r.platformClusterClient.Get(ctx, client.ObjectKeyFromObject(ar), ar); err != nil { + return nil, fmt.Errorf("unable to get AccessRequest '%s/%s' for request '%s' with id '%s': %w", ar.Namespace, ar.Name, request.String(), id, err) + } + return ar, nil +} + +// Cluster implements Reconciler. 
+func (r *reconcilerImpl) Cluster(ctx context.Context, request reconcile.Request, id string) (*clustersv1alpha1.Cluster, error) { + reg, ok := r.registrations[id] + if !ok { + return nil, fmt.Errorf("no registration found for request '%s' with id '%s'", request.String(), id) + } + clusterRef := reg.ClusterReference() + if clusterRef == nil { + // if no ClusterReference is specified, try to get it from the ClusterRequest + if reg.ClusterRequestReference() != nil { + cr, err := r.ClusterRequest(ctx, request, id) + if err != nil { + return nil, fmt.Errorf("unable to fetch ClusterRequest for request '%s' with id '%s': %w", request.String(), id, err) + } + if cr.Status.Cluster == nil { + return nil, fmt.Errorf("ClusterRequest '%s/%s' for request '%s' with id '%s' has no cluster reference", cr.Namespace, cr.Name, request.String(), id) + } + clusterRef = cr.Status.Cluster.DeepCopy() + } else { + // try to get the Cluster from the AccessRequest + ar, err := r.AccessRequest(ctx, request, id) + if err != nil { + return nil, fmt.Errorf("unable to fetch AccessRequest for request '%s' with id '%s': %w", request.String(), id, err) + } + if ar.Spec.ClusterRef == nil { + return nil, fmt.Errorf("AccessRequest '%s/%s' for request '%s' with id '%s' has no cluster reference", ar.Namespace, ar.Name, request.String(), id) + } + clusterRef = ar.Spec.ClusterRef.DeepCopy() + } + } + + c := &clustersv1alpha1.Cluster{} + if err := r.platformClusterClient.Get(ctx, client.ObjectKey{Name: clusterRef.Name, Namespace: clusterRef.Namespace}, c); err != nil { + return nil, fmt.Errorf("unable to get Cluster '%s/%s' for request '%s' with id '%s': %w", clusterRef.Namespace, clusterRef.Name, request.String(), id, err) + } + return c, nil +} + +// ClusterRequest implements Reconciler. +func (r *reconcilerImpl) ClusterRequest(ctx context.Context, request reconcile.Request, id string) (*clustersv1alpha1.ClusterRequest, error) { + reg, ok := r.registrations[id] + if !ok { + return nil, fmt.Errorf("no registration found for request '%s' with id '%s'", request.String(), id) + } + crRef := reg.ClusterRequestReference() + if crRef == nil { + // if the ClusterRequest is not referenced directly, check if was created or can be retrieved via an AccessRequest + if reg.ClusterRequestSpec() == nil { + // the ClusterRequest was created by this library + suffix := reg.Suffix() + if suffix == "" { + suffix = reg.ID() + } + namespace, err := reg.NamespaceGenerator()(request.Name, request.Namespace) + if err != nil { + return nil, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s/%s': %w", request.Namespace, request.Name, err) + } + crRef = &commonapi.ObjectReference{ + Name: StableRequestName(r.controllerName, request, suffix), + Namespace: namespace, + } + } else { + // check if an AccessRequest can be found and if it contains a ClusterRequest reference + ar, err := r.AccessRequest(ctx, request, id) + if err != nil { + return nil, fmt.Errorf("unable to fetch AccessRequest for request '%s' with id '%s': %w", request.String(), id, err) + } + if ar.Spec.RequestRef == nil { + return nil, fmt.Errorf("no ClusterRequest configured or referenced for request '%s' with id '%s'", request.String(), id) + } + crRef = ar.Spec.RequestRef.DeepCopy() + } + } + + cr := &clustersv1alpha1.ClusterRequest{} + if err := r.platformClusterClient.Get(ctx, client.ObjectKey{Name: crRef.Name, Namespace: crRef.Namespace}, cr); err != nil { + return nil, fmt.Errorf("unable to get ClusterRequest '%s/%s' for request '%s' with id '%s': %w", 
crRef.Namespace, crRef.Name, request.String(), id, err) + } + return cr, nil +} + +// Reconcile implements Reconciler. +func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + log := logging.FromContextOrDiscard(ctx).WithName("ClusterAccess") + ctx = logging.NewContext(ctx, log) + log.Info("Reconciling cluster access") + + // iterate over registrations and ensure that the corresponding resources are ready + for _, reg := range r.registrations { + rlog := log.WithValues("id", reg.ID()) + rlog.Debug("Processing registration") + suffix := reg.Suffix() + if suffix == "" { + suffix = reg.ID() + } + expectedLabels := map[string]string{} + if r.managedBy != "" { + expectedLabels[openmcpconst.ManagedByLabel] = r.managedBy + } else { + expectedLabels[openmcpconst.ManagedByLabel] = r.controllerName + } + if r.managedPurpose != "" { + expectedLabels[openmcpconst.ManagedPurposeLabel] = r.managedPurpose + } else { + expectedLabels[openmcpconst.ManagedPurposeLabel] = fmt.Sprintf("%s.%s.%s", request.Namespace, request.Name, reg.ID()) + } + + // ensure namespace exists + namespace, err := reg.NamespaceGenerator()(request.Name, request.Namespace) + if err != nil { + return reconcile.Result{}, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s/%s': %w", request.Namespace, request.Name, err) + } + rlog.Debug("Ensuring platform cluster namespace exists", "namespaceName", namespace) + ns := &corev1.Namespace{} + ns.Name = namespace + if err := r.platformClusterClient.Get(ctx, client.ObjectKeyFromObject(ns), ns); err != nil { + if !apierrors.IsNotFound(err) { + return reconcile.Result{}, fmt.Errorf("unable to get platform cluster namespace '%s': %w", ns.Name, err) + } + rlog.Info("Creating platform cluster namespace", "namespaceName", ns.Name) + if err := r.platformClusterClient.Create(ctx, ns); err != nil { + return reconcile.Result{}, fmt.Errorf("unable to create platform cluster namespace '%s': %w", ns.Name, err) + } + } + + // if a ClusterRequest is requested, ensure it exists and is ready + var cr *clustersv1alpha1.ClusterRequest + if reg.ClusterRequestSpec() != nil { + rlog.Debug("Ensuring ClusterRequest exists") + cr = &clustersv1alpha1.ClusterRequest{} + cr.Name = StableRequestName(r.controllerName, request, suffix) + cr.Namespace = namespace + // check if ClusterRequest exists + exists := true + if err := r.platformClusterClient.Get(ctx, client.ObjectKeyFromObject(cr), cr); err != nil { + if !apierrors.IsNotFound(err) { + return reconcile.Result{}, fmt.Errorf("unable to get ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err) + } + exists = false + } + if !exists { + rlog.Info("Creating ClusterRequest", "crName", cr.Name, "crNamespace", cr.Namespace) + cr.Labels = map[string]string{} + maps.Copy(cr.Labels, expectedLabels) + cr.Spec = *reg.ClusterRequestSpec().DeepCopy() + if err := r.platformClusterClient.Create(ctx, cr); err != nil { + return reconcile.Result{}, fmt.Errorf("unable to create ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err) + } + } + if !cr.Status.IsGranted() { + rlog.Info("Waiting for ClusterRequest to be granted", "crName", cr.Name, "crNamespace", cr.Namespace) + return reconcile.Result{RequeueAfter: r.interval}, nil + } + rlog.Debug("ClusterRequest is granted", "crName", cr.Name, "crNamespace", cr.Namespace) + } + + // if an AccessRequest is requested, ensure it exists and is ready + var ar *clustersv1alpha1.AccessRequest + if reg.AccessRequestOIDCConfig() != nil || 
reg.AccessRequestTokenConfig() != nil { + rlog.Debug("Ensuring AccessRequest exists") + ar = &clustersv1alpha1.AccessRequest{} + ar.Name = StableRequestName(r.controllerName, request, suffix) + ar.Namespace = namespace + // check if AccessRequest exists + exists := true + if err := r.platformClusterClient.Get(ctx, client.ObjectKeyFromObject(ar), ar); err != nil { + if !apierrors.IsNotFound(err) { + return reconcile.Result{}, fmt.Errorf("unable to get AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err) + } + exists = false + } + if !exists { + rlog.Info("Creating AccessRequest", "arName", ar.Name, "arNamespace", ar.Namespace) + // cluster and cluster request references are immutable, so set them only when creating new AccessRequests + if cr != nil { + ar.Spec.RequestRef = &commonapi.ObjectReference{ + Name: cr.Name, + Namespace: cr.Namespace, + } + } else if reg.ClusterReference() != nil { + ar.Spec.ClusterRef = reg.ClusterReference().DeepCopy() + } else if reg.ClusterRequestReference() != nil { + ar.Spec.RequestRef = reg.ClusterRequestReference().DeepCopy() + } else { + return reconcile.Result{}, fmt.Errorf("no ClusterRequestSpec, ClusterReference or ClusterRequestReference specified for registration with id '%s'", reg.ID()) + } + } else { + rlog.Debug("Updating AccessRequest", "arName", ar.Name, "arNamespace", ar.Namespace) + } + if _, err := controllerutil.CreateOrUpdate(ctx, r.platformClusterClient, ar, func() error { + ar.Labels = maputils.Merge(ar.Labels, expectedLabels) + ar.Spec.Token = reg.AccessRequestTokenConfig() + ar.Spec.OIDC = reg.AccessRequestOIDCConfig() + return nil + }); err != nil { + return reconcile.Result{}, fmt.Errorf("unable to create or update AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err) + } + if !ar.Status.IsGranted() { + rlog.Info("Waiting for AccessRequest to be granted", "arName", ar.Name, "arNamespace", ar.Namespace) + return reconcile.Result{RequeueAfter: r.interval}, nil + } + } + } + log.Info("Successfully reconciled cluster access") + + return reconcile.Result{}, nil +} + +// ReconcileDelete implements Reconciler. 
+func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + log := logging.FromContextOrDiscard(ctx).WithName("ClusterAccess") + ctx = logging.NewContext(ctx, log) + log.Info("Reconciling cluster access") + + // iterate over registrations and ensure that the corresponding resources are ready + for _, reg := range r.registrations { + rlog := log.WithValues("id", reg.ID()) + rlog.Debug("Processing registration") + suffix := reg.Suffix() + if suffix == "" { + suffix = reg.ID() + } + expectedLabels := map[string]string{} + if r.managedBy != "" { + expectedLabels[openmcpconst.ManagedByLabel] = r.managedBy + } else { + expectedLabels[openmcpconst.ManagedByLabel] = r.controllerName + } + if r.managedPurpose != "" { + expectedLabels[openmcpconst.ManagedPurposeLabel] = r.managedPurpose + } else { + expectedLabels[openmcpconst.ManagedPurposeLabel] = fmt.Sprintf("%s.%s.%s", request.Namespace, request.Name, reg.ID()) + } + + namespace, err := reg.NamespaceGenerator()(request.Name, request.Namespace) + if err != nil { + return reconcile.Result{}, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s/%s': %w", request.Namespace, request.Name, err) + } + + // if an AccessRequest is requested, ensure it is deleted + if reg.AccessRequestOIDCConfig() != nil || reg.AccessRequestTokenConfig() != nil { //nolint:dupl + rlog.Debug("Ensuring AccessRequest is deleted") + ar := &clustersv1alpha1.AccessRequest{} + ar.Name = StableRequestName(r.controllerName, request, suffix) + ar.Namespace = namespace + // check if AccessRequest exists + if err := r.platformClusterClient.Get(ctx, client.ObjectKeyFromObject(ar), ar); err != nil { + if !apierrors.IsNotFound(err) { + return reconcile.Result{}, fmt.Errorf("unable to get AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err) + } + rlog.Debug("AccessRequest already deleted", "arName", ar.Name, "arNamespace", ar.Namespace) + } else { + if ar.DeletionTimestamp.IsZero() { + rlog.Info("Deleting AccessRequest", "arName", ar.Name, "arNamespace", ar.Namespace) + if err := r.platformClusterClient.Delete(ctx, ar); err != nil { + return reconcile.Result{}, fmt.Errorf("unable to delete AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err) + } + } else { + rlog.Info("Waiting for AccessRequest to be deleted", "arName", ar.Name, "arNamespace", ar.Namespace) + } + return reconcile.Result{RequeueAfter: r.interval}, nil + } + } + + // if a ClusterRequest is requested, ensure it is deleted + if reg.ClusterRequestSpec() != nil { //nolint:dupl + rlog.Debug("Ensuring ClusterRequest is deleted") + cr := &clustersv1alpha1.ClusterRequest{} + cr.Name = StableRequestName(r.controllerName, request, suffix) + cr.Namespace = namespace + // check if ClusterRequest exists + if err := r.platformClusterClient.Get(ctx, client.ObjectKeyFromObject(cr), cr); err != nil { + if !apierrors.IsNotFound(err) { + return reconcile.Result{}, fmt.Errorf("unable to get ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err) + } + rlog.Debug("ClusterRequest already deleted", "crName", cr.Name, "crNamespace", cr.Namespace) + } else { + if cr.DeletionTimestamp.IsZero() { + rlog.Info("Deleting ClusterRequest", "crName", cr.Name, "crNamespace", cr.Namespace) + if err := r.platformClusterClient.Delete(ctx, cr); err != nil { + return reconcile.Result{}, fmt.Errorf("unable to delete ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err) + } + } else { + rlog.Info("Waiting for ClusterRequest to be deleted", "crName", cr.Name, 
"crNamespace", cr.Namespace) + } + return reconcile.Result{RequeueAfter: r.interval}, nil + } + } + + // don't delete the namespace, because it might contain other resources + } + log.Info("Successfully reconciled cluster access") + + return reconcile.Result{}, nil +} + +// Register implements Reconciler. +func (r *reconcilerImpl) Register(reg ClusterRegistration) Reconciler { + r.registrations[reg.ID()] = reg + return r +} + +// WithManagedLabels implements Reconciler. +func (r *reconcilerImpl) WithManagedLabels(managedBy string, managedPurpose string) Reconciler { + r.managedBy = managedBy + r.managedPurpose = managedPurpose + return r +} + +// WithRetryInterval implements Reconciler. +func (r *reconcilerImpl) WithRetryInterval(interval time.Duration) Reconciler { + r.interval = interval + return r +} + +/////////////////////////// +/// AUXILIARY FUNCTIONS /// +/////////////////////////// + +// StableRequestName generates a stable name for a Cluster- or AccessRequest related to an MCP. +// This basically results in '----'. +// If the resulting string exceeds the Kubernetes name length limit of 63 characters, it will be truncated with the last characters (excluding the suffix) replaced by a hash of what was removed. +// If the suffix is empty, it will be omitted (and the preceding hyphen as well). +func StableRequestName(controllerName string, request reconcile.Request, suffix string) string { + return StableRequestNameFromLocalName(controllerName, request.Name, suffix) +} + +// StableRequestNameFromLocalName works like StableRequestName but takes a local name directly instead of a reconcile.Request. +// localName is converted to lowercase before processing. +func StableRequestNameFromLocalName(controllerName, localName, suffix string) string { + controllerName = strings.ToLower(controllerName) + localName = strings.ToLower(localName) + raw := fmt.Sprintf("%s--%s", controllerName, localName) + if suffix != "" { + suffix = strings.ToLower(suffix) + return fmt.Sprintf("%s--%s", ctrlutils.ShortenToXCharactersUnsafe(raw, ctrlutils.K8sMaxNameLength-len(suffix)-2), suffix) + } + return ctrlutils.ShortenToXCharactersUnsafe(raw, ctrlutils.K8sMaxNameLength) +} + +// AccessFromAccessRequest provides access to a k8s cluster based on the given AccessRequest. 
+func AccessFromAccessRequest(ctx context.Context, platformClusterClient client.Client, id string, scheme *runtime.Scheme, ar *clustersv1alpha1.AccessRequest) (*clusters.Cluster, error) { + if ar.Status.SecretRef == nil { + return nil, fmt.Errorf("AccessRequest '%s/%s' has no secret reference in status", ar.Namespace, ar.Name) + } + + s := &corev1.Secret{} + s.Name = ar.Status.SecretRef.Name + s.Namespace = ar.Status.SecretRef.Namespace + + if err := platformClusterClient.Get(ctx, client.ObjectKeyFromObject(s), s); err != nil { + return nil, fmt.Errorf("unable to get secret '%s/%s' for AccessRequest '%s/%s': %w", s.Namespace, s.Name, ar.Namespace, ar.Name, err) + } + + kubeconfigBytes, ok := s.Data[clustersv1alpha1.SecretKeyKubeconfig] + if !ok { + return nil, fmt.Errorf("kubeconfig key '%s' not found in AccessRequest secret '%s/%s'", clustersv1alpha1.SecretKeyKubeconfig, s.Namespace, s.Name) + } + + config, err := clientcmd.RESTConfigFromKubeConfig(kubeconfigBytes) + if err != nil { + return nil, fmt.Errorf("failed to create rest config from kubeconfig bytes: %w", err) + } + + c := clusters.New(id).WithRESTConfig(config) + + if err = c.InitializeClient(scheme); err != nil { + return nil, fmt.Errorf("failed to initialize client: %w", err) + } + + return c, nil +} diff --git a/lib/go.mod b/lib/go.mod index c1d9c44..46187fa 100644 --- a/lib/go.mod +++ b/lib/go.mod @@ -54,6 +54,7 @@ require ( go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 // indirect golang.org/x/mod v0.28.0 // indirect golang.org/x/net v0.44.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect diff --git a/lib/go.sum b/lib/go.sum index 35ab80f..9be8ef4 100644 --- a/lib/go.sum +++ b/lib/go.sum @@ -141,6 +141,8 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 h1:TQwNpfvNkxAVlItJf6Cr5JTsVZoC/Sj7K3OZv2Pc14A= +golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= From 382b51ce50516a257bf18c972bafee59999053d1 Mon Sep 17 00:00:00 2001 From: Johannes Aubart Date: Mon, 29 Sep 2025 17:24:56 +0200 Subject: [PATCH 02/12] fix bugs and enable unit test support --- lib/clusteraccess/advanced/clusteraccess.go | 394 ++++++++++++++++++-- 1 file changed, 367 insertions(+), 27 deletions(-) diff --git a/lib/clusteraccess/advanced/clusteraccess.go b/lib/clusteraccess/advanced/clusteraccess.go index 10d5a71..3d3e2b7 100644 --- a/lib/clusteraccess/advanced/clusteraccess.go +++ b/lib/clusteraccess/advanced/clusteraccess.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "maps" + "slices" "strings" "time" @@ -31,18 +32,18 @@ import ( /// INTERFACES /// ////////////////// -// Reconciler is an interface for reconciling access k8s clusters based on the openMCP 'Cluster' API. 
+// ClusterAccessReconciler is an interface for reconciling access to k8s clusters based on the openMCP 'Cluster' API.
 // It can create ClusterRequests and/or AccessRequests for an amount of clusters.
-type Reconciler interface {
+type ClusterAccessReconciler interface {
 	// Register registers a cluster to be managed by the reconciler.
-	Register(reg ClusterRegistration) Reconciler
+	Register(reg ClusterRegistration) ClusterAccessReconciler
 	// WithRetryInterval sets the retry interval.
-	WithRetryInterval(interval time.Duration) Reconciler
+	WithRetryInterval(interval time.Duration) ClusterAccessReconciler
 	// WithManagedLabels allows to overwrite the managed-by and managed-purpose labels that are set on the created resources.
 	// Note that the implementation might depend on these labels to identify the resources it created,
 	// so changing them might lead to unexpected behavior. They also must be unique within the context of this reconciler.
 	// Use with caution.
-	WithManagedLabels(managedBy, managedPurpose string) Reconciler
+	WithManagedLabels(managedBy, managedPurpose string) ClusterAccessReconciler
 
 	// Access returns an internal Cluster object granting access to the cluster for the specified request with the specified id.
 	// Will fail if the cluster is not registered or no AccessRequest is registered for the cluster, or if some other error occurs.
@@ -73,8 +74,33 @@ type Reconciler interface {
 	// The reconcile.Result may contain a RequeueAfter value to indicate that the reconciliation should be retried after a certain duration.
 	// The duration is set by the WithRetryInterval method.
 	ReconcileDelete(ctx context.Context, request reconcile.Request) (reconcile.Result, error)
+
+	// WithFakingCallback registers a callback function under a specific key.
+	// The available keys depend on the implementation.
+	// The key determines when the callback function is executed.
+	// This feature is meant for unit testing, where usually no ClusterProvider, which could answer ClusterRequests and AccessRequests, is running.
+	WithFakingCallback(key string, callback FakingCallback) ClusterAccessReconciler
 }
+
+// FakingCallback is a function that allows mocking a desired state for unit testing.
+//
+// The idea behind this is: before running any reconciliation logic in a unit test, pass these callback functions to the cluster access reconciler implementation.
+// During the reconcile, the cluster access reconciler will call these functions at specific points, e.g. when it is waiting for an AccessRequest to be approved.
+// The time of the execution depends on the key and the implementation of the cluster access reconciler.
+// In the callback function, modify the cluster state as desired by mocking actions usually performed by external operators, e.g. set the AccessRequest to approved.
+//
+// Note that most of the arguments can be nil, depending on when the callback is executed.
+// The arguments are:
+// - ctx is the context, which is required for interacting with the cluster.
+// - platformClusterClient is the client for the platform cluster.
+// - key is the key that determined the execution of the callback.
+// - req is the reconcile.Request that triggered the reconciliation, if known.
+// - cr is the ClusterRequest related to the cluster access reconciliation, if known.
+// - ar is the AccessRequest related to the cluster access reconciliation, if known.
+// - c is the Cluster related to the cluster access reconciliation, if known.
+// - access is the access to the cluster, if already retrieved.
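+//
+// A minimal hand-written callback for a unit test could look like this (illustrative only;
+// ready-made helpers such as FakeAccessRequestReadiness are provided further below):
+//
+//	var grantAccessRequest FakingCallback = func(ctx context.Context, platformClusterClient client.Client, key string,
+//		req *reconcile.Request, cr *clustersv1alpha1.ClusterRequest, ar *clustersv1alpha1.AccessRequest,
+//		c *clustersv1alpha1.Cluster, access *clusters.Cluster) error {
+//		old := ar.DeepCopy()
+//		ar.Status.Phase = clustersv1alpha1.REQUEST_GRANTED
+//		return platformClusterClient.Status().Patch(ctx, ar, client.MergeFrom(old))
+//	}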
+type FakingCallback func(ctx context.Context, platformClusterClient client.Client, key string, req *reconcile.Request, cr *clustersv1alpha1.ClusterRequest, ar *clustersv1alpha1.AccessRequest, c *clustersv1alpha1.Cluster, access *clusters.Cluster) error + type ClusterRegistration interface { // ID is the unique identifier for the cluster. ID() string @@ -105,11 +131,9 @@ type ClusterRegistration interface { // Scheme is the scheme for the Kubernetes client of the cluster. // If nil, the default scheme will be used. Scheme() *runtime.Scheme - // NamespaceGenerator returns a function that generates the namespace on the Platform cluster to use for the created requests. - // The function must take the name and namespace of the reconciled object as parameters and return the namespace or an error. - // The generated namespace must be unique within the context of the reconciler. - // If nil, the StableMCPNamespace function will be used. - NamespaceGenerator() func(requestName, requestNamespace string) (string, error) + // Namespace generates the namespace on the Platform cluster to use for the created requests. + // The generated namespace is expected be unique within the context of the reconciler. + Namespace(requestName, requestNamespace string) (string, error) } type ClusterRegistrationBuilder interface { @@ -125,7 +149,6 @@ type ClusterRegistrationBuilder interface { // If not set or set to nil, the default scheme will be used. WithScheme(scheme *runtime.Scheme) ClusterRegistrationBuilder // WithNamespaceGenerator sets the function that generates the namespace on the Platform cluster to use for the created requests. - // If set to nil, the StableMCPNamespace function will be used. WithNamespaceGenerator(f func(requestName, requestNamespace string) (string, error)) ClusterRegistrationBuilder // Build builds the ClusterRegistration object. Build() ClusterRegistration @@ -192,12 +215,12 @@ func (c *clusterRegistrationImpl) Scheme() *runtime.Scheme { return c.scheme } -// NamespaceGenerator implements ClusterRegistration. -func (c *clusterRegistrationImpl) NamespaceGenerator() func(requestName, requestNamespace string) (string, error) { +// Namespace implements ClusterRegistration. +func (c *clusterRegistrationImpl) Namespace(requestName, requestNamespace string) (string, error) { if c.namespaceGenerator != nil { - return c.namespaceGenerator + return c.namespaceGenerator(requestName, requestNamespace) } - return libutils.StableMCPNamespace + return libutils.StableMCPNamespace(requestName, requestNamespace) } ////////////////////////////////////////////////// @@ -240,13 +263,14 @@ func (c *clusterRegistrationBuilderImpl) WithNamespaceGenerator(f func(requestNa } // NewCluster instructs the Reconciler to create and manage a new ClusterRequest. -func NewCluster(id, suffix string, purpose string) ClusterRegistrationBuilder { +func NewCluster(id, suffix, purpose string, waitForClusterDeletion bool) ClusterRegistrationBuilder { return &clusterRegistrationBuilderImpl{ clusterRegistrationImpl: clusterRegistrationImpl{ id: id, suffix: suffix, clusterRequestSpec: &clustersv1alpha1.ClusterRequestSpec{ - Purpose: purpose, + Purpose: purpose, + WaitForClusterDeletion: &waitForClusterDeletion, }, }, } @@ -292,9 +316,23 @@ type reconcilerImpl struct { registrations map[string]ClusterRegistration managedBy string managedPurpose string + + fakingCallbacks map[string]FakingCallback } -var _ Reconciler = &reconcilerImpl{} +// NewClusterAccessReconciler creates a new Cluster Access Reconciler. 
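+// For illustration (controller name and client variable are hypothetical):
+//
+//	rec := NewClusterAccessReconciler(platformClusterClient, "my-controller")
+//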
+// Note that it needs to be configured further by calling its Register method and optionally its builder-like With* methods. +// This is meant to be instantiated and configured once during controller setup and then its Reconcile or ReconcileDelete methods should be called during each reconciliation of the controller. +func NewClusterAccessReconciler(platformClusterClient client.Client, controllerName string) ClusterAccessReconciler { + return &reconcilerImpl{ + controllerName: controllerName, + platformClusterClient: platformClusterClient, + interval: 5 * time.Second, + registrations: map[string]ClusterRegistration{}, + } +} + +var _ ClusterAccessReconciler = &reconcilerImpl{} // Access implements Reconciler. func (r *reconcilerImpl) Access(ctx context.Context, request reconcile.Request, id string) (*clusters.Cluster, error) { @@ -329,7 +367,7 @@ func (r *reconcilerImpl) AccessRequest(ctx context.Context, request reconcile.Re if suffix == "" { suffix = reg.ID() } - namespace, err := reg.NamespaceGenerator()(request.Name, request.Namespace) + namespace, err := reg.Namespace(request.Name, request.Namespace) if err != nil { return nil, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s/%s': %w", request.Namespace, request.Name, err) } @@ -351,7 +389,7 @@ func (r *reconcilerImpl) Cluster(ctx context.Context, request reconcile.Request, clusterRef := reg.ClusterReference() if clusterRef == nil { // if no ClusterReference is specified, try to get it from the ClusterRequest - if reg.ClusterRequestReference() != nil { + if reg.ClusterRequestReference() != nil || reg.ClusterRequestSpec() != nil { cr, err := r.ClusterRequest(ctx, request, id) if err != nil { return nil, fmt.Errorf("unable to fetch ClusterRequest for request '%s' with id '%s': %w", request.String(), id, err) @@ -389,13 +427,13 @@ func (r *reconcilerImpl) ClusterRequest(ctx context.Context, request reconcile.R crRef := reg.ClusterRequestReference() if crRef == nil { // if the ClusterRequest is not referenced directly, check if was created or can be retrieved via an AccessRequest - if reg.ClusterRequestSpec() == nil { + if reg.ClusterRequestSpec() != nil { // the ClusterRequest was created by this library suffix := reg.Suffix() if suffix == "" { suffix = reg.ID() } - namespace, err := reg.NamespaceGenerator()(request.Name, request.Namespace) + namespace, err := reg.Namespace(request.Name, request.Namespace) if err != nil { return nil, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s/%s': %w", request.Namespace, request.Name, err) } @@ -450,7 +488,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques } // ensure namespace exists - namespace, err := reg.NamespaceGenerator()(request.Name, request.Namespace) + namespace, err := reg.Namespace(request.Name, request.Namespace) if err != nil { return reconcile.Result{}, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s/%s': %w", request.Namespace, request.Name, err) } @@ -493,6 +531,12 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques } if !cr.Status.IsGranted() { rlog.Info("Waiting for ClusterRequest to be granted", "crName", cr.Name, "crNamespace", cr.Namespace) + if fc := r.fakingCallbacks[FakingCallback_WaitingForClusterRequestReadiness]; fc != nil { + rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForClusterRequestReadiness) + if err := fc(ctx, 
r.platformClusterClient, FakingCallback_WaitingForClusterRequestReadiness, &request, cr, nil, nil, nil); err != nil { + return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForClusterRequestReadiness, err) + } + } return reconcile.Result{RequeueAfter: r.interval}, nil } rlog.Debug("ClusterRequest is granted", "crName", cr.Name, "crNamespace", cr.Namespace) @@ -541,6 +585,12 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques } if !ar.Status.IsGranted() { rlog.Info("Waiting for AccessRequest to be granted", "arName", ar.Name, "arNamespace", ar.Namespace) + if fc := r.fakingCallbacks[FakingCallback_WaitingForAccessRequestReadiness]; fc != nil { + rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForAccessRequestReadiness) + if err := fc(ctx, r.platformClusterClient, FakingCallback_WaitingForAccessRequestReadiness, &request, cr, ar, nil, nil); err != nil { + return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForAccessRequestReadiness, err) + } + } return reconcile.Result{RequeueAfter: r.interval}, nil } } @@ -576,7 +626,7 @@ func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile. expectedLabels[openmcpconst.ManagedPurposeLabel] = fmt.Sprintf("%s.%s.%s", request.Namespace, request.Name, reg.ID()) } - namespace, err := reg.NamespaceGenerator()(request.Name, request.Namespace) + namespace, err := reg.Namespace(request.Name, request.Namespace) if err != nil { return reconcile.Result{}, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s/%s': %w", request.Namespace, request.Name, err) } @@ -602,6 +652,12 @@ func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile. } else { rlog.Info("Waiting for AccessRequest to be deleted", "arName", ar.Name, "arNamespace", ar.Namespace) } + if fc := r.fakingCallbacks[FakingCallback_WaitingForAccessRequestDeletion]; fc != nil { + rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForAccessRequestDeletion) + if err := fc(ctx, r.platformClusterClient, FakingCallback_WaitingForAccessRequestDeletion, &request, nil, ar, nil, nil); err != nil { + return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForAccessRequestDeletion, err) + } + } return reconcile.Result{RequeueAfter: r.interval}, nil } } @@ -627,6 +683,12 @@ func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile. } else { rlog.Info("Waiting for ClusterRequest to be deleted", "crName", cr.Name, "crNamespace", cr.Namespace) } + if fc := r.fakingCallbacks[FakingCallback_WaitingForClusterRequestDeletion]; fc != nil { + rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForClusterRequestDeletion) + if err := fc(ctx, r.platformClusterClient, FakingCallback_WaitingForClusterRequestDeletion, &request, cr, nil, nil, nil); err != nil { + return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForClusterRequestDeletion, err) + } + } return reconcile.Result{RequeueAfter: r.interval}, nil } } @@ -639,24 +701,48 @@ func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile. } // Register implements Reconciler. 
-func (r *reconcilerImpl) Register(reg ClusterRegistration) Reconciler { +func (r *reconcilerImpl) Register(reg ClusterRegistration) ClusterAccessReconciler { r.registrations[reg.ID()] = reg return r } // WithManagedLabels implements Reconciler. -func (r *reconcilerImpl) WithManagedLabels(managedBy string, managedPurpose string) Reconciler { +func (r *reconcilerImpl) WithManagedLabels(managedBy string, managedPurpose string) ClusterAccessReconciler { r.managedBy = managedBy r.managedPurpose = managedPurpose return r } // WithRetryInterval implements Reconciler. -func (r *reconcilerImpl) WithRetryInterval(interval time.Duration) Reconciler { +func (r *reconcilerImpl) WithRetryInterval(interval time.Duration) ClusterAccessReconciler { r.interval = interval return r } +// WithFakingCallback implements Reconciler. +func (r *reconcilerImpl) WithFakingCallback(key string, callback FakingCallback) ClusterAccessReconciler { + if r.fakingCallbacks == nil { + r.fakingCallbacks = map[string]FakingCallback{} + } + r.fakingCallbacks[key] = callback + return r +} + +const ( + // FakingCallback_WaitingForAccessRequestReadiness is a key for a faking callback that is called when the reconciler is waiting for the AccessRequest to be granted. + // Note that the execution happens directly before the return of the reconcile function (with a requeue). This means that the reconciliation needs to run a second time to pick up the changes made in the callback. + FakingCallback_WaitingForAccessRequestReadiness = "WaitingForAccessRequestReadiness" + // FakingCallback_WaitingForClusterRequestReadiness is a key for a faking callback that is called when the reconciler is waiting for the ClusterRequest to be granted. + // Note that the execution happens directly before the return of the reconcile function (with a requeue). This means that the reconciliation needs to run a second time to pick up the changes made in the callback. + FakingCallback_WaitingForClusterRequestReadiness = "WaitingForClusterRequestReadiness" + // FakingCallback_WaitingForAccessRequestDeletion is a key for a faking callback that is called when the reconciler is waiting for the AccessRequest to be deleted. + // Note that the execution happens directly before the return of the reconcile function (with a requeue). This means that the reconciliation needs to run a second time to pick up the changes made in the callback. + FakingCallback_WaitingForAccessRequestDeletion = "WaitingForAccessRequestDeletion" + // FakingCallback_WaitingForClusterRequestDeletion is a key for a faking callback that is called when the reconciler is waiting for the ClusterRequest to be deleted. + // Note that the execution happens directly before the return of the reconcile function (with a requeue). This means that the reconciliation needs to run a second time to pick up the changes made in the callback. + FakingCallback_WaitingForClusterRequestDeletion = "WaitingForClusterRequestDeletion" +) + /////////////////////////// /// AUXILIARY FUNCTIONS /// /////////////////////////// @@ -714,3 +800,257 @@ func AccessFromAccessRequest(ctx context.Context, platformClusterClient client.C return c, nil } + +//////////////////////////////// +/// FAKE AUXILIARY FUNCTIONS /// +//////////////////////////////// + +// FakeClusterRequestReadiness returns a faking callback that sets the ClusterRequest to 'Granted'. +// If the given ClusterSpec is not nil, it creates a corresponding Cluster next to the ClusterRequest, if it doesn't exist yet. 
+// If during the callback, the Cluster is non-nil, with a non-empty name and namespace, but doesn't exist yet, it will be created with the data from the Cluster, ignoring the given ClusterSpec. +// Otherwise, only the ClusterRequest's status is modified. +// The callback is a no-op if the ClusterRequest is already granted (Cluster reference and existence are not checked in this case). +// It returns an error if the ClusterRequest is nil. +// +// The returned callback is meant to be used with the key stored in FakingCallback_WaitingForClusterRequestReadiness. +func FakeClusterRequestReadiness(clusterSpec *clustersv1alpha1.ClusterSpec) FakingCallback { + return func(ctx context.Context, platformClusterClient client.Client, key string, _ *reconcile.Request, cr *clustersv1alpha1.ClusterRequest, _ *clustersv1alpha1.AccessRequest, c *clustersv1alpha1.Cluster, _ *clusters.Cluster) error { + if cr == nil { + return fmt.Errorf("ClusterRequest is nil") + } + if cr.Status.IsGranted() { + // already granted, nothing to do + return nil + } + + // create cluster, if desired + if c != nil && c.Name != "" && c.Namespace != "" { + // check if cluster exists + if err := platformClusterClient.Get(ctx, client.ObjectKeyFromObject(c), c); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("unable to get Cluster '%s/%s': %w", c.Namespace, c.Name, err) + } + // create cluster + if err := platformClusterClient.Create(ctx, c); err != nil { + return fmt.Errorf("unable to create Cluster '%s/%s': %w", c.Namespace, c.Name, err) + } + } + } else { + // cluster not known, create fake one, if spec is given + c = &clustersv1alpha1.Cluster{} + c.Name = cr.Name + c.Namespace = cr.Namespace + if clusterSpec != nil { + c.Spec = *clusterSpec.DeepCopy() + if err := platformClusterClient.Create(ctx, c); err != nil { + return fmt.Errorf("unable to create Cluster '%s/%s': %w", c.Namespace, c.Name, err) + } + } + } + + // mock ClusterRequest status + old := cr.DeepCopy() + cr.Status.Cluster = &commonapi.ObjectReference{ + Name: c.Name, + Namespace: c.Namespace, + } + cr.Status.Phase = clustersv1alpha1.REQUEST_GRANTED + cr.Status.ObservedGeneration = cr.Generation + if err := platformClusterClient.Status().Patch(ctx, cr, client.MergeFrom(old)); err != nil { + return fmt.Errorf("unable to update status of ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err) + } + return nil + } +} + +// FakeAccessRequestReadiness returns a faking callback that sets the AccessRequest to 'Granted'. +// If kcfgData is not nil or empty, it will be used as 'kubeconfig' data in the secret referenced by the AccessRequest. +// Otherwise, the content of the kubeconfig key will just be 'fake'. +// The callback is a no-op if the AccessRequest is already granted (Secret reference and existence are not checked in this case). +// It returns an error if the AccessRequest is nil. +// +// The returned callback is meant to be used with the key stored in FakingCallback_WaitingForAccessRequestReadiness. 
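+//
+// Illustrative wiring in a unit test (client and controller name are placeholders):
+//
+//	rec := NewClusterAccessReconciler(fakeClient, "my-controller").
+//		WithFakingCallback(FakingCallback_WaitingForAccessRequestReadiness, FakeAccessRequestReadiness(nil)).
+//		Register(NewCluster("workload", "wl", "workload", false).
+//			WithTokenAccess(&clustersv1alpha1.TokenConfig{}).
+//			Build())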
+func FakeAccessRequestReadiness(kcfgData []byte) FakingCallback { + return func(ctx context.Context, platformClusterClient client.Client, key string, req *reconcile.Request, cr *clustersv1alpha1.ClusterRequest, ar *clustersv1alpha1.AccessRequest, c *clustersv1alpha1.Cluster, access *clusters.Cluster) error { + if ar == nil { + return fmt.Errorf("AccessRequest is nil") + } + if ar.Status.IsGranted() { + // already granted, nothing to do + return nil + } + + // create secret + if len(kcfgData) == 0 { + kcfgData = []byte("fake") + } + s := &corev1.Secret{} + s.Name = ar.Name + s.Namespace = ar.Namespace + s.Data = map[string][]byte{ + clustersv1alpha1.SecretKeyKubeconfig: kcfgData, + } + if err := platformClusterClient.Get(ctx, client.ObjectKeyFromObject(s), s); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("unable to get secret '%s/%s': %w", s.Namespace, s.Name, err) + } + // create secret + if err := platformClusterClient.Create(ctx, s); err != nil { + return fmt.Errorf("unable to create secret '%s/%s': %w", s.Namespace, s.Name, err) + } + } + + // mock AccessRequest status + old := ar.DeepCopy() + ar.Status.SecretRef = &commonapi.ObjectReference{ + Name: s.Name, + Namespace: s.Namespace, + } + ar.Status.Phase = clustersv1alpha1.REQUEST_GRANTED + ar.Status.ObservedGeneration = ar.Generation + if err := platformClusterClient.Status().Patch(ctx, ar, client.MergeFrom(old)); err != nil { + return fmt.Errorf("unable to update status of AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err) + } + return nil + } +} + +// FakeClusterRequestDeletion returns a faking callback that removes finalizers from the given ClusterRequest and potentially deletes the referenced Cluster. +// The callback is a no-op if the ClusterRequest is already nil (not found). +// If deleteCluster is true, the Cluster referenced by the ClusterRequest will be deleted, if it exists. +// All finalizers from the finalizersToRemove* slices will be removed from the ClusterRequest and/or Cluster before deletion. +// The ClusterRequest itself is not deleted by this callback, only finalizers are removed. +// +// The returned callback is meant to be used with the key stored in FakingCallback_WaitingForClusterRequestDeletion. +func FakeClusterRequestDeletion(deleteCluster bool, finalizersToRemoveFromClusterRequest, finalizersToRemoveFromCluster []string) FakingCallback { + return func(ctx context.Context, platformClusterClient client.Client, key string, _ *reconcile.Request, cr *clustersv1alpha1.ClusterRequest, _ *clustersv1alpha1.AccessRequest, _ *clustersv1alpha1.Cluster, _ *clusters.Cluster) error { + if cr == nil { + // already deleted, nothing to do + return nil + } + + // delete cluster, if desired + if deleteCluster && cr.Status.Cluster != nil { + c := &clustersv1alpha1.Cluster{} + c.Name = cr.Status.Cluster.Name + c.Namespace = cr.Status.Cluster.Namespace + + if len(finalizersToRemoveFromCluster) > 0 { + // fetch cluster to remove finalizers + if err := platformClusterClient.Get(ctx, client.ObjectKeyFromObject(c), c); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("unable to get Cluster '%s/%s': %w", c.Namespace, c.Name, err) + } + // cluster doesn't exist, nothing to do + c = nil + } else { + cOld := c.DeepCopy() + if removeFinalizers(c, finalizersToRemoveFromCluster...) 
{ + if err := platformClusterClient.Patch(ctx, c, client.MergeFrom(cOld)); err != nil { + return fmt.Errorf("unable to remove finalizers from Cluster '%s/%s': %w", c.Namespace, c.Name, err) + } + } + } + } + + if c != nil { + if err := platformClusterClient.Delete(ctx, c); client.IgnoreNotFound(err) != nil { + return fmt.Errorf("unable to delete Cluster '%s/%s': %w", c.Namespace, c.Name, err) + } + } + } + + // remove finalizers from ClusterRequest, if any + if len(finalizersToRemoveFromClusterRequest) > 0 { + crOld := cr.DeepCopy() + if removeFinalizers(cr, finalizersToRemoveFromClusterRequest...) { + if err := platformClusterClient.Patch(ctx, cr, client.MergeFrom(crOld)); err != nil { + return fmt.Errorf("unable to remove finalizers from ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err) + } + } + } + + return nil + } +} + +// FakeAccessRequestDeletion returns a faking callback that removes finalizers from the given AccessRequest and deletes the referenced Secret, if it exists. +// The callback is a no-op if the AccessRequest is already nil (not found). +// It returns an error if the AccessRequest is non-nil but cannot be deleted. +// All finalizers from the finalizersToRemove* slices will be removed from the AccessRequest and/or Secret before deletion. +// The AccessRequest itself is not deleted by this callback, only finalizers are removed. +// +// The returned callback is meant to be used with the key stored in FakingCallback_WaitingForAccessRequestDeletion. +func FakeAccessRequestDeletion(finalizersToRemoveFromAccessRequest, finalizersToRemoveFromSecret []string) FakingCallback { + return func(ctx context.Context, platformClusterClient client.Client, key string, _ *reconcile.Request, _ *clustersv1alpha1.ClusterRequest, ar *clustersv1alpha1.AccessRequest, _ *clustersv1alpha1.Cluster, _ *clusters.Cluster) error { + if ar == nil { + // already deleted, nothing to do + return nil + } + + // delete secret + if ar.Status.SecretRef != nil { + s := &corev1.Secret{} + s.Name = ar.Status.SecretRef.Name + s.Namespace = ar.Status.SecretRef.Namespace + + if len(finalizersToRemoveFromSecret) > 0 { + // fetch secret to remove finalizers + if err := platformClusterClient.Get(ctx, client.ObjectKeyFromObject(s), s); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("unable to get secret '%s/%s': %w", s.Namespace, s.Name, err) + } + // secret doesn't exist, nothing to do + s = nil + } else { + sOld := s.DeepCopy() + if removeFinalizers(s, finalizersToRemoveFromSecret...) { + if err := platformClusterClient.Patch(ctx, s, client.MergeFrom(sOld)); err != nil { + return fmt.Errorf("unable to remove finalizers from secret '%s/%s': %w", s.Namespace, s.Name, err) + } + } + } + } + + if s != nil { + if err := platformClusterClient.Delete(ctx, s); client.IgnoreNotFound(err) != nil { + return fmt.Errorf("unable to delete secret '%s/%s': %w", s.Namespace, s.Name, err) + } + } + } + + // remove finalizers from AccessRequest, if any + if len(finalizersToRemoveFromAccessRequest) > 0 { + arOld := ar.DeepCopy() + if removeFinalizers(ar, finalizersToRemoveFromAccessRequest...) { + if err := platformClusterClient.Patch(ctx, ar, client.MergeFrom(arOld)); err != nil { + return fmt.Errorf("unable to remove finalizers from AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err) + } + } + } + + return nil + } +} + +// removeFinalizers takes an object and a list of finalizers to remove from that object. +// If the list contains "*", all finalizers will be removed. 
+// It updates the object in-place and returns an indicator whether any finalizers were removed. +func removeFinalizers(obj client.Object, finalizersToRemove ...string) bool { + if obj == nil || len(obj.GetFinalizers()) == 0 || len(finalizersToRemove) == 0 { + return false + } + + changed := false + if slices.Contains(finalizersToRemove, "*") { + obj.SetFinalizers(nil) + changed = true + } else { + for _, ftr := range finalizersToRemove { + if controllerutil.RemoveFinalizer(obj, ftr) { + changed = true + } + } + } + return changed +} From 29e14db8c7df77b99e0823920043b93f9fcbd050 Mon Sep 17 00:00:00 2001 From: Johannes Aubart Date: Tue, 30 Sep 2025 16:09:31 +0200 Subject: [PATCH 03/12] add label conflict detection --- lib/clusteraccess/advanced/clusteraccess.go | 170 ++++++++++++++------ lib/utils/utils.go | 2 +- 2 files changed, 121 insertions(+), 51 deletions(-) diff --git a/lib/clusteraccess/advanced/clusteraccess.go b/lib/clusteraccess/advanced/clusteraccess.go index 3d3e2b7..4912a99 100644 --- a/lib/clusteraccess/advanced/clusteraccess.go +++ b/lib/clusteraccess/advanced/clusteraccess.go @@ -43,7 +43,7 @@ type ClusterAccessReconciler interface { // Note that the implementation might depend on these labels to identify the resources it created, // so changing them might lead to unexpected behavior. They also must be unique within the context of this reconciler. // Use with caution. - WithManagedLabels(managedBy, managedPurpose string) ClusterAccessReconciler + WithManagedLabels(gen ManagedLabelGenerator) ClusterAccessReconciler // Access returns an internal Cluster object granting access to the cluster for the specified request with the specified id. // Will fail if the cluster is not registered or no AccessRequest is registered for the cluster, or if some other error occurs. @@ -101,6 +101,13 @@ type ClusterAccessReconciler interface { // - access is the access to the cluster, if already retrieved. type FakingCallback func(ctx context.Context, platformClusterClient client.Client, key string, req *reconcile.Request, cr *clustersv1alpha1.ClusterRequest, ar *clustersv1alpha1.AccessRequest, c *clustersv1alpha1.Cluster, access *clusters.Cluster) error +// ManagedLabelGenerator is a function that generates the managed-by and managed-purpose labels for the created resources. +type ManagedLabelGenerator func(controllerName string, req reconcile.Request, reg ClusterRegistration) (string, string) + +func DefaultManagedLabelGenerator(controllerName string, req reconcile.Request, reg ClusterRegistration) (string, string) { + return controllerName, fmt.Sprintf("%s.%s.%s", req.Namespace, req.Name, reg.ID()) +} + type ClusterRegistration interface { // ID is the unique identifier for the cluster. 
ID() string @@ -312,10 +319,9 @@ type reconcilerImpl struct { controllerName string platformClusterClient client.Client - interval time.Duration - registrations map[string]ClusterRegistration - managedBy string - managedPurpose string + interval time.Duration + registrations map[string]ClusterRegistration + managedBy ManagedLabelGenerator fakingCallbacks map[string]FakingCallback } @@ -329,6 +335,7 @@ func NewClusterAccessReconciler(platformClusterClient client.Client, controllerN platformClusterClient: platformClusterClient, interval: 5 * time.Second, registrations: map[string]ClusterRegistration{}, + managedBy: DefaultManagedLabelGenerator, } } @@ -475,17 +482,10 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques if suffix == "" { suffix = reg.ID() } + managedBy, managedPurpose := r.managedBy(r.controllerName, request, reg) expectedLabels := map[string]string{} - if r.managedBy != "" { - expectedLabels[openmcpconst.ManagedByLabel] = r.managedBy - } else { - expectedLabels[openmcpconst.ManagedByLabel] = r.controllerName - } - if r.managedPurpose != "" { - expectedLabels[openmcpconst.ManagedPurposeLabel] = r.managedPurpose - } else { - expectedLabels[openmcpconst.ManagedPurposeLabel] = fmt.Sprintf("%s.%s.%s", request.Namespace, request.Name, reg.ID()) - } + expectedLabels[openmcpconst.ManagedByLabel] = managedBy + expectedLabels[openmcpconst.ManagedPurposeLabel] = managedPurpose // ensure namespace exists namespace, err := reg.Namespace(request.Name, request.Namespace) @@ -528,6 +528,15 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques if err := r.platformClusterClient.Create(ctx, cr); err != nil { return reconcile.Result{}, fmt.Errorf("unable to create ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err) } + } else { + for k, v := range expectedLabels { + if v2, ok := cr.Labels[k]; !ok || v2 != v { + if !ok { + v2 = "" + } + return reconcile.Result{}, fmt.Errorf("ClusterRequest '%s/%s' already exists but is not managed by this controller (label '%s' is expected to have value '%s', but is '%s')", cr.Namespace, cr.Name, k, v, v2) + } + } } if !cr.Status.IsGranted() { rlog.Info("Waiting for ClusterRequest to be granted", "crName", cr.Name, "crNamespace", cr.Namespace) @@ -573,6 +582,14 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques return reconcile.Result{}, fmt.Errorf("no ClusterRequestSpec, ClusterReference or ClusterRequestReference specified for registration with id '%s'", reg.ID()) } } else { + for k, v := range expectedLabels { + if v2, ok := ar.Labels[k]; !ok || v2 != v { + if !ok { + v2 = "" + } + return reconcile.Result{}, fmt.Errorf("AccessRequest '%s/%s' already exists but is not managed by this controller (label '%s' is expected to have value '%s', but is '%s')", ar.Namespace, ar.Name, k, v, v2) + } + } rlog.Debug("Updating AccessRequest", "arName", ar.Name, "arNamespace", ar.Namespace) } if _, err := controllerutil.CreateOrUpdate(ctx, r.platformClusterClient, ar, func() error { @@ -614,17 +631,10 @@ func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile. 
if suffix == "" { suffix = reg.ID() } + managedBy, managedPurpose := r.managedBy(r.controllerName, request, reg) expectedLabels := map[string]string{} - if r.managedBy != "" { - expectedLabels[openmcpconst.ManagedByLabel] = r.managedBy - } else { - expectedLabels[openmcpconst.ManagedByLabel] = r.controllerName - } - if r.managedPurpose != "" { - expectedLabels[openmcpconst.ManagedPurposeLabel] = r.managedPurpose - } else { - expectedLabels[openmcpconst.ManagedPurposeLabel] = fmt.Sprintf("%s.%s.%s", request.Namespace, request.Name, reg.ID()) - } + expectedLabels[openmcpconst.ManagedByLabel] = managedBy + expectedLabels[openmcpconst.ManagedPurposeLabel] = managedPurpose namespace, err := reg.Namespace(request.Name, request.Namespace) if err != nil { @@ -644,21 +654,32 @@ func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile. } rlog.Debug("AccessRequest already deleted", "arName", ar.Name, "arNamespace", ar.Namespace) } else { - if ar.DeletionTimestamp.IsZero() { - rlog.Info("Deleting AccessRequest", "arName", ar.Name, "arNamespace", ar.Namespace) - if err := r.platformClusterClient.Delete(ctx, ar); err != nil { - return reconcile.Result{}, fmt.Errorf("unable to delete AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err) + managed := true + for k, v := range expectedLabels { + if v2, ok := ar.Labels[k]; !ok || v2 != v { + managed = false + break } - } else { - rlog.Info("Waiting for AccessRequest to be deleted", "arName", ar.Name, "arNamespace", ar.Namespace) } - if fc := r.fakingCallbacks[FakingCallback_WaitingForAccessRequestDeletion]; fc != nil { - rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForAccessRequestDeletion) - if err := fc(ctx, r.platformClusterClient, FakingCallback_WaitingForAccessRequestDeletion, &request, nil, ar, nil, nil); err != nil { - return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForAccessRequestDeletion, err) + if !managed { + rlog.Info("Skipping AccessRequest '%s/%s' deletion because it is not managed by this controller", ar.Namespace, ar.Name) + } else { + if ar.DeletionTimestamp.IsZero() { + rlog.Info("Deleting AccessRequest", "arName", ar.Name, "arNamespace", ar.Namespace) + if err := r.platformClusterClient.Delete(ctx, ar); err != nil { + return reconcile.Result{}, fmt.Errorf("unable to delete AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err) + } + } else { + rlog.Info("Waiting for AccessRequest to be deleted", "arName", ar.Name, "arNamespace", ar.Namespace) } + if fc := r.fakingCallbacks[FakingCallback_WaitingForAccessRequestDeletion]; fc != nil { + rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForAccessRequestDeletion) + if err := fc(ctx, r.platformClusterClient, FakingCallback_WaitingForAccessRequestDeletion, &request, nil, ar, nil, nil); err != nil { + return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForAccessRequestDeletion, err) + } + } + return reconcile.Result{RequeueAfter: r.interval}, nil } - return reconcile.Result{RequeueAfter: r.interval}, nil } } @@ -675,21 +696,32 @@ func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile. 
} rlog.Debug("ClusterRequest already deleted", "crName", cr.Name, "crNamespace", cr.Namespace) } else { - if cr.DeletionTimestamp.IsZero() { - rlog.Info("Deleting ClusterRequest", "crName", cr.Name, "crNamespace", cr.Namespace) - if err := r.platformClusterClient.Delete(ctx, cr); err != nil { - return reconcile.Result{}, fmt.Errorf("unable to delete ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err) + managed := true + for k, v := range expectedLabels { + if v2, ok := cr.Labels[k]; !ok || v2 != v { + managed = false + break } - } else { - rlog.Info("Waiting for ClusterRequest to be deleted", "crName", cr.Name, "crNamespace", cr.Namespace) } - if fc := r.fakingCallbacks[FakingCallback_WaitingForClusterRequestDeletion]; fc != nil { - rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForClusterRequestDeletion) - if err := fc(ctx, r.platformClusterClient, FakingCallback_WaitingForClusterRequestDeletion, &request, cr, nil, nil, nil); err != nil { - return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForClusterRequestDeletion, err) + if !managed { + rlog.Info("Skipping ClusterRequest '%s/%s' deletion because it is not managed by this controller", cr.Namespace, cr.Name) + } else { + if cr.DeletionTimestamp.IsZero() { + rlog.Info("Deleting ClusterRequest", "crName", cr.Name, "crNamespace", cr.Namespace) + if err := r.platformClusterClient.Delete(ctx, cr); err != nil { + return reconcile.Result{}, fmt.Errorf("unable to delete ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err) + } + } else { + rlog.Info("Waiting for ClusterRequest to be deleted", "crName", cr.Name, "crNamespace", cr.Namespace) } + if fc := r.fakingCallbacks[FakingCallback_WaitingForClusterRequestDeletion]; fc != nil { + rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForClusterRequestDeletion) + if err := fc(ctx, r.platformClusterClient, FakingCallback_WaitingForClusterRequestDeletion, &request, cr, nil, nil, nil); err != nil { + return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForClusterRequestDeletion, err) + } + } + return reconcile.Result{RequeueAfter: r.interval}, nil } - return reconcile.Result{RequeueAfter: r.interval}, nil } } @@ -707,9 +739,11 @@ func (r *reconcilerImpl) Register(reg ClusterRegistration) ClusterAccessReconcil } // WithManagedLabels implements Reconciler. 
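+//
+// Illustrative sketch (not part of this patch; 'rec' is a placeholder for a
+// ClusterAccessReconciler): a custom ManagedLabelGenerator can derive both label values
+// from the reconcile request and the registration; the generated values still have to
+// stay unique per registration, as required by the WithManagedLabels documentation:
+//
+//	rec = rec.WithManagedLabels(func(controllerName string, req reconcile.Request, reg ClusterRegistration) (string, string) {
+//		return controllerName, "tenant." + req.Namespace + "." + req.Name + "." + reg.ID()
+//	})
+//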
-func (r *reconcilerImpl) WithManagedLabels(managedBy string, managedPurpose string) ClusterAccessReconciler { - r.managedBy = managedBy - r.managedPurpose = managedPurpose +func (r *reconcilerImpl) WithManagedLabels(gen ManagedLabelGenerator) ClusterAccessReconciler { + if gen == nil { + gen = DefaultManagedLabelGenerator + } + r.managedBy = gen return r } @@ -880,6 +914,38 @@ func FakeAccessRequestReadiness(kcfgData []byte) FakingCallback { return nil } + // if a ClusterRequest is referenced, but no Cluster, try to identify the Cluster + if ar.Spec.ClusterRef == nil { + old := ar.DeepCopy() + if c != nil { + ar.Spec.ClusterRef = &commonapi.ObjectReference{ + Name: c.Name, + Namespace: c.Namespace, + } + if err := platformClusterClient.Patch(ctx, ar, client.MergeFrom(old)); err != nil { + return fmt.Errorf("unable to update spec of AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err) + } + } else if cr != nil && cr.Status.Cluster != nil { + ar.Spec.ClusterRef = cr.Status.Cluster.DeepCopy() + if err := platformClusterClient.Patch(ctx, ar, client.MergeFrom(old)); err != nil { + return fmt.Errorf("unable to update spec of AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err) + } + } else if ar.Spec.RequestRef != nil { + cr2 := &clustersv1alpha1.ClusterRequest{} + cr2.Name = ar.Spec.RequestRef.Name + cr2.Namespace = ar.Spec.RequestRef.Namespace + if err := platformClusterClient.Get(ctx, client.ObjectKeyFromObject(cr2), cr2); err == nil { + if cr2.Status.Cluster != nil { + old := ar.DeepCopy() + ar.Spec.ClusterRef = cr2.Status.Cluster.DeepCopy() + if err := platformClusterClient.Patch(ctx, ar, client.MergeFrom(old)); err != nil { + return fmt.Errorf("unable to update spec of AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err) + } + } + } + } + } + // create secret if len(kcfgData) == 0 { kcfgData = []byte("fake") @@ -922,6 +988,8 @@ func FakeAccessRequestReadiness(kcfgData []byte) FakingCallback { // The ClusterRequest itself is not deleted by this callback, only finalizers are removed. // // The returned callback is meant to be used with the key stored in FakingCallback_WaitingForClusterRequestDeletion. +// +//nolint:dupl func FakeClusterRequestDeletion(deleteCluster bool, finalizersToRemoveFromClusterRequest, finalizersToRemoveFromCluster []string) FakingCallback { return func(ctx context.Context, platformClusterClient client.Client, key string, _ *reconcile.Request, cr *clustersv1alpha1.ClusterRequest, _ *clustersv1alpha1.AccessRequest, _ *clustersv1alpha1.Cluster, _ *clusters.Cluster) error { if cr == nil { @@ -981,6 +1049,8 @@ func FakeClusterRequestDeletion(deleteCluster bool, finalizersToRemoveFromCluste // The AccessRequest itself is not deleted by this callback, only finalizers are removed. // // The returned callback is meant to be used with the key stored in FakingCallback_WaitingForAccessRequestDeletion. 
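+//
+// Illustrative sketch (not part of this patch; 'rec' is a placeholder for a
+// ClusterAccessReconciler): unit tests typically register both deletion fakes with the
+// "*" wildcard so that all finalizers are stripped and the faked objects can actually
+// be removed:
+//
+//	rec = rec.
+//		WithFakingCallback(FakingCallback_WaitingForAccessRequestDeletion, FakeAccessRequestDeletion([]string{"*"}, []string{"*"})).
+//		WithFakingCallback(FakingCallback_WaitingForClusterRequestDeletion, FakeClusterRequestDeletion(true, []string{"*"}, []string{"*"}))
+//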
+// +//nolint:dupl func FakeAccessRequestDeletion(finalizersToRemoveFromAccessRequest, finalizersToRemoveFromSecret []string) FakingCallback { return func(ctx context.Context, platformClusterClient client.Client, key string, _ *reconcile.Request, _ *clustersv1alpha1.ClusterRequest, ar *clustersv1alpha1.AccessRequest, _ *clustersv1alpha1.Cluster, _ *clusters.Cluster) error { if ar == nil { diff --git a/lib/utils/utils.go b/lib/utils/utils.go index 1ee10a2..8449ebc 100644 --- a/lib/utils/utils.go +++ b/lib/utils/utils.go @@ -49,7 +49,7 @@ func StableRequestNameOnboarding(onboardingName, controllerName string) string { return fmt.Sprint(prefixOnboarding, controller.K8sNameHash(onboardingName, controllerName)) } -// StableMCPNamespace computes the namespace on the onboarding cluster that belongs to the given MCP. +// StableMCPNamespace computes the namespace on the platform cluster that belongs to the given MCP. // onboardingName and onboardingNamespace are name and namespace of the MCP resource on the onboarding cluster. func StableMCPNamespace(onboardingName, onboardingNamespace string) (string, error) { res, err := controller.K8sNameUUID(onboardingNamespace, onboardingName) From 7219f578f06b9f8129c1dc3fc12ee45170ff2e1c Mon Sep 17 00:00:00 2001 From: Johannes Aubart Date: Tue, 30 Sep 2025 16:09:47 +0200 Subject: [PATCH 04/12] add unit tests for advanced cluster access reconciler --- .../advanced/clusteraccess_test.go | 474 ++++++++++++++++++ .../advanced/testdata/test-01/cluster-01.yaml | 12 + .../advanced/testdata/test-01/cr-01.yaml | 12 + .../testdata/test-02/ar-conflict.yaml | 6 + .../testdata/test-02/cr-conflict.yaml | 7 + 5 files changed, 511 insertions(+) create mode 100644 lib/clusteraccess/advanced/clusteraccess_test.go create mode 100644 lib/clusteraccess/advanced/testdata/test-01/cluster-01.yaml create mode 100644 lib/clusteraccess/advanced/testdata/test-01/cr-01.yaml create mode 100644 lib/clusteraccess/advanced/testdata/test-02/ar-conflict.yaml create mode 100644 lib/clusteraccess/advanced/testdata/test-02/cr-conflict.yaml diff --git a/lib/clusteraccess/advanced/clusteraccess_test.go b/lib/clusteraccess/advanced/clusteraccess_test.go new file mode 100644 index 0000000..b75f4d0 --- /dev/null +++ b/lib/clusteraccess/advanced/clusteraccess_test.go @@ -0,0 +1,474 @@ +package advanced_test + +import ( + "context" + "fmt" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" + "github.com/onsi/gomega/types" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + testutils "github.com/openmcp-project/controller-utils/pkg/testing" + + clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" + commonapi "github.com/openmcp-project/openmcp-operator/api/common" + openmcpconst "github.com/openmcp-project/openmcp-operator/api/constants" + "github.com/openmcp-project/openmcp-operator/api/install" + "github.com/openmcp-project/openmcp-operator/lib/clusteraccess/advanced" + libutils "github.com/openmcp-project/openmcp-operator/lib/utils" +) + +func TestAdvancedClusterAccess(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Advanced Cluster Access Test Suite") +} + +func defaultTestSetup(testDirPathSegments ...string) *testutils.Environment { + scheme := install.InstallOperatorAPIsPlatform(runtime.NewScheme()) + env := testutils.NewEnvironmentBuilder(). + WithFakeClient(scheme). + WithInitObjectPath(testDirPathSegments...). + WithDynamicObjectsWithStatus(&clustersv1alpha1.ClusterRequest{}, &clustersv1alpha1.AccessRequest{}). + Build() + + return env +} + +func defaultClusterAccessReconciler(env *testutils.Environment, controllerName string) advanced.ClusterAccessReconciler { + return advanced.NewClusterAccessReconciler(env.Client(), controllerName). + WithRetryInterval(100*time.Millisecond). + WithFakingCallback(advanced.FakingCallback_WaitingForAccessRequestDeletion, advanced.FakeAccessRequestDeletion([]string{"*"}, []string{"*"})). + WithFakingCallback(advanced.FakingCallback_WaitingForClusterRequestDeletion, advanced.FakeClusterRequestDeletion(true, []string{"*"}, []string{"*"})). + WithFakingCallback(advanced.FakingCallback_WaitingForClusterRequestReadiness, advanced.FakeClusterRequestReadiness(&clustersv1alpha1.ClusterSpec{ + Profile: "my-profile", + Purposes: []string{"test"}, + Tenancy: clustersv1alpha1.TENANCY_EXCLUSIVE, + })). 
+ WithFakingCallback(advanced.FakingCallback_WaitingForAccessRequestReadiness, advanced.FakeAccessRequestReadiness(nil)) +} + +var _ = Describe("Advanced Cluster Access", func() { + + It("should create and delete a ClusterRequest, if configured", func() { + env := defaultTestSetup("testdata", "test-01") + rec := defaultClusterAccessReconciler(env, "foo") + customNamespace := "custom-namespace" + rec.Register(advanced.NewCluster("foobar", "fb", "test", true).Build()) // example 1, without access request + rec.Register(advanced.NewCluster("foobaz", "fz", "test", false).WithNamespaceGenerator(func(requestName, requestNamespace string) (string, error) { + return customNamespace, nil + }).WithTokenAccess(&clustersv1alpha1.TokenConfig{}).Build()) // example 2, with access request + req := testutils.RequestFromStrings("bar", "baz") + + Eventually(expectNoRequeue(env.Ctx, rec, req, false)).Should(Succeed()) + + // EXAMPLE 1 + // check namespace + namespace, err := libutils.StableMCPNamespace(req.Name, req.Namespace) + Expect(err).ToNot(HaveOccurred()) + ns := &corev1.Namespace{} + ns.Name = namespace + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ns), ns)).To(Succeed()) + + // check AccessRequest + _, err = rec.AccessRequest(env.Ctx, req, "foobar") + Expect(err).To(HaveOccurred()) // no access was requested, so this should fail + + // check ClusterRequest + cr, err := rec.ClusterRequest(env.Ctx, req, "foobar") + Expect(err).ToNot(HaveOccurred()) + Expect(cr.Name).To(Equal(advanced.StableRequestName("foo", req, "fb"))) + Expect(cr.Namespace).To(Equal(namespace)) + Expect(cr.Labels).To(HaveKeyWithValue(openmcpconst.ManagedByLabel, "foo")) + Expect(cr.Labels).To(HaveKeyWithValue(openmcpconst.ManagedPurposeLabel, req.Namespace+"."+req.Name+".foobar")) + Expect(cr.Spec.Purpose).To(Equal("test")) + Expect(cr.Spec.WaitForClusterDeletion).To(PointTo(BeTrue())) + crCopy := cr.DeepCopy() + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed()) + Expect(cr).To(Equal(crCopy)) // the method should have returned the up-to-date object + + // check Cluster + c, err := rec.Cluster(env.Ctx, req, "foobar") + Expect(err).ToNot(HaveOccurred()) + Expect(c.Name).To(Equal(cr.Status.Cluster.Name)) + Expect(c.Namespace).To(Equal(cr.Status.Cluster.Namespace)) + Expect(c.Spec.Purposes).To(ContainElement("test")) + cCopy := c.DeepCopy() + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c), c)).To(Succeed()) + Expect(c).To(Equal(cCopy)) // the method should have returned the up-to-date object + + // check cluster access + // cannot be tested at the moment, because the corresponding library expects 'real' kubeconfigs + + // EXAMPLE 2 + // check namespace + ns2 := &corev1.Namespace{} + ns2.Name = customNamespace + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ns2), ns2)).To(Succeed()) + + // check AccessRequest + ar2, err := rec.AccessRequest(env.Ctx, req, "foobaz") + Expect(err).ToNot(HaveOccurred()) + Expect(ar2.Name).To(Equal(advanced.StableRequestName("foo", req, "fz"))) + Expect(ar2.Namespace).To(Equal(customNamespace)) + Expect(ar2.Labels).To(HaveKeyWithValue(openmcpconst.ManagedByLabel, "foo")) + Expect(ar2.Labels).To(HaveKeyWithValue(openmcpconst.ManagedPurposeLabel, req.Namespace+"."+req.Name+".foobaz")) + Expect(ar2.Spec.RequestRef.Name).To(Equal(advanced.StableRequestName("foo", req, "fz"))) + Expect(ar2.Spec.RequestRef.Namespace).To(Equal(customNamespace)) + arCopy := ar2.DeepCopy() + Expect(env.Client().Get(env.Ctx, 
client.ObjectKeyFromObject(ar2), ar2)).To(Succeed()) + Expect(ar2).To(Equal(arCopy)) // the method should have returned the up-to-date object + + // check ClusterRequest + cr2, err := rec.ClusterRequest(env.Ctx, req, "foobaz") + Expect(err).ToNot(HaveOccurred()) + Expect(cr2.Name).To(Equal(ar2.Spec.RequestRef.Name)) + Expect(cr2.Namespace).To(Equal(ar2.Spec.RequestRef.Namespace)) + Expect(cr2.Labels).To(HaveKeyWithValue(openmcpconst.ManagedByLabel, "foo")) + Expect(cr2.Labels).To(HaveKeyWithValue(openmcpconst.ManagedPurposeLabel, req.Namespace+"."+req.Name+".foobaz")) + Expect(cr2.Spec.Purpose).To(Equal("test")) + cr2Copy := cr2.DeepCopy() + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr2), cr2)).To(Succeed()) + Expect(cr2).To(Equal(cr2Copy)) // the method should have returned the up-to-date object + + // check Cluster + c2, err := rec.Cluster(env.Ctx, req, "foobaz") + Expect(err).ToNot(HaveOccurred()) + Expect(c2.Name).To(Equal(cr2.Status.Cluster.Name)) + Expect(c2.Namespace).To(Equal(cr2.Status.Cluster.Namespace)) + Expect(c2.Spec.Purposes).To(ContainElement("test")) + c2Copy := c2.DeepCopy() + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c2), c2)).To(Succeed()) + Expect(c2).To(Equal(c2Copy)) // the method should have returned the up-to-date object + + // check cluster access + // cannot be tested at the moment, because the corresponding library expects 'real' kubeconfigs + + // delete everything again, except for namespaces + Eventually(expectNoRequeue(env.Ctx, rec, req, true)).Should(Succeed()) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ns), ns)).To(Succeed()) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(MatchError(apierrors.IsNotFound, "IsNotFound")) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c), c)).To(MatchError(apierrors.IsNotFound, "IsNotFound")) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ns2), ns2)).To(Succeed()) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar2), ar2)).To(MatchError(apierrors.IsNotFound, "IsNotFound")) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr2), cr2)).To(MatchError(apierrors.IsNotFound, "IsNotFound")) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c2), c2)).To(MatchError(apierrors.IsNotFound, "IsNotFound")) + }) + + It("should create and delete an AccessRequest from a ClusterRequest, if configured", func() { + env := defaultTestSetup("testdata", "test-01") + rec := defaultClusterAccessReconciler(env, "foo") + permission := clustersv1alpha1.PermissionsRequest{ + Name: "my-custom-admin", + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"*"}, + Resources: []string{"*"}, + Verbs: []string{"*"}, + }, + }, + } + roleRef := commonapi.RoleRef{ + Name: "cluster-admin", + Kind: "ClusterRole", + } + rec.Register(advanced.ExistingClusterRequest("foobar", "fb", "cr-01", "test").WithTokenAccess(&clustersv1alpha1.TokenConfig{ + Permissions: []clustersv1alpha1.PermissionsRequest{permission}, + RoleRefs: []commonapi.RoleRef{roleRef}, + }).Build()) + req := testutils.RequestFromStrings("bar", "baz") + + Eventually(expectNoRequeue(env.Ctx, rec, req, false)).Should(Succeed()) + + // check AccessRequest + ar, err := rec.AccessRequest(env.Ctx, req, "foobar") + Expect(err).ToNot(HaveOccurred()) + Expect(ar.Name).To(Equal(advanced.StableRequestName("foo", req, "fb"))) + namespace, err := libutils.StableMCPNamespace(req.Name, req.Namespace) + Expect(err).ToNot(HaveOccurred()) + 
Expect(ar.Namespace).To(Equal(namespace)) + Expect(ar.Labels).To(HaveKeyWithValue(openmcpconst.ManagedByLabel, "foo")) + Expect(ar.Labels).To(HaveKeyWithValue(openmcpconst.ManagedPurposeLabel, req.Namespace+"."+req.Name+".foobar")) + Expect(ar.Spec.RequestRef.Name).To(Equal("cr-01")) + Expect(ar.Spec.RequestRef.Namespace).To(Equal("test")) + Expect(ar.Spec.ClusterRef.Name).To(Equal("cluster-01")) + Expect(ar.Spec.ClusterRef.Namespace).To(Equal("test")) + Expect(ar.Spec.OIDC).To(BeNil()) + Expect(ar.Spec.Token).ToNot(BeNil()) + Expect(ar.Spec.Token.Permissions).To(ConsistOf(permission)) + Expect(ar.Spec.Token.RoleRefs).To(ConsistOf(roleRef)) + arCopy := ar.DeepCopy() + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar).To(Equal(arCopy)) // the method should have returned the up-to-date object + + // check ClusterRequest + cr, err := rec.ClusterRequest(env.Ctx, req, "foobar") + Expect(err).ToNot(HaveOccurred()) + Expect(cr.Name).To(Equal(ar.Spec.RequestRef.Name)) + Expect(cr.Namespace).To(Equal(ar.Spec.RequestRef.Namespace)) + Expect(cr.Status.Cluster.Name).To(Equal(ar.Spec.ClusterRef.Name)) + Expect(cr.Status.Cluster.Namespace).To(Equal(ar.Spec.ClusterRef.Namespace)) + crCopy := cr.DeepCopy() + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed()) + Expect(cr).To(Equal(crCopy)) // the method should have returned the up-to-date object + + // check Cluster + c, err := rec.Cluster(env.Ctx, req, "foobar") + Expect(err).ToNot(HaveOccurred()) + Expect(c.Name).To(Equal(cr.Status.Cluster.Name)) + Expect(c.Namespace).To(Equal(cr.Status.Cluster.Namespace)) + cCopy := c.DeepCopy() + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c), c)).To(Succeed()) + Expect(c).To(Equal(cCopy)) // the method should have returned the up-to-date object + + // check cluster access + // cannot be tested at the moment, because the corresponding library expects 'real' kubeconfigs + + // delete everything again + // ClusterRequest and Cluster were not created by this library, so they should not be deleted + Eventually(expectNoRequeue(env.Ctx, rec, req, true)).Should(Succeed()) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(MatchError(apierrors.IsNotFound, "IsNotFound")) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed()) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c), c)).To(Succeed()) + }) + + It("should create and delete an AccessRequest from a Cluster, if configured", func() { + env := defaultTestSetup("testdata", "test-01") + rec := defaultClusterAccessReconciler(env, "foo") + oidc := &clustersv1alpha1.OIDCConfig{ + OIDCProviderConfig: commonapi.OIDCProviderConfig{ + Name: "my-oidc", + Issuer: "https://example.com/oidc", + ClientID: "my-client-id", + UsernameClaim: "email", + GroupsClaim: "groups", + ExtraScopes: []string{"scope1", "scope2"}, + RoleBindings: []commonapi.RoleBindings{ + { + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.UserKind, + Name: "user1", + }, + }, + RoleRefs: []commonapi.RoleRef{ + { + Name: "my-cluster-admin", + Kind: "ClusterRole", + }, + }, + }, + }, + }, + Roles: []clustersv1alpha1.PermissionsRequest{ + { + Name: "my-custom-admin", + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"*"}, + Resources: []string{"*"}, + Verbs: []string{"*"}, + }, + }, + }, + }, + } + rec.Register(advanced.ExistingCluster("foobar", "fb", "cluster-01", "test").WithOIDCAccess(oidc).Build()) + req := testutils.RequestFromStrings("bar", "baz") + 
+ Eventually(expectNoRequeue(env.Ctx, rec, req, false)).Should(Succeed()) + + // check AccessRequest + ar, err := rec.AccessRequest(env.Ctx, req, "foobar") + Expect(err).ToNot(HaveOccurred()) + Expect(ar.Name).To(Equal(advanced.StableRequestName("foo", req, "fb"))) + namespace, err := libutils.StableMCPNamespace(req.Name, req.Namespace) + Expect(err).ToNot(HaveOccurred()) + Expect(ar.Namespace).To(Equal(namespace)) + Expect(ar.Labels).To(HaveKeyWithValue(openmcpconst.ManagedByLabel, "foo")) + Expect(ar.Labels).To(HaveKeyWithValue(openmcpconst.ManagedPurposeLabel, req.Namespace+"."+req.Name+".foobar")) + Expect(ar.Spec.ClusterRef.Name).To(Equal("cluster-01")) + Expect(ar.Spec.ClusterRef.Namespace).To(Equal("test")) + Expect(ar.Spec.Token).To(BeNil()) + Expect(ar.Spec.OIDC).ToNot(BeNil()) + Expect(ar.Spec.OIDC).To(Equal(oidc)) + arCopy := ar.DeepCopy() + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + Expect(ar).To(Equal(arCopy)) // the method should have returned the up-to-date object + + // check ClusterRequest + _, err = rec.ClusterRequest(env.Ctx, req, "foobar") + Expect(err).To(HaveOccurred()) // no ClusterRequest is involved, so this should fail + + // check Cluster + c, err := rec.Cluster(env.Ctx, req, "foobar") + Expect(err).ToNot(HaveOccurred()) + Expect(c.Name).To(Equal(ar.Spec.ClusterRef.Name)) + Expect(c.Namespace).To(Equal(ar.Spec.ClusterRef.Namespace)) + cCopy := c.DeepCopy() + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c), c)).To(Succeed()) + Expect(c).To(Equal(cCopy)) // the method should have returned the up-to-date object + + // check cluster access + // cannot be tested at the moment, because the corresponding library expects 'real' kubeconfigs + + // delete everything again + // Cluster was not created by this library, so it should not be deleted + Eventually(expectNoRequeue(env.Ctx, rec, req, true)).Should(Succeed()) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(MatchError(apierrors.IsNotFound, "IsNotFound")) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(c), c)).To(Succeed()) + }) + + It("should take overwritten labels into account", func() { + env := defaultTestSetup("testdata", "test-01") + rec := defaultClusterAccessReconciler(env, "foo").WithManagedLabels(func(controllerName string, req reconcile.Request, reg advanced.ClusterRegistration) (string, string) { + return "my-managed-by", "my-purpose" + }) + rec.Register(advanced.ExistingClusterRequest("foobar", "fb", "cr-01", "test").WithTokenAccess(&clustersv1alpha1.TokenConfig{}).Build()) + req := testutils.RequestFromStrings("bar", "baz") + + Eventually(expectNoRequeue(env.Ctx, rec, req, false)).Should(Succeed()) + + // check AccessRequest + ar, err := rec.AccessRequest(env.Ctx, req, "foobar") + Expect(err).ToNot(HaveOccurred()) + Expect(ar.Name).To(Equal(advanced.StableRequestName("foo", req, "fb"))) + namespace, err := libutils.StableMCPNamespace(req.Name, req.Namespace) + Expect(err).ToNot(HaveOccurred()) + Expect(ar.Namespace).To(Equal(namespace)) + Expect(ar.Labels).To(HaveKeyWithValue(openmcpconst.ManagedByLabel, "my-managed-by")) + Expect(ar.Labels).To(HaveKeyWithValue(openmcpconst.ManagedPurposeLabel, "my-purpose")) + }) + + It("should requeue if the ClusterRequest or AccessRequest is not yet ready", func() { + env := defaultTestSetup("testdata", "test-01") + rec := defaultClusterAccessReconciler(env, "foo") + rec.Register(advanced.NewCluster("foobar", "fb", "test", 
true).WithTokenAccess(&clustersv1alpha1.TokenConfig{}).Build()) + req := testutils.RequestFromStrings("bar", "baz") + + // first requeue due to ClusterRequest + expectRequeue(env.Ctx, rec, req, false) + // ClusterRequest will be mocked ready now, another requeue for AccessRequest is expected + expectRequeue(env.Ctx, rec, req, false) + expectNoRequeue(env.Ctx, rec, req, false) // now everything should be ready + + // deleting should requeue due to AccessRequest deletion + expectRequeue(env.Ctx, rec, req, true) + // AccessRequest will be mocked deleted now, another requeue for ClusterRequest deletion is expected + expectRequeue(env.Ctx, rec, req, true) + expectNoRequeue(env.Ctx, rec, req, true) // now everything should be deleted + }) + + Context("Conflict Detection", func() { + + It("should correctly handle conflicts with existing ClusterRequests", func() { + env := defaultTestSetup("testdata", "test-02") + rec := defaultClusterAccessReconciler(env, "foo") + rec.Register(advanced.NewCluster("foobar", "fb", "test", true).Build()) + req := testutils.RequestFromStrings("cr-conflict", "baz") + + // ensure that the resource we are trying to conflict with actually exists + namespace, err := libutils.StableMCPNamespace(req.Name, req.Namespace) + Expect(err).ToNot(HaveOccurred()) + cr := &clustersv1alpha1.ClusterRequest{} + cr.Name = advanced.StableRequestName("foo", req, "fb") + cr.Namespace = namespace + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed(), "conflicting ClusterRequest '%s/%s' does not exist", cr.Namespace, cr.Name) + + Eventually(expectError(env.Ctx, rec, req, false, WithTransform(func(e error) string { return e.Error() }, And(ContainSubstring("already exists"), ContainSubstring("not managed"))))).Should(Succeed()) + + // deleting should not remove the ClusterRequest, because it is not managed by us + Eventually(expectNoRequeue(env.Ctx, rec, req, true)).Should(Succeed()) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed()) + }) + + It("should correctly handle conflicts with existing AccessRequests", func() { + env := defaultTestSetup("testdata", "test-02") + rec := defaultClusterAccessReconciler(env, "foo") + rec.Register(advanced.NewCluster("foobar", "fb", "test", true).WithTokenAccess(&clustersv1alpha1.TokenConfig{}).Build()) + req := testutils.RequestFromStrings("ar-conflict", "baz") + + // ensure that the resource we are trying to conflict with actually exists + namespace, err := libutils.StableMCPNamespace(req.Name, req.Namespace) + Expect(err).ToNot(HaveOccurred()) + ar := &clustersv1alpha1.AccessRequest{} + ar.Name = advanced.StableRequestName("foo", req, "fb") + ar.Namespace = namespace + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed(), "conflicting ClusterRequest '%s/%s' does not exist", ar.Namespace, ar.Name) + + Eventually(expectError(env.Ctx, rec, req, false, WithTransform(func(e error) string { return e.Error() }, And(ContainSubstring("already exists"), ContainSubstring("not managed"))))).Should(Succeed()) + + // deleting should not remove the AccessRequest, because it is not managed by us + Eventually(expectNoRequeue(env.Ctx, rec, req, true)).Should(Succeed()) + Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed()) + }) + + }) + +}) + +// expectRequeue runs the reconciler's Reconcile method (or ReconcileDelete, if del is true) with the given request and expects a requeueAfter duration greater than zero in the returned result. 
+// If match is non-empty, the first element is matched against the requeueAfter duration instead of just checking that it's greater than zero. Further elements are ignored. +// It fails if the reconcile returns an error. +// +//nolint:unused +func expectRequeue(ctx context.Context, rec advanced.ClusterAccessReconciler, req reconcile.Request, del bool, match ...types.GomegaMatcher) func(Gomega) { + return func(g Gomega) { + var res reconcile.Result + var err error + if del { + res, err = rec.ReconcileDelete(ctx, req) + } else { + res, err = rec.Reconcile(ctx, req) + } + g.Expect(err).ToNot(HaveOccurred()) + if len(match) > 0 { + g.Expect(res.RequeueAfter).To(match[0], fmt.Sprintf("expected requeueAfter to match %v", match[0])) + } else { + g.Expect(res.RequeueAfter).To(BeNumerically(">", 0), "expected requeueAfter > 0") + } + } +} + +// expectNoRequeue runs the reconciler's Reconcile method (or ReconcileDelete, if del is true) with the given request and expects no requeue in the returned result. +// It fails if the reconcile returns an error. +func expectNoRequeue(ctx context.Context, rec advanced.ClusterAccessReconciler, req reconcile.Request, del bool) func(Gomega) { + return func(g Gomega) { + var res reconcile.Result + var err error + if del { + res, err = rec.ReconcileDelete(ctx, req) + } else { + res, err = rec.Reconcile(ctx, req) + } + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.RequeueAfter).To(BeZero(), "expected requeueAfter to be zero") + } +} + +// expectError runs the reconciler's Reconcile method (or ReconcileDelete, if del is true) with the given request and expects an error to be returned. +// If match is non-empty, the first element is matched against the error instead of just checking that an error occurred. Further elements are ignored. 
+func expectError(ctx context.Context, rec advanced.ClusterAccessReconciler, req reconcile.Request, del bool, match ...types.GomegaMatcher) func(Gomega) { + return func(g Gomega) { + var err error + if del { + _, err = rec.ReconcileDelete(ctx, req) + } else { + _, err = rec.Reconcile(ctx, req) + } + g.Expect(err).To(HaveOccurred()) + if len(match) > 0 { + g.Expect(err).To(match[0], fmt.Sprintf("expected error to match %v", match[0])) + } + } +} diff --git a/lib/clusteraccess/advanced/testdata/test-01/cluster-01.yaml b/lib/clusteraccess/advanced/testdata/test-01/cluster-01.yaml new file mode 100644 index 0000000..fffb613 --- /dev/null +++ b/lib/clusteraccess/advanced/testdata/test-01/cluster-01.yaml @@ -0,0 +1,12 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: Cluster +metadata: + name: cluster-01 + namespace: test +spec: + profile: my-profile + kubernetes: + version: "1.32" + purposes: + - test + tenancy: Exclusive diff --git a/lib/clusteraccess/advanced/testdata/test-01/cr-01.yaml b/lib/clusteraccess/advanced/testdata/test-01/cr-01.yaml new file mode 100644 index 0000000..89ec3f2 --- /dev/null +++ b/lib/clusteraccess/advanced/testdata/test-01/cr-01.yaml @@ -0,0 +1,12 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: ClusterRequest +metadata: + name: cr-01 + namespace: test +spec: + purpose: test +status: + phase: Granted + cluster: + name: cluster-01 + namespace: test diff --git a/lib/clusteraccess/advanced/testdata/test-02/ar-conflict.yaml b/lib/clusteraccess/advanced/testdata/test-02/ar-conflict.yaml new file mode 100644 index 0000000..fd2454a --- /dev/null +++ b/lib/clusteraccess/advanced/testdata/test-02/ar-conflict.yaml @@ -0,0 +1,6 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: AccessRequest +metadata: + name: foo--ar-conflict--fb + namespace: mcp--afb2bcb3-2889-85f2-88a6-ea4ac5f2dd68 +spec: {} diff --git a/lib/clusteraccess/advanced/testdata/test-02/cr-conflict.yaml b/lib/clusteraccess/advanced/testdata/test-02/cr-conflict.yaml new file mode 100644 index 0000000..666bf06 --- /dev/null +++ b/lib/clusteraccess/advanced/testdata/test-02/cr-conflict.yaml @@ -0,0 +1,7 @@ +apiVersion: clusters.openmcp.cloud/v1alpha1 +kind: ClusterRequest +metadata: + name: foo--cr-conflict--fb + namespace: mcp--3d95cbf8-d468-88c6-9ca8-a3be991689a8 +spec: + purpose: test From 64c27c2afd1c0f2cae26cac959a6163b00a22da1 Mon Sep 17 00:00:00 2001 From: Johannes Aubart Date: Wed, 1 Oct 2025 16:50:28 +0200 Subject: [PATCH 05/12] enable stuff to depend on request --- lib/clusteraccess/advanced/clusteraccess.go | 342 +++++++++++++----- .../advanced/clusteraccess_test.go | 61 +++- .../testdata/test-02/ar-conflict.yaml | 2 +- .../testdata/test-02/cr-conflict.yaml | 2 +- 4 files changed, 289 insertions(+), 118 deletions(-) diff --git a/lib/clusteraccess/advanced/clusteraccess.go b/lib/clusteraccess/advanced/clusteraccess.go index 4912a99..2d0daa0 100644 --- a/lib/clusteraccess/advanced/clusteraccess.go +++ b/lib/clusteraccess/advanced/clusteraccess.go @@ -28,6 +28,8 @@ import ( libutils "github.com/openmcp-project/openmcp-operator/lib/utils" ) +const caControllerName = "ClusterAccess" + ////////////////// /// INTERFACES /// ////////////////// @@ -65,7 +67,8 @@ type ClusterAccessReconciler interface { // It returns a reconcile.Result and an error if the reconciliation failed. // The reconcile.Result may contain a RequeueAfter value to indicate that the reconciliation should be retried after a certain duration. // The duration is set by the WithRetryInterval method. 
- Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) + // Any additional arguments provided are passed into all methods of the ClusterRegistration objects that are called. + Reconcile(ctx context.Context, request reconcile.Request, additionalData ...any) (reconcile.Result, error) // ReconcileDelete deletes the ClusterRequests and/or AccessRequests for the registered clusters. // This function should be called during the deletion of the reconciled object. // ctx is the context for the reconciliation. @@ -73,7 +76,8 @@ type ClusterAccessReconciler interface { // It returns a reconcile.Result and an error if the reconciliation failed. // The reconcile.Result may contain a RequeueAfter value to indicate that the reconciliation should be retried after a certain duration. // The duration is set by the WithRetryInterval method. - ReconcileDelete(ctx context.Context, request reconcile.Request) (reconcile.Result, error) + // Any additional arguments provided are passed into all methods of the ClusterRegistration objects that are called. + ReconcileDelete(ctx context.Context, request reconcile.Request, additionalData ...any) (reconcile.Result, error) // WithFakingCallback passes a callback function with a specific key. // The available keys depend on the implementation. @@ -102,106 +106,164 @@ type ClusterAccessReconciler interface { type FakingCallback func(ctx context.Context, platformClusterClient client.Client, key string, req *reconcile.Request, cr *clustersv1alpha1.ClusterRequest, ar *clustersv1alpha1.AccessRequest, c *clustersv1alpha1.Cluster, access *clusters.Cluster) error // ManagedLabelGenerator is a function that generates the managed-by and managed-purpose labels for the created resources. -type ManagedLabelGenerator func(controllerName string, req reconcile.Request, reg ClusterRegistration) (string, string) +// The first return value is the value for the managed-by label, the second one is the value for the managed-purpose label. +// The third return value can be nil, or it can contain additional labels to be set on the created resources. +type ManagedLabelGenerator func(controllerName string, req reconcile.Request, reg ClusterRegistration) (string, string, map[string]string) -func DefaultManagedLabelGenerator(controllerName string, req reconcile.Request, reg ClusterRegistration) (string, string) { - return controllerName, fmt.Sprintf("%s.%s.%s", req.Namespace, req.Name, reg.ID()) +func DefaultManagedLabelGenerator(controllerName string, req reconcile.Request, reg ClusterRegistration) (string, string, map[string]string) { + return controllerName, fmt.Sprintf("%s.%s.%s", req.Namespace, req.Name, reg.ID()), nil } type ClusterRegistration interface { + // static (identical, independent from the reconcile request) + // ID is the unique identifier for the cluster. ID() string // Suffix is the suffix to be used for the names of the created resources. // It must be unique within the context of the reconciler. // If empty, the ID will be used as suffix. Suffix() string + // Scheme is the scheme for the Kubernetes client of the cluster. + // If nil, the default scheme will be used. + Scheme() *runtime.Scheme + // AccessRequestAvailable returns true if an AccessRequest can be retrieved from this registration. + AccessRequestAvailable() bool + // ClusterRequestAvailable returns true if a ClusterRequest can be retrieved from this registration. 
+ ClusterRequestAvailable() bool + + // dynamic (can depend on the reconcile request and potential other factors) + // AccessRequestTokenConfig is the token configuration for the AccessRequest to be created for the cluster. // Might be nil if no AccessRequest should be created or if OIDC access is used. // Only one of AccessRequestTokenConfig and AccessRequestOIDCConfig should be non-nil. - AccessRequestTokenConfig() *clustersv1alpha1.TokenConfig + AccessRequestTokenConfig(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.TokenConfig, error) // AccessRequestOIDCConfig is the OIDC configuration for the AccessRequest to be created for the cluster. // Might be nil if no AccessRequest should be created or if token-based access is used. // Only one of AccessRequestTokenConfig and AccessRequestOIDCConfig should be non-nil. - AccessRequestOIDCConfig() *clustersv1alpha1.OIDCConfig + AccessRequestOIDCConfig(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.OIDCConfig, error) // ClusterRequestSpec is the spec for the ClusterRequest to be created for the cluster. // It is nil if no ClusterRequest should be created. // Only one of ClusterRequestSpec, ClusterReference and ClusterRequestReference should be non-nil. - ClusterRequestSpec() *clustersv1alpha1.ClusterRequestSpec + ClusterRequestSpec(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.ClusterRequestSpec, error) // ClusterReference returns name and namespace of an existing Cluster resource. // It is nil if a new ClusterRequest should be created or if the Cluster is referenced by an existing ClusterRequest. // Only one of ClusterRequestSpec, ClusterReference and ClusterRequestReference should be non-nil. - ClusterReference() *commonapi.ObjectReference + ClusterReference(req reconcile.Request, additionalData ...any) (*commonapi.ObjectReference, error) // ClusterRequestReference returns name and namespace of the ClusterRequest resource. // It is nil if a new ClusterRequest should be created or if the Cluster is referenced directly. // Only one of ClusterRequestSpec, ClusterReference and ClusterRequestReference should be non-nil. - ClusterRequestReference() *commonapi.ObjectReference - // Scheme is the scheme for the Kubernetes client of the cluster. - // If nil, the default scheme will be used. - Scheme() *runtime.Scheme + ClusterRequestReference(req reconcile.Request, additionalData ...any) (*commonapi.ObjectReference, error) // Namespace generates the namespace on the Platform cluster to use for the created requests. // The generated namespace is expected be unique within the context of the reconciler. - Namespace(requestName, requestNamespace string) (string, error) + Namespace(req reconcile.Request, additionalData ...any) (string, error) } type ClusterRegistrationBuilder interface { // WithTokenAccess enables an AccessRequest for token-based access to be created for the cluster. - // It is mutually exclusive to WithOIDCAccess, calling this method after WithOIDCAccess will override the previous call. + // Use this method, if the token configuration does not depend on the reconcile request, use WithTokenAccessGenerator otherwise. + // Calling this method will override any previous calls to WithTokenAccess, WithTokenAccessGenerator, WithOIDCAccess, or WithOIDCAccessGenerator. // Passing in a nil cfg will disable AccessRequest creation, if it was token-based before, and have no effect if it was OIDC-based before. 
WithTokenAccess(cfg *clustersv1alpha1.TokenConfig) ClusterRegistrationBuilder + // WithTokenAccessGenerator is like WithTokenAccess, but takes a function that generates the TokenConfig based on the reconcile request. + // Calling this method will override any previous calls to WithTokenAccess, WithTokenAccessGenerator, WithOIDCAccess, or WithOIDCAccessGenerator. + // Passing in a nil function will disable AccessRequest creation, if it was token-based before, and have no effect if it was OIDC-based before. + WithTokenAccessGenerator(f func(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.TokenConfig, error)) ClusterRegistrationBuilder // WithOIDCAccess enables an AccessRequest for OIDC-based access to be created for the cluster. - // It is mutually exclusive to WithTokenAccess, calling this method after WithTokenAccess will override the previous call. + // Use this method, if the OIDC configuration does not depend on the reconcile request, use WithOIDCAccessGenerator otherwise. + // Calling this method will override any previous calls to WithTokenAccess, WithTokenAccessGenerator, WithOIDCAccess, or WithOIDCAccessGenerator. // Passing in a nil cfg will disable AccessRequest creation, if it was OIDC-based before, and have no effect if it was token-based before. WithOIDCAccess(cfg *clustersv1alpha1.OIDCConfig) ClusterRegistrationBuilder + // WithOIDCAccessGenerator is like WithOIDCAccess, but takes a function that generates the OIDCConfig based on the reconcile request. + // Calling this method will override any previous calls to WithTokenAccess, WithTokenAccessGenerator, WithOIDCAccess, or WithOIDCAccessGenerator. + // Passing in a nil function will disable AccessRequest creation, if it was OIDC-based before, and have no effect if it was token-based before. + WithOIDCAccessGenerator(f func(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.OIDCConfig, error)) ClusterRegistrationBuilder // WithScheme sets the scheme for the Kubernetes client of the cluster. // If not set or set to nil, the default scheme will be used. WithScheme(scheme *runtime.Scheme) ClusterRegistrationBuilder // WithNamespaceGenerator sets the function that generates the namespace on the Platform cluster to use for the created requests. - WithNamespaceGenerator(f func(requestName, requestNamespace string) (string, error)) ClusterRegistrationBuilder + WithNamespaceGenerator(f func(req reconcile.Request, additionalData ...any) (string, error)) ClusterRegistrationBuilder // Build builds the ClusterRegistration object. Build() ClusterRegistration } +// DefaultNamespaceGenerator is a default implementation of a namespace generator. +// It computes a UUID-style hash from the given request. +func DefaultNamespaceGenerator(req reconcile.Request, _ ...any) (string, error) { + return ctrlutils.K8sNameUUID(req.Namespace, req.Name) +} + +// DefaultNamespaceGeneratorForMCP is a default implementation of a namespace generator for MCPs. +// It computes a UUID-style hash from the given request and prefixes it with "mcp--". 
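+//
+// Illustrative sketch (not part of this patch; 'builder' and the namespace value are
+// placeholders): a registration can also bring its own generator via the builder, for
+// example to derive the namespace directly from the reconciled object's name:
+//
+//	builder = builder.WithNamespaceGenerator(func(req reconcile.Request, _ ...any) (string, error) {
+//		return "access--" + req.Name, nil
+//	})
+//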
+func DefaultNamespaceGeneratorForMCP(req reconcile.Request, _ ...any) (string, error) { + return libutils.StableMCPNamespace(req.Name, req.Namespace) +} + /////////////////////////////////////////// /// IMPLEMENTATION: ClusterRegistration /// /////////////////////////////////////////// type clusterRegistrationImpl struct { - id string - suffix string - scheme *runtime.Scheme - accessTokenConfig *clustersv1alpha1.TokenConfig - accessOIDCConfig *clustersv1alpha1.OIDCConfig - clusterRequestSpec *clustersv1alpha1.ClusterRequestSpec - clusterRef *commonapi.ObjectReference - clusterRequestRef *commonapi.ObjectReference - namespaceGenerator func(requestName, requestNamespace string) (string, error) + id string + suffix string + scheme *runtime.Scheme + generateAccessTokenConfig func(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.TokenConfig, error) + generateAccessOIDCConfig func(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.OIDCConfig, error) + generateClusterRequestSpec func(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.ClusterRequestSpec, error) + generateClusterRef func(req reconcile.Request, additionalData ...any) (*commonapi.ObjectReference, error) + generateClusterRequestRef func(req reconcile.Request, additionalData ...any) (*commonapi.ObjectReference, error) + generateNamespace func(req reconcile.Request, additionalData ...any) (string, error) } var _ ClusterRegistration = &clusterRegistrationImpl{} +// AccessRequestAvailable implements ClusterRegistration. +func (c *clusterRegistrationImpl) AccessRequestAvailable() bool { + return c.generateAccessOIDCConfig != nil || c.generateAccessTokenConfig != nil +} + +// ClusterRequestAvailable implements ClusterRegistration. +func (c *clusterRegistrationImpl) ClusterRequestAvailable() bool { + return c.generateClusterRequestSpec != nil || c.generateClusterRequestRef != nil +} + // AccessRequestTokenConfig implements ClusterRegistration. -func (c *clusterRegistrationImpl) AccessRequestTokenConfig() *clustersv1alpha1.TokenConfig { - return c.accessTokenConfig.DeepCopy() +func (c *clusterRegistrationImpl) AccessRequestTokenConfig(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.TokenConfig, error) { + if c.generateAccessTokenConfig == nil { + return nil, nil + } + return c.generateAccessTokenConfig(req, additionalData...) } // AccessRequestOIDCConfig implements ClusterRegistration. -func (c *clusterRegistrationImpl) AccessRequestOIDCConfig() *clustersv1alpha1.OIDCConfig { - return c.accessOIDCConfig.DeepCopy() +func (c *clusterRegistrationImpl) AccessRequestOIDCConfig(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.OIDCConfig, error) { + if c.generateAccessOIDCConfig == nil { + return nil, nil + } + return c.generateAccessOIDCConfig(req, additionalData...) } // ClusterRequestSpec implements ClusterRegistration. -func (c *clusterRegistrationImpl) ClusterRequestSpec() *clustersv1alpha1.ClusterRequestSpec { - return c.clusterRequestSpec.DeepCopy() +func (c *clusterRegistrationImpl) ClusterRequestSpec(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.ClusterRequestSpec, error) { + if c.generateClusterRequestSpec == nil { + return nil, nil + } + return c.generateClusterRequestSpec(req, additionalData...) } // ClusterReference implements ClusterRegistration. 
-func (c *clusterRegistrationImpl) ClusterReference() *commonapi.ObjectReference { - return c.clusterRef.DeepCopy() +func (c *clusterRegistrationImpl) ClusterReference(req reconcile.Request, additionalData ...any) (*commonapi.ObjectReference, error) { + if c.generateClusterRef == nil { + return nil, nil + } + return c.generateClusterRef(req, additionalData...) } // ClusterRequestReference implements ClusterRegistration. -func (c *clusterRegistrationImpl) ClusterRequestReference() *commonapi.ObjectReference { - return c.clusterRequestRef.DeepCopy() +func (c *clusterRegistrationImpl) ClusterRequestReference(req reconcile.Request, additionalData ...any) (*commonapi.ObjectReference, error) { + if c.generateClusterRequestRef == nil { + return nil, nil + } + return c.generateClusterRequestRef(req, additionalData...) } // ID implements ClusterRegistration. @@ -223,11 +285,11 @@ func (c *clusterRegistrationImpl) Scheme() *runtime.Scheme { } // Namespace implements ClusterRegistration. -func (c *clusterRegistrationImpl) Namespace(requestName, requestNamespace string) (string, error) { - if c.namespaceGenerator != nil { - return c.namespaceGenerator(requestName, requestNamespace) +func (c *clusterRegistrationImpl) Namespace(req reconcile.Request, additionalData ...any) (string, error) { + if c.generateNamespace == nil { + return DefaultNamespaceGenerator(req, additionalData...) } - return libutils.StableMCPNamespace(requestName, requestNamespace) + return c.generateNamespace(req, additionalData...) } ////////////////////////////////////////////////// @@ -247,13 +309,43 @@ func (c *clusterRegistrationBuilderImpl) Build() ClusterRegistration { // WithOIDCAccess implements ClusterRegistrationBuilder. func (c *clusterRegistrationBuilderImpl) WithOIDCAccess(cfg *clustersv1alpha1.OIDCConfig) ClusterRegistrationBuilder { - c.accessOIDCConfig = cfg.DeepCopy() + var f func(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.OIDCConfig, error) + if cfg != nil { + f = func(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.OIDCConfig, error) { + return cfg.DeepCopy(), nil + } + } + return c.WithOIDCAccessGenerator(f) +} + +// WithOIDCAccessGenerator implements ClusterRegistrationBuilder. +func (c *clusterRegistrationBuilderImpl) WithOIDCAccessGenerator(f func(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.OIDCConfig, error)) ClusterRegistrationBuilder { + c.generateAccessOIDCConfig = f + if f != nil && c.generateAccessTokenConfig != nil { + // both access methods configured, disable token + c.generateAccessTokenConfig = nil + } return c } // WithTokenAccess implements ClusterRegistrationBuilder. func (c *clusterRegistrationBuilderImpl) WithTokenAccess(cfg *clustersv1alpha1.TokenConfig) ClusterRegistrationBuilder { - c.accessTokenConfig = cfg.DeepCopy() + var f func(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.TokenConfig, error) + if cfg != nil { + f = func(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.TokenConfig, error) { + return cfg.DeepCopy(), nil + } + } + return c.WithTokenAccessGenerator(f) +} + +// WithTokenAccessGenerator implements ClusterRegistrationBuilder. 
+func (c *clusterRegistrationBuilderImpl) WithTokenAccessGenerator(f func(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.TokenConfig, error)) ClusterRegistrationBuilder { + c.generateAccessTokenConfig = f + if f != nil && c.generateAccessOIDCConfig != nil { + // both access methods configured, disable OIDC + c.generateAccessOIDCConfig = nil + } return c } @@ -264,49 +356,60 @@ func (c *clusterRegistrationBuilderImpl) WithScheme(scheme *runtime.Scheme) Clus } // WithNamespaceGenerator implements ClusterRegistrationBuilder. -func (c *clusterRegistrationBuilderImpl) WithNamespaceGenerator(f func(requestName, requestNamespace string) (string, error)) ClusterRegistrationBuilder { - c.namespaceGenerator = f +func (c *clusterRegistrationBuilderImpl) WithNamespaceGenerator(f func(req reconcile.Request, additionalData ...any) (string, error)) ClusterRegistrationBuilder { + if f == nil { + f = DefaultNamespaceGenerator + } + c.generateNamespace = f return c } -// NewCluster instructs the Reconciler to create and manage a new ClusterRequest. -func NewCluster(id, suffix, purpose string, waitForClusterDeletion bool) ClusterRegistrationBuilder { +// StaticClusterRequestSpecGenerator is a helper function that just returns deep copies of the given spec. +func StaticClusterRequestSpecGenerator(spec *clustersv1alpha1.ClusterRequestSpec) func(reconcile.Request, ...any) (*clustersv1alpha1.ClusterRequestSpec, error) { + return func(_ reconcile.Request, _ ...any) (*clustersv1alpha1.ClusterRequestSpec, error) { + return spec.DeepCopy(), nil + } +} + +// NewClusterRequest instructs the Reconciler to create and manage a new ClusterRequest. +func NewClusterRequest(id, suffix string, generateClusterRequestSpec func(reconcile.Request, ...any) (*clustersv1alpha1.ClusterRequestSpec, error)) ClusterRegistrationBuilder { return &clusterRegistrationBuilderImpl{ clusterRegistrationImpl: clusterRegistrationImpl{ - id: id, - suffix: suffix, - clusterRequestSpec: &clustersv1alpha1.ClusterRequestSpec{ - Purpose: purpose, - WaitForClusterDeletion: &waitForClusterDeletion, - }, + id: id, + suffix: suffix, + generateClusterRequestSpec: generateClusterRequestSpec, + generateNamespace: DefaultNamespaceGenerator, }, } } +// StaticReferenceGenerator is a helper function that just returns a deep copy of the given reference. +func StaticReferenceGenerator(ref *commonapi.ObjectReference) func(reconcile.Request, ...any) (*commonapi.ObjectReference, error) { + return func(_ reconcile.Request, _ ...any) (*commonapi.ObjectReference, error) { + return ref.DeepCopy(), nil + } +} + // ExistingCluster instructs the Reconciler to use an existing Cluster resource. -func ExistingCluster(id, suffix, name, namespace string) ClusterRegistrationBuilder { +func ExistingCluster(id, suffix string, generateClusterRef func(reconcile.Request, ...any) (*commonapi.ObjectReference, error)) ClusterRegistrationBuilder { return &clusterRegistrationBuilderImpl{ clusterRegistrationImpl: clusterRegistrationImpl{ - id: id, - suffix: suffix, - clusterRef: &commonapi.ObjectReference{ - Name: name, - Namespace: namespace, - }, + id: id, + suffix: suffix, + generateClusterRef: generateClusterRef, + generateNamespace: DefaultNamespaceGenerator, }, } } // ExistingClusterRequest instructs the Reconciler to use an existing Cluster resource that is referenced by the given ClusterRequest. 
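// A migration sketch (not part of this change): the removed NewCluster(id, suffix, purpose, wait)
// constructor and the old ExistingCluster/ExistingClusterRequest signatures can be expressed with
// the generator-based constructors roughly as follows; all names and values are illustrative.
func exampleConstructorMigration() []ClusterRegistration {
	wait := true
	return []ClusterRegistration{
		// previously: NewCluster("foobar", "fb", "test", true).Build()
		NewClusterRequest("foobar", "fb", StaticClusterRequestSpecGenerator(&clustersv1alpha1.ClusterRequestSpec{
			Purpose:                "test",
			WaitForClusterDeletion: &wait,
		})).Build(),
		// previously: ExistingCluster("baz", "bz", "cluster-01", "test").Build()
		ExistingCluster("baz", "bz", StaticReferenceGenerator(&commonapi.ObjectReference{
			Name:      "cluster-01",
			Namespace: "test",
		})).Build(),
		// previously: ExistingClusterRequest("qux", "qx", "cr-01", "test").Build()
		ExistingClusterRequest("qux", "qx", StaticReferenceGenerator(&commonapi.ObjectReference{
			Name:      "cr-01",
			Namespace: "test",
		})).Build(),
	}
}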
-func ExistingClusterRequest(id, suffix, name, namespace string) ClusterRegistrationBuilder { +func ExistingClusterRequest(id, suffix string, generateClusterRequestRef func(reconcile.Request, ...any) (*commonapi.ObjectReference, error)) ClusterRegistrationBuilder { return &clusterRegistrationBuilderImpl{ clusterRegistrationImpl: clusterRegistrationImpl{ - id: id, - suffix: suffix, - clusterRequestRef: &commonapi.ObjectReference{ - Name: name, - Namespace: namespace, - }, + id: id, + suffix: suffix, + generateClusterRequestRef: generateClusterRequestRef, + generateNamespace: DefaultNamespaceGenerator, }, } } @@ -367,16 +470,16 @@ func (r *reconcilerImpl) AccessRequest(ctx context.Context, request reconcile.Re if !ok { return nil, fmt.Errorf("no registration found for request '%s' with id '%s'", request.String(), id) } - if reg.AccessRequestOIDCConfig() == nil && reg.AccessRequestTokenConfig() == nil { + if !reg.AccessRequestAvailable() { return nil, fmt.Errorf("no AccessRequest configured for request '%s' with id '%s'", request.String(), id) } suffix := reg.Suffix() if suffix == "" { suffix = reg.ID() } - namespace, err := reg.Namespace(request.Name, request.Namespace) + namespace, err := reg.Namespace(request) if err != nil { - return nil, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s/%s': %w", request.Namespace, request.Name, err) + return nil, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s': %w", request.String(), err) } ar := &clustersv1alpha1.AccessRequest{} ar.Name = StableRequestName(r.controllerName, request, suffix) @@ -393,10 +496,13 @@ func (r *reconcilerImpl) Cluster(ctx context.Context, request reconcile.Request, if !ok { return nil, fmt.Errorf("no registration found for request '%s' with id '%s'", request.String(), id) } - clusterRef := reg.ClusterReference() + clusterRef, err := reg.ClusterReference(request) + if err != nil { + return nil, fmt.Errorf("unable to generate Cluster reference from registration for request '%s' with id '%s': %w", request.String(), id, err) + } if clusterRef == nil { // if no ClusterReference is specified, try to get it from the ClusterRequest - if reg.ClusterRequestReference() != nil || reg.ClusterRequestSpec() != nil { + if reg.ClusterRequestAvailable() { cr, err := r.ClusterRequest(ctx, request, id) if err != nil { return nil, fmt.Errorf("unable to fetch ClusterRequest for request '%s' with id '%s': %w", request.String(), id, err) @@ -431,18 +537,25 @@ func (r *reconcilerImpl) ClusterRequest(ctx context.Context, request reconcile.R if !ok { return nil, fmt.Errorf("no registration found for request '%s' with id '%s'", request.String(), id) } - crRef := reg.ClusterRequestReference() + crRef, err := reg.ClusterRequestReference(request) + if err != nil { + return nil, fmt.Errorf("unable to generate ClusterRequest reference from registration for request '%s' with id '%s': %w", request.String(), id, err) + } if crRef == nil { // if the ClusterRequest is not referenced directly, check if was created or can be retrieved via an AccessRequest - if reg.ClusterRequestSpec() != nil { + crSpec, err := reg.ClusterRequestSpec(request) + if err != nil { + return nil, fmt.Errorf("unable to generate ClusterRequest spec from registration for request '%s' with id '%s': %w", request.String(), id, err) + } + if crSpec != nil { // the ClusterRequest was created by this library suffix := reg.Suffix() if suffix == "" { suffix = reg.ID() } - namespace, err := reg.Namespace(request.Name, 
request.Namespace) + namespace, err := reg.Namespace(request) if err != nil { - return nil, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s/%s': %w", request.Namespace, request.Name, err) + return nil, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s': %w", request.String(), err) } crRef = &commonapi.ObjectReference{ Name: StableRequestName(r.controllerName, request, suffix), @@ -469,8 +582,8 @@ func (r *reconcilerImpl) ClusterRequest(ctx context.Context, request reconcile.R } // Reconcile implements Reconciler. -func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log := logging.FromContextOrDiscard(ctx).WithName("ClusterAccess") +func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Request, additionalData ...any) (reconcile.Result, error) { + log := logging.FromContextOrDiscard(ctx).WithName(caControllerName) ctx = logging.NewContext(ctx, log) log.Info("Reconciling cluster access") @@ -482,15 +595,16 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques if suffix == "" { suffix = reg.ID() } - managedBy, managedPurpose := r.managedBy(r.controllerName, request, reg) + managedBy, managedPurpose, additionalLabels := r.managedBy(r.controllerName, request, reg) expectedLabels := map[string]string{} expectedLabels[openmcpconst.ManagedByLabel] = managedBy expectedLabels[openmcpconst.ManagedPurposeLabel] = managedPurpose + expectedLabels = maputils.Merge(additionalLabels, expectedLabels) // ensure namespace exists - namespace, err := reg.Namespace(request.Name, request.Namespace) + namespace, err := reg.Namespace(request, additionalData...) if err != nil { - return reconcile.Result{}, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s/%s': %w", request.Namespace, request.Name, err) + return reconcile.Result{}, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s': %w", request.String(), err) } rlog.Debug("Ensuring platform cluster namespace exists", "namespaceName", namespace) ns := &corev1.Namespace{} @@ -507,7 +621,11 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques // if a ClusterRequest is requested, ensure it exists and is ready var cr *clustersv1alpha1.ClusterRequest - if reg.ClusterRequestSpec() != nil { + crSpec, err := reg.ClusterRequestSpec(request, additionalData...) 
+ if err != nil { + return reconcile.Result{}, fmt.Errorf("unable to generate ClusterRequest spec from registration for request '%s' with id '%s': %w", request.String(), reg.ID(), err) + } + if crSpec != nil { rlog.Debug("Ensuring ClusterRequest exists") cr = &clustersv1alpha1.ClusterRequest{} cr.Name = StableRequestName(r.controllerName, request, suffix) @@ -524,7 +642,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques rlog.Info("Creating ClusterRequest", "crName", cr.Name, "crNamespace", cr.Namespace) cr.Labels = map[string]string{} maps.Copy(cr.Labels, expectedLabels) - cr.Spec = *reg.ClusterRequestSpec().DeepCopy() + cr.Spec = *crSpec if err := r.platformClusterClient.Create(ctx, cr); err != nil { return reconcile.Result{}, fmt.Errorf("unable to create ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err) } @@ -553,7 +671,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques // if an AccessRequest is requested, ensure it exists and is ready var ar *clustersv1alpha1.AccessRequest - if reg.AccessRequestOIDCConfig() != nil || reg.AccessRequestTokenConfig() != nil { + if reg.AccessRequestAvailable() { rlog.Debug("Ensuring AccessRequest exists") ar = &clustersv1alpha1.AccessRequest{} ar.Name = StableRequestName(r.controllerName, request, suffix) @@ -569,16 +687,35 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques if !exists { rlog.Info("Creating AccessRequest", "arName", ar.Name, "arNamespace", ar.Namespace) // cluster and cluster request references are immutable, so set them only when creating new AccessRequests + referenced := false if cr != nil { ar.Spec.RequestRef = &commonapi.ObjectReference{ Name: cr.Name, Namespace: cr.Namespace, } - } else if reg.ClusterReference() != nil { - ar.Spec.ClusterRef = reg.ClusterReference().DeepCopy() - } else if reg.ClusterRequestReference() != nil { - ar.Spec.RequestRef = reg.ClusterRequestReference().DeepCopy() - } else { + referenced = true + } + if !referenced { + cRef, err := reg.ClusterReference(request, additionalData...) + if err != nil { + return reconcile.Result{}, fmt.Errorf("unable to generate Cluster reference from registration for request '%s' with id '%s': %w", request.String(), reg.ID(), err) + } + if cRef != nil { + ar.Spec.ClusterRef = cRef + referenced = true + } + } + if !referenced { + crRef, err := reg.ClusterRequestReference(request, additionalData...) + if err != nil { + return reconcile.Result{}, fmt.Errorf("unable to generate ClusterRequest reference from registration for request '%s' with id '%s': %w", request.String(), reg.ID(), err) + } + if crRef != nil { + ar.Spec.RequestRef = crRef + referenced = true + } + } + if !referenced { return reconcile.Result{}, fmt.Errorf("no ClusterRequestSpec, ClusterReference or ClusterRequestReference specified for registration with id '%s'", reg.ID()) } } else { @@ -594,8 +731,15 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques } if _, err := controllerutil.CreateOrUpdate(ctx, r.platformClusterClient, ar, func() error { ar.Labels = maputils.Merge(ar.Labels, expectedLabels) - ar.Spec.Token = reg.AccessRequestTokenConfig() - ar.Spec.OIDC = reg.AccessRequestOIDCConfig() + var err error + ar.Spec.Token, err = reg.AccessRequestTokenConfig(request, additionalData...) + if err != nil { + return fmt.Errorf("unable to generate AccessRequest token config from registration: %w", err) + } + ar.Spec.OIDC, err = reg.AccessRequestOIDCConfig(request, additionalData...) 
+ if err != nil { + return fmt.Errorf("unable to generate AccessRequest OIDC config from registration: %w", err) + } return nil }); err != nil { return reconcile.Result{}, fmt.Errorf("unable to create or update AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err) @@ -618,8 +762,8 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques } // ReconcileDelete implements Reconciler. -func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log := logging.FromContextOrDiscard(ctx).WithName("ClusterAccess") +func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile.Request, additionalData ...any) (reconcile.Result, error) { + log := logging.FromContextOrDiscard(ctx).WithName(caControllerName) ctx = logging.NewContext(ctx, log) log.Info("Reconciling cluster access") @@ -631,18 +775,19 @@ func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile. if suffix == "" { suffix = reg.ID() } - managedBy, managedPurpose := r.managedBy(r.controllerName, request, reg) + managedBy, managedPurpose, additionalLabels := r.managedBy(r.controllerName, request, reg) expectedLabels := map[string]string{} expectedLabels[openmcpconst.ManagedByLabel] = managedBy expectedLabels[openmcpconst.ManagedPurposeLabel] = managedPurpose + expectedLabels = maputils.Merge(additionalLabels, expectedLabels) - namespace, err := reg.Namespace(request.Name, request.Namespace) + namespace, err := reg.Namespace(request, additionalData...) if err != nil { - return reconcile.Result{}, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s/%s': %w", request.Namespace, request.Name, err) + return reconcile.Result{}, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s': %w", request.String(), err) } // if an AccessRequest is requested, ensure it is deleted - if reg.AccessRequestOIDCConfig() != nil || reg.AccessRequestTokenConfig() != nil { //nolint:dupl + if reg.AccessRequestAvailable() { //nolint:dupl rlog.Debug("Ensuring AccessRequest is deleted") ar := &clustersv1alpha1.AccessRequest{} ar.Name = StableRequestName(r.controllerName, request, suffix) @@ -684,7 +829,7 @@ func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile. } // if a ClusterRequest is requested, ensure it is deleted - if reg.ClusterRequestSpec() != nil { //nolint:dupl + if reg.ClusterRequestAvailable() { //nolint:dupl rlog.Debug("Ensuring ClusterRequest is deleted") cr := &clustersv1alpha1.ClusterRequest{} cr.Name = StableRequestName(r.controllerName, request, suffix) @@ -734,6 +879,9 @@ func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile. // Register implements Reconciler. 
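// A controller wiring sketch (not part of this change): how Reconcile, ReconcileDelete and Access
// might be driven from a controller's own Reconcile method. The myController type, its fields and
// the "example" registration id are hypothetical; the package's existing imports are assumed.
type myController struct {
	clusterAccess ClusterAccessReconciler
	onboarding    client.Client
}

func (m *myController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	obj := &corev1.ConfigMap{} // stand-in for the actually reconciled resource
	if err := m.onboarding.Get(ctx, req.NamespacedName, obj); err != nil {
		return reconcile.Result{}, client.IgnoreNotFound(err)
	}
	if !obj.GetDeletionTimestamp().IsZero() {
		// Tear down the managed ClusterRequests/AccessRequests before removing finalizers.
		return m.clusterAccess.ReconcileDelete(ctx, req)
	}
	res, err := m.clusterAccess.Reconcile(ctx, req)
	if err != nil || res.RequeueAfter > 0 {
		// Requests exist but are not ready yet (or something failed); let the caller requeue.
		return res, err
	}
	// All registered clusters are ready; access can now be obtained by registration id.
	cluster, err := m.clusterAccess.Access(ctx, req, "example")
	if err != nil {
		return reconcile.Result{}, err
	}
	_ = cluster
	return reconcile.Result{}, nil
}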
func (r *reconcilerImpl) Register(reg ClusterRegistration) ClusterAccessReconciler { + if reg == nil { + return r + } r.registrations[reg.ID()] = reg return r } diff --git a/lib/clusteraccess/advanced/clusteraccess_test.go b/lib/clusteraccess/advanced/clusteraccess_test.go index b75f4d0..85f82c3 100644 --- a/lib/clusteraccess/advanced/clusteraccess_test.go +++ b/lib/clusteraccess/advanced/clusteraccess_test.go @@ -15,6 +15,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -25,7 +26,6 @@ import ( openmcpconst "github.com/openmcp-project/openmcp-operator/api/constants" "github.com/openmcp-project/openmcp-operator/api/install" "github.com/openmcp-project/openmcp-operator/lib/clusteraccess/advanced" - libutils "github.com/openmcp-project/openmcp-operator/lib/utils" ) func TestAdvancedClusterAccess(t *testing.T) { @@ -64,8 +64,14 @@ var _ = Describe("Advanced Cluster Access", func() { env := defaultTestSetup("testdata", "test-01") rec := defaultClusterAccessReconciler(env, "foo") customNamespace := "custom-namespace" - rec.Register(advanced.NewCluster("foobar", "fb", "test", true).Build()) // example 1, without access request - rec.Register(advanced.NewCluster("foobaz", "fz", "test", false).WithNamespaceGenerator(func(requestName, requestNamespace string) (string, error) { + rec.Register(advanced.NewClusterRequest("foobar", "fb", advanced.StaticClusterRequestSpecGenerator(&clustersv1alpha1.ClusterRequestSpec{ + Purpose: "test", + WaitForClusterDeletion: ptr.To(true), + })).Build()) // example 1, without access request + rec.Register(advanced.NewClusterRequest("foobaz", "fz", advanced.StaticClusterRequestSpecGenerator(&clustersv1alpha1.ClusterRequestSpec{ + Purpose: "test", + WaitForClusterDeletion: ptr.To(false), + })).WithNamespaceGenerator(func(request reconcile.Request, _ ...any) (string, error) { return customNamespace, nil }).WithTokenAccess(&clustersv1alpha1.TokenConfig{}).Build()) // example 2, with access request req := testutils.RequestFromStrings("bar", "baz") @@ -74,7 +80,7 @@ var _ = Describe("Advanced Cluster Access", func() { // EXAMPLE 1 // check namespace - namespace, err := libutils.StableMCPNamespace(req.Name, req.Namespace) + namespace, err := advanced.DefaultNamespaceGenerator(req) Expect(err).ToNot(HaveOccurred()) ns := &corev1.Namespace{} ns.Name = namespace @@ -182,7 +188,10 @@ var _ = Describe("Advanced Cluster Access", func() { Name: "cluster-admin", Kind: "ClusterRole", } - rec.Register(advanced.ExistingClusterRequest("foobar", "fb", "cr-01", "test").WithTokenAccess(&clustersv1alpha1.TokenConfig{ + rec.Register(advanced.ExistingClusterRequest("foobar", "fb", advanced.StaticReferenceGenerator(&commonapi.ObjectReference{ + Name: "cr-01", + Namespace: "test", + })).WithTokenAccess(&clustersv1alpha1.TokenConfig{ Permissions: []clustersv1alpha1.PermissionsRequest{permission}, RoleRefs: []commonapi.RoleRef{roleRef}, }).Build()) @@ -194,7 +203,7 @@ var _ = Describe("Advanced Cluster Access", func() { ar, err := rec.AccessRequest(env.Ctx, req, "foobar") Expect(err).ToNot(HaveOccurred()) Expect(ar.Name).To(Equal(advanced.StableRequestName("foo", req, "fb"))) - namespace, err := libutils.StableMCPNamespace(req.Name, req.Namespace) + namespace, err := advanced.DefaultNamespaceGenerator(req) Expect(err).ToNot(HaveOccurred()) Expect(ar.Namespace).To(Equal(namespace)) 
Expect(ar.Labels).To(HaveKeyWithValue(openmcpconst.ManagedByLabel, "foo")) @@ -283,7 +292,10 @@ var _ = Describe("Advanced Cluster Access", func() { }, }, } - rec.Register(advanced.ExistingCluster("foobar", "fb", "cluster-01", "test").WithOIDCAccess(oidc).Build()) + rec.Register(advanced.ExistingCluster("foobar", "fb", advanced.StaticReferenceGenerator(&commonapi.ObjectReference{ + Name: "cluster-01", + Namespace: "test", + })).WithOIDCAccess(oidc).Build()) req := testutils.RequestFromStrings("bar", "baz") Eventually(expectNoRequeue(env.Ctx, rec, req, false)).Should(Succeed()) @@ -292,7 +304,7 @@ var _ = Describe("Advanced Cluster Access", func() { ar, err := rec.AccessRequest(env.Ctx, req, "foobar") Expect(err).ToNot(HaveOccurred()) Expect(ar.Name).To(Equal(advanced.StableRequestName("foo", req, "fb"))) - namespace, err := libutils.StableMCPNamespace(req.Name, req.Namespace) + namespace, err := advanced.DefaultNamespaceGenerator(req) Expect(err).ToNot(HaveOccurred()) Expect(ar.Namespace).To(Equal(namespace)) Expect(ar.Labels).To(HaveKeyWithValue(openmcpconst.ManagedByLabel, "foo")) @@ -331,10 +343,13 @@ var _ = Describe("Advanced Cluster Access", func() { It("should take overwritten labels into account", func() { env := defaultTestSetup("testdata", "test-01") - rec := defaultClusterAccessReconciler(env, "foo").WithManagedLabels(func(controllerName string, req reconcile.Request, reg advanced.ClusterRegistration) (string, string) { - return "my-managed-by", "my-purpose" + rec := defaultClusterAccessReconciler(env, "foo").WithManagedLabels(func(controllerName string, req reconcile.Request, reg advanced.ClusterRegistration) (string, string, map[string]string) { + return "my-managed-by", "my-purpose", map[string]string{"custom-label": "custom-value"} }) - rec.Register(advanced.ExistingClusterRequest("foobar", "fb", "cr-01", "test").WithTokenAccess(&clustersv1alpha1.TokenConfig{}).Build()) + rec.Register(advanced.ExistingClusterRequest("foobar", "fb", advanced.StaticReferenceGenerator(&commonapi.ObjectReference{ + Name: "cr-01", + Namespace: "test", + })).WithTokenAccess(&clustersv1alpha1.TokenConfig{}).Build()) req := testutils.RequestFromStrings("bar", "baz") Eventually(expectNoRequeue(env.Ctx, rec, req, false)).Should(Succeed()) @@ -343,17 +358,21 @@ var _ = Describe("Advanced Cluster Access", func() { ar, err := rec.AccessRequest(env.Ctx, req, "foobar") Expect(err).ToNot(HaveOccurred()) Expect(ar.Name).To(Equal(advanced.StableRequestName("foo", req, "fb"))) - namespace, err := libutils.StableMCPNamespace(req.Name, req.Namespace) + namespace, err := advanced.DefaultNamespaceGenerator(req) Expect(err).ToNot(HaveOccurred()) Expect(ar.Namespace).To(Equal(namespace)) Expect(ar.Labels).To(HaveKeyWithValue(openmcpconst.ManagedByLabel, "my-managed-by")) Expect(ar.Labels).To(HaveKeyWithValue(openmcpconst.ManagedPurposeLabel, "my-purpose")) + Expect(ar.Labels).To(HaveKeyWithValue("custom-label", "custom-value")) }) It("should requeue if the ClusterRequest or AccessRequest is not yet ready", func() { env := defaultTestSetup("testdata", "test-01") rec := defaultClusterAccessReconciler(env, "foo") - rec.Register(advanced.NewCluster("foobar", "fb", "test", true).WithTokenAccess(&clustersv1alpha1.TokenConfig{}).Build()) + rec.Register(advanced.NewClusterRequest("foobar", "fb", advanced.StaticClusterRequestSpecGenerator(&clustersv1alpha1.ClusterRequestSpec{ + Purpose: "test", + WaitForClusterDeletion: ptr.To(true), + })).WithTokenAccess(&clustersv1alpha1.TokenConfig{}).Build()) req := 
testutils.RequestFromStrings("bar", "baz") // first requeue due to ClusterRequest @@ -374,11 +393,14 @@ var _ = Describe("Advanced Cluster Access", func() { It("should correctly handle conflicts with existing ClusterRequests", func() { env := defaultTestSetup("testdata", "test-02") rec := defaultClusterAccessReconciler(env, "foo") - rec.Register(advanced.NewCluster("foobar", "fb", "test", true).Build()) + rec.Register(advanced.NewClusterRequest("foobar", "fb", advanced.StaticClusterRequestSpecGenerator(&clustersv1alpha1.ClusterRequestSpec{ + Purpose: "test", + WaitForClusterDeletion: ptr.To(true), + })).Build()) req := testutils.RequestFromStrings("cr-conflict", "baz") // ensure that the resource we are trying to conflict with actually exists - namespace, err := libutils.StableMCPNamespace(req.Name, req.Namespace) + namespace, err := advanced.DefaultNamespaceGenerator(req) Expect(err).ToNot(HaveOccurred()) cr := &clustersv1alpha1.ClusterRequest{} cr.Name = advanced.StableRequestName("foo", req, "fb") @@ -395,11 +417,14 @@ var _ = Describe("Advanced Cluster Access", func() { It("should correctly handle conflicts with existing AccessRequests", func() { env := defaultTestSetup("testdata", "test-02") rec := defaultClusterAccessReconciler(env, "foo") - rec.Register(advanced.NewCluster("foobar", "fb", "test", true).WithTokenAccess(&clustersv1alpha1.TokenConfig{}).Build()) + rec.Register(advanced.NewClusterRequest("foobar", "fb", advanced.StaticClusterRequestSpecGenerator(&clustersv1alpha1.ClusterRequestSpec{ + Purpose: "test", + WaitForClusterDeletion: ptr.To(true), + })).WithTokenAccess(&clustersv1alpha1.TokenConfig{}).Build()) req := testutils.RequestFromStrings("ar-conflict", "baz") // ensure that the resource we are trying to conflict with actually exists - namespace, err := libutils.StableMCPNamespace(req.Name, req.Namespace) + namespace, err := advanced.DefaultNamespaceGenerator(req) Expect(err).ToNot(HaveOccurred()) ar := &clustersv1alpha1.AccessRequest{} ar.Name = advanced.StableRequestName("foo", req, "fb") @@ -420,8 +445,6 @@ var _ = Describe("Advanced Cluster Access", func() { // expectRequeue runs the reconciler's Reconcile method (or ReconcileDelete, if del is true) with the given request and expects a requeueAfter duration greater than zero in the returned result. // If match is non-empty, the first element is matched against the requeueAfter duration instead of just checking that it's greater than zero. Further elements are ignored. // It fails if the reconcile returns an error. 
-// -//nolint:unused func expectRequeue(ctx context.Context, rec advanced.ClusterAccessReconciler, req reconcile.Request, del bool, match ...types.GomegaMatcher) func(Gomega) { return func(g Gomega) { var res reconcile.Result diff --git a/lib/clusteraccess/advanced/testdata/test-02/ar-conflict.yaml b/lib/clusteraccess/advanced/testdata/test-02/ar-conflict.yaml index fd2454a..a9a6006 100644 --- a/lib/clusteraccess/advanced/testdata/test-02/ar-conflict.yaml +++ b/lib/clusteraccess/advanced/testdata/test-02/ar-conflict.yaml @@ -2,5 +2,5 @@ apiVersion: clusters.openmcp.cloud/v1alpha1 kind: AccessRequest metadata: name: foo--ar-conflict--fb - namespace: mcp--afb2bcb3-2889-85f2-88a6-ea4ac5f2dd68 + namespace: afb2bcb3-2889-85f2-88a6-ea4ac5f2dd68 spec: {} diff --git a/lib/clusteraccess/advanced/testdata/test-02/cr-conflict.yaml b/lib/clusteraccess/advanced/testdata/test-02/cr-conflict.yaml index 666bf06..7b33766 100644 --- a/lib/clusteraccess/advanced/testdata/test-02/cr-conflict.yaml +++ b/lib/clusteraccess/advanced/testdata/test-02/cr-conflict.yaml @@ -2,6 +2,6 @@ apiVersion: clusters.openmcp.cloud/v1alpha1 kind: ClusterRequest metadata: name: foo--cr-conflict--fb - namespace: mcp--3d95cbf8-d468-88c6-9ca8-a3be991689a8 + namespace: 3d95cbf8-d468-88c6-9ca8-a3be991689a8 spec: purpose: test From b3b4ab59b4aa7e0049fc39c84f9c75f6e77b3868 Mon Sep 17 00:00:00 2001 From: Johannes Aubart Date: Wed, 1 Oct 2025 17:26:17 +0200 Subject: [PATCH 06/12] make clusteraccess library use the advanced library internally --- lib/clusteraccess/advanced/clusteraccess.go | 11 + lib/clusteraccess/clusteraccess.go | 437 +++++--------------- lib/clusteraccess/clusteraccess_test.go | 29 -- 3 files changed, 112 insertions(+), 365 deletions(-) diff --git a/lib/clusteraccess/advanced/clusteraccess.go b/lib/clusteraccess/advanced/clusteraccess.go index 2d0daa0..cf554a3 100644 --- a/lib/clusteraccess/advanced/clusteraccess.go +++ b/lib/clusteraccess/advanced/clusteraccess.go @@ -38,7 +38,12 @@ const caControllerName = "ClusterAccess" // It can create ClusterRequests and/or AccessRequests for an amount of clusters. type ClusterAccessReconciler interface { // Register registers a cluster to be managed by the reconciler. + // No-op if reg is nil. + // Overwrites any previous registration with the same ID. Register(reg ClusterRegistration) ClusterAccessReconciler + // Unregister unregisters a cluster from being managed by the reconciler. + // No-op if no registration with the given ID exists. + Unregister(id string) ClusterAccessReconciler // WithRetryInterval sets the retry interval. WithRetryInterval(interval time.Duration) ClusterAccessReconciler // WithManagedLabels allows to overwrite the managed-by and managed-purpose labels that are set on the created resources. @@ -886,6 +891,12 @@ func (r *reconcilerImpl) Register(reg ClusterRegistration) ClusterAccessReconcil return r } +// Unregister implements Reconciler. +func (r *reconcilerImpl) Unregister(id string) ClusterAccessReconciler { + delete(r.registrations, id) + return r +} + // WithManagedLabels implements Reconciler. 
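// A sketch (not part of this change) showing a custom ManagedLabelGenerator together with
// Register and Unregister; the label key and the platformClient parameter are illustrative.
func exampleLabelsAndUnregister(platformClient client.Client) ClusterAccessReconciler {
	rec := NewClusterAccessReconciler(platformClient, "my-controller").
		WithManagedLabels(func(controllerName string, req reconcile.Request, reg ClusterRegistration) (string, string, map[string]string) {
			// Keep the default managed-by and managed-purpose values, attach one extra label.
			managedBy, managedPurpose, _ := DefaultManagedLabelGenerator(controllerName, req, reg)
			return managedBy, managedPurpose, map[string]string{"example.org/owner": req.Name}
		})
	rec.Register(NewClusterRequest("tmp", "tmp",
		StaticClusterRequestSpecGenerator(&clustersv1alpha1.ClusterRequestSpec{Purpose: "test"})).Build())
	// Registering the same id again overwrites the previous registration; Unregister removes it.
	rec.Unregister("tmp")
	return rec
}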
func (r *reconcilerImpl) WithManagedLabels(gen ManagedLabelGenerator) ClusterAccessReconciler { if gen == nil { diff --git a/lib/clusteraccess/clusteraccess.go b/lib/clusteraccess/clusteraccess.go index 667ae34..a83a17e 100644 --- a/lib/clusteraccess/clusteraccess.go +++ b/lib/clusteraccess/clusteraccess.go @@ -3,11 +3,9 @@ package clusteraccess import ( "context" "fmt" - "strings" "time" "github.com/openmcp-project/controller-utils/pkg/logging" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/clientcmd" @@ -18,19 +16,21 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/openmcp-project/controller-utils/pkg/clusters" - ctrlutils "github.com/openmcp-project/controller-utils/pkg/controller" "github.com/openmcp-project/controller-utils/pkg/resources" clustersv1alpha1 "github.com/openmcp-project/openmcp-operator/api/clusters/v1alpha1" commonapi "github.com/openmcp-project/openmcp-operator/api/common" constv1alpha1 "github.com/openmcp-project/openmcp-operator/api/constants" + "github.com/openmcp-project/openmcp-operator/lib/clusteraccess/advanced" libutils "github.com/openmcp-project/openmcp-operator/lib/utils" ) const ( - controllerName = "ClusterAccess" - requestSuffixMCP = "--mcp" - requestSuffixWorkload = "--wl" + controllerName = "ClusterAccess" + idMCP = "mcp" + suffixMCP = "mcp" + idWorkload = "workload" + suffixWorkload = "wl" ) // Reconciler is an interface for reconciling access to openMCP clusters. @@ -60,10 +60,14 @@ type Reconciler interface { // This function will only be successful if the MCP AccessRequest is granted and Reconcile returned without an error // and a reconcile.Result with no RequeueAfter value. MCPCluster(ctx context.Context, request reconcile.Request) (*clusters.Cluster, error) + // MCPAccessRequest returns the AccessRequest for the MCP cluster. + MCPAccessRequest(ctx context.Context, request reconcile.Request) (*clustersv1alpha1.AccessRequest, error) // WorkloadCluster creates a Cluster for the Workload AccessRequest. // This function will only be successful if the Workload AccessRequest is granted and Reconcile returned without an error // and a reconcile.Result with no RequeueAfter value. WorkloadCluster(ctx context.Context, request reconcile.Request) (*clusters.Cluster, error) + // WorkloadAccessRequest returns the AccessRequest for the Workload cluster. + WorkloadAccessRequest(ctx context.Context, request reconcile.Request) (*clustersv1alpha1.AccessRequest, error) // Reconcile creates the ClusterRequests and AccessRequests for the MCP and Workload clusters based on the reconciled object. // This function should be called during all reconciliations of the reconciled object. 
@@ -84,263 +88,148 @@ type Reconciler interface { } type reconcilerImpl struct { - platformClusterClient client.Client - controllerName string - retryInterval time.Duration - mcpPermissions []clustersv1alpha1.PermissionsRequest - mcpRoleRefs []commonapi.RoleRef - workloadPermissions []clustersv1alpha1.PermissionsRequest - workloadRoleRefs []commonapi.RoleRef - mcpScheme *runtime.Scheme - workloadScheme *runtime.Scheme - skipWorkloadCluster bool + internal advanced.ClusterAccessReconciler + // platformClusterClient client.Client + controllerName string + // retryInterval time.Duration + mcpPermissions []clustersv1alpha1.PermissionsRequest + mcpRoleRefs []commonapi.RoleRef + workloadPermissions []clustersv1alpha1.PermissionsRequest + workloadRoleRefs []commonapi.RoleRef + mcpScheme *runtime.Scheme + workloadScheme *runtime.Scheme + // skipWorkloadCluster bool } // NewClusterAccessReconciler creates a new ClusterAccessReconciler with the given parameters. // platformClusterClient is the client to the platform cluster where the AccessRequests and ClusterRequests are created. // controllerName is the name of the Kubernetes controller, used to create stable request names. func NewClusterAccessReconciler(platformClusterClient client.Client, controllerName string) Reconciler { + rec := advanced.NewClusterAccessReconciler(platformClusterClient, controllerName).WithManagedLabels(func(controllerName string, req reconcile.Request, reg advanced.ClusterRegistration) (string, string, map[string]string) { + _, managedPurpose, _ := advanced.DefaultManagedLabelGenerator(controllerName, req, reg) + return controllerName, managedPurpose, map[string]string{ + constv1alpha1.OnboardingNameLabel: req.Name, + constv1alpha1.OnboardingNamespaceLabel: req.Namespace, + } + }) return &reconcilerImpl{ - platformClusterClient: platformClusterClient, - controllerName: controllerName, - retryInterval: 5 * time.Second, - mcpPermissions: []clustersv1alpha1.PermissionsRequest{}, - mcpRoleRefs: []commonapi.RoleRef{}, - workloadPermissions: []clustersv1alpha1.PermissionsRequest{}, - workloadRoleRefs: []commonapi.RoleRef{}, - mcpScheme: runtime.NewScheme(), - workloadScheme: runtime.NewScheme(), - skipWorkloadCluster: false, - } + internal: rec, + // platformClusterClient: platformClusterClient, + controllerName: controllerName, + // retryInterval: 5 * time.Second, + mcpPermissions: []clustersv1alpha1.PermissionsRequest{}, + mcpRoleRefs: []commonapi.RoleRef{}, + workloadPermissions: []clustersv1alpha1.PermissionsRequest{}, + workloadRoleRefs: []commonapi.RoleRef{}, + mcpScheme: runtime.NewScheme(), + workloadScheme: runtime.NewScheme(), + // skipWorkloadCluster: false, + } +} + +func (r *reconcilerImpl) registerMCP() *reconcilerImpl { + token := &clustersv1alpha1.TokenConfig{ + Permissions: make([]clustersv1alpha1.PermissionsRequest, len(r.mcpPermissions)), + RoleRefs: make([]commonapi.RoleRef, len(r.mcpRoleRefs)), + } + copy(token.Permissions, r.mcpPermissions) + copy(token.RoleRefs, r.mcpRoleRefs) + r.internal.Register(advanced.ExistingClusterRequest(idMCP, suffixMCP, func(req reconcile.Request, _ ...any) (*commonapi.ObjectReference, error) { + namespace, err := libutils.StableMCPNamespace(req.Name, req.Namespace) + if err != nil { + return nil, err + } + return &commonapi.ObjectReference{ + Name: req.Name, + Namespace: namespace, + }, nil + }). + WithNamespaceGenerator(advanced.DefaultNamespaceGeneratorForMCP). + WithTokenAccess(token). + WithScheme(r.mcpScheme). 
+ Build()) + return r +} + +func (r *reconcilerImpl) registerWorkload() *reconcilerImpl { + token := &clustersv1alpha1.TokenConfig{ + Permissions: make([]clustersv1alpha1.PermissionsRequest, len(r.workloadPermissions)), + RoleRefs: make([]commonapi.RoleRef, len(r.workloadRoleRefs)), + } + copy(token.Permissions, r.workloadPermissions) + copy(token.RoleRefs, r.workloadRoleRefs) + r.internal.Register(advanced.NewClusterRequest(idWorkload, suffixWorkload, advanced.StaticClusterRequestSpecGenerator(&clustersv1alpha1.ClusterRequestSpec{ + Purpose: clustersv1alpha1.PURPOSE_WORKLOAD, + })). + WithNamespaceGenerator(advanced.DefaultNamespaceGeneratorForMCP). + WithTokenAccess(token). + WithScheme(r.workloadScheme). + Build()) + return r } func (r *reconcilerImpl) WithRetryInterval(interval time.Duration) Reconciler { - r.retryInterval = interval + r.internal.WithRetryInterval(interval) return r } func (r *reconcilerImpl) WithMCPPermissions(permissions []clustersv1alpha1.PermissionsRequest) Reconciler { r.mcpPermissions = permissions - return r + return r.registerMCP() } func (r *reconcilerImpl) WithMCPRoleRefs(roleRefs []commonapi.RoleRef) Reconciler { r.mcpRoleRefs = roleRefs - return r + return r.registerMCP() } func (r *reconcilerImpl) WithWorkloadPermissions(permissions []clustersv1alpha1.PermissionsRequest) Reconciler { r.workloadPermissions = permissions - return r + return r.registerWorkload() } func (r *reconcilerImpl) WithWorkloadRoleRefs(roleRefs []commonapi.RoleRef) Reconciler { r.workloadRoleRefs = roleRefs - return r + return r.registerWorkload() } func (r *reconcilerImpl) WithMCPScheme(scheme *runtime.Scheme) Reconciler { r.mcpScheme = scheme - return r + return r.registerMCP() } func (r *reconcilerImpl) WithWorkloadScheme(scheme *runtime.Scheme) Reconciler { r.workloadScheme = scheme - return r + return r.registerWorkload() } func (r *reconcilerImpl) SkipWorkloadCluster() Reconciler { - r.skipWorkloadCluster = true + r.internal.Unregister(idWorkload) return r } func (r *reconcilerImpl) MCPCluster(ctx context.Context, request reconcile.Request) (*clusters.Cluster, error) { - platformNamespace, err := libutils.StableMCPNamespace(request.Name, request.Namespace) - if err != nil { - return nil, err - } - mcpAccessRequest := &clustersv1alpha1.AccessRequest{ - ObjectMeta: metav1.ObjectMeta{ - Name: StableRequestName(r.controllerName, request) + requestSuffixMCP, - Namespace: platformNamespace, - }, - } - - if err := r.platformClusterClient.Get(ctx, client.ObjectKeyFromObject(mcpAccessRequest), mcpAccessRequest); err != nil { - return nil, fmt.Errorf("failed to get MCP AccessRequest: %w", err) - } - - mcpCluster, err := createClusterForAccessRequest(ctx, r.platformClusterClient, clustersv1alpha1.PURPOSE_MCP, r.mcpScheme, mcpAccessRequest) - if err != nil { - return nil, fmt.Errorf("failed to create MCP cluster for AccessRequest: %w", err) - } + return r.internal.Access(ctx, request, idMCP) +} - return mcpCluster, nil +func (r *reconcilerImpl) MCPAccessRequest(ctx context.Context, request reconcile.Request) (*clustersv1alpha1.AccessRequest, error) { + return r.internal.AccessRequest(ctx, request, idMCP) } func (r *reconcilerImpl) WorkloadCluster(ctx context.Context, request reconcile.Request) (*clusters.Cluster, error) { - platformNamespace, err := libutils.StableMCPNamespace(request.Name, request.Namespace) - if err != nil { - return nil, err - } - workloadAccessRequest := &clustersv1alpha1.AccessRequest{ - ObjectMeta: metav1.ObjectMeta{ - Name: StableRequestName(r.controllerName, request) 
+ requestSuffixWorkload, - Namespace: platformNamespace, - }, - } - - if err := r.platformClusterClient.Get(ctx, client.ObjectKeyFromObject(workloadAccessRequest), workloadAccessRequest); err != nil { - return nil, fmt.Errorf("failed to get Workload AccessRequest: %w", err) - } - - workloadCluster, err := createClusterForAccessRequest(ctx, r.platformClusterClient, clustersv1alpha1.PURPOSE_WORKLOAD, r.workloadScheme, workloadAccessRequest) - if err != nil { - return nil, fmt.Errorf("failed to create Workload cluster for AccessRequest: %w", err) - } + return r.internal.Access(ctx, request, idWorkload) +} - return workloadCluster, nil +func (r *reconcilerImpl) WorkloadAccessRequest(ctx context.Context, request reconcile.Request) (*clustersv1alpha1.AccessRequest, error) { + return r.internal.AccessRequest(ctx, request, idWorkload) } func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - log := logging.FromContextOrPanic(ctx).WithName(controllerName) - - platformNamespace, err := libutils.StableMCPNamespace(request.Name, request.Namespace) - if err != nil { - return reconcile.Result{}, err - } - requestNamespace := platformNamespace - requestNameMCP := StableRequestName(r.controllerName, request) + requestSuffixMCP - requestNameWorkload := StableRequestName(r.controllerName, request) + requestSuffixWorkload - - metadata := requestMetadata(r.controllerName, request) - - // Check if the request namespace already exists. - // If it does not exist, wait until it is created. - - log.Debug("Wait for request namespace to exist", "requestNamespace", requestNamespace) - - requestNamespaceExists, err := namespaceExists(ctx, r.platformClusterClient, requestNamespace) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to check if request namespace exists: %w", err) - } - - if !requestNamespaceExists { - log.Debug("Request namespace does not exist", "requestNamespace", requestNamespace) - return reconcile.Result{RequeueAfter: r.retryInterval}, nil - } - - // Create or update the MCP AccessRequest and wait until the MCP cluster is ready. - // This also prevents creating the Workload AccessRequest before there is even a MCP created on the onboarding cluster. - - log.Debug("Create and wait for MCP cluster access request", "accessRequestName", requestNameMCP, "accessRequestNamespace", requestNamespace) - - mcpAccessRequest, err := ensureAccessRequest(ctx, r.platformClusterClient, - requestNameMCP, requestNamespace, &commonapi.ObjectReference{ - Name: request.Name, - Namespace: requestNamespace, - }, nil, r.mcpPermissions, r.mcpRoleRefs, metadata) - - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to create or update MCP AccessRequest: %w", err) - } - - if mcpAccessRequest.Status.IsDenied() { - return reconcile.Result{}, fmt.Errorf("MCP AccessRequest denied") - } - - if !mcpAccessRequest.Status.IsGranted() { - log.Debug("MCP AccessRequest is not yet granted", - "accessRequestName", requestNameMCP, "accessRequestNamespace", requestNamespace, "requestPhase", mcpAccessRequest.Status.Phase) - return reconcile.Result{RequeueAfter: r.retryInterval}, nil - } - - if !r.skipWorkloadCluster { - // Create or update the ClusterRequest for the Workload cluster and wait until it is ready. 
- - log.Debug("Create and wait for Workload cluster request", "clusterRequestName", requestNameWorkload, "clusterRequestNamespace", requestNamespace) - - workloadRequest, err := ensureClusterRequest(ctx, r.platformClusterClient, requestNameWorkload, requestNamespace, clustersv1alpha1.PURPOSE_WORKLOAD, metadata) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to create or update Workload ClusterRequest: %w", err) - } - - if workloadRequest.Status.IsDenied() { - return reconcile.Result{}, fmt.Errorf("workload ClusterRequest denied") - } - - if !workloadRequest.Status.IsGranted() { - log.Debug("Workload ClusterRequest is not yet granted", - "clusterRequestName", requestNameWorkload, "clusterRequestNamespace", requestNamespace, "requestPhase", workloadRequest.Status.Phase) - return reconcile.Result{RequeueAfter: r.retryInterval}, nil - } - - // Create or update the AccessRequest for the Workload cluster. - - log.Debug("Create and wait for Workload cluster access request", "accessRequestName", requestNameWorkload, "accessRequestNamespace", requestNamespace) - - workloadAccessRequest, err := ensureAccessRequest(ctx, r.platformClusterClient, - requestNameWorkload, requestNamespace, &commonapi.ObjectReference{ - Name: requestNameWorkload, - Namespace: requestNamespace, - }, nil, r.workloadPermissions, r.workloadRoleRefs, metadata) - - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to create or update Workload AccessRequest: %w", err) - } - - if workloadAccessRequest.Status.IsDenied() { - return reconcile.Result{}, fmt.Errorf("workload AccessRequest denied") - } - - if !workloadAccessRequest.Status.IsGranted() { - log.Debug("Workload AccessRequest is not yet granted", - "accessRequestName", requestNameMCP, "accessRequestNamespace", requestNamespace, "requestPhase", workloadAccessRequest.Status.Phase) - return reconcile.Result{RequeueAfter: r.retryInterval}, nil - } - } - - return reconcile.Result{}, nil + return r.internal.Reconcile(ctx, request) } func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - platformNamespace, err := libutils.StableMCPNamespace(request.Name, request.Namespace) - if err != nil { - return reconcile.Result{}, err - } - requestNamespace := platformNamespace - requestNameMCP := StableRequestName(r.controllerName, request) + requestSuffixMCP - requestNameWorkload := StableRequestName(r.controllerName, request) + requestSuffixWorkload - - workloadAccessDeleted := true - workloadClusterDeleted := true - - if !r.skipWorkloadCluster { - // Delete the Workload AccessRequest if it exists - workloadAccessDeleted, err = deleteAccessRequest(ctx, r.platformClusterClient, requestNameWorkload, requestNamespace) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to delete Workload AccessRequest: %w", err) - } - - // Delete the Workload ClusterRequest if it exists - workloadClusterDeleted, err = deleteClusterRequest(ctx, r.platformClusterClient, requestNameWorkload, requestNamespace) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to delete Workload ClusterRequest: %w", err) - } - - } - - // Delete the MCP AccessRequest if it exists - mcpAccessDeleted, err := deleteAccessRequest(ctx, r.platformClusterClient, requestNameMCP, requestNamespace) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to delete MCP AccessRequest: %w", err) - } - - if !workloadAccessDeleted || !workloadClusterDeleted || !mcpAccessDeleted { - return 
reconcile.Result{RequeueAfter: r.retryInterval}, nil - } - - return reconcile.Result{}, nil + return r.internal.ReconcileDelete(ctx, request) } // Manager is an interface for managing cluster access. @@ -515,129 +404,6 @@ func (m *managerImpl) wait(ctx context.Context, test func(ctx context.Context) ( return wait.PollUntilContextTimeout(ctx, m.interval, m.timeout, true, test) } -func requestMetadata(controllerName string, request reconcile.Request) resources.MetadataMutator { - metadata := resources.NewMetadataMutator() - metadata.WithLabels(map[string]string{ - constv1alpha1.ManagedByLabel: controllerName, - constv1alpha1.OnboardingNameLabel: request.Name, - constv1alpha1.OnboardingNamespaceLabel: request.Namespace, - }) - - return metadata -} - -func namespaceExists(ctx context.Context, platformClusterClient client.Client, namespace string) (bool, error) { - ns := &corev1.Namespace{} - if err := platformClusterClient.Get(ctx, client.ObjectKey{Name: namespace}, ns); err != nil { - if client.IgnoreNotFound(err) != nil { - return false, fmt.Errorf("failed to check if namespace exists: %w", err) - } - return false, nil // Namespace does not exist - } - return true, nil // Namespace exists -} - -func ensureClusterRequest(ctx context.Context, platformClusterClient client.Client, - requestName, requestNamespace, purpose string, metadata resources.MetadataMutator) (*clustersv1alpha1.ClusterRequest, error) { - - mutator := newClusterRequestMutator(requestName, requestNamespace, purpose) - mutator.WithMetadata(metadata) - - if err := resources.CreateOrUpdateResource[*clustersv1alpha1.ClusterRequest](ctx, platformClusterClient, mutator); err != nil { - return nil, fmt.Errorf("failed to create or update ClusterRequest: %w", err) - } - - cr := mutator.Empty() - if err := platformClusterClient.Get(ctx, client.ObjectKeyFromObject(cr), cr); err != nil { - return nil, fmt.Errorf("failed to get ClusterRequest: %w", err) - } - - return cr, nil -} - -func ensureAccessRequest(ctx context.Context, platformClusterClient client.Client, requestName, requestNamespace string, - requestRef *commonapi.ObjectReference, clusterRef *commonapi.ObjectReference, - permissions []clustersv1alpha1.PermissionsRequest, roleRefs []commonapi.RoleRef, metadata resources.MetadataMutator) (*clustersv1alpha1.AccessRequest, error) { - - mutator := newAccessRequestMutator(requestName, requestNamespace). - WithTokenPermissions(permissions). - WithTokenRoleRefs(roleRefs). 
- WithMetadata(metadata) - - if requestRef != nil { - mutator = mutator.WithRequestRef(requestRef) - } - - if clusterRef != nil { - mutator = mutator.WithClusterRef(clusterRef) - } - - if err := resources.CreateOrUpdateResource[*clustersv1alpha1.AccessRequest](ctx, platformClusterClient, mutator); err != nil { - return nil, fmt.Errorf("failed to create or update AccessRequest: %w", err) - } - - ar := mutator.Empty() - if err := platformClusterClient.Get(ctx, client.ObjectKeyFromObject(ar), ar); err != nil { - return nil, fmt.Errorf("failed to get AccessRequest: %w", err) - } - - return ar, nil -} - -func deleteAccessRequest(ctx context.Context, platformClusterClient client.Client, requestName, requestNamespace string) (bool, error) { - ar := &clustersv1alpha1.AccessRequest{ - ObjectMeta: metav1.ObjectMeta{ - Name: requestName, - Namespace: requestNamespace, - }, - } - - if err := platformClusterClient.Get(ctx, client.ObjectKeyFromObject(ar), ar); err != nil { - if errors.IsNotFound(err) { - return true, nil // AccessRequest already deleted - } else { - return false, fmt.Errorf("failed to get AccessRequest: %w", err) - } - } - - if err := platformClusterClient.Delete(ctx, ar); err != nil { - if errors.IsNotFound(err) { - return true, nil // AccessRequest already deleted - } else { - return false, fmt.Errorf("failed to delete AccessRequest: %w", err) - } - } - - return true, nil // AccessRequest deleted successfully -} - -func deleteClusterRequest(ctx context.Context, platformClusterClient client.Client, requestName, requestNamespace string) (bool, error) { - cr := &clustersv1alpha1.ClusterRequest{ - ObjectMeta: metav1.ObjectMeta{ - Name: requestName, - Namespace: requestNamespace, - }, - } - - if err := platformClusterClient.Get(ctx, client.ObjectKeyFromObject(cr), cr); err != nil { - if errors.IsNotFound(err) { - return true, nil // ClusterRequest already deleted - } else { - return false, fmt.Errorf("failed to get ClusterRequest: %w", err) - } - } - - if err := platformClusterClient.Delete(ctx, cr); err != nil { - if errors.IsNotFound(err) { - return true, nil // ClusterRequest already deleted - } else { - return false, fmt.Errorf("failed to delete ClusterRequest: %w", err) - } - } - - return true, nil // ClusterRequest deleted successfully -} - func createClusterForAccessRequest(ctx context.Context, platformClusterClient client.Client, purpose string, scheme *runtime.Scheme, accessRequest *clustersv1alpha1.AccessRequest) (*clusters.Cluster, error) { @@ -860,11 +626,10 @@ func (m *accessRequestMutator) Mutate(accessRequest *clustersv1alpha1.AccessRequ // This basically results in '--'. // If the resulting string exceeds the Kubernetes name length limit of 63 characters, it will be truncated with the last characters replaced by a hash of what was removed. func StableRequestName(controllerName string, request reconcile.Request) string { - return StableRequestNameFromLocalName(controllerName, request.Name) + return advanced.StableRequestName(controllerName, request, "") } // StableRequestNameFromLocalName works like StableRequestName but takes a local name directly instead of a reconcile.Request. 
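// A usage sketch (not part of this change) for this simplified wrapper, which now delegates to
// the advanced reconciler internally; the client, controller name and role ref are illustrative.
func exampleSimpleUsage(ctx context.Context, platformClient client.Client, req reconcile.Request) error {
	rec := NewClusterAccessReconciler(platformClient, "my-controller").
		WithMCPRoleRefs([]commonapi.RoleRef{{Name: "cluster-admin", Kind: "ClusterRole"}}).
		SkipWorkloadCluster() // excludes the Workload cluster, implemented via Unregister
	res, err := rec.Reconcile(ctx, req)
	if err != nil || res.RequeueAfter > 0 {
		return err // not ready yet; the caller should requeue based on res
	}
	mcp, err := rec.MCPCluster(ctx, req)
	if err != nil {
		return err
	}
	_ = mcp // ready-to-use access to the MCP cluster
	return nil
}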
func StableRequestNameFromLocalName(controllerName, localName string) string { - controllerName = strings.ToLower(controllerName) - return ctrlutils.ShortenToXCharactersUnsafe(fmt.Sprintf("%s--%s", controllerName, localName), ctrlutils.K8sMaxNameLength) + return advanced.StableRequestNameFromLocalName(controllerName, localName, "") } diff --git a/lib/clusteraccess/clusteraccess_test.go b/lib/clusteraccess/clusteraccess_test.go index 8b5bc9e..050024c 100644 --- a/lib/clusteraccess/clusteraccess_test.go +++ b/lib/clusteraccess/clusteraccess_test.go @@ -9,7 +9,6 @@ import ( "github.com/openmcp-project/controller-utils/pkg/clusters" testutils "github.com/openmcp-project/controller-utils/pkg/testing" - corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -171,20 +170,6 @@ var _ = Describe("ClusterAccessReconciler", func() { reconcileResult = env.ShouldReconcile(request, "reconcilerImpl should not return an error") Expect(reconcileResult.RequeueAfter).ToNot(BeZero(), "reconcile should requeue after a delay") - // reconcile now waits until the request namespace is being created - // the format if the request namespace is "ob-" - // create the expected request namespace - requestNamespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: expectedRequestNamespace, - }, - } - - Expect(env.Client().Create(env.Ctx, requestNamespace)).To(Succeed()) - - // reconcile again to process the request - env.ShouldReconcile(request, "reconcilerImpl should not return an error") - // there should be an access request for the MCP cluster created Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(accessRequestMCP), accessRequestMCP)).To(Succeed()) @@ -275,20 +260,6 @@ var _ = Describe("ClusterAccessReconciler", func() { reconcileResult = env.ShouldReconcile(request, "reconcilerImpl should not return an error") Expect(reconcileResult.RequeueAfter).ToNot(BeZero(), "reconcile should requeue after a delay") - // reconcile now waits until the request namespace is being created - // the format if the request namespace is "ob-" - // create the expected request namespace - requestNamespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: expectedRequestNamespace, - }, - } - - Expect(env.Client().Create(env.Ctx, requestNamespace)).To(Succeed()) - - // reconcile again to process the request - env.ShouldReconcile(request, "reconcilerImpl should not return an error") - // there should be an access request for the MCP cluster created Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(accessRequestMCP), accessRequestMCP)).To(Succeed()) From c8761022a3d9468f2e5ade82efd95ccf3fddc35e Mon Sep 17 00:00:00 2001 From: Johannes Aubart Date: Wed, 1 Oct 2025 17:43:36 +0200 Subject: [PATCH 07/12] move parameterization into own interface for simpler method signatures --- lib/clusteraccess/advanced/clusteraccess.go | 181 +++++++++++--------- 1 file changed, 103 insertions(+), 78 deletions(-) diff --git a/lib/clusteraccess/advanced/clusteraccess.go b/lib/clusteraccess/advanced/clusteraccess.go index cf554a3..60cf3f3 100644 --- a/lib/clusteraccess/advanced/clusteraccess.go +++ b/lib/clusteraccess/advanced/clusteraccess.go @@ -57,13 +57,16 @@ type ClusterAccessReconciler interface { Access(ctx context.Context, request reconcile.Request, id string) (*clusters.Cluster, error) // AccessRequest fetches the AccessRequest object for the cluster for the specified request with the specified id. 
// Will fail if the cluster is not registered or no AccessRequest is registered for the cluster, or if some other error occurs. - AccessRequest(ctx context.Context, request reconcile.Request, id string) (*clustersv1alpha1.AccessRequest, error) + // The same additionalData must be passed into all methods of this ClusterAccessReconciler for the same request and id. + AccessRequest(ctx context.Context, request reconcile.Request, id string, additionalData ...any) (*clustersv1alpha1.AccessRequest, error) // ClusterRequest fetches the ClusterRequest object for the cluster for the specified request with the specified id. // Will fail if the cluster is not registered or no ClusterRequest is registered for the cluster, or if some other error occurs. - ClusterRequest(ctx context.Context, request reconcile.Request, id string) (*clustersv1alpha1.ClusterRequest, error) + // The same additionalData must be passed into all methods of this ClusterAccessReconciler for the same request and id. + ClusterRequest(ctx context.Context, request reconcile.Request, id string, additionalData ...any) (*clustersv1alpha1.ClusterRequest, error) // Cluster fetches the external Cluster object for the cluster for the specified request with the specified id. // Will fail if the cluster is not registered or no Cluster can be determined, or if some other error occurs. - Cluster(ctx context.Context, request reconcile.Request, id string) (*clustersv1alpha1.Cluster, error) + // The same additionalData must be passed into all methods of this ClusterAccessReconciler for the same request and id. + Cluster(ctx context.Context, request reconcile.Request, id string, additionalData ...any) (*clustersv1alpha1.Cluster, error) // Reconcile creates the ClusterRequests and/or AccessRequests for the registered clusters. // This function should be called during all reconciliations of the reconciled object. @@ -120,8 +123,6 @@ func DefaultManagedLabelGenerator(controllerName string, req reconcile.Request, } type ClusterRegistration interface { - // static (identical, independent from the reconcile request) - // ID is the unique identifier for the cluster. ID() string // Suffix is the suffix to be used for the names of the created resources. @@ -135,32 +136,36 @@ type ClusterRegistration interface { AccessRequestAvailable() bool // ClusterRequestAvailable returns true if a ClusterRequest can be retrieved from this registration. ClusterRequestAvailable() bool + // Parameterize turns this ClusterRegistration into a ParamterizedClusterRegistration. + Parameterize(req reconcile.Request, additionalData ...any) ParamterizedClusterRegistration +} - // dynamic (can depend on the reconcile request and potential other factors) +type ParamterizedClusterRegistration interface { + ClusterRegistration // AccessRequestTokenConfig is the token configuration for the AccessRequest to be created for the cluster. // Might be nil if no AccessRequest should be created or if OIDC access is used. // Only one of AccessRequestTokenConfig and AccessRequestOIDCConfig should be non-nil. - AccessRequestTokenConfig(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.TokenConfig, error) + AccessRequestTokenConfig() (*clustersv1alpha1.TokenConfig, error) // AccessRequestOIDCConfig is the OIDC configuration for the AccessRequest to be created for the cluster. // Might be nil if no AccessRequest should be created or if token-based access is used. // Only one of AccessRequestTokenConfig and AccessRequestOIDCConfig should be non-nil. 
- AccessRequestOIDCConfig(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.OIDCConfig, error) + AccessRequestOIDCConfig() (*clustersv1alpha1.OIDCConfig, error) // ClusterRequestSpec is the spec for the ClusterRequest to be created for the cluster. // It is nil if no ClusterRequest should be created. // Only one of ClusterRequestSpec, ClusterReference and ClusterRequestReference should be non-nil. - ClusterRequestSpec(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.ClusterRequestSpec, error) + ClusterRequestSpec() (*clustersv1alpha1.ClusterRequestSpec, error) // ClusterReference returns name and namespace of an existing Cluster resource. // It is nil if a new ClusterRequest should be created or if the Cluster is referenced by an existing ClusterRequest. // Only one of ClusterRequestSpec, ClusterReference and ClusterRequestReference should be non-nil. - ClusterReference(req reconcile.Request, additionalData ...any) (*commonapi.ObjectReference, error) + ClusterReference() (*commonapi.ObjectReference, error) // ClusterRequestReference returns name and namespace of the ClusterRequest resource. // It is nil if a new ClusterRequest should be created or if the Cluster is referenced directly. // Only one of ClusterRequestSpec, ClusterReference and ClusterRequestReference should be non-nil. - ClusterRequestReference(req reconcile.Request, additionalData ...any) (*commonapi.ObjectReference, error) + ClusterRequestReference() (*commonapi.ObjectReference, error) // Namespace generates the namespace on the Platform cluster to use for the created requests. // The generated namespace is expected be unique within the context of the reconciler. - Namespace(req reconcile.Request, additionalData ...any) (string, error) + Namespace() (string, error) } type ClusterRegistrationBuilder interface { @@ -219,7 +224,32 @@ type clusterRegistrationImpl struct { generateNamespace func(req reconcile.Request, additionalData ...any) (string, error) } +type parameterizedClusterRegistrationImpl struct { + clusterRegistrationImpl + req reconcile.Request + additionalData []any +} + var _ ClusterRegistration = &clusterRegistrationImpl{} +var _ ParamterizedClusterRegistration = ¶meterizedClusterRegistrationImpl{} + +// ID implements ClusterRegistration. +func (c *clusterRegistrationImpl) ID() string { + return c.id +} + +// Suffix implements ClusterRegistration. +func (c *clusterRegistrationImpl) Suffix() string { + if c.suffix != "" { + return c.suffix + } + return c.id +} + +// Scheme implements ClusterRegistration. +func (c *clusterRegistrationImpl) Scheme() *runtime.Scheme { + return c.scheme +} // AccessRequestAvailable implements ClusterRegistration. func (c *clusterRegistrationImpl) AccessRequestAvailable() bool { @@ -231,70 +261,60 @@ func (c *clusterRegistrationImpl) ClusterRequestAvailable() bool { return c.generateClusterRequestSpec != nil || c.generateClusterRequestRef != nil } -// AccessRequestTokenConfig implements ClusterRegistration. -func (c *clusterRegistrationImpl) AccessRequestTokenConfig(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.TokenConfig, error) { +func (c *clusterRegistrationImpl) Parameterize(req reconcile.Request, additionalData ...any) ParamterizedClusterRegistration { + return ¶meterizedClusterRegistrationImpl{ + clusterRegistrationImpl: *c, + req: req, + additionalData: additionalData, + } +} + +// AccessRequestTokenConfig implements ParameterizedClusterRegistration. 
+func (c *parameterizedClusterRegistrationImpl) AccessRequestTokenConfig() (*clustersv1alpha1.TokenConfig, error) { if c.generateAccessTokenConfig == nil { return nil, nil } - return c.generateAccessTokenConfig(req, additionalData...) + return c.generateAccessTokenConfig(c.req, c.additionalData...) } -// AccessRequestOIDCConfig implements ClusterRegistration. -func (c *clusterRegistrationImpl) AccessRequestOIDCConfig(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.OIDCConfig, error) { +// AccessRequestOIDCConfig implements ParameterizedClusterRegistration. +func (c *parameterizedClusterRegistrationImpl) AccessRequestOIDCConfig() (*clustersv1alpha1.OIDCConfig, error) { if c.generateAccessOIDCConfig == nil { return nil, nil } - return c.generateAccessOIDCConfig(req, additionalData...) + return c.generateAccessOIDCConfig(c.req, c.additionalData...) } -// ClusterRequestSpec implements ClusterRegistration. -func (c *clusterRegistrationImpl) ClusterRequestSpec(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.ClusterRequestSpec, error) { +// ClusterRequestSpec implements ParameterizedClusterRegistration. +func (c *parameterizedClusterRegistrationImpl) ClusterRequestSpec() (*clustersv1alpha1.ClusterRequestSpec, error) { if c.generateClusterRequestSpec == nil { return nil, nil } - return c.generateClusterRequestSpec(req, additionalData...) + return c.generateClusterRequestSpec(c.req, c.additionalData...) } -// ClusterReference implements ClusterRegistration. -func (c *clusterRegistrationImpl) ClusterReference(req reconcile.Request, additionalData ...any) (*commonapi.ObjectReference, error) { +// ClusterReference implements ParameterizedClusterRegistration. +func (c *parameterizedClusterRegistrationImpl) ClusterReference() (*commonapi.ObjectReference, error) { if c.generateClusterRef == nil { return nil, nil } - return c.generateClusterRef(req, additionalData...) + return c.generateClusterRef(c.req, c.additionalData...) } -// ClusterRequestReference implements ClusterRegistration. -func (c *clusterRegistrationImpl) ClusterRequestReference(req reconcile.Request, additionalData ...any) (*commonapi.ObjectReference, error) { +// ClusterRequestReference implements ParameterizedClusterRegistration. +func (c *parameterizedClusterRegistrationImpl) ClusterRequestReference() (*commonapi.ObjectReference, error) { if c.generateClusterRequestRef == nil { return nil, nil } - return c.generateClusterRequestRef(req, additionalData...) -} - -// ID implements ClusterRegistration. -func (c *clusterRegistrationImpl) ID() string { - return c.id + return c.generateClusterRequestRef(c.req, c.additionalData...) } -// Suffix implements ClusterRegistration. -func (c *clusterRegistrationImpl) Suffix() string { - if c.suffix != "" { - return c.suffix - } - return c.id -} - -// Scheme implements ClusterRegistration. -func (c *clusterRegistrationImpl) Scheme() *runtime.Scheme { - return c.scheme -} - -// Namespace implements ClusterRegistration. -func (c *clusterRegistrationImpl) Namespace(req reconcile.Request, additionalData ...any) (string, error) { +// Namespace implements ParameterizedClusterRegistration. +func (c *parameterizedClusterRegistrationImpl) Namespace() (string, error) { if c.generateNamespace == nil { - return DefaultNamespaceGenerator(req, additionalData...) + return DefaultNamespaceGenerator(c.req, c.additionalData...) } - return c.generateNamespace(req, additionalData...) + return c.generateNamespace(c.req, c.additionalData...) 
} ////////////////////////////////////////////////// @@ -470,19 +490,20 @@ func (r *reconcilerImpl) Access(ctx context.Context, request reconcile.Request, } // AccessRequest implements Reconciler. -func (r *reconcilerImpl) AccessRequest(ctx context.Context, request reconcile.Request, id string) (*clustersv1alpha1.AccessRequest, error) { - reg, ok := r.registrations[id] +func (r *reconcilerImpl) AccessRequest(ctx context.Context, request reconcile.Request, id string, additionalData ...any) (*clustersv1alpha1.AccessRequest, error) { + rawReg, ok := r.registrations[id] if !ok { return nil, fmt.Errorf("no registration found for request '%s' with id '%s'", request.String(), id) } - if !reg.AccessRequestAvailable() { + if !rawReg.AccessRequestAvailable() { return nil, fmt.Errorf("no AccessRequest configured for request '%s' with id '%s'", request.String(), id) } - suffix := reg.Suffix() + suffix := rawReg.Suffix() if suffix == "" { - suffix = reg.ID() + suffix = rawReg.ID() } - namespace, err := reg.Namespace(request) + reg := rawReg.Parameterize(request, additionalData...) + namespace, err := reg.Namespace() if err != nil { return nil, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s': %w", request.String(), err) } @@ -496,12 +517,13 @@ func (r *reconcilerImpl) AccessRequest(ctx context.Context, request reconcile.Re } // Cluster implements Reconciler. -func (r *reconcilerImpl) Cluster(ctx context.Context, request reconcile.Request, id string) (*clustersv1alpha1.Cluster, error) { - reg, ok := r.registrations[id] +func (r *reconcilerImpl) Cluster(ctx context.Context, request reconcile.Request, id string, additionalData ...any) (*clustersv1alpha1.Cluster, error) { + rawReg, ok := r.registrations[id] if !ok { return nil, fmt.Errorf("no registration found for request '%s' with id '%s'", request.String(), id) } - clusterRef, err := reg.ClusterReference(request) + reg := rawReg.Parameterize(request, additionalData...) + clusterRef, err := reg.ClusterReference() if err != nil { return nil, fmt.Errorf("unable to generate Cluster reference from registration for request '%s' with id '%s': %w", request.String(), id, err) } @@ -537,18 +559,19 @@ func (r *reconcilerImpl) Cluster(ctx context.Context, request reconcile.Request, } // ClusterRequest implements Reconciler. -func (r *reconcilerImpl) ClusterRequest(ctx context.Context, request reconcile.Request, id string) (*clustersv1alpha1.ClusterRequest, error) { - reg, ok := r.registrations[id] +func (r *reconcilerImpl) ClusterRequest(ctx context.Context, request reconcile.Request, id string, additionalData ...any) (*clustersv1alpha1.ClusterRequest, error) { + rawReg, ok := r.registrations[id] if !ok { return nil, fmt.Errorf("no registration found for request '%s' with id '%s'", request.String(), id) } - crRef, err := reg.ClusterRequestReference(request) + reg := rawReg.Parameterize(request, additionalData...) 
+ crRef, err := reg.ClusterRequestReference() if err != nil { return nil, fmt.Errorf("unable to generate ClusterRequest reference from registration for request '%s' with id '%s': %w", request.String(), id, err) } if crRef == nil { // if the ClusterRequest is not referenced directly, check if was created or can be retrieved via an AccessRequest - crSpec, err := reg.ClusterRequestSpec(request) + crSpec, err := reg.ClusterRequestSpec() if err != nil { return nil, fmt.Errorf("unable to generate ClusterRequest spec from registration for request '%s' with id '%s': %w", request.String(), id, err) } @@ -558,7 +581,7 @@ func (r *reconcilerImpl) ClusterRequest(ctx context.Context, request reconcile.R if suffix == "" { suffix = reg.ID() } - namespace, err := reg.Namespace(request) + namespace, err := reg.Namespace() if err != nil { return nil, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s': %w", request.String(), err) } @@ -593,13 +616,14 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques log.Info("Reconciling cluster access") // iterate over registrations and ensure that the corresponding resources are ready - for _, reg := range r.registrations { - rlog := log.WithValues("id", reg.ID()) + for _, rawReg := range r.registrations { + rlog := log.WithValues("id", rawReg.ID()) rlog.Debug("Processing registration") - suffix := reg.Suffix() + suffix := rawReg.Suffix() if suffix == "" { - suffix = reg.ID() + suffix = rawReg.ID() } + reg := rawReg.Parameterize(request, additionalData...) managedBy, managedPurpose, additionalLabels := r.managedBy(r.controllerName, request, reg) expectedLabels := map[string]string{} expectedLabels[openmcpconst.ManagedByLabel] = managedBy @@ -607,7 +631,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques expectedLabels = maputils.Merge(additionalLabels, expectedLabels) // ensure namespace exists - namespace, err := reg.Namespace(request, additionalData...) + namespace, err := reg.Namespace() if err != nil { return reconcile.Result{}, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s': %w", request.String(), err) } @@ -626,7 +650,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques // if a ClusterRequest is requested, ensure it exists and is ready var cr *clustersv1alpha1.ClusterRequest - crSpec, err := reg.ClusterRequestSpec(request, additionalData...) + crSpec, err := reg.ClusterRequestSpec() if err != nil { return reconcile.Result{}, fmt.Errorf("unable to generate ClusterRequest spec from registration for request '%s' with id '%s': %w", request.String(), reg.ID(), err) } @@ -701,7 +725,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques referenced = true } if !referenced { - cRef, err := reg.ClusterReference(request, additionalData...) + cRef, err := reg.ClusterReference() if err != nil { return reconcile.Result{}, fmt.Errorf("unable to generate Cluster reference from registration for request '%s' with id '%s': %w", request.String(), reg.ID(), err) } @@ -711,7 +735,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques } } if !referenced { - crRef, err := reg.ClusterRequestReference(request, additionalData...) 
+ crRef, err := reg.ClusterRequestReference() if err != nil { return reconcile.Result{}, fmt.Errorf("unable to generate ClusterRequest reference from registration for request '%s' with id '%s': %w", request.String(), reg.ID(), err) } @@ -737,11 +761,11 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques if _, err := controllerutil.CreateOrUpdate(ctx, r.platformClusterClient, ar, func() error { ar.Labels = maputils.Merge(ar.Labels, expectedLabels) var err error - ar.Spec.Token, err = reg.AccessRequestTokenConfig(request, additionalData...) + ar.Spec.Token, err = reg.AccessRequestTokenConfig() if err != nil { return fmt.Errorf("unable to generate AccessRequest token config from registration: %w", err) } - ar.Spec.OIDC, err = reg.AccessRequestOIDCConfig(request, additionalData...) + ar.Spec.OIDC, err = reg.AccessRequestOIDCConfig() if err != nil { return fmt.Errorf("unable to generate AccessRequest OIDC config from registration: %w", err) } @@ -773,20 +797,21 @@ func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile. log.Info("Reconciling cluster access") // iterate over registrations and ensure that the corresponding resources are ready - for _, reg := range r.registrations { - rlog := log.WithValues("id", reg.ID()) + for _, rawReg := range r.registrations { + rlog := log.WithValues("id", rawReg.ID()) rlog.Debug("Processing registration") - suffix := reg.Suffix() + suffix := rawReg.Suffix() if suffix == "" { - suffix = reg.ID() + suffix = rawReg.ID() } + reg := rawReg.Parameterize(request, additionalData...) managedBy, managedPurpose, additionalLabels := r.managedBy(r.controllerName, request, reg) expectedLabels := map[string]string{} expectedLabels[openmcpconst.ManagedByLabel] = managedBy expectedLabels[openmcpconst.ManagedPurposeLabel] = managedPurpose expectedLabels = maputils.Merge(additionalLabels, expectedLabels) - namespace, err := reg.Namespace(request, additionalData...) 
+ namespace, err := reg.Namespace() if err != nil { return reconcile.Result{}, fmt.Errorf("unable to generate name of platform cluster namespace for request '%s': %w", request.String(), err) } From 6d4e37040419e05a56e323701d9257daad9c72de Mon Sep 17 00:00:00 2001 From: Johannes Aubart Date: Thu, 2 Oct 2025 13:44:08 +0200 Subject: [PATCH 08/12] fixes and documentation --- docs/README.md | 4 + docs/libraries/.docnames | 3 + docs/libraries/clusteraccess.md | 210 ++++++++++++++++++ lib/clusteraccess/advanced/clusteraccess.go | 80 ++++--- .../advanced/clusteraccess_test.go | 3 + lib/clusteraccess/clusteraccess.go | 15 +- lib/go.mod | 2 +- 7 files changed, 276 insertions(+), 41 deletions(-) create mode 100644 docs/libraries/.docnames create mode 100644 docs/libraries/clusteraccess.md diff --git a/docs/README.md b/docs/README.md index 8197595..5893938 100644 --- a/docs/README.md +++ b/docs/README.md @@ -8,6 +8,10 @@ - [ManagedControlPlane v2](controller/managedcontrolplane.md) - [Cluster Scheduler](controller/scheduler.md) +## Libraries + +- [The ClusterAccess Library](libraries/clusteraccess.md) + ## Resources - [Cluster Provider: Gardener [Resource Example]](resources/cluster-provider-gardener.md) diff --git a/docs/libraries/.docnames b/docs/libraries/.docnames new file mode 100644 index 0000000..c71097a --- /dev/null +++ b/docs/libraries/.docnames @@ -0,0 +1,3 @@ +{ + "header": "Libraries" +} \ No newline at end of file diff --git a/docs/libraries/clusteraccess.md b/docs/libraries/clusteraccess.md new file mode 100644 index 0000000..4606720 --- /dev/null +++ b/docs/libraries/clusteraccess.md @@ -0,0 +1,210 @@ +# The ClusterAccess Library + +The ClusterAccess library in `lib/clusteraccess` is all about getting access to k8s clusters represented by the `Cluster` resource. It can be used in a few different ways which are outlined in this document. + +## ClusterAccess Manager + +The `Manager` interface mainly specifies the `CreateAndWaitForCluster` and `WaitForClusterAccess` methods. They can be used to create a `ClusterRequest` and `AccessRequest` or just the latter one. + +Note that the methods are expected to wait for readiness of the created resources and are therefore not suitable to be used within a controller's `Reconcile` method. This is meant to be used in a provider's 'init' subcommand, which is executed as a one-time job, for example. + +The existing implementation can be instantiated with `NewClusterAccessManager`. + +##### Example + +```go +mgr := clusteraccess.NewClusterAccessManager(platformClusterClient, "my-controller", "my-namespace").WithInterval(30 * time.Second) +access, err := mgr.CreateAndWaitForCluster(...) +``` + +## ClusterAccess Reconciler + +The ClusterAccess Reconciler has the same purpose as the Manager - granting access to clusters. However, it is designed to be used within a controller's `Reconcile` method. Instead of waiting for readiness of the created resources, it returns an interval after which the currently reconciled object should be requeued, if the resources are not yet ready. + +There are two variants of the ClusterAccess Reconciler: simple and advanced. + +The 'simple' ClusterAccess Reconciler lies in `lib/clusteraccess`. It is designed for service providers which reconcile resources belonging to MCPs and need access to the corresponding MCP clusters and additionally a workload cluster. It cannot be used to get access to clusters that are not related to an MCP. + +The 'advanced' ClusterAccess Reconciler is in `lib/clusteraccess/advanced`.
It can be configured to grant access to arbitrary clusters, either static or depending on the reconciled object. It is possible to either create a new `ClusterRequest` or reference existing `ClusterRequest` or `Cluster` resources. Due to this flexibility, it is significantly more complex to configure than the simple variant, though. + +### ClusterAccess Reconciler - Simple + +Instantiate the ClusterAccess Reconciler during controller setup and store the instance in the controller's struct. + +During reconciliation, call its `Reconcile` method (or `ReconcileDelete`, if the reconciled object is being deleted), which will ensure the required `ClusterRequest` (if any) and `AccessRequest` resources. If the method returns a `reconcile.Result` with a non-zero `RequeueAfter` value, abort the reconciliation and return the given `reconcile.Result`. If not, reconciliation can continue and the `MCPCluster` and `WorkloadCluster` methods can be used to get access to the respective clusters. + +During reconciliation, only the `MCPCluster`, `MCPAccessRequest`, `WorkloadCluster`, `WorkloadAccessRequest`, `Reconcile`, and `ReconcileDelete` methods of the reconciler must be used. + +##### Example + +```go +// controller constructor +func NewMyController(platformClusterClient client.Client, ...) *MyController { + return &MyController{ + car: clusteraccess.NewClusterAccessReconciler(platformClusterClient, "my-controller"). + WithMCPPermissions(...). + WithMCPScheme(...). + WithWorkloadPermissions(...). + WithWorkloadScheme(...), + } +} + +// reconcile +func (c *MyController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + ... + + if inDeletion { + res, err := c.car.ReconcileDelete(ctx, req) + if err != nil || res.RequeueAfter > 0 { + return res, err + } + } else { + res, err := c.car.Reconcile(ctx, req) + if err != nil || res.RequeueAfter > 0 { + return res, err + } + + ... + + mcpAccess, err := c.car.MCPCluster(ctx, req) + + ... + } +} +``` + +#### Noteworthy Features + +The ClusterAccess Reconciler's `SkipWorkloadCluster` method can be used during initialization to disable creation of a `ClusterRequest` for a workload cluster. +If for some reason the `AccessRequest` resources are required, they can be retrieved via `MCPAccessRequest` and `WorkloadAccessRequest`. + +### ClusterAccess Reconciler - Advanced + +Instantiate the ClusterAccess Reconciler during controller setup and store the instance in the controller's struct. +```go +// controller constructor +func NewMyController(platformClusterClient client.Client, ...) *MyController { + car := advanced.NewClusterAccessReconciler(platformClusterClient, "my-controller") + + // register clusters and configure reconciler here + + return &MyController{ + car: car, + } +} +``` + +#### Cluster Registrations + +Opposed to the simple ClusterAccess Reconciler, the advanced variant is not limited to MCP and/or workload clusters. Instead, clusters for which access should be provided during the reconciliation need to be registered. The package contains three different constructors for cluster registrations: +- `NewClusterRequest` causes the reconciler to create a new `ClusterRequest` during the first reconciliation. Use this if you need access to a new cluster and not an existing one. + - Note that a `ClusterRequest` does not necessarily result in a new cluster, it could also return a reference to an existing cluster that is eligible for sharing. 
+- `ExistingCluster` can be used to get access to an existing `Cluster` when name and namespace of the `Cluster` are known. +- `ExistingClusterRequest` can be used to get access to an existing `Cluster` where name and namespace of the `Cluster` are not known, but an existing `ClusterRequest` for the `Cluster` is. + +All of these constructors take similar arguments: +- `id` is an identifier, used to differentiate between multiple clusters that belong to the same reconciled object. It must be unique among all cluster registrations. + - Example: `mcp` and `workload`. + - Registering the same `id` multiple times causes the last registration to overwrite any previous one. +- `suffix` is a suffix to use for the resources (`AccessRequest` and `ClusterRequest`, potentially) created for this cluster registration. It must be unique among all cluster registrations. + - If empty, `id` will be used as suffix. + - In the final resource names, the suffix will be separated from the rest of the name via `--`. The suffix must not contain any leading dashes or similar separators. + - Example: `mcp` for MCP clusters, `wl` for workload clusters. +- A generator function that generates the required information based on the request (name and namespace of the currently reconciled object) and potential further information. + - `NewClusterRequest` takes a function that returns a `ClusterRequestSpec`. The resulting spec will be used for the created `ClusterRequest`. + - If the spec is actually static and does not depend on the reconciled object, the `StaticClusterRequestSpecGenerator` helper function can be passed with a static spec definition as argument. + - `ExistingCluster` and `ExistingClusterRequest` both take a function that results in an `ObjectReference`, pointing to the existing `Cluster` or `ClusterRequest`, respectively. + - If the returned reference is actually static and does not depend on the reconciled object, the `StaticReferenceGenerator` helper function can be passed with a static reference definition as argument. + - If the registration should use the same name and namespace as the reconciled object, `IdentityReferenceGenerator` can be used. + +These constructors return a `ClusterRegistrationBuilder`. Its `Build` method can be used to turn it into a `ClusterRegistration`, but there are some methods that can be used to configure it before doing that: +- `WithTokenAccessGenerator` takes a function that generates the token configuration to be used in the `AccessRequest` for this cluster registration, depending on the request (= reconciled object). `WithOIDCAccessGenerator` is the equivalent for OIDC-based access. + - If the configuration does not depend on the request, the static alternatives `WithTokenAccess` and `WithOIDCAccess` can be used. + - Only one of either OIDC-based access or token-based access can be configured. This means that all four of the aforementioned methods are mutually exclusive - calling any of them will overwrite any call to any of them that has happened before. + - Note that one of these methods must be used in order to have an `AccessRequest` created for the cluster registration. Otherwise, no access can be provided for the corresponding cluster. There might be use-cases where this is desired, but since the main purpose of this library is to grant access to clusters, they will be rare. +- `WithScheme` can be used to set the scheme for the `client.Client` that can later be retrieved for the cluster registration.
Uses the default scheme if not called or called with a `nil` argument. +- `WithNamespaceGenerator` can be used to overwrite the logic for computing the namespace the created resources should be placed in. + - By default, the logic from the `DefaultNamespaceGenerator` function is used. It computes a UUID-style hash from name and namespace of the request. + - For resources that are related to MCPs, the `DefaultNamespaceGeneratorForMCP` should be used instead. It uses the library function that is commonly used to determine the MCP namespace on the platform cluster. + - If a static namespace is desired, the `StaticNamespaceGenerator` helper function can be used. + - `RequestNamespaceGenerator` is a namespace generator that returns the namespace of the reconciled object. + +```go +// controller constructor +func NewMyController(platformClusterClient client.Client, ...) *MyController { + car := advanced.NewClusterAccessReconciler(platformClusterClient, "my-controller") + + // This registers a to-be-created ClusterRequest. + // Its name and namespace are generated by the reconciler. + // Since the spec does not depend on the request, the helper function for static values is used. + // Token-based access for the cluster is configured. + car.Register(advanced.NewClusterRequest("foo", "foo", advanced.StaticClusterRequestSpecGenerator(&clustersv1alpha1.ClusterRequestSpec{ + Purpose: "foo", + })).WithTokenAccess(&clustersv1alpha1.TokenConfig{...}).Build()) + + // This registers an existing cluster with the same name and namespace as the reconciled object. + // Token-based access for the cluster is configured. + // The AccessRequest will be created in the same namespace as the reconciled object (due to the overwritten namespace generator). + car.Register(advanced.ExistingCluster("foobar", "fb", func(req reconcile.Request, _ ...any) (*commonapi.ObjectReference, error) { + return &commonapi.ObjectReference{ // could also use advanced.IdentityReferenceGenerator instead + Name: req.Name, + Namespace: req.Namespace, + } + }).WithTokenAccess(&clustersv1alpha1.TokenConfig{...}).WithScheme(myScheme).WithNamespaceGenerator(advanced.RequestNamespaceGenerator).Build()) + + return &MyController{ + car: car, + } +} +``` + +> The `Register` calls could actually be chained in the form of `advanced.NewClusterAccessReconciler(...).Register(...).Register(...)`. + +**Important:** Registering or unregistering clusters between calls to `Reconcile`/`ReconcileDelete` can lead to unexpected behavior and is discouraged. + +#### Reconciliation + +The reconciliation logic works similar to the 'simple' variant: `Reconcile` creates the required resources and must succeed before any getter calls, while `ReconcileDelete` removes the resources again and therefore has to be called when access to the cluster(s) is no longer needed during the deletion process. Both methods return a `reconcile.Result` and an error. The controller's `Reconcile` function is expected to abort if either the error is not `nil`, or the `reconcile.Result` contains a non-zero `RequeueAfter` duration. + +There are four getter methods that can be called after the cluster access has been successfully reconciled: +- `Access` returns access to the specified cluster (a `client.Client` can be retrieved from the returned struct). +- `AccessRequest` returns the `AccessRequest` for the specified cluster registration. +- `ClusterRequest` returns the `ClusterRequest` for the specified cluster registration. 
+- `Cluster` returns the `Cluster` for the specified cluster registration. + +Note that not all of these methods will always return something. For example, a registration created via `ExistingCluster(...)` references a `Cluster` directly and therefore cannot return a `ClusterRequest`. `Access` and `AccessRequest` will only work if either token-based access or OIDC-based access has been configured during the registration, otherwise there won't be any `AccessRequest`. Any method which cannot return the expected value due to the resource not being configured will simply return `nil` instead, without an error. The error is only returned if something goes wrong during retrieval of the resource. + +#### Additional Data + +While probably not required for most cases, there might be some situations in which the generation of resources requires more information than just the `reconcile.Request`, for example if the controller fetches some kind of configuration that specifies the required access permissions. The ClusterAccess library enables this by allowing arbitrary arguments to be passed into some methods: `Reconcile` and `ReconcileDelete`, as well as the four getter methods `Access`, `AccessRequest`, `ClusterRequest`, and `Cluster`, all take any number of optional arguments. Additional arguments that are passed into any of these methods will be passed to the generator functions (which have been passed into `WithTokenAccessGenerator`, `WithOIDCAccessGenerator`, and `WithNamespaceGenerator` during creation of the `ClusterRegistration`), which can use the additional information for generating the namespace or the spec for `AccessRequest` or `ClusterRequest`. + +**Important:** To ensure consistent behavior, `Reconcile`/`ReconcileDelete` must always be called with the same additional arguments for the same request, and any call to one of the getter methods for this request must also be given the same additional arguments. + +#### Testing Environments + +The resources created by the ClusterAccess Reconciler rely on other parts of the openmcp architecture, especially the scheduler (for `ClusterRequest`s) and a ClusterProvider (for `Cluster`s from `ClusterRequest`s and for `AccessRequest`s), which are not always present when testing a controller that uses this library, especially for unit tests. To avoid having multiple code paths in the controller, the ClusterAccess Reconciler offers an extension hook mechanism that allows mocking the actions that are usually performed by other controllers. + +On the `ClusterAccessReconciler`, the `WithFakingCallback` method can be used to register callback functions that are executed at specific points during the reconciler's `Reconcile`/`ReconcileDelete` method, depending on the specified `key`. + +The available keys and the corresponding points of execution depend on the implementation of the `ClusterAccessReconciler` interface. The implementation provided in the package recognizes the following keys: +- `WaitingForClusterRequestReadiness`: The function is executed during `Reconcile`, when a non-zero `RequeueAfter` value is returned because the logic waits for a `ClusterRequest` to become `Granted`. +- `WaitingForAccessRequestReadiness`: The function is executed during `Reconcile`, when a non-zero `RequeueAfter` value is returned because the logic waits for an `AccessRequest` to become `Granted`.
+- `WaitingForClusterRequestDeletion`: The function is executed during `ReconcileDelete`, when a non-zero `RequeueAfter` value is returned because the logic waits for a `ClusterRequest` to get its finalizers removed. +- `WaitingForAccessRequestDeletion`: The function is executed during `ReconcileDelete`, when a non-zero `RequeueAfter` value is returned because the logic waits for an `AccessRequest` to get its finalizers removed. + +For all of these keys, the package offers constants that are prefixed with `FakingCallback_`. + +While the signature of a callback function is always the same, any argument except for `ctx`, `platformClusterClient`, and `key` may be nil if not known at the point of execution. + +##### Convenience Implementations + +Because most controllers that use the faking callback feature will probably require a very similar logic for the aforementioned callback keys, the package provides a convenience implementation for each key: +- `FakeClusterRequestReadiness` generates a callback function for the `WaitingForClusterRequestReadiness` key. It creates a `Cluster` next to the `ClusterRequest`, sets the reference to it in the request's `status` and sets the request to `Granted`. + - This mocks cluster scheduler behavior. +- `FakeAccessRequestReadiness` generates a callback function for the `WaitingForAccessRequestReadiness` key. It creates a `Secret` containing a `kubeconfig` key, references the secret in the request's status and sets the `AccessRequest` to `Granted`. + - This mocks ClusterProvider behavior. + - Note that the `Access` getter method currently cannot handle the default kubeconfig written into the secret (which is just `fake`) and will always return an error, unless the method has been provided with a more realistic kubeconfig. +- `FakeClusterRequestDeletion` generates a callback function for the `WaitingForClusterRequestDeletion` key. Depending on its arguments, the generated function can remove specific or all finalizers on `Cluster` and/or `ClusterRequest`, and potentially also delete the `Cluster` resource. + - This mocks cluster scheduler behavior. +- `FakeAccessRequestDeletion` generates a callback function for the `WaitingForAccessRequestDeletion` key. It deletes the `Secret`, potentially removing the specified finalizers from it before, and then removes the configured finalizers from the `AccessRequest`. + - This mocks ClusterProvider behavior. diff --git a/lib/clusteraccess/advanced/clusteraccess.go b/lib/clusteraccess/advanced/clusteraccess.go index 60cf3f3..1e3ca79 100644 --- a/lib/clusteraccess/advanced/clusteraccess.go +++ b/lib/clusteraccess/advanced/clusteraccess.go @@ -54,7 +54,7 @@ type ClusterAccessReconciler interface { // Access returns an internal Cluster object granting access to the cluster for the specified request with the specified id. // Will fail if the cluster is not registered or no AccessRequest is registered for the cluster, or if some other error occurs. - Access(ctx context.Context, request reconcile.Request, id string) (*clusters.Cluster, error) + Access(ctx context.Context, request reconcile.Request, id string, additionalData ...any) (*clusters.Cluster, error) // AccessRequest fetches the AccessRequest object for the cluster for the specified request with the specified id. // Will fail if the cluster is not registered or no AccessRequest is registered for the cluster, or if some other error occurs. // The same additionalData must be passed into all methods of this ClusterAccessReconciler for the same request and id. 
@@ -196,18 +196,6 @@ type ClusterRegistrationBuilder interface { Build() ClusterRegistration } -// DefaultNamespaceGenerator is a default implementation of a namespace generator. -// It computes a UUID-style hash from the given request. -func DefaultNamespaceGenerator(req reconcile.Request, _ ...any) (string, error) { - return ctrlutils.K8sNameUUID(req.Namespace, req.Name) -} - -// DefaultNamespaceGeneratorForMCP is a default implementation of a namespace generator for MCPs. -// It computes a UUID-style hash from the given request and prefixes it with "mcp--". -func DefaultNamespaceGeneratorForMCP(req reconcile.Request, _ ...any) (string, error) { - return libutils.StableMCPNamespace(req.Name, req.Namespace) -} - /////////////////////////////////////////// /// IMPLEMENTATION: ClusterRegistration /// /////////////////////////////////////////// @@ -389,13 +377,6 @@ func (c *clusterRegistrationBuilderImpl) WithNamespaceGenerator(f func(req recon return c } -// StaticClusterRequestSpecGenerator is a helper function that just returns deep copies of the given spec. -func StaticClusterRequestSpecGenerator(spec *clustersv1alpha1.ClusterRequestSpec) func(reconcile.Request, ...any) (*clustersv1alpha1.ClusterRequestSpec, error) { - return func(_ reconcile.Request, _ ...any) (*clustersv1alpha1.ClusterRequestSpec, error) { - return spec.DeepCopy(), nil - } -} - // NewClusterRequest instructs the Reconciler to create and manage a new ClusterRequest. func NewClusterRequest(id, suffix string, generateClusterRequestSpec func(reconcile.Request, ...any) (*clustersv1alpha1.ClusterRequestSpec, error)) ClusterRegistrationBuilder { return &clusterRegistrationBuilderImpl{ @@ -408,13 +389,6 @@ func NewClusterRequest(id, suffix string, generateClusterRequestSpec func(reconc } } -// StaticReferenceGenerator is a helper function that just returns a deep copy of the given reference. -func StaticReferenceGenerator(ref *commonapi.ObjectReference) func(reconcile.Request, ...any) (*commonapi.ObjectReference, error) { - return func(_ reconcile.Request, _ ...any) (*commonapi.ObjectReference, error) { - return ref.DeepCopy(), nil - } -} - // ExistingCluster instructs the Reconciler to use an existing Cluster resource. func ExistingCluster(id, suffix string, generateClusterRef func(reconcile.Request, ...any) (*commonapi.ObjectReference, error)) ClusterRegistrationBuilder { return &clusterRegistrationBuilderImpl{ @@ -470,12 +444,12 @@ func NewClusterAccessReconciler(platformClusterClient client.Client, controllerN var _ ClusterAccessReconciler = &reconcilerImpl{} // Access implements Reconciler. -func (r *reconcilerImpl) Access(ctx context.Context, request reconcile.Request, id string) (*clusters.Cluster, error) { +func (r *reconcilerImpl) Access(ctx context.Context, request reconcile.Request, id string, additionalData ...any) (*clusters.Cluster, error) { reg, ok := r.registrations[id] if !ok { return nil, fmt.Errorf("no registration found for request '%s' with id '%s'", request.String(), id) } - ar, err := r.AccessRequest(ctx, request, id) + ar, err := r.AccessRequest(ctx, request, id, additionalData...) if err != nil { return nil, fmt.Errorf("unable to fetch AccessRequest for request '%s' with id '%s': %w", request.String(), id, err) } @@ -610,6 +584,8 @@ func (r *reconcilerImpl) ClusterRequest(ctx context.Context, request reconcile.R } // Reconcile implements Reconciler. 
+// +//nolint:gocyclo func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Request, additionalData ...any) (reconcile.Result, error) { log := logging.FromContextOrDiscard(ctx).WithName(caControllerName) ctx = logging.NewContext(ctx, log) @@ -1019,6 +995,52 @@ func AccessFromAccessRequest(ctx context.Context, platformClusterClient client.C return c, nil } +// StaticNamespaceGenerator returns a namespace generator that always returns the same namespace. +func StaticNamespaceGenerator(namespace string) func(reconcile.Request, ...any) (string, error) { + return func(_ reconcile.Request, _ ...any) (string, error) { + return namespace, nil + } +} + +// RequestNamespaceGenerator is a namespace generator that returns the namespace of the request. +func RequestNamespaceGenerator(req reconcile.Request, _ ...any) (string, error) { + return req.Namespace, nil +} + +// DefaultNamespaceGenerator is a default implementation of a namespace generator. +// It computes a UUID-style hash from the given request. +func DefaultNamespaceGenerator(req reconcile.Request, _ ...any) (string, error) { + return ctrlutils.K8sNameUUID(req.Namespace, req.Name) +} + +// DefaultNamespaceGeneratorForMCP is a default implementation of a namespace generator for MCPs. +// It computes a UUID-style hash from the given request and prefixes it with "mcp--". +func DefaultNamespaceGeneratorForMCP(req reconcile.Request, _ ...any) (string, error) { + return libutils.StableMCPNamespace(req.Name, req.Namespace) +} + +// StaticClusterRequestSpecGenerator is a helper function that returns a ClusterRequestSpec generator which just returns deep copies of the given spec. +func StaticClusterRequestSpecGenerator(spec *clustersv1alpha1.ClusterRequestSpec) func(reconcile.Request, ...any) (*clustersv1alpha1.ClusterRequestSpec, error) { + return func(_ reconcile.Request, _ ...any) (*clustersv1alpha1.ClusterRequestSpec, error) { + return spec.DeepCopy(), nil + } +} + +// StaticReferenceGenerator is a helper function that returns an ObjectReference generator which just returns a deep copy of the given reference. +func StaticReferenceGenerator(ref *commonapi.ObjectReference) func(reconcile.Request, ...any) (*commonapi.ObjectReference, error) { + return func(_ reconcile.Request, _ ...any) (*commonapi.ObjectReference, error) { + return ref.DeepCopy(), nil + } +} + +// IdentityReferenceGenerator is an ObjectReference generator that returns a reference that is identical to the request (name and namespace). +func IdentityReferenceGenerator(req reconcile.Request, _ ...any) (*commonapi.ObjectReference, error) { + return &commonapi.ObjectReference{ + Name: req.Name, + Namespace: req.Namespace, + }, nil +} + //////////////////////////////// /// FAKE AUXILIARY FUNCTIONS /// //////////////////////////////// diff --git a/lib/clusteraccess/advanced/clusteraccess_test.go b/lib/clusteraccess/advanced/clusteraccess_test.go index 85f82c3..434c7c3 100644 --- a/lib/clusteraccess/advanced/clusteraccess_test.go +++ b/lib/clusteraccess/advanced/clusteraccess_test.go @@ -45,6 +45,7 @@ func defaultTestSetup(testDirPathSegments ...string) *testutils.Environment { return env } +//nolint:unparam func defaultClusterAccessReconciler(env *testutils.Environment, controllerName string) advanced.ClusterAccessReconciler { return advanced.NewClusterAccessReconciler(env.Client(), controllerName). WithRetryInterval(100*time.Millisecond). 
@@ -445,6 +446,8 @@ var _ = Describe("Advanced Cluster Access", func() { // expectRequeue runs the reconciler's Reconcile method (or ReconcileDelete, if del is true) with the given request and expects a requeueAfter duration greater than zero in the returned result. // If match is non-empty, the first element is matched against the requeueAfter duration instead of just checking that it's greater than zero. Further elements are ignored. // It fails if the reconcile returns an error. +// +//nolint:unparam func expectRequeue(ctx context.Context, rec advanced.ClusterAccessReconciler, req reconcile.Request, del bool, match ...types.GomegaMatcher) func(Gomega) { return func(g Gomega) { var res reconcile.Result diff --git a/lib/clusteraccess/clusteraccess.go b/lib/clusteraccess/clusteraccess.go index a83a17e..6383ef1 100644 --- a/lib/clusteraccess/clusteraccess.go +++ b/lib/clusteraccess/clusteraccess.go @@ -26,7 +26,6 @@ import ( ) const ( - controllerName = "ClusterAccess" idMCP = "mcp" suffixMCP = "mcp" idWorkload = "workload" @@ -88,17 +87,14 @@ type Reconciler interface { } type reconcilerImpl struct { - internal advanced.ClusterAccessReconciler - // platformClusterClient client.Client - controllerName string - // retryInterval time.Duration + internal advanced.ClusterAccessReconciler + controllerName string mcpPermissions []clustersv1alpha1.PermissionsRequest mcpRoleRefs []commonapi.RoleRef workloadPermissions []clustersv1alpha1.PermissionsRequest workloadRoleRefs []commonapi.RoleRef mcpScheme *runtime.Scheme workloadScheme *runtime.Scheme - // skipWorkloadCluster bool } // NewClusterAccessReconciler creates a new ClusterAccessReconciler with the given parameters. @@ -113,17 +109,14 @@ func NewClusterAccessReconciler(platformClusterClient client.Client, controllerN } }) return &reconcilerImpl{ - internal: rec, - // platformClusterClient: platformClusterClient, - controllerName: controllerName, - // retryInterval: 5 * time.Second, + internal: rec, + controllerName: controllerName, mcpPermissions: []clustersv1alpha1.PermissionsRequest{}, mcpRoleRefs: []commonapi.RoleRef{}, workloadPermissions: []clustersv1alpha1.PermissionsRequest{}, workloadRoleRefs: []commonapi.RoleRef{}, mcpScheme: runtime.NewScheme(), workloadScheme: runtime.NewScheme(), - // skipWorkloadCluster: false, } } diff --git a/lib/go.mod b/lib/go.mod index 46187fa..80b5d83 100644 --- a/lib/go.mod +++ b/lib/go.mod @@ -12,6 +12,7 @@ require ( k8s.io/api v0.34.1 k8s.io/apimachinery v0.34.1 k8s.io/client-go v0.34.1 + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d sigs.k8s.io/controller-runtime v0.22.3 ) @@ -72,7 +73,6 @@ require ( k8s.io/apiextensions-apiserver v0.34.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250814151709-d7b6acb124c3 // indirect - k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect From bb8ce87b6c7b0af405e623c37c7abbf53b645f04 Mon Sep 17 00:00:00 2001 From: Johannes Aubart Date: Thu, 2 Oct 2025 15:04:53 +0200 Subject: [PATCH 09/12] fix typo --- lib/clusteraccess/advanced/clusteraccess.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/clusteraccess/advanced/clusteraccess.go b/lib/clusteraccess/advanced/clusteraccess.go index 1e3ca79..d8a7073 100644 --- a/lib/clusteraccess/advanced/clusteraccess.go +++ b/lib/clusteraccess/advanced/clusteraccess.go @@ -137,10 +137,10 @@ type 
ClusterRegistration interface { // ClusterRequestAvailable returns true if a ClusterRequest can be retrieved from this registration. ClusterRequestAvailable() bool // Parameterize turns this ClusterRegistration into a ParamterizedClusterRegistration. - Parameterize(req reconcile.Request, additionalData ...any) ParamterizedClusterRegistration + Parameterize(req reconcile.Request, additionalData ...any) ParameterizedClusterRegistration } -type ParamterizedClusterRegistration interface { +type ParameterizedClusterRegistration interface { ClusterRegistration // AccessRequestTokenConfig is the token configuration for the AccessRequest to be created for the cluster. @@ -219,7 +219,7 @@ type parameterizedClusterRegistrationImpl struct { } var _ ClusterRegistration = &clusterRegistrationImpl{} -var _ ParamterizedClusterRegistration = ¶meterizedClusterRegistrationImpl{} +var _ ParameterizedClusterRegistration = ¶meterizedClusterRegistrationImpl{} // ID implements ClusterRegistration. func (c *clusterRegistrationImpl) ID() string { @@ -249,7 +249,7 @@ func (c *clusterRegistrationImpl) ClusterRequestAvailable() bool { return c.generateClusterRequestSpec != nil || c.generateClusterRequestRef != nil } -func (c *clusterRegistrationImpl) Parameterize(req reconcile.Request, additionalData ...any) ParamterizedClusterRegistration { +func (c *clusterRegistrationImpl) Parameterize(req reconcile.Request, additionalData ...any) ParameterizedClusterRegistration { return ¶meterizedClusterRegistrationImpl{ clusterRegistrationImpl: *c, req: req, From 5657453bc396ed8c2622c32d771a9ce49cbdf2d4 Mon Sep 17 00:00:00 2001 From: Johannes Aubart Date: Mon, 6 Oct 2025 08:51:05 +0200 Subject: [PATCH 10/12] review feedback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: René Schünemann --- docs/libraries/clusteraccess.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/libraries/clusteraccess.md b/docs/libraries/clusteraccess.md index 4606720..88039f7 100644 --- a/docs/libraries/clusteraccess.md +++ b/docs/libraries/clusteraccess.md @@ -23,7 +23,7 @@ The ClusterAccess Reconciler has the same purpose as the Manager - granting acce There are two variants of the ClusterAccess Reconciler: simple and advanced. -The 'simple' ClusterAccess Reconciler lies in `lib/clusteraccess`. It is designed for service providers which reconcile resources belonging to MCPs and need access to the corresponding MCP clusters and additionally a workload cluster. It cannot be used to get access to clusters that are not related to an MCP. +The 'simple' ClusterAccess Reconciler lies in `lib/clusteraccess`. It is designed for service providers which need acces to the Managed Control Plane cluster to deliver the service API and a Workload cluster which hosts the kubernets workload of a service instance. The 'advanced' ClusterAccess Reconciler is in `lib/clusteraccess/advanced`. It can be configured to grant access to arbitrary clusters, either static or depending on the reconciled object. It is possible to either create a new `ClusterRequest` or reference existing `ClusterRequest` or `Cluster` resources. Due to this flexibility, it is significantly more complex to configure than the simple variant, though. 
From af5c644a3b9f9797a0ff7409f47ff4d5a0304694 Mon Sep 17 00:00:00 2001 From: Johannes Aubart Date: Fri, 10 Oct 2025 14:50:10 +0200 Subject: [PATCH 11/12] create explicit types for the generator functions passed into the ClusterRegistrationBuilder constructors --- lib/clusteraccess/advanced/clusteraccess.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/lib/clusteraccess/advanced/clusteraccess.go b/lib/clusteraccess/advanced/clusteraccess.go index d8a7073..b96dcca 100644 --- a/lib/clusteraccess/advanced/clusteraccess.go +++ b/lib/clusteraccess/advanced/clusteraccess.go @@ -377,8 +377,12 @@ func (c *clusterRegistrationBuilderImpl) WithNamespaceGenerator(f func(req recon return c } +// ClusterRequestSpecGenerator is a function that takes the reconcile.Request and arbitrary additional arguments and generates a ClusterRequestSpec. +// Request and additional arguments depend on the arguments the ClusterAccessReconciler's Reconcile method is called with. +type ClusterRequestSpecGenerator func(req reconcile.Request, additionalData ...any) (*clustersv1alpha1.ClusterRequestSpec, error) + // NewClusterRequest instructs the Reconciler to create and manage a new ClusterRequest. -func NewClusterRequest(id, suffix string, generateClusterRequestSpec func(reconcile.Request, ...any) (*clustersv1alpha1.ClusterRequestSpec, error)) ClusterRegistrationBuilder { +func NewClusterRequest(id, suffix string, generateClusterRequestSpec ClusterRequestSpecGenerator) ClusterRegistrationBuilder { return &clusterRegistrationBuilderImpl{ clusterRegistrationImpl: clusterRegistrationImpl{ id: id, @@ -389,8 +393,14 @@ func NewClusterRequest(id, suffix string, generateClusterRequestSpec func(reconc } } +// ObjectReferenceGenerator is a function that takes the reconcile.Request and arbitrary additional arguments and generates an ObjectReference. +// Request and additional arguments depend on the arguments the ClusterAccessReconciler's Reconcile method is called with. +// The kind of the object the reference refers to depends on the method the function is passed into. +type ObjectReferenceGenerator func(req reconcile.Request, additionalData ...any) (*commonapi.ObjectReference, error) + // ExistingCluster instructs the Reconciler to use an existing Cluster resource. -func ExistingCluster(id, suffix string, generateClusterRef func(reconcile.Request, ...any) (*commonapi.ObjectReference, error)) ClusterRegistrationBuilder { +// The given generateClusterRef function is used to generate the reference to the Cluster resource. +func ExistingCluster(id, suffix string, generateClusterRef ObjectReferenceGenerator) ClusterRegistrationBuilder { return &clusterRegistrationBuilderImpl{ clusterRegistrationImpl: clusterRegistrationImpl{ id: id, @@ -402,7 +412,8 @@ func ExistingCluster(id, suffix string, generateClusterRef func(reconcile.Reques } // ExistingClusterRequest instructs the Reconciler to use an existing Cluster resource that is referenced by the given ClusterRequest. -func ExistingClusterRequest(id, suffix string, generateClusterRequestRef func(reconcile.Request, ...any) (*commonapi.ObjectReference, error)) ClusterRegistrationBuilder { +// The given generateClusterRequestRef function is used to generate the reference to the ClusterRequest resource. 
+func ExistingClusterRequest(id, suffix string, generateClusterRequestRef ObjectReferenceGenerator) ClusterRegistrationBuilder { return &clusterRegistrationBuilderImpl{ clusterRegistrationImpl: clusterRegistrationImpl{ id: id, From 9553bbaa993652d4892dcb6db8094b7817253f53 Mon Sep 17 00:00:00 2001 From: Johannes Aubart Date: Fri, 10 Oct 2025 15:08:00 +0200 Subject: [PATCH 12/12] add background info about mcp architecture to documentation --- docs/libraries/clusteraccess.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/libraries/clusteraccess.md b/docs/libraries/clusteraccess.md index 88039f7..4290403 100644 --- a/docs/libraries/clusteraccess.md +++ b/docs/libraries/clusteraccess.md @@ -2,6 +2,16 @@ The ClusterAccess library in `lib/clusteraccess` is all about getting access to k8s clusters represented by the `Cluster` resource. It can be used in a few different ways which are outlined in this document. +## Background: MCP Clusters, Workload Clusters, and Service Providers + +For a better understanding of where this library comes from, let's do a quick recap of the openMCP architecture: + +Customers have access to a single, shared **onboarding cluster**. They can create `ManagedControlPlane` resources (MCPs) there and request services by creating *service resources* (e.g. for `Flux`, `Landscaper`, etc.) next to the MCPs. Each MCP results in an **MCP cluster** - the customer has access to the ones belonging to his own MCPs and the APIs for the services he requested (k8s CRDs) will be made available on this cluster. As this is a managed service, the customer should not have access to any managed controllers, therefore the service controllers (e.g. the Flux controller, the Landscaper controller, etc.) are usually running in so-called **workload clusters**. Workload clusters cannot be accessed by customers and controllers for multiple different MCPs can be hosted on the same workload cluster. + +The available services are determined by which **service providers** are deployed in the landscape. A service provider is responsible for watching the service resources and reacting on them. For example, the Landscaper service provider would watch the Landscaper service resource (which is named `Landscaper`) on the onboarding cluster, and if a new one is created next to an MCP, it would deploy a Landscaper instance (meaning the controllers required to run Landscaper) on any workload cluster and configure them to watch the MCP cluster belonging to the MCP the service resource was created next to. This makes the Landscaper resources (`Installation`, `Target`, etc.) available on the MCP cluster so that the customer can use the Landscaper without having to manage its lifecycle. + +As all service providers work similarly, they usually need access to the MCP cluster (to deploy CRDs, for example) and a workload cluster (to deploy the service controllers). As the process for getting access to both clusters is not too intuitive, we decided to build this library to make the development of new service providers easier, more efficient, and less error-prone. + ## ClusterAccess Manager The `Manager` interface mainly specifies the `CreateAndWaitForCluster` and `WaitForClusterAccess` methods. They can be used to create a `ClusterRequest` and `AccessRequest` or just the latter one. @@ -23,7 +33,7 @@ The ClusterAccess Reconciler has the same purpose as the Manager - granting acce There are two variants of the ClusterAccess Reconciler: simple and advanced. 
-The 'simple' ClusterAccess Reconciler lies in `lib/clusteraccess`. It is designed for service providers which need acces to the Managed Control Plane cluster to deliver the service API and a Workload cluster which hosts the kubernets workload of a service instance. +The 'simple' ClusterAccess Reconciler lies in `lib/clusteraccess`. It is designed for the original use-case: service providers which need access to the Managed Control Plane cluster to deliver the service API and a Workload cluster which hosts the Kubernetes workload of a service instance. The 'advanced' ClusterAccess Reconciler is in `lib/clusteraccess/advanced`. It can be configured to grant access to arbitrary clusters, either static or depending on the reconciled object. It is possible to either create a new `ClusterRequest` or reference existing `ClusterRequest` or `Cluster` resources. Due to this flexibility, it is significantly more complex to configure than the simple variant, though.
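##### Example: Additional Data

A minimal sketch of the 'Additional Data' mechanism described in the documentation above, reusing the `MyController`/`car` setup and the `foo` registration from the earlier examples. The `loadConfig` helper is hypothetical and stands in for whatever controller-specific configuration the generator functions need:

```go
func (c *MyController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	// cfg is hypothetical, controller-specific configuration that the generator functions may use.
	cfg, err := c.loadConfig(ctx, req)
	if err != nil {
		return reconcile.Result{}, err
	}

	// The same additional arguments are passed to Reconcile and to every getter for this request.
	res, err := c.car.Reconcile(ctx, req, cfg)
	if err != nil || res.RequeueAfter > 0 {
		return res, err
	}

	access, err := c.car.Access(ctx, req, "foo", cfg)
	if err != nil {
		return reconcile.Result{}, err
	}
	// access grants access to the cluster registered under id "foo" (a client.Client can be retrieved from it).
	_ = access

	// ... actual reconciliation logic ...
	return reconcile.Result{}, nil
}
```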