diff --git a/Makefile b/Makefile index 126676fff28..4dc52c29300 100644 --- a/Makefile +++ b/Makefile @@ -306,6 +306,7 @@ endif test-e2e-sharded: TEST_ARGS ?= test-e2e-sharded: WHAT ?= ./test/e2e... test-e2e-sharded: WORK_DIR ?= . +test-e2e-sharded: SHARDS ?= 2 ifdef ARTIFACT_DIR test-e2e-sharded: LOG_DIR ?= $(ARTIFACT_DIR)/kcp else @@ -316,12 +317,13 @@ test-e2e-sharded: require-kind build-all build-kind-images kind get kubeconfig > "$(WORK_DIR)/.kcp/kind.kubeconfig" rm -f "$(WORK_DIR)/.kcp/admin.kubeconfig" UNSAFE_E2E_HACK_DISABLE_ETCD_FSYNC=true NO_GORUN=1 \ - ./bin/sharded-test-server --quiet --v=2 --log-dir-path="$(LOG_DIR)" --work-dir-path="$(WORK_DIR)" --shard-run-virtual-workspaces=false $(TEST_SERVER_ARGS) --number-of-shards=2 2>&1 & PID=$$!; echo "PID $$PID" && \ + ./bin/sharded-test-server --quiet --v=2 --log-dir-path="$(LOG_DIR)" --work-dir-path="$(WORK_DIR)" --shard-run-virtual-workspaces=false $(TEST_SERVER_ARGS) --number-of-shards=$(SHARDS) 2>&1 & PID=$$!; echo "PID $$PID" && \ trap 'kill -TERM $$PID' TERM INT EXIT && \ while [ ! -f "$(WORK_DIR)/.kcp/admin.kubeconfig" ]; do sleep 1; done && \ NO_GORUN=1 GOOS=$(OS) GOARCH=$(ARCH) \ $(GO_TEST) -race $(COUNT_ARG) $(PARALLELISM_ARG) $(WHAT) $(TEST_ARGS) \ - -args --use-default-kcp-server --root-shard-kubeconfig=$(PWD)/.kcp-0/admin.kubeconfig $(SUITES_ARG) \ + -args --use-default-kcp-server --shard-kubeconfigs=root=$(PWD)/.kcp-0/admin.kubeconfig$(shell if [ $(SHARDS) -gt 1 ]; then seq 1 $$[$(SHARDS) - 1]; fi | while read n; do echo -n ",shard-$$n=$(PWD)/.kcp-$$n/admin.kubeconfig"; done) \ + $(SUITES_ARG) \ --syncer-image="$(SYNCER_IMAGE)" --kcp-test-image="$(TEST_IMAGE)" --pcluster-kubeconfig="$(abspath $(WORK_DIR)/.kcp/kind.kubeconfig)" \ $(if $(value WAIT),|| { echo "Terminated with $$?"; wait "$$PID"; },) @@ -332,6 +334,7 @@ endif test-e2e-sharded-minimal: TEST_ARGS ?= test-e2e-sharded-minimal: WHAT ?= ./test/e2e... test-e2e-sharded-minimal: WORK_DIR ?= . +test-e2e-sharded-minimal: SHARDS ?= 2 ifdef ARTIFACT_DIR test-e2e-sharded-minimal: LOG_DIR ?= $(ARTIFACT_DIR)/kcp else @@ -340,11 +343,12 @@ endif test-e2e-sharded-minimal: build-all mkdir -p "$(LOG_DIR)" "$(WORK_DIR)/.kcp" rm -f "$(WORK_DIR)/.kcp/admin.kubeconfig" - UNSAFE_E2E_HACK_DISABLE_ETCD_FSYNC=true NO_GORUN=1 ./bin/sharded-test-server --quiet --v=2 --log-dir-path="$(LOG_DIR)" --work-dir-path="$(WORK_DIR)" --shard-run-virtual-workspaces=false $(TEST_SERVER_ARGS) --number-of-shards=2 2>&1 & PID=$$!; echo "PID $$PID" && \ + UNSAFE_E2E_HACK_DISABLE_ETCD_FSYNC=true NO_GORUN=1 ./bin/sharded-test-server --quiet --v=2 --log-dir-path="$(LOG_DIR)" --work-dir-path="$(WORK_DIR)" --shard-run-virtual-workspaces=false $(TEST_SERVER_ARGS) --number-of-shards=$(SHARDS) 2>&1 & PID=$$!; echo "PID $$PID" && \ trap 'kill -TERM $$PID' TERM INT EXIT && \ while [ ! 
-f "$(WORK_DIR)/.kcp/admin.kubeconfig" ]; do sleep 1; done && \ NO_GORUN=1 GOOS=$(OS) GOARCH=$(ARCH) $(GO_TEST) -race $(COUNT_ARG) $(PARALLELISM_ARG) $(WHAT) $(TEST_ARGS) \ - -args --use-default-kcp-server --root-shard-kubeconfig=$(PWD)/.kcp-0/admin.kubeconfig $(SUITES_ARGS) \ + -args --use-default-kcp-server --shard-kubeconfigs=root=$(PWD)/.kcp-0/admin.kubeconfig$(shell if [ $(SHARDS) -gt 1 ]; then seq 1 $$[$(SHARDS) - 1]; fi | while read n; do echo -n ",shard-$$n=$(PWD)/.kcp-$$n/admin.kubeconfig"; done) \ + $(SUITES_ARGS) \ $(if $(value WAIT),|| { echo "Terminated with $$?"; wait "$$PID"; },) .PHONY: test diff --git a/cmd/apigen/main.go b/cmd/apigen/main.go index 510166079ee..f18d9210fac 100644 --- a/cmd/apigen/main.go +++ b/cmd/apigen/main.go @@ -41,6 +41,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/apis/admissionregistration" "sigs.k8s.io/yaml" "github.com/kcp-dev/kcp/pkg/apis/apis" @@ -172,7 +173,7 @@ func loadCustomResourceDefinitions(logger logr.Logger, baseDir string) (map[meta Group: parts[0], Resource: parts[1], } - if gr.Group == apis.GroupName || gr.Group == rbacv1.GroupName { + if gr.Group == apis.GroupName || gr.Group == rbacv1.GroupName || gr.Group == admissionregistration.GroupName { logger.Info(fmt.Sprintf("Skipping CustomResourceDefinition %s from %s", gr.String(), path)) return nil } diff --git a/cmd/sharded-test-server/main.go b/cmd/sharded-test-server/main.go index 87f63593b69..e63aa2d455c 100644 --- a/cmd/sharded-test-server/main.go +++ b/cmd/sharded-test-server/main.go @@ -25,14 +25,20 @@ import ( "path/filepath" "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" machineryutilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/sets" kuser "k8s.io/apiserver/pkg/authentication/user" genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/retry" "github.com/kcp-dev/kcp/cmd/sharded-test-server/third_party/library-go/crypto" shard "github.com/kcp-dev/kcp/cmd/test-server/kcp" + "github.com/kcp-dev/kcp/pkg/apis/core" "github.com/kcp-dev/kcp/pkg/authorization/bootstrap" + kcpclientset "github.com/kcp-dev/kcp/pkg/client/clientset/versioned/cluster" ) func main() { @@ -209,7 +215,7 @@ func start(proxyFlags, shardFlags []string, logDirPath, workDirPath string, numb vwPort = "7444" for i := 0; i < numberOfShards; i++ { - vw, err := newVirtualWorkspace(ctx, i, servingCA, hostIP.String(), logDirPath, workDirPath, clientCA) + vw, err := newVirtualWorkspace(ctx, i, servingCA, hostIP.String(), logDirPath, workDirPath, clientCA, cacheServerConfigPath) if err != nil { return err } @@ -262,6 +268,37 @@ func start(proxyFlags, shardFlags []string, logDirPath, workDirPath string, numb return err } + // Label region of shards + clientConfig, err := loadKubeConfig(filepath.Join(workDirPath, ".kcp/admin.kubeconfig"), "base") + if err != nil { + return err + } + config, err := clientConfig.ClientConfig() + if err != nil { + return err + } + client, err := kcpclientset.NewForConfig(config) + if err != nil { + return err + } + for i := range shards { + name := fmt.Sprintf("shard-%d", i) + if i == 0 { + name = "root" + } + + if i >= len(regions) { + break + } + patch := fmt.Sprintf(`{"metadata":{"labels":{"region":%q}}}`, regions[i]) + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + _, err := 
client.Cluster(core.RootCluster.Path()).CoreV1alpha1().Shards().Patch(ctx, name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}) + return err + }); err != nil { + return err + } + } + select { case shardIndexErr := <-shardsErrCh: return fmt.Errorf("shard %d exited: %w", shardIndexErr.index, shardIndexErr.error) @@ -278,3 +315,49 @@ type indexErrTuple struct { index int error error } + +func loadKubeConfig(kubeconfigPath, contextName string) (clientcmd.ClientConfig, error) { + fs, err := os.Stat(kubeconfigPath) + if err != nil { + return nil, err + } + if fs.Size() == 0 { + return nil, fmt.Errorf("%s points to an empty file", kubeconfigPath) + } + + rawConfig, err := clientcmd.LoadFromFile(kubeconfigPath) + if err != nil { + return nil, fmt.Errorf("failed to load admin kubeconfig: %w", err) + } + + return clientcmd.NewNonInteractiveClientConfig(*rawConfig, contextName, nil, nil), nil +} + +var regions = []string{ + "us-east-2", + "us-east-1", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-2", + "ap-southeast-3", + "ap-south-1", + "ap-northeast-3", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ca-central-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-south-1", + "eu-west-3", + "eu-south-2", + "eu-north-1", + "eu-central-2", + "me-south-1", + "me-central-1", + "sa-east-1", +} diff --git a/cmd/sharded-test-server/shard.go b/cmd/sharded-test-server/shard.go index 6f11a44ada5..83d9fddb0a6 100644 --- a/cmd/sharded-test-server/shard.go +++ b/cmd/sharded-test-server/shard.go @@ -103,7 +103,7 @@ func newShard(ctx context.Context, n int, args []string, standaloneVW bool, serv fmt.Sprintf("--shard-virtual-workspace-ca-file=%s", filepath.Join(workDirPath, ".kcp", "serving-ca.crt")), ) if len(cacheServerConfigPath) > 0 { - args = append(args, fmt.Sprintf("--cache-server-kubeconfig-file=%s", cacheServerConfigPath)) + args = append(args, fmt.Sprintf("--cache-kubeconfig=%s", cacheServerConfigPath)) } if standaloneVW { diff --git a/cmd/sharded-test-server/virtual.go b/cmd/sharded-test-server/virtual.go index 1116994375f..111a0ed2dc5 100644 --- a/cmd/sharded-test-server/virtual.go +++ b/cmd/sharded-test-server/virtual.go @@ -59,7 +59,7 @@ type VirtualWorkspace struct { writer headWriter } -func newVirtualWorkspace(ctx context.Context, index int, servingCA *crypto.CA, hostIP string, logDirPath, workDirPath string, clientCA *crypto.CA) (*VirtualWorkspace, error) { +func newVirtualWorkspace(ctx context.Context, index int, servingCA *crypto.CA, hostIP string, logDirPath, workDirPath string, clientCA *crypto.CA, cacheServerConfigPath string) (*VirtualWorkspace, error) { logger := klog.FromContext(ctx) // create serving cert @@ -131,6 +131,7 @@ func newVirtualWorkspace(ctx context.Context, index int, servingCA *crypto.CA, h args := []string{} args = append(args, fmt.Sprintf("--kubeconfig=%s", kubeconfigPath), + fmt.Sprintf("--cache-kubeconfig=%s", cacheServerConfigPath), fmt.Sprintf("--authentication-kubeconfig=%s", authenticationKubeconfigPath), fmt.Sprintf("--client-ca-file=%s", clientCAFilePath), fmt.Sprintf("--tls-private-key-file=%s", servingKeyFile), diff --git a/cmd/virtual-workspaces/command/cmd.go b/cmd/virtual-workspaces/command/cmd.go index c79d10e2581..d6291619057 100644 --- a/cmd/virtual-workspaces/command/cmd.go +++ b/cmd/virtual-workspaces/command/cmd.go @@ -87,6 +87,20 @@ func Run(ctx context.Context, o *options.Options) error { return err } + // parse cache kubeconfig + defaultCacheClientConfig, err := 
kubeConfig.ClientConfig() + if err != nil { + return err + } + cacheConfig, err := o.Cache.RestConfig(defaultCacheClientConfig) + if err != nil { + return err + } + cacheKcpClusterClient, err := kcpclientset.NewForConfig(cacheConfig) + if err != nil { + return err + } + // Don't throttle nonIdentityConfig.QPS = -1 @@ -127,6 +141,7 @@ func Run(ctx context.Context, o *options.Options) error { return err } wildcardKcpInformers := kcpinformers.NewSharedInformerFactory(kcpClusterClient, 10*time.Minute) + cacheKcpInformers := kcpinformers.NewSharedInformerFactory(cacheKcpClusterClient, 10*time.Minute) if o.ProfilerAddress != "" { //nolint:errcheck,gosec @@ -134,7 +149,7 @@ func Run(ctx context.Context, o *options.Options) error { } // create apiserver - virtualWorkspaces, err := o.VirtualWorkspaces.NewVirtualWorkspaces(identityConfig, o.RootPathPrefix, wildcardKubeInformers, wildcardKcpInformers) + virtualWorkspaces, err := o.VirtualWorkspaces.NewVirtualWorkspaces(identityConfig, o.RootPathPrefix, wildcardKubeInformers, wildcardKcpInformers, cacheKcpInformers) if err != nil { return err } @@ -157,6 +172,7 @@ func Run(ctx context.Context, o *options.Options) error { rootAPIServerConfig, err := virtualrootapiserver.NewRootAPIConfig(recommendedConfig, []virtualrootapiserver.InformerStart{ wildcardKubeInformers.Start, wildcardKcpInformers.Start, + cacheKcpInformers.Start, }, virtualWorkspaces) if err != nil { return err diff --git a/cmd/virtual-workspaces/options/options.go b/cmd/virtual-workspaces/options/options.go index ade909b155c..e61020b9951 100644 --- a/cmd/virtual-workspaces/options/options.go +++ b/cmd/virtual-workspaces/options/options.go @@ -29,6 +29,7 @@ import ( genericapiserveroptions "k8s.io/apiserver/pkg/server/options" "k8s.io/component-base/logs" + cacheoptions "github.com/kcp-dev/kcp/pkg/cache/client/options" virtualworkspacesoptions "github.com/kcp-dev/kcp/pkg/virtual/options" ) @@ -44,6 +45,7 @@ type Options struct { Context string RootPathPrefix string + Cache cacheoptions.Cache SecureServing genericapiserveroptions.SecureServingOptions Authentication genericapiserveroptions.DelegatingAuthenticationOptions Authorization virtualworkspacesoptions.Authorization @@ -61,6 +63,7 @@ func NewOptions() *Options { RootPathPrefix: DefaultRootPathPrefix, + Cache: *cacheoptions.NewCache(), SecureServing: *genericapiserveroptions.NewSecureServingOptions(), Authentication: *genericapiserveroptions.NewDelegatingAuthenticationOptions(), Authorization: *virtualworkspacesoptions.NewAuthorization(), @@ -79,6 +82,7 @@ func NewOptions() *Options { } func (o *Options) AddFlags(flags *pflag.FlagSet) { + o.Cache.AddFlags(flags) o.SecureServing.AddFlags(flags) o.Authentication.AddFlags(flags) o.Logs.AddFlags(flags) @@ -94,6 +98,7 @@ func (o *Options) AddFlags(flags *pflag.FlagSet) { func (o *Options) Validate() error { errs := []error{} + errs = append(errs, o.Cache.Validate()...) errs = append(errs, o.SecureServing.Validate()...) errs = append(errs, o.Authentication.Validate()...) errs = append(errs, o.VirtualWorkspaces.Validate()...) 
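The virtual-workspaces command above now builds a second clientset and informer factory against the cache server, falling back to the local kubeconfig when `--cache-kubeconfig` is not set. Below is a minimal sketch of that wiring, not code from the patch: the `startCacheInformers` helper and the generated informers import path are assumptions for illustration, while `Cache.RestConfig`, `kcpclientset.NewForConfig`, and `NewSharedInformerFactory` are the calls used in the hunks above.

```go
// Sketch (assumed helper): build the cache-facing clientset and informer
// factory from the new Cache options, falling back to the shard's own
// kubeconfig when --cache-kubeconfig is not provided.
package main

import (
	"context"
	"time"

	"k8s.io/client-go/rest"

	cacheoptions "github.com/kcp-dev/kcp/pkg/cache/client/options"
	kcpclientset "github.com/kcp-dev/kcp/pkg/client/clientset/versioned/cluster"
	kcpinformers "github.com/kcp-dev/kcp/pkg/client/informers/externalversions" // path assumed
)

func startCacheInformers(ctx context.Context, fallback *rest.Config, cache *cacheoptions.Cache) (kcpinformers.SharedInformerFactory, error) {
	// RestConfig wraps the config with the cache round trippers; with no
	// --cache-kubeconfig it simply decorates the fallback config.
	cacheConfig, err := cache.RestConfig(fallback)
	if err != nil {
		return nil, err
	}
	client, err := kcpclientset.NewForConfig(cacheConfig)
	if err != nil {
		return nil, err
	}
	factory := kcpinformers.NewSharedInformerFactory(client, 10*time.Minute)
	factory.Start(ctx.Done())
	return factory, nil
}
```

The fallback keeps single-process setups working: without a dedicated cache server, the "cache" informers simply point at the shard's own API surface.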
diff --git a/config/crds/admissionregistration.k8s.io_mutatingwebhookconfigurations.yaml b/config/crds/admissionregistration.k8s.io_mutatingwebhookconfigurations.yaml new file mode 100755 index 00000000000..69e90b9407f --- /dev/null +++ b/config/crds/admissionregistration.k8s.io_mutatingwebhookconfigurations.yaml @@ -0,0 +1,367 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kcp-dev/kubernetes/pull/4 + creationTimestamp: null + name: mutatingwebhookconfigurations.admissionregistration.k8s.io +spec: + conversion: + strategy: None + group: admissionregistration.k8s.io + names: + categories: + - api-extensions + kind: MutatingWebhookConfiguration + listKind: MutatingWebhookConfigurationList + plural: mutatingwebhookconfigurations + singular: mutatingwebhookconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: MutatingWebhookConfiguration describes the configuration of and + admission webhook that accept or reject and may change the object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + webhooks: + description: Webhooks is a list of webhooks and the affected resources + and operations. + items: + description: MutatingWebhook describes an admission webhook and the + resources and operations it applies to. + properties: + admissionReviewVersions: + description: AdmissionReviewVersions is an ordered list of preferred + `AdmissionReview` versions the Webhook expects. API server will + try to use first version in the list which it supports. If none + of the versions specified in this list supported by API server, + validation will fail for this object. If a persisted webhook configuration + specifies allowed versions and does not include any versions known + to the API Server, calls to the webhook will fail and be subject + to the failure policy. + items: + type: string + type: array + clientConfig: + description: ClientConfig defines how to communicate with the hook. + Required + properties: + caBundle: + description: '`caBundle` is a PEM encoded CA bundle which will + be used to validate the webhook''s server certificate. If + unspecified, system trust roots on the apiserver are used.' + format: byte + type: string + service: + description: |- + `service` is a reference to the service for this webhook. Either `service` or `url` must be specified. + + If the webhook is running within the cluster, then you should use `service`. + properties: + name: + description: '`name` is the name of the service. Required' + type: string + namespace: + description: '`namespace` is the namespace of the service. + Required' + type: string + path: + description: '`path` is an optional URL path which will + be sent in any request to this service.' 
+ type: string + port: + description: If specified, the port on the service that + hosting webhook. Default to 443 for backward compatibility. + `port` should be a valid port number (1-65535, inclusive). + format: int32 + type: integer + required: + - namespace + - name + type: object + url: + description: |- + `url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. + + The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. + + Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. + + The scheme must be "https"; the URL must begin with "https://". + + A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. + + Attempting to use a user or basic auth e.g. "user:password@" is not allowed. Fragments ("#...") and query parameters ("?...") are not allowed, either. + type: string + type: object + failurePolicy: + description: FailurePolicy defines how unrecognized errors from + the admission endpoint are handled - allowed values are Ignore + or Fail. Defaults to Fail. + type: string + matchPolicy: + description: |- + matchPolicy defines how the "rules" list is used to match incoming requests. Allowed values are "Exact" or "Equivalent". + + - Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + + - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + + Defaults to "Equivalent" + type: string + name: + description: The name of the admission webhook. Name should be fully + qualified, e.g., imagepolicy.kubernetes.io, where "imagepolicy" + is the name of the webhook, and kubernetes.io is the name of the + organization. Required. + type: string + namespaceSelector: + description: |- + NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook. 
+ + For example, to run the webhook on any objects whose namespace is not associated with "runlevel" of "0" or "1"; you will set the selector as follows: "namespaceSelector": { + "matchExpressions": [ + { + "key": "runlevel", + "operator": "NotIn", + "values": [ + "0", + "1" + ] + } + ] + } + + If instead you want to only run the webhook on any objects whose namespace is associated with the "environment" of "prod" or "staging"; you will set the selector as follows: "namespaceSelector": { + "matchExpressions": [ + { + "key": "environment", + "operator": "In", + "values": [ + "prod", + "staging" + ] + } + ] + } + + See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors. + + Default to the empty LabelSelector, which matches everything. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + objectSelector: + description: ObjectSelector decides whether to run the webhook based + on if the object has matching labels. objectSelector is evaluated + against both the oldObject and newObject that would be sent to + the webhook, and is considered to match if either object matches + the selector. A null object (oldObject in the case of create, + or newObject in the case of delete) or an object that cannot have + labels (like a DeploymentRollback or a PodProxyOptions object) + is not considered to match. Use the object selector only if the + webhook is opt-in, because end users may skip the admission webhook + by setting the labels. Default to the empty LabelSelector, which + matches everything. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. 
+ If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + reinvocationPolicy: + description: |- + reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are "Never" and "IfNeeded". + + Never: the webhook will not be called more than once in a single admission evaluation. + + IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + + Defaults to "Never". + type: string + rules: + description: Rules describes what operations on what resources/subresources + the webhook cares about. The webhook cares about an operation + if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks + and MutatingAdmissionWebhooks from putting the cluster in a state + which cannot be recovered from without completely disabling the + plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + are never called on admission requests for ValidatingWebhookConfiguration + and MutatingWebhookConfiguration objects. + items: + description: RuleWithOperations is a tuple of Operations and Resources. + It is recommended to make sure that all the tuple expansions + are valid. + properties: + apiGroups: + description: APIGroups is the API groups the resources belong + to. '*' is all groups. If '*' is present, the length of + the slice must be one. Required. + items: + type: string + type: array + apiVersions: + description: APIVersions is the API versions the resources + belong to. '*' is all versions. If '*' is present, the length + of the slice must be one. Required. + items: + type: string + type: array + operations: + description: Operations is the operations the admission hook + cares about - CREATE, UPDATE, DELETE, CONNECT or * for all + of those operations and any future admission operations + that are added. If '*' is present, the length of the slice + must be one. Required. + items: + type: string + type: array + resources: + description: |- + Resources is a list of resources this rule applies to. + + For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. 
+ + If wildcard is present, the validation rule will ensure resources do not overlap with each other. + + Depending on the enclosing object, subresources might not be allowed. Required. + items: + type: string + type: array + scope: + description: scope specifies the scope of this rule. Valid + values are "Cluster", "Namespaced", and "*" "Cluster" means + that only cluster-scoped resources will match this rule. + Namespace API objects are cluster-scoped. "Namespaced" means + that only namespaced resources will match this rule. "*" + means that there are no scope restrictions. Subresources + match the scope of their parent resource. Default is "*". + type: string + type: object + type: array + sideEffects: + description: 'SideEffects states whether this webhook has side effects. + Acceptable values are: None, NoneOnDryRun (webhooks created via + v1beta1 may also specify Some or Unknown). Webhooks with side + effects MUST implement a reconciliation system, since a request + may be rejected by a future step in the admission chain and the + side effects therefore need to be undone. Requests with the dryRun + attribute will be auto-rejected if they match a webhook with sideEffects + == Unknown or Some.' + type: string + timeoutSeconds: + description: TimeoutSeconds specifies the timeout for this webhook. + After the timeout passes, the webhook call will be ignored or + the API call will fail based on the failure policy. The timeout + value must be between 1 and 30 seconds. Default to 10 seconds. + format: int32 + type: integer + required: + - admissionReviewVersions + - clientConfig + - name + - sideEffects + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + served: true + storage: true + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: + - v1 diff --git a/config/crds/admissionregistration.k8s.io_validatingwebhookconfigurations.yaml b/config/crds/admissionregistration.k8s.io_validatingwebhookconfigurations.yaml new file mode 100755 index 00000000000..1df6c8bedeb --- /dev/null +++ b/config/crds/admissionregistration.k8s.io_validatingwebhookconfigurations.yaml @@ -0,0 +1,358 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kcp-dev/kubernetes/pull/4 + creationTimestamp: null + name: validatingwebhookconfigurations.admissionregistration.k8s.io +spec: + conversion: + strategy: None + group: admissionregistration.k8s.io + names: + categories: + - api-extensions + kind: ValidatingWebhookConfiguration + listKind: ValidatingWebhookConfigurationList + plural: validatingwebhookconfigurations + singular: validatingwebhookconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ValidatingWebhookConfiguration describes the configuration of + and admission webhook that accept or reject and object without changing + it. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + webhooks: + description: Webhooks is a list of webhooks and the affected resources + and operations. + items: + description: ValidatingWebhook describes an admission webhook and the + resources and operations it applies to. + properties: + admissionReviewVersions: + description: AdmissionReviewVersions is an ordered list of preferred + `AdmissionReview` versions the Webhook expects. API server will + try to use first version in the list which it supports. If none + of the versions specified in this list supported by API server, + validation will fail for this object. If a persisted webhook configuration + specifies allowed versions and does not include any versions known + to the API Server, calls to the webhook will fail and be subject + to the failure policy. + items: + type: string + type: array + clientConfig: + description: ClientConfig defines how to communicate with the hook. + Required + properties: + caBundle: + description: '`caBundle` is a PEM encoded CA bundle which will + be used to validate the webhook''s server certificate. If + unspecified, system trust roots on the apiserver are used.' + format: byte + type: string + service: + description: |- + `service` is a reference to the service for this webhook. Either `service` or `url` must be specified. + + If the webhook is running within the cluster, then you should use `service`. + properties: + name: + description: '`name` is the name of the service. Required' + type: string + namespace: + description: '`namespace` is the namespace of the service. + Required' + type: string + path: + description: '`path` is an optional URL path which will + be sent in any request to this service.' + type: string + port: + description: If specified, the port on the service that + hosting webhook. Default to 443 for backward compatibility. + `port` should be a valid port number (1-65535, inclusive). + format: int32 + type: integer + required: + - namespace + - name + type: object + url: + description: |- + `url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. + + The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. + + Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. + + The scheme must be "https"; the URL must begin with "https://". + + A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. + + Attempting to use a user or basic auth e.g. "user:password@" is not allowed. Fragments ("#...") and query parameters ("?...") are not allowed, either. + type: string + type: object + failurePolicy: + description: FailurePolicy defines how unrecognized errors from + the admission endpoint are handled - allowed values are Ignore + or Fail. Defaults to Fail. 
+ type: string + matchPolicy: + description: |- + matchPolicy defines how the "rules" list is used to match incoming requests. Allowed values are "Exact" or "Equivalent". + + - Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + + - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + + Defaults to "Equivalent" + type: string + name: + description: The name of the admission webhook. Name should be fully + qualified, e.g., imagepolicy.kubernetes.io, where "imagepolicy" + is the name of the webhook, and kubernetes.io is the name of the + organization. Required. + type: string + namespaceSelector: + description: |- + NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook. + + For example, to run the webhook on any objects whose namespace is not associated with "runlevel" of "0" or "1"; you will set the selector as follows: "namespaceSelector": { + "matchExpressions": [ + { + "key": "runlevel", + "operator": "NotIn", + "values": [ + "0", + "1" + ] + } + ] + } + + If instead you want to only run the webhook on any objects whose namespace is associated with the "environment" of "prod" or "staging"; you will set the selector as follows: "namespaceSelector": { + "matchExpressions": [ + { + "key": "environment", + "operator": "In", + "values": [ + "prod", + "staging" + ] + } + ] + } + + See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors. + + Default to the empty LabelSelector, which matches everything. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + objectSelector: + description: ObjectSelector decides whether to run the webhook based + on if the object has matching labels. objectSelector is evaluated + against both the oldObject and newObject that would be sent to + the webhook, and is considered to match if either object matches + the selector. A null object (oldObject in the case of create, + or newObject in the case of delete) or an object that cannot have + labels (like a DeploymentRollback or a PodProxyOptions object) + is not considered to match. Use the object selector only if the + webhook is opt-in, because end users may skip the admission webhook + by setting the labels. Default to the empty LabelSelector, which + matches everything. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + rules: + description: Rules describes what operations on what resources/subresources + the webhook cares about. The webhook cares about an operation + if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks + and MutatingAdmissionWebhooks from putting the cluster in a state + which cannot be recovered from without completely disabling the + plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + are never called on admission requests for ValidatingWebhookConfiguration + and MutatingWebhookConfiguration objects. + items: + description: RuleWithOperations is a tuple of Operations and Resources. + It is recommended to make sure that all the tuple expansions + are valid. + properties: + apiGroups: + description: APIGroups is the API groups the resources belong + to. '*' is all groups. If '*' is present, the length of + the slice must be one. Required. + items: + type: string + type: array + apiVersions: + description: APIVersions is the API versions the resources + belong to. '*' is all versions. If '*' is present, the length + of the slice must be one. Required. 
+ items: + type: string + type: array + operations: + description: Operations is the operations the admission hook + cares about - CREATE, UPDATE, DELETE, CONNECT or * for all + of those operations and any future admission operations + that are added. If '*' is present, the length of the slice + must be one. Required. + items: + type: string + type: array + resources: + description: |- + Resources is a list of resources this rule applies to. + + For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. + + If wildcard is present, the validation rule will ensure resources do not overlap with each other. + + Depending on the enclosing object, subresources might not be allowed. Required. + items: + type: string + type: array + scope: + description: scope specifies the scope of this rule. Valid + values are "Cluster", "Namespaced", and "*" "Cluster" means + that only cluster-scoped resources will match this rule. + Namespace API objects are cluster-scoped. "Namespaced" means + that only namespaced resources will match this rule. "*" + means that there are no scope restrictions. Subresources + match the scope of their parent resource. Default is "*". + type: string + type: object + type: array + sideEffects: + description: 'SideEffects states whether this webhook has side effects. + Acceptable values are: None, NoneOnDryRun (webhooks created via + v1beta1 may also specify Some or Unknown). Webhooks with side + effects MUST implement a reconciliation system, since a request + may be rejected by a future step in the admission chain and the + side effects therefore need to be undone. Requests with the dryRun + attribute will be auto-rejected if they match a webhook with sideEffects + == Unknown or Some.' + type: string + timeoutSeconds: + description: TimeoutSeconds specifies the timeout for this webhook. + After the timeout passes, the webhook call will be ignored or + the API call will fail based on the failure policy. The timeout + value must be between 1 and 30 seconds. Default to 10 seconds. 
+ format: int32 + type: integer + required: + - admissionReviewVersions + - clientConfig + - name + - sideEffects + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + served: true + storage: true + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: + - v1 diff --git a/config/crds/core.kcp.io_shards.yaml b/config/crds/core.kcp.io_shards.yaml index 4f76337d063..0531c6f47c1 100644 --- a/config/crds/core.kcp.io_shards.yaml +++ b/config/crds/core.kcp.io_shards.yaml @@ -18,6 +18,10 @@ spec: scope: Cluster versions: - additionalPrinterColumns: + - description: The region this workspace is in + jsonPath: .metadata.labels['region'] + name: Region + type: string - description: Type URL to directly connect to the shard jsonPath: .spec.baseURL name: URL diff --git a/config/crds/tenancy.kcp.io_workspaces.yaml b/config/crds/tenancy.kcp.io_workspaces.yaml index 58c99968880..a1cb2bc031a 100644 --- a/config/crds/tenancy.kcp.io_workspaces.yaml +++ b/config/crds/tenancy.kcp.io_workspaces.yaml @@ -23,8 +23,12 @@ spec: jsonPath: .spec.type.name name: Type type: string + - description: The region this workspace is in + jsonPath: .metadata.labels['region'] + name: Region + type: string - description: The current phase (e.g. Scheduling, Initializing, Ready, Deleting) - jsonPath: .metadata.labels['tenancy\.kcp\.dev/phase'] + jsonPath: .metadata.labels['tenancy\.kcp\.io/phase'] name: Phase type: string - description: URL to access the workspace @@ -79,7 +83,7 @@ spec: x-kubernetes-validations: - message: cluster is immutable rule: self == oldSelf - shard: + location: description: "location constraints where this workspace can be scheduled to. \n If the no location is specified, an arbitrary location is chosen." 
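With shards labeled by `region` and the Workspace schema exposing `location` instead of `shard`, a client can steer scheduling with a label selector, as `config/rootcompute/clusterworkspace-compute.yaml` does below with `name: root`. A hedged Go sketch of the same idea follows; the `WorkspaceLocation.Selector` field name is inferred from the generated schema properties, not shown verbatim in this section.

```go
// Sketch only: a Workspace constrained to shards carrying region=eu-west-1
// (one of the regions the sharded test server assigns above), mirroring the
// spec.location.selector stanza in the CRDs. The Selector field name is an
// assumption inferred from the schema.
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	tenancyv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/tenancy/v1alpha1"
)

func workspaceInRegion(name, region string) *tenancyv1alpha1.Workspace {
	return &tenancyv1alpha1.Workspace{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: tenancyv1alpha1.WorkspaceSpec{
			// type and other fields omitted; only the location constraint is shown.
			Location: &tenancyv1alpha1.WorkspaceLocation{
				Selector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"region": region},
				},
			},
		},
	}
}
```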
diff --git a/config/root-phase0/apiexport-shards.core.kcp.io.yaml b/config/root-phase0/apiexport-shards.core.kcp.io.yaml index 79a1e5c8ec0..ebf1a761a25 100644 --- a/config/root-phase0/apiexport-shards.core.kcp.io.yaml +++ b/config/root-phase0/apiexport-shards.core.kcp.io.yaml @@ -5,5 +5,5 @@ metadata: name: shards.core.kcp.io spec: latestResourceSchemas: - - v221219-c92ed8152.shards.core.kcp.io + - v230116-943e458f6.shards.core.kcp.io status: {} diff --git a/config/root-phase0/apiexport-tenancy.kcp.io.yaml b/config/root-phase0/apiexport-tenancy.kcp.io.yaml index e935fdda083..4ea35a3bcb0 100644 --- a/config/root-phase0/apiexport-tenancy.kcp.io.yaml +++ b/config/root-phase0/apiexport-tenancy.kcp.io.yaml @@ -7,7 +7,7 @@ spec: latestResourceSchemas: - v221219-c92ed8152.clusterworkspaces.tenancy.kcp.io - v230110-89146c99.workspacetypes.tenancy.kcp.io - - v230111-7449f14f.workspaces.tenancy.kcp.io + - v230116-832a4a55d.workspaces.tenancy.kcp.io maximalPermissionPolicy: local: {} status: {} diff --git a/config/root-phase0/apiresourceschema-logicalclusters.core.kcp.io.yaml b/config/root-phase0/apiresourceschema-logicalclusters.core.kcp.io.yaml index b361e999a5f..5a0b3a8520e 100644 --- a/config/root-phase0/apiresourceschema-logicalclusters.core.kcp.io.yaml +++ b/config/root-phase0/apiresourceschema-logicalclusters.core.kcp.io.yaml @@ -2,7 +2,7 @@ apiVersion: apis.kcp.io/v1alpha1 kind: APIResourceSchema metadata: creationTimestamp: null - name: v221219-dab0c5392.logicalclusters.core.kcp.io + name: v230116-943e458f6.logicalclusters.core.kcp.io spec: group: core.kcp.io names: diff --git a/config/root-phase0/apiresourceschema-shards.core.kcp.io.yaml b/config/root-phase0/apiresourceschema-shards.core.kcp.io.yaml index e8c1a433d0f..a8fdc950cd9 100644 --- a/config/root-phase0/apiresourceschema-shards.core.kcp.io.yaml +++ b/config/root-phase0/apiresourceschema-shards.core.kcp.io.yaml @@ -2,7 +2,7 @@ apiVersion: apis.kcp.io/v1alpha1 kind: APIResourceSchema metadata: creationTimestamp: null - name: v221219-c92ed8152.shards.core.kcp.io + name: v230116-943e458f6.shards.core.kcp.io spec: group: core.kcp.io names: @@ -15,6 +15,10 @@ spec: scope: Cluster versions: - additionalPrinterColumns: + - description: The region this workspace is in + jsonPath: .metadata.labels['region'] + name: Region + type: string - description: Type URL to directly connect to the shard jsonPath: .spec.baseURL name: URL diff --git a/config/root-phase0/apiresourceschema-workspaces.tenancy.kcp.io.yaml b/config/root-phase0/apiresourceschema-workspaces.tenancy.kcp.io.yaml index c02e3d87607..1334e25cfe3 100644 --- a/config/root-phase0/apiresourceschema-workspaces.tenancy.kcp.io.yaml +++ b/config/root-phase0/apiresourceschema-workspaces.tenancy.kcp.io.yaml @@ -2,7 +2,7 @@ apiVersion: apis.kcp.io/v1alpha1 kind: APIResourceSchema metadata: creationTimestamp: null - name: v230111-7449f14f.workspaces.tenancy.kcp.io + name: v230116-832a4a55d.workspaces.tenancy.kcp.io spec: group: tenancy.kcp.io names: @@ -21,8 +21,12 @@ spec: jsonPath: .spec.type.name name: Type type: string + - description: The region this workspace is in + jsonPath: .metadata.labels['region'] + name: Region + type: string - description: The current phase (e.g. 
Scheduling, Initializing, Ready, Deleting) - jsonPath: .metadata.labels['tenancy\.kcp\.dev/phase'] + jsonPath: .metadata.labels['tenancy\.kcp\.io/phase'] name: Phase type: string - description: URL to access the workspace @@ -76,7 +80,7 @@ spec: x-kubernetes-validations: - message: cluster is immutable rule: self == oldSelf - shard: + location: description: "location constraints where this workspace can be scheduled to. \n If the no location is specified, an arbitrary location is chosen." properties: diff --git a/config/rootcompute/clusterworkspace-compute.yaml b/config/rootcompute/clusterworkspace-compute.yaml index f3fd9015f64..3ade4447ad5 100644 --- a/config/rootcompute/clusterworkspace-compute.yaml +++ b/config/rootcompute/clusterworkspace-compute.yaml @@ -9,3 +9,7 @@ spec: type: name: universal path: root + location: + selector: + matchLabels: + name: root diff --git a/pkg/admission/apibinding/apibinding_admission.go b/pkg/admission/apibinding/apibinding_admission.go index 4657e091617..bb738833e2f 100644 --- a/pkg/admission/apibinding/apibinding_admission.go +++ b/pkg/admission/apibinding/apibinding_admission.go @@ -57,7 +57,11 @@ func Register(plugins *admission.Plugins) { createAuthorizer: delegated.NewDelegatedAuthorizer, } p.getAPIExport = func(path logicalcluster.Path, name string) (*apisv1alpha1.APIExport, error) { - return indexers.ByPathAndName[*apisv1alpha1.APIExport](apisv1alpha1.Resource("apiexports"), p.apiExportIndexer, path, name) + export, err := indexers.ByPathAndName[*apisv1alpha1.APIExport](apisv1alpha1.Resource("apiexports"), p.apiExportIndexer, path, name) + if apierrors.IsNotFound(err) { + return indexers.ByPathAndName[*apisv1alpha1.APIExport](apisv1alpha1.Resource("apiexports"), p.cacheAPIExportIndexer, path, name) + } + return export, err } return p, nil @@ -69,7 +73,8 @@ type apiBindingAdmission struct { getAPIExport func(path logicalcluster.Path, name string) (*apisv1alpha1.APIExport, error) - apiExportIndexer cache.Indexer + apiExportIndexer cache.Indexer + cacheAPIExportIndexer cache.Indexer deepSARClient kcpkubernetesclientset.ClusterInterface createAuthorizer delegated.DelegatedAuthorizerFactory @@ -278,6 +283,12 @@ func (o *apiBindingAdmission) ValidateInitialization() error { if o.deepSARClient == nil { return fmt.Errorf(PluginName + " plugin needs a deepSARClient") } + if o.apiExportIndexer == nil { + return fmt.Errorf(PluginName + " plugin needs an APIExport indexer") + } + if o.cacheAPIExportIndexer == nil { + return fmt.Errorf(PluginName + " plugin needs a cache APIExport indexer") + } return nil } @@ -289,8 +300,17 @@ func (o *apiBindingAdmission) SetDeepSARClient(client kcpkubernetesclientset.Clu func (o *apiBindingAdmission) SetKcpInformers(local, global kcpinformers.SharedInformerFactory) { apiExportsReady := local.Apis().V1alpha1().APIExports().Informer().HasSynced + cacheAPIExportsReady := global.Apis().V1alpha1().APIExports().Informer().HasSynced o.SetReadyFunc(func() bool { - return apiExportsReady() + return apiExportsReady() && cacheAPIExportsReady() }) o.apiExportIndexer = local.Apis().V1alpha1().APIExports().Informer().GetIndexer() + o.cacheAPIExportIndexer = global.Apis().V1alpha1().APIExports().Informer().GetIndexer() + + indexers.AddIfNotPresentOrDie(local.Apis().V1alpha1().APIExports().Informer().GetIndexer(), cache.Indexers{ + indexers.ByLogicalClusterPathAndName: indexers.IndexByLogicalClusterPathAndName, + }) + indexers.AddIfNotPresentOrDie(global.Apis().V1alpha1().APIExports().Informer().GetIndexer(), cache.Indexers{
+ indexers.ByLogicalClusterPathAndName: indexers.IndexByLogicalClusterPathAndName, + }) } diff --git a/pkg/admission/initializers/initializer.go b/pkg/admission/initializers/initializer.go index 3f668333c00..32e006b00b1 100644 --- a/pkg/admission/initializers/initializer.go +++ b/pkg/admission/initializers/initializer.go @@ -17,6 +17,7 @@ limitations under the License. package initializers import ( + kcpkubernetesinformers "github.com/kcp-dev/client-go/informers" kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" "k8s.io/apiserver/pkg/admission" @@ -28,7 +29,7 @@ import ( ) // NewKcpInformersInitializer returns an admission plugin initializer that injects -// kcp shared informer factories into admission plugins. +// both local and global kcp shared informer factories into admission plugins. func NewKcpInformersInitializer( local, global kcpinformers.SharedInformerFactory, ) *kcpInformersInitializer { @@ -38,6 +39,27 @@ func NewKcpInformersInitializer( } } +type kubeInformersInitializer struct { + localKcpInformers, globalKcpInformers kcpkubernetesinformers.SharedInformerFactory +} + +func (i *kubeInformersInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsKubeInformers); ok { + wants.SetKubeInformers(i.localKcpInformers, i.globalKcpInformers) + } +} + +// NewKubeInformersInitializer returns an admission plugin initializer that injects +// both local and global kube shared informer factories into admission plugins. +func NewKubeInformersInitializer( + local, global kcpkubernetesinformers.SharedInformerFactory, +) *kubeInformersInitializer { + return &kubeInformersInitializer{ + localKcpInformers: local, + globalKcpInformers: global, + } +} + type kcpInformersInitializer struct { localKcpInformers, globalKcpInformers kcpinformers.SharedInformerFactory } diff --git a/pkg/admission/initializers/interfaces.go b/pkg/admission/initializers/interfaces.go index 3cb015d539b..8174177232d 100644 --- a/pkg/admission/initializers/interfaces.go +++ b/pkg/admission/initializers/interfaces.go @@ -17,6 +17,7 @@ limitations under the License. package initializers import ( + kcpkubernetesinformers "github.com/kcp-dev/client-go/informers" kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" kcpclientset "github.com/kcp-dev/kcp/pkg/client/clientset/versioned/cluster" @@ -24,11 +25,17 @@ import ( ) // WantsKcpInformers interface should be implemented by admission plugins -// that want to have a kcp informer factory injected. +// that want to have both local and global kcp informer factories injected. type WantsKcpInformers interface { SetKcpInformers(local, global kcpinformers.SharedInformerFactory) } +// WantsKubeInformers interface should be implemented by admission plugins +// that want to have both local and global kube informer factories injected. +type WantsKubeInformers interface { + SetKubeInformers(local, global kcpkubernetesinformers.SharedInformerFactory) +} + // WantsKubeClusterClient interface should be implemented by admission plugins // that want to have a kube cluster client injected. 
type WantsKubeClusterClient interface { diff --git a/pkg/admission/mutatingwebhook/plugin.go b/pkg/admission/mutatingwebhook/plugin.go index e7c3acb47f4..aca889b0645 100644 --- a/pkg/admission/mutatingwebhook/plugin.go +++ b/pkg/admission/mutatingwebhook/plugin.go @@ -20,6 +20,7 @@ import ( "context" "io" + kcpkubernetesinformers "github.com/kcp-dev/client-go/informers" "github.com/kcp-dev/logicalcluster/v3" admissionv1 "k8s.io/api/admission/v1" @@ -30,7 +31,6 @@ import ( "k8s.io/apiserver/pkg/admission/plugin/webhook/config" "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" - "k8s.io/apiserver/pkg/informerfactoryhack" webhookutil "k8s.io/apiserver/pkg/util/webhook" "k8s.io/client-go/informers" @@ -53,6 +53,7 @@ var ( _ = admission.MutationInterface(&Plugin{}) _ = admission.InitializationValidator(&Plugin{}) _ = kcpinitializers.WantsKcpInformers(&Plugin{}) + _ = kcpinitializers.WantsKubeInformers(&Plugin{}) ) func NewMutatingAdmissionWebhook(configfile io.Reader) (*Plugin, error) { @@ -118,10 +119,12 @@ func (p *Plugin) Admit(ctx context.Context, attr admission.Attributes, o admissi // SetExternalKubeInformerFactory implements the WantsExternalKubeInformerFactory interface. func (p *Plugin) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { - clusterAwareFactory := informerfactoryhack.Unwrap(f) + p.Plugin.SetExternalKubeInformerFactory(f) // for namespaces +} + +func (p *Plugin) SetKubeInformers(local, global kcpkubernetesinformers.SharedInformerFactory) { p.WebhookDispatcher.SetHookSource(func(cluster logicalcluster.Name) generic.Source { - informer := clusterAwareFactory.Admissionregistration().V1().MutatingWebhookConfigurations().Cluster(cluster) + informer := global.Admissionregistration().V1().MutatingWebhookConfigurations().Cluster(cluster) return configuration.NewMutatingWebhookConfigurationManagerForInformer(informer) - }, clusterAwareFactory.Admissionregistration().V1().MutatingWebhookConfigurations().Informer().HasSynced) - p.Plugin.SetExternalKubeInformerFactory(f) + }, global.Admissionregistration().V1().MutatingWebhookConfigurations().Informer().HasSynced) } diff --git a/pkg/admission/validatingwebhook/plugin.go b/pkg/admission/validatingwebhook/plugin.go index d70870d7e47..74a7929c64f 100644 --- a/pkg/admission/validatingwebhook/plugin.go +++ b/pkg/admission/validatingwebhook/plugin.go @@ -20,6 +20,7 @@ import ( "context" "io" + kcpkubernetesinformers "github.com/kcp-dev/client-go/informers" "github.com/kcp-dev/logicalcluster/v3" admissionv1 "k8s.io/api/admission/v1" @@ -30,7 +31,6 @@ import ( "k8s.io/apiserver/pkg/admission/plugin/webhook/config" "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" "k8s.io/apiserver/pkg/admission/plugin/webhook/validating" - "k8s.io/apiserver/pkg/informerfactoryhack" webhookutil "k8s.io/apiserver/pkg/util/webhook" kubernetesinformers "k8s.io/client-go/informers" @@ -53,6 +53,7 @@ var ( _ = admission.ValidationInterface(&Plugin{}) _ = admission.InitializationValidator(&Plugin{}) _ = kcpinitializers.WantsKcpInformers(&Plugin{}) + _ = kcpinitializers.WantsKubeInformers(&Plugin{}) ) func NewValidatingAdmissionWebhook(configfile io.Reader) (*Plugin, error) { @@ -118,10 +119,12 @@ func (p *Plugin) Validate(ctx context.Context, attr admission.Attributes, o admi // SetExternalKubeInformerFactory implements the WantsExternalKubeInformerFactory interface. 
func (p *Plugin) SetExternalKubeInformerFactory(f kubernetesinformers.SharedInformerFactory) { - clusterAwareFactory := informerfactoryhack.Unwrap(f) + p.Plugin.SetExternalKubeInformerFactory(f) // for namespaces +} + +func (p *Plugin) SetKubeInformers(local, global kcpkubernetesinformers.SharedInformerFactory) { p.WebhookDispatcher.SetHookSource(func(cluster logicalcluster.Name) generic.Source { - informer := clusterAwareFactory.Admissionregistration().V1().ValidatingWebhookConfigurations().Cluster(cluster) + informer := global.Admissionregistration().V1().ValidatingWebhookConfigurations().Cluster(cluster) return configuration.NewValidatingWebhookConfigurationManagerForInformer(informer) - }, clusterAwareFactory.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer().HasSynced) - p.Plugin.SetExternalKubeInformerFactory(f) + }, global.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer().HasSynced) } diff --git a/pkg/apis/core/v1alpha1/shard_types.go b/pkg/apis/core/v1alpha1/shard_types.go index e0dc4657a61..445420cce0e 100644 --- a/pkg/apis/core/v1alpha1/shard_types.go +++ b/pkg/apis/core/v1alpha1/shard_types.go @@ -35,6 +35,7 @@ var RootShard = "root" // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster,categories=kcp +// +kubebuilder:printcolumn:name="Region",type=string,JSONPath=`.metadata.labels['region']`,description="The region this shard is in" // +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.baseURL`,description="Type URL to directly connect to the shard" // +kubebuilder:printcolumn:name="External URL",type=string,JSONPath=`.spec.externalURL`,description="The URL exposed in logical clusters created on that shard" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" diff --git a/pkg/apis/tenancy/v1alpha1/types_workspace.go b/pkg/apis/tenancy/v1alpha1/types_workspace.go index b9dd0275f23..5d3de80ad34 100644 --- a/pkg/apis/tenancy/v1alpha1/types_workspace.go +++ b/pkg/apis/tenancy/v1alpha1/types_workspace.go @@ -111,7 +111,8 @@ const LogicalClusterTypeAnnotationKey = "internal.tenancy.kcp.io/type" // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster,categories=kcp,shortName=ws // +kubebuilder:printcolumn:name="Type",type=string,JSONPath=`.spec.type.name`,description="Type of the workspace" -// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.metadata.labels['tenancy\.kcp\.dev/phase']`,description="The current phase (e.g. Scheduling, Initializing, Ready, Deleting)" +// +kubebuilder:printcolumn:name="Region",type=string,JSONPath=`.metadata.labels['region']`,description="The region this workspace is in" +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.metadata.labels['tenancy\.kcp\.io/phase']`,description="The current phase (e.g. Scheduling, Initializing, Ready, Deleting)" // +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.URL`,description="URL to access the workspace" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" type Workspace struct { @@ -150,7 +151,7 @@ type WorkspaceSpec struct { // If the no location is specified, an arbitrary location is chosen. // // +optional - Location *WorkspaceLocation `json:"shard,omitempty"` + Location *WorkspaceLocation `json:"location,omitempty"` // cluster is the name of the logical cluster this workspace is stored under.
// diff --git a/pkg/cache/client/options/cache.go b/pkg/cache/client/options/cache.go new file mode 100644 index 00000000000..315af19c5ce --- /dev/null +++ b/pkg/cache/client/options/cache.go @@ -0,0 +1,66 @@ +/* +Copyright 2023 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "fmt" + + "github.com/spf13/pflag" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + cacheclient "github.com/kcp-dev/kcp/pkg/cache/client" + "github.com/kcp-dev/kcp/pkg/cache/client/shard" +) + +type Cache struct { + KubeconfigFile string +} + +func NewCache() *Cache { + return &Cache{} +} + +func (o *Cache) AddFlags(flags *pflag.FlagSet) { + flags.StringVar(&o.KubeconfigFile, "cache-server-kubeconfig-file", o.KubeconfigFile, "The kubeconfig file of the cache server instance that hosts workspaces.") + flags.MarkDeprecated("cache-server-kubeconfig-file", "use --cache-kubeconfig instead") + + flags.StringVar(&o.KubeconfigFile, "cache-kubeconfig", o.KubeconfigFile, + "The kubeconfig file of the cache server instance that hosts workspaces.") +} + +func (o *Cache) Validate() []error { + return nil +} + +func (o *Cache) RestConfig(fallback *rest.Config) (*rest.Config, error) { + cacheClientConfig := fallback + if len(o.KubeconfigFile) > 0 { + var err error + cacheClientConfig, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: o.KubeconfigFile}, nil).ClientConfig() + if err != nil { + return nil, fmt.Errorf("failed to load the cache kubeconfig from %q: %w", o.KubeconfigFile, err) + } + } + + rt := cacheclient.WithCacheServiceRoundTripper(cacheClientConfig) + rt = cacheclient.WithShardNameFromContextRoundTripper(rt) + rt = cacheclient.WithDefaultShardRoundTripper(rt, shard.Wildcard) + + return rt, nil +} diff --git a/pkg/cache/server/bootstrap/bootstrap.go b/pkg/cache/server/bootstrap/bootstrap.go index 7299c7ab271..096cd4e65e2 100644 --- a/pkg/cache/server/bootstrap/bootstrap.go +++ b/pkg/cache/server/bootstrap/bootstrap.go @@ -47,12 +47,17 @@ func Bootstrap(ctx context.Context, apiExtensionsClusterClient kcpapiextensionsc {"apis.kcp.io", "apiresourceschemas"}, {"apis.kcp.io", "apiconversions"}, {"apis.kcp.io", "apiexports"}, + {"core.kcp.io", "logicalclusters"}, {"core.kcp.io", "shards"}, {"tenancy.kcp.io", "workspacetypes"}, + {"workload.kcp.io", "synctargets"}, + {"scheduling.kcp.io", "locations"}, {"rbac.authorization.k8s.io", "roles"}, {"rbac.authorization.k8s.io", "clusterroles"}, {"rbac.authorization.k8s.io", "rolebindings"}, {"rbac.authorization.k8s.io", "clusterrolebindings"}, + {"admissionregistration.k8s.io", "mutatingwebhookconfigurations"}, + {"admissionregistration.k8s.io", "validatingwebhookconfigurations"}, } { crd := &apiextensionsv1.CustomResourceDefinition{} if err := configcrds.Unmarshal(fmt.Sprintf("%s_%s.yaml", gr.group, gr.resource), crd); err != nil { diff --git a/pkg/openapi/zz_generated.openapi.go b/pkg/openapi/zz_generated.openapi.go index b2cb9704085..d5af894a773 100644 --- 
a/pkg/openapi/zz_generated.openapi.go +++ b/pkg/openapi/zz_generated.openapi.go @@ -3661,7 +3661,7 @@ func schema_pkg_apis_tenancy_v1alpha1_WorkspaceSpec(ref common.ReferenceCallback Ref: ref("github.com/kcp-dev/kcp/pkg/apis/tenancy/v1alpha1.WorkspaceTypeReference"), }, }, - "shard": { + "location": { SchemaProps: spec.SchemaProps{ Description: "location constraints where this workspace can be scheduled to.\n\nIf the no location is specified, an arbitrary location is chosen.", Ref: ref("github.com/kcp-dev/kcp/pkg/apis/tenancy/v1alpha1.WorkspaceLocation"), diff --git a/pkg/reconciler/apis/replicateclusterrole/replicateclusterrole_controller.go b/pkg/reconciler/apis/replicateclusterrole/replicateclusterrole_controller.go new file mode 100644 index 00000000000..80e88899506 --- /dev/null +++ b/pkg/reconciler/apis/replicateclusterrole/replicateclusterrole_controller.go @@ -0,0 +1,79 @@ +/* +Copyright 2023 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replicateclusterrole + +import ( + "strings" + + kcprbacinformers "github.com/kcp-dev/client-go/informers/rbac/v1" + kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/kcp-dev/kcp/pkg/apis/apis" + apisv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/apis/v1alpha1" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/labelclusterroles" +) + +const ( + ControllerName = "kcp-apis-replicate-clusterrole" +) + +// NewController returns a new controller for labelling ClusterRole that should be replicated. +func NewController( + kubeClusterClient kcpkubernetesclientset.ClusterInterface, + clusterRoleInformer kcprbacinformers.ClusterRoleClusterInformer, + clusterRoleBindingInformer kcprbacinformers.ClusterRoleBindingClusterInformer, +) (labelclusterroles.Controller, error) { + return labelclusterroles.NewController( + ControllerName, + apis.GroupName, + HasBindOrContentRule, + HasMaximalPermissionClaimSubject, + kubeClusterClient, + clusterRoleInformer, + clusterRoleBindingInformer, + ) +} + +func HasBindOrContentRule(cr *rbacv1.ClusterRole) bool { + for _, rule := range cr.Rules { + apiGroups := sets.NewString(rule.APIGroups...) + if !apiGroups.Has(apis.GroupName) && !apiGroups.Has("*") { + continue + } + resources := sets.NewString(rule.Resources...) + verbs := sets.NewString(rule.Verbs...) 
+ if (resources.Has("apiexports") || resources.Has("*")) && (verbs.Has("bind") || verbs.Has("*")) { + return true + } + if resources.Has("apiexports/content") || resources.Has("*") { + return true + } + } + return false +} + +func HasMaximalPermissionClaimSubject(crb *rbacv1.ClusterRoleBinding) bool { + for _, s := range crb.Subjects { + if strings.HasPrefix(s.Name, apisv1alpha1.MaximalPermissionPolicyRBACUserGroupPrefix) && (s.Kind == rbacv1.UserKind || s.Kind == rbacv1.GroupKind) && s.APIGroup == rbacv1.GroupName { + return true + } + } + return false +} diff --git a/pkg/reconciler/apis/replicateclusterrolebinding/replicateclusterrolebinding_controller.go b/pkg/reconciler/apis/replicateclusterrolebinding/replicateclusterrolebinding_controller.go new file mode 100644 index 00000000000..65761152be6 --- /dev/null +++ b/pkg/reconciler/apis/replicateclusterrolebinding/replicateclusterrolebinding_controller.go @@ -0,0 +1,47 @@ +/* +Copyright 2023 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replicateclusterrolebinding + +import ( + kcprbacinformers "github.com/kcp-dev/client-go/informers/rbac/v1" + kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" + + "github.com/kcp-dev/kcp/pkg/apis/apis" + "github.com/kcp-dev/kcp/pkg/reconciler/apis/replicateclusterrole" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/labelclusterrolebindings" +) + +const ( + ControllerName = "kcp-apis-replicate-clusterrolebinding" +) + +// NewController returns a new controller for labelling ClusterRoleBinding that should be replicated. +func NewController( + kubeClusterClient kcpkubernetesclientset.ClusterInterface, + clusterRoleBindingInformer kcprbacinformers.ClusterRoleBindingClusterInformer, + clusterRoleInformer kcprbacinformers.ClusterRoleClusterInformer, +) (labelclusterrolebindings.Controller, error) { + return labelclusterrolebindings.NewController( + ControllerName, + apis.GroupName, + replicateclusterrole.HasBindOrContentRule, + replicateclusterrole.HasMaximalPermissionClaimSubject, + kubeClusterClient, + clusterRoleBindingInformer, + clusterRoleInformer, + ) +} diff --git a/pkg/reconciler/apis/replicatelogicalcluster/replicatelogicalcluster_controller.go b/pkg/reconciler/apis/replicatelogicalcluster/replicatelogicalcluster_controller.go new file mode 100644 index 00000000000..d7ad1198f50 --- /dev/null +++ b/pkg/reconciler/apis/replicatelogicalcluster/replicatelogicalcluster_controller.go @@ -0,0 +1,105 @@ +/* +Copyright 2023 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package replicatelogicalcluster + +import ( + "fmt" + + kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" + "github.com/kcp-dev/logicalcluster/v3" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" + + "github.com/kcp-dev/kcp/pkg/apis/apis" + apisv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/apis/v1alpha1" + corev1alpha1 "github.com/kcp-dev/kcp/pkg/apis/core/v1alpha1" + kcpclientset "github.com/kcp-dev/kcp/pkg/client/clientset/versioned/cluster" + apisv1alpha1informers "github.com/kcp-dev/kcp/pkg/client/informers/externalversions/apis/v1alpha1" + corev1alpha1informers "github.com/kcp-dev/kcp/pkg/client/informers/externalversions/core/v1alpha1" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/labellogicalcluster" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/replication" +) + +const ( + ControllerName = "kcp-apis-replicate-logicalcluster" +) + +// NewController returns a new controller for labelling LogicalClusters that should be replicated. + +func NewController( + kcpClusterClient kcpclientset.ClusterInterface, + logicalClusterInformer corev1alpha1informers.LogicalClusterClusterInformer, + apiExportInformer apisv1alpha1informers.APIExportClusterInformer, +) labellogicalcluster.Controller { + logicalClusterLister := logicalClusterInformer.Lister() + apiExportIndexer := apiExportInformer.Informer().GetIndexer() + + c := labellogicalcluster.NewController( + ControllerName, + apis.GroupName, + func(cluster *corev1alpha1.LogicalCluster) bool { + // If there are any APIExports for this logical cluster, then the LogicalCluster object should be replicated. + keys, err := apiExportIndexer.IndexKeys(kcpcache.ClusterIndexName, kcpcache.ClusterIndexKey(logicalcluster.From(cluster))) + if err != nil { + runtime.HandleError(fmt.Errorf("failed to list APIExports: %v", err)) + return false + } + return len(keys) > 0 + }, + kcpClusterClient, + logicalClusterInformer, + ) + + // enqueue the logical cluster every time the APIExport changes + enqueueAPIExport := func(obj interface{}) { + if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { + obj = tombstone.Obj + } + + export, ok := obj.(*apisv1alpha1.APIExport) + if !ok { + runtime.HandleError(fmt.Errorf("unexpected object type: %T", obj)) + return + } + + cluster, err := logicalClusterLister.Cluster(logicalcluster.From(export)).Get(corev1alpha1.LogicalClusterName) + if err != nil && !apierrors.IsNotFound(err) { + runtime.HandleError(fmt.Errorf("failed to get logical cluster: %v", err)) + return + } else if apierrors.IsNotFound(err) { + return + } + + c.EnqueueLogicalCluster(cluster, "reason", "APIExport changed", "apiexport", export.Name) + } + + apiExportInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: replication.IsNoSystemClusterName, + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + enqueueAPIExport(obj) + }, + DeleteFunc: func(obj interface{}) { + enqueueAPIExport(obj) + }, + }, + }) + + return c +} diff --git a/pkg/reconciler/apis/replicationclusterrolebinding/replicationclusterrolebinding_controller.go b/pkg/reconciler/cache/labelclusterrolebindings/labelclusterrolebinding_controller.go similarity index 74% rename from pkg/reconciler/apis/replicationclusterrolebinding/replicationclusterrolebinding_controller.go rename to pkg/reconciler/cache/labelclusterrolebindings/labelclusterrolebinding_controller.go index a124c82e6e3..3e1af1c608f 100644 ---
a/pkg/reconciler/apis/replicationclusterrolebinding/replicationclusterrolebinding_controller.go +++ b/pkg/reconciler/cache/labelclusterrolebindings/labelclusterrolebinding_controller.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package replicationclusterrolebinding +package labelclusterrolebindings import ( "context" @@ -38,25 +38,36 @@ import ( "github.com/kcp-dev/kcp/pkg/indexers" "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/reconciler/apis/replicationclusterrole" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/labelclusterroles" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/replication" "github.com/kcp-dev/kcp/pkg/reconciler/committer" ) -const ( - ControllerName = "kcp-apis-replication-clusterrolebinding" -) +type Controller interface { + Start(ctx context.Context, numThreads int) +} // NewController returns a new controller for labelling ClusterRoleBinding that should be replicated. func NewController( + controllerName string, + groupName string, + isRelevantClusterRole func(cr *rbacv1.ClusterRole) bool, + isRelevantClusterRoleBinding func(crb *rbacv1.ClusterRoleBinding) bool, kubeClusterClient kcpkubernetesclientset.ClusterInterface, clusterRoleBindingInformer kcprbacinformers.ClusterRoleBindingClusterInformer, clusterRoleInformer kcprbacinformers.ClusterRoleClusterInformer, -) (*controller, error) { - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName) +) (Controller, error) { + queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName) c := &controller{ + controllerName: controllerName, + groupName: groupName, + queue: queue, + isRelevantClusterRole: isRelevantClusterRole, + isRelevantClusterRoleBinding: isRelevantClusterRoleBinding, + kubeClusterClient: kubeClusterClient, clusterRoleBindingLister: clusterRoleBindingInformer.Lister(), @@ -69,30 +80,36 @@ func NewController( } indexers.AddIfNotPresentOrDie(clusterRoleBindingInformer.Informer().GetIndexer(), cache.Indexers{ - replicationclusterrole.ClusterRoleBindingByClusterRoleName: replicationclusterrole.IndexClusterRoleBindingByClusterRoleName, + labelclusterroles.ClusterRoleBindingByClusterRoleName: labelclusterroles.IndexClusterRoleBindingByClusterRoleName, }) - clusterRoleBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - c.enqueueClusterRoleBinding(obj) - }, - UpdateFunc: func(_, newObj interface{}) { - c.enqueueClusterRoleBinding(newObj) - }, - DeleteFunc: func(obj interface{}) { - c.enqueueClusterRoleBinding(obj) + clusterRoleBindingInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: replication.IsNoSystemClusterName, + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + c.enqueueClusterRoleBinding(obj) + }, + UpdateFunc: func(_, newObj interface{}) { + c.enqueueClusterRoleBinding(newObj) + }, + DeleteFunc: func(obj interface{}) { + c.enqueueClusterRoleBinding(obj) + }, }, }) - clusterRoleInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - c.enqueueClusterRole(obj) - }, - UpdateFunc: func(_, newObj interface{}) { - c.enqueueClusterRole(newObj) - }, - DeleteFunc: func(obj interface{}) { - c.enqueueClusterRole(obj) + clusterRoleInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: replication.IsNoSystemClusterName, + Handler: 
cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + c.enqueueClusterRole(obj) + }, + UpdateFunc: func(_, newObj interface{}) { + c.enqueueClusterRole(newObj) + }, + DeleteFunc: func(obj interface{}) { + c.enqueueClusterRole(obj) + }, }, }) @@ -103,8 +120,14 @@ func NewController( // 1. either a maximum-permission-policy subject is bound // 2. or a ClusterRole. type controller struct { + controllerName string + groupName string + queue workqueue.RateLimitingInterface + isRelevantClusterRole func(cr *rbacv1.ClusterRole) bool + isRelevantClusterRoleBinding func(crb *rbacv1.ClusterRoleBinding) bool + kubeClusterClient kcpkubernetesclientset.ClusterInterface clusterRoleBindingLister kcprbaclisters.ClusterRoleBindingClusterLister @@ -125,7 +148,7 @@ func (c *controller) enqueueClusterRoleBinding(obj interface{}, values ...interf return } - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), key) + logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), c.controllerName), key) logger.V(4).WithValues(values...).Info("queueing ClusterRoleBinding") c.queue.Add(key) } @@ -143,7 +166,7 @@ func (c *controller) enqueueClusterRole(obj interface{}) { return } - objs, err := c.clusterRoleBindingIndexer.ByIndex(replicationclusterrole.ClusterRoleBindingByClusterRoleName, key) + objs, err := c.clusterRoleBindingIndexer.ByIndex(labelclusterroles.ClusterRoleBindingByClusterRoleName, key) if err != nil { runtime.HandleError(err) return @@ -159,7 +182,7 @@ func (c *controller) Start(ctx context.Context, numThreads int) { defer runtime.HandleCrash() defer c.queue.ShutDown() - logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName) + logger := logging.WithReconciler(klog.FromContext(ctx), c.controllerName) ctx = klog.NewContext(ctx, logger) logger.Info("Starting controller") defer logger.Info("Shutting down controller") @@ -193,7 +216,7 @@ func (c *controller) processNextWorkItem(ctx context.Context) bool { defer c.queue.Done(key) if requeue, err := c.process(ctx, key); err != nil { - runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", ControllerName, key, err)) + runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", c.controllerName, key, err)) c.queue.AddRateLimited(key) return true } else if requeue { diff --git a/pkg/reconciler/apis/replicationclusterrolebinding/replicationclusterrolebinding_reconcile.go b/pkg/reconciler/cache/labelclusterrolebindings/labelclusterrolebinding_reconcile.go similarity index 53% rename from pkg/reconciler/apis/replicationclusterrolebinding/replicationclusterrolebinding_reconcile.go rename to pkg/reconciler/cache/labelclusterrolebindings/labelclusterrolebinding_reconcile.go index 7b764e20762..9192e2b7b98 100644 --- a/pkg/reconciler/apis/replicationclusterrolebinding/replicationclusterrolebinding_reconcile.go +++ b/pkg/reconciler/cache/labelclusterrolebindings/labelclusterrolebinding_reconcile.go @@ -14,24 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package replicationclusterrolebinding +package labelclusterrolebindings import ( "context" - "strings" "github.com/kcp-dev/logicalcluster/v3" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/klog/v2" - apisv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/apis/v1alpha1" kcpcorehelper "github.com/kcp-dev/kcp/pkg/apis/core/helper" - "github.com/kcp-dev/kcp/pkg/reconciler/apis/replicationclusterrole" ) func (c *controller) reconcile(ctx context.Context, crb *rbacv1.ClusterRoleBinding) (bool, error) { r := &reconciler{ + groupName: c.groupName, + isRelevantClusterRole: c.isRelevantClusterRole, + isRelevantClusterRoleBinding: c.isRelevantClusterRoleBinding, getClusterRole: func(cluster logicalcluster.Name, name string) (*rbacv1.ClusterRole, error) { return c.clusterRoleLister.Cluster(cluster).Get(name) }, @@ -40,35 +41,49 @@ func (c *controller) reconcile(ctx context.Context, crb *rbacv1.ClusterRoleBindi } type reconciler struct { - getClusterRole func(cluster logicalcluster.Name, name string) (*rbacv1.ClusterRole, error) + groupName string + isRelevantClusterRole func(cr *rbacv1.ClusterRole) bool + isRelevantClusterRoleBinding func(crb *rbacv1.ClusterRoleBinding) bool + getClusterRole func(cluster logicalcluster.Name, name string) (*rbacv1.ClusterRole, error) } func (r *reconciler) reconcile(ctx context.Context, crb *rbacv1.ClusterRoleBinding) (bool, error) { + logger := klog.FromContext(ctx) + // is a maximum-permission-policy subject? - replicate := false - for _, s := range crb.Subjects { - if strings.HasPrefix(s.Name, apisv1alpha1.MaximalPermissionPolicyRBACUserGroupPrefix) && (s.Kind == rbacv1.UserKind || s.Kind == rbacv1.GroupKind) && s.APIGroup == rbacv1.GroupName { - replicate = true - break - } - } + replicate := r.isRelevantClusterRoleBinding(crb) // references relevant ClusterRole? 
if !replicate && crb.RoleRef.Kind == "ClusterRole" && crb.RoleRef.APIGroup == rbacv1.GroupName { - cr, err := r.getClusterRole(logicalcluster.From(crb), crb.RoleRef.Name) + localCR, err := r.getClusterRole(logicalcluster.From(crb), crb.RoleRef.Name) if err != nil && !errors.IsNotFound(err) { return false, err } - if cr != nil && replicationclusterrole.HasBindOrContentRule(cr) { + if localCR != nil && r.isRelevantClusterRole(localCR) { replicate = true + } else { + // fall back to possible bootstrap ClusterRole + bootstrapCR, err := r.getClusterRole("system:admin", crb.RoleRef.Name) + if err != nil && !errors.IsNotFound(err) { + return false, err + } + if bootstrapCR != nil && r.isRelevantClusterRole(bootstrapCR) { + replicate = true + } } } // calculate patch if replicate { - crb.Annotations, _ = kcpcorehelper.ReplicateFor(crb.Annotations, "apis.kcp.io") + var changed bool + if crb.Annotations, changed = kcpcorehelper.ReplicateFor(crb.Annotations, r.groupName); changed { + logger.V(2).Info("Replicating ClusterRoleBinding") + } } else { - crb.Annotations, _ = kcpcorehelper.DontReplicateFor(crb.Annotations, "apis.kcp.io") + var changed bool + if crb.Annotations, changed = kcpcorehelper.DontReplicateFor(crb.Annotations, r.groupName); changed { + logger.V(2).Info("Not replicating ClusterRoleBinding") + } } return false, nil diff --git a/pkg/reconciler/apis/replicationclusterrole/indexes.go b/pkg/reconciler/cache/labelclusterroles/indexes.go similarity index 97% rename from pkg/reconciler/apis/replicationclusterrole/indexes.go rename to pkg/reconciler/cache/labelclusterroles/indexes.go index 13ebc62d178..36dfa7d7fc7 100644 --- a/pkg/reconciler/apis/replicationclusterrole/indexes.go +++ b/pkg/reconciler/cache/labelclusterroles/indexes.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package replicationclusterrole +package labelclusterroles import ( "fmt" diff --git a/pkg/reconciler/apis/replicationclusterrole/replicationclusterrole_controller.go b/pkg/reconciler/cache/labelclusterroles/labelclusterrole_controller.go similarity index 76% rename from pkg/reconciler/apis/replicationclusterrole/replicationclusterrole_controller.go rename to pkg/reconciler/cache/labelclusterroles/labelclusterrole_controller.go index f071ea97738..a0d1cb58ed1 100644 --- a/pkg/reconciler/apis/replicationclusterrole/replicationclusterrole_controller.go +++ b/pkg/reconciler/cache/labelclusterroles/labelclusterrole_controller.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package replicationclusterrole +package labelclusterroles import ( "context" @@ -39,22 +39,33 @@ import ( "github.com/kcp-dev/kcp/pkg/indexers" "github.com/kcp-dev/kcp/pkg/logging" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/replication" "github.com/kcp-dev/kcp/pkg/reconciler/committer" ) -const ( - ControllerName = "kcp-apiexport-replication-clusterrole" -) +type Controller interface { + Start(ctx context.Context, numThreads int) +} // NewController returns a new controller for labelling ClusterRole that should be replicated. 
func NewController( + controllerName string, + groupName string, + isRelevantClusterRole func(cr *rbacv1.ClusterRole) bool, + isRelevantClusterRoleBinding func(crb *rbacv1.ClusterRoleBinding) bool, kubeClusterClient kcpkubernetesclientset.ClusterInterface, clusterRoleInformer kcprbacinformers.ClusterRoleClusterInformer, clusterRoleBindingInformer kcprbacinformers.ClusterRoleBindingClusterInformer, -) (*controller, error) { - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName) +) (Controller, error) { + queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName) c := &controller{ + controllerName: controllerName, + groupName: groupName, + + isRelevantClusterRole: isRelevantClusterRole, + isRelevantClusterRoleBinding: isRelevantClusterRoleBinding, + queue: queue, kubeClusterClient: kubeClusterClient, @@ -72,27 +83,33 @@ func NewController( ClusterRoleBindingByClusterRoleName: IndexClusterRoleBindingByClusterRoleName, }) - clusterRoleInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - c.enqueueClusterRole(obj) - }, - UpdateFunc: func(_, newObj interface{}) { - c.enqueueClusterRole(newObj) - }, - DeleteFunc: func(obj interface{}) { - c.enqueueClusterRole(obj) + clusterRoleInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: replication.IsNoSystemClusterName, + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + c.enqueueClusterRole(obj) + }, + UpdateFunc: func(_, newObj interface{}) { + c.enqueueClusterRole(newObj) + }, + DeleteFunc: func(obj interface{}) { + c.enqueueClusterRole(obj) + }, }, }) - clusterRoleBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - c.enqueueClusterRoleBinding(obj) - }, - UpdateFunc: func(_, newObj interface{}) { - c.enqueueClusterRoleBinding(newObj) - }, - DeleteFunc: func(obj interface{}) { - c.enqueueClusterRoleBinding(obj) + clusterRoleBindingInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: replication.IsNoSystemClusterName, + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + c.enqueueClusterRoleBinding(obj) + }, + UpdateFunc: func(_, newObj interface{}) { + c.enqueueClusterRoleBinding(newObj) + }, + DeleteFunc: func(obj interface{}) { + c.enqueueClusterRoleBinding(obj) + }, }, }) @@ -102,6 +119,12 @@ func NewController( // controller reconciles ClusterRoles by labelling them to be replicated when pointing to an // ClusterRole content or verb bind. 
type controller struct { + controllerName string + groupName string + + isRelevantClusterRole func(cr *rbacv1.ClusterRole) bool + isRelevantClusterRoleBinding func(crb *rbacv1.ClusterRoleBinding) bool + queue workqueue.RateLimitingInterface kubeClusterClient kcpkubernetesclientset.ClusterInterface @@ -124,7 +147,7 @@ func (c *controller) enqueueClusterRole(obj interface{}, values ...interface{}) return } - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), key) + logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), c.controllerName), key) logger.V(4).WithValues(values...).Info("queueing ClusterRole") c.queue.Add(key) } @@ -145,9 +168,11 @@ func (c *controller) enqueueClusterRoleBinding(obj interface{}) { } cr, err := c.clusterRoleLister.Cluster(logicalcluster.From(crb)).Get(crb.RoleRef.Name) - if err != nil { + if err != nil && !errors.IsNotFound(err) { runtime.HandleError(err) return + } else if errors.IsNotFound(err) { + return // dangling ClusterRole reference, nothing to do } c.enqueueClusterRole(cr, "reason", "ClusterRoleBinding", "ClusterRoleBinding.name", crb.Name) @@ -158,7 +183,7 @@ func (c *controller) Start(ctx context.Context, numThreads int) { defer runtime.HandleCrash() defer c.queue.ShutDown() - logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName) + logger := logging.WithReconciler(klog.FromContext(ctx), c.controllerName) ctx = klog.NewContext(ctx, logger) logger.Info("Starting controller") defer logger.Info("Shutting down controller") @@ -192,7 +217,7 @@ func (c *controller) processNextWorkItem(ctx context.Context) bool { defer c.queue.Done(key) if requeue, err := c.process(ctx, key); err != nil { - runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", ControllerName, key, err)) + runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", c.controllerName, key, err)) c.queue.AddRateLimited(key) return true } else if requeue { diff --git a/pkg/reconciler/apis/replicationclusterrole/replicationclusterrole_reconcile.go b/pkg/reconciler/cache/labelclusterroles/labelclusterrole_reconcile.go similarity index 62% rename from pkg/reconciler/apis/replicationclusterrole/replicationclusterrole_reconcile.go rename to pkg/reconciler/cache/labelclusterroles/labelclusterrole_reconcile.go index 56c04fafd16..786e2c57239 100644 --- a/pkg/reconciler/apis/replicationclusterrole/replicationclusterrole_reconcile.go +++ b/pkg/reconciler/cache/labelclusterroles/labelclusterrole_reconcile.go @@ -14,27 +14,27 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package replicationclusterrole +package labelclusterroles import ( "context" - "strings" kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" "github.com/kcp-dev/logicalcluster/v3" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" - "github.com/kcp-dev/kcp/pkg/apis/apis" - apisv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/apis/v1alpha1" kcpcorehelper "github.com/kcp-dev/kcp/pkg/apis/core/helper" "github.com/kcp-dev/kcp/pkg/indexers" ) func (c *controller) reconcile(ctx context.Context, rb *rbacv1.ClusterRole) (bool, error) { r := &reconciler{ + groupName: c.groupName, + isRelevantClusterRole: c.isRelevantClusterRole, + isRelevantClusterRoleBinding: c.isRelevantClusterRoleBinding, getReferencingClusterRoleBindings: func(cluster logicalcluster.Name, name string) ([]*rbacv1.ClusterRoleBinding, error) { key := kcpcache.ToClusterAwareKey(cluster.String(), "", name) return indexers.ByIndex[*rbacv1.ClusterRoleBinding](c.clusterRoleBindingIndexer, ClusterRoleBindingByClusterRoleName, key) @@ -44,11 +44,18 @@ func (c *controller) reconcile(ctx context.Context, rb *rbacv1.ClusterRole) (boo } type reconciler struct { + groupName string + + isRelevantClusterRole func(cr *rbacv1.ClusterRole) bool + isRelevantClusterRoleBinding func(crb *rbacv1.ClusterRoleBinding) bool + getReferencingClusterRoleBindings func(cluster logicalcluster.Name, name string) ([]*rbacv1.ClusterRoleBinding, error) } func (r *reconciler) reconcile(ctx context.Context, cr *rbacv1.ClusterRole) (bool, error) { - replicate := HasBindOrContentRule(cr) + logger := klog.FromContext(ctx) + + replicate := r.isRelevantClusterRole(cr) if !replicate { objs, err := r.getReferencingClusterRoleBindings(logicalcluster.From(cr), cr.Name) if err != nil { @@ -56,7 +63,7 @@ func (r *reconciler) reconcile(ctx context.Context, cr *rbacv1.ClusterRole) (boo return false, nil // nothing we can do } for _, crb := range objs { - if HasMaximalPermissionClaimSubject(crb) { + if r.isRelevantClusterRoleBinding(crb) { replicate = true break } @@ -64,34 +71,16 @@ func (r *reconciler) reconcile(ctx context.Context, cr *rbacv1.ClusterRole) (boo } if replicate { - cr.Annotations, _ = kcpcorehelper.ReplicateFor(cr.Annotations, "apis.kcp.io") - } else { - cr.Annotations, _ = kcpcorehelper.DontReplicateFor(cr.Annotations, "apis.kcp.io") - } - - return false, nil -} - -func HasBindOrContentRule(cr *rbacv1.ClusterRole) bool { - for _, rule := range cr.Rules { - if !sets.NewString(rule.APIGroups...).Has(apis.GroupName) { - continue - } - if sets.NewString(rule.Resources...).Has("apiexports") && sets.NewString(rule.Verbs...).Has("bind") { - return true + var changed bool + if cr.Annotations, changed = kcpcorehelper.ReplicateFor(cr.Annotations, r.groupName); changed { + logger.V(2).Info("Replicating ClusterRole") } - if sets.NewString(rule.Resources...).Has("apiexports/content") { - return true + } else { + var changed bool + if cr.Annotations, changed = kcpcorehelper.DontReplicateFor(cr.Annotations, r.groupName); changed { + logger.V(2).Info("Not replicating ClusterRole") } } - return false -} -func HasMaximalPermissionClaimSubject(crb *rbacv1.ClusterRoleBinding) bool { - for _, s := range crb.Subjects { - if strings.HasPrefix(s.Name, apisv1alpha1.MaximalPermissionPolicyRBACUserGroupPrefix) && (s.Kind == rbacv1.UserKind || s.Kind == rbacv1.GroupKind) && s.APIGroup == rbacv1.GroupName { - return true - } - } - return false + return false, nil } diff --git 
a/pkg/reconciler/cache/labellogicalcluster/labellogicalcluster_controller.go b/pkg/reconciler/cache/labellogicalcluster/labellogicalcluster_controller.go new file mode 100644 index 00000000000..f23203466bf --- /dev/null +++ b/pkg/reconciler/cache/labellogicalcluster/labellogicalcluster_controller.go @@ -0,0 +1,221 @@ +/* +Copyright 2023 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labellogicalcluster + +import ( + "context" + "fmt" + "time" + + kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" + + "k8s.io/apimachinery/pkg/api/errors" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + corev1alpha1 "github.com/kcp-dev/kcp/pkg/apis/core/v1alpha1" + kcpclientset "github.com/kcp-dev/kcp/pkg/client/clientset/versioned/cluster" + corev1alpha1client "github.com/kcp-dev/kcp/pkg/client/clientset/versioned/typed/core/v1alpha1" + corev1alpha1informers "github.com/kcp-dev/kcp/pkg/client/informers/externalversions/core/v1alpha1" + corev1alpha1listers "github.com/kcp-dev/kcp/pkg/client/listers/core/v1alpha1" + "github.com/kcp-dev/kcp/pkg/logging" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/replication" + "github.com/kcp-dev/kcp/pkg/reconciler/committer" +) + +type Controller interface { + EnqueueLogicalCluster(cluster *corev1alpha1.LogicalCluster, values ...interface{}) + + Start(ctx context.Context, numThreads int) +} + +type LogicalCluster = corev1alpha1.LogicalCluster +type LogicalClusterSpec = corev1alpha1.LogicalClusterSpec +type LogicalClusterStatus = corev1alpha1.LogicalClusterStatus +type Patcher = corev1alpha1client.LogicalClusterInterface +type Resource = committer.Resource[*LogicalClusterSpec, *LogicalClusterStatus] +type CommitFunc = func(context.Context, *Resource, *Resource) error + +// NewController returns a new controller for labelling LogicalClusters that should be replicated. 
+func NewController( + controllerName string, + groupName string, + isRelevantLogicalCluster func(cluster *corev1alpha1.LogicalCluster) bool, + kcpClusterClient kcpclientset.ClusterInterface, + logicalClusterInformer corev1alpha1informers.LogicalClusterClusterInformer, +) Controller { + queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName) + + c := &controller{ + controllerName: controllerName, + groupName: groupName, + + isRelevantLogicalCluster: isRelevantLogicalCluster, + + queue: queue, + + kcpClusterClient: kcpClusterClient, + + logicalClusterLister: logicalClusterInformer.Lister(), + logicalClusterIndexer: logicalClusterInformer.Informer().GetIndexer(), + + commit: committer.NewCommitter[*LogicalCluster, Patcher, *LogicalClusterSpec, *LogicalClusterStatus](kcpClusterClient.CoreV1alpha1().LogicalClusters()), + } + + logicalClusterInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: replication.IsNoSystemClusterName, + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + c.enqueueLogicalCluster(obj) + }, + UpdateFunc: func(_, newObj interface{}) { + c.enqueueLogicalCluster(newObj) + }, + DeleteFunc: func(obj interface{}) { + c.enqueueLogicalCluster(obj) + }, + }, + }) + + return c +} + +// controller reconciles LogicalClusters by labelling them to be replicated when isRelevantLogicalCluster says so. +type controller struct { + controllerName string + groupName string + + isRelevantLogicalCluster func(cluster *corev1alpha1.LogicalCluster) bool + + queue workqueue.RateLimitingInterface + + kcpClusterClient kcpclientset.ClusterInterface + + logicalClusterLister corev1alpha1listers.LogicalClusterClusterLister + logicalClusterIndexer cache.Indexer + + // commit creates a patch and submits it, if needed. + commit CommitFunc +} + +func (c *controller) EnqueueLogicalCluster(cluster *corev1alpha1.LogicalCluster, values ...interface{}) { + c.enqueueLogicalCluster(cluster, values...) +} + +// enqueueLogicalCluster enqueues a LogicalCluster. +func (c *controller) enqueueLogicalCluster(obj interface{}, values ...interface{}) { + key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) + if err != nil { + runtime.HandleError(err) + return + } + + logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), c.controllerName), key) + logger.V(4).WithValues(values...).Info("queueing LogicalCluster") + c.queue.Add(key) +} + +// Start starts the controller, which stops when ctx.Done() is closed. +func (c *controller) Start(ctx context.Context, numThreads int) { + defer runtime.HandleCrash() + defer c.queue.ShutDown() + + logger := logging.WithReconciler(klog.FromContext(ctx), c.controllerName) + ctx = klog.NewContext(ctx, logger) + logger.Info("Starting controller") + defer logger.Info("Shutting down controller") + + for i := 0; i < numThreads; i++ { + go wait.UntilWithContext(ctx, c.startWorker, time.Second) + } + + <-ctx.Done() +} + +func (c *controller) startWorker(ctx context.Context) { + for c.processNextWorkItem(ctx) { + } +} + +func (c *controller) processNextWorkItem(ctx context.Context) bool { + // Wait until there is a new item in the working queue + k, quit := c.queue.Get() + if quit { + return false + } + key := k.(string) + + logger := logging.WithQueueKey(klog.FromContext(ctx), key) + ctx = klog.NewContext(ctx, logger) + logger.V(4).Info("processing key") + + // No matter what, tell the queue we're done with this key, to unblock + // other workers.
+ defer c.queue.Done(key) + + if requeue, err := c.process(ctx, key); err != nil { + runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", c.controllerName, key, err)) + c.queue.AddRateLimited(key) + return true + } else if requeue { + // only requeue if we didn't error, but we still want to requeue + c.queue.Add(key) + return true + } + c.queue.Forget(key) + return true +} + +func (c *controller) process(ctx context.Context, key string) (bool, error) { + parent, _, name, err := kcpcache.SplitMetaClusterNamespaceKey(key) + if err != nil { + runtime.HandleError(err) + return false, nil + } + obj, err := c.logicalClusterLister.Cluster(parent).Get(name) + if err != nil { + if errors.IsNotFound(err) { + return false, nil // object deleted before we handled it + } + return false, err + } + + old := obj + obj = obj.DeepCopy() + + logger := logging.WithObject(klog.FromContext(ctx), obj) + ctx = klog.NewContext(ctx, logger) + + var errs []error + requeue, err := c.reconcile(ctx, obj) + if err != nil { + errs = append(errs, err) + } + + // If the object being reconciled changed as a result, update it. + oldResource := &Resource{ObjectMeta: old.ObjectMeta, Spec: &old.Spec, Status: &old.Status} + newResource := &Resource{ObjectMeta: obj.ObjectMeta, Spec: &obj.Spec, Status: &obj.Status} + if err := c.commit(ctx, oldResource, newResource); err != nil { + errs = append(errs, err) + } + + return requeue, utilerrors.NewAggregate(errs) +} diff --git a/pkg/reconciler/cache/labellogicalcluster/labellogicalcluster_reconcile.go b/pkg/reconciler/cache/labellogicalcluster/labellogicalcluster_reconcile.go new file mode 100644 index 00000000000..28aa5ae863d --- /dev/null +++ b/pkg/reconciler/cache/labellogicalcluster/labellogicalcluster_reconcile.go @@ -0,0 +1,58 @@ +/* +Copyright 2023 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package labellogicalcluster + +import ( + "context" + + "k8s.io/klog/v2" + + kcpcorehelper "github.com/kcp-dev/kcp/pkg/apis/core/helper" + corev1alpha1 "github.com/kcp-dev/kcp/pkg/apis/core/v1alpha1" +) + +func (c *controller) reconcile(ctx context.Context, cluster *corev1alpha1.LogicalCluster) (bool, error) { + r := &reconciler{ + groupName: c.groupName, + isRelevantLogicalCluster: c.isRelevantLogicalCluster, + } + return r.reconcile(ctx, cluster) +} + +type reconciler struct { + groupName string + + isRelevantLogicalCluster func(cluster *corev1alpha1.LogicalCluster) bool +} + +func (r *reconciler) reconcile(ctx context.Context, cluster *corev1alpha1.LogicalCluster) (bool, error) { + logger := klog.FromContext(ctx) + + if replicate := r.isRelevantLogicalCluster(cluster); replicate { + var changed bool + if cluster.Annotations, changed = kcpcorehelper.ReplicateFor(cluster.Annotations, r.groupName); changed { + logger.V(2).Info("Replicating LogicalCluster") + } + } else { + var changed bool + if cluster.Annotations, changed = kcpcorehelper.DontReplicateFor(cluster.Annotations, r.groupName); changed { + logger.V(2).Info("Not replicating LogicalCluster") + } + } + + return false, nil +} diff --git a/pkg/reconciler/cache/replication/replication_controller.go b/pkg/reconciler/cache/replication/replication_controller.go index a27c8b1e6f8..7284fe9245e 100644 --- a/pkg/reconciler/cache/replication/replication_controller.go +++ b/pkg/reconciler/cache/replication/replication_controller.go @@ -19,12 +19,14 @@ package replication import ( "context" "fmt" + "strings" "time" kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" kcpdynamic "github.com/kcp-dev/client-go/dynamic" kcpkubernetesinformers "github.com/kcp-dev/client-go/informers" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -84,16 +86,44 @@ func NewController( local: localKcpInformers.Apis().V1alpha1().APIConversions().Informer(), global: globalKcpInformers.Apis().V1alpha1().APIConversions().Informer(), }, + admissionregistrationv1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): { + kind: "MutatingWebhookConfiguration", + local: localKubeInformers.Admissionregistration().V1().MutatingWebhookConfigurations().Informer(), + global: globalKubeInformers.Admissionregistration().V1().MutatingWebhookConfigurations().Informer(), + }, + admissionregistrationv1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): { + kind: "ValidatingWebhookConfiguration", + local: localKubeInformers.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer(), + global: globalKubeInformers.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer(), + }, corev1alpha1.SchemeGroupVersion.WithResource("shards"): { kind: "Shard", local: localKcpInformers.Core().V1alpha1().Shards().Informer(), global: globalKcpInformers.Core().V1alpha1().Shards().Informer(), }, + corev1alpha1.SchemeGroupVersion.WithResource("logicalclusters"): { + kind: "LogicalCluster", + filter: func(u *unstructured.Unstructured) bool { + return u.GetAnnotations()[core.ReplicateAnnotationKey] != "" + }, + local: localKcpInformers.Core().V1alpha1().LogicalClusters().Informer(), + global: globalKcpInformers.Core().V1alpha1().LogicalClusters().Informer(), + }, tenancyv1alpha1.SchemeGroupVersion.WithResource("workspacetypes"): { kind: "WorkspaceType", local: 
localKcpInformers.Tenancy().V1alpha1().WorkspaceTypes().Informer(), global: globalKcpInformers.Tenancy().V1alpha1().WorkspaceTypes().Informer(), }, + tenancyv1alpha1.SchemeGroupVersion.WithResource("synctargets"): { + kind: "SyncTarget", + local: localKcpInformers.Workload().V1alpha1().SyncTargets().Informer(), + global: globalKcpInformers.Workload().V1alpha1().SyncTargets().Informer(), + }, + tenancyv1alpha1.SchemeGroupVersion.WithResource("locations"): { + kind: "Location", + local: localKcpInformers.Scheduling().V1alpha1().Locations().Informer(), + global: globalKcpInformers.Scheduling().V1alpha1().Locations().Informer(), + }, rbacv1.SchemeGroupVersion.WithResource("clusterroles"): { kind: "ClusterRole", filter: func(u *unstructured.Unstructured) bool { @@ -124,16 +154,22 @@ func NewController( // shadow gvr to get the right value in the closure gvr := gvr - info.local.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueObject(obj, gvr) }, - UpdateFunc: func(_, obj interface{}) { c.enqueueObject(obj, gvr) }, - DeleteFunc: func(obj interface{}) { c.enqueueObject(obj, gvr) }, + info.local.AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: IsNoSystemClusterName, + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.enqueueObject(obj, gvr) }, + UpdateFunc: func(_, obj interface{}) { c.enqueueObject(obj, gvr) }, + DeleteFunc: func(obj interface{}) { c.enqueueObject(obj, gvr) }, + }, }) - info.global.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueCacheObject(obj, gvr) }, - UpdateFunc: func(_, obj interface{}) { c.enqueueCacheObject(obj, gvr) }, - DeleteFunc: func(obj interface{}) { c.enqueueCacheObject(obj, gvr) }, + info.global.AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: IsNoSystemClusterName, // not really needed, but cannot harm + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.enqueueCacheObject(obj, gvr) }, + UpdateFunc: func(_, obj interface{}) { c.enqueueCacheObject(obj, gvr) }, + DeleteFunc: func(obj interface{}) { c.enqueueCacheObject(obj, gvr) }, + }, }) } @@ -202,6 +238,24 @@ func (c *controller) processNextWorkItem(ctx context.Context) bool { return true } +func IsNoSystemClusterName(obj interface{}) bool { + key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) + if err != nil { + runtime.HandleError(err) + return false + } + + clusterName, _, _, err := kcpcache.SplitMetaClusterNamespaceKey(key) + if err != nil { + runtime.HandleError(err) + return false + } + if strings.HasPrefix(clusterName.String(), "system:") { + return false + } + return true +} + type controller struct { shardName string queue workqueue.RateLimitingInterface diff --git a/pkg/reconciler/core/replicateclusterrole/replicateclusterrole_controller.go b/pkg/reconciler/core/replicateclusterrole/replicateclusterrole_controller.go new file mode 100644 index 00000000000..d9923815018 --- /dev/null +++ b/pkg/reconciler/core/replicateclusterrole/replicateclusterrole_controller.go @@ -0,0 +1,60 @@ +/* +Copyright 2023 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replicateclusterrole + +import ( + kcprbacinformers "github.com/kcp-dev/client-go/informers/rbac/v1" + kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/kcp-dev/kcp/pkg/apis/core" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/labelclusterroles" +) + +const ( + ControllerName = "kcp-core-replicate-clusterrole" +) + +// NewController returns a new controller for labelling ClusterRole that should be replicated. +func NewController( + kubeClusterClient kcpkubernetesclientset.ClusterInterface, + clusterRoleInformer kcprbacinformers.ClusterRoleClusterInformer, + clusterRoleBindingInformer kcprbacinformers.ClusterRoleBindingClusterInformer, +) (labelclusterroles.Controller, error) { + return labelclusterroles.NewController( + ControllerName, + core.GroupName, + HasAccessRule, + func(crb *rbacv1.ClusterRoleBinding) bool { return false }, + kubeClusterClient, + clusterRoleInformer, + clusterRoleBindingInformer, + ) +} + +func HasAccessRule(cr *rbacv1.ClusterRole) bool { + for _, rule := range cr.Rules { + nonResources := sets.NewString(rule.NonResourceURLs...) + verbs := sets.NewString(rule.Verbs...) + if (nonResources.Has("/") || nonResources.Has("*") || nonResources.Has("/*")) && (verbs.Has("access") || verbs.Has("*")) { + return true + } + } + return false +} diff --git a/pkg/reconciler/core/replicateclusterrolebinding/replicateclusterrolebinding_controller.go b/pkg/reconciler/core/replicateclusterrolebinding/replicateclusterrolebinding_controller.go new file mode 100644 index 00000000000..0b349fc5e5f --- /dev/null +++ b/pkg/reconciler/core/replicateclusterrolebinding/replicateclusterrolebinding_controller.go @@ -0,0 +1,49 @@ +/* +Copyright 2023 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replicateclusterrolebinding + +import ( + kcprbacinformers "github.com/kcp-dev/client-go/informers/rbac/v1" + kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" + + rbacv1 "k8s.io/api/rbac/v1" + + "github.com/kcp-dev/kcp/pkg/apis/core" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/labelclusterrolebindings" + replicateclusterrole "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/replicateclusterrole" +) + +const ( + ControllerName = "kcp-core-replicate-clusterrolebinding" +) + +// NewController returns a new controller for labelling ClusterRoleBinding that should be replicated. 
+func NewController( + kubeClusterClient kcpkubernetesclientset.ClusterInterface, + clusterRoleBindingInformer kcprbacinformers.ClusterRoleBindingClusterInformer, + clusterRoleInformer kcprbacinformers.ClusterRoleClusterInformer, +) (labelclusterrolebindings.Controller, error) { + return labelclusterrolebindings.NewController( + ControllerName, + core.GroupName, + replicateclusterrole.HasUseRule, + func(crb *rbacv1.ClusterRoleBinding) bool { return false }, + kubeClusterClient, + clusterRoleBindingInformer, + clusterRoleInformer, + ) +} diff --git a/pkg/reconciler/tenancy/replicateclusterrole/replicateclusterrole_controller.go b/pkg/reconciler/tenancy/replicateclusterrole/replicateclusterrole_controller.go new file mode 100644 index 00000000000..595df70b44f --- /dev/null +++ b/pkg/reconciler/tenancy/replicateclusterrole/replicateclusterrole_controller.go @@ -0,0 +1,64 @@ +/* +Copyright 2023 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replicateclusterrole + +import ( + kcprbacinformers "github.com/kcp-dev/client-go/informers/rbac/v1" + kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/kcp-dev/kcp/pkg/apis/tenancy" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/labelclusterroles" +) + +const ( + ControllerName = "kcp-tenancy-replicate-clusterrole" +) + +// NewController returns a new controller for labelling ClusterRole that should be replicated. +func NewController( + kubeClusterClient kcpkubernetesclientset.ClusterInterface, + clusterRoleInformer kcprbacinformers.ClusterRoleClusterInformer, + clusterRoleBindingInformer kcprbacinformers.ClusterRoleBindingClusterInformer, +) (labelclusterroles.Controller, error) { + return labelclusterroles.NewController( + ControllerName, + tenancy.GroupName, + HasUseRule, + func(crb *rbacv1.ClusterRoleBinding) bool { return false }, + kubeClusterClient, + clusterRoleInformer, + clusterRoleBindingInformer, + ) +} + +func HasUseRule(cr *rbacv1.ClusterRole) bool { + for _, rule := range cr.Rules { + apiGroups := sets.NewString(rule.APIGroups...) + if !apiGroups.Has(tenancy.GroupName) && !apiGroups.Has("*") { + continue + } + resources := sets.NewString(rule.Resources...) + verbs := sets.NewString(rule.Verbs...) + if (resources.Has("workspacetypes") || resources.Has("*")) && (verbs.Has("use") || verbs.Has("initialize") || verbs.Has("*")) { + return true + } + } + return false +} diff --git a/pkg/reconciler/tenancy/replicateclusterrolebinding/replicateclusterrolebinding_controller.go b/pkg/reconciler/tenancy/replicateclusterrolebinding/replicateclusterrolebinding_controller.go new file mode 100644 index 00000000000..cd4a538116b --- /dev/null +++ b/pkg/reconciler/tenancy/replicateclusterrolebinding/replicateclusterrolebinding_controller.go @@ -0,0 +1,49 @@ +/* +Copyright 2023 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replicateclusterrolebinding + +import ( + kcprbacinformers "github.com/kcp-dev/client-go/informers/rbac/v1" + kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" + + rbacv1 "k8s.io/api/rbac/v1" + + "github.com/kcp-dev/kcp/pkg/apis/tenancy" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/labelclusterrolebindings" + replicateclusterrole "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/replicateclusterrole" +) + +const ( + ControllerName = "kcp-tenancy-replicate-clusterrolebinding" +) + +// NewController returns a new controller for labelling ClusterRoleBinding that should be replicated. +func NewController( + kubeClusterClient kcpkubernetesclientset.ClusterInterface, + clusterRoleBindingInformer kcprbacinformers.ClusterRoleBindingClusterInformer, + clusterRoleInformer kcprbacinformers.ClusterRoleClusterInformer, +) (labelclusterrolebindings.Controller, error) { + return labelclusterrolebindings.NewController( + ControllerName, + tenancy.GroupName, + replicateclusterrole.HasUseRule, + func(crb *rbacv1.ClusterRoleBinding) bool { return false }, + kubeClusterClient, + clusterRoleBindingInformer, + clusterRoleInformer, + ) +} diff --git a/pkg/reconciler/tenancy/replicatelogicalcluster/replicatelogicalcluster_controller.go b/pkg/reconciler/tenancy/replicatelogicalcluster/replicatelogicalcluster_controller.go new file mode 100644 index 00000000000..d95d9b7098e --- /dev/null +++ b/pkg/reconciler/tenancy/replicatelogicalcluster/replicatelogicalcluster_controller.go @@ -0,0 +1,105 @@ +/* +Copyright 2023 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replicatelogicalcluster + +import ( + "fmt" + + kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" + "github.com/kcp-dev/logicalcluster/v3" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" + + corev1alpha1 "github.com/kcp-dev/kcp/pkg/apis/core/v1alpha1" + "github.com/kcp-dev/kcp/pkg/apis/tenancy" + tenancyv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/tenancy/v1alpha1" + kcpclientset "github.com/kcp-dev/kcp/pkg/client/clientset/versioned/cluster" + corev1alpha1informers "github.com/kcp-dev/kcp/pkg/client/informers/externalversions/core/v1alpha1" + tenancyv1alpha1informers "github.com/kcp-dev/kcp/pkg/client/informers/externalversions/tenancy/v1alpha1" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/labellogicalcluster" + "github.com/kcp-dev/kcp/pkg/reconciler/cache/replication" +) + +const ( + ControllerName = "kcp-tenancy-replicate-logicalcluster" +) + +// NewController returns a new controller for labelling LogicalClusters that should be replicated. 
+ +func NewController( + kcpClusterClient kcpclientset.ClusterInterface, + logicalClusterInformer corev1alpha1informers.LogicalClusterClusterInformer, + workspaceTypeInformer tenancyv1alpha1informers.WorkspaceTypeClusterInformer, +) labellogicalcluster.Controller { + logicalClusterLister := logicalClusterInformer.Lister() + workspaceTypeIndexer := workspaceTypeInformer.Informer().GetIndexer() + + c := labellogicalcluster.NewController( + ControllerName, + tenancy.GroupName, + func(cluster *corev1alpha1.LogicalCluster) bool { + // If there are any WorkspaceTypes for this logical cluster, then the LogicalCluster object should be replicated. + keys, err := workspaceTypeIndexer.IndexKeys(kcpcache.ClusterIndexName, kcpcache.ClusterIndexKey(logicalcluster.From(cluster))) + if err != nil { + runtime.HandleError(fmt.Errorf("failed to list WorkspaceTypes: %v", err)) + return false + } + return len(keys) > 0 + }, + kcpClusterClient, + logicalClusterInformer, + ) + + // enqueue the logical cluster every time a WorkspaceType changes + enqueueWorkspaceType := func(obj interface{}) { + if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { + obj = tombstone.Obj + } + + workspaceType, ok := obj.(*tenancyv1alpha1.WorkspaceType) + if !ok { + runtime.HandleError(fmt.Errorf("unexpected object type: %T", obj)) + return + } + + cluster, err := logicalClusterLister.Cluster(logicalcluster.From(workspaceType)).Get(corev1alpha1.LogicalClusterName) + if err != nil && !apierrors.IsNotFound(err) { + runtime.HandleError(fmt.Errorf("failed to get logical cluster: %v", err)) + return + } else if apierrors.IsNotFound(err) { + return + } + + c.EnqueueLogicalCluster(cluster, "reason", "WorkspaceType changed", "workspacetype", workspaceType.Name) + } + + workspaceTypeInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: replication.IsNoSystemClusterName, + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + enqueueWorkspaceType(obj) + }, + DeleteFunc: func(obj interface{}) { + enqueueWorkspaceType(obj) + }, + }, + }) + + return c +} diff --git a/pkg/reconciler/tenancy/workspace/workspace_reconcile_scheduling.go b/pkg/reconciler/tenancy/workspace/workspace_reconcile_scheduling.go index db6455a45ce..7454e5371d5 100644 --- a/pkg/reconciler/tenancy/workspace/workspace_reconcile_scheduling.go +++ b/pkg/reconciler/tenancy/workspace/workspace_reconcile_scheduling.go @@ -50,9 +50,9 @@ import ( ) const ( - // workspaceShardAnnotationKey keeps track on which shard LogicalCluster must be scheduled. The value + // WorkspaceShardHashAnnotationKey keeps track of the shard a LogicalCluster must be scheduled on. The value // is a base36(sha224) hash of the Shard name. - workspaceShardAnnotationKey = "internal.tenancy.kcp.io/shard" + WorkspaceShardHashAnnotationKey = "internal.tenancy.kcp.io/shard" // workspaceClusterAnnotationKey keeps track of the logical cluster on the shard.
workspaceClusterAnnotationKey = "internal.tenancy.kcp.io/cluster" ) @@ -84,7 +84,7 @@ func (r *schedulingReconciler) reconcile(ctx context.Context, workspace *tenancy conditions.MarkTrue(workspace, tenancyv1alpha1.WorkspaceScheduled) return reconcileStatusContinue, nil case workspace.Spec.URL == "" || workspace.Spec.Cluster == "": - shardNameHash, hasShard := workspace.Annotations[workspaceShardAnnotationKey] + shardNameHash, hasShard := workspace.Annotations[WorkspaceShardHashAnnotationKey] clusterNameString, hasCluster := workspace.Annotations[workspaceClusterAnnotationKey] clusterName := logicalcluster.Name(clusterNameString) hasFinalizer := sets.NewString(workspace.Finalizers...).Has(corev1alpha1.LogicalClusterFinalizer) @@ -97,20 +97,23 @@ func (r *schedulingReconciler) reconcile(ctx context.Context, workspace *tenancy } if !hasShard { - shardName, reason, err := r.chooseShardAndMarkCondition(logger, workspace) // call first with status side-effect, before any annotation aka spec change + shard, reason, err := r.chooseShardAndMarkCondition(logger, workspace) // call first with status side-effect, before any annotation aka spec change if err != nil { return reconcileStatusStopAndRequeue, err } - if len(shardName) == 0 { + if shard == nil { conditions.MarkFalse(workspace, tenancyv1alpha1.WorkspaceScheduled, tenancyv1alpha1.WorkspaceReasonUnschedulable, conditionsv1alpha1.ConditionSeverityError, reason) return reconcileStatusContinue, nil // retry is automatic when new shards show up } - logger.V(1).Info("Chose shard", "shard", shardName) - shardNameHash = ByBase36Sha224NameValue(shardName) + logger.V(1).Info("Chose shard", "shard", shard.Name) + shardNameHash = ByBase36Sha224NameValue(shard.Name) if workspace.Annotations == nil { workspace.Annotations = map[string]string{} } - workspace.Annotations[workspaceShardAnnotationKey] = shardNameHash + workspace.Annotations[WorkspaceShardHashAnnotationKey] = shardNameHash + if region, found := shard.Labels["region"]; found { + workspace.Labels["region"] = region + } } if !hasCluster { cluster := r.generateClusterName(logicalcluster.From(workspace).Path().Join(workspace.Name)) @@ -169,40 +172,21 @@ func (r *schedulingReconciler) reconcile(ctx context.Context, workspace *tenancy return reconcileStatusContinue, nil } -func (r *schedulingReconciler) chooseShardAndMarkCondition(logger klog.Logger, workspace *tenancyv1alpha1.Workspace) (shard string, reason string, err error) { +func (r *schedulingReconciler) chooseShardAndMarkCondition(logger klog.Logger, workspace *tenancyv1alpha1.Workspace) (shard *corev1alpha1.Shard, reason string, err error) { selector := labels.Everything() if workspace.Spec.Location != nil { if workspace.Spec.Location.Selector != nil { var err error selector, err = metav1.LabelSelectorAsSelector(workspace.Spec.Location.Selector) if err != nil { - return "", fmt.Sprintf("spec.location.selector is invalid: %v", err), nil // don't retry, cannot do anything useful + return nil, fmt.Sprintf("spec.location.selector is invalid: %v", err), nil // don't retry, cannot do anything useful } } } shards, err := r.listShards(selector) if err != nil { - return "", "", err - } - - // schedule onto the root shard. 
This step is temporary until working with multi-shard env works - // until then we need to assign ws to the root shard otherwise all e2e test will break - if len(shards) > 0 && (workspace.Spec.Location == nil || workspace.Spec.Location.Selector == nil) { - // trim the list to contain only the "root" shard so that we always schedule onto it - for _, shard := range shards { - if shard.Name == "root" { - shards = []*corev1alpha1.Shard{shard} - break - } - } - if len(shards) == 0 { - names := make([]string, 0, len(shards)) - for _, shard := range shards { - names = append(names, shard.Name) - } - return "", "", fmt.Errorf("since no specific shard was requested we default to schedule onto the root shard, but the root shard wasn't found, found shards: %v", names) - } + return nil, "", err } validShards := make([]*corev1alpha1.Shard, 0, len(shards)) @@ -228,10 +212,10 @@ func (r *schedulingReconciler) chooseShardAndMarkCondition(logger klog.Logger, w failures = append(failures, fmt.Errorf(" %s: reason %q, message %q", name, x.reason, x.message)) } logger.Error(utilerrors.NewAggregate(failures), "no valid shards found for workspace, skipping") - return "", "No available shards to schedule the workspace", nil // retry is automatic when new shards show up + return nil, "No available shards to schedule the workspace", nil // retry is automatic when new shards show up } targetShard := validShards[rand.Intn(len(validShards))] - return targetShard.Name, "", nil + return targetShard, "", nil } func (r *schedulingReconciler) createLogicalCluster(ctx context.Context, shard *corev1alpha1.Shard, cluster logicalcluster.Path, parent *corev1alpha1.LogicalCluster, workspace *tenancyv1alpha1.Workspace) error { diff --git a/pkg/reconciler/tenancy/workspacetype/clusterworkspacetype_controller.go b/pkg/reconciler/tenancy/workspacetype/workspacetype_controller.go similarity index 98% rename from pkg/reconciler/tenancy/workspacetype/clusterworkspacetype_controller.go rename to pkg/reconciler/tenancy/workspacetype/workspacetype_controller.go index 06a51df16bd..3bf3711a6c9 100644 --- a/pkg/reconciler/tenancy/workspacetype/clusterworkspacetype_controller.go +++ b/pkg/reconciler/tenancy/workspacetype/workspacetype_controller.go @@ -51,7 +51,7 @@ const ( ControllerName = "kcp-workspacetype" ) -// NewController returns a new controller for APIExports. +// NewController returns a new controller for WorkspaceTypes. func NewController( kcpClusterClient kcpclientset.ClusterInterface, workspaceTypeInformer tenancyinformers.WorkspaceTypeClusterInformer, @@ -108,7 +108,7 @@ func NewController( return c, nil } -// controller reconciles APIExports. It ensures an export's identity secret exists and is valid. +// controller reconciles WorkspaceTypes. It ensures a WorkspaceType has assigned a virtual workspace URL address. 
type controller struct { queue workqueue.RateLimitingInterface diff --git a/pkg/reconciler/tenancy/workspacetype/clusterworkspacetype_controller_reconcile.go b/pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile.go similarity index 91% rename from pkg/reconciler/tenancy/workspacetype/clusterworkspacetype_controller_reconcile.go rename to pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile.go index fc08bf3af69..120978d10e3 100644 --- a/pkg/reconciler/tenancy/workspacetype/clusterworkspacetype_controller_reconcile.go +++ b/pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile.go @@ -61,14 +61,14 @@ func (c *controller) updateVirtualWorkspaceURLs(ctx context.Context, wt *tenancy desiredURLs := sets.NewString() for _, shard := range shards { - if shard.Spec.ExternalURL == "" { + if shard.Spec.VirtualWorkspaceURL == "" { continue } - u, err := url.Parse(shard.Spec.ExternalURL) + u, err := url.Parse(shard.Spec.VirtualWorkspaceURL) if err != nil { // Should never happen - logger.Error(err, "error parsing shard.spec.externalURL", "externalURL", shard.Spec.ExternalURL) + logger.Error(err, "error parsing shard.spec.virtualWorkspaceURL", "virtualWorkspaceURL", shard.Spec.VirtualWorkspaceURL) continue } diff --git a/pkg/reconciler/tenancy/workspacetype/clusterworkspacetype_controller_reconcile_test.go b/pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile_test.go similarity index 93% rename from pkg/reconciler/tenancy/workspacetype/clusterworkspacetype_controller_reconcile_test.go rename to pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile_test.go index 63e55c6298f..6a2495f36e0 100644 --- a/pkg/reconciler/tenancy/workspacetype/clusterworkspacetype_controller_reconcile_test.go +++ b/pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile_test.go @@ -116,9 +116,9 @@ func TestReconcile(t *testing.T) { { name: "URLs from shards propagate fill empty status", shards: []*corev1alpha1.Shard{ - {Spec: corev1alpha1.ShardSpec{ExternalURL: "https://whatever.com"}}, - {Spec: corev1alpha1.ShardSpec{ExternalURL: "https://something.com"}}, - {Spec: corev1alpha1.ShardSpec{ExternalURL: "https://item.com"}}, + {Spec: corev1alpha1.ShardSpec{VirtualWorkspaceURL: "https://whatever.com"}}, + {Spec: corev1alpha1.ShardSpec{VirtualWorkspaceURL: "https://something.com"}}, + {Spec: corev1alpha1.ShardSpec{VirtualWorkspaceURL: "https://item.com"}}, }, wt: &tenancyv1alpha1.WorkspaceType{ ObjectMeta: metav1.ObjectMeta{ @@ -157,9 +157,9 @@ func TestReconcile(t *testing.T) { { name: "URLs from shards propagate to partially filled status", shards: []*corev1alpha1.Shard{ - {Spec: corev1alpha1.ShardSpec{ExternalURL: "https://whatever.com"}}, - {Spec: corev1alpha1.ShardSpec{ExternalURL: "https://something.com"}}, - {Spec: corev1alpha1.ShardSpec{ExternalURL: "https://item.com"}}, + {Spec: corev1alpha1.ShardSpec{VirtualWorkspaceURL: "https://whatever.com"}}, + {Spec: corev1alpha1.ShardSpec{VirtualWorkspaceURL: "https://something.com"}}, + {Spec: corev1alpha1.ShardSpec{VirtualWorkspaceURL: "https://item.com"}}, }, wt: &tenancyv1alpha1.WorkspaceType{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/server/config.go b/pkg/server/config.go index 5d5b6a6cbd6..f2349adfcfe 100644 --- a/pkg/server/config.go +++ b/pkg/server/config.go @@ -54,8 +54,6 @@ import ( apisv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/apis/v1alpha1" "github.com/kcp-dev/kcp/pkg/authorization" bootstrappolicy "github.com/kcp-dev/kcp/pkg/authorization/bootstrap" - 
cacheclient "github.com/kcp-dev/kcp/pkg/cache/client" - "github.com/kcp-dev/kcp/pkg/cache/client/shard" kcpclientset "github.com/kcp-dev/kcp/pkg/client/clientset/versioned/cluster" kcpinformers "github.com/kcp-dev/kcp/pkg/client/informers/externalversions" "github.com/kcp-dev/kcp/pkg/conversion" @@ -186,24 +184,15 @@ func NewConfig(opts *kcpserveroptions.CompletedOptions) (*Config, error) { return nil, err } - var cacheClientConfig *rest.Config - if len(c.Options.Cache.KubeconfigFile) > 0 { - cacheClientConfig, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: c.Options.Cache.KubeconfigFile}, nil).ClientConfig() - if err != nil { - return nil, fmt.Errorf("failed to load the kubeconfig from: %s, for a cache client, err: %w", c.Options.Cache.KubeconfigFile, err) - } - } else { - cacheClientConfig = rest.CopyConfig(c.GenericConfig.LoopbackClientConfig) + cacheClientConfig, err := c.Options.Cache.Client.RestConfig(rest.CopyConfig(c.GenericConfig.LoopbackClientConfig)) + if err != nil { + return nil, err } - rt := cacheclient.WithCacheServiceRoundTripper(cacheClientConfig) - rt = cacheclient.WithShardNameFromContextRoundTripper(rt) - rt = cacheclient.WithDefaultShardRoundTripper(rt, shard.Wildcard) - - cacheKcpClusterClient, err := kcpclientset.NewForConfig(rt) + cacheKcpClusterClient, err := kcpclientset.NewForConfig(cacheClientConfig) if err != nil { return nil, err } - cacheKubeClusterClient, err := kcpkubernetesclientset.NewForConfig(rt) + cacheKubeClusterClient, err := kcpkubernetesclientset.NewForConfig(cacheClientConfig) if err != nil { return nil, err } @@ -215,7 +204,7 @@ func NewConfig(opts *kcpserveroptions.CompletedOptions) (*Config, error) { cacheKubeClusterClient, resyncPeriod, ) - c.CacheDynamicClient, err = kcpdynamic.NewForConfig(rt) + c.CacheDynamicClient, err = kcpdynamic.NewForConfig(cacheClientConfig) if err != nil { return nil, err } @@ -448,6 +437,7 @@ func NewConfig(opts *kcpserveroptions.CompletedOptions) (*Config, error) { admissionPluginInitializers := []admission.PluginInitializer{ kcpadmissioninitializers.NewKcpInformersInitializer(c.KcpSharedInformerFactory, c.CacheKcpSharedInformerFactory), + kcpadmissioninitializers.NewKubeInformersInitializer(c.KubeSharedInformerFactory, c.CacheKubeSharedInformerFactory), kcpadmissioninitializers.NewKubeClusterClientInitializer(c.KubeClusterClient), kcpadmissioninitializers.NewKcpClusterClientInitializer(c.KcpClusterClient), kcpadmissioninitializers.NewDeepSARClientInitializer(c.DeepSARClient), diff --git a/pkg/server/controllers.go b/pkg/server/controllers.go index 5c672d13b53..3266a0f8372 100644 --- a/pkg/server/controllers.go +++ b/pkg/server/controllers.go @@ -62,11 +62,14 @@ import ( "github.com/kcp-dev/kcp/pkg/reconciler/apis/extraannotationsync" "github.com/kcp-dev/kcp/pkg/reconciler/apis/identitycache" "github.com/kcp-dev/kcp/pkg/reconciler/apis/permissionclaimlabel" - "github.com/kcp-dev/kcp/pkg/reconciler/apis/replicationclusterrole" - "github.com/kcp-dev/kcp/pkg/reconciler/apis/replicationclusterrolebinding" + apisreplicateclusterrole "github.com/kcp-dev/kcp/pkg/reconciler/apis/replicateclusterrole" + apisreplicateclusterrolebinding "github.com/kcp-dev/kcp/pkg/reconciler/apis/replicateclusterrolebinding" + apisreplicatelogicalcluster "github.com/kcp-dev/kcp/pkg/reconciler/apis/replicatelogicalcluster" "github.com/kcp-dev/kcp/pkg/reconciler/cache/replication" logicalclusterctrl "github.com/kcp-dev/kcp/pkg/reconciler/core/logicalcluster" 
"github.com/kcp-dev/kcp/pkg/reconciler/core/logicalclusterdeletion" + coresreplicateclusterrole "github.com/kcp-dev/kcp/pkg/reconciler/core/replicateclusterrole" + corereplicateclusterrolebinding "github.com/kcp-dev/kcp/pkg/reconciler/core/replicateclusterrolebinding" "github.com/kcp-dev/kcp/pkg/reconciler/core/shard" "github.com/kcp-dev/kcp/pkg/reconciler/garbagecollector" "github.com/kcp-dev/kcp/pkg/reconciler/kubequota" @@ -75,6 +78,8 @@ import ( "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/bootstrap" "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/initialization" tenancylogicalcluster "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/logicalcluster" + tenancyreplicateclusterrole "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/replicateclusterrole" + tenancyreplicateclusterrolebinding "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/replicateclusterrolebinding" "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/workspace" "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/workspacetype" workloadsapiexport "github.com/kcp-dev/kcp/pkg/reconciler/workload/apiexport" @@ -911,12 +916,10 @@ func (s *Server) installCRDCleanupController(ctx context.Context, config *rest.C func (s *Server) installAPIExportController(ctx context.Context, config *rest.Config, server *genericapiserver.GenericAPIServer) error { config = rest.CopyConfig(config) config = rest.AddUserAgent(config, apiexport.ControllerName) - kcpClusterClient, err := kcpclientset.NewForConfig(config) if err != nil { return err } - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) if err != nil { return err @@ -934,7 +937,35 @@ func (s *Server) installAPIExportController(ctx context.Context, config *rest.Co return err } - clusterRoleReplication, err := replicationclusterrole.NewController( + return server.AddPostStartHook(postStartHookName(apiexport.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { + logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(apiexport.ControllerName)) + // do custom wait logic here because APIExports+APIBindings are special as system CRDs, + // and the controllers must run as soon as these two informers are up in order to bootstrap + // the rest of the system. Everything else in the kcp clientset is APIBinding based. + if err := wait.PollImmediateInfiniteWithContext(goContext(hookContext), time.Millisecond*100, func(ctx context.Context) (bool, error) { + crdsSynced := s.ApiExtensionsSharedInformerFactory.Apiextensions().V1().CustomResourceDefinitions().Informer().HasSynced() + exportsSynced := s.KcpSharedInformerFactory.Apis().V1alpha1().APIExports().Informer().HasSynced() + return crdsSynced && exportsSynced, nil + }); err != nil { + logger.Error(err, "failed to finish post-start-hook") + return nil // don't klog.Fatal. This only happens when context is cancelled. 
+ } + + go c.Start(goContext(hookContext), 2) + + return nil + }) +} + +func (s *Server) installApisReplicateClusterRoleControllers(ctx context.Context, config *rest.Config, server *genericapiserver.GenericAPIServer) error { + config = rest.CopyConfig(config) + config = rest.AddUserAgent(config, apisreplicateclusterrole.ControllerName) + kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) + if err != nil { + return err + } + + c, err := apisreplicateclusterrole.NewController( kubeClusterClient, s.KubeSharedInformerFactory.Rbac().V1().ClusterRoles(), s.KubeSharedInformerFactory.Rbac().V1().ClusterRoleBindings(), @@ -943,7 +974,58 @@ func (s *Server) installAPIExportController(ctx context.Context, config *rest.Co return err } - clusterRoleBindingReplication, err := replicationclusterrolebinding.NewController( + return server.AddPostStartHook(postStartHookName(apisreplicateclusterrole.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { + logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(apisreplicateclusterrole.ControllerName)) + if err := s.waitForSync(hookContext.StopCh); err != nil { + logger.Error(err, "failed to finish post-start-hook") + return nil // don't klog.Fatal. This only happens when context is cancelled. + } + + go c.Start(goContext(hookContext), 2) + + return nil + }) +} + +func (s *Server) installCoreReplicateClusterRoleControllers(ctx context.Context, config *rest.Config, server *genericapiserver.GenericAPIServer) error { + config = rest.CopyConfig(config) + config = rest.AddUserAgent(config, coresreplicateclusterrole.ControllerName) + kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) + if err != nil { + return err + } + + c, err := coresreplicateclusterrole.NewController( + kubeClusterClient, + s.KubeSharedInformerFactory.Rbac().V1().ClusterRoles(), + s.KubeSharedInformerFactory.Rbac().V1().ClusterRoleBindings(), + ) + if err != nil { + return err + } + + return server.AddPostStartHook(postStartHookName(coresreplicateclusterrole.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { + logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(coresreplicateclusterrole.ControllerName)) + if err := s.waitForSync(hookContext.StopCh); err != nil { + logger.Error(err, "failed to finish post-start-hook") + return nil // don't klog.Fatal. This only happens when context is cancelled. 
+ } + + go c.Start(goContext(hookContext), 2) + + return nil + }) +} + +func (s *Server) installApisReplicateClusterRoleBindingControllers(ctx context.Context, config *rest.Config, server *genericapiserver.GenericAPIServer) error { + config = rest.CopyConfig(config) + config = rest.AddUserAgent(config, apisreplicateclusterrolebinding.ControllerName) + kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) + if err != nil { + return err + } + + c, err := apisreplicateclusterrolebinding.NewController( kubeClusterClient, s.KubeSharedInformerFactory.Rbac().V1().ClusterRoleBindings(), s.KubeSharedInformerFactory.Rbac().V1().ClusterRoles(), @@ -952,23 +1034,131 @@ func (s *Server) installAPIExportController(ctx context.Context, config *rest.Co return err } - return server.AddPostStartHook(postStartHookName(apiexport.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(apiexport.ControllerName)) - // do custom wait logic here because APIExports+APIBindings are special as system CRDs, - // and the controllers must run as soon as these two informers are up in order to bootstrap - // the rest of the system. Everything else in the kcp clientset is APIBinding based. - if err := wait.PollImmediateInfiniteWithContext(goContext(hookContext), time.Millisecond*100, func(ctx context.Context) (bool, error) { - crdsSynced := s.ApiExtensionsSharedInformerFactory.Apiextensions().V1().CustomResourceDefinitions().Informer().HasSynced() - exportsSynced := s.KcpSharedInformerFactory.Apis().V1alpha1().APIExports().Informer().HasSynced() - return crdsSynced && exportsSynced, nil - }); err != nil { + return server.AddPostStartHook(postStartHookName(apisreplicateclusterrolebinding.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { + logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(apisreplicateclusterrolebinding.ControllerName)) + if err := s.waitForSync(hookContext.StopCh); err != nil { + logger.Error(err, "failed to finish post-start-hook") + return nil // don't klog.Fatal. This only happens when context is cancelled. + } + + go c.Start(goContext(hookContext), 2) + + return nil + }) +} + +func (s *Server) installApisReplicateLogicalClusterControllers(ctx context.Context, config *rest.Config, server *genericapiserver.GenericAPIServer) error { + config = rest.CopyConfig(config) + config = rest.AddUserAgent(config, apisreplicatelogicalcluster.ControllerName) + kcpClusterClient, err := kcpclientset.NewForConfig(config) + if err != nil { + return err + } + + c := apisreplicatelogicalcluster.NewController( + kcpClusterClient, + s.KcpSharedInformerFactory.Core().V1alpha1().LogicalClusters(), + s.KcpSharedInformerFactory.Apis().V1alpha1().APIExports(), + ) + + return server.AddPostStartHook(postStartHookName(apisreplicatelogicalcluster.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { + logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(apisreplicatelogicalcluster.ControllerName)) + if err := s.waitForSync(hookContext.StopCh); err != nil { + logger.Error(err, "failed to finish post-start-hook") + return nil // don't klog.Fatal. This only happens when context is cancelled. 
+ } + + go c.Start(goContext(hookContext), 2) + + return nil + }) +} + +func (s *Server) installCoreReplicateClusterRoleBindingControllers(ctx context.Context, config *rest.Config, server *genericapiserver.GenericAPIServer) error { + config = rest.CopyConfig(config) + config = rest.AddUserAgent(config, corereplicateclusterrolebinding.ControllerName) + kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) + if err != nil { + return err + } + + c, err := corereplicateclusterrolebinding.NewController( + kubeClusterClient, + s.KubeSharedInformerFactory.Rbac().V1().ClusterRoleBindings(), + s.KubeSharedInformerFactory.Rbac().V1().ClusterRoles(), + ) + if err != nil { + return err + } + + return server.AddPostStartHook(postStartHookName(corereplicateclusterrolebinding.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { + logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(corereplicateclusterrolebinding.ControllerName)) + if err := s.waitForSync(hookContext.StopCh); err != nil { + logger.Error(err, "failed to finish post-start-hook") + return nil // don't klog.Fatal. This only happens when context is cancelled. + } + + go c.Start(goContext(hookContext), 2) + + return nil + }) +} + +func (s *Server) installTenancyReplicateClusterRoleControllers(ctx context.Context, config *rest.Config, server *genericapiserver.GenericAPIServer) error { + config = rest.CopyConfig(config) + config = rest.AddUserAgent(config, tenancyreplicateclusterrole.ControllerName) + kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) + if err != nil { + return err + } + + c, err := tenancyreplicateclusterrole.NewController( + kubeClusterClient, + s.KubeSharedInformerFactory.Rbac().V1().ClusterRoles(), + s.KubeSharedInformerFactory.Rbac().V1().ClusterRoleBindings(), + ) + if err != nil { + return err + } + + return server.AddPostStartHook(postStartHookName(tenancyreplicateclusterrole.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { + logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(tenancyreplicateclusterrole.ControllerName)) + if err := s.waitForSync(hookContext.StopCh); err != nil { + logger.Error(err, "failed to finish post-start-hook") + return nil // don't klog.Fatal. This only happens when context is cancelled. + } + + go c.Start(goContext(hookContext), 2) + + return nil + }) +} + +func (s *Server) installTenancyReplicateClusterRoleBindingControllers(ctx context.Context, config *rest.Config, server *genericapiserver.GenericAPIServer) error { + config = rest.CopyConfig(config) + config = rest.AddUserAgent(config, tenancyreplicateclusterrolebinding.ControllerName) + kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) + if err != nil { + return err + } + + c, err := tenancyreplicateclusterrolebinding.NewController( + kubeClusterClient, + s.KubeSharedInformerFactory.Rbac().V1().ClusterRoleBindings(), + s.KubeSharedInformerFactory.Rbac().V1().ClusterRoles(), + ) + if err != nil { + return err + } + + return server.AddPostStartHook(postStartHookName(tenancyreplicateclusterrolebinding.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { + logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(tenancyreplicateclusterrolebinding.ControllerName)) + if err := s.waitForSync(hookContext.StopCh); err != nil { logger.Error(err, "failed to finish post-start-hook") return nil // don't klog.Fatal. 
This only happens when context is cancelled. } go c.Start(goContext(hookContext), 2) - go clusterRoleReplication.Start(goContext(hookContext), 2) - go clusterRoleBindingReplication.Start(goContext(hookContext), 2) return nil }) diff --git a/pkg/server/options/cache.go b/pkg/server/options/cache.go index cc0ddcc8397..bcf19e193bc 100644 --- a/pkg/server/options/cache.go +++ b/pkg/server/options/cache.go @@ -19,6 +19,7 @@ package options import ( "github.com/spf13/pflag" + cacheclientoptions "github.com/kcp-dev/kcp/pkg/cache/client/options" cacheoptions "github.com/kcp-dev/kcp/pkg/cache/server/options" ) @@ -29,9 +30,12 @@ type cacheCompleted struct { func (c cacheCompleted) Validate() []error { var errs []error + if err := c.Server.Validate(); err != nil { errs = append(errs, err...) } + errs = append(errs, c.Extra.Client.Validate()...) + return errs } @@ -44,18 +48,20 @@ type Extra struct { // Enabled if true indicates that the cache server should be run with the kcp-server (in-process) Enabled bool - // KubeconfigFile path to a file that holds a kubeconfig for the cache server - KubeconfigFile string + Client cacheclientoptions.Cache } func NewCache(rootDir string) *Cache { return &Cache{ Server: cacheoptions.NewOptions(rootDir), + Extra: Extra{ + Client: *cacheclientoptions.NewCache(), + }, } } func (c *Cache) AddFlags(fs *pflag.FlagSet) { - fs.StringVar(&c.KubeconfigFile, "cache-server-kubeconfig-file", c.KubeconfigFile, "Kubeconfig for the cache server this instance connects to (defaults to loopback configuration).") + c.Client.AddFlags(fs) // note do not add cache server's flag c.Server.AddFlags(fs) // it will cause an undefined behavior as some flags will be overwritten (also defined by the kcp server) diff --git a/pkg/server/options/flags.go b/pkg/server/options/flags.go index b4c6d1f2af8..52a7fa038c7 100644 --- a/pkg/server/options/flags.go +++ b/pkg/server/options/flags.go @@ -184,7 +184,7 @@ var ( "sync-target-heartbeat-threshold", // Amount of time to wait for a successful heartbeat before marking the cluster as not ready. // KCP Cache Server flags - "cache-server-kubeconfig-file", // Kubeconfig for the cache server this instance connects to (defaults to loopback configuration). + "cache-kubeconfig", // Kubeconfig for the cache server this instance connects to (defaults to loopback configuration). // generic flags "cors-allowed-origins", // List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled. 
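The install helpers added above for the apis, core, and tenancy replication controllers all repeat one pattern: copy the rest config, tag it with a per-controller user agent, build the controller from the shared RBAC informers, and register a post-start hook that waits for the informer caches to sync before starting two workers. The sketch below is for orientation only and is not part of this change; it assumes the Server receiver and the postStartHookName, waitForSync, and goContext helpers that pkg/server/controllers.go already defines, and the installReplicationController name and construct callback are hypothetical.

func (s *Server) installReplicationController(
	ctx context.Context,
	config *rest.Config,
	server *genericapiserver.GenericAPIServer,
	controllerName string,
	construct func(client kcpkubernetesclientset.ClusterInterface) (interface{ Start(context.Context, int) }, error),
) error {
	// Per-controller user agent so requests are attributable in API server logs.
	config = rest.CopyConfig(config)
	config = rest.AddUserAgent(config, controllerName)

	kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config)
	if err != nil {
		return err
	}

	// The caller wires up the concrete controller from the shared informer factories.
	c, err := construct(kubeClusterClient)
	if err != nil {
		return err
	}

	// Start the controller only after the informer caches have synced.
	return server.AddPostStartHook(postStartHookName(controllerName), func(hookContext genericapiserver.PostStartHookContext) error {
		logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(controllerName))
		if err := s.waitForSync(hookContext.StopCh); err != nil {
			logger.Error(err, "failed to finish post-start-hook")
			return nil // don't klog.Fatal. This only happens when context is cancelled.
		}

		go c.Start(goContext(hookContext), 2)

		return nil
	})
}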
diff --git a/pkg/server/server.go b/pkg/server/server.go index cddbcd037b1..a17b5764551 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -363,9 +363,9 @@ func (s *Server) Run(ctx context.Context) error { if s.Options.Controllers.EnableAll || enabled.Has("cluster") { // bootstrap root compute workspace - computeBoostraphookName := "rootComputeBoostrap" - if err := s.AddPostStartHook(computeBoostraphookName, func(hookContext genericapiserver.PostStartHookContext) error { - logger := logger.WithValues("postStartHook", computeBoostraphookName) + computeBoostrapHookName := "rootComputeBoostrap" + if err := s.AddPostStartHook(computeBoostrapHookName, func(hookContext genericapiserver.PostStartHookContext) error { + logger := logger.WithValues("postStartHook", computeBoostrapHookName) if s.Options.Extra.ShardName == corev1alpha1.RootShard { // the root ws is only present on the root shard logger.Info("waiting to bootstrap root compute workspace until root phase1 is complete") @@ -441,6 +441,47 @@ func (s *Server) Run(ctx context.Context) error { } } + if s.Options.Controllers.EnableAll || enabled.Has("apisreplicateclusterrole") { + if err := s.installApisReplicateClusterRoleControllers(ctx, controllerConfig, delegationChainHead); err != nil { + return err + } + } + + if s.Options.Controllers.EnableAll || enabled.Has("apisreplicateclusterrolebinding") { + if err := s.installApisReplicateClusterRoleBindingControllers(ctx, controllerConfig, delegationChainHead); err != nil { + return err + } + } + + if s.Options.Controllers.EnableAll || enabled.Has("apisreplicatelogicalcluster") { + if err := s.installApisReplicateLogicalClusterControllers(ctx, controllerConfig, delegationChainHead); err != nil { + return err + } + } + + if s.Options.Controllers.EnableAll || enabled.Has("corereplicateclusterrole") { + if err := s.installCoreReplicateClusterRoleControllers(ctx, controllerConfig, delegationChainHead); err != nil { + return err + } + } + + if s.Options.Controllers.EnableAll || enabled.Has("corereplicateclusterrolebinding") { + if err := s.installCoreReplicateClusterRoleBindingControllers(ctx, controllerConfig, delegationChainHead); err != nil { + return err + } + } + + if s.Options.Controllers.EnableAll || enabled.Has("tenancyreplicateclusterrole") { + if err := s.installTenancyReplicateClusterRoleControllers(ctx, controllerConfig, delegationChainHead); err != nil { + return err + } + } + + if s.Options.Controllers.EnableAll || enabled.Has("tenancyreplicateclusterrolebinding") { + if err := s.installTenancyReplicateClusterRoleBindingControllers(ctx, controllerConfig, delegationChainHead); err != nil { + return err + } + } + if s.Options.Controllers.EnableAll || enabled.Has("apiexportendpointslice") { if err := s.installAPIExportEndpointSliceController(ctx, controllerConfig, delegationChainHead); err != nil { return err @@ -496,7 +537,7 @@ func (s *Server) Run(ctx context.Context) error { } } - if len(s.Options.Cache.KubeconfigFile) == 0 { + if len(s.Options.Cache.Client.KubeconfigFile) == 0 { if err := s.installCacheServer(ctx); err != nil { return err } diff --git a/pkg/server/virtual.go b/pkg/server/virtual.go index 29f2e92fa92..62efe6b7fe2 100644 --- a/pkg/server/virtual.go +++ b/pkg/server/virtual.go @@ -55,6 +55,7 @@ func (s *Server) installVirtualWorkspaces( virtualcommandoptions.DefaultRootPathPrefix, s.KubeSharedInformerFactory, s.KcpSharedInformerFactory, + s.CacheKcpSharedInformerFactory, ) if err != nil { return err diff --git a/pkg/virtual/apiexport/builder/build.go
b/pkg/virtual/apiexport/builder/build.go index 03336ec4f1a..b3356a59be3 100644 --- a/pkg/virtual/apiexport/builder/build.go +++ b/pkg/virtual/apiexport/builder/build.go @@ -56,7 +56,7 @@ func BuildVirtualWorkspace( kubeClusterClient, deepSARClient kcpkubernetesclientset.ClusterInterface, dynamicClusterClient kcpdynamic.ClusterInterface, kcpClusterClient kcpclientset.ClusterInterface, - wildcardKcpInformers kcpinformers.SharedInformerFactory, + cachedKcpInformers kcpinformers.SharedInformerFactory, ) ([]rootapiserver.NamedVirtualWorkspace, error) { if !strings.HasSuffix(rootPathPrefix, "/") { rootPathPrefix += "/" @@ -88,8 +88,8 @@ func BuildVirtualWorkspace( BootstrapAPISetManagement: func(mainConfig genericapiserver.CompletedConfig) (apidefinition.APIDefinitionSetGetter, error) { apiReconciler, err := apireconciler.NewAPIReconciler( kcpClusterClient, - wildcardKcpInformers.Apis().V1alpha1().APIResourceSchemas(), - wildcardKcpInformers.Apis().V1alpha1().APIExports(), + cachedKcpInformers.Apis().V1alpha1().APIResourceSchemas(), + cachedKcpInformers.Apis().V1alpha1().APIExports(), func(apiResourceSchema *apisv1alpha1.APIResourceSchema, version string, identityHash string, optionalLabelRequirements labels.Requirements) (apidefinition.APIDefinition, error) { ctx, cancelFn := context.WithCancel(context.Background()) @@ -133,8 +133,8 @@ func BuildVirtualWorkspace( defer close(readyCh) for name, informer := range map[string]cache.SharedIndexInformer{ - "apiresourceschemas": wildcardKcpInformers.Apis().V1alpha1().APIResourceSchemas().Informer(), - "apiexports": wildcardKcpInformers.Apis().V1alpha1().APIExports().Informer(), + "apiresourceschemas": cachedKcpInformers.Apis().V1alpha1().APIResourceSchemas().Informer(), + "apiexports": cachedKcpInformers.Apis().V1alpha1().APIExports().Informer(), } { if !cache.WaitForNamedCacheSync(name, hookContext.StopCh, informer.HasSynced) { klog.Background().Error(nil, "informer not synced") @@ -150,7 +150,7 @@ func BuildVirtualWorkspace( return apiReconciler, nil }, - Authorizer: newAuthorizer(kubeClusterClient, deepSARClient, wildcardKcpInformers), + Authorizer: newAuthorizer(kubeClusterClient, deepSARClient, cachedKcpInformers), } return []rootapiserver.NamedVirtualWorkspace{ @@ -223,8 +223,8 @@ func digestUrl(urlPath, rootPathPrefix string) ( return cluster, dynamiccontext.APIDomainKey(key), strings.TrimSuffix(urlPath, realPath), true } -func newAuthorizer(kubeClusterClient, deepSARClient kcpkubernetesclientset.ClusterInterface, kcpinformers kcpinformers.SharedInformerFactory) authorizer.Authorizer { - maximalPermissionAuth := virtualapiexportauth.NewMaximalPermissionAuthorizer(deepSARClient, kcpinformers.Apis().V1alpha1().APIExports()) +func newAuthorizer(kubeClusterClient, deepSARClient kcpkubernetesclientset.ClusterInterface, cachedKcpInformers kcpinformers.SharedInformerFactory) authorizer.Authorizer { + maximalPermissionAuth := virtualapiexportauth.NewMaximalPermissionAuthorizer(deepSARClient, cachedKcpInformers.Apis().V1alpha1().APIExports()) maximalPermissionAuth = authorization.NewDecorator("virtual.apiexport.maxpermissionpolicy.authorization.kcp.io", maximalPermissionAuth).AddAuditLogging().AddAnonymization().AddReasonAnnotation() apiExportsContentAuth := virtualapiexportauth.NewAPIExportsContentAuthorizer(maximalPermissionAuth, kubeClusterClient) diff --git a/pkg/virtual/apiexport/options/options.go b/pkg/virtual/apiexport/options/options.go index 266f26a19d2..af0040235e8 100644 --- a/pkg/virtual/apiexport/options/options.go +++ 
b/pkg/virtual/apiexport/options/options.go @@ -56,7 +56,7 @@ func (o *APIExport) Validate(flagPrefix string) []error { func (o *APIExport) NewVirtualWorkspaces( rootPathPrefix string, config *rest.Config, - wildcardKcpInformers kcpinformers.SharedInformerFactory, + cachedKcpInformers kcpinformers.SharedInformerFactory, ) (workspaces []rootapiserver.NamedVirtualWorkspace, err error) { config = rest.AddUserAgent(rest.CopyConfig(config), "apiexport-virtual-workspace") kcpClusterClient, err := kcpclientset.NewForConfig(config) @@ -76,5 +76,5 @@ func (o *APIExport) NewVirtualWorkspaces( return nil, err } - return builder.BuildVirtualWorkspace(path.Join(rootPathPrefix, builder.VirtualWorkspaceName), kubeClusterClient, deepSARClient, dynamicClusterClient, kcpClusterClient, wildcardKcpInformers) + return builder.BuildVirtualWorkspace(path.Join(rootPathPrefix, builder.VirtualWorkspaceName), kubeClusterClient, deepSARClient, dynamicClusterClient, kcpClusterClient, cachedKcpInformers) } diff --git a/pkg/virtual/options/options.go b/pkg/virtual/options/options.go index 323d2cb9fd9..32b0ebedd52 100644 --- a/pkg/virtual/options/options.go +++ b/pkg/virtual/options/options.go @@ -65,14 +65,14 @@ func (o *Options) NewVirtualWorkspaces( config *rest.Config, rootPathPrefix string, wildcardKubeInformers kcpkubernetesinformers.SharedInformerFactory, - wildcardKcpInformers kcpinformers.SharedInformerFactory, + wildcardKcpInformers, cachedKcpInformers kcpinformers.SharedInformerFactory, ) ([]rootapiserver.NamedVirtualWorkspace, error) { syncer, err := o.Syncer.NewVirtualWorkspaces(rootPathPrefix, config, wildcardKcpInformers) if err != nil { return nil, err } - apiexports, err := o.APIExport.NewVirtualWorkspaces(rootPathPrefix, config, wildcardKcpInformers) + apiexports, err := o.APIExport.NewVirtualWorkspaces(rootPathPrefix, config, cachedKcpInformers) if err != nil { return nil, err } diff --git a/test/e2e/apibinding/apibinding_deletion_test.go b/test/e2e/apibinding/apibinding_deletion_test.go index 896f683277a..74526052eed 100644 --- a/test/e2e/apibinding/apibinding_deletion_test.go +++ b/test/e2e/apibinding/apibinding_deletion_test.go @@ -103,10 +103,7 @@ func TestAPIBindingDeletion(t *testing.T) { framework.Eventually(t, func() (bool, string) { _, err := kcpClusterClient.Cluster(consumerPath).ApisV1alpha1().APIBindings().Create(ctx, apiBinding, metav1.CreateOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" + return err == nil, fmt.Sprintf("Error creating APIBinding: %v", err) }, wait.ForeverTestTimeout, time.Millisecond*100) t.Logf("Should have finalizer added in apibinding") diff --git a/test/e2e/apibinding/apibinding_permissionclaims_test.go b/test/e2e/apibinding/apibinding_permissionclaims_test.go index 34671c235bd..d9407800b49 100644 --- a/test/e2e/apibinding/apibinding_permissionclaims_test.go +++ b/test/e2e/apibinding/apibinding_permissionclaims_test.go @@ -272,10 +272,7 @@ func bindConsumerToProvider(ctx context.Context, t *testing.T, consumerWorkspace framework.Eventually(t, func() (bool, string) { _, err := kcpClusterClients.Cluster(consumerWorkspace).ApisV1alpha1().APIBindings().Create(ctx, apiBinding, metav1.CreateOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" + return err == nil, fmt.Sprintf("Error creating APIBinding: %v", err) }, wait.ForeverTestTimeout, time.Millisecond*100) consumerWorkspaceClient, err := kcpclientset.NewForConfig(cfg) diff --git a/test/e2e/apibinding/apibinding_protected_test.go 
b/test/e2e/apibinding/apibinding_protected_test.go index 32b54da79dd..7439188393c 100644 --- a/test/e2e/apibinding/apibinding_protected_test.go +++ b/test/e2e/apibinding/apibinding_protected_test.go @@ -18,6 +18,7 @@ package apibinding import ( "context" + "fmt" "testing" "time" @@ -96,10 +97,7 @@ func TestProtectedAPI(t *testing.T) { framework.Eventually(t, func() (bool, string) { _, err = kcpClusterClient.Cluster(consumerPath).ApisV1alpha1().APIBindings().Create(ctx, apiBinding, metav1.CreateOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" + return err == nil, fmt.Sprintf("Error creating APIBinding: %v", err) }, wait.ForeverTestTimeout, time.Millisecond*100, "failed to create APIBinding") t.Logf("Make sure APIBinding %q in workspace %q is completed and up-to-date", apiBinding.Name, consumerPath) diff --git a/test/e2e/apibinding/apibinding_test.go b/test/e2e/apibinding/apibinding_test.go index b3b7d53a407..7ee71b842a8 100644 --- a/test/e2e/apibinding/apibinding_test.go +++ b/test/e2e/apibinding/apibinding_test.go @@ -33,6 +33,7 @@ import ( "github.com/kcp-dev/logicalcluster/v3" "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" @@ -113,10 +114,7 @@ func TestAPIBindingAPIExportReferenceImmutability(t *testing.T) { framework.Eventually(t, func() (bool, string) { _, err = kcpClusterClient.Cluster(consumerPath).ApisV1alpha1().APIBindings().Create(ctx, apiBinding, metav1.CreateOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" + return err == nil, fmt.Sprintf("Error creating APIBinding: %v", err) }, wait.ForeverTestTimeout, time.Millisecond*100) t.Logf("Make sure we can get the APIBinding we just created") @@ -158,7 +156,6 @@ func TestAPIBinding(t *testing.T) { consumer3ClusterName := logicalcluster.Name(consumer3Workspace.Spec.Cluster) cfg := server.BaseConfig(t) - rootShardCfg := server.RootShardSystemMasterBaseConfig(t) kcpClusterClient, err := kcpclientset.NewForConfig(cfg) require.NoError(t, err, "failed to construct kcp cluster client for server") @@ -168,12 +165,9 @@ func TestAPIBinding(t *testing.T) { shardVirtualWorkspaceURLs := sets.NewString() t.Logf("Getting a list of VirtualWorkspaceURLs assigned to Shards") + shards, err := kcpClusterClient.Cluster(core.RootCluster.Path()).CoreV1alpha1().Shards().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) require.Eventually(t, func() bool { - shards, err := kcpClusterClient.Cluster(core.RootCluster.Path()).CoreV1alpha1().Shards().List(ctx, metav1.ListOptions{}) - if err != nil { - t.Logf("unexpected error while listing shards, err %v", err) - return false - } for _, s := range shards.Items { if len(s.Spec.VirtualWorkspaceURL) == 0 { t.Logf("%q shard hasn't had assigned a virtual workspace URL", s.Name) @@ -226,10 +220,7 @@ func TestAPIBinding(t *testing.T) { framework.Eventually(t, func() (bool, string) { _, err := kcpClusterClient.Cluster(consumerWorkspace).ApisV1alpha1().APIBindings().Create(ctx, apiBinding, metav1.CreateOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" + return err == nil, fmt.Sprintf("Error creating APIBinding: %v", err) }, wait.ForeverTestTimeout, time.Millisecond*100) t.Logf("Make sure %s API group does NOT show up in workspace %q group discovery", wildwest.GroupName, providerPath) @@ -304,10 +295,7 @@ func TestAPIBinding(t *testing.T) { framework.Eventually(t, func() (bool, string) { 
_, err := kcpClusterClient.Cluster(consumerWorkspace).ApisV1alpha1().APIBindings().Create(ctx, apiBinding, metav1.CreateOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" + return err == nil, fmt.Sprintf("Error creating APIBinding: %v", err) }, wait.ForeverTestTimeout, time.Millisecond*100) t.Logf("Make sure %s cowboys2 conflict with already bound %s cowboys", provider2Path, providerPath) @@ -362,10 +350,6 @@ func TestAPIBinding(t *testing.T) { t.Logf("=== Testing identity wildcards") verifyWildcardList := func(consumerWorkspace logicalcluster.Path, expectedItems int) { - t.Logf("Get %s workspace shard and create a shard client that is able to do wildcard requests", consumerWorkspace) - shardDynamicClusterClients, err := kcpdynamic.NewForConfig(rootShardCfg) - require.NoError(t, err) - t.Logf("Get APIBinding for workspace %s", consumerWorkspace.String()) apiBinding, err := kcpClusterClient.Cluster(consumerWorkspace).ApisV1alpha1().APIBindings().Get(ctx, "cowboys", metav1.GetOptions{}) require.NoError(t, err, "error getting apibinding") @@ -373,20 +357,24 @@ func TestAPIBinding(t *testing.T) { identity := apiBinding.Status.BoundResources[0].Schema.IdentityHash gvrWithIdentity := wildwestv1alpha1.SchemeGroupVersion.WithResource("cowboys:" + identity) - t.Logf("Doing a wildcard identity list for %v against %s workspace shard", gvrWithIdentity, consumerWorkspace) - wildcardIdentityClient := shardDynamicClusterClients.Resource(gvrWithIdentity) - list, err := wildcardIdentityClient.List(ctx, metav1.ListOptions{}) - require.NoError(t, err, "error listing wildcard with identity") - - require.Len(t, list.Items, expectedItems, "unexpected # of cowboys") - var names []string - for _, cowboy := range list.Items { - names = append(names, cowboy.GetName()) + for _, shard := range shards.Items { + t.Logf("Doing a wildcard identity list for %v against %s workspace on shard %s", gvrWithIdentity, consumerWorkspace, shard.Name) + shardDynamicClusterClients, err := kcpdynamic.NewForConfig(server.ShardSystemMasterBaseConfig(t, shard.Name)) + require.NoError(t, err) + list, err := shardDynamicClusterClients.Resource(gvrWithIdentity).List(ctx, metav1.ListOptions{}) + if errors.IsNotFound(err) { + continue // this shard doesn't have the resource because there is no binding + } + require.NoError(t, err, "error listing wildcard with identity") + for _, cowboy := range list.Items { + names = append(names, cowboy.GetName()) + } } cowboyName := fmt.Sprintf("cowboy-%s", consumerWorkspace.Base()) require.Contains(t, names, cowboyName, "missing cowboy %q", cowboyName) + require.Len(t, names, expectedItems, "unexpected # of cowboys") } for _, consumerWorkspace := range consumersOfServiceProvider1 { @@ -397,29 +385,43 @@ func TestAPIBinding(t *testing.T) { t.Logf("=== Verify that in %q (bound to %q) wildcard list works", consumer3Path, provider2Path) verifyWildcardList(consumer3Path, 1) + t.Logf("=== Verify that %s|%s export virtual workspace shows cowboys", provider2Path, exportName) rawConfig, err := server.RawConfig() require.NoError(t, err) - - t.Logf("Smoke test %s|today-cowboys virtual workspace with explicit /cluster/%s", provider2Path, consumer3Path) - vw2ClusterClient, err := kcpdynamic.NewForConfig(apiexportVWConfig(t, rawConfig, provider2ClusterName, "today-cowboys")) + export2, err := kcpClusterClient.Cluster(provider2Path).ApisV1alpha1().APIExports().Get(ctx, exportName, metav1.GetOptions{}) require.NoError(t, err) - gvr := wildwestv1alpha1.SchemeGroupVersion.WithResource("cowboys") 
-	list, err := vw2ClusterClient.Cluster(consumer3ClusterName.Path()).Resource(gvr).Namespace("").List(ctx, metav1.ListOptions{})
-	require.NoError(t, err, "error listing through virtual workspace with explicit workspace")
-	require.Equal(t, 1, len(list.Items), "unexpected # of cowboys through virtual workspace with explicit workspace")
-
-	t.Logf("Smoke test %s|today-cowboys virtual workspace with wildcard", provider2Path)
-	list, err = vw2ClusterClient.Resource(gvr).List(ctx, metav1.ListOptions{})
-	require.NoError(t, err, "error listing through virtual workspace wildcard")
-	require.Equal(t, 1, len(list.Items), "unexpected # of cowboys through virtual workspace with wildcard")
+
+	foundOnShards := 0
+	//nolint:staticcheck // SA1019 VirtualWorkspaces is deprecated but not removed yet
+	for _, vw := range export2.Status.VirtualWorkspaces {
+		vw2ClusterClient, err := kcpdynamic.NewForConfig(apiexportVWConfig(t, rawConfig, vw.URL))
+		require.NoError(t, err)
+
+		t.Logf("Listing %s|%s cowboys via virtual workspace %s/clusters/%s", provider2Path, exportName, vw.URL, consumer3ClusterName)
+		gvr := wildwestv1alpha1.SchemeGroupVersion.WithResource("cowboys")
+		list, err := vw2ClusterClient.Cluster(consumer3ClusterName.Path()).Resource(gvr).Namespace("").List(ctx, metav1.ListOptions{})
+		if err != nil {
+			t.Logf("Error: %v", err)
+			continue
+		}
+		require.Equal(t, 1, len(list.Items), "unexpected # of cowboys through virtual workspace with explicit workspace")
+		foundOnShards++
+
+		t.Logf("Listing %s|%s cowboys via virtual workspace wildcard list", provider2Path, exportName)
+		list, err = vw2ClusterClient.Resource(gvr).List(ctx, metav1.ListOptions{})
+		require.NoError(t, err, "error listing through virtual workspace wildcard")
+		require.Equal(t, 1, len(list.Items), "unexpected # of cowboys through virtual workspace with wildcard")
+	}
+	//nolint:staticcheck // SA1019 VirtualWorkspaces is deprecated but not removed yet
+	require.Equal(t, 1, foundOnShards, "cowboys not found exactly on one shard, but on %d/%d", foundOnShards, len(export2.Status.VirtualWorkspaces))
 }
 
-func apiexportVWConfig(t *testing.T, kubeconfig clientcmdapi.Config, clusterName logicalcluster.Name, apiexportName string) *rest.Config {
+func apiexportVWConfig(t *testing.T, kubeconfig clientcmdapi.Config, url string) *rest.Config {
 	t.Helper()
 
 	virtualWorkspaceRawConfig := kubeconfig.DeepCopy()
 	virtualWorkspaceRawConfig.Clusters["apiexport"] = kubeconfig.Clusters["base"].DeepCopy()
-	virtualWorkspaceRawConfig.Clusters["apiexport"].Server = fmt.Sprintf("%s/services/apiexport/%s/%s/", kubeconfig.Clusters["base"].Server, clusterName.String(), apiexportName)
+	virtualWorkspaceRawConfig.Clusters["apiexport"].Server = url
 	virtualWorkspaceRawConfig.Contexts["apiexport"] = kubeconfig.Contexts["base"].DeepCopy()
 	virtualWorkspaceRawConfig.Contexts["apiexport"].Cluster = "apiexport"
 
diff --git a/test/e2e/apibinding/apibinding_webhook_test.go b/test/e2e/apibinding/apibinding_webhook_test.go
index 5ea3158aa5f..3f826457064 100644
--- a/test/e2e/apibinding/apibinding_webhook_test.go
+++ b/test/e2e/apibinding/apibinding_webhook_test.go
@@ -19,6 +19,7 @@ package apibinding
 import (
 	"context"
 	"crypto/tls"
+	"fmt"
 	gohttp "net/http"
 	"path/filepath"
 	"testing"
@@ -111,10 +112,7 @@ func TestAPIBindingMutatingWebhook(t *testing.T) {
 
 	framework.Eventually(t, func() (bool, string) {
 		_, err := kcpClusterClient.Cluster(targetPath).ApisV1alpha1().APIBindings().Create(ctx, apiBinding, metav1.CreateOptions{})
-		if err != nil {
-			return false, err.Error()
-		}
-		return true, ""
+		return err == nil, fmt.Sprintf("Error creating APIBinding: %v", err)
 	}, wait.ForeverTestTimeout, time.Millisecond*100)
 
 	scheme := runtime.NewScheme()
@@ -261,10 +259,7 @@ func TestAPIBindingValidatingWebhook(t *testing.T) {
 
 	framework.Eventually(t, func() (bool, string) {
 		_, err := kcpClients.Cluster(targetPath).ApisV1alpha1().APIBindings().Create(ctx, apiBinding, metav1.CreateOptions{})
-		if err != nil {
-			return false, err.Error()
-		}
-		return true, ""
+		return err == nil, fmt.Sprintf("Error creating APIBinding: %v", err)
 	}, wait.ForeverTestTimeout, time.Millisecond*100)
 
 	scheme := runtime.NewScheme()
diff --git a/test/e2e/apibinding/maximalpermissionpolicy_authorizer_test.go b/test/e2e/apibinding/maximalpermissionpolicy_authorizer_test.go
index 69663843d3a..280d9e94c1e 100644
--- a/test/e2e/apibinding/maximalpermissionpolicy_authorizer_test.go
+++ b/test/e2e/apibinding/maximalpermissionpolicy_authorizer_test.go
@@ -197,9 +197,9 @@ func TestMaximalPermissionPolicyAuthorizer(t *testing.T) {
 	}
 
 	// create API bindings in consumerWorkspace as user-3 with only bind permissions in serviceProviderWorkspace but not general access.
-	require.Eventuallyf(t, func() bool {
+	framework.Eventually(t, func() (bool, string) {
 		_, err = user3KcpClient.Cluster(consumerWorkspace).ApisV1alpha1().APIBindings().Create(ctx, apiBinding, metav1.CreateOptions{})
-		return err == nil
+		return err == nil, fmt.Sprintf("Error creating APIBinding: %v", err)
 	}, wait.ForeverTestTimeout, time.Millisecond*100, "expected user-3 to bind cowboys in %q", consumerWorkspace)
 
 	consumerWorkspaceClient, err := kcpclientset.NewForConfig(cfg)
diff --git a/test/e2e/authorizer/authorizer_test.go b/test/e2e/authorizer/authorizer_test.go
index 88fbaf2cce3..b5d830d1dfe 100644
--- a/test/e2e/authorizer/authorizer_test.go
+++ b/test/e2e/authorizer/authorizer_test.go
@@ -108,7 +108,6 @@ func TestAuthorizer(t *testing.T) {
 
 	tests := map[string]func(t *testing.T){
 		"as org member, workspace admin user-1 can access everything": func(t *testing.T) {
-			t.Helper()
 			_, err := user1KubeClusterClient.Cluster(org1.Join("workspace1")).CoreV1().ConfigMaps("default").List(ctx, metav1.ListOptions{})
 			require.NoError(t, err)
 			_, err = user1KubeClusterClient.Cluster(org1.Join("workspace1")).CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test"}}, metav1.CreateOptions{})
@@ -117,14 +116,12 @@ func TestAuthorizer(t *testing.T) {
 			require.NoError(t, err)
 		},
 		"with org access, workspace1 non-admin user-2 can access according to local policy": func(t *testing.T) {
-			t.Helper()
 			_, err := user2KubeClusterClient.Cluster(org1.Join("workspace1")).CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test"}}, metav1.CreateOptions{})
 			require.Errorf(t, err, "user-2 should not be able to create namespace in %s", org1.Join("workspace1"))
 			_, err = user2KubeClusterClient.Cluster(org1.Join("workspace1")).CoreV1().Secrets("default").List(ctx, metav1.ListOptions{})
 			require.NoErrorf(t, err, "user-2 should be able to list secrets in %s as defined in the local policy", org1.Join("workspace1"))
 		},
 		"with org access, workspace1 non-admin user-2 can access /healthz, /livez, /readyz etc": func(t *testing.T) {
-			t.Helper()
 			cl := user2KubeClusterClient.RESTClient()
 			requestPath := org1.RequestPath()
 			{
@@ -146,26 +143,22 @@ func TestAuthorizer(t *testing.T) {
 			}
 		},
 		"without org access, org1 workspace1 admin user-1 cannot access org2, not even discovery": func(t *testing.T) {
-			t.Helper()
 			_, err := user1KubeClusterClient.Cluster(org2.Join("workspace1")).CoreV1().ConfigMaps("default").List(ctx, metav1.ListOptions{})
 			require.Errorf(t, err, "user-1 should not be able to list configmaps in a different org (%s)", org2.Join("workspace1"))
 			_, err = user1KubeDiscoveryClient.Cluster(org2.Join("workspace1")).ServerResourcesForGroupVersion("rbac.authorization.k8s.io/v1") // can't be core because that always returns nil
 			require.Errorf(t, err, "user-1 should not be able to list server resources in a different org (%s)", org2.Join("workspace1"))
 		},
 		"as org member, workspace1 admin user-1 cannot access workspace2, not even discovery": func(t *testing.T) {
-			t.Helper()
 			_, err := user1KubeClusterClient.Cluster(org1.Join("workspace2")).CoreV1().ConfigMaps("default").List(ctx, metav1.ListOptions{})
 			require.Errorf(t, err, "user-1 should not be able to list configmaps in a different workspace (%s)", org1.Join("workspace2"))
 			_, err = user1KubeDiscoveryClient.Cluster(org2.Join("workspace1")).ServerResourcesForGroupVersion("rbac.authorization.k8s.io/v1") // can't be core because that always returns nil
 			require.Errorf(t, err, "user-1 should not be able to list server resources in a different workspace (%s)", org1.Join("workspace2"))
 		},
 		"with org access, workspace2 admin user-2 can access workspace2": func(t *testing.T) {
-			t.Helper()
 			_, err := user2KubeClusterClient.Cluster(org1.Join("workspace2")).CoreV1().ConfigMaps("default").List(ctx, metav1.ListOptions{})
 			require.NoError(t, err, "user-2 should be able to list configmaps in workspace2 (%s)", org1.Join("workspace2"))
 		},
 		"cluster admins can use wildcard clusters, non-cluster admin cannot": func(t *testing.T) {
-			t.Helper()
 			// create client talking directly to root shard to test wildcard requests
 			rootKubeClusterClient, err := kcpkubernetesclientset.NewForConfig(rootShardCfg)
 			require.NoError(t, err)
@@ -178,7 +171,6 @@ func TestAuthorizer(t *testing.T) {
 			require.Error(t, err, "Only cluster admins can use all clusters at once")
 		},
 		"with system:admin permissions, workspace2 non-admin user-3 can list Namespaces with a bootstrap ClusterRole": func(t *testing.T) {
-			t.Helper()
 			// get workspace2 shard and create a client to tweak the local bootstrap policy
 			shardKubeClusterClient, err := kcpkubernetesclientset.NewForConfig(rootShardCfg)
 			require.NoError(t, err)
@@ -233,7 +225,6 @@ func TestAuthorizer(t *testing.T) {
 			}, wait.ForeverTestTimeout, time.Millisecond*100, "User-3 should now be able to list Namespaces in %s", org1.Join("workspace2"))
 		},
 		"without org access, a deep SAR with user-1 against org2 succeeds even without org access for user-1": func(t *testing.T) {
-			t.Helper()
 			t.Logf("try to list ConfigMap as user-1 in %q without access, should fail", org2.Join("workspace1"))
 			_, err := user1KubeClusterClient.Cluster(org2.Join("workspace1")).CoreV1().ConfigMaps("default").List(ctx, metav1.ListOptions{})
 			require.Errorf(t, err, "user-1 should not be able to list configmaps in %q", org2.Join("workspace1"))
diff --git a/test/e2e/framework/config.go b/test/e2e/framework/config.go
index 4e02168f961..39e3d8605ed 100644
--- a/test/e2e/framework/config.go
+++ b/test/e2e/framework/config.go
@@ -17,7 +17,6 @@ limitations under the License.
 package framework
 
 import (
-	"context"
 	"errors"
 	"flag"
 	"fmt"
@@ -28,14 +27,12 @@ import (
 	"github.com/kcp-dev/logicalcluster/v3"
 	"github.com/stretchr/testify/require"
 
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+	cliflag "k8s.io/component-base/cli/flag"
 	"k8s.io/klog/v2"
 
-	"github.com/kcp-dev/kcp/pkg/apis/core"
-	kcpclientset "github.com/kcp-dev/kcp/pkg/client/clientset/versioned/cluster"
+	corev1alpha1 "github.com/kcp-dev/kcp/pkg/apis/core/v1alpha1"
 )
 
 func init() {
@@ -46,12 +43,13 @@ func init() {
 }
 
 type testConfig struct {
-	syncerImage                        string
-	kcpTestImage                       string
-	pclusterKubeconfig                 string
-	kcpKubeconfig, rootShardKubeconfig string
-	useDefaultKCPServer                bool
-	suites                             string
+	syncerImage         string
+	kcpTestImage        string
+	pclusterKubeconfig  string
+	kcpKubeconfig       string
+	shardKubeconfigs    map[string]string
+	useDefaultKCPServer bool
+	suites              string
 }
 
 var TestConfig *testConfig
@@ -80,11 +78,12 @@ func (c *testConfig) KCPKubeconfig() string {
 	return c.kcpKubeconfig
 }
 
-func (c *testConfig) RootShardKubeconfig() string {
-	if c.rootShardKubeconfig == "" {
-		return c.KCPKubeconfig()
+func (c *testConfig) ShardKubeconfig() map[string]string {
+	if len(c.shardKubeconfigs) == 0 {
+		return map[string]string{corev1alpha1.RootShard: c.KCPKubeconfig()}
 	}
-	return c.rootShardKubeconfig
+
+	return c.shardKubeconfigs
 }
 
 func (c *testConfig) Suites() []string {
@@ -99,7 +98,7 @@ func init() {
 
 func registerFlags(c *testConfig) {
 	flag.StringVar(&c.kcpKubeconfig, "kcp-kubeconfig", "", "Path to the kubeconfig for a kcp server.")
-	flag.StringVar(&c.rootShardKubeconfig, "root-shard-kubeconfig", "", "Path to the kubeconfig for a kcp shard server. If unset, kcp-kubeconfig is used.")
+	flag.Var(cliflag.NewMapStringString(&c.shardKubeconfigs), "shard-kubeconfigs", "Paths to the kubeconfigs for a kcp shard server in the format <shard-name>=<kubeconfig-path>. If unset, kcp-kubeconfig is used.")
	flag.StringVar(&c.pclusterKubeconfig, "pcluster-kubeconfig", "", "Path to the kubeconfig for a kubernetes cluster to sync to. Requires --syncer-image.")
 	flag.StringVar(&c.syncerImage, "syncer-image", "", "The syncer image to use with the pcluster. Requires --pcluster-kubeconfig")
 	flag.StringVar(&c.kcpTestImage, "kcp-test-image", "", "The test image to use with the pcluster. Requires --pcluster-kubeconfig")
@@ -123,19 +122,3 @@ func WriteLogicalClusterConfig(t *testing.T, rawConfig clientcmdapi.Config, contextName string, clusterName logicalcluster.Path) (clientcmd.ClientConfig, string) {
 	logicalConfig := clientcmd.NewNonInteractiveClientConfig(logicalRawConfig, logicalRawConfig.CurrentContext, &clientcmd.ConfigOverrides{}, nil)
 	return logicalConfig, kubeconfigPath
 }
-
-// ShardConfig returns a rest config that talk directly to the given shard.
-func ShardConfig(t *testing.T, kcpClusterClient kcpclientset.ClusterInterface, shardName string, cfg *rest.Config) *rest.Config {
-	t.Helper()
-
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	t.Cleanup(cancelFunc)
-
-	shard, err := kcpClusterClient.Cluster(core.RootCluster.Path()).CoreV1alpha1().Shards().Get(ctx, shardName, metav1.GetOptions{})
-	require.NoError(t, err)
-
-	shardCfg := rest.CopyConfig(cfg)
-	shardCfg.Host = shard.Spec.BaseURL
-
-	return shardCfg
-}
diff --git a/test/e2e/framework/kcp.go b/test/e2e/framework/kcp.go
index 152f9a8028c..45e4620db2c 100644
--- a/test/e2e/framework/kcp.go
+++ b/test/e2e/framework/kcp.go
@@ -53,6 +53,7 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 
+	corev1alpha1 "github.com/kcp-dev/kcp/pkg/apis/core/v1alpha1"
 	"github.com/kcp-dev/kcp/pkg/embeddedetcd"
 	"github.com/kcp-dev/kcp/pkg/server"
 	"github.com/kcp-dev/kcp/pkg/server/options"
@@ -140,7 +141,7 @@ func SharedKcpServer(t *testing.T) RunningServer {
 		// Use a persistent server
 
 		t.Logf("shared kcp server will target configuration %q", kubeconfig)
-		server, err := newPersistentKCPServer(serverName, kubeconfig, TestConfig.RootShardKubeconfig())
+		server, err := newPersistentKCPServer(serverName, kubeconfig, TestConfig.ShardKubeconfig())
 		require.NoError(t, err, "failed to create persistent server fixture")
 		return server
 	}
@@ -244,6 +245,7 @@ type RunningServer interface {
 	RawConfig() (clientcmdapi.Config, error)
 	BaseConfig(t *testing.T) *rest.Config
 	RootShardSystemMasterBaseConfig(t *testing.T) *rest.Config
+	ShardSystemMasterBaseConfig(t *testing.T, shard string) *rest.Config
 	Artifact(t *testing.T, producer func() (runtime.Object, error))
 }
 
@@ -642,6 +644,16 @@ func (c *kcpServer) RootShardSystemMasterBaseConfig(t *testing.T) *rest.Config {
 	return rest.AddUserAgent(cfg, t.Name())
 }
 
+// ShardSystemMasterBaseConfig returns a rest.Config for the "system:admin" context of a given shard. Client-side throttling is disabled (QPS=-1).
+func (c *kcpServer) ShardSystemMasterBaseConfig(t *testing.T, shard string) *rest.Config {
+	t.Helper()
+
+	if shard != corev1alpha1.RootShard {
+		t.Fatalf("only root shard is supported for now")
+	}
+	return c.RootShardSystemMasterBaseConfig(t)
+}
+
 // RawConfig exposes a copy of the client config for this server.
 func (c *kcpServer) RawConfig() (clientcmdapi.Config, error) {
 	c.lock.Lock()
@@ -813,10 +825,11 @@ func LoadKubeConfig(kubeconfigPath, contextName string) (clientcmd.ClientConfig, error) {
 }
 
 type unmanagedKCPServer struct {
-	name                    string
-	kubeconfigPath          string
-	rootShardKubeconfigPath string
-	cfg, rootShardCfg       clientcmd.ClientConfig
+	name                 string
+	kubeconfigPath       string
+	shardKubeconfigPaths map[string]string
+	cfg                  clientcmd.ClientConfig
+	shardCfgs            map[string]clientcmd.ClientConfig
 }
 
 // newPersistentKCPServer returns a RunningServer for a kubeconfig
@@ -824,23 +837,28 @@ type unmanagedKCPServer struct {
 // kubeconfig is expected to exist prior to running tests against it,
 // the configuration can be loaded synchronously and no locking is
 // required to subsequently access it.
-func newPersistentKCPServer(name, kubeconfigPath, rootShardKubeconfigPath string) (RunningServer, error) {
+func newPersistentKCPServer(name, kubeconfigPath string, shardKubeconfigPaths map[string]string) (RunningServer, error) {
 	cfg, err := LoadKubeConfig(kubeconfigPath, "base")
 	if err != nil {
 		return nil, err
 	}
 
-	rootShardCfg, err := LoadKubeConfig(rootShardKubeconfigPath, "base")
-	if err != nil {
-		return nil, err
+	shardCfgs := map[string]clientcmd.ClientConfig{}
+	for shard, path := range shardKubeconfigPaths {
+		shardCfg, err := LoadKubeConfig(path, "base")
+		if err != nil {
+			return nil, err
+		}
+
+		shardCfgs[shard] = shardCfg
 	}
 
 	return &unmanagedKCPServer{
-		name:                    name,
-		kubeconfigPath:          kubeconfigPath,
-		rootShardKubeconfigPath: rootShardKubeconfigPath,
-		cfg:                     cfg,
-		rootShardCfg:            rootShardCfg,
+		name:                 name,
+		kubeconfigPath:       kubeconfigPath,
+		shardKubeconfigPaths: shardKubeconfigPaths,
+		cfg:                  cfg,
+		shardCfgs:            shardCfgs,
 	}, nil
 }
 
@@ -933,7 +951,19 @@ func (s *unmanagedKCPServer) BaseConfig(t *testing.T) *rest.Config {
 
 func (s *unmanagedKCPServer) RootShardSystemMasterBaseConfig(t *testing.T) *rest.Config {
 	t.Helper()
 
-	raw, err := s.rootShardCfg.RawConfig()
+	return s.ShardSystemMasterBaseConfig(t, corev1alpha1.RootShard)
+}
+
+// ShardSystemMasterBaseConfig returns a rest.Config for the "system:admin" context of the given shard. Client-side throttling is disabled (QPS=-1).
+func (s *unmanagedKCPServer) ShardSystemMasterBaseConfig(t *testing.T, shard string) *rest.Config {
+	t.Helper()
+
+	cfg, found := s.shardCfgs[shard]
+	if !found {
+		t.Fatalf("kubeconfig for shard %q not found", shard)
+	}
+
+	raw, err := cfg.RawConfig()
 	require.NoError(t, err)
 
 	config := clientcmd.NewNonInteractiveClientConfig(raw, "system:admin", nil, nil)
diff --git a/test/e2e/framework/workspaces.go b/test/e2e/framework/workspaces.go
index 2c046035828..13cd2b1b65d 100644
--- a/test/e2e/framework/workspaces.go
+++ b/test/e2e/framework/workspaces.go
@@ -38,6 +38,7 @@ import (
 	"github.com/kcp-dev/kcp/pkg/authorization"
 	bootstrappolicy "github.com/kcp-dev/kcp/pkg/authorization/bootstrap"
 	kcpclientset "github.com/kcp-dev/kcp/pkg/client/clientset/versioned/cluster"
+	reconcilerworkspace "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/workspace"
 	"github.com/kcp-dev/kcp/pkg/server"
 )
 
@@ -175,7 +176,24 @@ func newWorkspaceFixture[O WorkspaceOption](t *testing.T, clusterClient kcpclientset.ClusterInterface, parent logicalcluster.Path, options ...O) *tenancyv1alpha1.Workspace {
 		return true, ""
 	}, wait.ForeverTestTimeout, time.Millisecond*100, "failed to wait for %s workspace %s to become accessible, potentially through eventual consistent workspace index", ws.Spec.Type, parent.Join(ws.Name))
 
-	t.Logf("Created %s workspace %s as /clusters/%s", ws.Spec.Type, parent.Join(ws.Name), ws.Spec.Cluster)
+	// best effort to get a shard name from the hash in the annotation
+	hash := ws.Annotations[reconcilerworkspace.WorkspaceShardHashAnnotationKey]
+	shard := corev1alpha1.RootShard
+	if reconcilerworkspace.ByBase36Sha224NameValue(shard) != hash {
+		found := false
+		for i := 0; i < 10; i++ {
+			shard = fmt.Sprintf("shard-%d", i)
+			if reconcilerworkspace.ByBase36Sha224NameValue(shard) == hash {
+				found = true
+				break
+			}
+		}
+		if !found {
+			shard = fmt.Sprintf("hash %s", hash)
+		}
+	}
+
+	t.Logf("Created %s workspace %s as /clusters/%s on shard %q", ws.Spec.Type, parent.Join(ws.Name), ws.Spec.Cluster, shard)
 
 	return ws
 }
diff --git a/test/e2e/garbagecollector/garbagecollector_test.go b/test/e2e/garbagecollector/garbagecollector_test.go
index cc766086b33..73a692a645c 100644
--- a/test/e2e/garbagecollector/garbagecollector_test.go
+++ b/test/e2e/garbagecollector/garbagecollector_test.go
@@ -173,8 +173,10 @@ func TestGarbageCollectorTypesFromBinding(t *testing.T) {
 	kcpClusterClient, err := kcpclientset.NewForConfig(cfg)
 	require.NoError(t, err, "error creating kcp cluster client")
 
-	_, err = kcpClusterClient.Cluster(userPath).ApisV1alpha1().APIBindings().Create(c, binding, metav1.CreateOptions{})
-	require.NoError(t, err)
+	framework.Eventually(t, func() (bool, string) {
+		_, err = kcpClusterClient.Cluster(userPath).ApisV1alpha1().APIBindings().Create(c, binding, metav1.CreateOptions{})
+		return err == nil, fmt.Sprintf("Error creating APIBinding: %v", err)
+	}, wait.ForeverTestTimeout, 100*time.Millisecond, "error creating APIBinding")
 
 	t.Logf("Wait for the binding to be ready")
 	framework.Eventually(t, func() (bool, string) {
diff --git a/test/e2e/quota/quota_test.go b/test/e2e/quota/quota_test.go
index 3b9ac95abb3..1e14d07a884 100644
--- a/test/e2e/quota/quota_test.go
+++ b/test/e2e/quota/quota_test.go
@@ -178,10 +178,7 @@ func TestKubeQuotaCoreV1TypesFromBinding(t *testing.T) {
 
 	framework.Eventually(t, func() (bool, string) {
 		_, err := kcpClusterClient.Cluster(userPath).ApisV1alpha1().APIBindings().Create(ctx, binding, metav1.CreateOptions{})
-		if err != nil {
-			return false, err.Error()
-		}
-		return true, ""
+		return err == nil, fmt.Sprintf("Error creating APIBinding: %v", err)
 	}, wait.ForeverTestTimeout, 100*time.Millisecond, "error creating APIBinding")
 
 	t.Logf("Wait for binding to be ready")
diff --git a/test/e2e/reconciler/clusterworkspaceshard/controller_test.go b/test/e2e/reconciler/shard/controller_test.go
similarity index 100%
rename from test/e2e/reconciler/clusterworkspaceshard/controller_test.go
rename to test/e2e/reconciler/shard/controller_test.go
diff --git a/test/e2e/virtual/apiexport/authorizer_test.go b/test/e2e/virtual/apiexport/authorizer_test.go
index 0cb0b655912..bbf30e513ed 100644
--- a/test/e2e/virtual/apiexport/authorizer_test.go
+++ b/test/e2e/virtual/apiexport/authorizer_test.go
@@ -610,10 +610,7 @@ func TestRootAPIExportAuthorizers(t *testing.T) {
 	}
 	framework.Eventually(t, func() (bool, string) {
 		_, err := userKcpClient.Cluster(userPath).ApisV1alpha1().APIBindings().Create(ctx, apiBinding, metav1.CreateOptions{})
-		if err != nil {
-			return false, fmt.Sprintf("error creating API binding: %v", err)
-		}
-		return true, ""
+		return err == nil, fmt.Sprintf("Error creating APIBinding: %v", err)
 	}, wait.ForeverTestTimeout, time.Millisecond*100, "api binding creation failed")
 
 	t.Logf("Wait for the binding to be ready")
diff --git a/test/e2e/virtual/apiexport/virtualworkspace_test.go b/test/e2e/virtual/apiexport/virtualworkspace_test.go
index b9fbec65f7c..8437599f498 100644
--- a/test/e2e/virtual/apiexport/virtualworkspace_test.go
+++ b/test/e2e/virtual/apiexport/virtualworkspace_test.go
@@ -1045,9 +1045,9 @@ func bindConsumerToProvider(ctx context.Context, t *testing.T, consumerWorkspace
 	consumerWsClient, err := kcpclientset.NewForConfig(cfg)
 	require.NoError(t, err)
 
-	require.Eventually(t, func() bool {
+	framework.Eventually(t, func() (bool, string) {
 		_, err = kcpClients.Cluster(consumerWorkspace).ApisV1alpha1().APIBindings().Create(ctx, apiBinding, metav1.CreateOptions{})
-		return err == nil
+		return err == nil, fmt.Sprintf("Error creating APIBinding: %v", err)
 	}, wait.ForeverTestTimeout, time.Millisecond*100)
 
 	t.Logf("Make sure %q API group shows up in consumer workspace %q group discovery", wildwest.GroupName, consumerWorkspace)