Merge pull request #2882 from hardys/logical_cluster_admin_fix
🐛 Decouple internal and external logical-cluster-admin access
openshift-merge-robot authored Mar 24, 2023
2 parents 60ba17a + 36aa88c commit e65c243
Showing 15 changed files with 160 additions and 60 deletions.
34 changes: 33 additions & 1 deletion cmd/sharded-test-server/frontproxy.go
@@ -312,7 +312,10 @@ func writeShardKubeConfig(workDirPath string) error {
}

func writeLogicalClusterAdminKubeConfig(hostIP, workDirPath string) error {
baseHost := fmt.Sprintf("https://%s", net.JoinHostPort(hostIP, "6443"))
// The logical-cluster-admin kubeconfig references the root shard and is
// also used to access other shards directly; see also the external
// config below, which is used for connections via the front-proxy.
baseHost := fmt.Sprintf("https://%s", net.JoinHostPort(hostIP, "6444"))

var kubeConfig clientcmdapi.Config
kubeConfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{
@@ -338,3 +341,32 @@ func writeLogicalClusterAdminKubeConfig(hostIP, workDirPath string) error {

return clientcmd.WriteToFile(kubeConfig, filepath.Join(workDirPath, ".kcp/logical-cluster-admin.kubeconfig"))
}

func writeExternalLogicalClusterAdminKubeConfig(hostIP, workDirPath string) error {
// The external config references the front-proxy endpoint
baseHost := fmt.Sprintf("https://%s", net.JoinHostPort(hostIP, "6443"))

var kubeConfig clientcmdapi.Config
kubeConfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{
"external-logical-cluster-admin": {
ClientKey: filepath.Join(workDirPath, ".kcp/external-logical-cluster-admin.key"),
ClientCertificate: filepath.Join(workDirPath, ".kcp/external-logical-cluster-admin.crt"),
},
}
kubeConfig.Clusters = map[string]*clientcmdapi.Cluster{
"base": {
Server: baseHost,
CertificateAuthority: filepath.Join(workDirPath, ".kcp/serving-ca.crt"),
},
}
kubeConfig.Contexts = map[string]*clientcmdapi.Context{
"base": {Cluster: "base", AuthInfo: "external-logical-cluster-admin"},
}
kubeConfig.CurrentContext = "base"

if err := clientcmdapi.FlattenConfig(&kubeConfig); err != nil {
return err
}

return clientcmd.WriteToFile(kubeConfig, filepath.Join(workDirPath, ".kcp/external-logical-cluster-admin.kubeconfig"))
}
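
For context: a kubeconfig written by writeExternalLogicalClusterAdminKubeConfig is consumed the same way pkg/server/config.go loads it further down in this diff. The sketch below shows that consumption path; loadExternalLogicalClusterAdminConfig is a hypothetical helper for illustration, while the clientcmd calls mirror the ones used in config.go.

package sketch

import (
	"fmt"
	"path/filepath"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// loadExternalLogicalClusterAdminConfig loads the kubeconfig written above
// into a *rest.Config, the same way pkg/server/config.go does in this PR.
func loadExternalLogicalClusterAdminConfig(workDirPath string) (*rest.Config, error) {
	path := filepath.Join(workDirPath, ".kcp/external-logical-cluster-admin.kubeconfig")
	cfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: path}, nil,
	).ClientConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to load %q: %w", path, err)
	}
	// Requests made with this config present the external-logical-cluster-admin
	// client certificate and go through the front-proxy on port 6443, so they
	// never carry the internal system:kcp:logical-cluster-admin group.
	return cfg, nil
}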
20 changes: 19 additions & 1 deletion cmd/sharded-test-server/main.go
@@ -135,7 +135,21 @@ func start(proxyFlags, shardFlags []string, logDirPath, workDirPath string, numb
365,
)
if err != nil {
return fmt.Errorf("failed to create kcp-logical-cluster client cert: %w", err)
return fmt.Errorf("failed to create logical-cluster-admin client cert: %w", err)
}

// client cert for external-logical-cluster-admin
_, _, err = clientCA.EnsureClientCertificate(
filepath.Join(workDirPath, ".kcp/external-logical-cluster-admin.crt"),
filepath.Join(workDirPath, ".kcp/external-logical-cluster-admin.key"),
&kuser.DefaultInfo{
Name: "external-logical-cluster-admin",
Groups: []string{bootstrap.SystemExternalLogicalClusterAdmin},
},
365,
)
if err != nil {
return fmt.Errorf("failed to create external-logical-cluster-admin client cert: %w", err)
}

// TODO:(p0lyn0mial): in the future we need a separate group valid only for the proxy
@@ -202,6 +216,10 @@ func start(proxyFlags, shardFlags []string, logDirPath, workDirPath string, numb
return err
}

if err := writeExternalLogicalClusterAdminKubeConfig(hostIP.String(), workDirPath); err != nil {
return err
}

// start shards
var shards []*shard.Shard
for i := 0; i < numberOfShards; i++ {
1 change: 1 addition & 0 deletions cmd/sharded-test-server/shard.go
@@ -95,6 +95,7 @@ func newShard(ctx context.Context, n int, args []string, standaloneVW bool, serv
fmt.Sprintf("--tls-private-key-file=%s", filepath.Join(workDirPath, fmt.Sprintf(".kcp-%d/apiserver.key", n))),
fmt.Sprintf("--secure-port=%d", 6444+n),
fmt.Sprintf("--logical-cluster-admin-kubeconfig=%s", filepath.Join(workDirPath, ".kcp/logical-cluster-admin.kubeconfig")),
fmt.Sprintf("--external-logical-cluster-admin-kubeconfig=%s", filepath.Join(workDirPath, ".kcp/external-logical-cluster-admin.kubeconfig")),
fmt.Sprintf("--shard-client-cert-file=%s", shardClientCert),
fmt.Sprintf("--shard-client-key-file=%s", shardClientCertKey),
fmt.Sprintf("--shard-virtual-workspace-ca-file=%s", filepath.Join(workDirPath, ".kcp", "serving-ca.crt")),
5 changes: 2 additions & 3 deletions pkg/admission/logicalcluster/admission.go
@@ -127,7 +127,7 @@ func (o *plugin) Validate(ctx context.Context, a admission.Attributes, _ admissi
}

groups := sets.NewString(a.GetUserInfo().GetGroups()...)
if groups.Has(kuser.SystemPrivilegedGroup) || groups.Has(bootstrap.SystemLogicalClusterAdmin) || groups.Has(bootstrap.SystemKcpWorkspaceBootstrapper) {
if groups.Has(kuser.SystemPrivilegedGroup) || groups.Has(bootstrap.SystemLogicalClusterAdmin) || groups.Has(bootstrap.SystemExternalLogicalClusterAdmin) || groups.Has(bootstrap.SystemKcpWorkspaceBootstrapper) {
return nil
}

@@ -190,8 +190,7 @@ func (o *plugin) Validate(ctx context.Context, a admission.Attributes, _ admissi
if err != nil {
return fmt.Errorf("LogicalCluster cannot be deleted: %w", err)
}
groups := sets.NewString(a.GetUserInfo().GetGroups()...)
if !logicalCluster.Spec.DirectlyDeletable && !groups.Has(kuser.SystemPrivilegedGroup) && !groups.Has(bootstrap.SystemLogicalClusterAdmin) {
if !logicalCluster.Spec.DirectlyDeletable {
return admission.NewForbidden(a, fmt.Errorf("LogicalCluster cannot be deleted"))
}

8 changes: 8 additions & 0 deletions pkg/admission/logicalcluster/admission_test.go
@@ -336,6 +336,14 @@ func TestValidate(t *testing.T) {
&kuser.DefaultInfo{Groups: []string{"system:kcp:logical-cluster-admin"}},
),
},
{
name: "passed deletion as system:kcp:external-logical-cluster-admin",
clusterName: "root:org:ws",
attr: deleteAttr(
newLogicalCluster("root:org:ws").LogicalCluster,
&kuser.DefaultInfo{Groups: []string{"system:kcp:external-logical-cluster-admin"}},
),
},
{
name: "passed deletion as another user if directly deletable",
clusterName: "root:org:ws",
14 changes: 13 additions & 1 deletion pkg/authorization/bootstrap/policy.go
@@ -35,10 +35,13 @@ const (
// We need a separate group (not the privileged system group) for this because system-owned workspaces (e.g. root:users) need
// a workspace owner annotation set, and the owner annotation is skipped/not set for the privileged system group.
SystemKcpWorkspaceBootstrapper = "system:kcp:tenancy:workspace-bootstrapper"
// SystemLogicalClusterAdmin is a group used by the scheduler to create LogicalCluster resources.
// SystemLogicalClusterAdmin is a group used by the workspace scheduler to create LogicalCluster resources.
// This group allows it to skip the entire authorization stack except the bootstrap policy authorizer.
// Otherwise, the requests would be rejected because the LogicalCluster resource does not exist yet.
SystemLogicalClusterAdmin = "system:kcp:logical-cluster-admin"
// SystemExternalLogicalClusterAdmin is a group used by the workspace controllers to manage LogicalCluster
// resources after creation, using a subset of permissions allowed for the internal logical-cluster-admin.
SystemExternalLogicalClusterAdmin = "system:kcp:external-logical-cluster-admin"
// SystemKcpWorkspaceAccessGroup is a group that gives a user system:authenticated access to a workspace.
SystemKcpWorkspaceAccessGroup = "system:kcp:workspace:access"
)
@@ -49,6 +52,7 @@ func clusterRoleBindings() []rbacv1.ClusterRoleBinding {
clusterRoleBindingCustomName(rbacv1helpers.NewClusterBinding("cluster-admin").Groups(SystemKcpAdminGroup).BindingOrDie(), "system:kcp:admin:cluster-admin"),
clusterRoleBindingCustomName(rbacv1helpers.NewClusterBinding(SystemKcpWorkspaceBootstrapper).Groups(SystemKcpWorkspaceBootstrapper, "apis.kcp.io:binding:"+SystemKcpWorkspaceBootstrapper).BindingOrDie(), SystemKcpWorkspaceBootstrapper),
clusterRoleBindingCustomName(rbacv1helpers.NewClusterBinding(SystemLogicalClusterAdmin).Groups(SystemLogicalClusterAdmin).BindingOrDie(), SystemLogicalClusterAdmin),
clusterRoleBindingCustomName(rbacv1helpers.NewClusterBinding(SystemExternalLogicalClusterAdmin).Groups(SystemExternalLogicalClusterAdmin).BindingOrDie(), SystemExternalLogicalClusterAdmin),
}
}

@@ -76,6 +80,14 @@ func clusterRoles() []rbacv1.ClusterRole {
rbacv1helpers.NewRule("delete", "update", "get").Groups(tenancy.GroupName).Resources("workspaces").RuleOrDie(),
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: SystemExternalLogicalClusterAdmin},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("delete", "update", "get").Groups(core.GroupName).Resources("logicalclusters", "logicalclusters/status").RuleOrDie(),
rbacv1helpers.NewRule("delete", "update", "get").Groups(tenancy.GroupName).Resources("workspaces").RuleOrDie(),
rbacv1helpers.NewRule("access").URLs("/").RuleOrDie(),
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: SystemKcpWorkspaceAccessGroup},
Rules: []rbacv1.PolicyRule{
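
For readers unfamiliar with the rbacv1helpers builders: the new role and binding added above expand to roughly the objects below. This is only a sketch; the "core.kcp.io" and "tenancy.kcp.io" strings are assumptions for core.GroupName and tenancy.GroupName and are not taken from this diff.

package sketch

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Approximation of what clusterRoles()/clusterRoleBindings() produce for the
// new group; the real objects come from the rbacv1helpers builders.
var externalLogicalClusterAdminRole = rbacv1.ClusterRole{
	ObjectMeta: metav1.ObjectMeta{Name: "system:kcp:external-logical-cluster-admin"},
	Rules: []rbacv1.PolicyRule{
		{
			Verbs:     []string{"delete", "update", "get"},
			APIGroups: []string{"core.kcp.io"}, // assumed value of core.GroupName
			Resources: []string{"logicalclusters", "logicalclusters/status"},
		},
		{
			Verbs:     []string{"delete", "update", "get"},
			APIGroups: []string{"tenancy.kcp.io"}, // assumed value of tenancy.GroupName
			Resources: []string{"workspaces"},
		},
		{
			Verbs:           []string{"access"},
			NonResourceURLs: []string{"/"},
		},
	},
}

var externalLogicalClusterAdminBinding = rbacv1.ClusterRoleBinding{
	ObjectMeta: metav1.ObjectMeta{Name: "system:kcp:external-logical-cluster-admin"},
	Subjects: []rbacv1.Subject{
		{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "system:kcp:external-logical-cluster-admin"},
	},
	RoleRef: rbacv1.RoleRef{
		APIGroup: rbacv1.GroupName,
		Kind:     "ClusterRole",
		Name:     "system:kcp:external-logical-cluster-admin",
	},
}

The binding grants this restricted rule set to any client whose certificate puts it in the external group, which is exactly what the external kubeconfig created in the sharded test server does.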
5 changes: 5 additions & 0 deletions pkg/authorization/requiredgroups_authorizer.go
@@ -101,6 +101,11 @@ func (a *requiredGroupsAuthorizer) Authorize(ctx context.Context, attr authorize
return authorizer.DecisionNoOpinion, "", err
}

// always let external-logical-cluster-admins through
if sets.NewString(attr.GetUser().GetGroups()...).Has(bootstrap.SystemExternalLogicalClusterAdmin) {
return DelegateAuthorization("external logical cluster admin access", a.delegate).Authorize(ctx, attr)
}

// check required groups
value, found := logicalCluster.Annotations[RequiredGroupsAnnotationKey]
if !found {
6 changes: 6 additions & 0 deletions pkg/authorization/requiredgroups_authorizer_test.go
@@ -57,6 +57,12 @@ func TestRequiredGroupsAuthorizer(t *testing.T) {
wantDecision: authorizer.DecisionAllow,
wantReason: "delegating due to logical cluster admin access",
},
"system:kcp:external-logical-cluster-admin can always pass": {
requestedWorkspace: "root:ready",
requestingUser: newUser("external-lcluster-admin", "system:kcp:external-logical-cluster-admin"),
wantDecision: authorizer.DecisionAllow,
wantReason: "delegating due to external logical cluster admin access",
},
"service account from other cluster is granted access": {
requestedWorkspace: "root:ready",
requestingUser: newServiceAccountWithCluster("sa", "anotherws"),
8 changes: 6 additions & 2 deletions pkg/proxy/options/authentication.go
@@ -33,6 +33,7 @@ import (
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options"

"github.com/kcp-dev/kcp/pkg/authorization/bootstrap"
kcpauthentication "github.com/kcp-dev/kcp/pkg/proxy/authentication"
)

@@ -49,13 +50,16 @@ type Authentication struct {
// NewAuthentication creates a default Authentication.
func NewAuthentication() *Authentication {
auth := &Authentication{
// Note: when adding new auth methods, also update AdditionalAuthEnabled below
BuiltInOptions: kubeoptions.NewBuiltInAuthenticationOptions().
WithClientCert().
WithOIDC().
WithServiceAccounts().
WithTokenFile(),
// when adding new auth methods, also update AdditionalAuthEnabled below
DropGroups: []string{user.SystemPrivilegedGroup},
// SystemLogicalClusterAdmin is privileged and only for internal traffic;
// SystemExternalLogicalClusterAdmin must be used for all logical-cluster-admin
// requests via the proxy, so we drop SystemLogicalClusterAdmin here.
DropGroups: []string{user.SystemPrivilegedGroup, bootstrap.SystemLogicalClusterAdmin},
}
auth.BuiltInOptions.ServiceAccounts.Issuers = []string{"https://kcp.default.svc"}
return auth
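
Conceptually, DropGroups corresponds to an authenticator wrapper that strips the listed groups from every request the proxy authenticates before it is forwarded to a shard. The sketch below illustrates that idea only; it is not the actual kcpauthentication implementation.

package sketch

import (
	"net/http"

	"k8s.io/apiserver/pkg/authentication/authenticator"
	"k8s.io/apiserver/pkg/authentication/user"
)

// dropGroups wraps an authenticator.Request and removes the listed groups from
// whatever user the delegate authenticates. With system:kcp:logical-cluster-admin
// in the drop list, no request entering through the front-proxy can reach a
// shard with the internal group, even if the client certificate carries it.
func dropGroups(delegate authenticator.Request, dropped ...string) authenticator.Request {
	drop := make(map[string]bool, len(dropped))
	for _, g := range dropped {
		drop[g] = true
	}
	return authenticator.RequestFunc(func(req *http.Request) (*authenticator.Response, bool, error) {
		resp, ok, err := delegate.AuthenticateRequest(req)
		if err != nil || !ok {
			return resp, ok, err
		}
		kept := make([]string, 0, len(resp.User.GetGroups()))
		for _, g := range resp.User.GetGroups() {
			if !drop[g] {
				kept = append(kept, g)
			}
		}
		resp.User = &user.DefaultInfo{
			Name:   resp.User.GetName(),
			UID:    resp.User.GetUID(),
			Groups: kept,
			Extra:  resp.User.GetExtra(),
		}
		return resp, true, nil
	})
}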
@@ -64,23 +64,23 @@ func NewController(
kubeClusterClient kcpkubernetesclientset.ClusterInterface,
kcpClusterClient kcpclientset.ClusterInterface,
logicalClusterAdminConfig *rest.Config,
shardExternalURL func() string,
externalLogicalClusterAdminConfig *rest.Config,
metadataClusterClient kcpmetadata.ClusterInterface,
logicalClusterInformer corev1alpha1informers.LogicalClusterClusterInformer,
discoverResourcesFn func(clusterName logicalcluster.Path) ([]*metav1.APIResourceList, error),
) *Controller {
queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName)

c := &Controller{
queue: queue,
kubeClusterClient: kubeClusterClient,
kcpClusterClient: kcpClusterClient,
logicalClusterAdminConfig: logicalClusterAdminConfig,
shardExternalURL: shardExternalURL,
metadataClusterClient: metadataClusterClient,
logicalClusterLister: logicalClusterInformer.Lister(),
deleter: deletion.NewWorkspacedResourcesDeleter(metadataClusterClient, discoverResourcesFn),
commit: committer.NewCommitter[*LogicalCluster, Patcher, *LogicalClusterSpec, *LogicalClusterStatus](kcpClusterClient.CoreV1alpha1().LogicalClusters()),
queue: queue,
kubeClusterClient: kubeClusterClient,
kcpClusterClient: kcpClusterClient,
logicalClusterAdminConfig: logicalClusterAdminConfig,
externalLogicalClusterAdminConfig: externalLogicalClusterAdminConfig,
metadataClusterClient: metadataClusterClient,
logicalClusterLister: logicalClusterInformer.Lister(),
deleter: deletion.NewWorkspacedResourcesDeleter(metadataClusterClient, discoverResourcesFn),
commit: committer.NewCommitter[*LogicalCluster, Patcher, *LogicalClusterSpec, *LogicalClusterStatus](kcpClusterClient.CoreV1alpha1().LogicalClusters()),
}

logicalClusterInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
@@ -114,9 +114,9 @@ type Controller struct {
kubeClusterClient kcpkubernetesclientset.ClusterInterface
kcpClusterClient kcpclientset.ClusterInterface

logicalClusterAdminConfig *rest.Config
shardExternalURL func() string
dynamicFrontProxyClient kcpdynamic.ClusterInterface
logicalClusterAdminConfig *rest.Config
externalLogicalClusterAdminConfig *rest.Config
dynamicFrontProxyClient kcpdynamic.ClusterInterface

metadataClusterClient kcpmetadata.ClusterInterface

@@ -148,9 +148,8 @@ func (c *Controller) Start(ctx context.Context, numThreads int) {
defer logger.Info("Shutting down controller")

// a client needed to remove the finalizer from the logical cluster on a different shard
frontProxyConfig := rest.CopyConfig(c.logicalClusterAdminConfig)
frontProxyConfig := rest.CopyConfig(c.externalLogicalClusterAdminConfig)
frontProxyConfig = rest.AddUserAgent(frontProxyConfig, ControllerName)
frontProxyConfig.Host = c.shardExternalURL()
dynamicFrontProxyClient, err := kcpdynamic.NewForConfig(frontProxyConfig)
if err != nil {
runtime.HandleError(err)
18 changes: 8 additions & 10 deletions pkg/reconciler/tenancy/workspace/workspace_controller.go
@@ -51,10 +51,10 @@

func NewController(
shardName string,
shardExternalURL func() string,
kcpClusterClient kcpclientset.ClusterInterface,
kubeClusterClient kubernetes.ClusterInterface,
logicalClusterAdminConfig *rest.Config,
externalLogicalClusterAdminConfig *rest.Config,
workspaceInformer tenancyv1alpha1informers.WorkspaceClusterInformer,
globalShardInformer corev1alpha1informers.ShardClusterInformer,
globalWorkspaceTypeInformer tenancyv1alpha1informers.WorkspaceTypeClusterInformer,
@@ -65,10 +65,9 @@ func NewController(
c := &Controller{
queue: queue,

shardName: shardName,
shardExternalURL: shardExternalURL,

logicalClusterAdminConfig: logicalClusterAdminConfig,
shardName: shardName,
logicalClusterAdminConfig: logicalClusterAdminConfig,
externalLogicalClusterAdminConfig: externalLogicalClusterAdminConfig,

kcpClusterClient: kcpClusterClient,
kubeClusterClient: kubeClusterClient,
@@ -119,9 +118,9 @@ type workspaceResource = committer.Resource[*tenancyv1alpha1.WorkspaceSpec, *ten
type Controller struct {
queue workqueue.RateLimitingInterface

shardName string
shardExternalURL func() string
logicalClusterAdminConfig *rest.Config
shardName string
logicalClusterAdminConfig *rest.Config // for direct shard connections used during scheduling
externalLogicalClusterAdminConfig *rest.Config // for front-proxy connections used during initialization

kcpClusterClient kcpclientset.ClusterInterface
kubeClusterClient kubernetes.ClusterInterface
@@ -191,8 +190,7 @@ func (c *Controller) Start(ctx context.Context, numThreads int) {
defer c.queue.ShutDown()

// create external client that goes through the front-proxy
externalConfig := rest.CopyConfig(c.logicalClusterAdminConfig)
externalConfig.Host = c.shardExternalURL()
externalConfig := rest.CopyConfig(c.externalLogicalClusterAdminConfig)
kcpExternalClient, err := kcpclientset.NewForConfig(externalConfig)
if err != nil {
runtime.HandleError(err)
14 changes: 11 additions & 3 deletions pkg/server/config.go
@@ -108,8 +108,8 @@ type ExtraConfig struct {

CacheDynamicClient kcpdynamic.ClusterInterface

// config from which client can be configured
LogicalClusterAdminConfig *rest.Config
LogicalClusterAdminConfig *rest.Config // client config connecting directly to shards, skipping the front proxy
ExternalLogicalClusterAdminConfig *rest.Config // client config connecting to the front proxy

// misc
preHandlerChainMux *handlerChainMuxes
@@ -277,7 +277,15 @@ func NewConfig(opts kcpserveroptions.CompletedOptions) (*Config, error) {
if len(c.Options.Extra.LogicalClusterAdminKubeconfig) > 0 {
c.LogicalClusterAdminConfig, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: c.Options.Extra.LogicalClusterAdminKubeconfig}, nil).ClientConfig()
if err != nil {
return nil, fmt.Errorf("failed to load the kubeconfig from: %s, for a logical cluster client, err: %w", c.Options.Extra.LogicalClusterAdminKubeconfig, err)
return nil, fmt.Errorf("failed to load the logical cluster admin kubeconfig from %q: %w", c.Options.Extra.LogicalClusterAdminKubeconfig, err)
}
}

c.ExternalLogicalClusterAdminConfig = rest.CopyConfig(c.GenericConfig.LoopbackClientConfig)
if len(c.Options.Extra.ExternalLogicalClusterAdminKubeconfig) > 0 {
c.ExternalLogicalClusterAdminConfig, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: c.Options.Extra.ExternalLogicalClusterAdminKubeconfig}, nil).ClientConfig()
if err != nil {
return nil, fmt.Errorf("failed to load the external logical cluster admin kubeconfig from %q: %w", c.Options.Extra.ExternalLogicalClusterAdminKubeconfig, err)
}
}
