Skip to content

Commit

Permalink
Fix caching of client credentials when users re-login to incorporate …
Browse files Browse the repository at this point in the history
…new attributes (#18097) (#18114)

When users re-login after a failed attempt to access a Kubernetes cluster, Teleport may continue to use the old credentials for cluster access. This behavior results in successive failures until the credential cache expires (~1h).

This PR includes changes made by @r0mant to resolve the cache issue. It introduces certificate expiration in the cache key. Every time the user logs in again, the key will be different because the certificate expiration date is different. Thus, Teleport won't reuse the cached credentials.

Fixes #18070
  • Loading branch information
tigrato authored Nov 3, 2022
1 parent 7d29ab2 commit 0f1f41a
Show file tree
Hide file tree
Showing 2 changed files with 215 additions and 13 deletions.
8 changes: 6 additions & 2 deletions lib/kube/proxy/forwarder.go
Original file line number Diff line number Diff line change
Expand Up @@ -310,6 +310,8 @@ type authContext struct {
// disconnectExpiredCert if set, controls the time when the connection
// should be disconnected because the client cert expires
disconnectExpiredCert time.Time
// certExpires is the client certificate expiration timestamp.
certExpires time.Time
// sessionTTL specifies the duration of the user's session
sessionTTL time.Duration
}
Expand All @@ -321,7 +323,7 @@ func (c authContext) String() string {
func (c *authContext) key() string {
// it is important that the context key contains user, kubernetes groups and certificate expiry,
// so that new logins with different parameters will not reuse this context
return fmt.Sprintf("%v:%v:%v:%v:%v:%v", c.teleportCluster.name, c.User.GetName(), c.kubeUsers, c.kubeGroups, c.kubeCluster, c.disconnectExpiredCert.UTC().Unix())
return fmt.Sprintf("%v:%v:%v:%v:%v:%v", c.teleportCluster.name, c.User.GetName(), c.kubeUsers, c.kubeGroups, c.kubeCluster, c.certExpires.Unix())
}

func (c *authContext) eventClusterMeta() apievents.KubernetesClusterMetadata {
Expand Down Expand Up @@ -605,6 +607,7 @@ func (f *Forwarder) setupContext(ctx auth.Context, req *http.Request, isRemoteUs
kubeUsers: utils.StringsSet(kubeUsers),
recordingConfig: recordingConfig,
kubeCluster: kubeCluster,
certExpires: certExpires,
teleportCluster: teleportClusterClient{
name: teleportClusterName,
remoteAddr: utils.NetAddr{AddrNetwork: "tcp", Addr: req.RemoteAddr},
Expand All @@ -631,7 +634,8 @@ func (f *Forwarder) setupContext(ctx auth.Context, req *http.Request, isRemoteUs
func (f *Forwarder) getKubeGroupsAndUsers(
roles services.AccessChecker,
kubeClusterName string,
sessionTTL time.Duration) (groups, users []string, err error) {
sessionTTL time.Duration,
) (groups, users []string, err error) {
kubeServices, err := f.cfg.CachingAuthClient.GetKubeServices(f.ctx)
if err != nil {
return nil, nil, trace.Wrap(err)
Expand Down
220 changes: 209 additions & 11 deletions lib/kube/proxy/forwarder_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,15 +68,15 @@ var (
Usage: []string{"usage a", "usage b"},
Principals: []string{"principal a", "principal b"},
KubernetesGroups: []string{"remote k8s group a", "remote k8s group b"},
Traits: map[string][]string{"trait a": []string{"b", "c"}},
Traits: map[string][]string{"trait a": {"b", "c"}},
})
unmappedIdentity = auth.WrapIdentity(tlsca.Identity{
Username: "bob",
Groups: []string{"group a", "group b"},
Usage: []string{"usage a", "usage b"},
Principals: []string{"principal a", "principal b"},
KubernetesGroups: []string{"k8s group a", "k8s group b"},
Traits: map[string][]string{"trait a": []string{"b", "c"}},
Traits: map[string][]string{"trait a": {"b", "c"}},
})
)

Expand Down Expand Up @@ -141,7 +141,12 @@ func TestAuthenticate(t *testing.T) {
authPref: authPref,
}

user, err := types.NewUser("user-a")
const (
username = "user-a"
)
certExpiration := time.Now().Add(time.Hour)

user, err := types.NewUser(username)
require.NoError(t, err)

tun := mockRevTunnel{
Expand All @@ -150,7 +155,6 @@ func TestAuthenticate(t *testing.T) {
"local": mockRemoteSite{name: "local"},
},
}

f := &Forwarder{
log: logrus.New(),
cfg: ForwarderConfig{
Expand Down Expand Up @@ -196,6 +200,7 @@ func TestAuthenticate(t *testing.T) {
kubeUsers: utils.StringsSet([]string{"user-a"}),
kubeGroups: utils.StringsSet([]string{"kube-group-a", "kube-group-b", teleport.KubeSystemAuthenticated}),
kubeCluster: "local",
certExpires: certExpiration,
teleportCluster: teleportClusterClient{
name: "local",
remoteAddr: *utils.MustParseAddr(remoteAddr),
Expand All @@ -221,6 +226,7 @@ func TestAuthenticate(t *testing.T) {
kubeUsers: utils.StringsSet([]string{"user-a"}),
kubeGroups: utils.StringsSet([]string{"kube-group-a", "kube-group-b", teleport.KubeSystemAuthenticated}),
kubeCluster: "local",
certExpires: certExpiration,
teleportCluster: teleportClusterClient{
name: "local",
remoteAddr: *utils.MustParseAddr(remoteAddr),
Expand All @@ -245,6 +251,7 @@ func TestAuthenticate(t *testing.T) {
kubeUsers: utils.StringsSet([]string{"user-a"}),
kubeGroups: utils.StringsSet([]string{"kube-group-a", "kube-group-b", teleport.KubeSystemAuthenticated}),
kubeCluster: "local",
certExpires: certExpiration,
teleportCluster: teleportClusterClient{
name: "local",
remoteAddr: *utils.MustParseAddr(remoteAddr),
Expand All @@ -260,8 +267,9 @@ func TestAuthenticate(t *testing.T) {
tunnel: tun,

wantCtx: &authContext{
kubeUsers: utils.StringsSet([]string{"user-a"}),
kubeGroups: utils.StringsSet([]string{teleport.KubeSystemAuthenticated}),
kubeUsers: utils.StringsSet([]string{"user-a"}),
kubeGroups: utils.StringsSet([]string{teleport.KubeSystemAuthenticated}),
certExpires: certExpiration,
teleportCluster: teleportClusterClient{
name: "remote",
remoteAddr: *utils.MustParseAddr(remoteAddr),
Expand All @@ -278,8 +286,9 @@ func TestAuthenticate(t *testing.T) {
tunnel: tun,

wantCtx: &authContext{
kubeUsers: utils.StringsSet([]string{"user-a"}),
kubeGroups: utils.StringsSet([]string{teleport.KubeSystemAuthenticated}),
kubeUsers: utils.StringsSet([]string{"user-a"}),
kubeGroups: utils.StringsSet([]string{teleport.KubeSystemAuthenticated}),
certExpires: certExpiration,
teleportCluster: teleportClusterClient{
name: "remote",
remoteAddr: *utils.MustParseAddr(remoteAddr),
Expand All @@ -296,8 +305,9 @@ func TestAuthenticate(t *testing.T) {
tunnel: tun,

wantCtx: &authContext{
kubeUsers: utils.StringsSet([]string{"user-a"}),
kubeGroups: utils.StringsSet([]string{teleport.KubeSystemAuthenticated}),
kubeUsers: utils.StringsSet([]string{"user-a"}),
kubeGroups: utils.StringsSet([]string{teleport.KubeSystemAuthenticated}),
certExpires: certExpiration,
teleportCluster: teleportClusterClient{
name: "remote",
remoteAddr: *utils.MustParseAddr(remoteAddr),
Expand Down Expand Up @@ -336,6 +346,7 @@ func TestAuthenticate(t *testing.T) {
kubeUsers: utils.StringsSet([]string{"kube-user-a", "kube-user-b"}),
kubeGroups: utils.StringsSet([]string{"kube-group-a", "kube-group-b", teleport.KubeSystemAuthenticated}),
kubeCluster: "local",
certExpires: certExpiration,
teleportCluster: teleportClusterClient{
name: "local",
remoteAddr: *utils.MustParseAddr(remoteAddr),
Expand Down Expand Up @@ -377,6 +388,7 @@ func TestAuthenticate(t *testing.T) {
kubeUsers: utils.StringsSet([]string{"user-a"}),
kubeGroups: utils.StringsSet([]string{"kube-group-a", "kube-group-b", teleport.KubeSystemAuthenticated}),
kubeCluster: "local",
certExpires: certExpiration,
teleportCluster: teleportClusterClient{
name: "local",
remoteAddr: *utils.MustParseAddr(remoteAddr),
Expand Down Expand Up @@ -423,6 +435,7 @@ func TestAuthenticate(t *testing.T) {
kubeUsers: utils.StringsSet([]string{"user-a"}),
kubeGroups: utils.StringsSet([]string{"kube-group-a", "kube-group-b", teleport.KubeSystemAuthenticated}),
kubeCluster: "foo",
certExpires: certExpiration,
teleportCluster: teleportClusterClient{
name: "local",
remoteAddr: *utils.MustParseAddr(remoteAddr),
Expand All @@ -442,6 +455,7 @@ func TestAuthenticate(t *testing.T) {
kubeUsers: utils.StringsSet([]string{"user-a"}),
kubeGroups: utils.StringsSet([]string{teleport.KubeSystemAuthenticated}),
kubeCluster: "foo",
certExpires: certExpiration,
teleportCluster: teleportClusterClient{
name: "remote",
remoteAddr: *utils.MustParseAddr(remoteAddr),
Expand Down Expand Up @@ -480,7 +494,7 @@ func TestAuthenticate(t *testing.T) {
RemoteAddr: remoteAddr,
TLS: &tls.ConnectionState{
PeerCertificates: []*x509.Certificate{
{NotAfter: time.Now().Add(time.Hour)},
{NotAfter: certExpiration},
},
},
}
Expand All @@ -506,6 +520,19 @@ func TestAuthenticate(t *testing.T) {
cmpopts.IgnoreFields(authContext{}, "clientIdleTimeout", "sessionTTL", "Context", "recordingConfig", "disconnectExpiredCert"),
cmpopts.IgnoreFields(teleportClusterClient{}, "dial", "isRemoteClosed"),
))

// validate authCtx.key() to make sure it includes the certExpires timestamp.
// This is important to ensure the user's credentials are correctly cached
// and that, once the user re-logs in, Teleport won't reuse their previous cache entry.
ctxKey := fmt.Sprintf("%v:%v:%v:%v:%v:%v",
tt.wantCtx.teleportCluster.name,
username,
tt.wantCtx.kubeUsers,
tt.wantCtx.kubeGroups,
tt.wantCtx.kubeCluster,
certExpiration.Unix(),
)
require.Equal(t, ctxKey, gotCtx.key())
})
}
}
Expand Down Expand Up @@ -1068,3 +1095,174 @@ func (m *mockWatcher) Events() <-chan types.Event {
// Done returns a channel that is closed once the mock watcher's context is
// canceled, signaling that the watcher has stopped.
func (m *mockWatcher) Done() <-chan struct{} {
return m.ctx.Done()
}

// TestForwarder_clientCreds_cache verifies that client credentials are cached
// under a key derived from the authContext (user, kube users/groups, cluster
// and certificate expiration), so that contexts differing in any of those
// fields — in particular certExpires, which changes on every re-login — each
// get a distinct cache entry instead of reusing stale credentials.
func TestForwarder_clientCreds_cache(t *testing.T) {
	now := time.Now()

	cache, err := ttlmap.New(10)
	require.NoError(t, err)

	cl, err := newMockCSRClient()
	require.NoError(t, err)

	f := &Forwarder{
		cfg: ForwarderConfig{
			Keygen:     testauthority.New(),
			AuthClient: cl,
			// Back-date the fake clock relative to the single captured `now`
			// (not a second time.Now() call) so issued certificates are
			// already valid and the test stays deterministic.
			Clock: clockwork.NewFakeClockAt(now.Add(-2 * time.Minute)),
		},
		clientCredentials: cache,
		log:               logrus.New(),
	}

	tests := []struct {
		name string
		ctx  authContext
	}{
		{
			name: "store first certificate",
			ctx: authContext{
				kubeUsers:   utils.StringsSet([]string{"user-a"}),
				kubeGroups:  utils.StringsSet([]string{"kube-group-a", "kube-group-b", teleport.KubeSystemAuthenticated}),
				kubeCluster: "local",
				Context: auth.Context{
					User: &types.UserV2{
						Metadata: types.Metadata{
							Name: "user-a",
						},
					},
					Identity:         identity,
					UnmappedIdentity: unmappedIdentity,
				},
				certExpires: now.Add(1 * time.Hour),
				teleportCluster: teleportClusterClient{
					name: "local",
				},
				sessionTTL: 1 * time.Hour,
			},
		},
		{
			// Same user/groups as above; only certExpires differs, which must
			// still produce a distinct cache key (the re-login scenario).
			name: "store certificate with different certExpires value",
			ctx: authContext{
				kubeUsers:   utils.StringsSet([]string{"user-a"}),
				kubeGroups:  utils.StringsSet([]string{"kube-group-a", "kube-group-b", teleport.KubeSystemAuthenticated}),
				kubeCluster: "local",
				Context: auth.Context{
					User: &types.UserV2{
						Metadata: types.Metadata{
							Name: "user-a",
						},
					},
					Identity:         identity,
					UnmappedIdentity: unmappedIdentity,
				},
				certExpires: now.Add(2 * time.Hour),
				teleportCluster: teleportClusterClient{
					name: "local",
				},
				sessionTTL: 1 * time.Hour,
			},
		},
		{
			name: "store certificate with different kube groups",
			ctx: authContext{
				kubeUsers:   utils.StringsSet([]string{"user-a"}),
				kubeGroups:  utils.StringsSet([]string{"kube-group-b", teleport.KubeSystemAuthenticated}),
				kubeCluster: "local",
				Context: auth.Context{
					User: &types.UserV2{
						Metadata: types.Metadata{
							Name: "user-a",
						},
					},
					Identity:         identity,
					UnmappedIdentity: unmappedIdentity,
				},
				certExpires: now.Add(1 * time.Hour),
				teleportCluster: teleportClusterClient{
					name: "local",
				},
				sessionTTL: 1 * time.Hour,
			},
		},
		{
			name: "store certificate with different kube user",
			ctx: authContext{
				kubeUsers:   utils.StringsSet([]string{"user-test"}),
				kubeGroups:  utils.StringsSet([]string{"kube-group-b", teleport.KubeSystemAuthenticated}),
				kubeCluster: "local",
				Context: auth.Context{
					User: &types.UserV2{
						Metadata: types.Metadata{
							Name: "user-a",
						},
					},
					Identity:         identity,
					UnmappedIdentity: unmappedIdentity,
				},
				certExpires: now.Add(1 * time.Hour),
				teleportCluster: teleportClusterClient{
					name: "local",
				},
				sessionTTL: 1 * time.Hour,
			},
		},
		{
			name: "store certificate for different user",
			ctx: authContext{
				kubeUsers:   utils.StringsSet([]string{"user-test"}),
				kubeGroups:  utils.StringsSet([]string{"kube-group-b", teleport.KubeSystemAuthenticated}),
				kubeCluster: "local",
				Context: auth.Context{
					User: &types.UserV2{
						Metadata: types.Metadata{
							Name: "user-b",
						},
					},
					Identity:         identity,
					UnmappedIdentity: unmappedIdentity,
				},
				certExpires: now.Add(1 * time.Hour),
				teleportCluster: teleportClusterClient{
					name: "local",
				},
				sessionTTL: 1 * time.Hour,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Each context must start with a cache miss: no earlier test case
			// shares its key, so nothing stale is returned.
			cachedTLSCfg := f.getClientCreds(tt.ctx)
			require.Nil(t, cachedTLSCfg)

			// Request a fresh certificate for this context.
			tlsCfg, err := f.requestCertificate(tt.ctx)
			require.NoError(t, err)

			// Store the certificate in the credentials cache.
			err = f.saveClientCreds(tt.ctx, tlsCfg)
			require.NoError(t, err)

			// The entry must now be retrievable via the same context and
			// match what was stored.
			cachedTLSCfg = f.getClientCreds(tt.ctx)
			require.NotNil(t, cachedTLSCfg)
			require.Equal(t, tlsCfg, cachedTLSCfg)
		})
	}
}

0 comments on commit 0f1f41a

Please sign in to comment.