diff --git a/docs/operator-manual/user-management/index.md b/docs/operator-manual/user-management/index.md
index 22e3aa9dada98..6ccad26ee191e 100644
--- a/docs/operator-manual/user-management/index.md
+++ b/docs/operator-manual/user-management/index.md
@@ -90,6 +90,22 @@ argocd account update-password \
argocd account generate-token --account
```
+### Failed logins rate limiting
+
+Argo CD rejects login attempts after too many failed logins in order to prevent password brute-forcing.
+The following environment variables are available to control the throttling settings:
+
+* `ARGOCD_SESSION_FAILURE_MAX_FAIL_COUNT`: Maximum number of failed logins before Argo CD starts
+rejecting login attempts. Default: 5.
+
+* `ARGOCD_SESSION_FAILURE_WINDOW_SECONDS`: Number of seconds for the failure window.
+Default: 300 (5 minutes). If this is set to 0, the failure window is
+disabled and login attempts get rejected after the maximum number of consecutive
+login failures has been reached, regardless of the time frame in which they happened.
+
+* `ARGOCD_SESSION_MAX_CACHE_SIZE`: Maximum number of entries allowed in the
+cache. Default: 1000.
+
## SSO
There are two ways that SSO can be configured:
diff --git a/server/account/account_test.go b/server/account/account_test.go
index 91c522744264a..b31b03f250149 100644
--- a/server/account/account_test.go
+++ b/server/account/account_test.go
@@ -18,7 +18,6 @@ import (
"github.com/argoproj/argo-cd/pkg/apiclient/account"
sessionpkg "github.com/argoproj/argo-cd/pkg/apiclient/session"
"github.com/argoproj/argo-cd/server/session"
- "github.com/argoproj/argo-cd/util/cache"
"github.com/argoproj/argo-cd/util/password"
"github.com/argoproj/argo-cd/util/rbac"
sessionutil "github.com/argoproj/argo-cd/util/session"
@@ -64,8 +63,7 @@ func newTestAccountServerExt(ctx context.Context, enforceFn rbac.ClaimsEnforcerF
}
kubeclientset := fake.NewSimpleClientset(cm, secret)
settingsMgr := settings.NewSettingsManager(ctx, kubeclientset, testNamespace)
- sessionCache := cache.NewCache(cache.NewInMemoryCache(0))
- sessionMgr := sessionutil.NewSessionManager(settingsMgr, "", sessionCache)
+ sessionMgr := sessionutil.NewSessionManager(settingsMgr, "", sessionutil.NewInMemoryUserStateStorage())
enforcer := rbac.NewEnforcer(kubeclientset, testNamespace, common.ArgoCDRBACConfigMapName, nil)
enforcer.SetClaimsEnforcerFunc(enforceFn)
diff --git a/server/cache/cache.go b/server/cache/cache.go
index 4a0c62de97a61..0d6bb29593277 100644
--- a/server/cache/cache.go
+++ b/server/cache/cache.go
@@ -9,6 +9,8 @@ import (
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
cacheutil "github.com/argoproj/argo-cd/util/cache"
appstatecache "github.com/argoproj/argo-cd/util/cache/appstate"
+ "github.com/argoproj/argo-cd/util/oidc"
+ "github.com/argoproj/argo-cd/util/session"
)
var ErrCacheMiss = appstatecache.ErrCacheMiss
@@ -17,19 +19,16 @@ type Cache struct {
cache *appstatecache.Cache
connectionStatusCacheExpiration time.Duration
oidcCacheExpiration time.Duration
+ loginAttemptsExpiration time.Duration
}
func NewCache(
cache *appstatecache.Cache,
connectionStatusCacheExpiration time.Duration,
oidcCacheExpiration time.Duration,
+ loginAttemptsExpiration time.Duration,
) *Cache {
- return &Cache{cache, connectionStatusCacheExpiration, oidcCacheExpiration}
-}
-
-type OIDCState struct {
- // ReturnURL is the URL in which to redirect a user back to after completing an OAuth2 login
- ReturnURL string `json:"returnURL"`
+ return &Cache{cache, connectionStatusCacheExpiration, oidcCacheExpiration, loginAttemptsExpiration}
}
type ClusterInfo struct {
@@
-40,9 +39,11 @@ type ClusterInfo struct { func AddCacheFlagsToCmd(cmd *cobra.Command) func() (*Cache, error) { var connectionStatusCacheExpiration time.Duration var oidcCacheExpiration time.Duration + var loginAttemptsExpiration time.Duration cmd.Flags().DurationVar(&connectionStatusCacheExpiration, "connection-status-cache-expiration", 1*time.Hour, "Cache expiration for cluster/repo connection status") cmd.Flags().DurationVar(&oidcCacheExpiration, "oidc-cache-expiration", 3*time.Minute, "Cache expiration for OIDC state") + cmd.Flags().DurationVar(&loginAttemptsExpiration, "login-attempts-expiration", 24*time.Hour, "Cache expiration for failed login attempts") fn := appstatecache.AddCacheFlagsToCmd(cmd) @@ -52,7 +53,7 @@ func AddCacheFlagsToCmd(cmd *cobra.Command) func() (*Cache, error) { return nil, err } - return NewCache(cache, connectionStatusCacheExpiration, oidcCacheExpiration), nil + return NewCache(cache, connectionStatusCacheExpiration, oidcCacheExpiration, loginAttemptsExpiration), nil } } @@ -64,6 +65,14 @@ func (c *Cache) GetAppManagedResources(appName string, res *[]*appv1.ResourceDif return c.cache.GetAppManagedResources(appName, res) } +func (c *Cache) GetLoginAttempts(attempts *map[string]session.LoginAttempts) error { + return c.cache.GetItem("session|login.attempts", attempts) +} + +func (c *Cache) SetLoginAttempts(attempts map[string]session.LoginAttempts) error { + return c.cache.SetItem("session|login.attempts", attempts, c.loginAttemptsExpiration, attempts == nil) +} + func clusterConnectionStateKey(server string) string { return fmt.Sprintf("cluster|%s|connection-state", server) } @@ -96,13 +105,13 @@ func oidcStateKey(key string) string { return fmt.Sprintf("oidc|%s", key) } -func (c *Cache) GetOIDCState(key string) (*OIDCState, error) { - res := OIDCState{} +func (c *Cache) GetOIDCState(key string) (*oidc.OIDCState, error) { + res := oidc.OIDCState{} err := c.cache.GetItem(oidcStateKey(key), &res) return &res, err } -func (c *Cache) SetOIDCState(key string, state *OIDCState) error { +func (c *Cache) SetOIDCState(key string, state *oidc.OIDCState) error { return c.cache.SetItem(oidcStateKey(key), state, c.oidcCacheExpiration, state == nil) } diff --git a/server/cache/cache_test.go b/server/cache/cache_test.go index 913d090009456..5a7d3ef2ebb78 100644 --- a/server/cache/cache_test.go +++ b/server/cache/cache_test.go @@ -10,6 +10,7 @@ import ( . 
"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1" cacheutil "github.com/argoproj/argo-cd/util/cache" appstatecache "github.com/argoproj/argo-cd/util/cache/appstate" + "github.com/argoproj/argo-cd/util/oidc" ) type fixtures struct { @@ -24,6 +25,7 @@ func newFixtures() *fixtures { ), 1*time.Minute, 1*time.Minute, + 1*time.Minute, )} } @@ -67,7 +69,7 @@ func TestCache_GetOIDCState(t *testing.T) { _, err := cache.GetOIDCState("my-key") assert.Equal(t, ErrCacheMiss, err) // populate cache - err = cache.SetOIDCState("my-key", &OIDCState{ReturnURL: "my-return-url"}) + err = cache.SetOIDCState("my-key", &oidc.OIDCState{ReturnURL: "my-return-url"}) assert.NoError(t, err) //cache miss _, err = cache.GetOIDCState("other-key") @@ -75,7 +77,7 @@ func TestCache_GetOIDCState(t *testing.T) { // cache hit value, err := cache.GetOIDCState("my-key") assert.NoError(t, err) - assert.Equal(t, &OIDCState{ReturnURL: "my-return-url"}, value) + assert.Equal(t, &oidc.OIDCState{ReturnURL: "my-return-url"}, value) } func TestAddCacheFlagsToCmd(t *testing.T) { diff --git a/server/project/project_test.go b/server/project/project_test.go index edabbe2410be0..bb14819851a4f 100644 --- a/server/project/project_test.go +++ b/server/project/project_test.go @@ -6,9 +6,8 @@ import ( "strings" "testing" - "github.com/google/uuid" - "github.com/dgrijalva/jwt-go" + "github.com/google/uuid" "github.com/stretchr/testify/assert" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -23,7 +22,6 @@ import ( apps "github.com/argoproj/argo-cd/pkg/client/clientset/versioned/fake" "github.com/argoproj/argo-cd/util" "github.com/argoproj/argo-cd/util/assets" - "github.com/argoproj/argo-cd/util/cache" jwtutil "github.com/argoproj/argo-cd/util/jwt" "github.com/argoproj/argo-cd/util/rbac" "github.com/argoproj/argo-cd/util/session" @@ -32,11 +30,6 @@ import ( const testNamespace = "default" -func newSessionCache() *cache.Cache { - sessionCache := cache.NewCache(cache.NewInMemoryCache(0)) - return sessionCache -} - func TestProjectServer(t *testing.T) { kubeclientset := fake.NewSimpleClientset(&corev1.ConfigMap{ ObjectMeta: v1.ObjectMeta{ @@ -292,7 +285,7 @@ func TestProjectServer(t *testing.T) { id := "testId" t.Run("TestCreateTokenDenied", func(t *testing.T) { - sessionMgr := session.NewSessionManager(settingsMgr, "", newSessionCache()) + sessionMgr := session.NewSessionManager(settingsMgr, "", session.NewInMemoryUserStateStorage()) projectWithRole := existingProj.DeepCopy() projectWithRole.Spec.Roles = []v1alpha1.ProjectRole{{Name: tokenName}} projectServer := NewServer("default", fake.NewSimpleClientset(), apps.NewSimpleClientset(projectWithRole), enforcer, util.NewKeyLock(), sessionMgr) @@ -301,7 +294,7 @@ func TestProjectServer(t *testing.T) { }) t.Run("TestCreateTokenSuccessfullyUsingGroup", func(t *testing.T) { - sessionMgr := session.NewSessionManager(settingsMgr, "", newSessionCache()) + sessionMgr := session.NewSessionManager(settingsMgr, "", session.NewInMemoryUserStateStorage()) projectWithRole := existingProj.DeepCopy() projectWithRole.Spec.Roles = []v1alpha1.ProjectRole{{Name: tokenName, Groups: []string{"my-group"}}} projectServer := NewServer("default", fake.NewSimpleClientset(), apps.NewSimpleClientset(projectWithRole), enforcer, util.NewKeyLock(), sessionMgr) @@ -312,7 +305,7 @@ func TestProjectServer(t *testing.T) { _ = enforcer.SetBuiltinPolicy(`p, role:admin, projects, update, *, allow`) t.Run("TestCreateTokenSuccessfully", func(t *testing.T) { - sessionMgr := session.NewSessionManager(settingsMgr, "", 
newSessionCache()) + sessionMgr := session.NewSessionManager(settingsMgr, "", session.NewInMemoryUserStateStorage()) projectWithRole := existingProj.DeepCopy() projectWithRole.Spec.Roles = []v1alpha1.ProjectRole{{Name: tokenName}} projectServer := NewServer("default", fake.NewSimpleClientset(), apps.NewSimpleClientset(projectWithRole), enforcer, util.NewKeyLock(), sessionMgr) @@ -330,7 +323,7 @@ func TestProjectServer(t *testing.T) { }) t.Run("TestCreateTokenWithIDSuccessfully", func(t *testing.T) { - sessionMgr := session.NewSessionManager(settingsMgr, "", newSessionCache()) + sessionMgr := session.NewSessionManager(settingsMgr, "", session.NewInMemoryUserStateStorage()) projectWithRole := existingProj.DeepCopy() projectWithRole.Spec.Roles = []v1alpha1.ProjectRole{{Name: tokenName}} projectServer := NewServer("default", fake.NewSimpleClientset(), apps.NewSimpleClientset(projectWithRole), enforcer, util.NewKeyLock(), sessionMgr) @@ -348,7 +341,7 @@ func TestProjectServer(t *testing.T) { }) t.Run("TestCreateTokenWithSameIdDeny", func(t *testing.T) { - sessionMgr := session.NewSessionManager(settingsMgr, "", newSessionCache()) + sessionMgr := session.NewSessionManager(settingsMgr, "", session.NewInMemoryUserStateStorage()) projectWithRole := existingProj.DeepCopy() projectWithRole.Spec.Roles = []v1alpha1.ProjectRole{{Name: tokenName}} projectServer := NewServer("default", fake.NewSimpleClientset(), apps.NewSimpleClientset(projectWithRole), enforcer, util.NewKeyLock(), sessionMgr) @@ -373,7 +366,7 @@ func TestProjectServer(t *testing.T) { _ = enforcer.SetBuiltinPolicy(`p, *, *, *, *, deny`) t.Run("TestDeleteTokenDenied", func(t *testing.T) { - sessionMgr := session.NewSessionManager(settingsMgr, "", newSessionCache()) + sessionMgr := session.NewSessionManager(settingsMgr, "", session.NewInMemoryUserStateStorage()) projWithToken := existingProj.DeepCopy() issuedAt := int64(1) secondIssuedAt := issuedAt + 1 @@ -386,7 +379,7 @@ func TestProjectServer(t *testing.T) { }) t.Run("TestDeleteTokenSuccessfullyWithGroup", func(t *testing.T) { - sessionMgr := session.NewSessionManager(settingsMgr, "", newSessionCache()) + sessionMgr := session.NewSessionManager(settingsMgr, "", session.NewInMemoryUserStateStorage()) projWithToken := existingProj.DeepCopy() issuedAt := int64(1) secondIssuedAt := issuedAt + 1 @@ -402,7 +395,7 @@ func TestProjectServer(t *testing.T) { p, role:admin, projects, update, *, allow`) t.Run("TestDeleteTokenSuccessfully", func(t *testing.T) { - sessionMgr := session.NewSessionManager(settingsMgr, "", newSessionCache()) + sessionMgr := session.NewSessionManager(settingsMgr, "", session.NewInMemoryUserStateStorage()) projWithToken := existingProj.DeepCopy() issuedAt := int64(1) secondIssuedAt := issuedAt + 1 @@ -423,7 +416,7 @@ p, role:admin, projects, update, *, allow`) p, role:admin, projects, update, *, allow`) t.Run("TestDeleteTokenByIdSuccessfully", func(t *testing.T) { - sessionMgr := session.NewSessionManager(settingsMgr, "", newSessionCache()) + sessionMgr := session.NewSessionManager(settingsMgr, "", session.NewInMemoryUserStateStorage()) projWithToken := existingProj.DeepCopy() issuedAt := int64(1) secondIssuedAt := issuedAt + 1 @@ -446,7 +439,7 @@ p, role:admin, projects, update, *, allow`) enforcer = newEnforcer(kubeclientset) t.Run("TestCreateTwoTokensInRoleSuccess", func(t *testing.T) { - sessionMgr := session.NewSessionManager(settingsMgr, "", newSessionCache()) + sessionMgr := session.NewSessionManager(settingsMgr, "", session.NewInMemoryUserStateStorage()) 
projWithToken := existingProj.DeepCopy() tokenName := "testToken" token := v1alpha1.ProjectRole{Name: tokenName, JWTTokens: []v1alpha1.JWTToken{{IssuedAt: 1}}} @@ -611,7 +604,7 @@ p, role:admin, projects, update, *, allow`) }) t.Run("TestSyncWindowsActive", func(t *testing.T) { - sessionMgr := session.NewSessionManager(settingsMgr, "", newSessionCache()) + sessionMgr := session.NewSessionManager(settingsMgr, "", session.NewInMemoryUserStateStorage()) projectWithSyncWindows := existingProj.DeepCopy() projectWithSyncWindows.Spec.SyncWindows = v1alpha1.SyncWindows{} win := &v1alpha1.SyncWindow{Kind: "allow", Schedule: "* * * * *", Duration: "1h"} @@ -624,7 +617,7 @@ p, role:admin, projects, update, *, allow`) }) t.Run("TestGetSyncWindowsStateCannotGetProjectDetails", func(t *testing.T) { - sessionMgr := session.NewSessionManager(settingsMgr, "", newSessionCache()) + sessionMgr := session.NewSessionManager(settingsMgr, "", session.NewInMemoryUserStateStorage()) projectWithSyncWindows := existingProj.DeepCopy() projectWithSyncWindows.Spec.SyncWindows = v1alpha1.SyncWindows{} win := &v1alpha1.SyncWindow{Kind: "allow", Schedule: "* * * * *", Duration: "1h"} @@ -642,7 +635,7 @@ p, role:admin, projects, update, *, allow`) enforcer.SetClaimsEnforcerFunc(nil) ctx := context.WithValue(context.Background(), "claims", &jwt.MapClaims{"groups": []string{"my-group"}}) - sessionMgr := session.NewSessionManager(settingsMgr, "", newSessionCache()) + sessionMgr := session.NewSessionManager(settingsMgr, "", session.NewInMemoryUserStateStorage()) projectWithSyncWindows := existingProj.DeepCopy() win := &v1alpha1.SyncWindow{Kind: "allow", Schedule: "* * * * *", Duration: "1h"} projectWithSyncWindows.Spec.SyncWindows = append(projectWithSyncWindows.Spec.SyncWindows, win) diff --git a/server/server.go b/server/server.go index 0ffed34a628c2..8ef4ae2381a6c 100644 --- a/server/server.go +++ b/server/server.go @@ -74,7 +74,6 @@ import ( "github.com/argoproj/argo-cd/server/version" "github.com/argoproj/argo-cd/util" "github.com/argoproj/argo-cd/util/assets" - cacheutil "github.com/argoproj/argo-cd/util/cache" "github.com/argoproj/argo-cd/util/db" "github.com/argoproj/argo-cd/util/dex" dexutil "github.com/argoproj/argo-cd/util/dex" @@ -179,14 +178,7 @@ func NewServer(ctx context.Context, opts ArgoCDServerOpts) *ArgoCDServer { err = initializeDefaultProject(opts) errors.CheckError(err) - var smgrCache *cacheutil.Cache - if opts.Cache != nil && opts.Cache.GetCache() != nil { - smgrCache = opts.Cache.GetCache() - } else { - log.Warnf("Running session manager with InMemory cache, which is not recommended.") - smgrCache = cacheutil.NewCache(cacheutil.NewInMemoryCache(0)) - } - sessionMgr := util_session.NewSessionManager(settingsMgr, opts.DexServerAddr, smgrCache) + sessionMgr := util_session.NewSessionManager(settingsMgr, opts.DexServerAddr, opts.Cache) factory := appinformer.NewFilteredSharedInformerFactory(opts.AppClientset, 0, opts.Namespace, func(options *metav1.ListOptions) {}) projInformer := factory.Argoproj().V1alpha1().AppProjects().Informer() diff --git a/ui/src/app/login/components/login.tsx b/ui/src/app/login/components/login.tsx index 9556b3b3c60af..46e8491d9c954 100644 --- a/ui/src/app/login/components/login.tsx +++ b/ui/src/app/login/components/login.tsx @@ -18,6 +18,7 @@ export interface LoginForm { interface State { authSettings: AuthSettings; loginError: string; + loginInProgress: boolean; returnUrl: string; ssoLoginError: string; } @@ -36,7 +37,7 @@ export class Login extends React.Component, State> { 
constructor(props: RouteComponentProps<{}>) { super(props); - this.state = {authSettings: null, loginError: null, returnUrl: null, ssoLoginError: null}; + this.state = {authSettings: null, loginError: null, returnUrl: null, ssoLoginError: null, loginInProgress: false}; } public async componentDidMount() { @@ -93,7 +94,7 @@ export class Login extends React.Component, State> { {this.state.loginError &&
{this.state.loginError}
}
-
@@ -116,9 +117,10 @@ export class Login extends React.Component, State> { private async login(username: string, password: string, returnURL: string) { try { - this.setState({loginError: ''}); + this.setState({loginError: '', loginInProgress: true}); this.appContext.apis.navigation.goto('.', {sso_error: null}); await services.users.login(username, password); + this.setState({loginInProgress: false}); if (returnURL) { const url = new URL(returnURL); this.appContext.apis.navigation.goto(url.pathname + url.search); @@ -126,7 +128,7 @@ export class Login extends React.Component, State> { this.appContext.apis.navigation.goto('/applications'); } } catch (e) { - this.setState({loginError: e.response.body.error}); + this.setState({loginError: e.response.body.error, loginInProgress: false}); } } diff --git a/util/cache/appstate/cache.go b/util/cache/appstate/cache.go index eea4c2f90b26a..ca9300b16ae9e 100644 --- a/util/cache/appstate/cache.go +++ b/util/cache/appstate/cache.go @@ -21,11 +21,6 @@ func NewCache(cache *cacheutil.Cache, appStateCacheExpiration time.Duration) *Ca return &Cache{cache, appStateCacheExpiration} } -type OIDCState struct { - // ReturnURL is the URL in which to redirect a user back to after completing an OAuth2 login - ReturnURL string `json:"returnURL"` -} - func AddCacheFlagsToCmd(cmd *cobra.Command) func() (*Cache, error) { var appStateCacheExpiration time.Duration diff --git a/util/oidc/oidc.go b/util/oidc/oidc.go index 9bc0d0f41e3dd..44562780fda82 100644 --- a/util/oidc/oidc.go +++ b/util/oidc/oidc.go @@ -16,8 +16,8 @@ import ( "golang.org/x/oauth2" "github.com/argoproj/argo-cd/common" - servercache "github.com/argoproj/argo-cd/server/cache" "github.com/argoproj/argo-cd/server/settings/oidc" + appstatecache "github.com/argoproj/argo-cd/util/cache/appstate" "github.com/argoproj/argo-cd/util/dex" httputil "github.com/argoproj/argo-cd/util/http" "github.com/argoproj/argo-cd/util/jwt/zjwt" @@ -43,6 +43,16 @@ type ClaimsRequest struct { IDToken map[string]*oidc.Claim `json:"id_token"` } +type OIDCState struct { + // ReturnURL is the URL in which to redirect a user back to after completing an OAuth2 login + ReturnURL string `json:"returnURL"` +} + +type OIDCStateStorage interface { + GetOIDCState(key string) (*OIDCState, error) + SetOIDCState(key string, state *OIDCState) error +} + type ClientApp struct { // OAuth2 client ID of this application (e.g. argo-cd) clientID string @@ -65,7 +75,7 @@ type ClientApp struct { provider Provider // cache holds temporary nonce tokens to which hold application state values // See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. 
- cache *servercache.Cache + cache OIDCStateStorage } func GetScopesOrDefault(scopes []string) []string { @@ -77,7 +87,7 @@ func GetScopesOrDefault(scopes []string) []string { // NewClientApp will register the Argo CD client app (either via Dex or external OIDC) and return an // object which has HTTP handlers for handling the HTTP responses for login and callback -func NewClientApp(settings *settings.ArgoCDSettings, cache *servercache.Cache, dexServerAddr, baseHRef string) (*ClientApp, error) { +func NewClientApp(settings *settings.ArgoCDSettings, cache OIDCStateStorage, dexServerAddr, baseHRef string) (*ClientApp, error) { redirectURL, err := settings.RedirectURL() if err != nil { return nil, err @@ -145,7 +155,7 @@ func (a *ClientApp) generateAppState(returnURL string) string { if returnURL == "" { returnURL = a.baseHRef } - err := a.cache.SetOIDCState(randStr, &servercache.OIDCState{ReturnURL: returnURL}) + err := a.cache.SetOIDCState(randStr, &OIDCState{ReturnURL: returnURL}) if err != nil { // This should never happen with the in-memory cache log.Errorf("Failed to set app state: %v", err) @@ -153,10 +163,10 @@ func (a *ClientApp) generateAppState(returnURL string) string { return randStr } -func (a *ClientApp) verifyAppState(state string) (*servercache.OIDCState, error) { +func (a *ClientApp) verifyAppState(state string) (*OIDCState, error) { res, err := a.cache.GetOIDCState(state) if err != nil { - if err == servercache.ErrCacheMiss { + if err == appstatecache.ErrCacheMiss { return nil, fmt.Errorf("unknown app state %s", state) } else { return nil, fmt.Errorf("failed to verify app state %s: %v", state, err) diff --git a/util/session/sessionmanager.go b/util/session/sessionmanager.go index c1ee9df9fba33..d0a9b719282b7 100644 --- a/util/session/sessionmanager.go +++ b/util/session/sessionmanager.go @@ -12,16 +12,14 @@ import ( "strconv" "time" - "github.com/argoproj/argo-cd/server/rbacpolicy" - "github.com/dgrijalva/jwt-go" log "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/argoproj/argo-cd/common" - cacheutil "github.com/argoproj/argo-cd/util/cache" - appstate "github.com/argoproj/argo-cd/util/cache/appstate" + "github.com/argoproj/argo-cd/server/rbacpolicy" + "github.com/argoproj/argo-cd/util/cache/appstate" "github.com/argoproj/argo-cd/util/dex" httputil "github.com/argoproj/argo-cd/util/http" jwtutil "github.com/argoproj/argo-cd/util/jwt" @@ -32,10 +30,35 @@ import ( // SessionManager generates and validates JWT tokens for login sessions. 
type SessionManager struct {
- settingsMgr *settings.SettingsManager
- client *http.Client
- prov oidcutil.Provider
- cache *cacheutil.Cache
+ settingsMgr *settings.SettingsManager
+ client *http.Client
+ prov oidcutil.Provider
+ storage UserStateStorage
+ sleep func(d time.Duration)
+ verificationDelayNoiseEnabled bool
+}
+
+type inMemoryUserStateStorage struct {
+ attempts map[string]LoginAttempts
+}
+
+func NewInMemoryUserStateStorage() *inMemoryUserStateStorage {
+ return &inMemoryUserStateStorage{attempts: map[string]LoginAttempts{}}
+}
+
+func (storage *inMemoryUserStateStorage) GetLoginAttempts(attempts *map[string]LoginAttempts) error {
+ *attempts = storage.attempts
+ return nil
+}
+
+func (storage *inMemoryUserStateStorage) SetLoginAttempts(attempts map[string]LoginAttempts) error {
+ storage.attempts = attempts
+ return nil
+}
+
+type UserStateStorage interface {
+ GetLoginAttempts(attempts *map[string]LoginAttempts) error
+ SetLoginAttempts(attempts map[string]LoginAttempts) error
}
// LoginAttempts is a timestamped counter for failed login attempts
@@ -55,15 +78,11 @@ const (
blankPasswordError = "Blank passwords are not allowed"
accountDisabled = "Account %s is disabled"
usernameTooLongError = "Username is too long (%d bytes max)"
- // nolint:varcheck,deadcode,unused
- accountDelayed = "Too many login failures. Login for account %s is delayed by %d seconds"
)
const (
// Maximum length of username, too keep the cache's memory signature low
maxUsernameLength = 32
- // The key prefix to store login attempts in the cache
- loginAttemptsCacheKey = "session|login.attempts"
// The default maximum session cache size
defaultMaxCacheSize = 1000
// The default number of maximum login failures before delay kicks in
@@ -76,14 +95,21 @@ const (
defaultLoginDelayMax = 30
// The default time in seconds for the failure window
defaultFailureWindow = 300
+ // The minimum password verification delay
+ verificationDelayNoiseMin = 500 * time.Millisecond
+ // The maximum password verification delay
+ verificationDelayNoiseMax = 1000 * time.Millisecond
- // environment variables to control rate limiter behaviour
- envLoginDelayStart = "ARGOCD_SESSION_FAILURE_DELAY_START"
- envLoginDelayIncrease = "ARGOCD_SESSION_FAILURE_DELAY_INCREASE"
- envLoginDelayMax = "ARGOCD_SESSION_FAILURE_DELAY_MAX"
- envLoginMaxFailCount = "ARGOCD_SESSION_FAILURE_MAX_FAIL_COUNT"
- envLoginFailureWindow = "ARGOCD_SESSION_FAILURE_WINDOW"
- envLoginMaxCacheSize = "ARGOCD_SESSION_MAX_CACHE_SIZE"
+ // environment variables to control rate limiter behaviour:
+
+ // Max number of login failures before further login attempts are rejected
+ envLoginMaxFailCount = "ARGOCD_SESSION_FAILURE_MAX_FAIL_COUNT"
+
+ // Number of seconds for the failure window. Default: 300 (5 minutes).
+ envLoginFailureWindowSeconds = "ARGOCD_SESSION_FAILURE_WINDOW_SECONDS"
+
+ // Max number of stored usernames
+ envLoginMaxCacheSize = "ARGOCD_SESSION_MAX_CACHE_SIZE"
)
// Helper function to parse a number from an environment variable. Returns a
@@ -122,31 +148,18 @@ func getMaxLoginFailures() int {
return parseNumFromEnv(envLoginMaxFailCount, defaultMaxLoginFailures, 1, math.MaxInt32)
}
-// Returns the number of seconds login should be delayed after maximum number of failures has been reached
-func getLoginDelayStart() int {
- return parseNumFromEnv(envLoginDelayStart, defaultLoginDelayStart, 1, math.MaxInt32)
-}
-
-// Returns the number of seconds the delay shall be increased by on consecutive login failures
-func getLoginDelayIncrease() int {
- return parseNumFromEnv(envLoginDelayIncrease, defaultLoginDelayIncrease, 0, math.MaxInt32)
-}
-
-// Returns the number of maximum seconds the login is allowed to delay for
-func getLoginDelayMax() int {
- return parseNumFromEnv(envLoginDelayMax, defaultLoginDelayMax, 0, math.MaxInt32)
-}
-
-// Returns the number of maximum seconds the login is allowed to delay for
+// Returns the failure window for counting failed logins, as a time.Duration
func getLoginFailureWindow() time.Duration {
- return time.Duration(parseNumFromEnv(envLoginFailureWindow, defaultFailureWindow, 0, math.MaxInt32))
+ return time.Duration(parseNumFromEnv(envLoginFailureWindowSeconds, defaultFailureWindow, 0, math.MaxInt32))
}
// NewSessionManager creates a new session manager from Argo CD settings
-func NewSessionManager(settingsMgr *settings.SettingsManager, dexServerAddr string, cache *cacheutil.Cache) *SessionManager {
+func NewSessionManager(settingsMgr *settings.SettingsManager, dexServerAddr string, storage UserStateStorage) *SessionManager {
s := SessionManager{
- settingsMgr: settingsMgr,
- cache: cache,
+ settingsMgr: settingsMgr,
+ storage: storage,
+ sleep: time.Sleep,
+ verificationDelayNoiseEnabled: true,
}
settings, err := settingsMgr.GetSettings()
if err != nil {
@@ -257,10 +270,10 @@ func (mgr *SessionManager) Parse(tokenString string) (jwt.Claims, error) {
func (mgr *SessionManager) GetLoginFailures() map[string]LoginAttempts {
// Get failures from the cache
var failures map[string]LoginAttempts
- err := mgr.cache.GetItem(loginAttemptsCacheKey, &failures)
+ err := mgr.storage.GetLoginAttempts(&failures)
if err != nil {
if err != appstate.ErrCacheMiss {
- log.Errorf("Could not retrieve %s from cache: %s", loginAttemptsCacheKey, err.Error())
+ log.Errorf("Could not retrieve login attempts: %v", err)
}
failures = make(map[string]LoginAttempts)
}
@@ -334,9 +347,9 @@ func (mgr *SessionManager) updateFailureCount(username string, failed bool) {
}
}
- err := mgr.cache.SetItem(loginAttemptsCacheKey, failures, 0, false)
+ err := mgr.storage.SetLoginAttempts(failures)
if err != nil {
- log.Errorf("Could not update session cache: %s", err.Error())
+ log.Errorf("Could not update login attempts: %v", err)
}
}
@@ -352,16 +365,10 @@ func (mgr *SessionManager) getFailureCount(username string) LoginAttempts {
}
-// Calculate a login delay for the given login attempt
-func (mgr *SessionManager) calculateLogonDelay(attempt LoginAttempts) int {
+// Checks whether the given login attempt has exceeded the failed login limit within the failure window
+func (mgr *SessionManager) exceededFailedLoginAttempts(attempt LoginAttempts) bool {
maxFails := getMaxLoginFailures()
-
- delayStart := getLoginDelayStart()
- delayIncrease := getLoginDelayIncrease()
- delayMax := getLoginDelayMax()
failureWindow := getLoginFailureWindow()
- duration := 0
-
// Whether we are in the failure window for given attempt
inWindow := func() bool {
if failureWindow == 0 || time.Since(attempt.LastFailed).Seconds() <= float64(failureWindow) {
@@ -372,30 +379,40 @@ func (mgr *SessionManager) calculateLogonDelay(attempt LoginAttempts) int {
- // If we reached max failed attempts within failure window, we need to calc the delay
+ // If we reached max failed attempts within the failure window, the login is rejected
if attempt.FailCount >= maxFails && inWindow() { - increase := (attempt.FailCount - maxFails) * delayIncrease - if increase+delayStart > delayMax { - duration = delayMax - } else { - duration += delayStart + increase - } + return true } - return duration + return false } // VerifyUsernamePassword verifies if a username/password combo is correct func (mgr *SessionManager) VerifyUsernamePassword(username string, password string) error { + if password == "" { + return status.Errorf(codes.Unauthenticated, blankPasswordError) + } // Enforce maximum length of username on local accounts if len(username) > maxUsernameLength { return status.Errorf(codes.InvalidArgument, usernameTooLongError, maxUsernameLength) } - attempt := mgr.getFailureCount(username) + start := time.Now() + if mgr.verificationDelayNoiseEnabled { + defer func() { + // introduces random delay to protect from timing-based user enumeration attack + delayNanoseconds := verificationDelayNoiseMin.Nanoseconds() + + int64(rand.Intn(int(verificationDelayNoiseMax.Nanoseconds()-verificationDelayNoiseMin.Nanoseconds()))) + // take into account amount of time spent since the request start + delayNanoseconds = delayNanoseconds - time.Now().Sub(start).Nanoseconds() + if delayNanoseconds > 0 { + mgr.sleep(time.Duration(delayNanoseconds)) + } + }() + } - duration := mgr.calculateLogonDelay(attempt) - if duration > 0 { - log.Warnf("User %s had too many failed logins (%d), sleeping for %d seconds", username, attempt.FailCount, duration) - time.Sleep(time.Duration(duration) * time.Second) + attempt := mgr.getFailureCount(username) + if mgr.exceededFailedLoginAttempts(attempt) { + log.Warnf("User %s had too many failed logins (%d)", username, attempt.FailCount) + return status.Errorf(codes.Unauthenticated, invalidLoginError) } account, err := mgr.settingsMgr.GetAccount(username) @@ -404,14 +421,15 @@ func (mgr *SessionManager) VerifyUsernamePassword(username string, password stri mgr.updateFailureCount(username, true) err = status.Errorf(codes.Unauthenticated, invalidLoginError) } + // to prevent time-based user enumeration, we must perform a password + // hash cycle to keep response time consistent (if the function were + // to continue and not return here) + _, _ = passwordutil.HashPassword("for_consistent_response_time") return err } if !account.Enabled { return status.Errorf(codes.Unauthenticated, accountDisabled, username) } - if password == "" { - return status.Errorf(codes.Unauthenticated, blankPasswordError) - } valid, _ := passwordutil.VerifyPassword(password, account.PasswordHash) if !valid { diff --git a/util/session/sessionmanager_test.go b/util/session/sessionmanager_test.go index 7675408ed6a6a..1f95f94f4c7e7 100644 --- a/util/session/sessionmanager_test.go +++ b/util/session/sessionmanager_test.go @@ -9,30 +9,20 @@ import ( "testing" "time" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/go-redis/redis" - - "github.com/alicebob/miniredis" - "github.com/dgrijalva/jwt-go" "github.com/stretchr/testify/assert" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" "github.com/argoproj/argo-cd/common" "github.com/argoproj/argo-cd/errors" - "github.com/argoproj/argo-cd/util/cache" "github.com/argoproj/argo-cd/util/password" "github.com/argoproj/argo-cd/util/settings" ) -func newSessionCache() *cache.Cache { - return cache.NewCache(cache.NewInMemoryCache(0)) -} - func getKubeClient(pass 
string, enabled bool) *fake.Clientset { const defaultSecretKey = "Hello, world!" @@ -62,12 +52,18 @@ func getKubeClient(pass string, enabled bool) *fake.Clientset { }) } +func newSessionManager(settingsMgr *settings.SettingsManager, dexServerAddr string, storage UserStateStorage) *SessionManager { + mgr := NewSessionManager(settingsMgr, dexServerAddr, storage) + mgr.verificationDelayNoiseEnabled = false + return mgr +} + func TestSessionManager(t *testing.T) { const ( defaultSubject = "admin" ) settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClient("pass", true), "argocd") - mgr := NewSessionManager(settingsMgr, "", newSessionCache()) + mgr := newSessionManager(settingsMgr, "", NewInMemoryUserStateStorage()) token, err := mgr.Create(defaultSubject, 0, "") if err != nil { @@ -155,7 +151,7 @@ func TestVerifyUsernamePassword(t *testing.T) { t.Run(tc.name, func(t *testing.T) { settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClient(password, !tc.disabled), "argocd") - mgr := NewSessionManager(settingsMgr, "", newSessionCache()) + mgr := newSessionManager(settingsMgr, "", NewInMemoryUserStateStorage()) err := mgr.VerifyUsernamePassword(tc.userName, tc.password) @@ -170,15 +166,6 @@ func TestVerifyUsernamePassword(t *testing.T) { func TestCacheValueGetters(t *testing.T) { t.Run("Default values", func(t *testing.T) { - lds := getLoginDelayStart() - assert.Equal(t, defaultLoginDelayStart, lds) - - ldi := getLoginDelayIncrease() - assert.Equal(t, defaultLoginDelayIncrease, ldi) - - ldm := getLoginDelayMax() - assert.Equal(t, defaultLoginDelayMax, ldm) - mlf := getMaxLoginFailures() assert.Equal(t, defaultMaxLoginFailures, mlf) @@ -187,132 +174,88 @@ func TestCacheValueGetters(t *testing.T) { }) t.Run("Valid environment overrides", func(t *testing.T) { - os.Setenv(envLoginDelayStart, "5") - os.Setenv(envLoginDelayIncrease, "5") - os.Setenv(envLoginDelayMax, "5") os.Setenv(envLoginMaxFailCount, "5") os.Setenv(envLoginMaxCacheSize, "5") - lds := getLoginDelayStart() - assert.Equal(t, 5, lds) - - ldi := getLoginDelayIncrease() - assert.Equal(t, 5, ldi) - - ldm := getLoginDelayMax() - assert.Equal(t, 5, ldm) - mlf := getMaxLoginFailures() assert.Equal(t, 5, mlf) mcs := getMaximumCacheSize() assert.Equal(t, 5, mcs) - os.Setenv(envLoginDelayStart, "") - os.Setenv(envLoginDelayIncrease, "") - os.Setenv(envLoginDelayMax, "") os.Setenv(envLoginMaxFailCount, "") os.Setenv(envLoginMaxCacheSize, "") }) t.Run("Invalid environment overrides", func(t *testing.T) { - os.Setenv(envLoginDelayStart, "invalid") - os.Setenv(envLoginDelayIncrease, "invalid") - os.Setenv(envLoginDelayMax, "invalid") os.Setenv(envLoginMaxFailCount, "invalid") os.Setenv(envLoginMaxCacheSize, "invalid") - lds := getLoginDelayStart() - assert.Equal(t, defaultLoginDelayStart, lds) - - ldi := getLoginDelayIncrease() - assert.Equal(t, defaultLoginDelayIncrease, ldi) - - ldm := getLoginDelayMax() - assert.Equal(t, defaultLoginDelayMax, ldm) - mlf := getMaxLoginFailures() assert.Equal(t, defaultMaxLoginFailures, mlf) mcs := getMaximumCacheSize() assert.Equal(t, defaultMaxCacheSize, mcs) - os.Setenv(envLoginDelayStart, "") - os.Setenv(envLoginDelayIncrease, "") - os.Setenv(envLoginDelayMax, "") os.Setenv(envLoginMaxFailCount, "") os.Setenv(envLoginMaxCacheSize, "") }) t.Run("Less than allowed in environment overrides", func(t *testing.T) { - os.Setenv(envLoginDelayStart, "-1") - os.Setenv(envLoginDelayIncrease, "-1") - os.Setenv(envLoginDelayMax, "-1") os.Setenv(envLoginMaxFailCount, "-1") 
os.Setenv(envLoginMaxCacheSize, "-1") - lds := getLoginDelayStart() - assert.Equal(t, defaultLoginDelayStart, lds) - - ldi := getLoginDelayIncrease() - assert.Equal(t, defaultLoginDelayIncrease, ldi) - - ldm := getLoginDelayMax() - assert.Equal(t, defaultLoginDelayMax, ldm) - mlf := getMaxLoginFailures() assert.Equal(t, defaultMaxLoginFailures, mlf) mcs := getMaximumCacheSize() assert.Equal(t, defaultMaxCacheSize, mcs) - os.Setenv(envLoginDelayStart, "") - os.Setenv(envLoginDelayIncrease, "") - os.Setenv(envLoginDelayMax, "") os.Setenv(envLoginMaxFailCount, "") os.Setenv(envLoginMaxCacheSize, "") }) t.Run("Greater than allowed in environment overrides", func(t *testing.T) { - os.Setenv(envLoginDelayStart, fmt.Sprintf("%d", math.MaxInt32+1)) - os.Setenv(envLoginDelayIncrease, fmt.Sprintf("%d", math.MaxInt32+1)) - os.Setenv(envLoginDelayMax, fmt.Sprintf("%d", math.MaxInt32+1)) os.Setenv(envLoginMaxFailCount, fmt.Sprintf("%d", math.MaxInt32+1)) os.Setenv(envLoginMaxCacheSize, fmt.Sprintf("%d", math.MaxInt32+1)) - lds := getLoginDelayStart() - assert.Equal(t, defaultLoginDelayStart, lds) - - ldi := getLoginDelayIncrease() - assert.Equal(t, defaultLoginDelayIncrease, ldi) - - ldm := getLoginDelayMax() - assert.Equal(t, defaultLoginDelayMax, ldm) - mlf := getMaxLoginFailures() assert.Equal(t, defaultMaxLoginFailures, mlf) mcs := getMaximumCacheSize() assert.Equal(t, defaultMaxCacheSize, mcs) - os.Setenv(envLoginDelayStart, "") - os.Setenv(envLoginDelayIncrease, "") - os.Setenv(envLoginDelayMax, "") os.Setenv(envLoginMaxFailCount, "") os.Setenv(envLoginMaxCacheSize, "") }) } -func TestLoginRateLimiter(t *testing.T) { - mr, err := miniredis.Run() - if err != nil { - panic(err) +func TestRandomPasswordVerificationDelay(t *testing.T) { + var sleptFor time.Duration + settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClient("password", true), "argocd") + mgr := newSessionManager(settingsMgr, "", NewInMemoryUserStateStorage()) + mgr.verificationDelayNoiseEnabled = true + mgr.sleep = func(d time.Duration) { + sleptFor = d } - defer mr.Close() + for i := 0; i < 10; i++ { + sleptFor = 0 + start := time.Now() + if !assert.NoError(t, mgr.VerifyUsernamePassword("admin", "password")) { + return + } + totalDuration := time.Now().Sub(start) + sleptFor + assert.GreaterOrEqual(t, totalDuration.Nanoseconds(), verificationDelayNoiseMin.Nanoseconds()) + assert.LessOrEqual(t, totalDuration.Nanoseconds(), verificationDelayNoiseMax.Nanoseconds()) + } +} +func TestLoginRateLimiter(t *testing.T) { settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClient("password", true), "argocd") - mgr := NewSessionManager(settingsMgr, "", cache.NewCache(cache.NewRedisCache(redis.NewClient(&redis.Options{Addr: mr.Addr()}), 0))) + storage := NewInMemoryUserStateStorage() + + mgr := newSessionManager(settingsMgr, "", storage) t.Run("Test login delay valid user", func(t *testing.T) { for i := 0; i < getMaxLoginFailures(); i++ { @@ -320,37 +263,18 @@ func TestLoginRateLimiter(t *testing.T) { assert.Error(t, err) } - // Decrease delay to 3 seconds, so we don't waste too much time in tests - os.Setenv(envLoginDelayStart, "3") - - // The 11th time should have a delay, we test for it + // The 11th time should fail even if password is right { - start := time.Now() - err := mgr.VerifyUsernamePassword("admin", "wrong") + err := mgr.VerifyUsernamePassword("admin", "password") assert.Error(t, err) - end := time.Now() - assert.GreaterOrEqual(t, end.Sub(start).Seconds(), float64(3)) - } - - // Valid 
password should be validated with delay, too. Delay should have been increased - { - start := time.Now() - err = mgr.VerifyUsernamePassword("admin", "password") - assert.NoError(t, err) - end := time.Now() - assert.GreaterOrEqual(t, end.Unix()-start.Unix(), int64(3+getLoginDelayIncrease())) } + storage.attempts = map[string]LoginAttempts{} // Failed counter should have been reseted, should validate immediately { - start := time.Now() err := mgr.VerifyUsernamePassword("admin", "password") assert.NoError(t, err) - end := time.Now() - assert.LessOrEqual(t, end.Unix()-start.Unix(), int64(1)) } - - os.Setenv(envLoginDelayStart, "") }) t.Run("Test login delay invalid user", func(t *testing.T) { @@ -359,17 +283,8 @@ func TestLoginRateLimiter(t *testing.T) { assert.Error(t, err) } - // Decrease delay to 3 seconds, so we don't waste too much time in tests - os.Setenv(envLoginDelayStart, "3") - - // The 11th time should have a delay, we test for it - start := time.Now() - err = mgr.VerifyUsernamePassword("invalid", "wrong") + err := mgr.VerifyUsernamePassword("invalid", "wrong") assert.Error(t, err) - end := time.Now() - assert.GreaterOrEqual(t, end.Unix()-start.Unix(), int64(3)) - - os.Setenv(envLoginDelayStart, "") }) } @@ -379,21 +294,15 @@ func TestMaxUsernameLength(t *testing.T) { username += "a" } settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClient("password", true), "argocd") - mgr := NewSessionManager(settingsMgr, "", newSessionCache()) + mgr := newSessionManager(settingsMgr, "", NewInMemoryUserStateStorage()) err := mgr.VerifyUsernamePassword(username, "password") assert.Error(t, err) assert.Contains(t, err.Error(), fmt.Sprintf(usernameTooLongError, maxUsernameLength)) } func TestMaxCacheSize(t *testing.T) { - mr, err := miniredis.Run() - if err != nil { - panic(err) - } - defer mr.Close() - settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClient("password", true), "argocd") - mgr := NewSessionManager(settingsMgr, "", cache.NewCache(cache.NewRedisCache(redis.NewClient(&redis.Options{Addr: mr.Addr()}), 0))) + mgr := newSessionManager(settingsMgr, "", NewInMemoryUserStateStorage()) invalidUsers := []string{"invalid1", "invalid2", "invalid3", "invalid4", "invalid5", "invalid6", "invalid7"} // Temporarily decrease max cache size @@ -408,18 +317,12 @@ func TestMaxCacheSize(t *testing.T) { } func TestFailedAttemptsExpiry(t *testing.T) { - mr, err := miniredis.Run() - if err != nil { - panic(err) - } - defer mr.Close() - settingsMgr := settings.NewSettingsManager(context.Background(), getKubeClient("password", true), "argocd") - mgr := NewSessionManager(settingsMgr, "", cache.NewCache(cache.NewRedisCache(redis.NewClient(&redis.Options{Addr: mr.Addr()}), 0))) + mgr := newSessionManager(settingsMgr, "", NewInMemoryUserStateStorage()) invalidUsers := []string{"invalid1", "invalid2", "invalid3", "invalid4", "invalid5", "invalid6", "invalid7"} - os.Setenv(envLoginFailureWindow, "1") + os.Setenv(envLoginFailureWindowSeconds, "1") for _, user := range invalidUsers { err := mgr.VerifyUsernamePassword(user, "password") @@ -428,9 +331,9 @@ func TestFailedAttemptsExpiry(t *testing.T) { time.Sleep(2 * time.Second) - err = mgr.VerifyUsernamePassword("invalid8", "password") + err := mgr.VerifyUsernamePassword("invalid8", "password") assert.Error(t, err) assert.Len(t, mgr.GetLoginFailures(), 1) - os.Setenv(envLoginFailureWindow, "") + os.Setenv(envLoginFailureWindowSeconds, "") }
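A note on the new throttling behaviour: instead of progressively delaying logins, the session manager now rejects them outright once the failure counter crosses the limit inside the failure window. The sketch below is a self-contained illustration of that check, not the Argo CD package itself; the names mirror `exceededFailedLoginAttempts` and `LoginAttempts` from the diff, and the literal values stand in for `ARGOCD_SESSION_FAILURE_MAX_FAIL_COUNT` and `ARGOCD_SESSION_FAILURE_WINDOW_SECONDS`:

```go
package main

import (
	"fmt"
	"time"
)

// LoginAttempts mirrors the timestamped counter kept per username.
type LoginAttempts struct {
	LastFailed time.Time
	FailCount  int
}

// exceededFailedLoginAttempts mirrors the check that replaced the old delay
// calculation: once FailCount reaches maxFails inside the failure window,
// the login is rejected. A window of 0 disables the time bound entirely.
func exceededFailedLoginAttempts(a LoginAttempts, maxFails int, failureWindow time.Duration) bool {
	inWindow := failureWindow == 0 || time.Since(a.LastFailed) <= failureWindow
	return a.FailCount >= maxFails && inWindow
}

func main() {
	// Five recent failures with the default limit of 5 and a 300s window: rejected.
	recent := LoginAttempts{LastFailed: time.Now(), FailCount: 5}
	fmt.Println(exceededFailedLoginAttempts(recent, 5, 300*time.Second)) // true

	// The same counter outside the window no longer blocks the login.
	stale := LoginAttempts{LastFailed: time.Now().Add(-10 * time.Minute), FailCount: 5}
	fmt.Println(exceededFailedLoginAttempts(stale, 5, 300*time.Second)) // false
}
```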
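The `UserStateStorage` interface is what lets the same `SessionManager` run against Redis in the argocd-server (via the new `GetLoginAttempts`/`SetLoginAttempts` methods on `server/cache.Cache`) and against `NewInMemoryUserStateStorage` in tests. As a rough sketch of what a backend has to provide, here is a hypothetical mutex-guarded in-memory variant written against local copies of the types; it is illustrative only, not an importable drop-in:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// LoginAttempts mirrors the per-user failure counter from util/session.
type LoginAttempts struct {
	LastFailed time.Time
	FailCount  int
}

// UserStateStorage mirrors the interface NewSessionManager now accepts.
type UserStateStorage interface {
	GetLoginAttempts(attempts *map[string]LoginAttempts) error
	SetLoginAttempts(attempts map[string]LoginAttempts) error
}

// lockedUserStateStorage is a hypothetical variant of the in-memory storage
// that guards the map with a mutex so it can be shared across goroutines.
type lockedUserStateStorage struct {
	mu       sync.Mutex
	attempts map[string]LoginAttempts
}

func (s *lockedUserStateStorage) GetLoginAttempts(attempts *map[string]LoginAttempts) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	*attempts = s.attempts
	return nil
}

func (s *lockedUserStateStorage) SetLoginAttempts(attempts map[string]LoginAttempts) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.attempts = attempts
	return nil
}

// Compile-time check that the type satisfies the interface.
var _ UserStateStorage = (*lockedUserStateStorage)(nil)

func main() {
	storage := &lockedUserStateStorage{attempts: map[string]LoginAttempts{}}
	_ = storage.SetLoginAttempts(map[string]LoginAttempts{
		"admin": {LastFailed: time.Now(), FailCount: 1},
	})
	var got map[string]LoginAttempts
	_ = storage.GetLoginAttempts(&got)
	fmt.Println(got["admin"].FailCount) // 1
}
```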
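The randomized delay added to `VerifyUsernamePassword`, together with the extra password-hash cycle for unknown accounts, keeps response times inside a fixed band so an attacker cannot tell existing usernames from unknown ones by timing. A standalone sketch of the same padding idea, using the 500ms-1000ms bounds from the diff and an injectable sleep like the manager's `sleep` field:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

const (
	verificationDelayNoiseMin = 500 * time.Millisecond
	verificationDelayNoiseMax = 1000 * time.Millisecond
)

// padVerification sleeps for whatever is left of a randomly chosen delay in
// [min, max), counted from start, mirroring the deferred noise delay that
// VerifyUsernamePassword applies once its real work is done.
func padVerification(start time.Time, sleep func(time.Duration)) {
	delay := verificationDelayNoiseMin +
		time.Duration(rand.Int63n(int64(verificationDelayNoiseMax-verificationDelayNoiseMin)))
	if remaining := delay - time.Since(start); remaining > 0 {
		sleep(remaining)
	}
}

func main() {
	start := time.Now()
	// ...password lookup and hash verification would happen here...
	padVerification(start, time.Sleep)
	fmt.Printf("responded after %v\n", time.Since(start))
}
```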