From 24e6807939c23b40c68fc7b3fd85e7f3df6e71bc Mon Sep 17 00:00:00 2001 From: Rohith Date: Sun, 11 Jun 2017 19:02:13 +0100 Subject: [PATCH 1/3] Zap Logging - removing the github.com/Sirupsen/logrus logger and replacing zap --- doc.go | 3 + forwarding.go | 91 +++++++--------- handlers.go | 84 +++++++-------- middleware.go | 158 +++++++++++++-------------- middleware_test.go | 6 +- misc.go | 11 +- rotation.go | 27 ++--- rotation_test.go | 7 +- server.go | 259 ++++++++++++++++++++++++++++----------------- server_test.go | 6 +- session.go | 13 ++- store_boltdb.go | 17 --- store_redis.go | 17 --- stores.go | 6 +- utils.go | 64 ----------- utils_test.go | 9 -- 16 files changed, 353 insertions(+), 425 deletions(-) diff --git a/doc.go b/doc.go index f6abfcc2..d7c9d904 100644 --- a/doc.go +++ b/doc.go @@ -236,6 +236,9 @@ type Config struct { ForwardingPassword string `json:"forwarding-password" yaml:"forwarding-password" usage:"password to use when logging into the openid provider"` // ForwardingDomains is a collection of domains to signs ForwardingDomains []string `json:"forwarding-domains" yaml:"forwarding-domains" usage:"list of domains which should be signed; everything else is relayed unsigned"` + + // DisableAllLogging indicates no logging at all + DisableAllLogging bool `json:"disable-all-logging" yaml:"disable-all-logging" usage:"disables all logging to stdout and stderr"` } // getVersion returns the proxy version diff --git a/forwarding.go b/forwarding.go index 296d900f..43f1741b 100644 --- a/forwarding.go +++ b/forwarding.go @@ -20,10 +20,10 @@ import ( "net/http" "time" - log "github.com/Sirupsen/logrus" "github.com/gambol99/go-oidc/jose" "github.com/gambol99/go-oidc/oidc" "github.com/labstack/echo" + "go.uber.org/zap" ) // proxyMiddleware is responsible for handles reverse proxy request to the upstream endpoint @@ -39,9 +39,9 @@ func (r *oauthProxy) proxyMiddleware() echo.MiddlewareFunc { // step: is this connection upgrading? 
if isUpgradedConnection(cx.Request()) { - log.Debugf("upgrading the connnection to %s", cx.Request().Header.Get(headerUpgrade)) + r.log.Debug("upgrading the connnection", zap.String("client_ip", cx.RealIP())) if err := tryUpdateConnection(cx.Request(), cx.Response().Writer, r.endpoint); err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Errorf("failed to upgrade the connection") + r.log.Error("failed to upgrade connection", zap.Error(err)) cx.NoContent(http.StatusInternalServerError) return nil } @@ -73,9 +73,7 @@ func (r *oauthProxy) forwardProxyHandler() func(*http.Request, *http.Response) { // step: create oauth client client, err := r.client.OAuthClient() if err != nil { - log.WithFields(log.Fields{ - "error": err.Error(), - }).Fatal("failed to create an oauth client") + r.log.Fatal("failed to create oauth client", zap.Error(err)) } // step: the loop state @@ -102,17 +100,13 @@ func (r *oauthProxy) forwardProxyHandler() func(*http.Request, *http.Response) { // step: do we have a access token if state.login { - log.WithFields(log.Fields{ - "username": r.config.ForwardingUsername, - }).Infof("requesting access token for user") + r.log.Info("requesting access token for user", + zap.String("username", r.config.ForwardingUsername)) // step: login into the service resp, err := client.UserCredsToken(r.config.ForwardingUsername, r.config.ForwardingPassword) if err != nil { - log.WithFields(log.Fields{ - "error": err.Error(), - }).Error("failed to login to authentication service") - + r.log.Error("failed to login to authentication service", zap.Error(err)) // step: back-off and reschedule <-time.After(time.Duration(5) * time.Second) continue @@ -121,10 +115,7 @@ func (r *oauthProxy) forwardProxyHandler() func(*http.Request, *http.Response) { // step: parse the token token, identity, err := parseToken(resp.AccessToken) if err != nil { - log.WithFields(log.Fields{ - "error": err.Error(), - }).Errorf("failed to parse the access token") - + r.log.Error("failed to parse the access token", zap.Error(err)) // step: we should probably hope and reschedule here <-time.After(time.Duration(5) * time.Second) continue @@ -138,25 +129,22 @@ func (r *oauthProxy) forwardProxyHandler() func(*http.Request, *http.Response) { state.login = false state.refresh = resp.RefreshToken - log.WithFields(log.Fields{ - "subject": state.identity.ID, - "email": state.identity.Email, - "expires": state.expiration.Format(time.RFC3339), - }).Infof("successfully retrieved access token for subject") + r.log.Info("successfully retrieved access token for subject", + zap.String("subject", state.identity.ID), + zap.String("email", state.identity.Email), + zap.String("expires", state.expiration.Format(time.RFC3339))) } else { - log.WithFields(log.Fields{ - "subject": state.identity.ID, - "email": state.identity.Email, - }).Infof("access token is about to expiry") + r.log.Info("access token is about to expiry", + zap.String("subject", state.identity.ID), + zap.String("email", state.identity.Email)) // step: if we a have a refresh token, we need to login again if state.refresh != "" { - log.WithFields(log.Fields{ - "subject": state.identity.ID, - "email": state.identity.Email, - "expires": state.expiration.Format(time.RFC3339), - }).Infof("attempting to refresh the access token") + r.log.Info("attempting to refresh the access token", + zap.String("subject", state.identity.ID), + zap.String("email", state.identity.Email), + zap.String("expires", state.expiration.Format(time.RFC3339))) // step: attempt to refresh the access 
token, expiration, err := getRefreshedToken(r.client, state.refresh) @@ -164,14 +152,11 @@ func (r *oauthProxy) forwardProxyHandler() func(*http.Request, *http.Response) { state.login = true switch err { case ErrRefreshTokenExpired: - log.WithFields(log.Fields{ - "subject": state.identity.ID, - "email": state.identity.Email, - }).Warningf("the refresh token has expired, need to login again") + r.log.Warn("the refresh token has expired, need to login again", + zap.String("subject", state.identity.ID), + zap.String("email", state.identity.Email)) default: - log.WithFields(log.Fields{ - "error": err.Error(), - }).Errorf("failed to refresh the access token") + r.log.Error("failed to refresh the access token", zap.Error(err)) } continue } @@ -183,33 +168,29 @@ func (r *oauthProxy) forwardProxyHandler() func(*http.Request, *http.Response) { state.login = false // step: add some debugging - log.WithFields(log.Fields{ - "subject": state.identity.ID, - "email": state.identity.Email, - "expires": state.expiration.Format(time.RFC3339), - }).Infof("successfully refreshed the access token") + r.log.Info("successfully refreshed the access token", + zap.String("subject", state.identity.ID), + zap.String("email", state.identity.Email), + zap.String("expires", state.expiration.Format(time.RFC3339))) } else { - log.WithFields(log.Fields{ - "subject": state.identity.ID, - "email": state.identity.Email, - }).Infof("session does not support refresh token, acquiring new token") + r.log.Info("session does not support refresh token, acquiring new token", + zap.String("subject", state.identity.ID), + zap.String("email", state.identity.Email)) - // step: we don't have a refresh token, we must perform a login again + // we don't have a refresh token, we must perform a login again state.wait = false state.login = true } } - // step: wait for an expiration to come close + // wait for an expiration to come close if state.wait { - // step: set the expiration of the access token within a random 85% of actual expiration + // set the expiration of the access token within a random 85% of actual expiration duration := getWithin(state.expiration, 0.85) - - log.WithFields(log.Fields{ - "token_expiration": state.expiration.Format(time.RFC3339), - "renewel_duration": duration.String(), - }).Infof("waiting for expiration of access token") + r.log.Info("waiting for expiration of access token", + zap.String("token_expiration", state.expiration.Format(time.RFC3339)), + zap.String("renewel_duration", duration.String())) <-time.After(duration) } diff --git a/handlers.go b/handlers.go index 9a5ef1e4..297920cb 100644 --- a/handlers.go +++ b/handlers.go @@ -29,9 +29,9 @@ import ( "strings" "time" - log "github.com/Sirupsen/logrus" "github.com/gambol99/go-oidc/oauth2" "github.com/labstack/echo" + "go.uber.org/zap" ) // getRedirectionURL returns the redirectionURL for the oauth flow @@ -78,7 +78,7 @@ func (r *oauthProxy) oauthHandler(cx echo.Context) error { return r.tokenHandler(cx) case metricsURL: if r.config.EnableMetrics { - return r.metricsHandler(cx) + return r.proxyMetricsHandler(cx) } default: return cx.NoContent(http.StatusNotFound) @@ -104,10 +104,7 @@ func (r *oauthProxy) oauthAuthorizationHandler(cx echo.Context) error { } client, err := r.getOAuthClient(r.getRedirectionURL(cx)) if err != nil { - log.WithFields(log.Fields{ - "error": err.Error(), - }).Errorf("failed to retrieve the oauth client for authorization") - + r.log.Error("failed to retrieve the oauth client for authorization", zap.String("error", err.Error())) return 
cx.NoContent(http.StatusInternalServerError) } @@ -118,11 +115,10 @@ func (r *oauthProxy) oauthAuthorizationHandler(cx echo.Context) error { } authURL := client.AuthCodeURL(cx.QueryParam("state"), accessType, "") - log.WithFields(log.Fields{ - "access_type": accessType, - "auth-url": authURL, - "client_ip": cx.RealIP(), - }).Debugf("incoming authorization request from client address: %s", cx.RealIP()) + r.log.Debug("incoming authorization request from client address", + zap.String("access_type", accessType), + zap.String("auth_url", authURL), + zap.String("client_ip", cx.RealIP())) // step: if we have a custom sign in page, lets display that if r.config.hasCustomSignInPage() { @@ -148,13 +144,13 @@ func (r *oauthProxy) oauthCallbackHandler(cx echo.Context) error { client, err := r.getOAuthClient(r.getRedirectionURL(cx)) if err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Errorf("unable to create a oauth2 client") + r.log.Error("unable to create a oauth2 client", zap.String("error", err.Error())) return cx.NoContent(http.StatusInternalServerError) } resp, err := exchangeAuthenticationCode(client, code) if err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Errorf("unable to exchange code for access token") + r.log.Error("unable to exchange code for access token", zap.String("error", err.Error())) return r.accessForbidden(cx) } @@ -163,7 +159,7 @@ func (r *oauthProxy) oauthCallbackHandler(cx echo.Context) error { // to the ID Token. token, identity, err := parseToken(resp.IDToken) if err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Errorf("unable to parse id token for identity") + r.log.Error("unable to parse id token for identity", zap.String("error", err.Error())) return r.accessForbidden(cx) } access, id, err := parseToken(resp.AccessToken) @@ -171,12 +167,12 @@ func (r *oauthProxy) oauthCallbackHandler(cx echo.Context) error { token = access identity = id } else { - log.WithFields(log.Fields{"error": err.Error()}).Warn("unable to parse the access token, using id token only") + r.log.Warn("unable to parse the access token, using id token only", zap.String("error", err.Error())) } // step: check the access token is valid if err = verifyToken(r.client, token); err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Errorf("unable to verify the id token") + r.log.Error("unable to verify the id token", zap.String("error", err.Error())) return r.accessForbidden(cx) } accessToken := token.Encode() @@ -184,23 +180,22 @@ func (r *oauthProxy) oauthCallbackHandler(cx echo.Context) error { // step: are we encrypting the access token? if r.config.EnableEncryptedToken { if accessToken, err = encodeText(accessToken, r.config.EncryptionKey); err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Error("unable to encode the access token") + r.log.Error("unable to encode the access token", zap.String("error", err.Error())) return cx.NoContent(http.StatusInternalServerError) } } - log.WithFields(log.Fields{ - "email": identity.Email, - "expires": identity.ExpiresAt.Format(time.RFC3339), - "duration": time.Until(identity.ExpiresAt).String(), - }).Info("issuing access token for user") + r.log.Info("issuing access token for user", + zap.String("email", identity.Email), + zap.String("expires", identity.ExpiresAt.Format(time.RFC3339)), + zap.String("duration", time.Until(identity.ExpiresAt).String())) // step: does the response has a refresh token and we are NOT ignore refresh tokens? 
if r.config.EnableRefreshTokens && resp.RefreshToken != "" { var encrypted string encrypted, err = encodeText(resp.RefreshToken, r.config.EncryptionKey) if err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Errorf("failed to encrypt the refresh token") + r.log.Error("failed to encrypt the refresh token", zap.String("error", err.Error())) return cx.NoContent(http.StatusInternalServerError) } // drop in the access token - cookie expiration = access token @@ -209,7 +204,7 @@ func (r *oauthProxy) oauthCallbackHandler(cx echo.Context) error { switch r.useStore() { case true: if err = r.StoreRefreshToken(token, encrypted); err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Warnf("failed to save the refresh token in the store") + r.log.Warn("failed to save the refresh token in the store", zap.String("error", err.Error())) } default: // notes: not all idp refresh tokens are readable, google for example, so we attempt to decode into @@ -229,10 +224,9 @@ func (r *oauthProxy) oauthCallbackHandler(cx echo.Context) error { if cx.QueryParam("state") != "" { decoded, err := base64.StdEncoding.DecodeString(cx.QueryParam("state")) if err != nil { - log.WithFields(log.Fields{ - "state": cx.QueryParam("state"), - "error": err.Error(), - }).Warnf("unable to decode the state parameter") + r.log.Warn("unable to decode the state parameter", + zap.String("state", cx.QueryParam("state")), + zap.String("error", err.Error())) } else { state = string(decoded) } @@ -284,10 +278,9 @@ func (r *oauthProxy) loginHandler(cx echo.Context) error { return "", http.StatusOK, nil }() if err != nil { - log.WithFields(log.Fields{ - "client_ip": cx.RealIP(), - "error": err.Error, - }).Errorf(errorMsg) + r.log.Error(errorMsg, + zap.String("client_ip", cx.RealIP()), + zap.String("error", err.Error())) return cx.NoContent(code) } @@ -324,9 +317,7 @@ func (r *oauthProxy) logoutHandler(cx echo.Context) error { if r.useStore() { go func() { if err := r.DeleteRefreshToken(user.token); err != nil { - log.WithFields(log.Fields{ - "error": err.Error(), - }).Errorf("unable to remove the refresh token from store") + r.log.Error("unable to remove the refresh token from store", zap.String("error", err.Error())) } }() } @@ -336,7 +327,7 @@ func (r *oauthProxy) logoutHandler(cx echo.Context) error { if revocationURL != "" { client, err := r.client.OAuthClient() if err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Errorf("unable to retrieve the openid client") + r.log.Error("unable to retrieve the openid client", zap.String("error", err.Error())) return cx.NoContent(http.StatusInternalServerError) } @@ -349,7 +340,7 @@ func (r *oauthProxy) logoutHandler(cx echo.Context) error { request, err := http.NewRequest(http.MethodPost, revocationURL, bytes.NewBufferString(fmt.Sprintf("refresh_token=%s", identityToken))) if err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Errorf("unable to construct the revocation request") + r.log.Error("unable to construct the revocation request", zap.String("error", err.Error())) return cx.NoContent(http.StatusInternalServerError) } // step: add the authentication headers and content-type @@ -358,22 +349,19 @@ func (r *oauthProxy) logoutHandler(cx echo.Context) error { response, err := client.HttpClient().Do(request) if err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Errorf("unable to post to revocation endpoint") + r.log.Error("unable to post to revocation endpoint", zap.String("error", err.Error())) return nil } // step: check the response switch 
response.StatusCode { case http.StatusNoContent: - log.WithFields(log.Fields{ - "user": user.email, - }).Infof("successfully logged out of the endpoint") + r.log.Info("successfully logged out of the endpoint", zap.String("email", user.email)) default: content, _ := ioutil.ReadAll(response.Body) - log.WithFields(log.Fields{ - "status": response.StatusCode, - "response": fmt.Sprintf("%s", content), - }).Errorf("invalid response from revocation endpoint") + r.log.Error("invalid response from revocation endpoint", + zap.Int("status", response.StatusCode), + zap.String("response", fmt.Sprintf("%s", content))) } } // step: should we redirect the user @@ -452,14 +440,14 @@ func (r *oauthProxy) debugHandler(cx echo.Context) error { return nil } -// metricsHandler forwards the request into the prometheus handler -func (r *oauthProxy) metricsHandler(cx echo.Context) error { +// proxyMetricsHandler forwards the request into the prometheus handler +func (r *oauthProxy) proxyMetricsHandler(cx echo.Context) error { if r.config.LocalhostMetrics { if !net.ParseIP(cx.RealIP()).IsLoopback() { return r.accessForbidden(cx) } } - r.prometheusHandler.ServeHTTP(cx.Response().Writer, cx.Request()) + r.metricsHandler.ServeHTTP(cx.Response().Writer, cx.Request()) return nil } diff --git a/middleware.go b/middleware.go index 309ab6a6..3299c0c9 100644 --- a/middleware.go +++ b/middleware.go @@ -23,11 +23,11 @@ import ( "time" "github.com/PuerkitoBio/purell" - log "github.com/Sirupsen/logrus" "github.com/gambol99/go-oidc/jose" "github.com/labstack/echo" "github.com/prometheus/client_golang/prometheus" "github.com/unrolled/secure" + "go.uber.org/zap" ) const normalizeFlags purell.NormalizationFlags = purell.FlagRemoveDotSegments | purell.FlagRemoveDuplicateSlashes @@ -76,14 +76,16 @@ func (r *oauthProxy) loggingMiddleware() echo.MiddlewareFunc { next(cx) latency := time.Since(start) addr := cx.RealIP() - log.WithFields(log.Fields{ - "client_ip": addr, - "method": cx.Request().Method, - "status": cx.Response().Status, - "bytes": cx.Response().Size, - "path": cx.Request().URL.Path, - "latency": latency.String(), - }).Infof("[%d] |%s| |%10v| %-5s %s", cx.Response().Status, addr, latency, cx.Request().Method, cx.Request().URL.Path) + //msg := .Infof("[%d] |%s| |%10v| %-5s %s", cx.Response().Status, addr, latency, cx.Request().Method, cx.Request().URL.Path) + r.log.Info("client request", + zap.Int("response", cx.Response().Status), + zap.String("path", cx.Request().URL.Path), + zap.String("client_ip", addr), + zap.String("method", cx.Request().Method), + zap.Int("status", cx.Response().Status), + zap.Int64("bytes", cx.Response().Size), + zap.String("path", cx.Request().URL.Path), + zap.String("latency", latency.String())) return nil } @@ -92,7 +94,8 @@ func (r *oauthProxy) loggingMiddleware() echo.MiddlewareFunc { // metricsMiddleware is responsible for collecting metrics func (r *oauthProxy) metricsMiddleware() echo.MiddlewareFunc { - log.Infof("enabled the service metrics middleware, available on %s%s", oauthURL, metricsURL) + r.log.Info("enabled the service metrics middleware, available on", + zap.String("path", fmt.Sprintf("%s%s", oauthURL, metricsURL))) statusMetrics := prometheus.NewCounterVec( prometheus.CounterOpts{ @@ -120,20 +123,19 @@ func (r *oauthProxy) authenticationMiddleware(resource *Resource) echo.Middlewar // step: grab the user identity from the request user, err := r.getIdentity(cx.Request()) if err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Errorf("no session found in request, 
redirecting for authorization") + r.log.Error("no session found in request, redirecting for authorization", zap.Error(err)) return r.redirectToAuthorization(cx) } cx.Set(userContextName, user) // step: skip if we are running skip-token-verification if r.config.SkipTokenVerification { - log.Warnf("skip token verification enabled, skipping verification - TESTING ONLY") + r.log.Warn("skip token verification enabled, skipping verification - TESTING ONLY") if user.isExpired() { - log.WithFields(log.Fields{ - "client_ip": clientIP, - "username": user.name, - "expired_on": user.expiresAt.String(), - }).Errorf("the session has expired and verification switch off") + r.log.Error("the session has expired and verification switch off", + zap.String("client_ip", clientIP), + zap.String("username", user.name), + zap.String("expired_on", user.expiresAt.String())) return r.redirectToAuthorization(cx) } } else { @@ -142,36 +144,32 @@ func (r *oauthProxy) authenticationMiddleware(resource *Resource) echo.Middlewar // expired error we immediately throw an access forbidden - as there is // something messed up in the token if err != ErrAccessTokenExpired { - log.WithFields(log.Fields{ - "client_ip": clientIP, - "error": err.Error(), - }).Errorf("access token failed verification") + r.log.Error("access token failed verification", + zap.String("client_ip", clientIP), + zap.Error(err)) return r.accessForbidden(cx) } // step: check if we are refreshing the access tokens and if not re-auth if !r.config.EnableRefreshTokens { - log.WithFields(log.Fields{ - "client_ip": clientIP, - "email": user.name, - "expired_on": user.expiresAt.String(), - }).Errorf("session expired and access token refreshing is disabled") + r.log.Error("session expired and access token refreshing is disabled", + zap.String("client_ip", clientIP), + zap.String("email", user.name), + zap.String("expired_on", user.expiresAt.String())) return r.redirectToAuthorization(cx) } - log.WithFields(log.Fields{ - "client_ip": clientIP, - "email": user.email, - }).Infof("accces token for user has expired, attemping to refresh the token") + r.log.Info("accces token for user has expired, attemping to refresh the token", + zap.String("client_ip", clientIP), + zap.String("email", user.email)) // step: check if the user has refresh token refresh, encrypted, err := r.retrieveRefreshToken(cx.Request(), user) if err != nil { - log.WithFields(log.Fields{ - "client_ip": clientIP, - "email": user.email, - "error": err.Error(), - }).Errorf("unable to find a refresh token for user") + r.log.Error("unable to find a refresh token for user", + zap.String("client_ip", clientIP), + zap.String("email", user.email), + zap.Error(err)) return r.redirectToAuthorization(cx) } @@ -180,31 +178,29 @@ func (r *oauthProxy) authenticationMiddleware(resource *Resource) echo.Middlewar if err != nil { switch err { case ErrRefreshTokenExpired: - log.WithFields(log.Fields{ - "client_ip": clientIP, - "email": user.email, - }).Warningf("refresh token has expired, cannot retrieve access token") + r.log.Warn("refresh token has expired, cannot retrieve access token", + zap.String("client_ip", clientIP), + zap.String("email", user.email)) r.clearAllCookies(cx.Request(), cx.Response().Writer) default: - log.WithFields(log.Fields{"error": err.Error()}).Errorf("failed to refresh the access token") + r.log.Error("failed to refresh the access token", zap.Error(err)) } return r.redirectToAuthorization(cx) } // get the expiration of the new access token expiresIn := r.getAccessCookieExpiration(token, 
refresh) - log.WithFields(log.Fields{ - "client_ip": clientIP, - "cookie_name": r.config.CookieAccessName, - "email": user.email, - "expires_in": time.Until(exp), - }).Infof("injecting the refreshed access token cookie") + r.log.Info("injecting the refreshed access token cookie", + zap.String("client_ip", clientIP), + zap.String("cookie_name", r.config.CookieAccessName), + zap.String("email", user.email), + zap.Duration("expires_in", time.Until(exp))) accessToken := token.Encode() if r.config.EnableEncryptedToken { if accessToken, err = encodeText(accessToken, r.config.EncryptionKey); err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Error("unable to encode the access token") + r.log.Error("unable to encode the access token", zap.Error(err)) return cx.NoContent(http.StatusInternalServerError) } } @@ -214,10 +210,10 @@ func (r *oauthProxy) authenticationMiddleware(resource *Resource) echo.Middlewar if r.useStore() { go func(old, new jose.JWT, encrypted string) { if err := r.DeleteRefreshToken(old); err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Errorf("failed to remove old token") + r.log.Error("failed to remove old token", zap.Error(err)) } if err := r.StoreRefreshToken(new, encrypted); err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Errorf("failed to store refresh token") + r.log.Error("failed to store refresh token", zap.Error(err)) return } }(user.token, token, encrypted) @@ -249,12 +245,11 @@ func (r *oauthProxy) admissionMiddleware(resource *Resource) echo.MiddlewareFunc // step: we need to check the roles if roles := len(resource.Roles); roles > 0 { if !hasRoles(resource.Roles, user.roles) { - log.WithFields(log.Fields{ - "access": "denied", - "email": user.email, - "resource": resource.URL, - "required": resource.getRoles(), - }).Warnf("access denied, invalid roles") + r.log.Warn("access denied, invalid roles", + zap.String("access", "denied"), + zap.String("email", user.email), + zap.String("resource", resource.URL), + zap.String("required", resource.getRoles())) return r.accessForbidden(cx) } @@ -265,49 +260,44 @@ func (r *oauthProxy) admissionMiddleware(resource *Resource) echo.MiddlewareFunc // step: if the claim is NOT in the token, we access deny value, found, err := user.claims.StringClaim(claimName) if err != nil { - log.WithFields(log.Fields{ - "access": "denied", - "email": user.email, - "resource": resource.URL, - "error": err.Error(), - }).Errorf("unable to extract the claim from token") + r.log.Error("unable to extract the claim from token", + zap.String("access", "denied"), + zap.String("email", user.email), + zap.String("resource", resource.URL), + zap.Error(err)) return r.accessForbidden(cx) } if !found { - log.WithFields(log.Fields{ - "access": "denied", - "claim": claimName, - "email": user.email, - "resource": resource.URL, - }).Warnf("the token does not have the claim") + r.log.Warn("the token does not have the claim", + zap.String("access", "denied"), + zap.String("claim", claimName), + zap.String("email", user.email), + zap.String("resource", resource.URL)) return r.accessForbidden(cx) } // step: check the claim is the same if !match.MatchString(value) { - log.WithFields(log.Fields{ - "access": "denied", - "claim": claimName, - "email": user.email, - "issued": value, - "required": match, - "resource": resource.URL, - }).Warnf("the token claims does not match claim requirement") + r.log.Warn("the token claims does not match claim requirement", + zap.String("access", "denied"), + zap.String("claim", claimName), + 
zap.String("email", user.email), + zap.String("issued", value), + zap.String("required", match.String()), + zap.String("resource", resource.URL)) return r.accessForbidden(cx) } } - log.WithFields(log.Fields{ - "access": "permitted", - "client": user.audience, - "email": user.email, - "expires": time.Until(user.expiresAt).String(), - "resource": resource.URL, - }).Debugf("access permitted to resource") + r.log.Debug("access permitted to resource", + zap.String("access", "permitted"), + zap.String("email", user.email), + zap.Duration("expires", time.Until(user.expiresAt)), + zap.String("resource", resource.URL)) return next(cx) } @@ -351,7 +341,7 @@ func (r *oauthProxy) headersMiddleware(custom []string) echo.MiddlewareFunc { // securityMiddleware performs numerous security checks on the request func (r *oauthProxy) securityMiddleware() echo.MiddlewareFunc { - log.Info("enabling the security filter middleware") + r.log.Info("enabling the security filter middleware") secure := secure.New(secure.Options{ AllowedHosts: r.config.Hostnames, BrowserXssFilter: r.config.EnableBrowserXSSFilter, @@ -364,7 +354,7 @@ func (r *oauthProxy) securityMiddleware() echo.MiddlewareFunc { return func(next echo.HandlerFunc) echo.HandlerFunc { return func(cx echo.Context) error { if err := secure.Process(cx.Response().Writer, cx.Request()); err != nil { - log.WithFields(log.Fields{"error": err.Error()}).Errorf("failed security middleware") + r.log.Error("failed security middleware", zap.Error(err)) return r.accessForbidden(cx) } return next(cx) diff --git a/middleware_test.go b/middleware_test.go index 553c87a8..2c1012f4 100644 --- a/middleware_test.go +++ b/middleware_test.go @@ -18,17 +18,18 @@ package main import ( "fmt" "io/ioutil" + "log" "net" "net/http" "strings" "testing" "time" - log "github.com/Sirupsen/logrus" "github.com/gambol99/go-oidc/jose" "github.com/go-resty/resty" "github.com/labstack/echo/middleware" "github.com/stretchr/testify/assert" + "go.uber.org/zap" ) type fakeRequest struct { @@ -83,13 +84,14 @@ func newFakeProxy(c *Config) *fakeProxy { if err != nil { panic("failed to create fake proxy service, error: " + err.Error()) } + proxy.log = zap.NewNop() proxy.upstream = &fakeUpstreamService{} if err = proxy.Run(); err != nil { panic("failed to create the proxy service, error: " + err.Error()) } c.RedirectionURL = fmt.Sprintf("http://%s", proxy.listener.Addr().String()) // step: we need to update the client configs - if proxy.client, proxy.idp, proxy.idpClient, err = newOpenIDClient(c); err != nil { + if proxy.client, proxy.idp, proxy.idpClient, err = proxy.newOpenIDClient(); err != nil { panic("failed to recreate the openid client, error: " + err.Error()) } diff --git a/misc.go b/misc.go index 82d9c06f..b0541fad 100644 --- a/misc.go +++ b/misc.go @@ -22,9 +22,9 @@ import ( "path" "time" - log "github.com/Sirupsen/logrus" "github.com/gambol99/go-oidc/jose" "github.com/labstack/echo" + "go.uber.org/zap" ) // revokeProxy is responsible to stopping the middleware from proxying the request @@ -40,10 +40,9 @@ func (r *oauthProxy) accessForbidden(cx echo.Context) error { tplName := path.Base(r.config.ForbiddenPage) err := cx.Render(http.StatusForbidden, tplName, r.config.Tags) if err != nil { - log.WithFields(log.Fields{ - "error": err, - "template": tplName, - }).Error("unable to render the template") + r.log.Error("unable to render the template", + zap.Error(err), + zap.String("template", tplName)) } return err @@ -71,7 +70,7 @@ func (r *oauthProxy) redirectToAuthorization(cx echo.Context) error { 
// step: if verification is switched off, we can't authorization if r.config.SkipTokenVerification { - log.Errorf("refusing to redirection to authorization endpoint, skip token verification switched on") + r.log.Error("refusing to redirection to authorization endpoint, skip token verification switched on") return cx.NoContent(http.StatusForbidden) } diff --git a/rotation.go b/rotation.go index 1c623f4a..3d8479ee 100644 --- a/rotation.go +++ b/rotation.go @@ -21,8 +21,8 @@ import ( "path" "sync" - log "github.com/Sirupsen/logrus" "github.com/fsnotify/fsnotify" + "go.uber.org/zap" ) type certificationRotation struct { @@ -33,10 +33,12 @@ type certificationRotation struct { certificateFile string // the privateKeyFile is the path of the private key privateKeyFile string + // the logger for this service + log *zap.Logger } // newCertificateRotator creates a new certificate -func newCertificateRotator(cert, key string) (*certificationRotation, error) { +func newCertificateRotator(cert, key string, log *zap.Logger) (*certificationRotation, error) { // step: attempt to load the certificate certificate, err := tls.LoadX509KeyPair(cert, key) if err != nil { @@ -46,13 +48,17 @@ func newCertificateRotator(cert, key string) (*certificationRotation, error) { return &certificationRotation{ certificate: certificate, certificateFile: cert, + log: log, privateKeyFile: key, }, nil } // watch is responsible for adding a file notification and watch on the files for changes func (c *certificationRotation) watch() error { - log.Infof("adding a file watch on the certificates, certificate: %s, key: %s", c.certificateFile, c.privateKeyFile) + c.log.Info("adding a file watch on the certificates, certificate", + zap.String("certificate", c.certificateFile), + zap.String("private_key", c.privateKeyFile)) + watcher, err := fsnotify.NewWatcher() if err != nil { return err @@ -67,7 +73,7 @@ func (c *certificationRotation) watch() error { // step: watching for events filewatchPaths := []string{c.certificateFile, c.privateKeyFile} go func() { - log.Info("starting to watch changes to the tls certificate files") + c.log.Info("starting to watch changes to the tls certificate files") for { select { case event := <-watcher.Events: @@ -79,20 +85,17 @@ func (c *certificationRotation) watch() error { // step: reload the certificate certificate, err := tls.LoadX509KeyPair(c.certificateFile, c.privateKeyFile) if err != nil { - log.WithFields(log.Fields{ - "filename": event.Name, - "error": err.Error(), - }).Error("unable to load the updated certificate") + c.log.Error("unable to load the updated certificate", + zap.String("filename", event.Name), + zap.Error(err)) } // step: load the new certificate c.storeCertificate(certificate) // step: print a debug message for us - log.Infof("replacing the server certifacte with updated version") + c.log.Info("replacing the server certifacte with updated version") } case err := <-watcher.Errors: - log.WithFields(log.Fields{ - "error": err.Error(), - }).Error("recieved an error from the file watcher") + c.log.Error("recieved an error from the file watcher", zap.Error(err)) } } }() diff --git a/rotation_test.go b/rotation_test.go index b67515aa..01f96af7 100644 --- a/rotation_test.go +++ b/rotation_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "go.uber.org/zap" ) const ( @@ -28,7 +29,7 @@ const ( ) func newTestCertificateRotator(t *testing.T) *certificationRotation { - c, err := newCertificateRotator(testCertificateFile, testPrivateKeyFile) + c, err := 
newCertificateRotator(testCertificateFile, testPrivateKeyFile, zap.NewNop()) assert.NotNil(t, c) assert.Equal(t, testCertificateFile, c.certificateFile) assert.Equal(t, testPrivateKeyFile, c.privateKeyFile) @@ -40,13 +41,13 @@ func newTestCertificateRotator(t *testing.T) *certificationRotation { } func TestNewCeritifacteRotator(t *testing.T) { - c, err := newCertificateRotator(testCertificateFile, testPrivateKeyFile) + c, err := newCertificateRotator(testCertificateFile, testPrivateKeyFile, zap.NewNop()) assert.NotNil(t, c) assert.NoError(t, err) } func TestNewCeritifacteRotatorFailure(t *testing.T) { - c, err := newCertificateRotator("./tests/does_not_exist", testPrivateKeyFile) + c, err := newCertificateRotator("./tests/does_not_exist", testPrivateKeyFile, zap.NewNop()) assert.Nil(t, c) assert.Error(t, err) } diff --git a/server.go b/server.go index 2c1892dc..b8e5a577 100644 --- a/server.go +++ b/server.go @@ -18,6 +18,7 @@ package main import ( "crypto/tls" "crypto/x509" + "errors" "fmt" "html/template" "io" @@ -32,101 +33,82 @@ import ( httplog "log" - log "github.com/Sirupsen/logrus" "github.com/armon/go-proxyproto" "github.com/gambol99/go-oidc/oidc" "github.com/gambol99/goproxy" "github.com/labstack/echo" "github.com/labstack/echo/middleware" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" ) type oauthProxy struct { - // the proxy configuration - config *Config - // the http service - router http.Handler - // the opened client - client *oidc.Client - // the openid provider configuration - idp oidc.ProviderConfig - // the provider http client - idpClient *http.Client - // the proxy client - upstream reverseProxy - // the upstream endpoint url - endpoint *url.URL - // the templates for the custom pages - templates *template.Template - // the store interface - store storage - // the prometheus handler - prometheusHandler http.Handler - // the default server - server *http.Server - // the default listener - listener net.Listener + client *oidc.Client + config *Config + endpoint *url.URL + idp oidc.ProviderConfig + idpClient *http.Client + listener net.Listener + log *zap.Logger + metricsHandler http.Handler + router http.Handler + server *http.Server + store storage + templates *template.Template + upstream reverseProxy } func init() { - // step: ensure all time is in UTC - time.LoadLocation("UTC") - // step: set the core - runtime.GOMAXPROCS(runtime.NumCPU()) + time.LoadLocation("UTC") // ensure all time is in UTC + runtime.GOMAXPROCS(runtime.NumCPU()) // set the core } // newProxy create's a new proxy from configuration func newProxy(config *Config) (*oauthProxy, error) { - var err error - // step: set the logging - httplog.SetOutput(ioutil.Discard) - if config.EnableJSONLogging { - log.SetFormatter(&log.JSONFormatter{}) - } - if config.Verbose { - log.SetLevel(log.DebugLevel) - httplog.SetOutput(os.Stderr) + // create the service logger + log, err := createLogger(config) + if err != nil { + return nil, err } - log.Infof("starting %s, author: %s, version: %s, ", prog, author, version) - + log.Info("starting the service", zap.String("prog", prog), zap.String("author", author), zap.String("version", version)) svc := &oauthProxy{ - config: config, - prometheusHandler: prometheus.Handler(), + config: config, + log: log, + metricsHandler: prometheus.Handler(), } - // step: parse the upstream endpoint + // parse the upstream endpoint if svc.endpoint, err = url.Parse(config.Upstream); err != nil { return nil, err } - // step: initialize the store if any + // initialize 
the store if any if config.StoreURL != "" { if svc.store, err = createStorage(config.StoreURL); err != nil { return nil, err } } - // step: initialize the openid client + // initialize the openid client if !config.SkipTokenVerification { - if svc.client, svc.idp, svc.idpClient, err = newOpenIDClient(config); err != nil { + if svc.client, svc.idp, svc.idpClient, err = svc.newOpenIDClient(); err != nil { return nil, err } } else { - log.Warnf("TESTING ONLY CONFIG - the verification of the token have been disabled") + log.Warn("TESTING ONLY CONFIG - the verification of the token have been disabled") } if config.ClientID == "" && config.ClientSecret == "" { - log.Warnf("client credentials are not set, depending on provider (confidential|public) you might be unable to auth") + log.Warn("client credentials are not set, depending on provider (confidential|public) you might be unable to auth") } - // step: are we running in forwarding more? - switch config.EnableForwarding { - case true: + // are we running in forwarding mode? + if config.EnableForwarding { if err := svc.createForwardingProxy(); err != nil { return nil, err } - default: + } else { if err := svc.createReverseProxy(); err != nil { return nil, err } @@ -135,9 +117,34 @@ func newProxy(config *Config) (*oauthProxy, error) { return svc, nil } +// createLogger is responsible for creating the service logger +func createLogger(config *Config) (*zap.Logger, error) { + httplog.SetOutput(ioutil.Discard) // disable the http logger + if config.DisableAllLogging { + return zap.NewNop(), nil + } + + c := zap.NewProductionConfig() + c.DisableStacktrace = true + c.DisableCaller = true + // are we enabling json logging? + if !config.EnableJSONLogging { + c.Encoding = "console" + } + // are we running verbose mode? 
+ if config.Verbose { + httplog.SetOutput(os.Stderr) + c.DisableCaller = false + c.Development = true + c.Level = zap.NewAtomicLevelAt(zap.DebugLevel) + } + + return c.Build() +} + // createReverseProxy creates a reverse proxy func (r *oauthProxy) createReverseProxy() error { - log.Infof("enabled reverse proxy mode, upstream url: %s", r.config.Upstream) + r.log.Info("enabled reverse proxy mode, upstream url", zap.String("url", r.config.Upstream)) if err := r.createUpstreamProxy(r.endpoint); err != nil { return err } @@ -148,7 +155,7 @@ func (r *oauthProxy) createReverseProxy() error { engine.Use(middleware.Recover()) if r.config.EnableProfiling { - log.Warn("enabling the debug profiling on /debug/pprof") + r.log.Warn("enabling the debug profiling on /debug/pprof") engine.Any("/debug/pprof/:name", r.debugHandler) } if r.config.EnableLogging { @@ -182,11 +189,14 @@ func (r *oauthProxy) createReverseProxy() error { // step: provision in the protected resources for _, x := range r.config.Resources { if x.URL[len(x.URL)-1:] == "/" { - log.Warnf("the resource url: %s is not a prefix, you probably want %s* or %s* to protect the resource", x.URL, x.URL, strings.TrimRight(x.URL, "/")) + r.log.Warn("the resource url is not a prefix", + zap.String("resource", x.URL), + zap.String("change", x.URL), + zap.String("ammended", strings.TrimRight(x.URL, "/"))) } } for _, x := range r.config.Resources { - log.Infof("protecting resource: %s", x) + r.log.Info("protecting resource", zap.String("resource", x.String())) switch x.WhiteListed { case false: engine.Match(x.Methods, x.URL, emptyHandler, r.authenticationMiddleware(x), r.admissionMiddleware(x), r.headersMiddleware(r.config.AddClaims)) @@ -195,13 +205,13 @@ func (r *oauthProxy) createReverseProxy() error { } } for name, value := range r.config.MatchClaims { - log.Infof("the token must container the claim: %s, required: %s", name, value) + r.log.Info("the token must contain", zap.String("claim", name), zap.String("value", value)) } if r.config.RedirectionURL == "" { - log.Warnf("no redirection url has been set, will use host headers") + r.log.Warn("no redirection url has been set, will use host headers") } if r.config.EnableEncryptedToken { - log.Info("session access tokens will be encrypted") + r.log.Info("session access tokens will be encrypted") } engine.Use(r.proxyMiddleware()) @@ -211,10 +221,10 @@ func (r *oauthProxy) createReverseProxy() error { // createForwardingProxy creates a forwarding proxy func (r *oauthProxy) createForwardingProxy() error { - log.Infof("enabling forward signing mode, listening on %s", r.config.Listen) + r.log.Info("enabling forward signing mode, listening on", zap.String("interface", r.config.Listen)) if r.config.SkipUpstreamTLSVerify { - log.Warnf("TLS verification switched off. In forward signing mode it's recommended you verify! (--skip-upstream-tls-verify=false)") + r.log.Warn("TLS verification switched off. In forward signing mode it's recommended you verify! 
(--skip-upstream-tls-verify=false)") } if err := r.createUpstreamProxy(nil); err != nil { return err @@ -251,15 +261,14 @@ func (r *oauthProxy) createForwardingProxy() error { if resp != nil && r.config.EnableLogging { start := ctx.UserData.(time.Time) latency := time.Since(start) - - log.WithFields(log.Fields{ - "method": resp.Request.Method, - "status": resp.StatusCode, - "bytes": resp.ContentLength, - "host": resp.Request.Host, - "path": resp.Request.URL.Path, - "latency": latency.String(), - }).Infof("[%d] |%s| |%10v| %-5s %s", resp.StatusCode, resp.Request.Host, latency, resp.Request.Method, resp.Request.URL.Path) + r.log.Info("client request", + zap.String("method", resp.Request.Method), + zap.String("path", resp.Request.URL.Path), + zap.Int("status", resp.StatusCode), + zap.Int64("bytes", resp.ContentLength), + zap.String("host", resp.Request.Host), + zap.String("path", resp.Request.URL.Path), + zap.String("latency", latency.String())) } return resp @@ -276,7 +285,7 @@ func (r *oauthProxy) createForwardingProxy() error { // Run starts the proxy service func (r *oauthProxy) Run() error { - listener, err := createHTTPListener(listenerConfig{ + listener, err := r.createHTTPListener(listenerConfig{ listen: r.config.Listen, certificate: r.config.TLSCertificate, privateKey: r.config.TLSPrivateKey, @@ -296,20 +305,18 @@ func (r *oauthProxy) Run() error { r.listener = listener go func() { - log.Infof("keycloak proxy service starting on %s", r.config.Listen) + r.log.Info("keycloak proxy service starting on", zap.String("interface", r.config.Listen)) if err = server.Serve(listener); err != nil { if err != http.ErrServerClosed { - log.WithFields(log.Fields{ - "error": err.Error(), - }).Fatalf("failed to start the http service") + r.log.Fatal("failed to start the http service", zap.Error(err)) } } }() // step: are we running http service as well? if r.config.ListenHTTP != "" { - log.Infof("keycloak proxy http service starting on %s", r.config.ListenHTTP) - httpListener, err := createHTTPListener(listenerConfig{ + r.log.Info("keycloak proxy http service starting on", zap.String("interface", r.config.ListenHTTP)) + httpListener, err := r.createHTTPListener(listenerConfig{ listen: r.config.ListenHTTP, proxyProtocol: r.config.EnableProxyProtocol, }) @@ -322,9 +329,7 @@ func (r *oauthProxy) Run() error { } go func() { if err := httpsvc.Serve(httpListener); err != nil { - log.WithFields(log.Fields{ - "error": err.Error(), - }).Fatalf("failed to start the http redirect service") + r.log.Fatal("failed to start the http redirect service", zap.Error(err)) } }() } @@ -343,7 +348,7 @@ type listenerConfig struct { } // createHTTPListener is responsible for creating a listening socket -func createHTTPListener(config listenerConfig) (net.Listener, error) { +func (r *oauthProxy) createHTTPListener(config listenerConfig) (net.Listener, error) { var listener net.Listener var err error @@ -355,7 +360,7 @@ func createHTTPListener(config listenerConfig) (net.Listener, error) { return nil, err } } - log.Infof("listening on unix socket: %s", config.listen) + r.log.Info("listening on unix socket", zap.String("interface", config.listen)) if listener, err = net.Listen("unix", socket); err != nil { return nil, err } @@ -367,15 +372,16 @@ func createHTTPListener(config listenerConfig) (net.Listener, error) { // step: does it require proxy protocol? 
if config.proxyProtocol { - log.Infof("enabling the proxy protocol on listener: %s", config.listen) + r.log.Info("enabling the proxy protocol on listener", zap.String("interface", config.listen)) listener = &proxyproto.Listener{Listener: listener} } // step: does the socket require TLS? if config.certificate != "" && config.privateKey != "" { - log.Infof("tls enabled, certificate: %s, key: %s", config.certificate, config.privateKey) + r.log.Info("tls support enabled", + zap.String("certificate", config.certificate), zap.String("private_key", config.privateKey)) // step: creating a certificate rotation - rotate, err := newCertificateRotator(config.certificate, config.privateKey) + rotate, err := newCertificateRotator(config.certificate, config.privateKey, r.log) if err != nil { return nil, err } @@ -412,9 +418,10 @@ func (r *oauthProxy) createUpstreamProxy(upstream *url.URL) error { Timeout: r.config.UpstreamTimeout, }).Dial - // step: are we using a unix socket? + // are we using a unix socket? if upstream != nil && upstream.Scheme == "unix" { - log.Infof("using the unix domain socket: %s%s for upstream", upstream.Host, upstream.Path) + r.log.Info("using unix socket for upstream", zap.String("socket", fmt.Sprintf("%s%s", upstream.Host, upstream.Path))) + socketPath := fmt.Sprintf("%s%s", upstream.Host, upstream.Path) dialer = func(network, address string) (net.Conn, error) { return net.Dial("unix", socketPath) @@ -424,18 +431,19 @@ func (r *oauthProxy) createUpstreamProxy(upstream *url.URL) error { upstream.Scheme = "http" } - // step: create the upstream tls configure + // create the upstream tls configure tlsConfig := &tls.Config{ InsecureSkipVerify: r.config.SkipUpstreamTLSVerify, } - // step: are we using a client certificate + // are we using a client certificate // @TODO provide a means of reload on the client certificate when it expires. I'm not sure if it's just a // case of update the http transport settings - Also we to place this go-routine? 
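// Sketch only (not part of this commit), following up the @TODO above: the block below reads the
// client certificate PEM once at start-up, so a renewed file on disk is never re-read. Assuming the
// file is the certificate the proxy presents upstream, one way to pick up a renewal without a
// restart is to resolve the key pair per handshake via crypto/tls; the TLSClientPrivateKey field in
// this sketch is hypothetical, the current config only carries TLSClientCertificate.
//
//	tlsConfig.GetClientCertificate = func(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) {
//		pair, err := tls.LoadX509KeyPair(r.config.TLSClientCertificate, r.config.TLSClientPrivateKey)
//		if err != nil {
//			return nil, err
//		}
//		return &pair, nil
//	}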
if r.config.TLSClientCertificate != "" { cert, err := ioutil.ReadFile(r.config.TLSClientCertificate) if err != nil { - log.Fatal(err) + r.log.Error("unable to read client certificate", zap.String("path", r.config.TLSClientCertificate), zap.Error(err)) + return err } pool := x509.NewCertPool() pool.AppendCertsFromPEM(cert) @@ -443,12 +451,12 @@ func (r *oauthProxy) createUpstreamProxy(upstream *url.URL) error { tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert } - // step: create the forwarding proxy + // create the forwarding proxy proxy := goproxy.NewProxyHttpServer() proxy.Logger = httplog.New(ioutil.Discard, "", 0) r.upstream = proxy - // step: update the tls configuration of the reverse proxy + // update the tls configuration of the reverse proxy r.upstream.(*goproxy.ProxyHttpServer).Tr = &http.Transport{ Dial: dialer, TLSClientConfig: tlsConfig, @@ -463,17 +471,17 @@ func (r *oauthProxy) createTemplates() error { var list []string if r.config.SignInPage != "" { - log.Debugf("loading the custom sign in page: %s", r.config.SignInPage) + r.log.Debug("loading the custom sign in page", zap.String("page", r.config.SignInPage)) list = append(list, r.config.SignInPage) } if r.config.ForbiddenPage != "" { - log.Debugf("loading the custom sign forbidden page: %s", r.config.ForbiddenPage) + r.log.Debug("loading the custom sign forbidden page", zap.String("page", r.config.ForbiddenPage)) list = append(list, r.config.ForbiddenPage) } if len(list) > 0 { - log.Infof("loading the custom templates: %s", strings.Join(list, ",")) + r.log.Info("loading the custom templates", zap.String("templates", strings.Join(list, ","))) r.templates = template.Must(template.ParseFiles(list...)) r.router.(*echo.Echo).Renderer = r } @@ -485,3 +493,64 @@ func (r *oauthProxy) createTemplates() error { func (r *oauthProxy) Render(w io.Writer, name string, data interface{}, c echo.Context) error { return r.templates.ExecuteTemplate(w, name, data) } + +// newOpenIDClient initializes the openID configuration, note: the redirection url is deliberately left blank +// in order to retrieve it from the host header on request +func (r *oauthProxy) newOpenIDClient() (*oidc.Client, oidc.ProviderConfig, *http.Client, error) { + var err error + var config oidc.ProviderConfig + + // step: fix up the url if required, the underlining lib will add the .well-known/openid-configuration to the discovery url for us. 
+ if strings.HasSuffix(r.config.DiscoveryURL, "/.well-known/openid-configuration") { + r.config.DiscoveryURL = strings.TrimSuffix(r.config.DiscoveryURL, "/.well-known/openid-configuration") + } + + // step: create a idp http client + hc := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: r.config.SkipOpenIDProviderTLSVerify, + }, + }, + Timeout: time.Second * 10, + } + + // step: attempt to retrieve the provider configuration + completeCh := make(chan bool) + go func() { + for { + r.log.Info("attempting to retrieve configuration discovery url", zap.String("url", r.config.DiscoveryURL)) + if config, err = oidc.FetchProviderConfig(hc, r.config.DiscoveryURL); err == nil { + break // break and complete + } + r.log.Warn("failed to get provider configuration from discovery", zap.Error(err)) + time.Sleep(time.Second * 3) + } + completeCh <- true + }() + // wait for timeout or successful retrieval + select { + case <-time.After(30 * time.Second): + return nil, config, nil, errors.New("failed to retrieve the provider configuration from discovery url") + case <-completeCh: + r.log.Info("successfully retrieved openid configuration from the discovery") + } + + client, err := oidc.NewClient(oidc.ClientConfig{ + ProviderConfig: config, + Credentials: oidc.ClientCredentials{ + ID: r.config.ClientID, + Secret: r.config.ClientSecret, + }, + RedirectURL: fmt.Sprintf("%s/oauth/callback", r.config.RedirectionURL), + Scope: append(r.config.Scopes, oidc.DefaultScope...), + HTTPClient: hc, + }) + if err != nil { + return nil, config, hc, err + } + // start the provider sync for key rotation + client.SyncProviderConfig(r.config.DiscoveryURL) + + return client, config, hc, nil +} diff --git a/server_test.go b/server_test.go index 28e518d6..28c1837c 100644 --- a/server_test.go +++ b/server_test.go @@ -27,7 +27,6 @@ import ( "testing" "time" - log "github.com/Sirupsen/logrus" "github.com/gambol99/go-oidc/jose" "github.com/stretchr/testify/assert" ) @@ -317,7 +316,7 @@ func newTestProxyService(config *Config) (*oauthProxy, *fakeAuthServer, string) config.RedirectionURL = service.URL // step: we need to update the client config - if proxy.client, proxy.idp, proxy.idpClient, err = newOpenIDClient(config); err != nil { + if proxy.client, proxy.idp, proxy.idpClient, err = proxy.newOpenIDClient(); err != nil { panic("failed to recreate the openid client, error: " + err.Error()) } @@ -347,6 +346,9 @@ func newFakeKeycloakConfig() *Config { Listen: "127.0.0.1:0", EnableAuthorizationHeader: true, EnableLoginHandler: true, + EnableLogging: false, + DisableAllLogging: true, + Verbose: false, Scopes: []string{}, Resources: []*Resource{ { diff --git a/session.go b/session.go index a8423693..6bb11f90 100644 --- a/session.go +++ b/session.go @@ -19,8 +19,8 @@ import ( "net/http" "strings" - log "github.com/Sirupsen/logrus" "github.com/gambol99/go-oidc/jose" + "go.uber.org/zap" ) // getIdentity retrieves the user identity from a request, either from a session cookie or a bearer token @@ -46,12 +46,11 @@ func (r *oauthProxy) getIdentity(req *http.Request) (*userContext, error) { } user.bearerToken = isBearer - log.WithFields(log.Fields{ - "id": user.id, - "name": user.name, - "email": user.email, - "roles": strings.Join(user.roles, ","), - }).Debugf("found the user identity: %s in the request", user.email) + r.log.Debug("found the user identity", + zap.String("id", user.id), + zap.String("name", user.name), + zap.String("email", user.email), + zap.String("roles", 
strings.Join(user.roles, ","))) return user, nil } diff --git a/store_boltdb.go b/store_boltdb.go index 92181684..135b97b1 100644 --- a/store_boltdb.go +++ b/store_boltdb.go @@ -21,7 +21,6 @@ import ( "strings" "time" - log "github.com/Sirupsen/logrus" "github.com/boltdb/bolt" ) @@ -42,8 +41,6 @@ type boltdbStore struct { func newBoltDBStore(location *url.URL) (storage, error) { // step: drop the initial slash path := strings.TrimPrefix(location.Path, "/") - - log.Infof("creating the bolddb store, file: %s", path) db, err := bolt.Open(path, 0600, &bolt.Options{ Timeout: 10 * time.Second, }) @@ -64,11 +61,6 @@ func newBoltDBStore(location *url.URL) (storage, error) { // Set adds a token to the store func (r *boltdbStore) Set(key, value string) error { - log.WithFields(log.Fields{ - "key": key, - "value": value, - }).Debugf("adding the key: %s in store", key) - return r.client.Update(func(tx *bolt.Tx) error { bucket := tx.Bucket([]byte(dbName)) if bucket == nil { @@ -80,10 +72,6 @@ func (r *boltdbStore) Set(key, value string) error { // Get retrieves a token from the store func (r *boltdbStore) Get(key string) (string, error) { - log.WithFields(log.Fields{ - "key": key, - }).Debugf("retrieving the key: %s from store", key) - var value string err := r.client.View(func(tx *bolt.Tx) error { bucket := tx.Bucket([]byte(dbName)) @@ -99,10 +87,6 @@ func (r *boltdbStore) Get(key string) (string, error) { // Delete removes the key from the bucket func (r *boltdbStore) Delete(key string) error { - log.WithFields(log.Fields{ - "key": key, - }).Debugf("deleting the key: %s from store", key) - return r.client.Update(func(tx *bolt.Tx) error { bucket := tx.Bucket([]byte(dbName)) if bucket == nil { @@ -114,6 +98,5 @@ func (r *boltdbStore) Delete(key string) error { // Close closes of any open resources func (r *boltdbStore) Close() error { - log.Infof("closing the resourcese for boltdb store") return r.client.Close() } diff --git a/store_redis.go b/store_redis.go index aeb92f62..8979ce20 100644 --- a/store_redis.go +++ b/store_redis.go @@ -19,7 +19,6 @@ import ( "net/url" "time" - log "github.com/Sirupsen/logrus" redis "gopkg.in/redis.v4" ) @@ -29,8 +28,6 @@ type redisStore struct { // newRedisStore creates a new redis store func newRedisStore(location *url.URL) (storage, error) { - log.Infof("creating a redis client for store: %s", location.Host) - // step: get any password password := "" if location.User != nil { @@ -51,11 +48,6 @@ func newRedisStore(location *url.URL) (storage, error) { // Set adds a token to the store func (r redisStore) Set(key, value string) error { - log.WithFields(log.Fields{ - "key": key, - "value": value, - }).Debugf("adding the key: %s to the store", key) - if err := r.client.Set(key, value, time.Duration(0)); err.Err() != nil { return err.Err() } @@ -65,10 +57,6 @@ func (r redisStore) Set(key, value string) error { // Get retrieves a token from the store func (r redisStore) Get(key string) (string, error) { - log.WithFields(log.Fields{ - "key": key, - }).Debugf("retrieving the key: %s from store", key) - result := r.client.Get(key) if result.Err() != nil { return "", result.Err() @@ -79,16 +67,11 @@ func (r redisStore) Get(key string) (string, error) { // Delete remove the key func (r redisStore) Delete(key string) error { - log.WithFields(log.Fields{ - "key": key, - }).Debugf("deleting the key: %s from store", key) - return r.client.Del(key).Err() } // Close closes of any open resources func (r redisStore) Close() error { - log.Infof("closing the resourcese for redis store") if 
r.client != nil { return r.client.Close() } diff --git a/stores.go b/stores.go index 4eda6fea..6277db8a 100644 --- a/stores.go +++ b/stores.go @@ -19,8 +19,8 @@ import ( "fmt" "net/url" - log "github.com/Sirupsen/logrus" "github.com/gambol99/go-oidc/jose" + "go.uber.org/zap" ) // createStorage creates the store client for use @@ -71,9 +71,7 @@ func (r *oauthProxy) GetRefreshToken(token jose.JWT) (string, error) { // DeleteRefreshToken removes a key from the store func (r *oauthProxy) DeleteRefreshToken(token jose.JWT) error { if err := r.store.Delete(getHashKey(&token)); err != nil { - log.WithFields(log.Fields{ - "error": err.Error(), - }).Errorf("unable to delete token") + r.log.Error("unable to delete token", zap.Error(err)) return err } diff --git a/utils.go b/utils.go index 6a9ae0b0..3cc47a16 100644 --- a/utils.go +++ b/utils.go @@ -40,7 +40,6 @@ import ( "unicode" "unicode/utf8" - log "github.com/Sirupsen/logrus" "github.com/gambol99/go-oidc/jose" "github.com/gambol99/go-oidc/oidc" "github.com/labstack/echo" @@ -144,69 +143,6 @@ func decodeText(state, key string) (string, error) { return string(encoded), nil } -// newOpenIDClient initializes the openID configuration, note: the redirection url is deliberately left blank -// in order to retrieve it from the host header on request -func newOpenIDClient(cfg *Config) (*oidc.Client, oidc.ProviderConfig, *http.Client, error) { - var err error - var config oidc.ProviderConfig - - // fix up the url if required, the underlining lib will add the .well-known/openid-configuration to the discovery url for us. - if strings.HasSuffix(cfg.DiscoveryURL, "/.well-known/openid-configuration") { - cfg.DiscoveryURL = strings.TrimSuffix(cfg.DiscoveryURL, "/.well-known/openid-configuration") - } - - hc := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: cfg.SkipOpenIDProviderTLSVerify, - }, - IdleConnTimeout: time.Second * 10, - }, - Timeout: time.Second * 10, - } - - // attempt to retrieve the provider configuration - completeCh := make(chan bool) - go func() { - for { - log.Infof("attempting to retrieve openid configuration from discovery url: %s", cfg.DiscoveryURL) - if config, err = oidc.FetchProviderConfig(hc, cfg.DiscoveryURL); err == nil { - break // break and complete - } - log.Warnf("failed to get provider configuration from discovery url: %s, %s", cfg.DiscoveryURL, err) - time.Sleep(time.Second * 3) - } - completeCh <- true - }() - // wait for timeout or successful retrieval - select { - case <-time.After(30 * time.Second): - return nil, config, nil, errors.New("failed to retrieve the provider configuration from discovery url") - case <-completeCh: - log.Infof("successfully retrieved the openid configuration from the discovery url: %s", cfg.DiscoveryURL) - } - - client, err := oidc.NewClient(oidc.ClientConfig{ - ProviderConfig: config, - Credentials: oidc.ClientCredentials{ - ID: cfg.ClientID, - Secret: cfg.ClientSecret, - }, - RedirectURL: fmt.Sprintf("%s/oauth/callback", cfg.RedirectionURL), - Scope: append(cfg.Scopes, oidc.DefaultScope...), - SkipClientIDCheck: cfg.SkipClientID, - HTTPClient: hc, - }) - if err != nil { - return nil, config, hc, err - } - - // step: start the provider sync for key rotation - client.SyncProviderConfig(cfg.DiscoveryURL) - - return client, config, hc, nil -} - // decodeKeyPairs converts a list of strings (key=pair) to a map func decodeKeyPairs(list []string) (map[string]string, error) { kp := make(map[string]string) diff --git a/utils_test.go b/utils_test.go index 
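
(Editorial aside for reviewers: the hunks above all follow the same translation from logrus field maps to zap's typed fields. A minimal sketch of that pattern, assuming only that the proxy's `r.log` is a `*zap.Logger` as the new calls imply; the constructor below is an illustrative stand-in, not the project's actual logger setup.)

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	// Illustrative stand-in: the proxy's real logger construction (including
	// its DisableAllLogging handling) is not part of these hunks.
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	// logrus style (removed by this patch):
	//   log.WithFields(log.Fields{"error": err.Error()}).Errorf("unable to delete token")
	// zap style (added by this patch):
	logger.Error("unable to delete token", zap.Error(errors.New("example")))
}
```
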
cc43bf37..eabafb43 100644 --- a/utils_test.go +++ b/utils_test.go @@ -30,15 +30,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestNewOpenIDClient(t *testing.T) { - _, auth, _ := newTestProxyService(nil) - client, _, _, err := newOpenIDClient(&Config{ - DiscoveryURL: auth.location.String() + "/auth/realms/hod-test", - }) - assert.NoError(t, err) - assert.NotNil(t, client) -} - func TestDecodeKeyPairs(t *testing.T) { testCases := []struct { List []string From 5198c4b5f5d3a88d73c316d67e2da078351704c5 Mon Sep 17 00:00:00 2001 From: Rohith Date: Sun, 11 Jun 2017 19:21:29 +0100 Subject: [PATCH 2/3] - updating the dependencies, removing logrus and adding zap --- Godeps/Godeps.json | 77 +-- server.go | 9 +- server_test.go | 20 +- utils.go | 1 - vendor/github.com/Sirupsen/logrus/.gitignore | 1 - vendor/github.com/Sirupsen/logrus/.travis.yml | 9 - .../github.com/Sirupsen/logrus/CHANGELOG.md | 66 --- vendor/github.com/Sirupsen/logrus/README.md | 390 --------------- vendor/github.com/Sirupsen/logrus/doc.go | 26 - vendor/github.com/Sirupsen/logrus/entry.go | 264 ---------- vendor/github.com/Sirupsen/logrus/exported.go | 193 -------- .../github.com/Sirupsen/logrus/formatter.go | 45 -- vendor/github.com/Sirupsen/logrus/hooks.go | 34 -- .../Sirupsen/logrus/json_formatter.go | 41 -- vendor/github.com/Sirupsen/logrus/logger.go | 212 -------- vendor/github.com/Sirupsen/logrus/logrus.go | 143 ------ .../Sirupsen/logrus/terminal_bsd.go | 9 - .../Sirupsen/logrus/terminal_linux.go | 12 - .../Sirupsen/logrus/terminal_notwindows.go | 21 - .../Sirupsen/logrus/terminal_solaris.go | 15 - .../Sirupsen/logrus/terminal_windows.go | 27 - .../Sirupsen/logrus/text_formatter.go | 161 ------ vendor/github.com/Sirupsen/logrus/writer.go | 53 -- .../github.com/coreos/go-oidc/http/client.go | 7 - vendor/github.com/coreos/go-oidc/http/doc.go | 2 - vendor/github.com/coreos/go-oidc/http/http.go | 156 ------ vendor/github.com/coreos/go-oidc/http/url.go | 29 -- .../github.com/coreos/go-oidc/jose/claims.go | 126 ----- vendor/github.com/coreos/go-oidc/jose/doc.go | 2 - vendor/github.com/coreos/go-oidc/jose/jose.go | 112 ----- vendor/github.com/coreos/go-oidc/jose/jwk.go | 135 ----- vendor/github.com/coreos/go-oidc/jose/jws.go | 51 -- vendor/github.com/coreos/go-oidc/jose/jwt.go | 82 --- vendor/github.com/coreos/go-oidc/jose/sig.go | 24 - .../github.com/coreos/go-oidc/jose/sig_rsa.go | 67 --- vendor/github.com/coreos/go-oidc/key/doc.go | 2 - vendor/github.com/coreos/go-oidc/key/key.go | 153 ------ .../github.com/coreos/go-oidc/key/manager.go | 99 ---- vendor/github.com/coreos/go-oidc/key/repo.go | 55 --- .../github.com/coreos/go-oidc/key/rotate.go | 159 ------ vendor/github.com/coreos/go-oidc/key/sync.go | 91 ---- .../github.com/coreos/go-oidc/oauth2/doc.go | 2 - .../github.com/coreos/go-oidc/oauth2/error.go | 29 -- .../coreos/go-oidc/oauth2/oauth2.go | 416 ---------------- vendor/go.uber.org/atomic/.codecov.yml | 15 + vendor/go.uber.org/atomic/.gitignore | 11 + vendor/go.uber.org/atomic/.travis.yml | 23 + .../atomic/LICENSE.txt} | 4 +- vendor/go.uber.org/atomic/Makefile | 64 +++ vendor/go.uber.org/atomic/README.md | 36 ++ vendor/go.uber.org/atomic/atomic.go | 309 ++++++++++++ vendor/go.uber.org/atomic/glide.lock | 17 + vendor/go.uber.org/atomic/glide.yaml | 6 + vendor/go.uber.org/atomic/string.go | 49 ++ vendor/go.uber.org/zap/.codecov.yml | 17 + vendor/go.uber.org/zap/.gitignore | 28 ++ vendor/go.uber.org/zap/.readme.tmpl | 94 ++++ vendor/go.uber.org/zap/.travis.yml | 21 + vendor/go.uber.org/zap/CHANGELOG.md | 228 
+++++++++ vendor/go.uber.org/zap/CONTRIBUTING.md | 79 +++ vendor/go.uber.org/zap/LICENSE.txt | 19 + vendor/go.uber.org/zap/Makefile | 72 +++ vendor/go.uber.org/zap/README.md | 119 +++++ vendor/go.uber.org/zap/array.go | 355 +++++++++++++ vendor/go.uber.org/zap/buffer/buffer.go | 106 ++++ vendor/go.uber.org/zap/buffer/pool.go | 49 ++ vendor/go.uber.org/zap/check_license.sh | 15 + vendor/go.uber.org/zap/config.go | 231 +++++++++ vendor/go.uber.org/zap/doc.go | 22 + vendor/go.uber.org/zap/encoder.go | 73 +++ vendor/go.uber.org/zap/field.go | 324 ++++++++++++ vendor/go.uber.org/zap/flag.go | 38 ++ vendor/go.uber.org/zap/glide.lock | 72 +++ vendor/go.uber.org/zap/glide.yaml | 32 ++ vendor/go.uber.org/zap/global.go | 114 +++++ vendor/go.uber.org/zap/http_handler.go | 81 +++ .../zap/internal/bufferpool/bufferpool.go | 31 ++ .../go.uber.org/zap/internal/color/color.go | 44 ++ vendor/go.uber.org/zap/internal/exit/exit.go | 64 +++ .../zap/internal/multierror/multierror.go | 72 +++ vendor/go.uber.org/zap/level.go | 128 +++++ vendor/go.uber.org/zap/logger.go | 277 +++++++++++ vendor/go.uber.org/zap/options.go | 102 ++++ vendor/go.uber.org/zap/stacktrace.go | 98 ++++ vendor/go.uber.org/zap/sugar.go | 294 +++++++++++ vendor/go.uber.org/zap/time.go | 27 + vendor/go.uber.org/zap/writer.go | 91 ++++ .../zap/zapcore/console_encoder.go | 140 ++++++ vendor/go.uber.org/zap/zapcore/core.go | 113 +++++ vendor/go.uber.org/zap/zapcore/doc.go | 24 + vendor/go.uber.org/zap/zapcore/encoder.go | 324 ++++++++++++ vendor/go.uber.org/zap/zapcore/entry.go | 256 ++++++++++ vendor/go.uber.org/zap/zapcore/field.go | 211 ++++++++ vendor/go.uber.org/zap/zapcore/hook.go | 68 +++ .../go.uber.org/zap/zapcore/json_encoder.go | 466 ++++++++++++++++++ vendor/go.uber.org/zap/zapcore/level.go | 175 +++++++ .../go.uber.org/zap/zapcore/level_strings.go | 46 ++ vendor/go.uber.org/zap/zapcore/marshaler.go | 53 ++ .../go.uber.org/zap/zapcore/memory_encoder.go | 179 +++++++ vendor/go.uber.org/zap/zapcore/sampler.go | 134 +++++ vendor/go.uber.org/zap/zapcore/tee.go | 81 +++ .../go.uber.org/zap/zapcore/write_syncer.go | 123 +++++ 102 files changed, 6403 insertions(+), 3569 deletions(-) delete mode 100644 vendor/github.com/Sirupsen/logrus/.gitignore delete mode 100644 vendor/github.com/Sirupsen/logrus/.travis.yml delete mode 100644 vendor/github.com/Sirupsen/logrus/CHANGELOG.md delete mode 100644 vendor/github.com/Sirupsen/logrus/README.md delete mode 100644 vendor/github.com/Sirupsen/logrus/doc.go delete mode 100644 vendor/github.com/Sirupsen/logrus/entry.go delete mode 100644 vendor/github.com/Sirupsen/logrus/exported.go delete mode 100644 vendor/github.com/Sirupsen/logrus/formatter.go delete mode 100644 vendor/github.com/Sirupsen/logrus/hooks.go delete mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter.go delete mode 100644 vendor/github.com/Sirupsen/logrus/logger.go delete mode 100644 vendor/github.com/Sirupsen/logrus/logrus.go delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_bsd.go delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_linux.go delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_notwindows.go delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_solaris.go delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_windows.go delete mode 100644 vendor/github.com/Sirupsen/logrus/text_formatter.go delete mode 100644 vendor/github.com/Sirupsen/logrus/writer.go delete mode 100644 vendor/github.com/coreos/go-oidc/http/client.go delete mode 100644 
vendor/github.com/coreos/go-oidc/http/doc.go delete mode 100644 vendor/github.com/coreos/go-oidc/http/http.go delete mode 100644 vendor/github.com/coreos/go-oidc/http/url.go delete mode 100644 vendor/github.com/coreos/go-oidc/jose/claims.go delete mode 100644 vendor/github.com/coreos/go-oidc/jose/doc.go delete mode 100644 vendor/github.com/coreos/go-oidc/jose/jose.go delete mode 100644 vendor/github.com/coreos/go-oidc/jose/jwk.go delete mode 100644 vendor/github.com/coreos/go-oidc/jose/jws.go delete mode 100644 vendor/github.com/coreos/go-oidc/jose/jwt.go delete mode 100755 vendor/github.com/coreos/go-oidc/jose/sig.go delete mode 100755 vendor/github.com/coreos/go-oidc/jose/sig_rsa.go delete mode 100644 vendor/github.com/coreos/go-oidc/key/doc.go delete mode 100644 vendor/github.com/coreos/go-oidc/key/key.go delete mode 100644 vendor/github.com/coreos/go-oidc/key/manager.go delete mode 100644 vendor/github.com/coreos/go-oidc/key/repo.go delete mode 100644 vendor/github.com/coreos/go-oidc/key/rotate.go delete mode 100644 vendor/github.com/coreos/go-oidc/key/sync.go delete mode 100644 vendor/github.com/coreos/go-oidc/oauth2/doc.go delete mode 100644 vendor/github.com/coreos/go-oidc/oauth2/error.go delete mode 100644 vendor/github.com/coreos/go-oidc/oauth2/oauth2.go create mode 100644 vendor/go.uber.org/atomic/.codecov.yml create mode 100644 vendor/go.uber.org/atomic/.gitignore create mode 100644 vendor/go.uber.org/atomic/.travis.yml rename vendor/{github.com/Sirupsen/logrus/LICENSE => go.uber.org/atomic/LICENSE.txt} (94%) create mode 100644 vendor/go.uber.org/atomic/Makefile create mode 100644 vendor/go.uber.org/atomic/README.md create mode 100644 vendor/go.uber.org/atomic/atomic.go create mode 100644 vendor/go.uber.org/atomic/glide.lock create mode 100644 vendor/go.uber.org/atomic/glide.yaml create mode 100644 vendor/go.uber.org/atomic/string.go create mode 100644 vendor/go.uber.org/zap/.codecov.yml create mode 100644 vendor/go.uber.org/zap/.gitignore create mode 100644 vendor/go.uber.org/zap/.readme.tmpl create mode 100644 vendor/go.uber.org/zap/.travis.yml create mode 100644 vendor/go.uber.org/zap/CHANGELOG.md create mode 100644 vendor/go.uber.org/zap/CONTRIBUTING.md create mode 100644 vendor/go.uber.org/zap/LICENSE.txt create mode 100644 vendor/go.uber.org/zap/Makefile create mode 100644 vendor/go.uber.org/zap/README.md create mode 100644 vendor/go.uber.org/zap/array.go create mode 100644 vendor/go.uber.org/zap/buffer/buffer.go create mode 100644 vendor/go.uber.org/zap/buffer/pool.go create mode 100755 vendor/go.uber.org/zap/check_license.sh create mode 100644 vendor/go.uber.org/zap/config.go create mode 100644 vendor/go.uber.org/zap/doc.go create mode 100644 vendor/go.uber.org/zap/encoder.go create mode 100644 vendor/go.uber.org/zap/field.go create mode 100644 vendor/go.uber.org/zap/flag.go create mode 100644 vendor/go.uber.org/zap/glide.lock create mode 100644 vendor/go.uber.org/zap/glide.yaml create mode 100644 vendor/go.uber.org/zap/global.go create mode 100644 vendor/go.uber.org/zap/http_handler.go create mode 100644 vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go create mode 100644 vendor/go.uber.org/zap/internal/color/color.go create mode 100644 vendor/go.uber.org/zap/internal/exit/exit.go create mode 100644 vendor/go.uber.org/zap/internal/multierror/multierror.go create mode 100644 vendor/go.uber.org/zap/level.go create mode 100644 vendor/go.uber.org/zap/logger.go create mode 100644 vendor/go.uber.org/zap/options.go create mode 100644 vendor/go.uber.org/zap/stacktrace.go 
create mode 100644 vendor/go.uber.org/zap/sugar.go create mode 100644 vendor/go.uber.org/zap/time.go create mode 100644 vendor/go.uber.org/zap/writer.go create mode 100644 vendor/go.uber.org/zap/zapcore/console_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/core.go create mode 100644 vendor/go.uber.org/zap/zapcore/doc.go create mode 100644 vendor/go.uber.org/zap/zapcore/encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/entry.go create mode 100644 vendor/go.uber.org/zap/zapcore/field.go create mode 100644 vendor/go.uber.org/zap/zapcore/hook.go create mode 100644 vendor/go.uber.org/zap/zapcore/json_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/level.go create mode 100644 vendor/go.uber.org/zap/zapcore/level_strings.go create mode 100644 vendor/go.uber.org/zap/zapcore/marshaler.go create mode 100644 vendor/go.uber.org/zap/zapcore/memory_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/sampler.go create mode 100644 vendor/go.uber.org/zap/zapcore/tee.go create mode 100644 vendor/go.uber.org/zap/zapcore/write_syncer.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index e42e79ac..849acdbc 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -12,11 +12,6 @@ "ImportPath": "github.com/PuerkitoBio/urlesc", "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" }, - { - "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.10.0-14-g081307d", - "Rev": "081307d9bc1364753142d5962fc1d795c742baaf" - }, { "ImportPath": "github.com/armon/go-proxyproto", "Rev": "609d6338d3a76ec26ac3fe7045a164d9a58436e7" @@ -30,22 +25,6 @@ "Comment": "v1.2.0-13-g144418e", "Rev": "144418e1475d8bf7abbdc48583500f1a20c62ea7" }, - { - "ImportPath": "github.com/coreos/go-oidc/http", - "Rev": "c797a55f1c1001ec3169f1d0fbb4c5523563bec6" - }, - { - "ImportPath": "github.com/coreos/go-oidc/jose", - "Rev": "c797a55f1c1001ec3169f1d0fbb4c5523563bec6" - }, - { - "ImportPath": "github.com/coreos/go-oidc/key", - "Rev": "c797a55f1c1001ec3169f1d0fbb4c5523563bec6" - }, - { - "ImportPath": "github.com/coreos/go-oidc/oauth2", - "Rev": "c797a55f1c1001ec3169f1d0fbb4c5523563bec6" - }, { "ImportPath": "github.com/coreos/pkg/health", "Comment": "v3-2-g447b7ec", @@ -75,10 +54,18 @@ "Comment": "v1.4.2-2-gfd9ec7d", "Rev": "fd9ec7deca8bf46ecd2a795baaacf2b3a9be1197" }, + { + "ImportPath": "github.com/gambol99/go-oidc/http", + "Rev": "2111f98a1397a35f1800f4c3c354a7abebbef75c" + }, { "ImportPath": "github.com/gambol99/go-oidc/jose", "Rev": "2111f98a1397a35f1800f4c3c354a7abebbef75c" }, + { + "ImportPath": "github.com/gambol99/go-oidc/key", + "Rev": "2111f98a1397a35f1800f4c3c354a7abebbef75c" + }, { "ImportPath": "github.com/gambol99/go-oidc/oauth2", "Rev": "2111f98a1397a35f1800f4c3c354a7abebbef75c" @@ -206,6 +193,46 @@ "ImportPath": "github.com/valyala/fasttemplate", "Rev": "dcecefd839c4193db0d35b88ec65b4c12d360ab0" }, + { + "ImportPath": "go.uber.org/atomic", + "Comment": "v1.2.0-7-g0506d69", + "Rev": "0506d69f5564c56e25797bf7183c28921d4c6360" + }, + { + "ImportPath": "go.uber.org/zap", + "Comment": "v1.4.1-1-g54371c6", + "Rev": "54371c67da1bc746325e5582e48521a5db5d64ca" + }, + { + "ImportPath": "go.uber.org/zap/buffer", + "Comment": "v1.4.1-1-g54371c6", + "Rev": "54371c67da1bc746325e5582e48521a5db5d64ca" + }, + { + "ImportPath": "go.uber.org/zap/internal/bufferpool", + "Comment": "v1.4.1-1-g54371c6", + "Rev": "54371c67da1bc746325e5582e48521a5db5d64ca" + }, + { + "ImportPath": "go.uber.org/zap/internal/color", + "Comment": "v1.4.1-1-g54371c6", + "Rev": 
"54371c67da1bc746325e5582e48521a5db5d64ca" + }, + { + "ImportPath": "go.uber.org/zap/internal/exit", + "Comment": "v1.4.1-1-g54371c6", + "Rev": "54371c67da1bc746325e5582e48521a5db5d64ca" + }, + { + "ImportPath": "go.uber.org/zap/internal/multierror", + "Comment": "v1.4.1-1-g54371c6", + "Rev": "54371c67da1bc746325e5582e48521a5db5d64ca" + }, + { + "ImportPath": "go.uber.org/zap/zapcore", + "Comment": "v1.4.1-1-g54371c6", + "Rev": "54371c67da1bc746325e5582e48521a5db5d64ca" + }, { "ImportPath": "golang.org/x/crypto/acme", "Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8" @@ -279,14 +306,6 @@ "ImportPath": "gopkg.in/yaml.v2", "Rev": "49c95bdc21843256fb6c4e0d370a05f24a0bf213" }, - { - "ImportPath": "github.com/gambol99/go-oidc/http", - "Rev": "2111f98a1397a35f1800f4c3c354a7abebbef75c" - }, - { - "ImportPath": "github.com/gambol99/go-oidc/key", - "Rev": "2111f98a1397a35f1800f4c3c354a7abebbef75c" - }, { "ImportPath": "github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew", "Comment": "v1.1.4-66-gf6abca5", diff --git a/server.go b/server.go index b8e5a577..c8271123 100644 --- a/server.go +++ b/server.go @@ -537,14 +537,15 @@ func (r *oauthProxy) newOpenIDClient() (*oidc.Client, oidc.ProviderConfig, *http } client, err := oidc.NewClient(oidc.ClientConfig{ - ProviderConfig: config, Credentials: oidc.ClientCredentials{ ID: r.config.ClientID, Secret: r.config.ClientSecret, }, - RedirectURL: fmt.Sprintf("%s/oauth/callback", r.config.RedirectionURL), - Scope: append(r.config.Scopes, oidc.DefaultScope...), - HTTPClient: hc, + HTTPClient: hc, + RedirectURL: fmt.Sprintf("%s/oauth/callback", r.config.RedirectionURL), + ProviderConfig: config, + Scope: append(r.config.Scopes, oidc.DefaultScope...), + SkipClientIDCheck: r.config.SkipClientID, }) if err != nil { return nil, config, hc, err diff --git a/server_test.go b/server_test.go index 28c1837c..b37e7fbc 100644 --- a/server_test.go +++ b/server_test.go @@ -19,7 +19,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -295,7 +294,6 @@ func newTestService() string { } func newTestProxyService(config *Config) (*oauthProxy, *fakeAuthServer, string) { - log.SetOutput(ioutil.Discard) auth := newFakeAuthServer() if config == nil { config = newFakeKeycloakConfig() @@ -338,18 +336,18 @@ func newFakeHTTPRequest(method, path string) *http.Request { func newFakeKeycloakConfig() *Config { return &Config{ - ClientID: fakeClientID, - ClientSecret: fakeSecret, - CookieAccessName: "kc-access", - CookieRefreshName: "kc-state", - DiscoveryURL: "127.0.0.1:0", - Listen: "127.0.0.1:0", + ClientID: fakeClientID, + ClientSecret: fakeSecret, + CookieAccessName: "kc-access", + CookieRefreshName: "kc-state", + DisableAllLogging: true, + DiscoveryURL: "127.0.0.1:0", EnableAuthorizationHeader: true, - EnableLoginHandler: true, EnableLogging: false, - DisableAllLogging: true, - Verbose: false, + EnableLoginHandler: true, + Listen: "127.0.0.1:0", Scopes: []string{}, + Verbose: true, Resources: []*Resource{ { URL: fakeAdminRoleURL, diff --git a/utils.go b/utils.go index 3cc47a16..ec6ea218 100644 --- a/utils.go +++ b/utils.go @@ -41,7 +41,6 @@ import ( "unicode/utf8" "github.com/gambol99/go-oidc/jose" - "github.com/gambol99/go-oidc/oidc" "github.com/labstack/echo" "github.com/urfave/cli" "gopkg.in/yaml.v2" diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore deleted file mode 100644 index 66be63a0..00000000 --- a/vendor/github.com/Sirupsen/logrus/.gitignore +++ /dev/null @@ -1 
+0,0 @@ -logrus diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml deleted file mode 100644 index ff23150d..00000000 --- a/vendor/github.com/Sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 - - 1.5 - - tip -install: - - go get -t ./... -script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./... diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index f2c2bc21..00000000 --- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,66 +0,0 @@ -# 0.10.0 - -* feature: Add a test hook (#180) -* feature: `ParseLevel` is now case-insensitive (#326) -* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) -* performance: avoid re-allocations on `WithFields` (#335) - -# 0.9.0 - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository -* logrus/core: run tests with `-race` -* logrus/core: detect TTY based on `stderr` -* logrus/core: support `WithError` on logger -* logrus/core: Solaris support - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index 6f04c8a1..00000000 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,390 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. 
The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. 
- log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging though logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). - -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. 
Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | -| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | -| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | -| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | -| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | -| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | -| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| -| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| -| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. 
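
(Editorial aside, not part of the vendored README being removed: since this patch series replaces logrus with zap, the level-filtering idea described above maps onto zap roughly as in the following sketch; the configuration shown is only illustrative.)

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// zap expresses level filtering through a levelled config rather than
	// a logrus-style SetLevel call on a global logger.
	cfg := zap.NewProductionConfig()
	cfg.Level = zap.NewAtomicLevelAt(zapcore.WarnLevel) // only warn and above

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Debug("dropped: below the configured level")
	logger.Warn("emitted: at or above the configured level")
}
```
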
- -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events. - - ```go - logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"}) - ``` - -Third party logging formatters: - -* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. 
- serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - -#### Tools - -| Tool | Description | -| ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| - -#### Testing - -Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: - -* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook -* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): - -```go -logger, hook := NewNullLogger() -logger.Error("Hello error") - -assert.Equal(1, len(hook.Entries)) -assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) -assert.Equal("Hello error", hook.LastEntry().Message) - -hook.Reset() -assert.Nil(hook.LastEntry()) -``` diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go deleted file mode 100644 index dddd5f87..00000000 --- a/vendor/github.com/Sirupsen/logrus/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Package logrus is a structured logger for Go, completely API compatible with the standard library logger. - - -The simplest way to use Logrus is simply the package-level exported logger: - - package main - - import ( - log "github.com/Sirupsen/logrus" - ) - - func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "number": 1, - "size": 10, - }).Info("A walrus appears") - } - -Output: - time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 - -For a full guide visit https://github.com/Sirupsen/logrus -*/ -package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index 89e966e7..00000000 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,264 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "io" - "os" - "time" -) - -// Defines the key when adding errors using WithError. -var ErrorKey = "error" - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. 
- Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() - if err != nil { - return "", err - } - - return reader.String(), err -} - -// Add an error as single field (using the key defined in ErrorKey) to the Entry. -func (entry *Entry) WithError(err error) *Entry { - return entry.WithField(ErrorKey, err) -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := make(Fields, len(entry.Data)+len(fields)) - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) log(level Level, msg string) { - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } - - reader, err := entry.Reader() - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() - } - - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(&entry) - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) 
-} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) 
- return msg[:len(msg)-1] -} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go deleted file mode 100644 index 9a0120ac..00000000 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ /dev/null @@ -1,193 +0,0 @@ -package logrus - -import ( - "io" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() - std.Level = level -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.Level -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) -} - -// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. -func WithError(err error) *Entry { - return std.WithField(ErrorKey, err) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) 
-} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index b5fbe934..00000000 --- a/vendor/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,45 +0,0 @@ -package logrus - -import "time" - -const DefaultTimestampFormat = time.RFC3339 - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. 
-func prefixFieldClashes(data Fields) { - if t, ok := data["time"]; ok { - data["fields.time"] = t - } - - if m, ok := data["msg"]; ok { - data["fields.msg"] = m - } - - if l, ok := data["level"]; ok { - data["fields.level"] = l - } -} diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go deleted file mode 100644 index 3f151cdc..00000000 --- a/vendor/github.com/Sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type LevelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks LevelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. -func (hooks LevelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index 2ad6dc5c..00000000 --- a/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,41 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. - TimestampFormat string -} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(timestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go deleted file mode 100644 index 2fdb2317..00000000 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ /dev/null @@ -1,212 +0,0 @@ -package logrus - -import ( - "io" - "os" - "sync" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stderr`. You can also set this to - // something more adventorous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. 
For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks LevelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in - Level Level - // Used to sync writing to the log. - mu sync.Mutex -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(LevelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. -func New() *Logger { - return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, - } -} - -// Adds a field to the log entry, note that you it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// If you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - return NewEntry(logger).WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - return NewEntry(logger).WithFields(fields) -} - -// Add an error as single field to the log entry. All it does is call -// `WithError` for the given `error`. -func (logger *Logger) WithError(err error) *Entry { - return NewEntry(logger).WithError(err) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugf(format, args...) - } -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infof(format, args...) - } -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - NewEntry(logger).Printf(format, args...) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorf(format, args...) - } -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalf(format, args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicf(format, args...) - } -} - -func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debug(args...) 
- } -} - -func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Info(args...) - } -} - -func (logger *Logger) Print(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Error(args...) - } -} - -func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatal(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panic(args...) - } -} - -func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugln(args...) - } -} - -func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infoln(args...) - } -} - -func (logger *Logger) Println(args ...interface{}) { - NewEntry(logger).Println(args...) -} - -func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorln(args...) - } -} - -func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalln(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicln(args...) - } -} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go deleted file mode 100644 index e5966911..00000000 --- a/vendor/github.com/Sirupsen/logrus/logrus.go +++ /dev/null @@ -1,143 +0,0 @@ -package logrus - -import ( - "fmt" - "log" - "strings" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint8 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" - } - - return "unknown" -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch strings.ToLower(lvl) { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// A constant exposing all logging levels -var AllLevels = []Level{ - PanicLevel, - FatalLevel, - ErrorLevel, - WarnLevel, - InfoLevel, - DebugLevel, -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. 
Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var ( - _ StdLogger = &log.Logger{} - _ StdLogger = &Entry{} - _ StdLogger = &Logger{} -) - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. -type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} - -// The FieldLogger interface generalizes the Entry and Logger types -type FieldLogger interface { - WithField(key string, value interface{}) *Entry - WithFields(fields Fields) *Entry - WithError(err error) *Entry - - Debugf(format string, args ...interface{}) - Infof(format string, args ...interface{}) - Printf(format string, args ...interface{}) - Warnf(format string, args ...interface{}) - Warningf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) - Panicf(format string, args ...interface{}) - - Debug(args ...interface{}) - Info(args ...interface{}) - Print(args ...interface{}) - Warn(args ...interface{}) - Warning(args ...interface{}) - Error(args ...interface{}) - Fatal(args ...interface{}) - Panic(args ...interface{}) - - Debugln(args ...interface{}) - Infoln(args ...interface{}) - Println(args ...interface{}) - Warnln(args ...interface{}) - Warningln(args ...interface{}) - Errorln(args ...interface{}) - Fatalln(args ...interface{}) - Panicln(args ...interface{}) -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go deleted file mode 100644 index 71f8d67a..00000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build darwin freebsd openbsd netbsd dragonfly - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index a2c0b40d..00000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index b343b3a3..00000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd netbsd dragonfly - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if stderr's file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stderr - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go deleted file mode 100644 index 3e70bf7b..00000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build solaris - -package logrus - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) - return err == nil -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 0146845d..00000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if stderr's file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stderr - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 6afd0e03..00000000 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,161 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "runtime" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 - gray = 37 -) - -var ( - baseTimestamp time.Time - isTerminal bool -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. 
- FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. - DisableSorting bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var keys []string = make([]string, 0, len(entry.Data)) - for k := range entry.Data { - keys = append(keys, k) - } - - if !f.DisableSorting { - sort.Strings(keys) - } - - b := &bytes.Buffer{} - - prefixFieldClashes(entry.Data) - - isColorTerminal := isTerminal && (runtime.GOOS != "windows") - isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - if isColored { - f.printColored(b, entry, keys, timestampFormat) - } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - if entry.Message != "" { - f.appendKeyValue(b, "msg", entry.Message) - } - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) - } - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) - } -} - -func needsQuoting(text string) bool { - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { - return true - } - } - return false -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - - b.WriteString(key) - b.WriteByte('=') - - switch value := value.(type) { - case string: - if !needsQuoting(value) { - b.WriteString(value) - } else { - fmt.Fprintf(b, "%q", value) - } - case error: - errmsg := value.Error() - if !needsQuoting(errmsg) { - b.WriteString(errmsg) - } else { - fmt.Fprintf(b, "%q", value) - } - default: - fmt.Fprint(b, value) - } - - b.WriteByte(' ') -} diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go deleted file mode 100644 index f74d2aa5..00000000 --- a/vendor/github.com/Sirupsen/logrus/writer.go +++ /dev/null @@ -1,53 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() *io.PipeWriter { - return logger.WriterLevel(InfoLevel) -} - -func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { - reader, writer := io.Pipe() - - var printFunc func(args ...interface{}) - switch level { - case DebugLevel: - printFunc = logger.Debug - case InfoLevel: - printFunc = logger.Info - case WarnLevel: - printFunc = logger.Warn - case ErrorLevel: - printFunc = logger.Error - case FatalLevel: - printFunc = 
logger.Fatal - case PanicLevel: - printFunc = logger.Panic - default: - printFunc = logger.Print - } - - go logger.writerScanner(reader, printFunc) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - printFunc(scanner.Text()) - } - if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/github.com/coreos/go-oidc/http/client.go b/vendor/github.com/coreos/go-oidc/http/client.go deleted file mode 100644 index fd079b49..00000000 --- a/vendor/github.com/coreos/go-oidc/http/client.go +++ /dev/null @@ -1,7 +0,0 @@ -package http - -import "net/http" - -type Client interface { - Do(*http.Request) (*http.Response, error) -} diff --git a/vendor/github.com/coreos/go-oidc/http/doc.go b/vendor/github.com/coreos/go-oidc/http/doc.go deleted file mode 100644 index 5687e8b8..00000000 --- a/vendor/github.com/coreos/go-oidc/http/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package http is DEPRECATED. Use net/http instead. -package http diff --git a/vendor/github.com/coreos/go-oidc/http/http.go b/vendor/github.com/coreos/go-oidc/http/http.go deleted file mode 100644 index c3f51215..00000000 --- a/vendor/github.com/coreos/go-oidc/http/http.go +++ /dev/null @@ -1,156 +0,0 @@ -package http - -import ( - "encoding/base64" - "encoding/json" - "errors" - "log" - "net/http" - "net/url" - "path" - "strconv" - "strings" - "time" -) - -func WriteError(w http.ResponseWriter, code int, msg string) { - e := struct { - Error string `json:"error"` - }{ - Error: msg, - } - b, err := json.Marshal(e) - if err != nil { - log.Printf("go-oidc: failed to marshal %#v: %v", e, err) - code = http.StatusInternalServerError - b = []byte(`{"error":"server_error"}`) - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - w.Write(b) -} - -// BasicAuth parses a username and password from the request's -// Authorization header. 
This was pulled from golang master: -// https://codereview.appspot.com/76540043 -func BasicAuth(r *http.Request) (username, password string, ok bool) { - auth := r.Header.Get("Authorization") - if auth == "" { - return - } - - if !strings.HasPrefix(auth, "Basic ") { - return - } - c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) - if err != nil { - return - } - cs := string(c) - s := strings.IndexByte(cs, ':') - if s < 0 { - return - } - return cs[:s], cs[s+1:], true -} - -func cacheControlMaxAge(hdr string) (time.Duration, bool, error) { - for _, field := range strings.Split(hdr, ",") { - parts := strings.SplitN(strings.TrimSpace(field), "=", 2) - k := strings.ToLower(strings.TrimSpace(parts[0])) - if k != "max-age" { - continue - } - - if len(parts) == 1 { - return 0, false, errors.New("max-age has no value") - } - - v := strings.TrimSpace(parts[1]) - if v == "" { - return 0, false, errors.New("max-age has empty value") - } - - age, err := strconv.Atoi(v) - if err != nil { - return 0, false, err - } - - if age <= 0 { - return 0, false, nil - } - - return time.Duration(age) * time.Second, true, nil - } - - return 0, false, nil -} - -func expires(date, expires string) (time.Duration, bool, error) { - if date == "" || expires == "" { - return 0, false, nil - } - - te, err := time.Parse(time.RFC1123, expires) - if err != nil { - return 0, false, err - } - - td, err := time.Parse(time.RFC1123, date) - if err != nil { - return 0, false, err - } - - ttl := te.Sub(td) - - // headers indicate data already expired, caller should not - // have to care about this case - if ttl <= 0 { - return 0, false, nil - } - - return ttl, true, nil -} - -func Cacheable(hdr http.Header) (time.Duration, bool, error) { - ttl, ok, err := cacheControlMaxAge(hdr.Get("Cache-Control")) - if err != nil || ok { - return ttl, ok, err - } - - return expires(hdr.Get("Date"), hdr.Get("Expires")) -} - -// MergeQuery appends additional query values to an existing URL. -func MergeQuery(u url.URL, q url.Values) url.URL { - uv := u.Query() - for k, vs := range q { - for _, v := range vs { - uv.Add(k, v) - } - } - u.RawQuery = uv.Encode() - return u -} - -// NewResourceLocation appends a resource id to the end of the requested URL path. -func NewResourceLocation(reqURL *url.URL, id string) string { - var u url.URL - u = *reqURL - u.Path = path.Join(u.Path, id) - u.RawQuery = "" - u.Fragment = "" - return u.String() -} - -// CopyRequest returns a clone of the provided *http.Request. -// The returned object is a shallow copy of the struct and a -// deep copy of its Header field. -func CopyRequest(r *http.Request) *http.Request { - r2 := *r - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return &r2 -} diff --git a/vendor/github.com/coreos/go-oidc/http/url.go b/vendor/github.com/coreos/go-oidc/http/url.go deleted file mode 100644 index df60eb1a..00000000 --- a/vendor/github.com/coreos/go-oidc/http/url.go +++ /dev/null @@ -1,29 +0,0 @@ -package http - -import ( - "errors" - "net/url" -) - -// ParseNonEmptyURL checks that a string is a parsable URL which is also not empty -// since `url.Parse("")` does not return an error. Must contian a scheme and a host. 
-func ParseNonEmptyURL(u string) (*url.URL, error) { - if u == "" { - return nil, errors.New("url is empty") - } - - ur, err := url.Parse(u) - if err != nil { - return nil, err - } - - if ur.Scheme == "" { - return nil, errors.New("url scheme is empty") - } - - if ur.Host == "" { - return nil, errors.New("url host is empty") - } - - return ur, nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/claims.go b/vendor/github.com/coreos/go-oidc/jose/claims.go deleted file mode 100644 index 8b48bfd2..00000000 --- a/vendor/github.com/coreos/go-oidc/jose/claims.go +++ /dev/null @@ -1,126 +0,0 @@ -package jose - -import ( - "encoding/json" - "fmt" - "math" - "time" -) - -type Claims map[string]interface{} - -func (c Claims) Add(name string, value interface{}) { - c[name] = value -} - -func (c Claims) StringClaim(name string) (string, bool, error) { - cl, ok := c[name] - if !ok { - return "", false, nil - } - - v, ok := cl.(string) - if !ok { - return "", false, fmt.Errorf("unable to parse claim as string: %v", name) - } - - return v, true, nil -} - -func (c Claims) StringsClaim(name string) ([]string, bool, error) { - cl, ok := c[name] - if !ok { - return nil, false, nil - } - - if v, ok := cl.([]string); ok { - return v, true, nil - } - - // When unmarshaled, []string will become []interface{}. - if v, ok := cl.([]interface{}); ok { - var ret []string - for _, vv := range v { - str, ok := vv.(string) - if !ok { - return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name) - } - ret = append(ret, str) - } - return ret, true, nil - } - - return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name) -} - -func (c Claims) Int64Claim(name string) (int64, bool, error) { - cl, ok := c[name] - if !ok { - return 0, false, nil - } - - v, ok := cl.(int64) - if !ok { - vf, ok := cl.(float64) - if !ok { - return 0, false, fmt.Errorf("unable to parse claim as int64: %v", name) - } - v = int64(vf) - } - - return v, true, nil -} - -func (c Claims) Float64Claim(name string) (float64, bool, error) { - cl, ok := c[name] - if !ok { - return 0, false, nil - } - - v, ok := cl.(float64) - if !ok { - vi, ok := cl.(int64) - if !ok { - return 0, false, fmt.Errorf("unable to parse claim as float64: %v", name) - } - v = float64(vi) - } - - return v, true, nil -} - -func (c Claims) TimeClaim(name string) (time.Time, bool, error) { - v, ok, err := c.Float64Claim(name) - if !ok || err != nil { - return time.Time{}, ok, err - } - - s := math.Trunc(v) - ns := (v - s) * math.Pow(10, 9) - return time.Unix(int64(s), int64(ns)).UTC(), true, nil -} - -func decodeClaims(payload []byte) (Claims, error) { - var c Claims - if err := json.Unmarshal(payload, &c); err != nil { - return nil, fmt.Errorf("malformed JWT claims, unable to decode: %v", err) - } - return c, nil -} - -func marshalClaims(c Claims) ([]byte, error) { - b, err := json.Marshal(c) - if err != nil { - return nil, err - } - return b, nil -} - -func encodeClaims(c Claims) (string, error) { - b, err := marshalClaims(c) - if err != nil { - return "", err - } - - return encodeSegment(b), nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/doc.go b/vendor/github.com/coreos/go-oidc/jose/doc.go deleted file mode 100644 index b5e13217..00000000 --- a/vendor/github.com/coreos/go-oidc/jose/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package jose is DEPRECATED. Use gopkg.in/square/go-jose.v2 instead. 
-package jose diff --git a/vendor/github.com/coreos/go-oidc/jose/jose.go b/vendor/github.com/coreos/go-oidc/jose/jose.go deleted file mode 100644 index 62099265..00000000 --- a/vendor/github.com/coreos/go-oidc/jose/jose.go +++ /dev/null @@ -1,112 +0,0 @@ -package jose - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "strings" -) - -const ( - HeaderMediaType = "typ" - HeaderKeyAlgorithm = "alg" - HeaderKeyID = "kid" -) - -const ( - // Encryption Algorithm Header Parameter Values for JWS - // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-6 - AlgHS256 = "HS256" - AlgHS384 = "HS384" - AlgHS512 = "HS512" - AlgRS256 = "RS256" - AlgRS384 = "RS384" - AlgRS512 = "RS512" - AlgES256 = "ES256" - AlgES384 = "ES384" - AlgES512 = "ES512" - AlgPS256 = "PS256" - AlgPS384 = "PS384" - AlgPS512 = "PS512" - AlgNone = "none" -) - -const ( - // Algorithm Header Parameter Values for JWE - // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#section-4.1 - AlgRSA15 = "RSA1_5" - AlgRSAOAEP = "RSA-OAEP" - AlgRSAOAEP256 = "RSA-OAEP-256" - AlgA128KW = "A128KW" - AlgA192KW = "A192KW" - AlgA256KW = "A256KW" - AlgDir = "dir" - AlgECDHES = "ECDH-ES" - AlgECDHESA128KW = "ECDH-ES+A128KW" - AlgECDHESA192KW = "ECDH-ES+A192KW" - AlgECDHESA256KW = "ECDH-ES+A256KW" - AlgA128GCMKW = "A128GCMKW" - AlgA192GCMKW = "A192GCMKW" - AlgA256GCMKW = "A256GCMKW" - AlgPBES2HS256A128KW = "PBES2-HS256+A128KW" - AlgPBES2HS384A192KW = "PBES2-HS384+A192KW" - AlgPBES2HS512A256KW = "PBES2-HS512+A256KW" -) - -const ( - // Encryption Algorithm Header Parameter Values for JWE - // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-22 - EncA128CBCHS256 = "A128CBC-HS256" - EncA128CBCHS384 = "A128CBC-HS384" - EncA256CBCHS512 = "A256CBC-HS512" - EncA128GCM = "A128GCM" - EncA192GCM = "A192GCM" - EncA256GCM = "A256GCM" -) - -type JOSEHeader map[string]string - -func (j JOSEHeader) Validate() error { - if _, exists := j[HeaderKeyAlgorithm]; !exists { - return fmt.Errorf("header missing %q parameter", HeaderKeyAlgorithm) - } - - return nil -} - -func decodeHeader(seg string) (JOSEHeader, error) { - b, err := decodeSegment(seg) - if err != nil { - return nil, err - } - - var h JOSEHeader - err = json.Unmarshal(b, &h) - if err != nil { - return nil, err - } - - return h, nil -} - -func encodeHeader(h JOSEHeader) (string, error) { - b, err := json.Marshal(h) - if err != nil { - return "", err - } - - return encodeSegment(b), nil -} - -// Decode JWT specific base64url encoding with padding stripped -func decodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l != 0 { - seg += strings.Repeat("=", 4-l) - } - return base64.URLEncoding.DecodeString(seg) -} - -// Encode JWT specific base64url encoding with padding stripped -func encodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") -} diff --git a/vendor/github.com/coreos/go-oidc/jose/jwk.go b/vendor/github.com/coreos/go-oidc/jose/jwk.go deleted file mode 100644 index 119f073f..00000000 --- a/vendor/github.com/coreos/go-oidc/jose/jwk.go +++ /dev/null @@ -1,135 +0,0 @@ -package jose - -import ( - "bytes" - "encoding/base64" - "encoding/binary" - "encoding/json" - "math/big" - "strings" -) - -// JSON Web Key -// https://tools.ietf.org/html/draft-ietf-jose-json-web-key-36#page-5 -type JWK struct { - ID string - Type string - Alg string - Use string - Exponent int - Modulus *big.Int - Secret []byte -} - -type jwkJSON struct { - ID string `json:"kid"` - Type string 
`json:"kty"` - Alg string `json:"alg"` - Use string `json:"use"` - Exponent string `json:"e"` - Modulus string `json:"n"` -} - -func (j *JWK) MarshalJSON() ([]byte, error) { - t := jwkJSON{ - ID: j.ID, - Type: j.Type, - Alg: j.Alg, - Use: j.Use, - Exponent: encodeExponent(j.Exponent), - Modulus: encodeModulus(j.Modulus), - } - - return json.Marshal(&t) -} - -func (j *JWK) UnmarshalJSON(data []byte) error { - var t jwkJSON - err := json.Unmarshal(data, &t) - if err != nil { - return err - } - - e, err := decodeExponent(t.Exponent) - if err != nil { - return err - } - - n, err := decodeModulus(t.Modulus) - if err != nil { - return err - } - - j.ID = t.ID - j.Type = t.Type - j.Alg = t.Alg - j.Use = t.Use - j.Exponent = e - j.Modulus = n - - return nil -} - -type JWKSet struct { - Keys []JWK `json:"keys"` -} - -func decodeExponent(e string) (int, error) { - decE, err := decodeBase64URLPaddingOptional(e) - if err != nil { - return 0, err - } - var eBytes []byte - if len(decE) < 8 { - eBytes = make([]byte, 8-len(decE), 8) - eBytes = append(eBytes, decE...) - } else { - eBytes = decE - } - eReader := bytes.NewReader(eBytes) - var E uint64 - err = binary.Read(eReader, binary.BigEndian, &E) - if err != nil { - return 0, err - } - return int(E), nil -} - -func encodeExponent(e int) string { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, uint64(e)) - var idx int - for ; idx < 8; idx++ { - if b[idx] != 0x0 { - break - } - } - return base64.RawURLEncoding.EncodeToString(b[idx:]) -} - -// Turns a URL encoded modulus of a key into a big int. -func decodeModulus(n string) (*big.Int, error) { - decN, err := decodeBase64URLPaddingOptional(n) - if err != nil { - return nil, err - } - N := big.NewInt(0) - N.SetBytes(decN) - return N, nil -} - -func encodeModulus(n *big.Int) string { - return base64.RawURLEncoding.EncodeToString(n.Bytes()) -} - -// decodeBase64URLPaddingOptional decodes Base64 whether there is padding or not. -// The stdlib version currently doesn't handle this. -// We can get rid of this is if this bug: -// https://github.com/golang/go/issues/4237 -// ever closes. -func decodeBase64URLPaddingOptional(e string) ([]byte, error) { - if m := len(e) % 4; m != 0 { - e += strings.Repeat("=", 4-m) - } - return base64.URLEncoding.DecodeString(e) -} diff --git a/vendor/github.com/coreos/go-oidc/jose/jws.go b/vendor/github.com/coreos/go-oidc/jose/jws.go deleted file mode 100644 index 1049ece8..00000000 --- a/vendor/github.com/coreos/go-oidc/jose/jws.go +++ /dev/null @@ -1,51 +0,0 @@ -package jose - -import ( - "fmt" - "strings" -) - -type JWS struct { - RawHeader string - Header JOSEHeader - RawPayload string - Payload []byte - Signature []byte -} - -// Given a raw encoded JWS token parses it and verifies the structure. 
-func ParseJWS(raw string) (JWS, error) { - parts := strings.Split(raw, ".") - if len(parts) != 3 { - return JWS{}, fmt.Errorf("malformed JWS, only %d segments", len(parts)) - } - - rawSig := parts[2] - jws := JWS{ - RawHeader: parts[0], - RawPayload: parts[1], - } - - header, err := decodeHeader(jws.RawHeader) - if err != nil { - return JWS{}, fmt.Errorf("malformed JWS, unable to decode header, %s", err) - } - if err = header.Validate(); err != nil { - return JWS{}, fmt.Errorf("malformed JWS, %s", err) - } - jws.Header = header - - payload, err := decodeSegment(jws.RawPayload) - if err != nil { - return JWS{}, fmt.Errorf("malformed JWS, unable to decode payload: %s", err) - } - jws.Payload = payload - - sig, err := decodeSegment(rawSig) - if err != nil { - return JWS{}, fmt.Errorf("malformed JWS, unable to decode signature: %s", err) - } - jws.Signature = sig - - return jws, nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/jwt.go b/vendor/github.com/coreos/go-oidc/jose/jwt.go deleted file mode 100644 index 3b3e9634..00000000 --- a/vendor/github.com/coreos/go-oidc/jose/jwt.go +++ /dev/null @@ -1,82 +0,0 @@ -package jose - -import "strings" - -type JWT JWS - -func ParseJWT(token string) (jwt JWT, err error) { - jws, err := ParseJWS(token) - if err != nil { - return - } - - return JWT(jws), nil -} - -func NewJWT(header JOSEHeader, claims Claims) (jwt JWT, err error) { - jwt = JWT{} - - jwt.Header = header - jwt.Header[HeaderMediaType] = "JWT" - - claimBytes, err := marshalClaims(claims) - if err != nil { - return - } - jwt.Payload = claimBytes - - eh, err := encodeHeader(header) - if err != nil { - return - } - jwt.RawHeader = eh - - ec, err := encodeClaims(claims) - if err != nil { - return - } - jwt.RawPayload = ec - - return -} - -func (j *JWT) KeyID() (string, bool) { - kID, ok := j.Header[HeaderKeyID] - return kID, ok -} - -func (j *JWT) Claims() (Claims, error) { - return decodeClaims(j.Payload) -} - -// Encoded data part of the token which may be signed. 
-func (j *JWT) Data() string { - return strings.Join([]string{j.RawHeader, j.RawPayload}, ".") -} - -// Full encoded JWT token string in format: header.claims.signature -func (j *JWT) Encode() string { - d := j.Data() - s := encodeSegment(j.Signature) - return strings.Join([]string{d, s}, ".") -} - -func NewSignedJWT(claims Claims, s Signer) (*JWT, error) { - header := JOSEHeader{ - HeaderKeyAlgorithm: s.Alg(), - HeaderKeyID: s.ID(), - } - - jwt, err := NewJWT(header, claims) - if err != nil { - return nil, err - } - - sig, err := s.Sign([]byte(jwt.Data())) - if err != nil { - return nil, err - } - jwt.Signature = sig - - return &jwt, nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/sig.go b/vendor/github.com/coreos/go-oidc/jose/sig.go deleted file mode 100755 index 7b2b253c..00000000 --- a/vendor/github.com/coreos/go-oidc/jose/sig.go +++ /dev/null @@ -1,24 +0,0 @@ -package jose - -import ( - "fmt" -) - -type Verifier interface { - ID() string - Alg() string - Verify(sig []byte, data []byte) error -} - -type Signer interface { - Verifier - Sign(data []byte) (sig []byte, err error) -} - -func NewVerifier(jwk JWK) (Verifier, error) { - if jwk.Type != "RSA" { - return nil, fmt.Errorf("unsupported key type %q", jwk.Type) - } - - return NewVerifierRSA(jwk) -} diff --git a/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go b/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go deleted file mode 100755 index 004e45dd..00000000 --- a/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go +++ /dev/null @@ -1,67 +0,0 @@ -package jose - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "fmt" -) - -type VerifierRSA struct { - KeyID string - Hash crypto.Hash - PublicKey rsa.PublicKey -} - -type SignerRSA struct { - PrivateKey rsa.PrivateKey - VerifierRSA -} - -func NewVerifierRSA(jwk JWK) (*VerifierRSA, error) { - if jwk.Alg != "" && jwk.Alg != "RS256" { - return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg) - } - - v := VerifierRSA{ - KeyID: jwk.ID, - PublicKey: rsa.PublicKey{ - N: jwk.Modulus, - E: jwk.Exponent, - }, - Hash: crypto.SHA256, - } - - return &v, nil -} - -func NewSignerRSA(kid string, key rsa.PrivateKey) *SignerRSA { - return &SignerRSA{ - PrivateKey: key, - VerifierRSA: VerifierRSA{ - KeyID: kid, - PublicKey: key.PublicKey, - Hash: crypto.SHA256, - }, - } -} - -func (v *VerifierRSA) ID() string { - return v.KeyID -} - -func (v *VerifierRSA) Alg() string { - return "RS256" -} - -func (v *VerifierRSA) Verify(sig []byte, data []byte) error { - h := v.Hash.New() - h.Write(data) - return rsa.VerifyPKCS1v15(&v.PublicKey, v.Hash, h.Sum(nil), sig) -} - -func (s *SignerRSA) Sign(data []byte) ([]byte, error) { - h := s.Hash.New() - h.Write(data) - return rsa.SignPKCS1v15(rand.Reader, &s.PrivateKey, s.Hash, h.Sum(nil)) -} diff --git a/vendor/github.com/coreos/go-oidc/key/doc.go b/vendor/github.com/coreos/go-oidc/key/doc.go deleted file mode 100644 index 936eec74..00000000 --- a/vendor/github.com/coreos/go-oidc/key/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package key is DEPRECATED. Use github.com/coreos/go-oidc instead. 
-package key diff --git a/vendor/github.com/coreos/go-oidc/key/key.go b/vendor/github.com/coreos/go-oidc/key/key.go deleted file mode 100644 index 208c1fc1..00000000 --- a/vendor/github.com/coreos/go-oidc/key/key.go +++ /dev/null @@ -1,153 +0,0 @@ -package key - -import ( - "crypto/rand" - "crypto/rsa" - "encoding/hex" - "encoding/json" - "io" - "time" - - "github.com/coreos/go-oidc/jose" -) - -func NewPublicKey(jwk jose.JWK) *PublicKey { - return &PublicKey{jwk: jwk} -} - -type PublicKey struct { - jwk jose.JWK -} - -func (k *PublicKey) MarshalJSON() ([]byte, error) { - return json.Marshal(&k.jwk) -} - -func (k *PublicKey) UnmarshalJSON(data []byte) error { - var jwk jose.JWK - if err := json.Unmarshal(data, &jwk); err != nil { - return err - } - k.jwk = jwk - return nil -} - -func (k *PublicKey) ID() string { - return k.jwk.ID -} - -func (k *PublicKey) Verifier() (jose.Verifier, error) { - return jose.NewVerifierRSA(k.jwk) -} - -type PrivateKey struct { - KeyID string - PrivateKey *rsa.PrivateKey -} - -func (k *PrivateKey) ID() string { - return k.KeyID -} - -func (k *PrivateKey) Signer() jose.Signer { - return jose.NewSignerRSA(k.ID(), *k.PrivateKey) -} - -func (k *PrivateKey) JWK() jose.JWK { - return jose.JWK{ - ID: k.KeyID, - Type: "RSA", - Alg: "RS256", - Use: "sig", - Exponent: k.PrivateKey.PublicKey.E, - Modulus: k.PrivateKey.PublicKey.N, - } -} - -type KeySet interface { - ExpiresAt() time.Time -} - -type PublicKeySet struct { - keys []PublicKey - index map[string]*PublicKey - expiresAt time.Time -} - -func NewPublicKeySet(jwks []jose.JWK, exp time.Time) *PublicKeySet { - keys := make([]PublicKey, len(jwks)) - index := make(map[string]*PublicKey) - for i, jwk := range jwks { - keys[i] = *NewPublicKey(jwk) - index[keys[i].ID()] = &keys[i] - } - return &PublicKeySet{ - keys: keys, - index: index, - expiresAt: exp, - } -} - -func (s *PublicKeySet) ExpiresAt() time.Time { - return s.expiresAt -} - -func (s *PublicKeySet) Keys() []PublicKey { - return s.keys -} - -func (s *PublicKeySet) Key(id string) *PublicKey { - return s.index[id] -} - -type PrivateKeySet struct { - keys []*PrivateKey - ActiveKeyID string - expiresAt time.Time -} - -func NewPrivateKeySet(keys []*PrivateKey, exp time.Time) *PrivateKeySet { - return &PrivateKeySet{ - keys: keys, - ActiveKeyID: keys[0].ID(), - expiresAt: exp.UTC(), - } -} - -func (s *PrivateKeySet) Keys() []*PrivateKey { - return s.keys -} - -func (s *PrivateKeySet) ExpiresAt() time.Time { - return s.expiresAt -} - -func (s *PrivateKeySet) Active() *PrivateKey { - for i, k := range s.keys { - if k.ID() == s.ActiveKeyID { - return s.keys[i] - } - } - - return nil -} - -type GeneratePrivateKeyFunc func() (*PrivateKey, error) - -func GeneratePrivateKey() (*PrivateKey, error) { - pk, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return nil, err - } - keyID := make([]byte, 20) - if _, err := io.ReadFull(rand.Reader, keyID); err != nil { - return nil, err - } - - k := PrivateKey{ - KeyID: hex.EncodeToString(keyID), - PrivateKey: pk, - } - - return &k, nil -} diff --git a/vendor/github.com/coreos/go-oidc/key/manager.go b/vendor/github.com/coreos/go-oidc/key/manager.go deleted file mode 100644 index 476ab6a8..00000000 --- a/vendor/github.com/coreos/go-oidc/key/manager.go +++ /dev/null @@ -1,99 +0,0 @@ -package key - -import ( - "errors" - "time" - - "github.com/jonboulle/clockwork" - - "github.com/coreos/go-oidc/jose" - "github.com/coreos/pkg/health" -) - -type PrivateKeyManager interface { - ExpiresAt() time.Time - Signer() (jose.Signer, 
error) - JWKs() ([]jose.JWK, error) - PublicKeys() ([]PublicKey, error) - - WritableKeySetRepo - health.Checkable -} - -func NewPrivateKeyManager() PrivateKeyManager { - return &privateKeyManager{ - clock: clockwork.NewRealClock(), - } -} - -type privateKeyManager struct { - keySet *PrivateKeySet - clock clockwork.Clock -} - -func (m *privateKeyManager) ExpiresAt() time.Time { - if m.keySet == nil { - return m.clock.Now().UTC() - } - - return m.keySet.ExpiresAt() -} - -func (m *privateKeyManager) Signer() (jose.Signer, error) { - if err := m.Healthy(); err != nil { - return nil, err - } - - return m.keySet.Active().Signer(), nil -} - -func (m *privateKeyManager) JWKs() ([]jose.JWK, error) { - if err := m.Healthy(); err != nil { - return nil, err - } - - keys := m.keySet.Keys() - jwks := make([]jose.JWK, len(keys)) - for i, k := range keys { - jwks[i] = k.JWK() - } - return jwks, nil -} - -func (m *privateKeyManager) PublicKeys() ([]PublicKey, error) { - jwks, err := m.JWKs() - if err != nil { - return nil, err - } - keys := make([]PublicKey, len(jwks)) - for i, jwk := range jwks { - keys[i] = *NewPublicKey(jwk) - } - return keys, nil -} - -func (m *privateKeyManager) Healthy() error { - if m.keySet == nil { - return errors.New("private key manager uninitialized") - } - - if len(m.keySet.Keys()) == 0 { - return errors.New("private key manager zero keys") - } - - if m.keySet.ExpiresAt().Before(m.clock.Now().UTC()) { - return errors.New("private key manager keys expired") - } - - return nil -} - -func (m *privateKeyManager) Set(keySet KeySet) error { - privKeySet, ok := keySet.(*PrivateKeySet) - if !ok { - return errors.New("unable to cast to PrivateKeySet") - } - - m.keySet = privKeySet - return nil -} diff --git a/vendor/github.com/coreos/go-oidc/key/repo.go b/vendor/github.com/coreos/go-oidc/key/repo.go deleted file mode 100644 index 1acdeb36..00000000 --- a/vendor/github.com/coreos/go-oidc/key/repo.go +++ /dev/null @@ -1,55 +0,0 @@ -package key - -import ( - "errors" - "sync" -) - -var ErrorNoKeys = errors.New("no keys found") - -type WritableKeySetRepo interface { - Set(KeySet) error -} - -type ReadableKeySetRepo interface { - Get() (KeySet, error) -} - -type PrivateKeySetRepo interface { - WritableKeySetRepo - ReadableKeySetRepo -} - -func NewPrivateKeySetRepo() PrivateKeySetRepo { - return &memPrivateKeySetRepo{} -} - -type memPrivateKeySetRepo struct { - mu sync.RWMutex - pks PrivateKeySet -} - -func (r *memPrivateKeySetRepo) Set(ks KeySet) error { - pks, ok := ks.(*PrivateKeySet) - if !ok { - return errors.New("unable to cast to PrivateKeySet") - } else if pks == nil { - return errors.New("nil KeySet") - } - - r.mu.Lock() - defer r.mu.Unlock() - - r.pks = *pks - return nil -} - -func (r *memPrivateKeySetRepo) Get() (KeySet, error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.pks.keys == nil { - return nil, ErrorNoKeys - } - return KeySet(&r.pks), nil -} diff --git a/vendor/github.com/coreos/go-oidc/key/rotate.go b/vendor/github.com/coreos/go-oidc/key/rotate.go deleted file mode 100644 index bc6cdfb1..00000000 --- a/vendor/github.com/coreos/go-oidc/key/rotate.go +++ /dev/null @@ -1,159 +0,0 @@ -package key - -import ( - "errors" - "log" - "time" - - ptime "github.com/coreos/pkg/timeutil" - "github.com/jonboulle/clockwork" -) - -var ( - ErrorPrivateKeysExpired = errors.New("private keys have expired") -) - -func NewPrivateKeyRotator(repo PrivateKeySetRepo, ttl time.Duration) *PrivateKeyRotator { - return &PrivateKeyRotator{ - repo: repo, - ttl: ttl, - - keep: 2, - generateKey: 
GeneratePrivateKey, - clock: clockwork.NewRealClock(), - } -} - -type PrivateKeyRotator struct { - repo PrivateKeySetRepo - generateKey GeneratePrivateKeyFunc - clock clockwork.Clock - keep int - ttl time.Duration -} - -func (r *PrivateKeyRotator) expiresAt() time.Time { - return r.clock.Now().UTC().Add(r.ttl) -} - -func (r *PrivateKeyRotator) Healthy() error { - pks, err := r.privateKeySet() - if err != nil { - return err - } - - if r.clock.Now().After(pks.ExpiresAt()) { - return ErrorPrivateKeysExpired - } - - return nil -} - -func (r *PrivateKeyRotator) privateKeySet() (*PrivateKeySet, error) { - ks, err := r.repo.Get() - if err != nil { - return nil, err - } - - pks, ok := ks.(*PrivateKeySet) - if !ok { - return nil, errors.New("unable to cast to PrivateKeySet") - } - return pks, nil -} - -func (r *PrivateKeyRotator) nextRotation() (time.Duration, error) { - pks, err := r.privateKeySet() - if err == ErrorNoKeys { - return 0, nil - } - if err != nil { - return 0, err - } - - now := r.clock.Now() - - // Ideally, we want to rotate after half the TTL has elapsed. - idealRotationTime := pks.ExpiresAt().Add(-r.ttl / 2) - - // If we are past the ideal rotation time, rotate immediatly. - return max(0, idealRotationTime.Sub(now)), nil -} - -func max(a, b time.Duration) time.Duration { - if a > b { - return a - } - return b -} - -func (r *PrivateKeyRotator) Run() chan struct{} { - attempt := func() { - k, err := r.generateKey() - if err != nil { - log.Printf("go-oidc: failed generating signing key: %v", err) - return - } - - exp := r.expiresAt() - if err := rotatePrivateKeys(r.repo, k, r.keep, exp); err != nil { - log.Printf("go-oidc: key rotation failed: %v", err) - return - } - } - - stop := make(chan struct{}) - go func() { - for { - var nextRotation time.Duration - var sleep time.Duration - var err error - for { - if nextRotation, err = r.nextRotation(); err == nil { - break - } - sleep = ptime.ExpBackoff(sleep, time.Minute) - log.Printf("go-oidc: error getting nextRotation, retrying in %v: %v", sleep, err) - time.Sleep(sleep) - } - - select { - case <-r.clock.After(nextRotation): - attempt() - case <-stop: - return - } - } - }() - - return stop -} - -func rotatePrivateKeys(repo PrivateKeySetRepo, k *PrivateKey, keep int, exp time.Time) error { - ks, err := repo.Get() - if err != nil && err != ErrorNoKeys { - return err - } - - var keys []*PrivateKey - if ks != nil { - pks, ok := ks.(*PrivateKeySet) - if !ok { - return errors.New("unable to cast to PrivateKeySet") - } - keys = pks.Keys() - } - - keys = append([]*PrivateKey{k}, keys...) 
- if l := len(keys); l > keep { - keys = keys[0:keep] - } - - nks := PrivateKeySet{ - keys: keys, - ActiveKeyID: k.ID(), - expiresAt: exp, - } - - return repo.Set(KeySet(&nks)) -} diff --git a/vendor/github.com/coreos/go-oidc/key/sync.go b/vendor/github.com/coreos/go-oidc/key/sync.go deleted file mode 100644 index b887f7b5..00000000 --- a/vendor/github.com/coreos/go-oidc/key/sync.go +++ /dev/null @@ -1,91 +0,0 @@ -package key - -import ( - "errors" - "log" - "time" - - "github.com/jonboulle/clockwork" - - "github.com/coreos/pkg/timeutil" -) - -func NewKeySetSyncer(r ReadableKeySetRepo, w WritableKeySetRepo) *KeySetSyncer { - return &KeySetSyncer{ - readable: r, - writable: w, - clock: clockwork.NewRealClock(), - } -} - -type KeySetSyncer struct { - readable ReadableKeySetRepo - writable WritableKeySetRepo - clock clockwork.Clock -} - -func (s *KeySetSyncer) Run() chan struct{} { - stop := make(chan struct{}) - go func() { - var failing bool - var next time.Duration - for { - exp, err := syncKeySet(s.readable, s.writable, s.clock) - if err != nil || exp == 0 { - if !failing { - failing = true - next = time.Second - } else { - next = timeutil.ExpBackoff(next, time.Minute) - } - if exp == 0 { - log.Printf("Synced to already expired key set, retrying in %v: %v", next, err) - - } else { - log.Printf("Failed syncing key set, retrying in %v: %v", next, err) - } - } else { - failing = false - next = exp / 2 - } - - select { - case <-s.clock.After(next): - continue - case <-stop: - return - } - } - }() - - return stop -} - -func Sync(r ReadableKeySetRepo, w WritableKeySetRepo) (time.Duration, error) { - return syncKeySet(r, w, clockwork.NewRealClock()) -} - -// syncKeySet copies the keyset from r to the KeySet at w and returns the duration in which the KeySet will expire. -// If keyset has already expired, returns a zero duration. -func syncKeySet(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) { - var ks KeySet - ks, err = r.Get() - if err != nil { - return - } - - if ks == nil { - err = errors.New("no source KeySet") - return - } - - if err = w.Set(ks); err != nil { - return - } - - now := clock.Now() - if ks.ExpiresAt().After(now) { - exp = ks.ExpiresAt().Sub(now) - } - return -} diff --git a/vendor/github.com/coreos/go-oidc/oauth2/doc.go b/vendor/github.com/coreos/go-oidc/oauth2/doc.go deleted file mode 100644 index 52eb3085..00000000 --- a/vendor/github.com/coreos/go-oidc/oauth2/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package oauth2 is DEPRECATED. Use golang.org/x/oauth instead. 
-package oauth2 diff --git a/vendor/github.com/coreos/go-oidc/oauth2/error.go b/vendor/github.com/coreos/go-oidc/oauth2/error.go deleted file mode 100644 index 50d89094..00000000 --- a/vendor/github.com/coreos/go-oidc/oauth2/error.go +++ /dev/null @@ -1,29 +0,0 @@ -package oauth2 - -const ( - ErrorAccessDenied = "access_denied" - ErrorInvalidClient = "invalid_client" - ErrorInvalidGrant = "invalid_grant" - ErrorInvalidRequest = "invalid_request" - ErrorServerError = "server_error" - ErrorUnauthorizedClient = "unauthorized_client" - ErrorUnsupportedGrantType = "unsupported_grant_type" - ErrorUnsupportedResponseType = "unsupported_response_type" -) - -type Error struct { - Type string `json:"error"` - Description string `json:"error_description,omitempty"` - State string `json:"state,omitempty"` -} - -func (e *Error) Error() string { - if e.Description != "" { - return e.Type + ": " + e.Description - } - return e.Type -} - -func NewError(typ string) *Error { - return &Error{Type: typ} -} diff --git a/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go b/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go deleted file mode 100644 index 72d1d671..00000000 --- a/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go +++ /dev/null @@ -1,416 +0,0 @@ -package oauth2 - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "mime" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - - phttp "github.com/coreos/go-oidc/http" -) - -// ResponseTypesEqual compares two response_type values. If either -// contains a space, it is treated as an unordered list. For example, -// comparing "code id_token" and "id_token code" would evaluate to true. -func ResponseTypesEqual(r1, r2 string) bool { - if !strings.Contains(r1, " ") || !strings.Contains(r2, " ") { - // fast route, no split needed - return r1 == r2 - } - - // split, sort, and compare - r1Fields := strings.Fields(r1) - r2Fields := strings.Fields(r2) - if len(r1Fields) != len(r2Fields) { - return false - } - sort.Strings(r1Fields) - sort.Strings(r2Fields) - for i, r1Field := range r1Fields { - if r1Field != r2Fields[i] { - return false - } - } - return true -} - -const ( - // OAuth2.0 response types registered by OIDC. - // - // See: https://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#RegistryContents - ResponseTypeCode = "code" - ResponseTypeCodeIDToken = "code id_token" - ResponseTypeCodeIDTokenToken = "code id_token token" - ResponseTypeIDToken = "id_token" - ResponseTypeIDTokenToken = "id_token token" - ResponseTypeToken = "token" - ResponseTypeNone = "none" -) - -const ( - GrantTypeAuthCode = "authorization_code" - GrantTypeClientCreds = "client_credentials" - GrantTypeUserCreds = "password" - GrantTypeImplicit = "implicit" - GrantTypeRefreshToken = "refresh_token" - - AuthMethodClientSecretPost = "client_secret_post" - AuthMethodClientSecretBasic = "client_secret_basic" - AuthMethodClientSecretJWT = "client_secret_jwt" - AuthMethodPrivateKeyJWT = "private_key_jwt" -) - -type Config struct { - Credentials ClientCredentials - Scope []string - RedirectURL string - AuthURL string - TokenURL string - - // Must be one of the AuthMethodXXX methods above. Right now, only - // AuthMethodClientSecretPost and AuthMethodClientSecretBasic are supported. 
- AuthMethod string -} - -type Client struct { - hc phttp.Client - creds ClientCredentials - scope []string - authURL *url.URL - redirectURL *url.URL - tokenURL *url.URL - authMethod string -} - -type ClientCredentials struct { - ID string - Secret string -} - -func NewClient(hc phttp.Client, cfg Config) (c *Client, err error) { - if len(cfg.Credentials.ID) == 0 { - err = errors.New("missing client id") - return - } - - if len(cfg.Credentials.Secret) == 0 { - err = errors.New("missing client secret") - return - } - - if cfg.AuthMethod == "" { - cfg.AuthMethod = AuthMethodClientSecretBasic - } else if cfg.AuthMethod != AuthMethodClientSecretPost && cfg.AuthMethod != AuthMethodClientSecretBasic { - err = fmt.Errorf("auth method %q is not supported", cfg.AuthMethod) - return - } - - au, err := phttp.ParseNonEmptyURL(cfg.AuthURL) - if err != nil { - return - } - - tu, err := phttp.ParseNonEmptyURL(cfg.TokenURL) - if err != nil { - return - } - - // Allow empty redirect URL in the case where the client - // only needs to verify a given token. - ru, err := url.Parse(cfg.RedirectURL) - if err != nil { - return - } - - c = &Client{ - creds: cfg.Credentials, - scope: cfg.Scope, - redirectURL: ru, - authURL: au, - tokenURL: tu, - hc: hc, - authMethod: cfg.AuthMethod, - } - - return -} - -// Return the embedded HTTP client -func (c *Client) HttpClient() phttp.Client { - return c.hc -} - -// Generate the url for initial redirect to oauth provider. -func (c *Client) AuthCodeURL(state, accessType, prompt string) string { - v := c.commonURLValues() - v.Set("state", state) - if strings.ToLower(accessType) == "offline" { - v.Set("access_type", "offline") - } - - if prompt != "" { - v.Set("prompt", prompt) - } - v.Set("response_type", "code") - - q := v.Encode() - u := *c.authURL - if u.RawQuery == "" { - u.RawQuery = q - } else { - u.RawQuery += "&" + q - } - return u.String() -} - -func (c *Client) commonURLValues() url.Values { - return url.Values{ - "redirect_uri": {c.redirectURL.String()}, - "scope": {strings.Join(c.scope, " ")}, - "client_id": {c.creds.ID}, - } -} - -func (c *Client) newAuthenticatedRequest(urlToken string, values url.Values) (*http.Request, error) { - var req *http.Request - var err error - switch c.authMethod { - case AuthMethodClientSecretPost: - values.Set("client_secret", c.creds.Secret) - req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode())) - if err != nil { - return nil, err - } - case AuthMethodClientSecretBasic: - req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode())) - if err != nil { - return nil, err - } - encodedID := url.QueryEscape(c.creds.ID) - encodedSecret := url.QueryEscape(c.creds.Secret) - req.SetBasicAuth(encodedID, encodedSecret) - default: - panic("misconfigured client: auth method not supported") - } - - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - return req, nil - -} - -// ClientCredsToken posts the client id and secret to obtain a token scoped to the OAuth2 client via the "client_credentials" grant type. -// May not be supported by all OAuth2 servers. 
-func (c *Client) ClientCredsToken(scope []string) (result TokenResponse, err error) { - v := url.Values{ - "scope": {strings.Join(scope, " ")}, - "grant_type": {GrantTypeClientCreds}, - } - - req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) - if err != nil { - return - } - - resp, err := c.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - return parseTokenResponse(resp) -} - -// UserCredsToken posts the username and password to obtain a token scoped to the OAuth2 client via the "password" grant_type -// May not be supported by all OAuth2 servers. -func (c *Client) UserCredsToken(username, password string) (result TokenResponse, err error) { - v := url.Values{ - "scope": {strings.Join(c.scope, " ")}, - "grant_type": {GrantTypeUserCreds}, - "username": {username}, - "password": {password}, - } - - req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) - if err != nil { - return - } - - resp, err := c.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - return parseTokenResponse(resp) -} - -// RequestToken requests a token from the Token Endpoint with the specified grantType. -// If 'grantType' == GrantTypeAuthCode, then 'value' should be the authorization code. -// If 'grantType' == GrantTypeRefreshToken, then 'value' should be the refresh token. -func (c *Client) RequestToken(grantType, value string) (result TokenResponse, err error) { - v := c.commonURLValues() - - v.Set("grant_type", grantType) - v.Set("client_secret", c.creds.Secret) - switch grantType { - case GrantTypeAuthCode: - v.Set("code", value) - case GrantTypeRefreshToken: - v.Set("refresh_token", value) - default: - err = fmt.Errorf("unsupported grant_type: %v", grantType) - return - } - - req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) - if err != nil { - return - } - - resp, err := c.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - return parseTokenResponse(resp) -} - -func parseTokenResponse(resp *http.Response) (result TokenResponse, err error) { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return - } - badStatusCode := resp.StatusCode < 200 || resp.StatusCode > 299 - - contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) - if err != nil { - return - } - - result = TokenResponse{ - RawBody: body, - } - - newError := func(typ, desc, state string) error { - if typ == "" { - return fmt.Errorf("unrecognized error %s", body) - } - return &Error{typ, desc, state} - } - - if contentType == "application/x-www-form-urlencoded" || contentType == "text/plain" { - var vals url.Values - vals, err = url.ParseQuery(string(body)) - if err != nil { - return - } - if error := vals.Get("error"); error != "" || badStatusCode { - err = newError(error, vals.Get("error_description"), vals.Get("state")) - return - } - e := vals.Get("expires_in") - if e == "" { - e = vals.Get("expires") - } - if e != "" { - result.Expires, err = strconv.Atoi(e) - if err != nil { - return - } - } - result.AccessToken = vals.Get("access_token") - result.TokenType = vals.Get("token_type") - result.IDToken = vals.Get("id_token") - result.RefreshToken = vals.Get("refresh_token") - result.Scope = vals.Get("scope") - } else { - var r struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - IDToken string `json:"id_token"` - RefreshToken string `json:"refresh_token"` - Scope string `json:"scope"` - State string `json:"state"` - ExpiresIn json.Number `json:"expires_in"` // Azure AD returns string - 
Expires int `json:"expires"` - Error string `json:"error"` - Desc string `json:"error_description"` - } - if err = json.Unmarshal(body, &r); err != nil { - return - } - if r.Error != "" || badStatusCode { - err = newError(r.Error, r.Desc, r.State) - return - } - result.AccessToken = r.AccessToken - result.TokenType = r.TokenType - result.IDToken = r.IDToken - result.RefreshToken = r.RefreshToken - result.Scope = r.Scope - if expiresIn, err := r.ExpiresIn.Int64(); err != nil { - result.Expires = r.Expires - } else { - result.Expires = int(expiresIn) - } - } - return -} - -type TokenResponse struct { - AccessToken string - TokenType string - Expires int - IDToken string - RefreshToken string // OPTIONAL. - Scope string // OPTIONAL, if identical to the scope requested by the client, otherwise, REQUIRED. - RawBody []byte // In case callers need some other non-standard info from the token response -} - -type AuthCodeRequest struct { - ResponseType string - ClientID string - RedirectURL *url.URL - Scope []string - State string -} - -func ParseAuthCodeRequest(q url.Values) (AuthCodeRequest, error) { - acr := AuthCodeRequest{ - ResponseType: q.Get("response_type"), - ClientID: q.Get("client_id"), - State: q.Get("state"), - Scope: make([]string, 0), - } - - qs := strings.TrimSpace(q.Get("scope")) - if qs != "" { - acr.Scope = strings.Split(qs, " ") - } - - err := func() error { - if acr.ClientID == "" { - return NewError(ErrorInvalidRequest) - } - - redirectURL := q.Get("redirect_uri") - if redirectURL != "" { - ru, err := url.Parse(redirectURL) - if err != nil { - return NewError(ErrorInvalidRequest) - } - acr.RedirectURL = ru - } - - return nil - }() - - return acr, err -} diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml new file mode 100644 index 00000000..6d4d1be7 --- /dev/null +++ b/vendor/go.uber.org/atomic/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore new file mode 100644 index 00000000..0a4504f1 --- /dev/null +++ b/vendor/go.uber.org/atomic/.gitignore @@ -0,0 +1,11 @@ +.DS_Store +/vendor +/cover +cover.out +lint.log + +# Binaries +*.test + +# Profiling output +*.prof diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml new file mode 100644 index 00000000..b6a6930b --- /dev/null +++ b/vendor/go.uber.org/atomic/.travis.yml @@ -0,0 +1,23 @@ +sudo: false +language: go +go_import_path: go.uber.org/atomic + +go: + - 1.7 + - 1.8 + - tip + +cache: + directories: + - vendor + +install: + - make install_ci + +script: + - make test_ci + - scripts/test-ubergo.sh + - make lint + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/go.uber.org/atomic/LICENSE.txt similarity index 94% rename from vendor/github.com/Sirupsen/logrus/LICENSE rename to 
vendor/go.uber.org/atomic/LICENSE.txt index f090cb42..8765c9fb 100644 --- a/vendor/github.com/Sirupsen/logrus/LICENSE +++ b/vendor/go.uber.org/atomic/LICENSE.txt @@ -1,6 +1,4 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen +Copyright (c) 2016 Uber Technologies, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile new file mode 100644 index 00000000..dfc63d9d --- /dev/null +++ b/vendor/go.uber.org/atomic/Makefile @@ -0,0 +1,64 @@ +PACKAGES := $(shell glide nv) +# Many Go tools take file globs or directories as arguments instead of packages. +PACKAGE_FILES ?= *.go + + +# The linting tools evolve with each Go version, so run them only on the latest +# stable release. +GO_VERSION := $(shell go version | cut -d " " -f 3) +GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) +LINTABLE_MINOR_VERSIONS := 7 8 +ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) +SHOULD_LINT := true +endif + + +export GO15VENDOREXPERIMENT=1 + + +.PHONY: build +build: + go build -i $(PACKAGES) + + +.PHONY: install +install: + glide --version || go get github.com/Masterminds/glide + glide install + + +.PHONY: test +test: + go test -cover -race $(PACKAGES) + + +.PHONY: install_ci +install_ci: install + go get github.com/wadey/gocovmerge + go get github.com/mattn/goveralls + go get golang.org/x/tools/cmd/cover +ifdef SHOULD_LINT + go get github.com/golang/lint/golint +endif + +.PHONY: lint +lint: +ifdef SHOULD_LINT + @rm -rf lint.log + @echo "Checking formatting..." + @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log + @echo "Checking vet..." + @$(foreach dir,$(PACKAGE_FILES),go tool vet $(dir) 2>&1 | tee -a lint.log;) + @echo "Checking lint..." + @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) + @echo "Checking for unresolved FIXMEs..." + @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log + @[ ! -s lint.log ] +else + @echo "Skipping linters on" $(GO_VERSION) +endif + + +.PHONY: test_ci +test_ci: install_ci build + ./scripts/cover.sh $(shell go list $(PACKAGES)) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md new file mode 100644 index 00000000..6505abf6 --- /dev/null +++ b/vendor/go.uber.org/atomic/README.md @@ -0,0 +1,36 @@ +# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] + +Simple wrappers for primitive types to enforce atomic access. + +## Installation +`go get -u go.uber.org/atomic` + +## Usage +The standard library's `sync/atomic` is powerful, but it's easy to forget which +variables must be accessed atomically. `go.uber.org/atomic` preserves all the +functionality of the standard library, but wraps the primitive types to +provide a safer, more convenient API. + +```go +var atom atomic.Uint32 +atom.Store(42) +atom.Sub(2) +atom.CAS(40, 11) +``` + +See the [documentation][doc] for a complete API specification. + +## Development Status +Stable. + +
+Released under the [MIT License](LICENSE.txt). + +[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg +[doc]: https://godoc.org/go.uber.org/atomic +[ci-img]: https://travis-ci.org/uber-go/atomic.svg?branch=master +[ci]: https://travis-ci.org/uber-go/atomic +[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/atomic +[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic +[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go new file mode 100644 index 00000000..1ca50dc3 --- /dev/null +++ b/vendor/go.uber.org/atomic/atomic.go @@ -0,0 +1,309 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic + +import ( + "math" + "sync/atomic" +) + +// Int32 is an atomic wrapper around an int32. +type Int32 struct{ v int32 } + +// NewInt32 creates an Int32. +func NewInt32(i int32) *Int32 { + return &Int32{i} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// Int64 is an atomic wrapper around an int64. +type Int64 struct{ v int64 } + +// NewInt64 creates an Int64. +func NewInt64(i int64) *Int64 { + return &Int64{i} +} + +// Load atomically loads the wrapped value. 
+func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(n int64) int64 { + return atomic.AddInt64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(n int64) int64 { + return atomic.AddInt64(&i.v, -n) +} + +// Inc atomically increments the wrapped int64 and returns the new value. +func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) bool { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(n int64) { + atomic.StoreInt64(&i.v, n) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(n int64) int64 { + return atomic.SwapInt64(&i.v, n) +} + +// Uint32 is an atomic wrapper around an uint32. +type Uint32 struct{ v uint32 } + +// NewUint32 creates a Uint32. +func NewUint32(i uint32) *Uint32 { + return &Uint32{i} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(n uint32) uint32 { + return atomic.AddUint32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(n uint32) uint32 { + return atomic.AddUint32(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) bool { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(n uint32) { + atomic.StoreUint32(&i.v, n) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(n uint32) uint32 { + return atomic.SwapUint32(&i.v, n) +} + +// Uint64 is an atomic wrapper around a uint64. +type Uint64 struct{ v uint64 } + +// NewUint64 creates a Uint64. +func NewUint64(i uint64) *Uint64 { + return &Uint64{i} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(n uint64) uint64 { + return atomic.AddUint64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(n uint64) uint64 { + return atomic.AddUint64(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) bool { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(n uint64) { + atomic.StoreUint64(&i.v, n) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. 
+func (i *Uint64) Swap(n uint64) uint64 { + return atomic.SwapUint64(&i.v, n) +} + +// Bool is an atomic Boolean. +type Bool struct{ v uint32 } + +// NewBool creates a Bool. +func NewBool(initial bool) *Bool { + return &Bool{boolToInt(initial)} +} + +// Load atomically loads the Boolean. +func (b *Bool) Load() bool { + return truthy(atomic.LoadUint32(&b.v)) +} + +// CAS is an atomic compare-and-swap. +func (b *Bool) CAS(old, new bool) bool { + return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new)) +} + +// Store atomically stores the passed value. +func (b *Bool) Store(new bool) { + atomic.StoreUint32(&b.v, boolToInt(new)) +} + +// Swap sets the given value and returns the previous value. +func (b *Bool) Swap(new bool) bool { + return truthy(atomic.SwapUint32(&b.v, boolToInt(new))) +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() bool { + return truthy(atomic.AddUint32(&b.v, 1) - 1) +} + +func truthy(n uint32) bool { + return n&1 == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Float64 is an atomic wrapper around float64. +type Float64 struct { + v uint64 +} + +// NewFloat64 creates a Float64. +func NewFloat64(f float64) *Float64 { + return &Float64{math.Float64bits(f)} +} + +// Load atomically loads the wrapped value. +func (f *Float64) Load() float64 { + return math.Float64frombits(atomic.LoadUint64(&f.v)) +} + +// Store atomically stores the passed value. +func (f *Float64) Store(s float64) { + atomic.StoreUint64(&f.v, math.Float64bits(s)) +} + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// CAS is an atomic compare-and-swap. +func (f *Float64) CAS(old, new float64) bool { + return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) +} + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct{ atomic.Value } diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock new file mode 100644 index 00000000..3c72c599 --- /dev/null +++ b/vendor/go.uber.org/atomic/glide.lock @@ -0,0 +1,17 @@ +hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53 +updated: 2016-10-27T00:10:51.16960137-07:00 +imports: [] +testImports: +- name: github.com/davecgh/go-spew + version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/stretchr/testify + version: d77da356e56a7428ad25149ca77381849a6a5232 + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml new file mode 100644 index 00000000..4cf608ec --- /dev/null +++ b/vendor/go.uber.org/atomic/glide.yaml @@ -0,0 +1,6 @@ +package: go.uber.org/atomic +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go new file mode 100644 index 00000000..ede8136f --- /dev/null +++ b/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper around Value for strings. +type String struct{ v Value } + +// NewString creates a String. +func NewString(str string) *String { + s := &String{} + if str != "" { + s.Store(str) + } + return s +} + +// Load atomically loads the wrapped string. +func (s *String) Load() string { + v := s.v.Load() + if v == nil { + return "" + } + return v.(string) +} + +// Store atomically stores the passed string. +// Note: Converting the string to an interface{} to store in the Value +// requires an allocation. +func (s *String) Store(str string) { + s.v.Store(str) +} diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml new file mode 100644 index 00000000..06ab1be4 --- /dev/null +++ b/vendor/go.uber.org/zap/.codecov.yml @@ -0,0 +1,17 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure +ignore: + - internal/readme/readme.go + diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore new file mode 100644 index 00000000..08fbde6c --- /dev/null +++ b/vendor/go.uber.org/zap/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.pprof +*.out +*.log diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl new file mode 100644 index 00000000..9cf43c81 --- /dev/null +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -0,0 +1,94 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. 
+ +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than than other structured logging libraries +and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("Failed to fetch URL.", + // Structured context as loosely-typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even faster than +the `SugaredLogger` and allocates far less, but it only supports structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("Failed to fetch URL.", + // Structured context as strongly-typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive and +make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead and +allocations wherever possible. By building the high-level `SugaredLogger` on +that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely-typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging libraries — it's also faster than the +standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +{{.BenchmarkAddingFields}} + +Log a message with a logger that already has 10 fields of context: + +{{.BenchmarkAccumulatedContext}} + +Log a static string, without any context or `printf`-style templating: + +{{.BenchmarkWithoutFields}} + +## Development Status: Stable +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin zap +to `^1`. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other libraries. Versions are +pinned in zap's [glide.lock][] file. [↩](#anchor-versions) + +[doc-img]: https://godoc.org/go.uber.org/zap?status.svg +[doc]: https://godoc.org/go.uber.org/zap +[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master +[ci]: https://travis-ci.org/uber-go/zap +[cov-img]: https://coveralls.io/repos/github/uber-go/zap/badge.svg?branch=master +[cov]: https://coveralls.io/github/uber-go/zap?branch=master +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml new file mode 100644 index 00000000..a6258769 --- /dev/null +++ b/vendor/go.uber.org/zap/.travis.yml @@ -0,0 +1,21 @@ +language: go +sudo: false +go: + - 1.7 + - 1.8 +go_import_path: go.uber.org/zap +env: + global: + - GO15VENDOREXPERIMENT=1 + - TEST_TIMEOUT_SCALE=10 +cache: + directories: + - vendor +install: + - make dependencies +script: + - make lint + - make test + - make bench +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md new file mode 100644 index 00000000..9878e26a --- /dev/null +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -0,0 +1,228 @@ +# Changelog + +## v1.4.1 (08 June 2017) + +This release fixes two bugs. + +Bugfixes: + +* [#435][]: Support a variety of case conventions when unmarshaling levels. +* [#444][]: Fix a panic in the observer. + +## v1.4.0 (12 May 2017) + +This release adds a few small features and is fully backward-compatible. + +Enhancements: + +* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to + override the Unix-style default. +* [#425][]: Preserve time zones when logging times. +* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a + variety of operations a bit simpler. + +## v1.3.0 (25 Apr 2017) + +This release adds an enhancement to zap's testing helpers as well as the +ability to marshal an AtomicLevel. It is fully backward-compatible. + +Enhancements: + +* [#415][]: Add a substring-filtering helper to zap's observer. This is + particularly useful when testing the `SugaredLogger`. +* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. + +## v1.2.0 (13 Apr 2017) + +This release adds a gRPC compatibility wrapper. It is fully backward-compatible. + +Enhancements: + +* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements + `grpclog.Logger`. + +## v1.1.0 (31 Mar 2017) + +This release fixes two bugs and adds some enhancements to zap's testing helpers. +It is fully backward-compatible. + +Bugfixes: + +* [#385][]: Fix caller path trimming on Windows. +* [#396][]: Fix a panic when attempting to use non-existent directories with + zap's configuration struct. + +Enhancements: + +* [#386][]: Add filtering helpers to zaptest's observing logger. + +Thanks to @moitias for contributing to this release. + +## v1.0.0 (14 Mar 2017) + +This is zap's first stable release. All exported APIs are now final, and no +further breaking changes will be made in the 1.x release series. Anyone using a +semver-aware dependency manager should now pin to `^1`. + +Breaking changes: + +* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without + casting from `[]byte` to `string`. 
+* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, + `zap.Logger`, and `zap.SugaredLogger`. +* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to + clash with other testing helpers. + +Bugfixes: + +* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier + for tab-separated console output. +* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to + work with concurrency-safe `WriteSyncer` implementations. +* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux + systems. +* [#373][]: Report the correct caller from zap's standard library + interoperability wrappers. + +Enhancements: + +* [#348][]: Add a registry allowing third-party encodings to work with zap's + built-in `Config`. +* [#327][]: Make the representation of logger callers configurable (like times, + levels, and durations). +* [#376][]: Allow third-party encoders to use their own buffer pools, which + removes the last performance advantage that zap's encoders have over plugins. +* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple + `WriteSyncer`s and lock the result. +* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in + Go 1.9). +* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it + easier for particularly punctilious users to unit test their application's + logging. + +Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their +contributions to this release. + +## v1.0.0-rc.3 (7 Mar 2017) + +This is the third release candidate for zap's stable release. There are no +breaking changes. + +Bugfixes: + +* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs + rather than `[]uint8`. + +Enhancements: + +* [#307][]: Users can opt into colored output for log levels. +* [#353][]: In addition to hijacking the output of the standard library's + package-global logging functions, users can now construct a zap-backed + `log.Logger` instance. +* [#311][]: Frames from common runtime functions and some of zap's internal + machinery are now omitted from stacktraces. + +Thanks to @ansel1 and @suyash for their contributions to this release. + +## v1.0.0-rc.2 (21 Feb 2017) + +This is the second release candidate for zap's stable release. It includes two +breaking changes. + +Breaking changes: + +* [#316][]: Zap's global loggers are now fully concurrency-safe + (previously, users had to ensure that `ReplaceGlobals` was called before the + loggers were in use). However, they must now be accessed via the `L()` and + `S()` functions. Users can update their projects with + + ``` + gofmt -r "zap.L -> zap.L()" -w . + gofmt -r "zap.S -> zap.S()" -w . + ``` +* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid + JSON and YAML struct tags on all config structs. This release fixes the tags + and adds static analysis to prevent similar bugs in the future. + +Bugfixes: + +* [#321][]: Redirecting the standard library's `log` output now + correctly reports the logger's caller. + +Enhancements: + +* [#325][] and [#333][]: Zap now transparently supports non-standard, rich + errors like those produced by `github.com/pkg/errors`. +* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is + now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> + zap.NewNop()' -w .`. 
+* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a + more informative error. + +Thanks to @skipor and @chapsuk for their contributions to this release. + +## v1.0.0-rc.1 (14 Feb 2017) + +This is the first release candidate for zap's stable release. There are multiple +breaking changes and improvements from the pre-release version. Most notably: + +* **Zap's import path is now "go.uber.org/zap"** — all users will + need to update their code. +* User-facing types and functions remain in the `zap` package. Code relevant + largely to extension authors is now in the `zapcore` package. +* The `zapcore.Core` type makes it easy for third-party packages to use zap's + internals but provide a different user-facing API. +* `Logger` is now a concrete type instead of an interface. +* A less verbose (though slower) logging API is included by default. +* Package-global loggers `L` and `S` are included. +* A human-friendly console encoder is included. +* A declarative config struct allows common logger configurations to be managed + as configuration instead of code. +* Sampling is more accurate, and doesn't depend on the standard library's shared + timer heap. + +## v0.1.0-beta.1 (6 Feb 2017) + +This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and +upgrade at their leisure. Since this is the first tagged release, there are no +backward compatibility concerns and all functionality is new. + +Early zap adopters should pin to the 0.1.x minor version until they're ready to +upgrade to the upcoming stable release. + +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 +[#402]: https://github.com/uber-go/zap/pull/402 +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md new file mode 100644 index 00000000..6007373a --- /dev/null +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -0,0 +1,79 @@ 
+# Contributing + +We'd love your help making zap the very best structured logging library in Go! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +``` +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/zap.git +cd zap +git remote add upstream https://github.com/uber-go/zap.git +git fetch upstream +``` + +Install zap's dependencies: + +``` +make dependencies +``` + +Make sure that the tests and the linters pass: + +``` +make test +make lint +``` + +If you're not using the minor version of Go specified in the Makefile's +`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is +fine, but it means that you'll only discover lint failures after you open your +pull request. + +## Making Changes + +Start by creating a new branch for your changes: + +``` +cd $GOPATH/src/go.uber.org/zap +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +``` +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We *try* to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +* Add tests for new functionality. +* Write a [good commit message][commit-message]. +* Maintain backward compatibility. + +[fork]: https://github.com/uber-go/zap/fork +[open-issue]: https://github.com/uber-go/zap/issues/new +[cla]: https://cla-assistant.io/uber-go/zap +[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt new file mode 100644 index 00000000..8765c9fb --- /dev/null +++ b/vendor/go.uber.org/zap/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile new file mode 100644 index 00000000..cca25ae0 --- /dev/null +++ b/vendor/go.uber.org/zap/Makefile @@ -0,0 +1,72 @@ +export GO15VENDOREXPERIMENT=1 + +BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem +PKGS ?= $(shell glide novendor) +# Many Go tools take file globs or directories as arguments instead of packages. +PKG_FILES ?= *.go zapcore benchmarks buffer zapgrpc zaptest zaptest/observer internal/bufferpool internal/exit internal/multierror internal/color + +# The linting tools evolve with each Go version, so run them only on the latest +# stable release. +GO_VERSION := $(shell go version | cut -d " " -f 3) +GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) +LINTABLE_MINOR_VERSIONS := 8 +ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) +SHOULD_LINT := true +endif + + +.PHONY: all +all: lint test + +.PHONY: dependencies +dependencies: + @echo "Installing Glide and locked dependencies..." + glide --version || go get -u -f github.com/Masterminds/glide + glide install + @echo "Installing test dependencies..." + go install ./vendor/github.com/axw/gocov/gocov + go install ./vendor/github.com/mattn/goveralls +ifdef SHOULD_LINT + @echo "Installing golint..." + go install ./vendor/github.com/golang/lint/golint +else + @echo "Not installing golint, since we don't expect to lint on" $(GO_VERSION) +endif + +# Disable printf-like invocation checking due to testify.assert.Error() +VET_RULES := -printf=false + +.PHONY: lint +lint: +ifdef SHOULD_LINT + @rm -rf lint.log + @echo "Checking formatting..." + @gofmt -d -s $(PKG_FILES) 2>&1 | tee lint.log + @echo "Installing test dependencies for vet..." + @go test -i $(PKGS) + @echo "Checking vet..." + @$(foreach dir,$(PKG_FILES),go tool vet $(VET_RULES) $(dir) 2>&1 | tee -a lint.log;) + @echo "Checking lint..." + @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) + @echo "Checking for unresolved FIXMEs..." + @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log + @echo "Checking for license headers..." + @./check_license.sh | tee -a lint.log + @[ ! -s lint.log ] +else + @echo "Skipping linters on" $(GO_VERSION) +endif + +.PHONY: test +test: + go test -race $(PKGS) + +.PHONY: bench +BENCH ?= . +bench: + @$(foreach pkg,$(PKGS),go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) $(pkg);) + +.PHONY: updatereadme +updatereadme: + rm -f README.md + cat .readme.tmpl | go run internal/readme/readme.go > README.md diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md new file mode 100644 index 00000000..c9a40434 --- /dev/null +++ b/vendor/go.uber.org/zap/README.md @@ -0,0 +1,119 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than than other structured logging libraries +and includes both structured and `printf`-style APIs. 
+ +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("Failed to fetch URL.", + // Structured context as loosely-typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even faster than +the `SugaredLogger` and allocates far less, but it only supports structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("Failed to fetch URL.", + // Structured context as strongly-typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive and +make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead and +allocations wherever possible. By building the high-level `SugaredLogger` on +that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely-typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging libraries — it's also faster than the +standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +| Library | Time | Bytes Allocated | Objects Allocated | +| :--- | :---: | :---: | :---: | +| :zap: zap | 1526 ns/op | 704 B/op | 2 allocs/op | +| :zap: zap (sugared) | 2274 ns/op | 1610 B/op | 20 allocs/op | +| go-kit | 5854 ns/op | 2895 B/op | 66 allocs/op | +| logrus | 9117 ns/op | 6092 B/op | 78 allocs/op | +| lion | 9408 ns/op | 5807 B/op | 63 allocs/op | +| apex/log | 17007 ns/op | 3832 B/op | 65 allocs/op | +| log15 | 21290 ns/op | 5632 B/op | 93 allocs/op | + +Log a message with a logger that already has 10 fields of context: + +| Library | Time | Bytes Allocated | Objects Allocated | +| :--- | :---: | :---: | :---: | +| :zap: zap | 446 ns/op | 0 B/op | 0 allocs/op | +| :zap: zap (sugared) | 599 ns/op | 80 B/op | 2 allocs/op | +| lion | 5231 ns/op | 4074 B/op | 38 allocs/op | +| go-kit | 6424 ns/op | 3046 B/op | 52 allocs/op | +| logrus | 7578 ns/op | 4564 B/op | 63 allocs/op | +| apex/log | 15697 ns/op | 2898 B/op | 51 allocs/op | +| log15 | 15879 ns/op | 2642 B/op | 44 allocs/op | + +Log a static string, without any context or `printf`-style templating: + +| Library | Time | Bytes Allocated | Objects Allocated | +| :--- | :---: | :---: | :---: | +| :zap: zap | 418 ns/op | 0 B/op | 0 allocs/op | +| standard library | 524 ns/op | 80 B/op | 2 allocs/op | +| :zap: zap (sugared) | 628 ns/op | 80 B/op | 2 allocs/op | +| go-kit | 1011 ns/op | 656 B/op | 13 allocs/op | +| lion | 1382 ns/op | 1224 B/op | 10 allocs/op | +| logrus | 2263 ns/op | 1505 B/op | 27 allocs/op | +| apex/log | 3198 ns/op | 584 B/op | 11 allocs/op | +| log15 | 5787 ns/op | 1592 B/op | 26 allocs/op | + +## Development Status: Stable +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin zap +to `^1`. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other libraries. Versions are +pinned in zap's [glide.lock][] file. [↩](#anchor-versions) + +[doc-img]: https://godoc.org/go.uber.org/zap?status.svg +[doc]: https://godoc.org/go.uber.org/zap +[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master +[ci]: https://travis-ci.org/uber-go/zap +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go new file mode 100644 index 00000000..784c6df0 --- /dev/null +++ b/vendor/go.uber.org/zap/array.go @@ -0,0 +1,355 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sync" + "time" + + "go.uber.org/zap/zapcore" +) + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Array constructs a field with the given key and ArrayMarshaler. It provides +// a flexible, but still type-safe and efficient, way to add array-like types +// to the logging context. The struct's MarshalLogArray method is called lazily. +func Array(key string, val zapcore.ArrayMarshaler) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} +} + +// Bools constructs a field that carries a slice of bools. +func Bools(key string, bs []bool) zapcore.Field { + return Array(key, bools(bs)) +} + +// ByteStrings constructs a field that carries a slice of []byte, each of which +// must be UTF-8 encoded text. +func ByteStrings(key string, bss [][]byte) zapcore.Field { + return Array(key, byteStringsArray(bss)) +} + +// Complex128s constructs a field that carries a slice of complex numbers. +func Complex128s(key string, nums []complex128) zapcore.Field { + return Array(key, complex128s(nums)) +} + +// Complex64s constructs a field that carries a slice of complex numbers. +func Complex64s(key string, nums []complex64) zapcore.Field { + return Array(key, complex64s(nums)) +} + +// Durations constructs a field that carries a slice of time.Durations. 
+func Durations(key string, ds []time.Duration) zapcore.Field { + return Array(key, durations(ds)) +} + +// Float64s constructs a field that carries a slice of floats. +func Float64s(key string, nums []float64) zapcore.Field { + return Array(key, float64s(nums)) +} + +// Float32s constructs a field that carries a slice of floats. +func Float32s(key string, nums []float32) zapcore.Field { + return Array(key, float32s(nums)) +} + +// Ints constructs a field that carries a slice of integers. +func Ints(key string, nums []int) zapcore.Field { + return Array(key, ints(nums)) +} + +// Int64s constructs a field that carries a slice of integers. +func Int64s(key string, nums []int64) zapcore.Field { + return Array(key, int64s(nums)) +} + +// Int32s constructs a field that carries a slice of integers. +func Int32s(key string, nums []int32) zapcore.Field { + return Array(key, int32s(nums)) +} + +// Int16s constructs a field that carries a slice of integers. +func Int16s(key string, nums []int16) zapcore.Field { + return Array(key, int16s(nums)) +} + +// Int8s constructs a field that carries a slice of integers. +func Int8s(key string, nums []int8) zapcore.Field { + return Array(key, int8s(nums)) +} + +// Strings constructs a field that carries a slice of strings. +func Strings(key string, ss []string) zapcore.Field { + return Array(key, stringArray(ss)) +} + +// Times constructs a field that carries a slice of time.Times. +func Times(key string, ts []time.Time) zapcore.Field { + return Array(key, times(ts)) +} + +// Uints constructs a field that carries a slice of unsigned integers. +func Uints(key string, nums []uint) zapcore.Field { + return Array(key, uints(nums)) +} + +// Uint64s constructs a field that carries a slice of unsigned integers. +func Uint64s(key string, nums []uint64) zapcore.Field { + return Array(key, uint64s(nums)) +} + +// Uint32s constructs a field that carries a slice of unsigned integers. +func Uint32s(key string, nums []uint32) zapcore.Field { + return Array(key, uint32s(nums)) +} + +// Uint16s constructs a field that carries a slice of unsigned integers. +func Uint16s(key string, nums []uint16) zapcore.Field { + return Array(key, uint16s(nums)) +} + +// Uint8s constructs a field that carries a slice of unsigned integers. +func Uint8s(key string, nums []uint8) zapcore.Field { + return Array(key, uint8s(nums)) +} + +// Uintptrs constructs a field that carries a slice of pointer addresses. +func Uintptrs(key string, us []uintptr) zapcore.Field { + return Array(key, uintptrs(us)) +} + +// Errors constructs a field that carries a slice of errors. 
+func Errors(key string, errs []error) zapcore.Field { + return Array(key, errArray(errs)) +} + +type bools []bool + +func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bs { + arr.AppendBool(bs[i]) + } + return nil +} + +type byteStringsArray [][]byte + +func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bss { + arr.AppendByteString(bss[i]) + } + return nil +} + +type complex128s []complex128 + +func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex128(nums[i]) + } + return nil +} + +type complex64s []complex64 + +func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex64(nums[i]) + } + return nil +} + +type durations []time.Duration + +func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ds { + arr.AppendDuration(ds[i]) + } + return nil +} + +type float64s []float64 + +func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat64(nums[i]) + } + return nil +} + +type float32s []float32 + +func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat32(nums[i]) + } + return nil +} + +type ints []int + +func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt(nums[i]) + } + return nil +} + +type int64s []int64 + +func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt64(nums[i]) + } + return nil +} + +type int32s []int32 + +func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt32(nums[i]) + } + return nil +} + +type int16s []int16 + +func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt16(nums[i]) + } + return nil +} + +type int8s []int8 + +func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt8(nums[i]) + } + return nil +} + +type stringArray []string + +func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ss { + arr.AppendString(ss[i]) + } + return nil +} + +type times []time.Time + +func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ts { + arr.AppendTime(ts[i]) + } + return nil +} + +type uints []uint + +func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint(nums[i]) + } + return nil +} + +type uint64s []uint64 + +func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint64(nums[i]) + } + return nil +} + +type uint32s []uint32 + +func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint32(nums[i]) + } + return nil +} + +type uint16s []uint16 + +func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint16(nums[i]) + } + return nil +} + +type uint8s []uint8 + +func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint8(nums[i]) + } + return nil +} + +type uintptrs []uintptr + +func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUintptr(nums[i]) + } + return nil +} + +type errArray []error + +func (errs errArray) 
MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + // To represent each error as an object with an "error" attribute and + // potentially an "errorVerbose" attribute, we need to wrap it in a + // type that implements LogObjectMarshaler. To prevent this from + // allocating, pool the wrapper type. + elem := _errArrayElemPool.Get().(*errArrayElem) + elem.error = errs[i] + arr.AppendObject(elem) + elem.error = nil + _errArrayElemPool.Put(elem) + } + return nil +} + +type errArrayElem struct { + error +} + +func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { + // Re-use the error field's logic, which supports non-standard error types. + Error(e.error).AddTo(enc) + return nil +} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go new file mode 100644 index 00000000..ea6fdc8d --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -0,0 +1,106 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package buffer provides a thin wrapper around a byte slice. Unlike the +// standard library's bytes.Buffer, it supports a portion of the strconv +// package's zero-allocation formatters. +package buffer + +import "strconv" + +const _size = 1024 // by default, create 1 KiB buffers + +// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so +// the only way to construct one is via a Pool. +type Buffer struct { + bs []byte + pool Pool +} + +// AppendByte writes a single byte to the Buffer. +func (b *Buffer) AppendByte(v byte) { + b.bs = append(b.bs, v) +} + +// AppendString writes a string to the Buffer. +func (b *Buffer) AppendString(s string) { + b.bs = append(b.bs, s...) +} + +// AppendInt appends an integer to the underlying buffer (assuming base 10). +func (b *Buffer) AppendInt(i int64) { + b.bs = strconv.AppendInt(b.bs, i, 10) +} + +// AppendUint appends an unsigned integer to the underlying buffer (assuming +// base 10). +func (b *Buffer) AppendUint(i uint64) { + b.bs = strconv.AppendUint(b.bs, i, 10) +} + +// AppendBool appends a bool to the underlying buffer. +func (b *Buffer) AppendBool(v bool) { + b.bs = strconv.AppendBool(b.bs, v) +} + +// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN +// or +/- Inf. 
+func (b *Buffer) AppendFloat(f float64, bitSize int) { + b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) +} + +// Len returns the length of the underlying byte slice. +func (b *Buffer) Len() int { + return len(b.bs) +} + +// Cap returns the capacity of the underlying byte slice. +func (b *Buffer) Cap() int { + return cap(b.bs) +} + +// Bytes returns a mutable reference to the underlying byte slice. +func (b *Buffer) Bytes() []byte { + return b.bs +} + +// String returns a string copy of the underlying byte slice. +func (b *Buffer) String() string { + return string(b.bs) +} + +// Reset resets the underlying byte slice. Subsequent writes re-use the slice's +// backing array. +func (b *Buffer) Reset() { + b.bs = b.bs[:0] +} + +// Write implements io.Writer. +func (b *Buffer) Write(bs []byte) (int, error) { + b.bs = append(b.bs, bs...) + return len(bs), nil +} + +// Free returns the Buffer to its Pool. +// +// Callers must not retain references to the Buffer after calling Free. +func (b *Buffer) Free() { + b.pool.put(b) +} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go new file mode 100644 index 00000000..8fb3e202 --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package buffer + +import "sync" + +// A Pool is a type-safe wrapper around a sync.Pool. +type Pool struct { + p *sync.Pool +} + +// NewPool constructs a new Pool. +func NewPool() Pool { + return Pool{p: &sync.Pool{ + New: func() interface{} { + return &Buffer{bs: make([]byte, 0, _size)} + }, + }} +} + +// Get retrieves a Buffer from the pool, creating one if necessary. +func (p Pool) Get() *Buffer { + buf := p.p.Get().(*Buffer) + buf.Reset() + buf.pool = p + return buf +} + +func (p Pool) put(buf *Buffer) { + p.p.Put(buf) +} diff --git a/vendor/go.uber.org/zap/check_license.sh b/vendor/go.uber.org/zap/check_license.sh new file mode 100755 index 00000000..5c79ad8d --- /dev/null +++ b/vendor/go.uber.org/zap/check_license.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +text=`head -1 LICENSE.txt` + +ERROR_COUNT=0 +while read file +do + head -1 ${file} | grep -q "${text}" + if [ $? -ne 0 ]; then + echo "$file is missing license header." 
+ (( ERROR_COUNT++ )) + fi +done < <(git ls-files "*\.go") + +exit $ERROR_COUNT diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go new file mode 100644 index 00000000..89c355d1 --- /dev/null +++ b/vendor/go.uber.org/zap/config.go @@ -0,0 +1,231 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sort" + "time" + + "go.uber.org/zap/zapcore" +) + +// SamplingConfig sets a sampling strategy for the logger. Sampling +// caps the global CPU and I/O load that logging puts on your process while +// attempting to preserve a representative subset of your logs. +// +// Values configured here are per-second. See zapcore.NewSampler for details. +type SamplingConfig struct { + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` +} + +// Config offers a declarative way to construct a logger. +// +// It doesn't do anything that can't be done with New, Options, and the various +// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to +// toggle common options. +type Config struct { + // Level is the minimum enabled logging level. Note that this is a dynamic + // level, so calling Config.Level.SetLevel will atomically change the log + // level of all loggers descended from this config. The zero value is + // InfoLevel. + Level AtomicLevel `json:"level" yaml:"level"` + // Development puts the logger in development mode, which changes the + // behavior of DPanicLevel and takes stacktraces more liberally. + Development bool `json:"development" yaml:"development"` + // DisableCaller stops annotating logs with the calling function's file + // name and line number. By default, all logs are annotated. + DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` + // DisableStacktrace completely disables automatic stacktrace capturing. By + // default, stacktraces are captured for WarnLevel and above logs in + // development and ErrorLevel and above in production. + DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` + // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. + Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` + // Encoding sets the logger's encoding. Valid values are "json" and + // "console". + Encoding string `json:"encoding" yaml:"encoding"` + // EncoderConfig sets options for the chosen encoder. See + // zapcore.EncoderConfig for details. 
+ EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` + // OutputPaths is a list of paths to write logging output to. See Open for + // details. + OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` + // ErrorOutputPaths is a list of paths to write internal logger errors to. + // The default is standard error. + ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` + // InitialFields is a collection of fields to add to the root logger. + InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` +} + +// NewProductionEncoderConfig returns an opinionated EncoderConfig for +// production environments. +func NewProductionEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewProductionConfig is the recommended production configuration. Logging is +// enabled at InfoLevel and above. +// +// It uses a JSON encoder, writes to standard error, and enables sampling. +// Stacktraces are automatically included on logs of ErrorLevel and above. +func NewProductionConfig() Config { + return Config{ + Level: NewAtomicLevelAt(InfoLevel), + Development: false, + Sampling: &SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + Encoding: "json", + EncoderConfig: NewProductionEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for +// development environments. +func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + // Keys can be anything except the empty string. + TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewDevelopmentConfig is a reasonable development configuration. Logging is +// enabled at DebugLevel and above. +// +// It enables development mode (which makes DPanicLevel logs panic), uses a +// console encoder, writes to standard error, and disables sampling. +// Stacktraces are automatically included on logs of WarnLevel and above. +func NewDevelopmentConfig() Config { + return Config{ + Level: NewAtomicLevelAt(DebugLevel), + Development: true, + Encoding: "console", + EncoderConfig: NewDevelopmentEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// Build constructs a logger from the Config and Options. +func (cfg Config) Build(opts ...Option) (*Logger, error) { + enc, err := cfg.buildEncoder() + if err != nil { + return nil, err + } + + sink, errSink, err := cfg.openSinks() + if err != nil { + return nil, err + } + + log := New( + zapcore.NewCore(enc, sink, cfg.Level), + cfg.buildOptions(errSink)..., + ) + if len(opts) > 0 { + log = log.WithOptions(opts...) 
+ } + return log, nil +} + +func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { + opts := []Option{ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, AddCaller()) + } + + stackLevel := ErrorLevel + if cfg.Development { + stackLevel = WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, AddStacktrace(stackLevel)) + } + + if cfg.Sampling != nil { + opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter)) + })) + } + + if len(cfg.InitialFields) > 0 { + fs := make([]zapcore.Field, 0, len(cfg.InitialFields)) + keys := make([]string, 0, len(cfg.InitialFields)) + for k := range cfg.InitialFields { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fs = append(fs, Any(k, cfg.InitialFields[k])) + } + opts = append(opts, Fields(fs...)) + } + + return opts +} + +func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { + sink, closeOut, err := Open(cfg.OutputPaths...) + if err != nil { + return nil, nil, err + } + errSink, _, err := Open(cfg.ErrorOutputPaths...) + if err != nil { + closeOut() + return nil, nil, err + } + return sink, errSink, nil +} + +func (cfg Config) buildEncoder() (zapcore.Encoder, error) { + return newEncoder(cfg.Encoding, cfg.EncoderConfig) +} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go new file mode 100644 index 00000000..0c585803 --- /dev/null +++ b/vendor/go.uber.org/zap/doc.go @@ -0,0 +1,22 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zap provides fast, structured, leveled logging in Go. +package zap diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go new file mode 100644 index 00000000..c5a84454 --- /dev/null +++ b/vendor/go.uber.org/zap/encoder.go @@ -0,0 +1,73 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "sync" + + "go.uber.org/zap/zapcore" +) + +var ( + errNoEncoderNameSpecified = errors.New("no encoder name specified") + + _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ + "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewConsoleEncoder(encoderConfig), nil + }, + "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewJSONEncoder(encoderConfig), nil + }, + } + _encoderMutex sync.RWMutex +) + +// RegisterEncoder registers an encoder constructor for the given name. +// +// If an encoder with the same name already exists, this will return an error. +// By default, the encoders "json" and "console" are registered. +func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { + _encoderMutex.Lock() + defer _encoderMutex.Unlock() + if name == "" { + return errNoEncoderNameSpecified + } + if _, ok := _encoderNameToConstructor[name]; ok { + return fmt.Errorf("encoder already registered for name %q", name) + } + _encoderNameToConstructor[name] = constructor + return nil +} + +func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + _encoderMutex.RLock() + defer _encoderMutex.RUnlock() + if name == "" { + return nil, errNoEncoderNameSpecified + } + constructor, ok := _encoderNameToConstructor[name] + if !ok { + return nil, fmt.Errorf("no encoder registered for name %q", name) + } + return constructor(encoderConfig) +} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go new file mode 100644 index 00000000..c81a13d7 --- /dev/null +++ b/vendor/go.uber.org/zap/field.go @@ -0,0 +1,324 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "math" + "time" + + "go.uber.org/zap/zapcore" +) + +// Skip constructs a no-op field. +func Skip() zapcore.Field { + return zapcore.Field{Type: zapcore.SkipType} +} + +// Binary constructs a field that carries an opaque binary blob. +// +// Binary data is serialized in an encoding-appropriate format. For example, +// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, +// use ByteString. +func Binary(key string, val []byte) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.BinaryType, Interface: val} +} + +// Bool constructs a field that carries a bool. +func Bool(key string, val bool) zapcore.Field { + var ival int64 + if val { + ival = 1 + } + return zapcore.Field{Key: key, Type: zapcore.BoolType, Integer: ival} +} + +// ByteString constructs a field that carries UTF-8 encoded text as a []byte. +// To log opaque binary blobs (which aren't necessarily valid UTF-8), use +// Binary. +func ByteString(key string, val []byte) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.ByteStringType, Interface: val} +} + +// Complex128 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex128 to +// interface{}). +func Complex128(key string, val complex128) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.Complex128Type, Interface: val} +} + +// Complex64 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex64 to +// interface{}). +func Complex64(key string, val complex64) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.Complex64Type, Interface: val} +} + +// Float64 constructs a field that carries a float64. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float64(key string, val float64) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} +} + +// Float32 constructs a field that carries a float32. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float32(key string, val float32) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} +} + +// Int constructs a field with the given key and value. +func Int(key string, val int) zapcore.Field { + return Int64(key, int64(val)) +} + +// Int64 constructs a field with the given key and value. +func Int64(key string, val int64) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.Int64Type, Integer: val} +} + +// Int32 constructs a field with the given key and value. +func Int32(key string, val int32) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} +} + +// Int16 constructs a field with the given key and value. 
+func Int16(key string, val int16) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} +} + +// Int8 constructs a field with the given key and value. +func Int8(key string, val int8) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} +} + +// String constructs a field with the given key and value. +func String(key string, val string) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.StringType, String: val} +} + +// Uint constructs a field with the given key and value. +func Uint(key string, val uint) zapcore.Field { + return Uint64(key, uint64(val)) +} + +// Uint64 constructs a field with the given key and value. +func Uint64(key string, val uint64) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} +} + +// Uint32 constructs a field with the given key and value. +func Uint32(key string, val uint32) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} +} + +// Uint16 constructs a field with the given key and value. +func Uint16(key string, val uint16) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} +} + +// Uint8 constructs a field with the given key and value. +func Uint8(key string, val uint8) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} +} + +// Uintptr constructs a field with the given key and value. +func Uintptr(key string, val uintptr) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} +} + +// Reflect constructs a field with the given key and an arbitrary object. It uses +// an encoding-appropriate, reflection-based function to lazily serialize nearly +// any object into the logging context, but it's relatively slow and +// allocation-heavy. Outside tests, Any is always a better choice. +// +// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect +// includes the error message in the final log output. +func Reflect(key string, val interface{}) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.ReflectType, Interface: val} +} + +// Namespace creates a named, isolated scope within the logger's context. All +// subsequent fields will be added to the new namespace. +// +// This helps prevent key collisions when injecting loggers into sub-components +// or third-party libraries. +func Namespace(key string) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.NamespaceType} +} + +// Stringer constructs a field with the given key and the output of the value's +// String method. The Stringer's String method is called lazily. +func Stringer(key string, val fmt.Stringer) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.StringerType, Interface: val} +} + +// Time constructs a zapcore.Field with the given key and value. The encoder +// controls how the time is serialized. +func Time(key string, val time.Time) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} +} + +// Error is shorthand for the common idiom NamedError("error", err). +func Error(err error) zapcore.Field { + return NamedError("error", err) +} + +// NamedError constructs a field that lazily stores err.Error() under the +// provided key. 
Errors which also implement fmt.Formatter (like those produced +// by github.com/pkg/errors) will also have their verbose representation stored +// under key+"Verbose". If passed a nil error, the field is a no-op. +// +// For the common case in which the key is simply "error", the Error function +// is shorter and less repetitive. +func NamedError(key string, err error) zapcore.Field { + if err == nil { + return Skip() + } + return zapcore.Field{Key: key, Type: zapcore.ErrorType, Interface: err} +} + +// Stack constructs a field that stores a stacktrace of the current goroutine +// under provided key. Keep in mind that taking a stacktrace is eager and +// expensive (relatively speaking); this function both makes an allocation and +// takes about two microseconds. +func Stack(key string) zapcore.Field { + // Returning the stacktrace as a string costs an allocation, but saves us + // from expanding the zapcore.Field union struct to include a byte slice. Since + // taking a stacktrace is already so expensive (~10us), the extra allocation + // is okay. + return String(key, takeStacktrace()) +} + +// Duration constructs a field with the given key and value. The encoder +// controls how the duration is serialized. +func Duration(key string, val time.Duration) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} +} + +// Object constructs a field with the given key and ObjectMarshaler. It +// provides a flexible, but still type-safe and efficient, way to add map- or +// struct-like user-defined types to the logging context. The struct's +// MarshalLogObject method is called lazily. +func Object(key string, val zapcore.ObjectMarshaler) zapcore.Field { + return zapcore.Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} +} + +// Any takes a key and an arbitrary value and chooses the best way to represent +// them as a field, falling back to a reflection-based approach only if +// necessary. +// +// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between +// them. To minimize suprise, []byte values are treated as binary blobs, byte +// values are treated as uint8, and runes are always treated as integers. 
+func Any(key string, value interface{}) zapcore.Field { + switch val := value.(type) { + case zapcore.ObjectMarshaler: + return Object(key, val) + case zapcore.ArrayMarshaler: + return Array(key, val) + case bool: + return Bool(key, val) + case []bool: + return Bools(key, val) + case complex128: + return Complex128(key, val) + case []complex128: + return Complex128s(key, val) + case complex64: + return Complex64(key, val) + case []complex64: + return Complex64s(key, val) + case float64: + return Float64(key, val) + case []float64: + return Float64s(key, val) + case float32: + return Float32(key, val) + case []float32: + return Float32s(key, val) + case int: + return Int(key, val) + case []int: + return Ints(key, val) + case int64: + return Int64(key, val) + case []int64: + return Int64s(key, val) + case int32: + return Int32(key, val) + case []int32: + return Int32s(key, val) + case int16: + return Int16(key, val) + case []int16: + return Int16s(key, val) + case int8: + return Int8(key, val) + case []int8: + return Int8s(key, val) + case string: + return String(key, val) + case []string: + return Strings(key, val) + case uint: + return Uint(key, val) + case []uint: + return Uints(key, val) + case uint64: + return Uint64(key, val) + case []uint64: + return Uint64s(key, val) + case uint32: + return Uint32(key, val) + case []uint32: + return Uint32s(key, val) + case uint16: + return Uint16(key, val) + case []uint16: + return Uint16s(key, val) + case uint8: + return Uint8(key, val) + case []byte: + return Binary(key, val) + case uintptr: + return Uintptr(key, val) + case []uintptr: + return Uintptrs(key, val) + case time.Time: + return Time(key, val) + case []time.Time: + return Times(key, val) + case time.Duration: + return Duration(key, val) + case []time.Duration: + return Durations(key, val) + case error: + return NamedError(key, val) + case []error: + return Errors(key, val) + case fmt.Stringer: + return Stringer(key, val) + default: + return Reflect(key, val) + } +} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go new file mode 100644 index 00000000..5cf541e9 --- /dev/null +++ b/vendor/go.uber.org/zap/flag.go @@ -0,0 +1,38 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "flag" + + "go.uber.org/zap/zapcore" +) + +// LevelFlag defines a Level flag with specified name, default value and +// usage string. 
The return value is the address of a Level value that stores +// the value of the flag. +// +// Note that you can also use any non-nil *Level as a flag.Value. +func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { + lvl := defaultLevel + flag.Var(&lvl, name, usage) + return &lvl +} diff --git a/vendor/go.uber.org/zap/glide.lock b/vendor/go.uber.org/zap/glide.lock new file mode 100644 index 00000000..fc48f276 --- /dev/null +++ b/vendor/go.uber.org/zap/glide.lock @@ -0,0 +1,72 @@ +hash: e0772fca67e6ddb8b10aad5a68c2db217e94f31c4b8646753a8bdf8d27f8ab6f +updated: 2017-02-27T14:29:10.445907666-08:00 +imports: +- name: go.uber.org/atomic + version: 0c9e689d64f004564b79d9a663634756df322902 +testImports: +- name: github.com/apex/log + version: 4ea85e918cc8389903d5f12d7ccac5c23ab7d89b + subpackages: + - handlers/json +- name: github.com/axw/gocov + version: c77561ca0c0cb1ed5d4ce4a912a75f5532566422 + subpackages: + - gocov +- name: github.com/davecgh/go-spew + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 + subpackages: + - spew +- name: github.com/fatih/color + version: 5ec5d9d3c2cf82e9688b34e9bc27a94d616a7193 +- name: github.com/go-kit/kit + version: 0d6c91c61a1b3ac2467facd58a06fe89fd34aa3c + subpackages: + - log +- name: github.com/go-logfmt/logfmt + version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 +- name: github.com/go-stack/stack + version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82 +- name: github.com/golang/lint + version: b8599f7d71e7fead76b25aeb919c0e2558672f4a + subpackages: + - golint +- name: github.com/kr/logfmt + version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 +- name: github.com/mattn/go-colorable + version: d228849504861217f796da67fae4f6e347643f15 +- name: github.com/mattn/go-isatty + version: 30a891c33c7cde7b02a981314b4228ec99380cca +- name: github.com/mattn/goveralls + version: 8482d0ebd2a64f95ce02d2b9b7065b0d6fe9151b +- name: github.com/pborman/uuid + version: 1b00554d822231195d1babd97ff4a781231955c9 +- name: github.com/pkg/errors + version: 645ef00459ed84a119197bfb8d8205042c6df63d +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/satori/go.uuid + version: 879c5887cd475cd7864858769793b2ceb0d44feb +- name: github.com/sirupsen/logrus + version: 9b48ece7fc373043054858f8c0d362665e866004 +- name: github.com/stretchr/testify + version: 4d4bfba8f1d1027c4fdbe371823030df51419987 + subpackages: + - assert + - require +- name: go.pedge.io/lion + version: d0ba5a16db3710bd9af5511722d8ddaf897fb0c1 +- name: golang.org/x/sys + version: d4feaf1a7e61e1d9e79e6c4e76c6349e9cab0a03 + subpackages: + - unix +- name: golang.org/x/tools + version: 496819729719f9d07692195e0a94d6edd2251389 + subpackages: + - cover +- name: gopkg.in/inconshreveable/log15.v2 + version: b105bd37f74e5d9dc7b6ad7806715c7a2b83fd3f + subpackages: + - stack + - term diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml new file mode 100644 index 00000000..4ea7495b --- /dev/null +++ b/vendor/go.uber.org/zap/glide.yaml @@ -0,0 +1,32 @@ +package: go.uber.org/zap +license: MIT +import: +- package: go.uber.org/atomic + version: ^1 +testImport: +- package: github.com/satori/go.uuid +- package: github.com/sirupsen/logrus +- package: github.com/apex/log + subpackages: + - handlers/json +- package: github.com/go-kit/kit + subpackages: + - log +- package: github.com/stretchr/testify + subpackages: + - assert + - require +- package: gopkg.in/inconshreveable/log15.v2 +- package: 
github.com/mattn/goveralls +- package: github.com/pborman/uuid +- package: github.com/pkg/errors +- package: go.pedge.io/lion +- package: golang.org/x/tools + subpackages: + - cover +- package: github.com/golang/lint + subpackages: + - golint +- package: github.com/axw/gocov + subpackages: + - gocov diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go new file mode 100644 index 00000000..805aefd3 --- /dev/null +++ b/vendor/go.uber.org/zap/global.go @@ -0,0 +1,114 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "bytes" + "log" + "os" + "sync" +) + +const ( + _stdLogDefaultDepth = 2 + _loggerWriterDepth = 1 +) + +var ( + _globalMu sync.RWMutex + _globalL = NewNop() + _globalS = _globalL.Sugar() +) + +// L returns the global Logger, which can be reconfigured with ReplaceGlobals. +// +// It's safe for concurrent use. +func L() *Logger { + _globalMu.RLock() + l := _globalL + _globalMu.RUnlock() + return l +} + +// S returns the global SugaredLogger, which can be reconfigured with +// ReplaceGlobals. +// +// It's safe for concurrent use. +func S() *SugaredLogger { + _globalMu.RLock() + s := _globalS + _globalMu.RUnlock() + return s +} + +// ReplaceGlobals replaces the global Logger and the SugaredLogger, and returns +// a function to restore the original values. +// +// It's safe for concurrent use. +func ReplaceGlobals(logger *Logger) func() { + _globalMu.Lock() + prev := _globalL + _globalL = logger + _globalS = logger.Sugar() + _globalMu.Unlock() + return func() { ReplaceGlobals(prev) } +} + +// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at +// InfoLevel. To redirect the standard library's package-global logging +// functions, use RedirectStdLog instead. +func NewStdLog(l *Logger) *log.Logger { + return log.New(&loggerWriter{l.WithOptions( + AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth), + )}, "" /* prefix */, 0 /* flags */) +} + +// RedirectStdLog redirects output from the standard library's package-global +// logger to the supplied logger at InfoLevel. Since zap already handles caller +// annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stdout. 
+func RedirectStdLog(l *Logger) func() {
+	flags := log.Flags()
+	prefix := log.Prefix()
+	log.SetFlags(0)
+	log.SetPrefix("")
+	log.SetOutput(&loggerWriter{l.WithOptions(
+		AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth),
+	)})
+	return func() {
+		log.SetFlags(flags)
+		log.SetPrefix(prefix)
+		log.SetOutput(os.Stderr)
+	}
+}
+
+type loggerWriter struct {
+	logger *Logger
+}
+
+func (l *loggerWriter) Write(p []byte) (int, error) {
+	p = bytes.TrimSpace(p)
+	l.logger.Info(string(p))
+	return len(p), nil
+}
diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go
new file mode 100644
index 00000000..f171c384
--- /dev/null
+++ b/vendor/go.uber.org/zap/http_handler.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"go.uber.org/zap/zapcore"
+)
+
+// ServeHTTP is a simple JSON endpoint that can report on or change the current
+// logging level.
+//
+// GET requests return a JSON description of the current logging level. PUT
+// requests change the logging level and expect a payload like:
+//   {"level":"info"}
+//
+// It's perfectly safe to change the logging level while a program is running.
+func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	type errorResponse struct {
+		Error string `json:"error"`
+	}
+	type payload struct {
+		Level *zapcore.Level `json:"level"`
+	}
+
+	enc := json.NewEncoder(w)
+
+	switch r.Method {
+
+	case "GET":
+		current := lvl.Level()
+		enc.Encode(payload{Level: &current})
+
+	case "PUT":
+		var req payload
+
+		if errmess := func() string {
+			if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+				return fmt.Sprintf("Request body must be well-formed JSON: %v", err)
+			}
+			if req.Level == nil {
+				return "Must specify a logging level."
+ } + return "" + }(); errmess != "" { + w.WriteHeader(http.StatusBadRequest) + enc.Encode(errorResponse{Error: errmess}) + return + } + + lvl.SetLevel(*req.Level) + enc.Encode(req) + + default: + w.WriteHeader(http.StatusMethodNotAllowed) + enc.Encode(errorResponse{ + Error: "Only GET and PUT are supported.", + }) + } +} diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go new file mode 100644 index 00000000..dad583aa --- /dev/null +++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go @@ -0,0 +1,31 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package bufferpool houses zap's shared internal buffer pool. Third-party +// packages can recreate the same functionality with buffers.NewPool. +package bufferpool + +import "go.uber.org/zap/buffer" + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. + Get = _pool.Get +) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go new file mode 100644 index 00000000..c4d5d02a --- /dev/null +++ b/vendor/go.uber.org/zap/internal/color/color.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package color adds coloring functionality for TTY output. 
+package color + +import "fmt" + +// Foreground colors. +const ( + Black Color = iota + 30 + Red + Green + Yellow + Blue + Magenta + Cyan + White +) + +// Color represents a text color. +type Color uint8 + +// Add adds the coloring to the given string. +func (c Color) Add(s string) string { + return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) +} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go new file mode 100644 index 00000000..dfc5b05f --- /dev/null +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -0,0 +1,64 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package exit provides stubs so that unit tests can exercise code that calls +// os.Exit(1). +package exit + +import "os" + +var real = func() { os.Exit(1) } + +// Exit normally terminates the process by calling os.Exit(1). If the package +// is stubbed, it instead records a call in the testing spy. +func Exit() { + real() +} + +// A StubbedExit is a testing fake for os.Exit. +type StubbedExit struct { + Exited bool + prev func() +} + +// Stub substitutes a fake for the call to os.Exit(1). +func Stub() *StubbedExit { + s := &StubbedExit{prev: real} + real = s.exit + return s +} + +// WithStub runs the supplied function with Exit stubbed. It returns the stub +// used, so that users can test whether the process would have crashed. +func WithStub(f func()) *StubbedExit { + s := Stub() + defer s.Unstub() + f() + return s +} + +// Unstub restores the previous exit function. +func (se *StubbedExit) Unstub() { + real = se.prev +} + +func (se *StubbedExit) exit() { + se.Exited = true +} diff --git a/vendor/go.uber.org/zap/internal/multierror/multierror.go b/vendor/go.uber.org/zap/internal/multierror/multierror.go new file mode 100644 index 00000000..e5639877 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/multierror/multierror.go @@ -0,0 +1,72 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package multierror provides a simple way to treat a collection of errors as +// a single error. +package multierror + +import "go.uber.org/zap/internal/bufferpool" + +// implement the standard lib's error interface on a private type so that we +// can't forget to call Error.AsError(). +type errSlice []error + +func (es errSlice) Error() string { + b := bufferpool.Get() + for i, err := range es { + if i > 0 { + b.AppendByte(';') + b.AppendByte(' ') + } + b.AppendString(err.Error()) + } + ret := b.String() + b.Free() + return ret +} + +// Error wraps a []error to implement the error interface. +type Error struct { + errs errSlice +} + +// AsError converts the collection to a single error value. +// +// Note that failing to use AsError will almost certainly lead to bugs with +// non-nil interfaces containing nil concrete values. +func (e Error) AsError() error { + switch len(e.errs) { + case 0: + return nil + case 1: + return e.errs[0] + default: + return e.errs + } +} + +// Append adds an error to the collection. Adding a nil error is a no-op. +func (e Error) Append(err error) Error { + if err == nil { + return e + } + e.errs = append(e.errs, err) + return e +} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go new file mode 100644 index 00000000..614648ab --- /dev/null +++ b/vendor/go.uber.org/zap/level.go @@ -0,0 +1,128 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "go.uber.org/atomic" + "go.uber.org/zap/zapcore" +) + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel = zapcore.DebugLevel + // InfoLevel is the default logging priority. + InfoLevel = zapcore.InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel = zapcore.WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel = zapcore.ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel = zapcore.DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel = zapcore.PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel = zapcore.FatalLevel +) + +// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with +// an anonymous function. +type LevelEnablerFunc func(zapcore.Level) bool + +// Enabled calls the wrapped function. +func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } + +// An AtomicLevel is an atomically changeable, dynamic logging level. It lets +// you safely change the log level of a tree of loggers (the root logger and +// any children created by adding context) at runtime. +// +// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to +// alter its level. +// +// AtomicLevels must be created with the NewAtomicLevel constructor to allocate +// their internal atomic pointer. +type AtomicLevel struct { + l *atomic.Int32 +} + +// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging +// enabled. +func NewAtomicLevel() AtomicLevel { + return AtomicLevel{ + l: atomic.NewInt32(int32(InfoLevel)), + } +} + +// NewAtomicLevelAt is a convienence function that creates an AtomicLevel +// and then calls SetLevel with the given level. +func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { + a := NewAtomicLevel() + a.SetLevel(l) + return a +} + +// Enabled implements the zapcore.LevelEnabler interface, which allows the +// AtomicLevel to be used in place of traditional static levels. +func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { + return lvl.Level().Enabled(l) +} + +// Level returns the minimum enabled log level. +func (lvl AtomicLevel) Level() zapcore.Level { + return zapcore.Level(int8(lvl.l.Load())) +} + +// SetLevel alters the logging level. +func (lvl AtomicLevel) SetLevel(l zapcore.Level) { + lvl.l.Store(int32(l)) +} + +// String returns the string representation of the underlying Level. +func (lvl AtomicLevel) String() string { + return lvl.Level().String() +} + +// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text +// representations as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl *AtomicLevel) UnmarshalText(text []byte) error { + if lvl.l == nil { + lvl.l = &atomic.Int32{} + } + + var l zapcore.Level + if err := l.UnmarshalText(text); err != nil { + return err + } + + lvl.SetLevel(l) + return nil +} + +// MarshalText marshals the AtomicLevel to a byte slice. 
It uses the same +// text representation as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl AtomicLevel) MarshalText() (text []byte, err error) { + return lvl.Level().MarshalText() +} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go new file mode 100644 index 00000000..d478d12b --- /dev/null +++ b/vendor/go.uber.org/zap/logger.go @@ -0,0 +1,277 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "time" + + "go.uber.org/zap/zapcore" +) + +// A Logger provides fast, leveled, structured logging. All methods are safe for +// concurrent use. +// +// If you'd prefer a slower but less verbose logger, see the SugaredLogger. +type Logger struct { + core zapcore.Core + + development bool + name string + errorOutput zapcore.WriteSyncer + + addCaller bool + addStack zapcore.LevelEnabler + + callerSkip int +} + +// New constructs a new Logger from the provided zapcore.Core and Options. If +// the passed zapcore.Core is nil, we fall back to using a no-op +// implementation. +// +// This is the most flexible way to construct a Logger, but also the most +// verbose. For typical use cases, NewProduction and NewDevelopment are more +// convenient. +func New(core zapcore.Core, options ...Option) *Logger { + if core == nil { + return NewNop() + } + log := &Logger{ + core: core, + errorOutput: zapcore.Lock(os.Stderr), + addStack: zapcore.FatalLevel + 1, + } + return log.WithOptions(options...) +} + +// NewNop returns a no-op Logger. It never writes out logs or internal errors, +// and it never runs user-defined hooks. +// +// Using WithOptions to replace the Core or error output of a no-op Logger can +// re-enable logging. +func NewNop() *Logger { + return &Logger{ + core: zapcore.NewNopCore(), + errorOutput: zapcore.AddSync(ioutil.Discard), + addStack: zapcore.FatalLevel + 1, + } +} + +// NewProduction builds a sensible production Logger that writes InfoLevel and +// above logs to standard error as JSON. +// +// It's a shortcut for NewProductionConfig().Build(...Option). +func NewProduction(options ...Option) (*Logger, error) { + return NewProductionConfig().Build(options...) +} + +// NewDevelopment builds a development Logger that writes DebugLevel and above +// logs to standard error in a human-friendly format. 
+// +// It's a shortcut for NewDevelopmentConfig().Build(...Option). +func NewDevelopment(options ...Option) (*Logger, error) { + return NewDevelopmentConfig().Build(options...) +} + +// Sugar converts a Logger to a SugaredLogger. +func (log *Logger) Sugar() *SugaredLogger { + core := log.clone() + core.callerSkip += 2 + return &SugaredLogger{core} +} + +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. +func (log *Logger) Named(s string) *Logger { + if s == "" { + return log + } + l := log.clone() + if log.name == "" { + l.name = s + } else { + l.name = strings.Join([]string{l.name, s}, ".") + } + return l +} + +// WithOptions clones the current Logger, applies the supplied Options, and +// returns the resulting Logger. It's safe to use concurrently. +func (log *Logger) WithOptions(opts ...Option) *Logger { + c := log.clone() + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// With creates a child logger and adds structured context to it. Fields added +// to the child don't affect the parent, and vice versa. +func (log *Logger) With(fields ...zapcore.Field) *Logger { + if len(fields) == 0 { + return log + } + l := log.clone() + l.core = l.core.With(fields) + return l +} + +// Check returns a CheckedEntry if logging a message at the specified level +// is enabled. It's a completely optional optimization; in high-performance +// applications, Check can help avoid allocating a slice to hold fields. +func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + return log.check(lvl, msg) +} + +// Debug logs a message at DebugLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Debug(msg string, fields ...zapcore.Field) { + if ce := log.check(DebugLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Info logs a message at InfoLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Info(msg string, fields ...zapcore.Field) { + if ce := log.check(InfoLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Warn logs a message at WarnLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Warn(msg string, fields ...zapcore.Field) { + if ce := log.check(WarnLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Error logs a message at ErrorLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Error(msg string, fields ...zapcore.Field) { + if ce := log.check(ErrorLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// DPanic logs a message at DPanicLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// If the logger is in development mode, it then panics (DPanic means +// "development panic"). This is useful for catching errors that are +// recoverable, but shouldn't ever happen. +func (log *Logger) DPanic(msg string, fields ...zapcore.Field) { + if ce := log.check(DPanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Panic logs a message at PanicLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then panics, even if logging at PanicLevel is disabled. 
+func (log *Logger) Panic(msg string, fields ...zapcore.Field) {
+	if ce := log.check(PanicLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Fatal logs a message at FatalLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then calls os.Exit(1), even if logging at FatalLevel is disabled.
+func (log *Logger) Fatal(msg string, fields ...zapcore.Field) {
+	if ce := log.check(FatalLevel, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
+// Sync flushes any buffered log entries.
+func (log *Logger) Sync() error {
+	return log.core.Sync()
+}
+
+// Core returns the underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+	return log.core
+}
+
+func (log *Logger) clone() *Logger {
+	copy := *log
+	return &copy
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+	// check must always be called directly by a method in the Logger interface
+	// (e.g., Check, Info, Fatal).
+	const callerSkipOffset = 2
+
+	// Create basic checked entry thru the core; this will be non-nil if the
+	// log message will actually be written somewhere.
+	ent := zapcore.Entry{
+		LoggerName: log.name,
+		Time:       time.Now(),
+		Level:      lvl,
+		Message:    msg,
+	}
+	ce := log.core.Check(ent, nil)
+	willWrite := ce != nil
+
+	// Set up any required terminal behavior.
+	switch ent.Level {
+	case zapcore.PanicLevel:
+		ce = ce.Should(ent, zapcore.WriteThenPanic)
+	case zapcore.FatalLevel:
+		ce = ce.Should(ent, zapcore.WriteThenFatal)
+	case zapcore.DPanicLevel:
+		if log.development {
+			ce = ce.Should(ent, zapcore.WriteThenPanic)
+		}
+	}
+
+	// Only do further annotation if we're going to write this message; checked
+	// entries that exist only for terminal behavior don't benefit from
+	// annotation.
+	if !willWrite {
+		return ce
+	}
+
+	// Thread the error output through to the CheckedEntry.
+	ce.ErrorOutput = log.errorOutput
+	if log.addCaller {
+		ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset))
+		if !ce.Entry.Caller.Defined {
+			fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC())
+			log.errorOutput.Sync()
+		}
+	}
+	if log.addStack.Enabled(ce.Entry.Level) {
+		ce.Entry.Stack = Stack("").String
+	}
+
+	return ce
+}
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
new file mode 100644
index 00000000..c80146ee
--- /dev/null
+++ b/vendor/go.uber.org/zap/options.go
@@ -0,0 +1,102 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "go.uber.org/zap/zapcore" + +// Option is used to set options for the logger. +type Option interface { + apply(*Logger) +} + +// optionFunc wraps a func so it satisfies the Option interface. +type optionFunc func(*Logger) + +func (f optionFunc) apply(log *Logger) { + f(log) +} + +// WrapCore wraps or replaces the Logger's underlying zapcore.Core. +func WrapCore(f func(zapcore.Core) zapcore.Core) Option { + return optionFunc(func(log *Logger) { + log.core = f(log.core) + }) +} + +// Hooks registers functions which will be called each time the Logger writes +// out an Entry. Repeated use of Hooks is additive. +// +// See zapcore.RegisterHooks for details. +func Hooks(hooks ...func(zapcore.Entry) error) Option { + return optionFunc(func(log *Logger) { + log.core = zapcore.RegisterHooks(log.core, hooks...) + }) +} + +// Fields adds fields to the Logger. +func Fields(fs ...zapcore.Field) Option { + return optionFunc(func(log *Logger) { + log.core = log.core.With(fs) + }) +} + +// ErrorOutput sets the destination for errors generated by the logger. The +// supplied WriteSyncer must be safe for concurrent use. +// +// The Open and zapcore.Lock functions are the simplest ways to make files safe +// for concurrent use. +func ErrorOutput(w zapcore.WriteSyncer) Option { + return optionFunc(func(log *Logger) { + log.errorOutput = w + }) +} + +// Development puts the logger in development mode, which alters the behavior +// of the DPanic method. +func Development() Option { + return optionFunc(func(log *Logger) { + log.development = true + }) +} + +// AddCaller configures the Logger to annotate each message with the filename +// and line number of zap's caller. +func AddCaller() Option { + return optionFunc(func(log *Logger) { + log.addCaller = true + }) +} + +// AddCallerSkip increases the number of callers skipped by caller annotation +// (as enabled by the AddCaller option). +func AddCallerSkip(skip int) Option { + return optionFunc(func(log *Logger) { + log.callerSkip += skip + }) +} + +// AddStacktrace configures the Logger to record a stack trace for all messages at +// or above a given level. +func AddStacktrace(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + log.addStack = lvl + }) +} diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go new file mode 100644 index 00000000..3d2a22c2 --- /dev/null +++ b/vendor/go.uber.org/zap/stacktrace.go @@ -0,0 +1,98 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "runtime" + "strings" + "sync" + + "go.uber.org/zap/internal/bufferpool" +) + +var ( + _stacktraceIgnorePrefixes = []string{ + "runtime.goexit", + "runtime.main", + } + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } +) + +func takeStacktrace() string { + buffer := bufferpool.Get() + defer buffer.Free() + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + for { + // Skip the call to runtime.Counters and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + n := runtime.Callers(2, programCounters.pcs) + if n < cap(programCounters.pcs) { + programCounters.pcs = programCounters.pcs[:n] + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. + programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + frames := runtime.CallersFrames(programCounters.pcs) + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if shouldIgnoreStacktraceFunction(frame.Function) { + continue + } + if i != 0 { + buffer.AppendByte('\n') + } + i++ + buffer.AppendString(frame.Function) + buffer.AppendByte('\n') + buffer.AppendByte('\t') + buffer.AppendString(frame.File) + buffer.AppendByte(':') + buffer.AppendInt(int64(frame.Line)) + } + + return buffer.String() +} + +func shouldIgnoreStacktraceFunction(function string) bool { + for _, prefix := range _stacktraceIgnorePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go new file mode 100644 index 00000000..71640b1c --- /dev/null +++ b/vendor/go.uber.org/zap/sugar.go @@ -0,0 +1,294 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/internal/multierror" + "go.uber.org/zap/zapcore" +) + +const ( + _oddNumberErrMsg = "Ignored key without a value." + _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." +) + +// A SugaredLogger wraps the base Logger functionality in a slower, but less +// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar +// method. +type SugaredLogger struct { + base *Logger +} + +// Desugar unwraps a SugaredLogger, exposing the original Logger. +func (s *SugaredLogger) Desugar() *Logger { + base := s.base.clone() + base.callerSkip -= 2 + return base +} + +// Named adds a sub-scope to the logger's name. See Logger.Named for details. +func (s *SugaredLogger) Named(name string) *SugaredLogger { + return &SugaredLogger{base: s.base.Named(name)} +} + +// With adds a variadic number of fields to the logging context. It accepts a +// mix of strongly-typed zapcore.Field objects and loosely-typed key-value +// pairs. When processing pairs, the first element of the pair is used as the +// field key and the second as the field value. +// +// For example, +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// is the equivalent of +// baseLogger.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) +// +// Note that the keys in key-value pairs should be strings. In development, +// passing a non-string key panics. In production, the logger is more +// forgiving: a separate error is logged, but the key-value pair is skipped and +// execution continues. Passing an orphaned key triggers similar behavior: +// panics in development and errors in production. +func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} +} + +// Debug uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Debug(args ...interface{}) { + s.log(DebugLevel, "", args, nil) +} + +// Info uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Info(args ...interface{}) { + s.log(InfoLevel, "", args, nil) +} + +// Warn uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Warn(args ...interface{}) { + s.log(WarnLevel, "", args, nil) +} + +// Error uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Error(args ...interface{}) { + s.log(ErrorLevel, "", args, nil) +} + +// DPanic uses fmt.Sprint to construct and log a message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanic(args ...interface{}) { + s.log(DPanicLevel, "", args, nil) +} + +// Panic uses fmt.Sprint to construct and log a message, then panics. +func (s *SugaredLogger) Panic(args ...interface{}) { + s.log(PanicLevel, "", args, nil) +} + +// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +func (s *SugaredLogger) Fatal(args ...interface{}) { + s.log(FatalLevel, "", args, nil) +} + +// Debugf uses fmt.Sprintf to log a templated message. 
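A companion sketch for the SugaredLogger methods above; the keys and values are made up, and the error from NewDevelopment is ignored for brevity:

package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()

	sugar := logger.Sugar()

	// Loosely-typed key-value context; a non-string or dangling key is
	// reported through DPanic, as described above.
	sugar.Infow("user logged in",
		"user", "alice",
		"attempt", 1,
	)

	// printf-style templating.
	sugar.Debugf("retrying in %d seconds", 5)

	// Desugar returns to the strongly-typed Logger when allocations matter.
	sugar.Desugar().Info("back to structured fields", zap.Bool("sugared", false))
}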
+func (s *SugaredLogger) Debugf(template string, args ...interface{}) { + s.log(DebugLevel, template, args, nil) +} + +// Infof uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Infof(template string, args ...interface{}) { + s.log(InfoLevel, template, args, nil) +} + +// Warnf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Warnf(template string, args ...interface{}) { + s.log(WarnLevel, template, args, nil) +} + +// Errorf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Errorf(template string, args ...interface{}) { + s.log(ErrorLevel, template, args, nil) +} + +// DPanicf uses fmt.Sprintf to log a templated message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { + s.log(DPanicLevel, template, args, nil) +} + +// Panicf uses fmt.Sprintf to log a templated message, then panics. +func (s *SugaredLogger) Panicf(template string, args ...interface{}) { + s.log(PanicLevel, template, args, nil) +} + +// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { + s.log(FatalLevel, template, args, nil) +} + +// Debugw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +// +// When debug-level logging is disabled, this is much faster than +// s.With(keysAndValues).Debug(msg) +func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.log(DebugLevel, msg, nil, keysAndValues) +} + +// Infow logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { + s.log(InfoLevel, msg, nil, keysAndValues) +} + +// Warnw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.log(WarnLevel, msg, nil, keysAndValues) +} + +// Errorw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { + s.log(ErrorLevel, msg, nil, keysAndValues) +} + +// DPanicw logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { + s.log(DPanicLevel, msg, nil, keysAndValues) +} + +// Panicw logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { + s.log(PanicLevel, msg, nil, keysAndValues) +} + +// Fatalw logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { + s.log(FatalLevel, msg, nil, keysAndValues) +} + +// Sync flushes any buffered log entries. +func (s *SugaredLogger) Sync() error { + return s.base.Sync() +} + +func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { + // If logging at this level is completely disabled, skip the overhead of + // string formatting. 
+ if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + // Format with Sprint, Sprintf, or neither. + msg := template + if msg == "" && len(fmtArgs) > 0 { + msg = fmt.Sprint(fmtArgs...) + } else if msg != "" && len(fmtArgs) > 0 { + msg = fmt.Sprintf(template, fmtArgs...) + } + + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +func (s *SugaredLogger) sweetenFields(args []interface{}) []zapcore.Field { + if len(args) == 0 { + return nil + } + + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields := make([]zapcore.Field, 0, len(args)) + var invalid invalidPairs + + for i := 0; i < len(args); { + // This is a strongly-typed field. Consume it and move on. + if f, ok := args[i].(zapcore.Field); ok { + fields = append(fields, f) + i++ + continue + } + + // Make sure this element isn't a dangling key. + if i == len(args)-1 { + s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i])) + break + } + + // Consume this value and the next, treating them as a key-value pair. If the + // key isn't a string, add this pair to the slice of invalid pairs. + key, val := args[i], args[i+1] + if keyStr, ok := key.(string); !ok { + // Subsequent errors are likely, so allocate once up front. + if cap(invalid) == 0 { + invalid = make(invalidPairs, 0, len(args)/2) + } + invalid = append(invalid, invalidPair{i, key, val}) + } else { + fields = append(fields, Any(keyStr, val)) + } + i += 2 + } + + // If we encountered any invalid key-value pairs, log an error. + if len(invalid) > 0 { + s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid)) + } + return fields +} + +type invalidPair struct { + position int + key, value interface{} +} + +func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt64("position", int64(p.position)) + Any("key", p.key).AddTo(enc) + Any("value", p.value).AddTo(enc) + return nil +} + +type invalidPairs []invalidPair + +func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { + var errs multierror.Error + for i := range ps { + errs = errs.Append(enc.AppendObject(ps[i])) + } + return errs.AsError() +} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go new file mode 100644 index 00000000..c5a1f162 --- /dev/null +++ b/vendor/go.uber.org/zap/time.go @@ -0,0 +1,27 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "time" + +func timeToMillis(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond) +} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go new file mode 100644 index 00000000..de39e599 --- /dev/null +++ b/vendor/go.uber.org/zap/writer.go @@ -0,0 +1,91 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "io/ioutil" + "os" + + "go.uber.org/zap/internal/multierror" + "go.uber.org/zap/zapcore" +) + +// Open is a high-level wrapper that takes a variadic number of paths, opens or +// creates each of the specified files, and combines them into a locked +// WriteSyncer. It also returns any error encountered and a function to close +// any opened files. +// +// Passing no paths returns a no-op WriteSyncer. The special paths "stdout" and +// "stderr" are interpreted as os.Stdout and os.Stderr, respectively. +func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { + writers, close, err := open(paths) + if err != nil { + return nil, nil, err + } + + writer := CombineWriteSyncers(writers...) + return writer, close, nil +} + +func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { + var errs multierror.Error + writers := make([]zapcore.WriteSyncer, 0, len(paths)) + files := make([]*os.File, 0, len(paths)) + close := func() { + for _, f := range files { + f.Close() + } + } + for _, path := range paths { + switch path { + case "stdout": + writers = append(writers, os.Stdout) + // Don't close standard out. + continue + case "stderr": + writers = append(writers, os.Stderr) + // Don't close standard error. + continue + } + f, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) + errs = errs.Append(err) + if err == nil { + writers = append(writers, f) + files = append(files, f) + } + } + + if err := errs.AsError(); err != nil { + close() + return writers, nil, err + } + + return writers, close, nil +} + +// CombineWriteSyncers combines multiple WriteSyncers into a single, locked +// WriteSyncer. 
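A short sketch of the Open helper defined above; the log file path is invented and error handling is abbreviated. It shows the special "stderr" path and the returned cleanup function:

package main

import "go.uber.org/zap"

func main() {
	// "stderr" is special-cased and never closed; the file is opened (or
	// created) for append.
	ws, cleanup, err := zap.Open("stderr", "/tmp/example-app.log")
	if err != nil {
		panic(err)
	}
	defer cleanup() // closes only the files Open itself opened

	// The result is a single locked WriteSyncer fanning out to both sinks.
	ws.Write([]byte("raw line written to every sink\n"))
	ws.Sync()
}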
+func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { + if len(writers) == 0 { + return zapcore.AddSync(ioutil.Discard) + } + return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) +} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go new file mode 100644 index 00000000..9fc89e92 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -0,0 +1,140 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "sync" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +var _sliceEncoderPool = sync.Pool{ + New: func() interface{} { + return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} + }, +} + +func getSliceEncoder() *sliceArrayEncoder { + return _sliceEncoderPool.Get().(*sliceArrayEncoder) +} + +func putSliceEncoder(e *sliceArrayEncoder) { + e.elems = e.elems[:0] + _sliceEncoderPool.Put(e) +} + +type consoleEncoder struct { + *jsonEncoder +} + +// NewConsoleEncoder creates an encoder whose output is designed for human - +// rather than machine - consumption. It serializes the core log entry data +// (message, level, timestamp, etc.) in a plain-text format and leaves the +// structured context as JSON. +// +// Note that although the console encoder doesn't use the keys specified in the +// encoder configuration, it will omit any element whose key is set to the empty +// string. +func NewConsoleEncoder(cfg EncoderConfig) Encoder { + return consoleEncoder{newJSONEncoder(cfg, true)} +} + +func (c consoleEncoder) Clone() Encoder { + return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} +} + +func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + line := bufferpool.Get() + + // We don't want the entry's metadata to be quoted and escaped (if it's + // encoded as strings), which means that we can't use the JSON encoder. The + // simplest option is to use the memory encoder and fmt.Fprint. + // + // If this ever becomes a performance bottleneck, we can implement + // ArrayEncoder for our plain-text format. 
+ arr := getSliceEncoder() + if c.TimeKey != "" && c.EncodeTime != nil { + c.EncodeTime(ent.Time, arr) + } + if c.LevelKey != "" && c.EncodeLevel != nil { + c.EncodeLevel(ent.Level, arr) + } + if ent.LoggerName != "" && c.NameKey != "" { + arr.AppendString(ent.LoggerName) + } + if ent.Caller.Defined && c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + for i := range arr.elems { + if i > 0 { + line.AppendByte('\t') + } + fmt.Fprint(line, arr.elems[i]) + } + putSliceEncoder(arr) + + // Add the message itself. + if c.MessageKey != "" { + c.addTabIfNecessary(line) + line.AppendString(ent.Message) + } + + // Add any structured context. + c.writeContext(line, fields) + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. + if ent.Stack != "" && c.StacktraceKey != "" { + line.AppendByte('\n') + line.AppendString(ent.Stack) + } + + if c.LineEnding != "" { + line.AppendString(c.LineEnding) + } else { + line.AppendString(DefaultLineEnding) + } + return line, nil +} + +func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { + context := c.jsonEncoder.Clone().(*jsonEncoder) + defer context.buf.Free() + + addFields(context, extra) + context.closeOpenNamespaces() + if context.buf.Len() == 0 { + return + } + + c.addTabIfNecessary(line) + line.AppendByte('{') + line.Write(context.buf.Bytes()) + line.AppendByte('}') +} + +func (c consoleEncoder) addTabIfNecessary(line *buffer.Buffer) { + if line.Len() > 0 { + line.AppendByte('\t') + } +} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go new file mode 100644 index 00000000..a1ef8b03 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// Core is a minimal, fast logger interface. It's designed for library authors +// to wrap in a more user-friendly API. +type Core interface { + LevelEnabler + + // With adds structured context to the Core. + With([]Field) Core + // Check determines whether the supplied Entry should be logged (using the + // embedded LevelEnabler and possibly some extra logic). If the entry + // should be logged, the Core adds itself to the CheckedEntry and returns + // the result. + // + // Callers must use Check before calling Write. 
+ Check(Entry, *CheckedEntry) *CheckedEntry + // Write serializes the Entry and any Fields supplied at the log site and + // writes them to their destination. + // + // If called, Write should always log the Entry and Fields; it should not + // replicate the logic of Check. + Write(Entry, []Field) error + // Sync flushes buffered logs (if any). + Sync() error +} + +type nopCore struct{} + +// NewNopCore returns a no-op Core. +func NewNopCore() Core { return nopCore{} } +func (nopCore) Enabled(Level) bool { return false } +func (n nopCore) With([]Field) Core { return n } +func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } +func (nopCore) Write(Entry, []Field) error { return nil } +func (nopCore) Sync() error { return nil } + +// NewCore creates a Core that writes logs to a WriteSyncer. +func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { + return &ioCore{ + LevelEnabler: enab, + enc: enc, + out: ws, + } +} + +type ioCore struct { + LevelEnabler + enc Encoder + out WriteSyncer +} + +func (c *ioCore) With(fields []Field) Core { + clone := c.clone() + addFields(clone.enc, fields) + return clone +} + +func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *ioCore) Write(ent Entry, fields []Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + _, err = c.out.Write(buf.Bytes()) + buf.Free() + if err != nil { + return err + } + if ent.Level > ErrorLevel { + // Since we may be crashing the program, sync the output. Ignore Sync + // errors, pending a clean solution to issue #370. + c.Sync() + } + return nil +} + +func (c *ioCore) Sync() error { + return c.out.Sync() +} + +func (c *ioCore) clone() *ioCore { + return &ioCore{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone(), + out: c.out, + } +} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go new file mode 100644 index 00000000..e4af3377 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zapcore defines and implements the low-level interfaces upon which +// zap is built. By providing alternate implementations of these interfaces, +// external packages can extend zap's capabilities. 
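Tying core.go to the rest of the package, a hedged sketch that builds a Core from the console encoder (console_encoder.go above) and the encoder configuration defined later in encoder.go, then hands it to zap.New; the keys and the component name are arbitrary choices for the example:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Any key left empty is simply omitted from the encoded entry.
	encCfg := zapcore.EncoderConfig{
		MessageKey:     "msg",
		LevelKey:       "level",
		TimeKey:        "ts",
		CallerKey:      "caller",
		LineEnding:     zapcore.DefaultLineEnding,
		EncodeLevel:    zapcore.CapitalLevelEncoder,
		EncodeTime:     zapcore.ISO8601TimeEncoder,
		EncodeDuration: zapcore.StringDurationEncoder,
		EncodeCaller:   zapcore.ShortCallerEncoder,
	}

	core := zapcore.NewCore(
		zapcore.NewConsoleEncoder(encCfg),
		zapcore.Lock(os.Stdout), // *os.File already satisfies WriteSyncer
		zap.InfoLevel,
	)

	logger := zap.New(core, zap.AddCaller())
	defer logger.Sync()
	logger.Info("custom core wired up", zap.String("component", "example"))
}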
+package zapcore diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go new file mode 100644 index 00000000..a026f467 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -0,0 +1,324 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "time" + + "go.uber.org/zap/buffer" +) + +// DefaultLineEnding defines the default line ending when writing logs. +// Alternate line endings specified in EncoderConfig can override this +// behavior. +const DefaultLineEnding = "\n" + +// A LevelEncoder serializes a Level to a primitive type. +type LevelEncoder func(Level, PrimitiveArrayEncoder) + +// LowercaseLevelEncoder serializes a Level to a lowercase string. For example, +// InfoLevel is serialized to "info". +func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.String()) +} + +// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring. +// For example, InfoLevel is serialized to "info" and colored blue. +func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToLowercaseColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.String()) + } + enc.AppendString(s) +} + +// CapitalLevelEncoder serializes a Level to an all-caps string. For example, +// InfoLevel is serialized to "INFO". +func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.CapitalString()) +} + +// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color. +// For example, InfoLevel is serialized to "INFO" and colored blue. +func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToCapitalColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.CapitalString()) + } + enc.AppendString(s) +} + +// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to +// CapitalLevelEncoder, "coloredCapital" is unmarshaled to CapitalColorLevelEncoder, +// "colored" is unmarshaled to LowercaseColorLevelEncoder, and anything else +// is unmarshaled to LowercaseLevelEncoder. 
+func (e *LevelEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "capital": + *e = CapitalLevelEncoder + case "capitalColor": + *e = CapitalColorLevelEncoder + case "color": + *e = LowercaseColorLevelEncoder + default: + *e = LowercaseLevelEncoder + } + return nil +} + +// A TimeEncoder serializes a time.Time to a primitive type. +type TimeEncoder func(time.Time, PrimitiveArrayEncoder) + +// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds +// since the Unix epoch. +func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + sec := float64(nanos) / float64(time.Second) + enc.AppendFloat64(sec) +} + +// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of +// milliseconds since the Unix epoch. +func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + millis := float64(nanos) / float64(time.Millisecond) + enc.AppendFloat64(millis) +} + +// EpochNanosTimeEncoder serializes a time.Time to an integer number of +// nanoseconds since the Unix epoch. +func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendInt64(t.UnixNano()) +} + +// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string +// with millisecond precision. +func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendString(t.Format("2006-01-02T15:04:05.000Z0700")) +} + +// UnmarshalText unmarshals text to a TimeEncoder. "iso8601" and "ISO8601" are +// unmarshaled to ISO8601TimeEncoder, "millis" is unmarshaled to +// EpochMillisTimeEncoder, and anything else is unmarshaled to EpochTimeEncoder. +func (e *TimeEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "iso8601", "ISO8601": + *e = ISO8601TimeEncoder + case "millis": + *e = EpochMillisTimeEncoder + case "nanos": + *e = EpochNanosTimeEncoder + default: + *e = EpochTimeEncoder + } + return nil +} + +// A DurationEncoder serializes a time.Duration to a primitive type. +type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) + +// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. +func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendFloat64(float64(d) / float64(time.Second)) +} + +// NanosDurationEncoder serializes a time.Duration to an integer number of +// nanoseconds elapsed. +func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendInt64(int64(d)) +} + +// StringDurationEncoder serializes a time.Duration using its built-in String +// method. +func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendString(d.String()) +} + +// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled +// to StringDurationEncoder, and anything else is unmarshaled to +// NanosDurationEncoder. +func (e *DurationEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "string": + *e = StringDurationEncoder + case "nanos": + *e = NanosDurationEncoder + default: + *e = SecondsDurationEncoder + } + return nil +} + +// A CallerEncoder serializes an EntryCaller to a primitive type. +type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) + +// FullCallerEncoder serializes a caller in /full/path/to/package/file:line +// format. +func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. 
+ enc.AppendString(caller.String()) +} + +// ShortCallerEncoder serializes a caller in package/file:line format, trimming +// all but the final directory from the full path. +func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. + enc.AppendString(caller.TrimmedPath()) +} + +// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to +// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder. +func (e *CallerEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullCallerEncoder + default: + *e = ShortCallerEncoder + } + return nil +} + +// An EncoderConfig allows users to configure the concrete encoders supplied by +// zapcore. +type EncoderConfig struct { + // Set the keys used for each log entry. If any key is empty, that portion + // of the entry is omitted. + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` + // Configure the primitive representations of common complex types. For + // example, some users may want all time.Times serialized as floating-point + // seconds since epoch, while others may prefer ISO8601 strings. + EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` + EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` + EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` + EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` +} + +// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a +// map- or struct-like object to the logging context. Like maps, ObjectEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ObjectEncoder interface { + // Logging-specific marshalers. + AddArray(key string, marshaler ArrayMarshaler) error + AddObject(key string, marshaler ObjectMarshaler) error + + // Built-in types. + AddBinary(key string, value []byte) // for arbitrary bytes + AddByteString(key string, value []byte) // for UTF-8 encoded bytes + AddBool(key string, value bool) + AddComplex128(key string, value complex128) + AddComplex64(key string, value complex64) + AddDuration(key string, value time.Duration) + AddFloat64(key string, value float64) + AddFloat32(key string, value float32) + AddInt(key string, value int) + AddInt64(key string, value int64) + AddInt32(key string, value int32) + AddInt16(key string, value int16) + AddInt8(key string, value int8) + AddString(key, value string) + AddTime(key string, value time.Time) + AddUint(key string, value uint) + AddUint64(key string, value uint64) + AddUint32(key string, value uint32) + AddUint16(key string, value uint16) + AddUint8(key string, value uint8) + AddUintptr(key string, value uintptr) + + // AddReflected uses reflection to serialize arbitrary objects, so it's slow + // and allocation-heavy. + AddReflected(key string, value interface{}) error + // OpenNamespace opens an isolated namespace where all subsequent fields will + // be added. Applications can use namespaces to prevent key collisions when + // injecting loggers into sub-components or third-party libraries. 
+ OpenNamespace(key string) +} + +// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding +// array-like objects to the logging context. Of note, it supports mixed-type +// arrays even though they aren't typical in Go. Like slices, ArrayEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ArrayEncoder interface { + // Built-in types. + PrimitiveArrayEncoder + + // Time-related types. + AppendDuration(time.Duration) + AppendTime(time.Time) + + // Logging-specific marshalers. + AppendArray(ArrayMarshaler) error + AppendObject(ObjectMarshaler) error + + // AppendReflected uses reflection to serialize arbitrary objects, so it's + // slow and allocation-heavy. + AppendReflected(value interface{}) error +} + +// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals +// only in Go's built-in types. It's included only so that Duration- and +// TimeEncoders cannot trigger infinite recursion. +type PrimitiveArrayEncoder interface { + // Built-in types. + AppendBool(bool) + AppendByteString([]byte) // for UTF-8 encoded bytes + AppendComplex128(complex128) + AppendComplex64(complex64) + AppendFloat64(float64) + AppendFloat32(float32) + AppendInt(int) + AppendInt64(int64) + AppendInt32(int32) + AppendInt16(int16) + AppendInt8(int8) + AppendString(string) + AppendUint(uint) + AppendUint64(uint64) + AppendUint32(uint32) + AppendUint16(uint16) + AppendUint8(uint8) + AppendUintptr(uintptr) +} + +// Encoder is a format-agnostic interface for all log entry marshalers. Since +// log encoders don't need to support the same wide range of use cases as +// general-purpose marshalers, it's possible to make them faster and +// lower-allocation. +// +// Implementations of the ObjectEncoder interface's methods can, of course, +// freely modify the receiver. However, the Clone and EncodeEntry methods will +// be called concurrently and shouldn't modify the receiver. +type Encoder interface { + ObjectEncoder + + // Clone copies the encoder, ensuring that adding fields to the copy doesn't + // affect the original. + Clone() Encoder + + // EncodeEntry encodes an entry and fields, along with any accumulated + // context, into a byte buffer and returns it. + EncodeEntry(Entry, []Field) (*buffer.Buffer, error) +} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go new file mode 100644 index 00000000..a0fe7370 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -0,0 +1,256 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "strings" + "sync" + "time" + + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/exit" + "go.uber.org/zap/internal/multierror" +) + +var ( + _cePool = sync.Pool{New: func() interface{} { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } + }} +) + +func getCheckedEntry() *CheckedEntry { + ce := _cePool.Get().(*CheckedEntry) + ce.reset() + return ce +} + +func putCheckedEntry(ce *CheckedEntry) { + if ce == nil { + return + } + _cePool.Put(ce) +} + +// NewEntryCaller makes an EntryCaller from the return signature of +// runtime.Caller. +func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { + if !ok { + return EntryCaller{} + } + return EntryCaller{ + PC: pc, + File: file, + Line: line, + Defined: true, + } +} + +// EntryCaller represents the caller of a logging function. +type EntryCaller struct { + Defined bool + PC uintptr + File string + Line int +} + +// String returns the full path and line number of the caller. +func (ec EntryCaller) String() string { + return ec.FullPath() +} + +// FullPath returns a /full/path/to/package/file:line description of the +// caller. +func (ec EntryCaller) FullPath() string { + if !ec.Defined { + return "undefined" + } + buf := bufferpool.Get() + buf.AppendString(ec.File) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// TrimmedPath returns a package/file:line description of the caller, +// preserving only the leaf directory name and file name. +func (ec EntryCaller) TrimmedPath() string { + if !ec.Defined { + return "undefined" + } + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + // Find the last separator. + // + idx := strings.LastIndexByte(ec.File, '/') + if idx == -1 { + return ec.FullPath() + } + // Find the penultimate separator. + idx = strings.LastIndexByte(ec.File[:idx], '/') + if idx == -1 { + return ec.FullPath() + } + buf := bufferpool.Get() + // Keep everything after the penultimate separator. + buf.AppendString(ec.File[idx+1:]) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// An Entry represents a complete log message. The entry's structured context +// is already serialized, but the log level, time, message, and call site +// information are available for inspection and modification. +// +// Entries are pooled, so any functions that accept them MUST be careful not to +// retain references to them. +type Entry struct { + Level Level + Time time.Time + LoggerName string + Message string + Caller EntryCaller + Stack string +} + +// CheckWriteAction indicates what action to take after a log entry is +// processed. Actions are ordered in increasing severity. 
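A rough sketch of how a wrapping Core is expected to use CheckedEntry.AddCore from its Check method, following the Core and CheckedEntry contracts in this package; the prefix-filtering core is invented purely for illustration:

package main

import (
	"os"
	"strings"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// prefixCore forwards only entries whose message carries a given prefix.
type prefixCore struct {
	zapcore.Core
	prefix string
}

func (c prefixCore) With(fields []zapcore.Field) zapcore.Core {
	return prefixCore{Core: c.Core.With(fields), prefix: c.prefix}
}

func (c prefixCore) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
	if c.Enabled(ent.Level) && strings.HasPrefix(ent.Message, c.prefix) {
		// Register the wrapper, not the wrapped Core, so Write stays in the path.
		return ce.AddCore(ent, c)
	}
	return ce
}

func main() {
	base := zapcore.NewCore(
		zapcore.NewConsoleEncoder(zapcore.EncoderConfig{
			MessageKey: "msg",
			LineEnding: zapcore.DefaultLineEnding,
		}),
		zapcore.Lock(os.Stdout),
		zap.DebugLevel,
	)
	logger := zap.New(prefixCore{Core: base, prefix: "audit:"})
	logger.Info("audit: user deleted") // forwarded
	logger.Info("routine heartbeat")   // dropped by prefixCore.Check
	logger.Sync()
}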
+type CheckWriteAction uint8 + +const ( + // WriteThenNoop indicates that nothing special needs to be done. It's the + // default behavior. + WriteThenNoop CheckWriteAction = iota + // WriteThenPanic causes a panic after Write. + WriteThenPanic + // WriteThenFatal causes a fatal os.Exit after Write. + WriteThenFatal +) + +// CheckedEntry is an Entry together with a collection of Cores that have +// already agreed to log it. +// +// CheckedEntry references should be created by calling AddCore or Should on a +// nil *CheckedEntry. References are returned to a pool after Write, and MUST +// NOT be retained after calling their Write method. +type CheckedEntry struct { + Entry + ErrorOutput WriteSyncer + dirty bool // best-effort detection of pool misuse + should CheckWriteAction + cores []Core +} + +func (ce *CheckedEntry) reset() { + ce.Entry = Entry{} + ce.ErrorOutput = nil + ce.dirty = false + ce.should = WriteThenNoop + for i := range ce.cores { + // don't keep references to cores + ce.cores[i] = nil + } + ce.cores = ce.cores[:0] +} + +// Write writes the entry to the stored Cores, returns any errors, and returns +// the CheckedEntry reference to a pool for immediate re-use. Finally, it +// executes any required CheckWriteAction. +func (ce *CheckedEntry) Write(fields ...Field) { + if ce == nil { + return + } + + if ce.dirty { + if ce.ErrorOutput != nil { + // Make a best effort to detect unsafe re-use of this CheckedEntry. + // If the entry is dirty, log an internal error; because the + // CheckedEntry is being used after it was returned to the pool, + // the message may be an amalgamation from multiple call sites. + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry) + ce.ErrorOutput.Sync() + } + return + } + ce.dirty = true + + var errs multierror.Error + for i := range ce.cores { + errs = errs.Append(ce.cores[i].Write(ce.Entry, fields)) + } + if ce.ErrorOutput != nil { + if err := errs.AsError(); err != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err) + ce.ErrorOutput.Sync() + } + } + + should, msg := ce.should, ce.Message + putCheckedEntry(ce) + + switch should { + case WriteThenPanic: + panic(msg) + case WriteThenFatal: + exit.Exit() + } +} + +// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be +// used by Core.Check implementations, and is safe to call on nil CheckedEntry +// references. +func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.cores = append(ce.cores, core) + return ce +} + +// Should sets this CheckedEntry's CheckWriteAction, which controls whether a +// Core will panic or fatal after writing this log entry. Like AddCore, it's +// safe to call on nil CheckedEntry references. +func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.should = should + return ce +} diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go new file mode 100644 index 00000000..4f722008 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -0,0 +1,211 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "fmt" + "math" + "reflect" + "time" +) + +// A FieldType indicates which member of the Field union struct should be used +// and how it should be serialized. +type FieldType uint8 + +const ( + // UnknownType is the default field type. Attempting to add it to an encoder will panic. + UnknownType FieldType = iota + // ArrayMarshalerType indicates that the field carries an ArrayMarshaler. + ArrayMarshalerType + // ObjectMarshalerType indicates that the field carries an ObjectMarshaler. + ObjectMarshalerType + // BinaryType indicates that the field carries an opaque binary blob. + BinaryType + // BoolType indicates that the field carries a bool. + BoolType + // ByteStringType indicates that the field carries UTF-8 encoded bytes. + ByteStringType + // Complex128Type indicates that the field carries a complex128. + Complex128Type + // Complex64Type indicates that the field carries a complex128. + Complex64Type + // DurationType indicates that the field carries a time.Duration. + DurationType + // Float64Type indicates that the field carries a float64. + Float64Type + // Float32Type indicates that the field carries a float32. + Float32Type + // Int64Type indicates that the field carries an int64. + Int64Type + // Int32Type indicates that the field carries an int32. + Int32Type + // Int16Type indicates that the field carries an int16. + Int16Type + // Int8Type indicates that the field carries an int8. + Int8Type + // StringType indicates that the field carries a string. + StringType + // TimeType indicates that the field carries a time.Time. + TimeType + // Uint64Type indicates that the field carries a uint64. + Uint64Type + // Uint32Type indicates that the field carries a uint32. + Uint32Type + // Uint16Type indicates that the field carries a uint16. + Uint16Type + // Uint8Type indicates that the field carries a uint8. + Uint8Type + // UintptrType indicates that the field carries a uintptr. + UintptrType + // ReflectType indicates that the field carries an interface{}, which should + // be serialized using reflection. + ReflectType + // NamespaceType signals the beginning of an isolated namespace. All + // subsequent fields should be added to the new namespace. + NamespaceType + // StringerType indicates that the field carries a fmt.Stringer. + StringerType + // ErrorType indicates that the field carries an error. + ErrorType + // SkipType indicates that the field is a no-op. 
+ SkipType +) + +// A Field is a marshaling operation used to add a key-value pair to a logger's +// context. Most fields are lazily marshaled, so it's inexpensive to add fields +// to disabled debug-level log statements. +type Field struct { + Key string + Type FieldType + Integer int64 + String string + Interface interface{} +} + +// AddTo exports a field through the ObjectEncoder interface. It's primarily +// useful to library authors, and shouldn't be necessary in most applications. +func (f Field) AddTo(enc ObjectEncoder) { + var err error + + switch f.Type { + case ArrayMarshalerType: + err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) + case ObjectMarshalerType: + err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case BinaryType: + enc.AddBinary(f.Key, f.Interface.([]byte)) + case BoolType: + enc.AddBool(f.Key, f.Integer == 1) + case ByteStringType: + enc.AddByteString(f.Key, f.Interface.([]byte)) + case Complex128Type: + enc.AddComplex128(f.Key, f.Interface.(complex128)) + case Complex64Type: + enc.AddComplex64(f.Key, f.Interface.(complex64)) + case DurationType: + enc.AddDuration(f.Key, time.Duration(f.Integer)) + case Float64Type: + enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) + case Float32Type: + enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) + case Int64Type: + enc.AddInt64(f.Key, f.Integer) + case Int32Type: + enc.AddInt32(f.Key, int32(f.Integer)) + case Int16Type: + enc.AddInt16(f.Key, int16(f.Integer)) + case Int8Type: + enc.AddInt8(f.Key, int8(f.Integer)) + case StringType: + enc.AddString(f.Key, f.String) + case TimeType: + if f.Interface != nil { + enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) + } else { + // Fall back to UTC if location is nil. + enc.AddTime(f.Key, time.Unix(0, f.Integer)) + } + case Uint64Type: + enc.AddUint64(f.Key, uint64(f.Integer)) + case Uint32Type: + enc.AddUint32(f.Key, uint32(f.Integer)) + case Uint16Type: + enc.AddUint16(f.Key, uint16(f.Integer)) + case Uint8Type: + enc.AddUint8(f.Key, uint8(f.Integer)) + case UintptrType: + enc.AddUintptr(f.Key, uintptr(f.Integer)) + case ReflectType: + err = enc.AddReflected(f.Key, f.Interface) + case NamespaceType: + enc.OpenNamespace(f.Key) + case StringerType: + enc.AddString(f.Key, f.Interface.(fmt.Stringer).String()) + case ErrorType: + val := f.Interface.(error) + basic := val.Error() + enc.AddString(f.Key, basic) + if fancy, ok := val.(fmt.Formatter); ok { + verbose := fmt.Sprintf("%+v", fancy) + if verbose != basic { + // This is a rich error type, like those produced by + // github.com/pkg/errors. + enc.AddString(f.Key+"Verbose", verbose) + } + } + case SkipType: + break + default: + panic(fmt.Sprintf("unknown field type: %v", f)) + } + + if err != nil { + enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error()) + } +} + +// Equals returns whether two fields are equal. For non-primitive types such as +// errors, marshalers, or reflect types, it uses reflect.DeepEqual. 
+func (f Field) Equals(other Field) bool { + if f.Type != other.Type { + return false + } + if f.Key != other.Key { + return false + } + + switch f.Type { + case BinaryType, ByteStringType: + return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte)) + case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType: + return reflect.DeepEqual(f.Interface, other.Interface) + default: + return f == other + } +} + +func addFields(enc ObjectEncoder, fields []Field) { + for i := range fields { + fields[i].AddTo(enc) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go new file mode 100644 index 00000000..dc096a29 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/hook.go @@ -0,0 +1,68 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/zap/internal/multierror" + +type hooked struct { + Core + funcs []func(Entry) error +} + +// RegisterHooks wraps a Core and runs a collection of user-defined callback +// hooks each time a message is logged. Execution of the callbacks is blocking. +// +// This offers users an easy way to register simple callbacks (e.g., metrics +// collection) without implementing the full Core interface. +func RegisterHooks(core Core, hooks ...func(Entry) error) Core { + funcs := append([]func(Entry) error{}, hooks...) + return &hooked{ + Core: core, + funcs: funcs, + } +} + +func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + // Let the wrapped Core decide whether to log this message or not. This + // also gives the downstream a chance to register itself directly with the + // CheckedEntry. + if downstream := h.Core.Check(ent, ce); downstream != nil { + return downstream.AddCore(ent, h) + } + return ce +} + +func (h *hooked) With(fields []Field) Core { + return &hooked{ + Core: h.Core.With(fields), + funcs: h.funcs, + } +} + +func (h *hooked) Write(ent Entry, _ []Field) error { + // Since our downstream had a chance to register itself directly with the + // CheckedMessage, we don't need to call it here. 
+ var errs multierror.Error + for i := range h.funcs { + errs = errs.Append(h.funcs[i](ent)) + } + return errs.AsError() +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go new file mode 100644 index 00000000..7d4a22c8 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -0,0 +1,466 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/base64" + "encoding/json" + "math" + "sync" + "time" + "unicode/utf8" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +// For JSON-escaping; see jsonEncoder.safeAddString below. +const _hex = "0123456789abcdef" + +var _jsonPool = sync.Pool{New: func() interface{} { + return &jsonEncoder{} +}} + +func getJSONEncoder() *jsonEncoder { + return _jsonPool.Get().(*jsonEncoder) +} + +func putJSONEncoder(enc *jsonEncoder) { + enc.EncoderConfig = nil + enc.buf = nil + enc.spaced = false + enc.openNamespaces = 0 + _jsonPool.Put(enc) +} + +type jsonEncoder struct { + *EncoderConfig + buf *buffer.Buffer + spaced bool // include spaces after colons and commas + openNamespaces int +} + +// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder +// appropriately escapes all field keys and values. +// +// Note that the encoder doesn't deduplicate keys, so it's possible to produce +// a message like +// {"foo":"bar","foo":"baz"} +// This is permitted by the JSON specification, but not encouraged. Many +// libraries will ignore duplicate key-value pairs (typically keeping the last +// pair) when unmarshaling, but users should attempt to avoid adding duplicate +// keys. 
+func NewJSONEncoder(cfg EncoderConfig) Encoder { + return newJSONEncoder(cfg, false) +} + +func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + return &jsonEncoder{ + EncoderConfig: &cfg, + buf: bufferpool.Get(), + spaced: spaced, + } +} + +func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *jsonEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *jsonEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *jsonEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *jsonEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *jsonEncoder) AddFloat64(key string, val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *jsonEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + enc.AppendInt64(val) +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + marshaled, err := json.Marshal(obj) + if err != nil { + return err + } + enc.addKey(key) + _, err = enc.buf.Write(marshaled) + return err +} + +func (enc *jsonEncoder) OpenNamespace(key string) { + enc.addKey(key) + enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *jsonEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *jsonEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *jsonEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('[') + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(']') + return err +} + +func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('{') + err := obj.MarshalLogObject(enc) + enc.buf.AppendByte('}') + return err +} + +func (enc *jsonEncoder) AppendBool(val bool) { + enc.addElementSeparator() + enc.buf.AppendBool(val) +} + +func (enc *jsonEncoder) AppendByteString(val []byte) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddByteString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendComplex128(val complex128) { + enc.addElementSeparator() + // Cast to a platform-independent, fixed-size type. + r, i := float64(real(val)), float64(imag(val)) + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. + enc.buf.AppendFloat(r, 64) + enc.buf.AppendByte('+') + enc.buf.AppendFloat(i, 64) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + enc.EncodeDuration(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. 
+ enc.AppendInt64(int64(val)) + } +} + +func (enc *jsonEncoder) AppendInt64(val int64) { + enc.addElementSeparator() + enc.buf.AppendInt(val) +} + +func (enc *jsonEncoder) AppendReflected(val interface{}) error { + marshaled, err := json.Marshal(val) + if err != nil { + return err + } + enc.addElementSeparator() + _, err = enc.buf.Write(marshaled) + return err +} + +func (enc *jsonEncoder) AppendString(val string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + enc.EncodeTime(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. + enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *jsonEncoder) AppendUint64(val uint64) { + enc.addElementSeparator() + enc.buf.AppendUint(val) +} + +func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } +func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } +func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *jsonEncoder) Clone() Encoder { + clone := enc.clone() + clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *jsonEncoder) clone() *jsonEncoder { + clone := getJSONEncoder() + clone.EncoderConfig = enc.EncoderConfig + clone.spaced = enc.spaced + clone.openNamespaces = enc.openNamespaces + clone.buf = bufferpool.Get() + return clone +} + +func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + final := enc.clone() + final.buf.AppendByte('{') + + if final.LevelKey != "" { + final.addKey(final.LevelKey) + cur := final.buf.Len() + final.EncodeLevel(ent.Level, final) + if cur == final.buf.Len() { + // User-supplied EncodeLevel was a no-op. 
Fall back to strings to keep + // output JSON valid. + final.AppendString(ent.Level.String()) + } + } + if final.TimeKey != "" { + final.AddTime(final.TimeKey, ent.Time) + } + if ent.LoggerName != "" && final.NameKey != "" { + final.addKey(final.NameKey) + final.AppendString(ent.LoggerName) + } + if ent.Caller.Defined && final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.MessageKey != "" { + final.addKey(enc.MessageKey) + final.AppendString(ent.Message) + } + if enc.buf.Len() > 0 { + final.addElementSeparator() + final.buf.Write(enc.buf.Bytes()) + } + addFields(final, fields) + final.closeOpenNamespaces() + if ent.Stack != "" && final.StacktraceKey != "" { + final.AddString(final.StacktraceKey, ent.Stack) + } + final.buf.AppendByte('}') + if final.LineEnding != "" { + final.buf.AppendString(final.LineEnding) + } else { + final.buf.AppendString(DefaultLineEnding) + } + + ret := final.buf + putJSONEncoder(final) + return ret, nil +} + +func (enc *jsonEncoder) truncate() { + enc.buf.Reset() +} + +func (enc *jsonEncoder) closeOpenNamespaces() { + for i := 0; i < enc.openNamespaces; i++ { + enc.buf.AppendByte('}') + } +} + +func (enc *jsonEncoder) addKey(key string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(key) + enc.buf.AppendByte('"') + enc.buf.AppendByte(':') + if enc.spaced { + enc.buf.AppendByte(' ') + } +} + +func (enc *jsonEncoder) addElementSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + switch enc.buf.Bytes()[last] { + case '{', '[', ':', ',', ' ': + return + default: + enc.buf.AppendByte(',') + if enc.spaced { + enc.buf.AppendByte(' ') + } + } +} + +func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { + enc.addElementSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +// safeAddString JSON-escapes a string and appends it to the internal buffer. +// Unlike the standard library's encoder, it doesn't attempt to protect the +// user from browser vulnerabilities or JSONP-related problems. +func (enc *jsonEncoder) safeAddString(s string) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRuneInString(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.AppendString(s[i : i+size]) + i += size + } +} + +// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. +func (enc *jsonEncoder) safeAddByteString(s []byte) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRune(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.Write(s[i : i+size]) + i += size + } +} + +// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. 
+func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { + if b >= utf8.RuneSelf { + return false + } + if 0x20 <= b && b != '\\' && b != '"' { + enc.buf.AppendByte(b) + return true + } + switch b { + case '\\', '"': + enc.buf.AppendByte('\\') + enc.buf.AppendByte(b) + case '\n': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('n') + case '\r': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('r') + case '\t': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + enc.buf.AppendString(`\u00`) + enc.buf.AppendByte(_hex[b>>4]) + enc.buf.AppendByte(_hex[b&0xF]) + } + return true +} + +func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { + if r == utf8.RuneError && size == 1 { + enc.buf.AppendString(`\ufffd`) + return true + } + return false +} diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go new file mode 100644 index 00000000..e575c9f4 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -0,0 +1,175 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "errors" + "fmt" +) + +var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level") + +// A Level is a logging priority. Higher levels are more important. +type Level int8 + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel Level = iota - 1 + // InfoLevel is the default logging priority. + InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel + + _minLevel = DebugLevel + _maxLevel = FatalLevel +) + +// String returns a lower-case ASCII representation of the log level. 
+func (l Level) String() string { + switch l { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warn" + case ErrorLevel: + return "error" + case DPanicLevel: + return "dpanic" + case PanicLevel: + return "panic" + case FatalLevel: + return "fatal" + default: + return fmt.Sprintf("Level(%d)", l) + } +} + +// CapitalString returns an all-caps ASCII representation of the log level. +func (l Level) CapitalString() string { + // Printing levels in all-caps is common enough that we should export this + // functionality. + switch l { + case DebugLevel: + return "DEBUG" + case InfoLevel: + return "INFO" + case WarnLevel: + return "WARN" + case ErrorLevel: + return "ERROR" + case DPanicLevel: + return "DPANIC" + case PanicLevel: + return "PANIC" + case FatalLevel: + return "FATAL" + default: + return fmt.Sprintf("LEVEL(%d)", l) + } +} + +// MarshalText marshals the Level to text. Note that the text representation +// drops the -Level suffix (see example). +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText +// expects the text representation of a Level to drop the -Level suffix (see +// example). +// +// In particular, this makes it easy to configure logging levels using YAML, +// TOML, or JSON files. +func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errUnmarshalNilLevel + } + if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { + return fmt.Errorf("unrecognized level: %q", text) + } + return nil +} + +func (l *Level) unmarshalText(text []byte) bool { + switch string(text) { + case "debug", "DEBUG": + *l = DebugLevel + case "info", "INFO", "": // make the zero value useful + *l = InfoLevel + case "warn", "WARN": + *l = WarnLevel + case "error", "ERROR": + *l = ErrorLevel + case "dpanic", "DPANIC": + *l = DPanicLevel + case "panic", "PANIC": + *l = PanicLevel + case "fatal", "FATAL": + *l = FatalLevel + default: + return false + } + return true +} + +// Set sets the level for the flag.Value interface. +func (l *Level) Set(s string) error { + return l.UnmarshalText([]byte(s)) +} + +// Get gets the level for the flag.Getter interface. +func (l *Level) Get() interface{} { + return *l +} + +// Enabled returns true if the given level is at or above this level. +func (l Level) Enabled(lvl Level) bool { + return lvl >= l +} + +// LevelEnabler decides whether a given logging level is enabled when logging a +// message. +// +// Enablers are intended to be used to implement deterministic filters; +// concerns like sampling are better implemented as a Core. +// +// Each concrete Level value implements a static LevelEnabler which returns +// true for itself and all higher logging levels. For example WarnLevel.Enabled() +// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and +// FatalLevel, but return false for InfoLevel and DebugLevel. +type LevelEnabler interface { + Enabled(Level) bool +} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go new file mode 100644 index 00000000..7af8dadc --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level_strings.go @@ -0,0 +1,46 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/zap/internal/color" + +var ( + _levelToColor = map[Level]color.Color{ + DebugLevel: color.Magenta, + InfoLevel: color.Blue, + WarnLevel: color.Yellow, + ErrorLevel: color.Red, + DPanicLevel: color.Red, + PanicLevel: color.Red, + FatalLevel: color.Red, + } + _unknownLevelColor = color.Red + + _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) + _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) +) + +func init() { + for level, color := range _levelToColor { + _levelToLowercaseColorString[level] = color.Add(level.String()) + _levelToCapitalColorString[level] = color.Add(level.CapitalString()) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go new file mode 100644 index 00000000..2627a653 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -0,0 +1,53 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// ObjectMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +type ObjectMarshaler interface { + MarshalLogObject(ObjectEncoder) error +} + +// ObjectMarshalerFunc is a type adapter that turns a function into an +// ObjectMarshaler. 
+type ObjectMarshalerFunc func(ObjectEncoder) error + +// MarshalLogObject calls the underlying function. +func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { + return f(enc) +} + +// ArrayMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +type ArrayMarshaler interface { + MarshalLogArray(ArrayEncoder) error +} + +// ArrayMarshalerFunc is a type adapter that turns a function into an +// ArrayMarshaler. +type ArrayMarshalerFunc func(ArrayEncoder) error + +// MarshalLogArray calls the underlying function. +func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { + return f(enc) +} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go new file mode 100644 index 00000000..4805c8a9 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -0,0 +1,179 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// MapObjectEncoder is an ObjectEncoder backed by a simple +// map[string]interface{}. It's not fast enough for production use, but it's +// helpful in tests. +type MapObjectEncoder struct { + // Fields contains the entire encoded log context. + Fields map[string]interface{} + // cur is a pointer to the namespace we're currently writing to. + cur map[string]interface{} +} + +// NewMapObjectEncoder creates a new map-backed ObjectEncoder. +func NewMapObjectEncoder() *MapObjectEncoder { + m := make(map[string]interface{}) + return &MapObjectEncoder{ + Fields: m, + cur: m, + } +} + +// AddArray implements ObjectEncoder. +func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { + arr := &sliceArrayEncoder{} + err := v.MarshalLogArray(arr) + m.cur[key] = arr.elems + return err +} + +// AddObject implements ObjectEncoder. +func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { + newMap := NewMapObjectEncoder() + m.cur[k] = newMap.Fields + return v.MarshalLogObject(newMap) +} + +// AddBinary implements ObjectEncoder. +func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } + +// AddByteString implements ObjectEncoder. +func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = v } + +// AddBool implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } + +// AddDuration implements ObjectEncoder. +func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } + +// AddComplex128 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } + +// AddComplex64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } + +// AddFloat64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } + +// AddFloat32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } + +// AddInt implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } + +// AddInt64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } + +// AddInt32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } + +// AddInt16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } + +// AddInt8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } + +// AddString implements ObjectEncoder. +func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } + +// AddTime implements ObjectEncoder. +func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } + +// AddUint implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } + +// AddUint64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } + +// AddUint32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } + +// AddUint16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } + +// AddUint8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } + +// AddUintptr implements ObjectEncoder. +func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } + +// AddReflected implements ObjectEncoder. +func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { + m.cur[k] = v + return nil +} + +// OpenNamespace implements ObjectEncoder. +func (m *MapObjectEncoder) OpenNamespace(k string) { + ns := make(map[string]interface{}) + m.cur[k] = ns + m.cur = ns +} + +// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like +// the MapObjectEncoder, it's not designed for production use. 
+type sliceArrayEncoder struct { + elems []interface{} +} + +func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { + enc := &sliceArrayEncoder{} + err := v.MarshalLogArray(enc) + s.elems = append(s.elems, enc.elems) + return err +} + +func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { + m := NewMapObjectEncoder() + err := v.MarshalLogObject(m) + s.elems = append(s.elems, m.Fields) + return err +} + +func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { + s.elems = append(s.elems, v) + return nil +} + +func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go new file mode 100644 index 00000000..e3164186 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -0,0 +1,134 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "time" + + "go.uber.org/atomic" +) + +const ( + _numLevels = _maxLevel - _minLevel + 1 + _countersPerLevel = 4096 +) + +type counter struct { + resetAt atomic.Int64 + counter atomic.Uint64 +} + +type counters [_numLevels][_countersPerLevel]counter + +func newCounters() *counters { + return &counters{} +} + +func (cs *counters) get(lvl Level, key string) *counter { + i := lvl - _minLevel + j := fnv32a(key) % _countersPerLevel + return &cs[i][j] +} + +// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc +func fnv32a(s string) uint32 { + const ( + offset32 = 2166136261 + prime32 = 16777619 + ) + hash := uint32(offset32) + for i := 0; i < len(s); i++ { + hash ^= uint32(s[i]) + hash *= prime32 + } + return hash +} + +func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { + tn := t.UnixNano() + resetAfter := c.resetAt.Load() + if resetAfter > tn { + return c.counter.Inc() + } + + c.counter.Store(1) + + newResetAfter := tn + tick.Nanoseconds() + if !c.resetAt.CAS(resetAfter, newResetAfter) { + // We raced with another goroutine trying to reset, and it also reset + // the counter to 1, so we need to reincrement the counter. + return c.counter.Inc() + } + + return 1 +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 +} + +// NewSampler creates a Core that samples incoming entries, which caps the CPU +// and I/O load of logging while attempting to preserve a representative subset +// of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return &sampler{ + Core: core, + tick: tick, + counts: newCounters(), + first: uint64(first), + thereafter: uint64(thereafter), + } +} + +func (s *sampler) With(fields []Field) Core { + return &sampler{ + Core: s.Core.With(fields), + tick: s.tick, + counts: s.counts, + first: s.first, + thereafter: s.thereafter, + } +} + +func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !s.Enabled(ent.Level) { + return ce + } + + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (n-s.first)%s.thereafter != 0 { + return ce + } + return s.Core.Check(ent, ce) +} diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go new file mode 100644 index 00000000..3a86533a --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/tee.go @@ -0,0 +1,81 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/zap/internal/multierror" + +type multiCore []Core + +// NewTee creates a Core that duplicates log entries into two or more +// underlying Cores. +// +// Calling it with a single Core returns the input unchanged, and calling +// it with no input returns a no-op Core. +func NewTee(cores ...Core) Core { + switch len(cores) { + case 0: + return NewNopCore() + case 1: + return cores[0] + default: + return multiCore(cores) + } +} + +func (mc multiCore) With(fields []Field) Core { + clone := make(multiCore, len(mc)) + for i := range mc { + clone[i] = mc[i].With(fields) + } + return clone +} + +func (mc multiCore) Enabled(lvl Level) bool { + for i := range mc { + if mc[i].Enabled(lvl) { + return true + } + } + return false +} + +func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + for i := range mc { + ce = mc[i].Check(ent, ce) + } + return ce +} + +func (mc multiCore) Write(ent Entry, fields []Field) error { + var errs multierror.Error + for i := range mc { + errs = errs.Append(mc[i].Write(ent, fields)) + } + return errs.AsError() +} + +func (mc multiCore) Sync() error { + var errs multierror.Error + for i := range mc { + errs = errs.Append(mc[i].Sync()) + } + return errs.AsError() +} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go new file mode 100644 index 00000000..2a923764 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -0,0 +1,123 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "io" + "sync" + + "go.uber.org/zap/internal/multierror" +) + +// A WriteSyncer is an io.Writer that can also flush any buffered data. Note +// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. +type WriteSyncer interface { + io.Writer + Sync() error +} + +// AddSync converts an io.Writer to a WriteSyncer. It attempts to be +// intelligent: if the concrete type of the io.Writer implements WriteSyncer, +// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. +func AddSync(w io.Writer) WriteSyncer { + switch w := w.(type) { + case WriteSyncer: + return w + default: + return writerWrapper{w} + } +} + +type lockedWriteSyncer struct { + sync.Mutex + ws WriteSyncer +} + +// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In +// particular, *os.Files must be locked before use. +func Lock(ws WriteSyncer) WriteSyncer { + if _, ok := ws.(*lockedWriteSyncer); ok { + // no need to layer on another lock + return ws + } + return &lockedWriteSyncer{ws: ws} +} + +func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { + s.Lock() + n, err := s.ws.Write(bs) + s.Unlock() + return n, err +} + +func (s *lockedWriteSyncer) Sync() error { + s.Lock() + err := s.ws.Sync() + s.Unlock() + return err +} + +type writerWrapper struct { + io.Writer +} + +func (w writerWrapper) Sync() error { + return nil +} + +type multiWriteSyncer []WriteSyncer + +// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes +// and sync calls, much like io.MultiWriter. +func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { + if len(ws) == 1 { + return ws[0] + } + // Copy to protect against https://github.com/golang/go/issues/7809 + return multiWriteSyncer(append([]WriteSyncer(nil), ws...)) +} + +// See https://golang.org/src/io/multi.go +// When not all underlying syncers write the same number of bytes, +// the smallest number is returned even though Write() is called on +// all of them. 
+func (ws multiWriteSyncer) Write(p []byte) (int, error) {
+	var errs multierror.Error
+	nWritten := 0
+	for _, w := range ws {
+		n, err := w.Write(p)
+		errs = errs.Append(err)
+		if nWritten == 0 && n != 0 {
+			nWritten = n
+		} else if n < nWritten {
+			nWritten = n
+		}
+	}
+	return nWritten, errs.AsError()
+}
+
+func (ws multiWriteSyncer) Sync() error {
+	var errs multierror.Error
+	for _, w := range ws {
+		errs = errs.Append(w.Sync())
+	}
+	return errs.AsError()
+}

From 8778916f0a9421fe6d09f7218501e014100332a8 Mon Sep 17 00:00:00 2001
From: Rohith
Date: Mon, 12 Jun 2017 14:01:55 +0100
Subject: [PATCH 3/3] - updated the changelog to reflect the changes

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1c620dee..8f3683e2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,7 @@ FEATURES
 * added the --enable-encrypted-token option to enable encrypting the access token
 * added the --skip-client-id option to permit skipping the verification of the audience against the client in the token [#PR236](https://github.com/gambol99/keycloak-proxy/pull/236)
 * updated the base image to alpine 3.6 in commit [0fdebaf821](https://github.com/gambol99/keycloak-proxy/pull/236/commits/0fdebaf8215e9480896f01ec7ab2ef7caa242da1)
+* moved to zap for logging [#PR237](https://github.com/gambol99/keycloak-proxy/pull/237)

 BREAKING CHANGES:
 * the proxy no longer uses prefixes for resources, if you wish to use wildcard urls you need
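The vendored files above supply the building blocks that the zap package assembles into a logger: the JSON encoder (json_encoder.go), locked write syncers (write_syncer.go), leveled cores and the sampler (sampler.go). The standalone sketch below is not part of the patch; it only illustrates how these pieces are typically composed into a JSON-encoding, sampled *zap.Logger, assuming the public zap API (zap.NewProductionEncoderConfig, zapcore.NewCore and zap.New live in the zap package proper rather than in the files shown here).

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// JSON encoder using zap's production defaults (see json_encoder.go above).
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())

	// Lock stderr so concurrent writes are safe (see write_syncer.go above).
	ws := zapcore.Lock(os.Stderr)

	// A core that logs info and above, capped by the sampler documented in
	// sampler.go: the first 100 identical messages per second pass through,
	// then every 100th.
	core := zapcore.NewCore(enc, ws, zapcore.InfoLevel)
	core = zapcore.NewSampler(core, time.Second, 100, 100)

	logger := zap.New(core)
	defer logger.Sync()

	// Message and field values here are illustrative only.
	logger.Info("upstream request proxied",
		zap.String("client_ip", "10.0.0.1"),
		zap.Duration("latency", 42*time.Millisecond))
}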
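hook.go above suggests registering simple callbacks, e.g. for metrics collection, instead of implementing a full Core. A hedged sketch of that pattern follows; the counter and the log message are illustrative only, not part of the patch.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var errorCount int

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stderr),
		zapcore.InfoLevel,
	)

	// RegisterHooks (hook.go above) runs the callback for every entry the
	// wrapped core accepts - a lightweight way to count errors without a
	// custom Core implementation.
	core = zapcore.RegisterHooks(core, func(e zapcore.Entry) error {
		if e.Level >= zapcore.ErrorLevel {
			errorCount++ // e.g. bump a metrics counter here
		}
		return nil
	})

	logger := zap.New(core)
	logger.Error("upstream request failed")
	_ = errorCount // 1 after the call above
}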
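level.go above also makes log levels parseable from text (UnmarshalText, plus Set/Get for the flag package), which is what lets a level such as "warn" come straight from a YAML/JSON config value or a command-line flag. A small illustrative sketch, again separate from the patch:

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	// Parse a level from its text form, as a config loader or flag would.
	var lvl zapcore.Level
	if err := lvl.UnmarshalText([]byte("warn")); err != nil {
		panic(err)
	}

	fmt.Println(lvl)                             // warn
	fmt.Println(lvl.Enabled(zapcore.ErrorLevel)) // true: error is at or above warn
	fmt.Println(lvl.Enabled(zapcore.InfoLevel))  // false: info is below warn
}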