diff --git a/Gopkg.lock b/Gopkg.lock index a95fe4c194..5faa689b17 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -44,14 +44,6 @@ revision = "f7be6aa2bc7b2e38edf816b08b582782194a1c02" version = "v1.16.0" -[[projects]] - digest = "1:e38e4b563c03b533cc4f41bafbc97de9dd5aeb81263ef84ceab9b9589a0b7390" - name = "github.com/Sirupsen/logrus" - packages = ["."] - pruneopts = "NUT" - revision = "55eb11d21d2a31a3cc93838241d04800f52e823d" - version = "v0.7.3" - [[projects]] digest = "1:a1b2cc5ad53b08fbfb4dfe962a2e145325acb4b874ea44beb02d4b5b158c2079" name = "github.com/Unknwon/com" @@ -750,6 +742,14 @@ revision = "1744e2970ca51c86172c8190fadad617561ed6e7" version = "v1.0.0" +[[projects]] + digest = "1:b2339e83ce9b5c4f79405f949429a7f68a9a904fed903c672aac1e7ceb7f5f02" + name = "github.com/sirupsen/logrus" + packages = ["."] + pruneopts = "NUT" + revision = "3e01752db0189b9157070a0e1668a620f9a85da2" + version = "v1.0.6" + [[projects]] digest = "1:1f0b284a6858827de4c27c66b49b2b25df3e16b031c2b57b7892273131e7dd2b" name = "github.com/smartystreets/assertions" @@ -835,6 +835,14 @@ source = "https://github.com/jaegertracing/jaeger-lib.git" version = "v1.2.1" +[[projects]] + branch = "master" + digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + pruneopts = "NUT" + revision = "0e37d006457bf46f9e6692014ba72ef82c33022c" + [[projects]] digest = "1:e182379b28b5ab095534a23a80a2020d082da6d4421102ea5ed950499790baf2" name = "golang.org/x/net" @@ -977,7 +985,6 @@ "github.com/Dieterbe/artisanalhistogram/hist15s", "github.com/Dieterbe/profiletrigger/heap", "github.com/Shopify/sarama", - "github.com/Sirupsen/logrus", "github.com/alyu/configparser", "github.com/bitly/go-hostpool", "github.com/davecgh/go-spew/spew", @@ -1020,6 +1027,7 @@ "github.com/rakyll/globalconf", "github.com/rs/cors", "github.com/sergi/go-diff/diffmatchpatch", + "github.com/sirupsen/logrus", "github.com/smartystreets/goconvey/convey", 
"github.com/syndtr/goleveldb/leveldb", "github.com/syndtr/goleveldb/leveldb/opt", diff --git a/Gopkg.toml b/Gopkg.toml index 017e9e866f..9d16c7b88d 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -36,10 +36,6 @@ unused-packages = true name = "github.com/Shopify/sarama" version = "1.10.1" -[[constraint]] - name = "github.com/Sirupsen/logrus" - version = "0.7.3" - [[constraint]] name = "github.com/alyu/configparser" branch = "master" @@ -144,6 +140,10 @@ unused-packages = true name = "github.com/sergi/go-diff" version = "v1.0.0" +[[constraint]] + name = "github.com/sirupsen/logrus" + version = "1.0.6" + [[constraint]] name = "github.com/smartystreets/goconvey" branch = "master" diff --git a/cluster/config.go b/cluster/config.go index 3f46808bf5..006aa542b6 100644 --- a/cluster/config.go +++ b/cluster/config.go @@ -7,8 +7,8 @@ import ( "net/http" "time" - "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" + log "github.com/sirupsen/logrus" ) var ( @@ -79,13 +79,13 @@ func ConfigProcess() { // check settings in cluster section if !validMode(mode) { - log.Fatal(4, "CLU Config: invalid cluster operating mode") + log.Fatal("CLU Config: invalid cluster operating mode") } Mode = ModeType(mode) if httpTimeout == 0 { - log.Fatal(4, "CLU Config: http-timeout must be a non-zero duration string like 60s") + log.Fatal("CLU Config: http-timeout must be a non-zero duration string, i.e. 
60s") } transport = &http.Transport{ @@ -109,14 +109,16 @@ func ConfigProcess() { // check settings in swim section if swimUseConfig != "manual" && swimUseConfig != "default-lan" && swimUseConfig != "default-local" && swimUseConfig != "default-wan" { - log.Fatal(4, "CLU Config: invalid swim-use-config setting") + log.Fatal("CLU Config: invalid swim-use-config setting") } if swimUseConfig == "manual" { var err error swimBindAddr, err = net.ResolveTCPAddr("tcp", swimBindAddrStr) if err != nil { - log.Fatal(4, "CLU Config: swim-bind-addr is not a valid TCP address: %s", err.Error()) + log.WithFields(log.Fields{ + "swim.bind.addr": err.Error(), + }).Fatal("CLU Config: swim-bind-addr is not a valid TCP address") } } } diff --git a/cluster/manager.go b/cluster/manager.go index 7b6e4895b0..244f5d977f 100644 --- a/cluster/manager.go +++ b/cluster/manager.go @@ -10,7 +10,7 @@ import ( "github.com/grafana/metrictank/stats" "github.com/hashicorp/memberlist" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) var ( @@ -106,7 +106,7 @@ func NewMemberlistManager(thisNode HTTPNode) *MemberlistManager { case "default-wan": mgr.cfg = memberlist.DefaultWANConfig() default: - panic("invalid swimUseConfig. should already have been validated") + log.Panic("invalid swimUseConfig. 
should already have been validated") } mgr.cfg.Events = mgr mgr.cfg.Delegate = mgr @@ -118,10 +118,15 @@ } func (c *MemberlistManager) Start() { - log.Info("CLU Start: Starting cluster on %s:%d", c.cfg.BindAddr, c.cfg.BindPort) + log.WithFields(log.Fields{ + "bind.addr": c.cfg.BindAddr, + "bind.port": c.cfg.BindPort, + }).Info("CLU Start: Starting cluster") list, err := memberlist.Create(c.cfg) if err != nil { - log.Fatal(4, "CLU Start: Failed to create memberlist: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("CLU Start: Failed to create memberlist") } c.setList(list) @@ -130,9 +135,13 @@ } n, err := list.Join(strings.Split(peersStr, ",")) if err != nil { - log.Fatal(4, "CLU Start: Failed to join cluster: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("CLU Start: Failed to join cluster") } - log.Info("CLU Start: joined to %d nodes in cluster", n) + log.WithFields(log.Fields{ + "num.nodes": n, + }).Info("CLU Start: joined to nodes in cluster") } func (c *MemberlistManager) setList(list *memberlist.Memberlist) { @@ -214,11 +223,17 @@ func (c *MemberlistManager) NotifyJoin(node *memberlist.Node) { if len(node.Meta) == 0 { return } - log.Info("CLU manager: HTTPNode %s with address %s has joined the cluster", node.Name, node.Addr.String()) + log.WithFields(log.Fields{ + "node.name": node.Name, + "http.addr": node.Addr.String(), + }).Info("CLU manager: HTTPNode has joined the cluster") member := HTTPNode{} err := json.Unmarshal(node.Meta, &member) if err != nil { - log.Error(3, "CLU manager: Failed to decode node meta from %s: %s", node.Name, err.Error()) + log.WithFields(log.Fields{ + "node.name": node.Name, + "error": err.Error(), + }).Error("CLU manager: Failed to decode node meta") unmarshalErrJoin.Inc() return } @@ -234,7 +249,9 @@ func (c *MemberlistManager) NotifyLeave(node *memberlist.Node)
{ eventsLeave.Inc() c.Lock() defer c.Unlock() - log.Info("CLU manager: HTTPNode %s has left the cluster", node.Name) + log.WithFields(log.Fields{ + "node.name": node.Name, + }).Info("CLU manager: HTTPNode has left the cluster") delete(c.members, node.Name) c.clusterStats() } @@ -249,7 +266,10 @@ func (c *MemberlistManager) NotifyUpdate(node *memberlist.Node) { member := HTTPNode{} err := json.Unmarshal(node.Meta, &member) if err != nil { - log.Error(3, "CLU manager: Failed to decode node meta from %s: %s", node.Name, err.Error()) + log.WithFields(log.Fields{ + "node.name": node.Name, + "error": err.Error(), + }).Error("CLU manager: Failed to decode node meta") unmarshalErrUpdate.Inc() // if the node is known, lets mark it as notReady until it starts sending valid data again. if p, ok := c.members[node.Name]; ok { @@ -266,7 +286,11 @@ func (c *MemberlistManager) NotifyUpdate(node *memberlist.Node) { member.local = true } c.members[node.Name] = member - log.Info("CLU manager: HTTPNode %s at %s has been updated - %s", node.Name, node.Addr.String(), node.Meta) + log.WithFields(log.Fields{ + "node.name": node.Name, + "node.addr": node.Addr.String(), + "node.meta": node.Meta, + }).Info("CLU manager: HTTPNode has been updated") c.clusterStats() } @@ -286,7 +310,9 @@ func (c *MemberlistManager) NodeMeta(limit int) []byte { meta, err := json.Marshal(c.members[c.nodeName]) c.RUnlock() if err != nil { - log.Fatal(4, "CLU manager: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("CLU manager") } return meta } diff --git a/cluster/node.go b/cluster/node.go index b207c58604..45d284d91d 100644 --- a/cluster/node.go +++ b/cluster/node.go @@ -13,7 +13,7 @@ import ( "github.com/grafana/metrictank/tracing" opentracing "github.com/opentracing/opentracing-go" tags "github.com/opentracing/opentracing-go/ext" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) //go:generate stringer -type=NodeState @@ -157,7 +157,9 @@ func 
(n HTTPNode) Post(ctx context.Context, name, path string, body Traceable) ( carrier := opentracing.HTTPHeadersCarrier(req.Header) err = Tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier) if err != nil { - log.Error(3, "CLU failed to inject span into headers: %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("CLU failed to inject span into headers") } req.Header.Add("Content-Type", "application/json") @@ -178,7 +180,9 @@ func (n HTTPNode) Post(ctx context.Context, name, path string, body Traceable) ( // then abort the http request. select { case <-ctx.Done(): - log.Debug("CLU HTTPNode: context canceled. terminating request to peer %s", n.Name) + log.WithFields(log.Fields{ + "peer.name": n.Name, + }).Debug("CLU HTTPNode: context canceled. terminating request to peer") transport.CancelRequest(req) <-c // Wait for client.Do but ignore result case resp := <-c: @@ -186,7 +190,10 @@ func (n HTTPNode) Post(ctx context.Context, name, path string, body Traceable) ( rsp := resp.r if err != nil { tags.Error.Set(span, true) - log.Error(3, "CLU HTTPNode: error trying to talk to peer %s: %s", n.Name, err.Error()) + log.WithFields(log.Fields{ + "peer.name": n.Name, + "error": err.Error(), + }).Error("CLU HTTPNode: error trying to talk to peer") return nil, NewError(http.StatusServiceUnavailable, errors.New("error trying to talk to peer")) } return handleResp(rsp) diff --git a/cmd/metrictank/metrictank.go b/cmd/metrictank/metrictank.go index 05a94e51fd..4bfcec1f6d 100644 --- a/cmd/metrictank/metrictank.go +++ b/cmd/metrictank/metrictank.go @@ -27,6 +27,7 @@ import ( inCarbon "github.com/grafana/metrictank/input/carbon" inKafkaMdm "github.com/grafana/metrictank/input/kafkamdm" inPrometheus "github.com/grafana/metrictank/input/prometheus" + "github.com/grafana/metrictank/logger" "github.com/grafana/metrictank/mdata" "github.com/grafana/metrictank/mdata/cache" "github.com/grafana/metrictank/mdata/notifierKafka" @@ -35,8 +36,8 @@ import ( 
statsConfig "github.com/grafana/metrictank/stats/config" cassandraStore "github.com/grafana/metrictank/store/cassandra" "github.com/raintank/dur" - "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" + log "github.com/sirupsen/logrus" ) var ( @@ -79,7 +80,7 @@ var ( ) func init() { - flag.IntVar(&logLevel, "log-level", 2, "log level. 0=TRACE|1=DEBUG|2=INFO|3=WARN|4=ERROR|5=CRITICAL|6=FATAL") + flag.IntVar(&logLevel, "log-level", 4, "log level. 0=PANIC|1=FATAL|2=ERROR|3=WARN|4=INFO|5=DEBUG") } func main() { @@ -139,7 +140,11 @@ func main() { Set up Logger ***********************************/ - log.NewLogger(0, "console", fmt.Sprintf(`{"level": %d, "formatting":false}`, logLevel)) + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) mdata.LogLevel = logLevel memory.LogLevel = logLevel @@ -148,26 +153,26 @@ func main() { // workaround for https://github.com/grafana/grafana/issues/4055 switch logLevel { case 0: - log.Level(log.TRACE) + log.SetLevel(log.PanicLevel) case 1: - log.Level(log.DEBUG) + log.SetLevel(log.FatalLevel) case 2: - log.Level(log.INFO) + log.SetLevel(log.ErrorLevel) case 3: - log.Level(log.WARN) + log.SetLevel(log.WarnLevel) case 4: - log.Level(log.ERROR) + log.SetLevel(log.InfoLevel) case 5: - log.Level(log.CRITICAL) - case 6: - log.Level(log.FATAL) + log.SetLevel(log.DebugLevel) + default: + log.SetLevel(log.InfoLevel) } /*********************************** Validate settings needed for clustering ***********************************/ if *instance == "" { - log.Fatal(4, "instance can't be empty") + log.Fatal("instance can't be empty") } /*********************************** @@ -182,7 +187,9 @@ func main() { addrParts := strings.Split(api.Addr, ":") port, err := strconv.ParseInt(addrParts[len(addrParts)-1], 10, 64) if err != nil { - log.Fatal(4, "Could not parse port from listenAddr. 
%s", api.Addr) + log.WithFields(log.Fields{ + "listen.address": api.Addr, + }).Fatal("could not parse port from listen address") } cluster.Init(*instance, gitHash, startupTime, scheme, int(port)) @@ -198,7 +205,7 @@ func main() { mdata.ConfigProcess() if !inCarbon.Enabled && !inKafkaMdm.Enabled && !inPrometheus.Enabled { - log.Fatal(4, "you should enable at least 1 input plugin") + log.Fatal("you should enable at least 1 input plugin") } sec := dur.MustParseNDuration("warm-up-period", *warmUpPeriodStr) @@ -215,7 +222,9 @@ func main() { trigger, _ := heap.New(*proftrigPath, *proftrigHeapThresh, proftrigMinDiff, time.Duration(proftrigFreq)*time.Second, errors) go func() { for e := range errors { - log.Error(0, "profiletrigger heap: %s", e) + log.WithFields(log.Fields{ + "error": e.Error(), + }).Error("profiletrigger heap") } }() go trigger.Run() @@ -236,7 +245,10 @@ func main() { /*********************************** Report Version ***********************************/ - log.Info("Metrictank starting. Built from %s - Go version %s", gitHash, runtime.Version()) + log.WithFields(log.Fields{ + "build.git.hash": gitHash, + "go.version": runtime.Version(), + }).Info("metrictank starting") // metric version.%s is the version of metrictank running. 
The metric value is always 1 mtVersion := stats.NewBool(fmt.Sprintf("version.%s", strings.Replace(gitHash, ".", "_", -1))) mtVersion.Set(true) @@ -257,14 +269,18 @@ func main() { for _, tagSpec := range tagSpecs { split := strings.Split(tagSpec, ":") if len(split) != 2 { - log.Fatal(4, "cannot parse tracing-add-tags value %q", tagSpec) + log.WithFields(log.Fields{ + "value": tagSpec, + }).Fatal("cannot parse tracing-add-tags value") } tags[split[0]] = split[1] } } tracer, traceCloser, err := conf.GetTracer(*tracingEnabled, *tracingAddr, tags) if err != nil { - log.Fatal(4, "Could not initialize jaeger tracer: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("could not initialize jaeger tracer") } defer traceCloser.Close() @@ -273,7 +289,9 @@ func main() { ***********************************/ store, err = cassandraStore.NewCassandraStore(cassandraStore.CliConfig, mdata.TTLs()) if err != nil { - log.Fatal(4, "failed to initialize cassandra. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to initialize cassandra") } store.SetTracer(tracer) @@ -306,7 +324,11 @@ func main() { } if cluster.Mode == cluster.ModeMulti && len(inputs) > 1 { - log.Warn("It is not recommended to run a multi-node cluster with more than 1 input plugin.") + log.WithFields(log.Fields{ + "carbon.enabled": inCarbon.Enabled, + "prometheus.enabled": inPrometheus.Enabled, + "kafkamdm.enabled": inKafkaMdm.Enabled, + }).Warn("it is not recommended to run a multi-node cluster with more than 1 input plugin") } /*********************************** @@ -320,26 +342,26 @@ func main() { pre := time.Now() if *publicOrg < 0 { - log.Fatal(4, "public-org cannot be <0") + log.Fatal("public-org cannot be < 0") } idx.OrgIdPublic = uint32(*publicOrg) if memory.Enabled { if metricIndex != nil { - log.Fatal(4, "Only 1 metricIndex handler can be enabled.") + log.Fatal("only 1 metricIndex handler can be enabled") } metricIndex = memory.New() } if 
cassandra.Enabled { if metricIndex != nil { - log.Fatal(4, "Only 1 metricIndex handler can be enabled.") + log.Fatal("only 1 metricIndex handler can be enabled") } metricIndex = cassandra.New() } if metricIndex == nil { - log.Fatal(4, "No metricIndex handlers enabled.") + log.Fatal("no metricIndex handlers enabled") } /*********************************** @@ -347,7 +369,9 @@ func main() { ***********************************/ apiServer, err = api.NewServer() if err != nil { - log.Fatal(4, "Failed to start API. %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to start API") } apiServer.BindMetricIndex(metricIndex) @@ -364,9 +388,13 @@ func main() { ***********************************/ err = metricIndex.Init() if err != nil { - log.Fatal(4, "failed to initialize metricIndex: %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to initialize metricIndex") } - log.Info("metricIndex initialized in %s. starting data consumption", time.Now().Sub(pre)) + log.WithFields(log.Fields{ + "initialization.duration": time.Now().Sub(pre), + }).Info("metricIndex initialized, starting data consumption") /*********************************** Initialize MetricPersist notifiers @@ -423,9 +451,11 @@ func main() { ***********************************/ select { case sig := <-sigChan: - log.Info("Received signal %q. Shutting down", sig) + log.WithFields(log.Fields{ + "signal": sig, + }).Info("received signal. shutting down") case <-pluginFatal: - log.Info("An input plugin signalled a fatal error. Shutting down") + log.Info("an input plugin signalled a fatal error. 
shutting down") } shutdown() } @@ -445,9 +475,13 @@ func shutdown() { for _, plugin := range inputs { wg.Add(1) go func(plugin input.Plugin) { - log.Info("Shutting down %s consumer", plugin.Name()) + log.WithFields(log.Fields{ + "plugin.name": plugin.Name(), + }).Info("shutting down consumer") plugin.Stop() - log.Info("%s consumer finished shutdown", plugin.Name()) + log.WithFields(log.Fields{ + "plugin.name": plugin.Name(), + }).Info("consumer finished shutdown") wg.Done() }(plugin) } @@ -458,7 +492,7 @@ func shutdown() { }() select { case <-timer.C: - log.Warn("Plugins taking too long to shutdown, not waiting any longer.") + log.Warn("plugins taking too long to shutdown, not waiting any longer") case <-pluginsStopped: timer.Stop() } @@ -466,6 +500,5 @@ func shutdown() { log.Info("closing store") store.Stop() metricIndex.Stop() - log.Info("terminating.") - log.Close() + log.Info("terminating") } diff --git a/cmd/mt-aggs-explain/main.go b/cmd/mt-aggs-explain/main.go index e41b9264c8..a9e6a001ba 100644 --- a/cmd/mt-aggs-explain/main.go +++ b/cmd/mt-aggs-explain/main.go @@ -6,9 +6,10 @@ import ( "os" "runtime" - log "github.com/Sirupsen/logrus" "github.com/grafana/metrictank/conf" "github.com/grafana/metrictank/consolidation" + "github.com/grafana/metrictank/logger" + log "github.com/sirupsen/logrus" ) var ( @@ -18,6 +19,13 @@ var ( ) func main() { + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) + log.SetLevel(log.InfoLevel) + flag.Usage = func() { fmt.Println("mt-aggs-explain") fmt.Println() @@ -45,7 +53,10 @@ func main() { } aggs, err := conf.ReadAggregations(aggsFile) if err != nil { - log.Fatalf("can't read aggregations file %q: %s", aggsFile, err.Error()) + log.WithFields(log.Fields{ + "file": aggsFile, + "error": err.Error(), + }).Fatal("can't read aggregations file") } if *metric != "" { diff --git a/cmd/mt-explain/main.go b/cmd/mt-explain/main.go 
index 95aeaab176..753351b81d 100644 --- a/cmd/mt-explain/main.go +++ b/cmd/mt-explain/main.go @@ -3,12 +3,13 @@ package main import ( "flag" "fmt" - "log" "os" "time" "github.com/grafana/metrictank/expr" + "github.com/grafana/metrictank/logger" "github.com/raintank/dur" + log "github.com/sirupsen/logrus" ) func main() { @@ -18,6 +19,13 @@ func main() { mdp := flag.Int("mdp", 800, "max data points to return") timeZoneStr := flag.String("time-zone", "local", "time-zone to use for interpreting from/to when needed. (check your config)") + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) + log.SetLevel(log.InfoLevel) + flag.Usage = func() { fmt.Println("mt-explain") fmt.Println("Explains the execution plan for a given query / set of targets") @@ -32,7 +40,6 @@ func main() { flag.Parse() if flag.NArg() == 0 { log.Fatal("no target specified") - os.Exit(-1) } targets := flag.Args() @@ -44,7 +51,10 @@ func main() { var err error loc, err = time.LoadLocation(*timeZoneStr) if err != nil { - log.Fatal(err) + log.WithFields(log.Fields{ + "error": err.Error(), + "time.zone": *timeZoneStr, + }).Fatal("failed to load time zone") } } @@ -54,28 +64,38 @@ func main() { fromUnix, err := dur.ParseDateTime(*from, loc, now, defaultFrom) if err != nil { - log.Fatal(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to parse date and time") } toUnix, err := dur.ParseDateTime(*to, loc, now, defaultTo) if err != nil { - log.Fatal(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to parse date and time") } exps, err := expr.ParseMany(targets) if err != nil { - fmt.Println("Error while parsing:", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("error while parsing") return } plan, err := expr.NewPlan(exps, fromUnix, toUnix, uint32(*mdp), *stable, nil) if err != nil { if fun, ok := err.(expr.ErrUnknownFunction); 
ok { - fmt.Printf("Unsupported function %q: must defer query to graphite\n", string(fun)) + log.WithFields(log.Fields{ + "function": string(fun), + }).Info("unsupported function, must defer query to graphite") plan.Dump(os.Stdout) return } - fmt.Println("Error while planning", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("error while planning") return } plan.Dump(os.Stdout) diff --git a/cmd/mt-index-cat/main.go b/cmd/mt-index-cat/main.go index 6e1d1e69c6..b96943b4cb 100644 --- a/cmd/mt-index-cat/main.go +++ b/cmd/mt-index-cat/main.go @@ -3,7 +3,6 @@ package main import ( "flag" "fmt" - "log" "os" "strconv" "strings" @@ -11,8 +10,10 @@ import ( "github.com/grafana/metrictank/cmd/mt-index-cat/out" "github.com/grafana/metrictank/idx/cassandra" + "github.com/grafana/metrictank/logger" "github.com/raintank/dur" "github.com/raintank/schema" + log "github.com/sirupsen/logrus" ) func perror(err error) { @@ -48,6 +49,13 @@ func main() { outputs := []string{"dump", "list", "vegeta-render", "vegeta-render-patterns"} + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) + log.SetLevel(log.InfoLevel) + flag.Usage = func() { fmt.Println("mt-index-cat") fmt.Println() @@ -106,7 +114,9 @@ func main() { } } if !found { - log.Printf("invalid output %q", format) + log.WithFields(log.Fields{ + "format": format, + }).Print("invalid output") flag.Usage() os.Exit(-1) } @@ -117,13 +127,13 @@ func main() { } } if cassI == 0 { - log.Println("only indextype 'cass' supported") + log.Info("only index type 'cass' supported") flag.Usage() os.Exit(1) } if tags != "" && tags != "valid" && tags != "invalid" && tags != "some" && tags != "none" { - log.Println("invalid tags filter") + log.Info("invalid tags filter") flag.Usage() os.Exit(1) } diff --git a/cmd/mt-index-cat/out/out.go b/cmd/mt-index-cat/out/out.go index 78f4189938..f9ff64dd69 100644 --- 
a/cmd/mt-index-cat/out/out.go +++ b/cmd/mt-index-cat/out/out.go @@ -10,6 +10,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/raintank/schema" + log "github.com/sirupsen/logrus" ) var QueryTime int64 @@ -108,7 +109,7 @@ func Template(format string) func(d schema.MetricDefinition) { return func(d schema.MetricDefinition) { err := tpl.Execute(os.Stdout, d) if err != nil { - panic(err) + log.Panic(err) } } } diff --git a/cmd/mt-index-migrate/main.go b/cmd/mt-index-migrate/main.go index fff7afc26e..ca89ce6707 100644 --- a/cmd/mt-index-migrate/main.go +++ b/cmd/mt-index-migrate/main.go @@ -9,14 +9,15 @@ import ( "github.com/gocql/gocql" "github.com/grafana/metrictank/cluster/partitioner" + "github.com/grafana/metrictank/logger" "github.com/grafana/metrictank/util" "github.com/raintank/schema" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) var ( dryRun = flag.Bool("dry-run", true, "run in dry-run mode. No changes will be made.") - logLevel = flag.Int("log-level", 2, "log level. 0=TRACE|1=DEBUG|2=INFO|3=WARN|4=ERROR|5=CRITICAL|6=FATAL") + logLevel = flag.Int("log-level", 4, "log level. 
0=PANIC|1=FATAL|2=ERROR|3=WARN|4=INFO|5=DEBUG") srcCassAddr = flag.String("src-cass-addr", "localhost", "Address of cassandra host to migrate from.") dstCassAddr = flag.String("dst-cass-addr", "localhost", "Address of cassandra host to migrate to.") srcKeyspace = flag.String("src-keyspace", "raintank", "Cassandra keyspace in use on source.") @@ -29,6 +30,12 @@ ) func main() { + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) + flag.Usage = func() { fmt.Fprintln(os.Stderr, "mt-index-migrate") fmt.Fprintln(os.Stderr) @@ -39,7 +46,10 @@ flag.PrintDefaults() } flag.Parse() - log.NewLogger(0, "console", fmt.Sprintf(`{"level": %d, "formatting":false}`, *logLevel)) + if *logLevel < 0 || *logLevel > 5 { + *logLevel = 4 + } + log.SetLevel(log.AllLevels[*logLevel]) defsChan := make(chan *schema.MetricDefinition, 100) @@ -51,7 +61,9 @@ srcCluster.Keyspace = *srcKeyspace srcSession, err := srcCluster.CreateSession() if err != nil { - log.Fatal(4, "failed to create cql session for source cassandra. %s", err) + log.WithFields(log.Fields{ + "error": err, + }).Fatal("failed to create cql session for source cassandra") } dstCluster := gocql.NewCluster(*dstCassAddr) dstCluster.Consistency = gocql.ParseConsistency("one") @@ -61,14 +73,18 @@ dstCluster.Keyspace = *dstKeyspace dstSession, err := dstCluster.CreateSession() if err != nil { - log.Fatal(4, "failed to create cql session for destination cassandra. %s", err) + log.WithFields(log.Fields{ + "error": err, + }).Fatal("failed to create cql session for destination cassandra") } // ensure the dest table exists. schemaTable := util.ReadEntry(*schemaFile, "schema_table").(string) err = dstSession.Query(fmt.Sprintf(schemaTable, *dstKeyspace)).Exec() if err != nil { - log.Fatal(4, "cassandra-idx failed to initialize cassandra table.
%s", err) + log.WithFields(log.Fields{ + "error": err, + }).Fatal("cassandra-idx failed to initialize cassandra table") } wg.Add(1) @@ -117,7 +133,9 @@ func writeDefs(session *gocql.Session, defsChan chan *schema.MetricDefinition) { def.LastUpdate).Exec(); err != nil { if (attempts % 20) == 0 { - log.Warn("cassandra-idx Failed to write def to cassandra. it will be retried. %s", err) + log.WithFields(log.Fields{ + "error": err, + }).Warn("cassandra-idx failed to write def to cassandra. it will be retried") } sleepTime := 100 * attempts if sleepTime > 2000 { @@ -127,12 +145,17 @@ func writeDefs(session *gocql.Session, defsChan chan *schema.MetricDefinition) { attempts++ } else { success = true - log.Debug("cassandra-idx metricDef saved to cassandra. %s", def.Id) + log.WithFields(log.Fields{ + "id": def.Id, + }).Debug("cassandra-idx metricDef saved to cassandra") counter++ } } } - log.Info("Inserted %d metricDefs in %s", counter, time.Since(pre).String()) + log.WithFields(log.Fields{ + "num.defs": counter, + "time.taken": time.Since(pre).String(), + }).Info("Inserted metricDefs") } func getDefs(session *gocql.Session, defsChan chan *schema.MetricDefinition) { @@ -141,7 +164,9 @@ func getDefs(session *gocql.Session, defsChan chan *schema.MetricDefinition) { defer close(defsChan) partitioner, err := partitioner.NewKafka(*partitionScheme) if err != nil { - log.Fatal(4, "failed to initialize partitioner. 
%s", err) + log.WithFields(log.Fields{ + "error": err, + }).Fatal("failed to initialize partitioner") } iter := session.Query("SELECT id, orgid, partition, name, interval, unit, mtype, tags, lastupdate from metric_idx").Iter() @@ -153,7 +178,10 @@ func getDefs(session *gocql.Session, defsChan chan *schema.MetricDefinition) { for iter.Scan(&id, &orgId, &partition, &name, &interval, &unit, &mtype, &tags, &lastupdate) { mkey, err := schema.MKeyFromString(id) if err != nil { - log.Error(3, "could not parse ID %q: %s -> skipping", id, err) + log.WithFields(log.Fields{ + "id": id, + "error": err, + }).Error("could not parse ID, skipping") continue } mdef := schema.MetricDefinition{ @@ -167,13 +195,17 @@ func getDefs(session *gocql.Session, defsChan chan *schema.MetricDefinition) { Tags: tags, LastUpdate: lastupdate, } - log.Debug("retrieved %s from old index.", mdef.Id) + log.WithFields(log.Fields{ + "id": mdef.Id, + }).Debug("retrieved id from old index") if *numPartitions == 1 { mdef.Partition = 0 } else { p, err := partitioner.Partition(&mdef, int32(*numPartitions)) if err != nil { - log.Fatal(4, "failed to get partition id of metric. 
%s", err) + log.WithFields(log.Fields{ + "error": err, + }).Fatal("failed to get partition id of metric") } else { mdef.Partition = p } diff --git a/cmd/mt-kafka-mdm-sniff-out-of-order/main.go b/cmd/mt-kafka-mdm-sniff-out-of-order/main.go index ee3f5faa7a..b66f44243b 100644 --- a/cmd/mt-kafka-mdm-sniff-out-of-order/main.go +++ b/cmd/mt-kafka-mdm-sniff-out-of-order/main.go @@ -14,11 +14,12 @@ import ( "time" inKafkaMdm "github.com/grafana/metrictank/input/kafkamdm" + "github.com/grafana/metrictank/logger" "github.com/grafana/metrictank/stats" "github.com/raintank/schema" "github.com/raintank/schema/msg" - "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" + log "github.com/sirupsen/logrus" ) var ( @@ -76,7 +77,10 @@ func (ip *inputOOOFinder) ProcessMetricData(metric *schema.MetricData, partition } mkey, err := schema.MKeyFromString(metric.Id) if err != nil { - log.Error(0, "could not parse id %q: %s", metric.Id, err) + log.WithFields(log.Fields{ + "id": metric.Id, + "error": err, + }).Error("could not parse id") return } @@ -104,7 +108,9 @@ func (ip *inputOOOFinder) ProcessMetricData(metric *schema.MetricData, partition tracker.DeltaSeen = uint32(now.Seen.Unix()) - uint32(tracker.Head.Seen.Unix()) err := ip.tpl.Execute(os.Stdout, tracker) if err != nil { - log.Error(0, "executing template: %s", err) + log.WithFields(log.Fields{ + "error": err, + }).Error("executing template") } ip.data[mkey] = tracker } @@ -140,7 +146,9 @@ func (ip *inputOOOFinder) ProcessMetricPoint(mp schema.MetricPoint, format msg.F tracker.DeltaSeen = uint32(now.Seen.Unix()) - uint32(tracker.Head.Seen.Unix()) err := ip.tpl.Execute(os.Stdout, tracker) if err != nil { - log.Error(0, "executing template: %s", err) + log.WithFields(log.Fields{ + "error": err, + }).Error("executing template") } ip.data[mp.MKey] = tracker } @@ -149,6 +157,13 @@ func (ip *inputOOOFinder) ProcessMetricPoint(mp schema.MetricPoint, format msg.F } func main() { + formatter := &logger.TextFormatter{} 
+ formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) + log.SetLevel(log.InfoLevel) + flag.Usage = func() { fmt.Fprintln(os.Stderr, "mt-kafka-mdm-sniff-out-of-order") fmt.Fprintln(os.Stderr) @@ -178,7 +193,6 @@ func main() { flag.PrintDefaults() } flag.Parse() - log.NewLogger(0, "console", fmt.Sprintf(`{"level": %d, "formatting":false}`, 2)) instance := "mt-kafka-mdm-sniff-out-of-order" + strconv.Itoa(rand.Int()) // Only try and parse the conf file if it exists @@ -191,7 +205,9 @@ func main() { EnvPrefix: "MT_", }) if err != nil { - log.Fatal(4, "error with configuration file: %s", err) + log.WithFields(log.Fields{ + "error": err, + }).Fatal("error with configuration file") os.Exit(1) } inKafkaMdm.ConfigSetup() @@ -213,7 +229,9 @@ func main() { signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) select { case sig := <-sigChan: - log.Info("Received signal %q. Shutting down", sig) + log.WithFields(log.Fields{ + "signal": sig, + }).Info("Received signal. Shutting down") case <-pluginFatal: log.Info("Mdm input plugin signalled a fatal error. 
Shutting down") } diff --git a/cmd/mt-kafka-mdm-sniff/main.go b/cmd/mt-kafka-mdm-sniff/main.go index f645ba1e43..f733fd650d 100644 --- a/cmd/mt-kafka-mdm-sniff/main.go +++ b/cmd/mt-kafka-mdm-sniff/main.go @@ -14,11 +14,12 @@ import ( "time" inKafkaMdm "github.com/grafana/metrictank/input/kafkamdm" + "github.com/grafana/metrictank/logger" "github.com/grafana/metrictank/stats" "github.com/raintank/schema" "github.com/raintank/schema/msg" - "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" + log "github.com/sirupsen/logrus" ) var ( @@ -92,7 +93,9 @@ func (ip inputPrinter) ProcessMetricData(metric *schema.MetricData, partition in stdoutLock.Unlock() if err != nil { - log.Error(0, "executing template: %s", err) + log.WithFields(log.Fields{ + "error": err, + }).Error("executing template") } } @@ -109,11 +112,20 @@ func (ip inputPrinter) ProcessMetricPoint(point schema.MetricPoint, format msg.F }) stdoutLock.Unlock() if err != nil { - log.Error(0, "executing template: %s", err) + log.WithFields(log.Fields{ + "error": err, + }).Error("executing template") } } func main() { + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) + log.SetLevel(log.InfoLevel) + flag.Usage = func() { fmt.Fprintln(os.Stderr, "mt-kafka-mdm-sniff") fmt.Fprintln(os.Stderr) @@ -125,7 +137,6 @@ func main() { fmt.Fprintln(os.Stderr, "example: mt-kafka-mdm-sniff -format-point '{{.Time | date}}'") } flag.Parse() - log.NewLogger(0, "console", fmt.Sprintf(`{"level": %d, "formatting":false}`, 2)) instance := "mt-kafka-mdm-sniff" + strconv.Itoa(rand.Int()) // Only try and parse the conf file if it exists @@ -138,7 +149,9 @@ func main() { EnvPrefix: "MT_", }) if err != nil { - log.Fatal(4, "error with configuration file: %s", err) + log.WithFields(log.Fields{ + "error": err, + }).Fatal("error with configuration file") os.Exit(1) } inKafkaMdm.ConfigSetup() @@ -160,7 +173,9 @@ 
func main() { signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) select { case sig := <-sigChan: - log.Info("Received signal %q. Shutting down", sig) + log.WithFields(log.Fields{ + "signal": sig, + }).Info("received signal. shutting down") case <-pluginFatal: log.Info("Mdm input plugin signalled a fatal error. Shutting down") } diff --git a/cmd/mt-schemas-explain/main.go b/cmd/mt-schemas-explain/main.go index c16f7891cf..81dae4578e 100644 --- a/cmd/mt-schemas-explain/main.go +++ b/cmd/mt-schemas-explain/main.go @@ -7,9 +7,10 @@ import ( "runtime" "time" - log "github.com/Sirupsen/logrus" "github.com/grafana/metrictank/conf" + "github.com/grafana/metrictank/logger" "github.com/grafana/metrictank/store/cassandra" + log "github.com/sirupsen/logrus" ) var ( @@ -21,6 +22,13 @@ var ( ) func main() { + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) + log.SetLevel(log.InfoLevel) + flag.Usage = func() { fmt.Println("mt-schemas-explain") fmt.Println() @@ -48,7 +56,10 @@ func main() { } schemas, err := conf.ReadSchemas(schemasFile) if err != nil { - log.Fatalf("can't read schemas file %q: %s", schemasFile, err.Error()) + log.WithFields(log.Fields{ + "file": schemasFile, + "error": err.Error(), + }).Fatal("can't read schemas files") } if *metric != "" { diff --git a/cmd/mt-split-metrics-by-ttl/main.go b/cmd/mt-split-metrics-by-ttl/main.go index 519fdc8f57..744bbd6bd7 100644 --- a/cmd/mt-split-metrics-by-ttl/main.go +++ b/cmd/mt-split-metrics-by-ttl/main.go @@ -8,8 +8,10 @@ import ( "path" "strings" + "github.com/grafana/metrictank/logger" "github.com/grafana/metrictank/store/cassandra" "github.com/raintank/dur" + log "github.com/sirupsen/logrus" ) func main() { @@ -34,6 +36,13 @@ func main() { flag.StringVar(&storeConfig.Username, "cassandra-username", storeConfig.Username, "username for authentication") flag.StringVar(&storeConfig.Password, 
"cassandra-password", storeConfig.Password, "password for authentication") + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) + log.SetLevel(log.InfoLevel) + flag.Usage = func() { fmt.Fprintln(os.Stderr, "mt-split-metrics-by-ttl [flags] ttl [ttl...]") fmt.Fprintln(os.Stderr) @@ -56,23 +65,31 @@ func main() { tmpDir, err := ioutil.TempDir(os.TempDir(), storeConfig.Keyspace) if err != nil { - panic(fmt.Sprintf("Failed to get temp dir: %s", tmpDir)) + log.WithFields(log.Fields{ + "directory": tmpDir, + }).Panic("failed to get temporary directory") } snapshotDir := path.Join(tmpDir, "snapshot") err = os.Mkdir(snapshotDir, 0700) if err != nil { - panic(fmt.Sprintf("Error creating directory: %s", err)) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to create snapshot directory") } store, err := cassandra.NewCassandraStore(storeConfig, ttls) if err != nil { - panic(fmt.Sprintf("Failed to instantiate cassandra: %s", err)) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to instantiate cassandra") } // create directory/link structure that we need to define the future table names err = os.Mkdir(path.Join(tmpDir, storeConfig.Keyspace), 0700) if err != nil { - panic(fmt.Sprintf("Failed to create directory: %s", err)) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to create directory") } namedTableLinks := make([]string, len(store.TTLTables)) tableNames := make([]string, len(store.TTLTables)) @@ -82,7 +99,9 @@ func main() { namedTableLinks[i] = path.Join(tmpDir, storeConfig.Keyspace, table.Name) err := os.Symlink(snapshotDir, namedTableLinks[i]) if err != nil { - panic(fmt.Sprintf("Error when creating symlink: %s", err)) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to create symlink") } i++ } diff --git a/cmd/mt-store-cat/main.go b/cmd/mt-store-cat/main.go index 
0a3f7fa2ca..2b35fdfd00 100644 --- a/cmd/mt-store-cat/main.go +++ b/cmd/mt-store-cat/main.go @@ -11,9 +11,10 @@ import ( "github.com/raintank/schema" - log "github.com/Sirupsen/logrus" "github.com/grafana/metrictank/conf" + "github.com/grafana/metrictank/logger" opentracing "github.com/opentracing/opentracing-go" + log "github.com/sirupsen/logrus" "github.com/grafana/metrictank/store/cassandra" "github.com/raintank/dur" @@ -64,6 +65,13 @@ func main() { flag.StringVar(&storeConfig.Password, "cassandra-password", storeConfig.Password, "password for authentication") flag.StringVar(&storeConfig.SchemaFile, "cassandra-schema-file", storeConfig.SchemaFile, "File containing the needed schemas in case database needs initializing") + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) + log.SetLevel(log.InfoLevel) + flag.Usage = func() { fmt.Println("mt-store-cat") fmt.Println() @@ -135,14 +143,16 @@ func main() { EnvPrefix: "MT_", }) if err != nil { - log.Fatal(4, "error with configuration file: %s", err) + log.WithFields(log.Fields{ + "error": err, + }).Fatal("error with configuration file") os.Exit(1) } config.ParseAll() if *groupTTL != "s" && *groupTTL != "m" && *groupTTL != "h" && *groupTTL != "d" { - log.Fatal(4, "groupTTL must be one of s/m/h/d") + log.Fatal("groupTTL must be one of s/m/h/d") os.Exit(1) } @@ -159,30 +169,41 @@ func main() { var err error loc, err = time.LoadLocation(*timeZoneStr) if err != nil { - log.Fatal(err) + log.WithFields(log.Fields{ + "error": err.Error(), + "time.zone": *timeZoneStr, + }).Fatal("failed to load time zone") } } store, err := cassandra.NewCassandraStore(storeConfig, nil) if err != nil { - log.Fatal(4, "failed to initialize cassandra. 
%s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to initialize cassandra") } tracer, traceCloser, err := conf.GetTracer(false, "", nil) if err != nil { - log.Fatal(4, "Could not initialize jaeger tracer: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to initialize jaeger tracer") } defer traceCloser.Close() store.SetTracer(tracer) err = store.FindExistingTables(storeConfig.Keyspace) if err != nil { - log.Fatal(4, "failed to read tables from cassandra. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to read tables from cassandra") } if tableSelector == "tables" { tables, err := getTables(store, "") if err != nil { - log.Fatal(4, "%s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to get tables from cassandra") } for _, table := range tables { fmt.Printf("%s (ttl %d hours)\n", table.Name, table.TTL) @@ -191,7 +212,10 @@ func main() { } tables, err := getTables(store, tableSelector) if err != nil { - log.Fatal(4, "%s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + "table": tableSelector, + }).Fatal("failed to get table from cassandra") } var fromUnix, toUnix uint32 @@ -203,12 +227,16 @@ func main() { fromUnix, err = dur.ParseDateTime(*from, loc, now, defaultFrom) if err != nil { - log.Fatal(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to parse date time") } toUnix, err = dur.ParseDateTime(*to, loc, now, defaultTo) if err != nil { - log.Fatal(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to parse date time") } } var metrics []Metric @@ -219,7 +247,9 @@ func main() { if format == "points" || format == "point-summary" { metrics, err = getMetrics(store, "") if err != nil { - log.Error(3, "cassandra query error. 
%s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("cassandra query error") return } } @@ -227,7 +257,9 @@ func main() { fmt.Println("# Looking for these metrics:") metrics, err = getMetrics(store, strings.Replace(metricSelector, "prefix:", "", 1)) if err != nil { - log.Error(3, "cassandra query error. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("cassandra query error") return } for _, m := range metrics { @@ -236,7 +268,9 @@ func main() { } else { amkey, err := schema.AMKeyFromString(metricSelector) if err != nil { - log.Error(3, "can't parse metric selector as AMKey: %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("can't parse metric selector as AMKey") return } @@ -244,7 +278,9 @@ func main() { metrics, err = getMetric(store, amkey) if err != nil { - log.Error(3, "cassandra query error. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("cassandra query error") return } if len(metrics) == 0 { diff --git a/cmd/mt-store-cat/metrics.go b/cmd/mt-store-cat/metrics.go index 4daa4be474..020425abb6 100644 --- a/cmd/mt-store-cat/metrics.go +++ b/cmd/mt-store-cat/metrics.go @@ -1,13 +1,13 @@ package main import ( - "fmt" "sort" "strings" "github.com/raintank/schema" "github.com/grafana/metrictank/store/cassandra" + log "github.com/sirupsen/logrus" ) type Metric struct { @@ -35,7 +35,10 @@ func getMetrics(store *cassandra.CassandraStore, prefix string) ([]Metric, error if strings.HasPrefix(m.name, prefix) { mkey, err := schema.MKeyFromString(idString) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + "id": idString, + }).Panic("failed to get mkey from id") } m.AMKey = schema.AMKey{ MKey: mkey, @@ -60,7 +63,10 @@ func getMetric(store *cassandra.CassandraStore, amkey schema.AMKey) ([]Metric, e for iter.Scan(idString, &m.name) { mkey, err := schema.MKeyFromString(idString) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": 
err.Error(), + "id": idString, + }).Panic("failed to get mkey from id") } m.AMKey = schema.AMKey{ MKey: mkey, @@ -69,7 +75,10 @@ func getMetric(store *cassandra.CassandraStore, amkey schema.AMKey) ([]Metric, e metrics = append(metrics, m) } if len(metrics) > 1 { - panic(fmt.Sprintf("wtf. found more than one entry for id %v: %v", amkey, metrics)) + log.WithFields(log.Fields{ + "id": amkey, + "metrics": metrics, + }).Panic("found more than one entry for id") } err := iter.Close() if err != nil { diff --git a/cmd/mt-store-cat/out.go b/cmd/mt-store-cat/out.go index a45beeceb7..b5afc05d4e 100644 --- a/cmd/mt-store-cat/out.go +++ b/cmd/mt-store-cat/out.go @@ -4,8 +4,8 @@ import ( "fmt" "sort" - log "github.com/Sirupsen/logrus" "github.com/gocql/gocql" + log "github.com/sirupsen/logrus" ) type bucket struct { @@ -60,6 +60,8 @@ func showKeyTTL(iter *gocql.Iter, groupTTL string) { } err := iter.Close() if err != nil { - log.Error(3, "cassandra query error. %s", err) + log.WithFields(log.Fields{ + "error": err, + }).Error("cassandra query error") } } diff --git a/cmd/mt-store-cat/series.go b/cmd/mt-store-cat/series.go index 72d44475ed..6d7226d096 100644 --- a/cmd/mt-store-cat/series.go +++ b/cmd/mt-store-cat/series.go @@ -11,6 +11,7 @@ import ( "github.com/grafana/metrictank/mdata/chunk" "github.com/grafana/metrictank/store/cassandra" "github.com/raintank/schema" + log "github.com/sirupsen/logrus" ) func points(ctx context.Context, store *cassandra.CassandraStore, tables []cassandra.Table, metrics []Metric, fromUnix, toUnix, fix uint32) { @@ -24,7 +25,9 @@ func points(ctx context.Context, store *cassandra.CassandraStore, tables []cassa } else { igens, err := store.SearchTable(ctx, metric.AMKey, table, fromUnix, toUnix) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to retrieve data from table") } printNormal(igens, fromUnix, toUnix) } @@ -43,7 +46,9 @@ func pointSummary(ctx context.Context, store 
*cassandra.CassandraStore, tables [ } else { igens, err := store.SearchTable(ctx, metric.AMKey, table, fromUnix, toUnix) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to retrieve data from table") } printSummary(igens, fromUnix, toUnix) } @@ -55,7 +60,9 @@ func getSeries(ctx context.Context, store *cassandra.CassandraStore, table cassa var points []schema.Point itgens, err := store.SearchTable(ctx, amkey, table, fromUnix, toUnix) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to retrieve data from table") } for i, itgen := range itgens { diff --git a/cmd/mt-store-cp-experimental/main.go b/cmd/mt-store-cp-experimental/main.go index 04bd0900ad..e86b37918a 100644 --- a/cmd/mt-store-cp-experimental/main.go +++ b/cmd/mt-store-cp-experimental/main.go @@ -3,7 +3,6 @@ package main import ( "flag" "fmt" - "log" "math" "os" "strconv" @@ -12,7 +11,10 @@ import ( "sync/atomic" "time" + log "github.com/sirupsen/logrus" + "github.com/gocql/gocql" + "github.com/grafana/metrictank/logger" "github.com/grafana/metrictank/store/cassandra" hostpool "github.com/hailocab/go-hostpool" ) @@ -61,6 +63,13 @@ var ( ) func main() { + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) + log.SetLevel(log.InfoLevel) + flag.Usage = func() { fmt.Fprintln(os.Stderr, "mt-store-cp [flags] table-in [table-out]") fmt.Fprintln(os.Stderr) @@ -86,19 +95,23 @@ func main() { } if sourceCassandraAddrs == destCassandraAddrs && tableIn == tableOut { - panic("Source and destination cannot be the same") + log.Panic("source and destination cannot be the same") } sourceSession, err := NewCassandraStore(sourceCassandraAddrs) if err != nil { - panic(fmt.Sprintf("Failed to instantiate source cassandra: %s", err)) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to instantiate 
source cassandra") } destSession, err := NewCassandraStore(destCassandraAddrs) if err != nil { - panic(fmt.Sprintf("Failed to instantiate dest cassandra: %s", err)) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to instantiate dest cassandra") } update(sourceSession, destSession, tableIn, tableOut) @@ -160,29 +173,42 @@ func fetchPartitionIds(sourceSession *gocql.Session) { if *partitions == "*" { return } - log.Println("Fetching ids for partitions ", *partitions) + log.WithFields(log.Fields{ + "partitions": *partitions, + }).Info("fetching ids for partitions") partitionIdMap = make(map[string]struct{}) partitionStrs := strings.Split(*partitions, ",") selectQuery := fmt.Sprintf("SELECT id FROM %s where partition=?", *idxTable) for _, p := range partitionStrs { if *verbose { - log.Println("Fetching ids for partition ", p) + log.WithFields(log.Fields{ + "partition": p, + }).Info("fetching ids for partition") } partition, err := strconv.Atoi(p) if err != nil { - panic(fmt.Sprintf("Could not parse partition %q, error = %s", p, err)) + log.WithFields(log.Fields{ + "error": err.Error(), + "partition": p, + }).Panic("could not parse partition") } keyItr := sourceSession.Query(selectQuery, partition).Iter() var key string for keyItr.Scan(&key) { partitionIdMap[key] = struct{}{} if len(partitionIdMap)%10000 == 0 { - log.Println("Loading...", len(partitionIdMap), " ids processed, processing partition ", p) + log.WithFields(log.Fields{ + "num.ids.processed": len(partitionIdMap), + "current.partition": p, + }).Info("loading...") } } err = keyItr.Close() if err != nil { - panic(fmt.Sprintf("Failed querying for partition key %q, error = %s", p, err)) + log.WithFields(log.Fields{ + "error": err.Error(), + "partition.key": p, + }).Panic("failed querying for partition key") } } } @@ -224,8 +250,16 @@ func printProgress(id int, token int64, doneRowsSnap uint64) { ratioLeft := (1 - completeness) / completeness timeRemaining := 
time.Duration(float64(timeElapsed) * ratioLeft) rowsPerSec := doneRowsSnap / (uint64(1) + uint64(timeElapsed/time.Second)) - log.Printf("WORKING: id=%d processed %d keys, %d rows, last token = %d, %.1f%% complete, elapsed=%v, remaining=%v, rows/s=%d", - id, doneKeysSnap, doneRowsSnap, token, completeness*100, roundToSeconds(timeElapsed), roundToSeconds(timeRemaining), rowsPerSec) + log.WithFields(log.Fields{ + "id": id, + "keys.processed": doneKeysSnap, + "rows.processed": doneRowsSnap, + "last.token": token, + "completed": strconv.FormatFloat((completeness*100), 'f', 1, 64) + "%", + "elapsed": roundToSeconds(timeElapsed), + "remaining": roundToSeconds(timeRemaining), + "rows.per.second": rowsPerSec, + }).Info("working") } func publishBatchUntilSuccess(destSession *gocql.Session, batch *gocql.Batch) *gocql.Batch { @@ -238,7 +272,9 @@ func publishBatchUntilSuccess(destSession *gocql.Session, batch *gocql.Batch) *g if err == nil { break } - fmt.Fprintf(os.Stderr, "ERROR: failed to publish batch, trying again. 
error = %q\n", err) + log.WithFields(log.Fields{ + "error": err, + }).Error("failed to publish batch, trying again") } return destSession.NewBatch(gocql.UnloggedBatch) @@ -270,14 +306,27 @@ func worker(id int, jobs <-chan string, wg *sync.WaitGroup, sourceSession, destS for iter.Scan(&token, &ts, &data, &ttl) { if *verbose { - log.Printf("id=%d processing rownum=%d table=%q key=%q ts=%d query=%q data='%x'\n", id, atomic.LoadUint64(&doneRows)+1, tableIn, key, ts, query, data) + log.WithFields(log.Fields{ + "id": id, + "row": atomic.LoadUint64(&doneRows), + "table": tableIn, + "key": key, + "ts": ts, + "query": query, + "data": data, + }).Info("working") } batch.Query(insertQuery, data, key, ts, ttl) if batch.Size() >= *maxBatchSize { if *verbose { - log.Printf("id=%d sending batch size=%d for key=%q ts=%d'\n", id, batch.Size(), key, ts) + log.WithFields(log.Fields{ + "id": id, + "batch.size": batch.Size(), + "key": key, + "ts": ts, + }).Info("sending batch") } batch = publishBatchUntilSuccess(destSession, batch) } @@ -298,7 +347,13 @@ func worker(id int, jobs <-chan string, wg *sync.WaitGroup, sourceSession, destS if err != nil { doneKeysSnap := atomic.LoadUint64(&doneKeys) doneRowsSnap := atomic.LoadUint64(&doneRows) - fmt.Fprintf(os.Stderr, "ERROR: id=%d failed querying %s: %q. processed %d keys, %d rows\n", id, tableIn, err, doneKeysSnap, doneRowsSnap) + log.WithFields(log.Fields{ + "id": id, + "table": tableIn, + "error": err, + "keys.processed": doneKeysSnap, + "rows.processed": doneRowsSnap, + }).Error("failed querying") } atomic.AddUint64(&doneKeys, 1) } @@ -332,7 +387,12 @@ func update(sourceSession, destSession *gocql.Session, tableIn, tableOut string) err := keyItr.Close() if err != nil { - fmt.Fprintf(os.Stderr, "ERROR: failed querying %s: %q. 
processed %d keys, %d rows\n", tableIn, err, doneKeys, doneRows) + log.WithFields(log.Fields{ + "table": tableIn, + "error": err, + "keys.processed": doneKeys, + "rows.processed": doneRows, + }).Error("failed querying") } else { break } @@ -341,5 +401,8 @@ func update(sourceSession, destSession *gocql.Session, tableIn, tableOut string) close(jobs) wg.Wait() - log.Printf("DONE. Processed %d keys, %d rows\n", doneKeys, doneRows) + log.WithFields(log.Fields{ + "keys.processed": doneKeys, + "rows.processed": doneRows, + }).Info("done") } diff --git a/cmd/mt-update-ttl/main.go b/cmd/mt-update-ttl/main.go index 569f615134..af41e3f002 100644 --- a/cmd/mt-update-ttl/main.go +++ b/cmd/mt-update-ttl/main.go @@ -3,15 +3,18 @@ package main import ( "flag" "fmt" - "log" "math" "os" + "strconv" "strings" "sync" "sync/atomic" "time" + log "github.com/sirupsen/logrus" + "github.com/gocql/gocql" + "github.com/grafana/metrictank/logger" "github.com/grafana/metrictank/store/cassandra" hostpool "github.com/hailocab/go-hostpool" "github.com/raintank/dur" @@ -50,6 +53,13 @@ var ( ) func main() { + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) + log.SetLevel(log.InfoLevel) + flag.Usage = func() { fmt.Fprintln(os.Stderr, "mt-update-ttl [flags] ttl table-in [table-out]") fmt.Fprintln(os.Stderr) @@ -77,7 +87,9 @@ func main() { session, err := NewCassandraStore() if err != nil { - panic(fmt.Sprintf("Failed to instantiate cassandra: %s", err)) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to instantiate cassandra") } update(session, ttl, tableIn, tableOut) @@ -174,26 +186,52 @@ func worker(id int, jobs <-chan string, wg *sync.WaitGroup, session *gocql.Sessi query = fmt.Sprintf("INSERT INTO %s (data, key, ts) values(?,?,?) 
USING TTL %d", tableOut, newTTL) } if *verbose { - log.Printf("id=%d processing rownum=%d table=%q key=%q ts=%d query=%q data='%x'\n", id, atomic.LoadUint64(&doneRows)+1, tableIn, key, ts, query, data) + log.WithFields(log.Fields{ + "id": id, + "row": atomic.LoadUint64(&doneRows), + "table": tableIn, + "key": key, + "ts": ts, + "query": query, + "data": data, + }).Info("processing") } err := session.Query(query, data, key, ts).Exec() if err != nil { - fmt.Fprintf(os.Stderr, "ERROR: id=%d failed updating %s %s %d: %q", id, tableOut, key, ts, err) + log.WithFields(log.Fields{ + "id": id, + "table": tableOut, + "key": key, + "ts": ts, + "error": err, + }).Error("failed updating") } doneRowsSnap := atomic.AddUint64(&doneRows, 1) if doneRowsSnap%10000 == 0 { doneKeysSnap := atomic.LoadUint64(&doneKeys) completeness := completenessEstimate(token) - log.Printf("WORKING: id=%d processed %d keys, %d rows. (last token: %d, completeness estimate %.1f%%)", id, doneKeysSnap, doneRowsSnap, token, completeness*100) + log.WithFields(log.Fields{ + "id": id, + "keys.processed": doneKeysSnap, + "rows.processed": doneRowsSnap, + "last.token": token, + "completed": strconv.FormatFloat((completeness*100), 'f', 1, 64) + "%", + }).Info("working") } } err := iter.Close() if err != nil { doneKeysSnap := atomic.LoadUint64(&doneKeys) doneRowsSnap := atomic.LoadUint64(&doneRows) - fmt.Fprintf(os.Stderr, "ERROR: id=%d failed querying %s: %q. processed %d keys, %d rows", id, tableIn, err, doneKeysSnap, doneRowsSnap) + log.WithFields(log.Fields{ + "id": id, + "table": tableIn, + "error": err, + "keys.processed": doneKeysSnap, + "rows.processed": doneRowsSnap, + }).Error("failed querying") } atomic.AddUint64(&doneKeys, 1) } @@ -219,11 +257,19 @@ func update(session *gocql.Session, ttl int, tableIn, tableOut string) { close(jobs) err := keyItr.Close() if err != nil { - fmt.Fprintf(os.Stderr, "ERROR: failed querying %s: %q. 
processed %d keys, %d rows", tableIn, err, doneKeys, doneRows) + log.WithFields(log.Fields{ + "table": tableIn, + "error": err, + "keys.processed": doneKeys, + "rows.processed": doneRows, + }).Error("failed querying") wg.Wait() os.Exit(2) } wg.Wait() - log.Printf("DONE. Processed %d keys, %d rows", doneKeys, doneRows) + log.WithFields(log.Fields{ + "keys.processed": doneKeys, + "rows.processed": doneRows, + }).Info("done") } diff --git a/cmd/mt-view-boundaries/main.go b/cmd/mt-view-boundaries/main.go index a40e5c68ae..6016d65249 100644 --- a/cmd/mt-view-boundaries/main.go +++ b/cmd/mt-view-boundaries/main.go @@ -3,10 +3,12 @@ package main import ( "flag" "fmt" - "log" "runtime" "time" + log "github.com/sirupsen/logrus" + + "github.com/grafana/metrictank/logger" "github.com/grafana/metrictank/mdata/chunk" "github.com/grafana/metrictank/store/cassandra" "github.com/raintank/dur" @@ -39,6 +41,13 @@ func display(span int64, boundaryType string) { } func main() { + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) + log.SetLevel(log.InfoLevel) + flag.Usage = func() { fmt.Println("mt-view-boundaries") fmt.Println() @@ -60,7 +69,9 @@ func main() { span := dur.MustParseNDuration("span", *spanStr) _, ok := chunk.RevChunkSpans[span] if !ok { - log.Fatal(4, "chunkSpan %s is not a valid value (https://github.com/grafana/metrictank/blob/master/docs/data-knobs.md#valid-chunk-spans)", *spanStr) + log.WithFields(log.Fields{ + "span": *spanStr, + }).Fatal("chunkSpan is not a valid value (https://github.com/grafana/metrictank/blob/master/docs/data-knobs.md#valid-chunk-spans)") } fmt.Println() display(int64(span), "specified span") diff --git a/cmd/mt-whisper-importer-reader/main.go b/cmd/mt-whisper-importer-reader/main.go index 78918e7f9f..b8fa476c39 100644 --- a/cmd/mt-whisper-importer-reader/main.go +++ b/cmd/mt-whisper-importer-reader/main.go @@ -18,12 +18,13 @@ import ( 
"sync/atomic" "time" - log "github.com/Sirupsen/logrus" "github.com/grafana/metrictank/conf" + "github.com/grafana/metrictank/logger" "github.com/grafana/metrictank/mdata/chunk" "github.com/grafana/metrictank/mdata/chunk/archive" "github.com/kisielk/whisper-go/whisper" "github.com/raintank/schema" + log "github.com/sirupsen/logrus" ) var ( @@ -106,6 +107,11 @@ var ( func main() { var err error flag.Parse() + + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + log.SetFormatter(formatter) if *verbose { log.SetLevel(log.DebugLevel) } else { @@ -115,14 +121,18 @@ func main() { nameFilter = regexp.MustCompile(*nameFilterPattern) schemas, err = conf.ReadSchemas(*dstSchemas) if err != nil { - panic(fmt.Sprintf("Error when parsing schemas file: %q", err)) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("error when parsing schemas file") } var pos *posTracker if len(*positionFile) > 0 { pos, err = NewPositionTracker(*positionFile) if err != nil { - log.Fatalf("Error instantiating position tracker: %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("error instantiating position tracker") } defer pos.Close() } @@ -148,12 +158,18 @@ func processFromChan(pos *posTracker, files chan string, wg *sync.WaitGroup) { for file := range files { fd, err := os.Open(file) if err != nil { - log.Errorf("Failed to open whisper file %q: %q\n", file, err) + log.WithFields(log.Fields{ + "error": err.Error(), + "file": file, + }).Error("failed to open whisper file") continue } w, err := whisper.OpenWhisper(fd) if err != nil { - log.Errorf("Failed to open whisper file %q: %q\n", file, err) + log.WithFields(log.Fields{ + "error": err.Error(), + "file": file, + }).Error("failed to open whisper file") continue } @@ -161,7 +177,9 @@ func processFromChan(pos *posTracker, files chan string, wg *sync.WaitGroup) { log.Debugf("Processing file %s (%s)", file, name) met, err := 
getMetric(w, file, name) if err != nil { - log.Errorf("Failed to get metric: %q", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("failed to get metric") continue } @@ -170,14 +188,19 @@ func processFromChan(pos *posTracker, files chan string, wg *sync.WaitGroup) { for !success { b, err := met.MarshalCompressed() if err != nil { - log.Errorf("Failed to encode metric: %q", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("failed to get metric") continue } size := b.Len() req, err := http.NewRequest("POST", *httpEndpoint, io.Reader(b)) if err != nil { - log.Fatal(fmt.Sprintf("Cannot construct request to http endpoint %q: %q", *httpEndpoint, err)) + log.WithFields(log.Fields{ + "http.endpoint": *httpEndpoint, + "error": err.Error(), + }).Fatal("cannot construct request to http endpoint") } req.Header.Set("Content-Type", "application/json") @@ -192,15 +215,35 @@ func processFromChan(pos *posTracker, files chan string, wg *sync.WaitGroup) { passed := time.Now().Sub(pre).Seconds() if err != nil || resp.StatusCode >= 300 { if err != nil { - log.Warningf("Error posting %s (%d bytes), to endpoint %q (attempt %d/%fs, retrying): %s", name, size, *httpEndpoint, attempts, passed, err) + log.WithFields(log.Fields{ + "name": name, + "size.bytes": size, + "http.endpoint": *httpEndpoint, + "attempts": attempts, + "time.elapsed.seconds": passed, + "error": err.Error(), + }).Warn("error posting to endpoint, retrying") attempts++ continue } else { - log.Warningf("Error posting %s (%d bytes) to endpoint %q status %d (attempt %d/%fs, retrying)", name, size, *httpEndpoint, resp.StatusCode, attempts, passed) + log.WithFields(log.Fields{ + "name": name, + "size.bytes": size, + "http.endpoint": *httpEndpoint, + "resp.status.code": resp.StatusCode, + "attempts": attempts, + "time.elapsed.seconds": passed, + "error": err.Error(), + }).Warn("error posting to endpoint, retrying") } attempts++ } else { - log.Debugf("Posted %s (%d bytes) to endpoint %q in 
%f seconds", name, size, *httpEndpoint, passed) + log.WithFields(log.Fields{ + "name": name, + "size.bytes": size, + "http.endpoint": *httpEndpoint, + "time.elapsed.seconds": passed, + }).Debug("posted to endpoint") success = true } io.Copy(ioutil.Discard, resp.Body) @@ -213,7 +256,10 @@ func processFromChan(pos *posTracker, files chan string, wg *sync.WaitGroup) { processed := atomic.AddUint32(&processedCount, 1) if processed%100 == 0 { skipped := atomic.LoadUint32(&skippedCount) - log.Infof("Processed %d files, %d skipped", processed, skipped) + log.WithFields(log.Fields{ + "files.processed": processed, + "files.skipped": skipped, + }).Info("processed files") } } wg.Done() @@ -305,11 +351,19 @@ func getMetric(w *whisper.Whisper, file, name string) (archive.Metric, error) { } mkey, err := schema.MKeyFromString(md.Id) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to retrieve mkey from id") } rowKey := getRowKey(retIdx, mkey, m, retention.SecondsPerPoint) encodedChunks := encodedChunksFromPoints(p, uint32(retention.SecondsPerPoint), retention.ChunkSpan) - log.Debugf("Archive %d Method %s got %d points = %d chunks at a span of %d", retIdx, m, len(p), len(encodedChunks), retention.ChunkSpan) + log.WithFields(log.Fields{ + "archive": retIdx, + "method": m, + "num.points": len(p), + "num.chunks": len(encodedChunks), + "span": retention.ChunkSpan, + }).Info("archive operation") res.Archives = append(res.Archives, archive.Archive{ SecondsPerPoint: uint32(retention.SecondsPerPoint), Points: uint32(retention.NumberOfPoints), @@ -365,7 +419,10 @@ func encodedChunksFromPoints(points []whisper.Point, intervalIn, chunkSpan uint3 err := c.Push(point.Timestamp, point.Value) if err != nil { - panic(fmt.Sprintf("ERROR: Failed to push value into chunk at t0 %d: %q", t0, err)) + log.WithFields(log.Fields{ + "t0.chunk": t0, + "error": err.Error(), + }).Panic("failed to push value into chunk at t0") } } @@ -389,7 +446,10 @@ func 
getFileListIntoChan(pos *posTracker, fileChan chan string) { } name := getMetricName(path) if !nameFilter.Match([]byte(getMetricName(name))) { - log.Debugf("Skipping file %s with name %s", path, name) + log.WithFields(log.Fields{ + "path": path, + "file": name, + }).Panic("skipping file") atomic.AddUint32(&skippedCount, 1) return nil } @@ -397,7 +457,9 @@ func getFileListIntoChan(pos *posTracker, fileChan chan string) { return nil } if pos != nil && pos.IsDone(path) { - log.Debugf("Skipping file %s because it was listed as already done", path) + log.WithFields(log.Fields{ + "file": path, + }).Panic("skipping file because it was listed as already done") return nil } diff --git a/cmd/mt-whisper-importer-writer/main.go b/cmd/mt-whisper-importer-writer/main.go index b94ef674fe..537bde5143 100644 --- a/cmd/mt-whisper-importer-writer/main.go +++ b/cmd/mt-whisper-importer-writer/main.go @@ -12,16 +12,17 @@ import ( "github.com/raintank/schema" - log "github.com/Sirupsen/logrus" "github.com/gocql/gocql" "github.com/grafana/metrictank/cluster" "github.com/grafana/metrictank/cluster/partitioner" "github.com/grafana/metrictank/idx" "github.com/grafana/metrictank/idx/cassandra" + "github.com/grafana/metrictank/logger" "github.com/grafana/metrictank/mdata/chunk" "github.com/grafana/metrictank/mdata/chunk/archive" cassandraStore "github.com/grafana/metrictank/store/cassandra" "github.com/raintank/dur" + log "github.com/sirupsen/logrus" ) var ( @@ -110,6 +111,13 @@ func main() { cassFlags := cassandra.ConfigSetup() + formatter := &logger.TextFormatter{} + formatter.TimestampFormat = "2006-01-02 15:04:05.000" + formatter.QuoteEmptyFields = true + + log.SetFormatter(formatter) + log.SetLevel(log.InfoLevel) + flag.Usage = func() { fmt.Println("mt-whisper-importer-writer") fmt.Println() @@ -157,7 +165,9 @@ func main() { store, err := cassandraStore.NewCassandraStore(storeConfig, nil) if err != nil { - panic(fmt.Sprintf("Failed to initialize cassandra: %q", err)) + 
log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to initialize cassandra") } splits := strings.Split(*ttlsStr, ",") @@ -169,7 +179,9 @@ func main() { p, err := partitioner.NewKafka(*partitionScheme) if err != nil { - panic(fmt.Sprintf("Failed to instantiate partitioner: %q", err)) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to instantiate partitioner") } cluster.Init("mt-whisper-importer-writer", gitHash, time.Now(), "http", int(80)) @@ -189,10 +201,14 @@ func main() { http.HandleFunc(*uriPath, server.chunksHandler) http.HandleFunc("/healthz", server.healthzHandler) - log.Infof("Listening on %q", *httpEndpoint) + log.WithFields(log.Fields{ + "http.endpoint": *httpEndpoint, + }).Info("listening") err = http.ListenAndServe(*httpEndpoint, nil) if err != nil { - panic(fmt.Sprintf("Error creating listener: %q", err)) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("error creating listener") } } @@ -217,9 +233,13 @@ func (s *Server) chunksHandler(w http.ResponseWriter, req *http.Request) { return } - log.Debugf( - "Receiving Id:%s OrgId:%d Name:%s AggMeth:%d ArchCnt:%d", - metric.MetricData.Id, metric.MetricData.OrgId, metric.MetricData.Name, metric.AggregationMethod, len(metric.Archives)) + log.WithFields(log.Fields{ + "id": metric.MetricData.Id, + "org.id": metric.MetricData.OrgId, + "name": metric.MetricData.Name, + "aggregation.method": metric.AggregationMethod, + "num.archive": len(metric.Archives), + }).Debug("receiving") if len(metric.Archives) == 0 { throwError("Metric has no archives") @@ -249,10 +269,14 @@ func (s *Server) chunksHandler(w http.ResponseWriter, req *http.Request) { throwError(fmt.Sprintf("Failed to get selected table %d in %+v", tableTTL, s.TTLTables)) return } - log.Debugf( - "inserting %d chunks of archive %d with ttl %d into table %s with ttl %d and key %s", - len(a.Chunks), archiveIdx, archiveTTL, table.Name, tableTTL, a.RowKey, - ) + log.WithFields(log.Fields{ + "num.chunks": 
len(a.Chunks), + "archive.index": archiveIdx, + "archive.ttl": archiveTTL, + "table.name": table.Name, + "table.ttl": tableTTL, + "key": a.RowKey, + }).Debug("inserting chunks") s.insertChunks(table.Name, a.RowKey, tableTTL, a.Chunks) } } @@ -264,7 +288,9 @@ func (s *Server) insertChunks(table, id string, ttl uint32, itergens []chunk.Ite } else { query = fmt.Sprintf("INSERT INTO %s (key, ts, data) values (?,?,?) IF NOT EXISTS USING TTL %d", table, ttl) } - log.Debug(query) + log.WithFields(log.Fields{ + "query": query, + }).Debug("insertChunks query") for _, ig := range itergens { rowKey := fmt.Sprintf("%s_%d", id, ig.Ts/cassandraStore.Month_sec) success := false @@ -273,7 +299,10 @@ err := s.Session.Query(query, rowKey, ig.Ts, cassandraStore.PrepareChunkData(ig.Span, ig.Bytes())).Exec() if err != nil { if (attempts % 20) == 0 { - log.Warnf("CS: failed to save chunk to cassandra after %d attempts. %s", attempts+1, err) + log.WithFields(log.Fields{ + "attempts": attempts + 1, + "error": err, + }).Warn("CS: failed to save chunk to cassandra") } sleepTime := 100 * attempts if sleepTime > 2000 { diff --git a/consolidation/consolidation.go b/consolidation/consolidation.go index a6de6cbf2b..8a9f1b7991 100644 --- a/consolidation/consolidation.go +++ b/consolidation/consolidation.go @@ -3,11 +3,11 @@ package consolidation import ( "errors" - "fmt" "github.com/raintank/schema" "github.com/grafana/metrictank/batch" + log "github.com/sirupsen/logrus" ) // consolidator is a highlevel description of a point consolidation method @@ -60,7 +60,11 @@ func (c Consolidator) String() string { case Sum: return "SumConsolidator" } - panic(fmt.Sprintf("Consolidator.String(): unknown consolidator %d", c)) + log.WithFields(log.Fields{ + "consolidator": c, + }).Panic("Consolidator.String(): unknown consolidator") + // This return will never be reached due to the Panic, but Go complains if it is omitted + return 
"" } // provide the name of a stored archive @@ -68,9 +72,9 @@ func (c Consolidator) String() string { func (c Consolidator) Archive() schema.Method { switch c { case None: - panic("cannot get an archive for no consolidation") + log.Panic("cannot get an archive for no consolidation") case Avg: - panic("avg consolidator has no matching Archive(). you need sum and cnt") + log.Panic("avg consolidator has no matching Archive(). you need sum and cnt") case Cnt: return schema.Cnt case Lst: @@ -82,7 +86,11 @@ func (c Consolidator) Archive() schema.Method { case Sum: return schema.Sum } - panic(fmt.Sprintf("Consolidator.Archive(): unknown consolidator %q", c)) + log.WithFields(log.Fields{ + "consolidator": c, + }).Panic("Consolidator.Archive(): unknown consolidator") + // This return will never be reached due to the Panic, but Go complains if it is omitted + return schema.Sum } func FromArchive(archive schema.Method) Consolidator { diff --git a/docs/tools.md b/docs/tools.md index 945cc64236..7d95740c6e 100644 --- a/docs/tools.md +++ b/docs/tools.md @@ -165,7 +165,7 @@ Flags: -dst-keyspace string Cassandra keyspace in use on destination. (default "raintank") -log-level int - log level. 0=TRACE|1=DEBUG|2=INFO|3=WARN|4=ERROR|5=CRITICAL|6=FATAL (default 2) + log level. 0=PANIC|1=FATAL|2=ERROR|3=WARN|4=INFO|5=DEBUG (default 4) -num-partitions int number of partitions in cluster (default 1) -partition-scheme string diff --git a/expr/parse.go b/expr/parse.go index 5261187921..3d8bc76295 100644 --- a/expr/parse.go +++ b/expr/parse.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/metrictank/api/models" "github.com/grafana/metrictank/util" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) var ( @@ -174,7 +174,7 @@ func parseArgList(e string) (string, []*expr, map[string]*expr, string, error) { ) if e[0] != '(' { - panic("arg list should start with paren. calling code should have asserted this") + log.Panic("arg list should start with paren. 
calling code should have asserted this") } ArgString := e[1:] @@ -330,7 +330,7 @@ FOR: func parseString(s string) (string, string, error) { if s[0] != '\'' && s[0] != '"' { - panic("string should start with open quote. calling code should have asserted this") + log.Panic("string should start with open quote. calling code should have asserted this") } match := s[0] @@ -393,7 +393,9 @@ func extractMetric(m string) string { } if quoteChar != 0 { - log.Warn("extractMetric: encountered unterminated string literal in %s", m) + log.WithFields(log.Fields{ + "string": m, + }).Warn("extractMetric: encountered unterminated string literal") return "" } diff --git a/idx/cassandra/cassandra.go b/idx/cassandra/cassandra.go index 9b9d7a005a..a2eb9bd797 100644 --- a/idx/cassandra/cassandra.go +++ b/idx/cassandra/cassandra.go @@ -16,8 +16,8 @@ import ( "github.com/grafana/metrictank/stats" "github.com/grafana/metrictank/util" "github.com/raintank/schema" - "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" + log "github.com/sirupsen/logrus" ) var ( @@ -170,12 +170,14 @@ func (c *CasIdx) InitBare() error { // create the keyspace or ensure it exists if createKeyspace { - log.Info("cassandra-idx: ensuring that keyspace %s exist.", keyspace) + log.WithFields(log.Fields{ + "keyspace": keyspace, + }).Info("cassandra-idx: ensuring that keyspace exists") err = tmpSession.Query(fmt.Sprintf(schemaKeyspace, keyspace)).Exec() if err != nil { return fmt.Errorf("failed to initialize cassandra keyspace: %s", err) } - log.Info("cassandra-idx: ensuring that table metric_idx exist.") + log.Info("cassandra-idx: ensuring that table metric_idx exists") err = tmpSession.Query(fmt.Sprintf(schemaTable, keyspace)).Exec() if err != nil { return fmt.Errorf("failed to initialize cassandra table: %s", err) @@ -188,7 +190,9 @@ func (c *CasIdx) InitBare() error { if attempt >= 5 { return fmt.Errorf("cassandra keyspace not found. 
%d attempts", attempt) } - log.Warn("cassandra-idx cassandra keyspace not found. retrying in 5s. attempt: %d", attempt) + log.WithFields(log.Fields{ + "attempt": attempt, + }).Warn("cassandra-idx: cassandra keyspace not found, retrying in 5s") time.Sleep(5 * time.Second) } else { if _, ok := keyspaceMetadata.Tables["metric_idx"]; ok { @@ -197,7 +201,9 @@ func (c *CasIdx) InitBare() error { if attempt >= 5 { return fmt.Errorf("cassandra table not found. %d attempts", attempt) } - log.Warn("cassandra-idx cassandra table not found. retrying in 5s. attempt: %d", attempt) + log.WithFields(log.Fields{ + "attempt": attempt, + }).Warn("cassandra-idx: cassandra table not found, retrying in 5s") time.Sleep(5 * time.Second) } } @@ -220,7 +226,9 @@ func (c *CasIdx) InitBare() error { // Init makes sure the needed keyspace, table, index in cassandra exists, creates the session, // rebuilds the in-memory index, sets up write queues, metrics and pruning routines func (c *CasIdx) Init() error { - log.Info("initializing cassandra-idx. Hosts=%s", hosts) + log.WithFields(log.Fields{ + "hosts": hosts, + }).Info("initializing cassandra-idx") if err := c.MemoryIdx.Init(); err != nil { return err } @@ -234,7 +242,9 @@ func (c *CasIdx) Init() error { for i := 0; i < numConns; i++ { go c.processWriteQueue() } - log.Info("cassandra-idx started %d writeQueue handlers", numConns) + log.WithFields(log.Fields{ + "num.write.queue.handlers": numConns, + }).Info("cassandra-idx: started write queue handlers") } //Rebuild the in-memory index. @@ -368,7 +378,10 @@ func (c *CasIdx) rebuildIndex() { defs = c.LoadPartitions(cluster.Manager.GetPartitions(), defs, staleTs) num := c.MemoryIdx.Load(defs) - log.Info("cassandra-idx Rebuilding Memory Index Complete. Imported %d. 
Took %s", num, time.Since(pre)) + log.WithFields(log.Fields{ + "num.imported": num, + "time.elapsed": time.Since(pre), + }).Info("cassandra-idx: Rebuilding Memory Index Complete") } func (c *CasIdx) Load(defs []schema.MetricDefinition, cutoff uint32) []schema.MetricDefinition { @@ -397,7 +410,10 @@ func (c *CasIdx) load(defs []schema.MetricDefinition, iter cqlIterator, cutoff u for iter.Scan(&id, &orgId, &partition, &name, &interval, &unit, &mtype, &tags, &lastupdate) { mkey, err := schema.MKeyFromString(id) if err != nil { - log.Error(3, "cassandra-idx: load() could not parse ID %q: %s -> skipping", id, err) + log.WithFields(log.Fields{ + "id": id, + "error": err.Error(), + }).Error("cassandra-idx: load() could not parse id, skipping") continue } if orgId < 0 { @@ -419,7 +435,9 @@ func (c *CasIdx) load(defs []schema.MetricDefinition, iter cqlIterator, cutoff u defsByNames[nameWithTags] = append(defsByNames[nameWithTags], mdef) } if err := iter.Close(); err != nil { - log.Fatal(4, "Could not close iterator: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("could not close iterator") } NAMES: @@ -447,7 +465,9 @@ func (c *CasIdx) processWriteQueue() { qry := `INSERT INTO metric_idx (id, orgid, partition, name, interval, unit, mtype, tags, lastupdate) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)` for req = range c.writeQueue { if err != nil { - log.Error(3, "Failed to marshal metricDef. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("failed to marshal metricDef") continue } statQueryInsertWaitDuration.Value(time.Since(req.recvTime)) @@ -471,7 +491,9 @@ func (c *CasIdx) processWriteQueue() { statQueryInsertFail.Inc() errmetrics.Inc(err) if (attempts % 20) == 0 { - log.Warn("cassandra-idx Failed to write def to cassandra. it will be retried. 
%s", err) + log.WithFields(log.Fields{ + "error": err, + }).Warn("cassandra-idx: failed to write def to cassandra, it will be retried") } sleepTime := 100 * attempts if sleepTime > 2000 { @@ -483,7 +505,9 @@ func (c *CasIdx) processWriteQueue() { success = true statQueryInsertExecDuration.Value(time.Since(pre)) statQueryInsertOk.Inc() - log.Debug("cassandra-idx metricDef saved to cassandra. %s", req.def.Id) + log.WithFields(log.Fields{ + "id": req.def.Id, + }).Warn("cassandra-idx: metricDef saved to cassandra") } } } @@ -501,7 +525,9 @@ func (c *CasIdx) Delete(orgId uint32, pattern string) ([]idx.Archive, error) { for _, def := range defs { err = c.deleteDef(def.Id, def.Partition) if err != nil { - log.Error(3, "cassandra-idx: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("cassandra-idx: ") } } } @@ -519,7 +545,10 @@ func (c *CasIdx) deleteDef(key schema.MKey, part int32) error { if err != nil { statQueryDeleteFail.Inc() errmetrics.Inc(err) - log.Error(3, "cassandra-idx Failed to delete metricDef %s from cassandra. 
%s", keyStr, err) + log.WithFields(log.Fields{ + "key": keyStr, + "error": err, + }).Error("cassandra-idx: Failed to delete metricDef from cassandra") time.Sleep(time.Second) } else { statQueryDeleteOk.Inc() @@ -533,7 +562,9 @@ func (c *CasIdx) deleteDef(key schema.MKey, part int32) error { func (c *CasIdx) deleteDefAsync(key schema.MKey, part int32) { go func() { if err := c.deleteDef(key, part); err != nil { - log.Error(3, err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("cassandra-idx: ") } }() } @@ -548,11 +579,15 @@ func (c *CasIdx) Prune(oldest time.Time) ([]idx.Archive, error) { func (c *CasIdx) prune() { ticker := time.NewTicker(pruneInterval) for range ticker.C { - log.Debug("cassandra-idx: pruning items from index that have not been seen for %s", maxStale.String()) + log.WithFields(log.Fields{ + "time.stale": maxStale.String(), + }).Debug("cassandra-idx: pruning items from index that have not been seen in a while") staleTs := time.Now().Add(maxStale * -1) _, err := c.Prune(staleTs) if err != nil { - log.Error(3, "cassandra-idx: prune error. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("cassandra-idx: prune error") } } } diff --git a/idx/cassandra/cassandra_test.go b/idx/cassandra/cassandra_test.go index 69ed74feb8..6e87acaa5f 100644 --- a/idx/cassandra/cassandra_test.go +++ b/idx/cassandra/cassandra_test.go @@ -12,6 +12,7 @@ import ( "github.com/grafana/metrictank/idx" "github.com/grafana/metrictank/test" "github.com/raintank/schema" + log "github.com/sirupsen/logrus" . 
"github.com/smartystreets/goconvey/convey" ) @@ -455,7 +456,9 @@ func insertDefs(ix idx.MetricIndex, i int) { data.SetId() mkey, err := schema.MKeyFromString(data.Id) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to get mkey from id") } ix.AddOrUpdate(mkey, data, 1) } diff --git a/idx/memory/memory.go b/idx/memory/memory.go index 592f79c187..b0c0149f8f 100755 --- a/idx/memory/memory.go +++ b/idx/memory/memory.go @@ -15,8 +15,8 @@ import ( "github.com/grafana/metrictank/mdata" "github.com/grafana/metrictank/stats" "github.com/raintank/schema" - "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" + log "github.com/sirupsen/logrus" ) var ( @@ -233,9 +233,9 @@ func (m *MemoryIdx) Update(point schema.MetricPoint, partition int32) (idx.Archi existing, ok := m.defById[point.MKey] if ok { - if LogLevel < 2 { - log.Debug("memory-idx: metricDef with id %v already in index", point.MKey) - } + log.WithFields(log.Fields{ + "id": point.MKey, + }).Debug("memory-idx: metricDef with id already in index") bumpLastUpdate(&existing.LastUpdate, int64(point.Time)) @@ -259,7 +259,9 @@ func (m *MemoryIdx) AddOrUpdate(mkey schema.MKey, data *schema.MetricData, parti existing, ok := m.defById[mkey] if ok { - log.Debug("memory-idx: metricDef with id %s already in index.", mkey) + log.WithFields(log.Fields{ + "metricdef.id": mkey, + }).Debug("memory-idx: metricDef with id already in index") bumpLastUpdate(&existing.LastUpdate, data.Time) oldPart := atomic.SwapInt32(&existing.Partition, partition) statUpdate.Inc() @@ -311,7 +313,10 @@ func (m *MemoryIdx) indexTags(def *schema.MetricDefinition) { // should never happen because every tag in the index // must have a valid format invalidTag.Inc() - log.Error(3, "memory-idx: Tag %q of id %q has an invalid format", tag, def.Id) + log.WithFields(log.Fields{ + "tag": tag, + "id": def.Id, + }).Error("memory-idx: Tag has an invalid format") continue } @@ -336,7 +341,10 @@ func 
(m *MemoryIdx) deindexTags(tags TagIndex, def *schema.MetricDefinition) boo // should never happen because every tag in the index // must have a valid format invalidTag.Inc() - log.Error(3, "memory-idx: Tag %q of id %q has an invalid format", tag, def.Id) + log.WithFields(log.Fields{ + "tag": tag, + "id": def.Id, + }).Error("memory-idx: Tag has an invalid format") continue } @@ -400,7 +408,9 @@ func (m *MemoryIdx) add(def *schema.MetricDefinition) idx.Archive { if _, ok := m.defById[def.Id]; !ok { m.defById[def.Id] = archive statAdd.Inc() - log.Debug("memory-idx: adding %s to DefById", path) + log.WithFields(log.Fields{ + "path": path, + }).Debug("memory-idx: adding entry to DefById") } return *archive } @@ -408,7 +418,9 @@ func (m *MemoryIdx) add(def *schema.MetricDefinition) idx.Archive { //first check to see if a tree has been created for this OrgId tree, ok := m.tree[def.OrgId] if !ok || len(tree.Items) == 0 { - log.Debug("memory-idx: first metricDef seen for orgId %d", def.OrgId) + log.WithFields(log.Fields{ + "org.id": def.OrgId, + }).Debug("memory-idx: first metricDef seen for orgId") root := &Node{ Path: "", Children: make([]string, 0), @@ -423,7 +435,10 @@ func (m *MemoryIdx) add(def *schema.MetricDefinition) idx.Archive { // An existing leaf is possible if there are multiple metricDefs for the same path due // to different tags or interval if node, ok := tree.Items[path]; ok { - log.Debug("memory-idx: existing index entry for %s. 
Adding %s to Defs list", path, def.Id) + log.WithFields(log.Fields{ + "path": path, + "id": def.Id, + }).Debug("memory-idx: existing index entry, adding id to Defs list") node.Defs = append(node.Defs, def.Id) m.defById[def.Id] = archive statAdd.Inc() @@ -440,12 +455,18 @@ func (m *MemoryIdx) add(def *schema.MetricDefinition) idx.Archive { branch := path[:pos] prevNode := path[pos+1 : prevPos] if n, ok := tree.Items[branch]; ok { - log.Debug("memory-idx: adding %s as child of %s", prevNode, n.Path) + log.WithFields(log.Fields{ + "child.node": prevNode, + "parent.node": n.Path, + }).Debug("memory-idx: adding child node") n.Children = append(n.Children, prevNode) break } - log.Debug("memory-idx: creating branch %s with child %s", branch, prevNode) + log.WithFields(log.Fields{ + "branch": branch, + "child.node": prevNode, + }).Debug("memory-idx: creating branch with child") tree.Items[branch] = &Node{ Path: branch, Children: []string{prevNode}, @@ -459,13 +480,17 @@ func (m *MemoryIdx) add(def *schema.MetricDefinition) idx.Archive { if pos == -1 { // need to add to the root node. branch := path[:prevPos] - log.Debug("memory-idx: no existing branches found for %s. Adding to the root node.", branch) + log.WithFields(log.Fields{ + "branch": branch, + }).Debug("memory-idx: no existing branches found, adding to the root node") n := tree.Items[""] n.Children = append(n.Children, branch) } // Add leaf node - log.Debug("memory-idx: creating leaf %s", path) + log.WithFields(log.Fields{ + "leaf": path, + }).Debug("memory-idx: creating leaf") tree.Items[path] = &Node{ Path: path, Children: []string{}, @@ -553,7 +578,9 @@ func (m *MemoryIdx) TagDetails(orgId uint32, key, filter string, from int64) (ma def, ok := m.defById[id] if !ok { corruptIndex.Inc() - log.Error(3, "memory-idx: corrupt. 
ID %q is in tag index but not in the byId lookup table", id) + log.WithFields(log.Fields{ + "id": id, + }).Error("memory-idx: corrupt, ID is in tag index but not in the byId lookup table") continue } @@ -709,7 +736,9 @@ func (m *MemoryIdx) FindTagValues(orgId uint32, tag, prefix string, expressions // should never happen because every ID in the tag index // must be present in the byId lookup table corruptIndex.Inc() - log.Error(3, "memory-idx: ID %q is in tag index but not in the byId lookup table", id) + log.WithFields(log.Fields{ + "id": id, + }).Error("memory-idx: ID is in tag index but not in the byId lookup table") continue } @@ -824,7 +853,9 @@ func (m *MemoryIdx) hasOneMetricFrom(tags TagIndex, tag string, from int64) bool def, ok := m.defById[id] if !ok { corruptIndex.Inc() - log.Error(3, "memory-idx: corrupt. ID %q is in tag index but not in the byId lookup table", id) + log.WithFields(log.Fields{ + "id": id, + }).Error("memory-idx: corrupt, ID is in tag index but not in the byId lookup table") continue } @@ -859,7 +890,9 @@ func (m *MemoryIdx) FindByTag(orgId uint32, expressions []string, from int64) ([ def, ok := m.defById[id] if !ok { corruptIndex.Inc() - log.Error(3, "memory-idx: corrupt. ID %q has been given, but it is not in the byId lookup table", id) + log.WithFields(log.Fields{ + "id": id, + }).Error("memory-idx: corrupt, ID has been given, but it is not in the byId lookup table") continue } @@ -908,7 +941,10 @@ func (m *MemoryIdx) Find(orgId uint32, pattern string, from int64) ([]idx.Node, } matchedNodes = append(matchedNodes, publicNodes...) 
} - log.Debug("memory-idx: %d nodes matching pattern %s found", len(matchedNodes), pattern) + log.WithFields(log.Fields{ + "num.nodes": len(matchedNodes), + "pattern": pattern, + }).Error("memory-idx: found nodes matching pattern") results := make([]idx.Node, 0) byPath := make(map[string]struct{}) // construct the output slice of idx.Node's such that there is only 1 idx.Node @@ -928,10 +964,22 @@ func (m *MemoryIdx) Find(orgId uint32, pattern string, from int64) ([]idx.Node, def := m.defById[id] if from != 0 && atomic.LoadInt64(&def.LastUpdate) < from { statFiltered.Inc() - log.Debug("memory-idx: from is %d, so skipping %s which has LastUpdate %d", from, def.Id, atomic.LoadInt64(&def.LastUpdate)) + log.WithFields(log.Fields{ + "from": from, + "id": def.Id, + "last.update": atomic.LoadInt64(&def.LastUpdate), + }).Debug("memory-idx: skipping stale id") continue } - log.Debug("memory-idx Find: adding to path %s archive id=%s name=%s int=%d schemaId=%d aggId=%d lastSave=%d", n.Path, def.Id, def.Name, def.Interval, def.SchemaId, def.AggId, def.LastSave) + log.WithFields(log.Fields{ + "path": n.Path, + "archive.id": def.Id, + "name": def.Name, + "interval": def.Interval, + "schema.id": def.SchemaId, + "agg.id": def.AggId, + "last.save": def.LastSave, + }).Debug("memory-idx Find: adding") idxNode.Defs = append(idxNode.Defs, *def) } if len(idxNode.Defs) == 0 { @@ -941,10 +989,15 @@ func (m *MemoryIdx) Find(orgId uint32, pattern string, from int64) ([]idx.Node, results = append(results, idxNode) byPath[n.Path] = struct{}{} } else { - log.Debug("memory-idx: path %s already seen", n.Path) + log.WithFields(log.Fields{ + "path": n.Path, + }).Error("memory-idx: path already seen") } } - log.Debug("memory-idx: %d nodes has %d unique paths.", len(matchedNodes), len(results)) + log.WithFields(log.Fields{ + "num.nodes": len(matchedNodes), + "unique.paths": len(results), + }).Error("memory-idx: nodes have unique paths") statFindDuration.Value(time.Since(pre)) return results, nil } @@ 
-953,7 +1006,9 @@ func (m *MemoryIdx) find(orgId uint32, pattern string) ([]*Node, error) { tree, ok := m.tree[orgId] if !ok { - log.Debug("memory-idx: orgId %d has no metrics indexed.", orgId) + log.WithFields(log.Fields{ + "org.id": orgId, + }).Debug("memory-idx: no metrics indexed for orgId") return nil, nil } @@ -974,7 +1029,10 @@ func (m *MemoryIdx) find(orgId uint32, pattern string) ([]*Node, error) { pos := len(nodes) for i := 0; i < len(nodes); i++ { if strings.ContainsAny(nodes[i], "*{}[]?") { - log.Debug("memory-idx: found first pattern sequence at node %s pos %d", nodes[i], i) + log.WithFields(log.Fields{ + "node": nodes[i], + "position": i, + }).Debug("memory-idx: found first pattern sequence at node") pos = i break } @@ -983,17 +1041,28 @@ if pos != 0 { branch = strings.Join(nodes[:pos], ".") } - log.Debug("memory-idx: starting search at orgId %d, node %q", orgId, branch) + log.WithFields(log.Fields{ + "org.id": orgId, + "node": branch, + }).Debug("memory-idx: starting search") startNode, ok := tree.Items[branch] if !ok { - log.Debug("memory-idx: branch %q does not exist in the index for orgId %d", branch, orgId) + log.WithFields(log.Fields{ + "org.id": orgId, + "branch": branch, + }).Debug("memory-idx: branch does not exist in the index") return nil, nil } if startNode == nil { corruptIndex.Inc() - log.Error(3, "memory-idx: startNode is nil. 
org=%d,patt=%q,pos=%d,branch=%q", orgId, pattern, pos, branch) + log.WithFields(log.Fields{ + "org.id": orgId, + "pattern": pattern, + "position": pos, + "branch": branch, + }).Error("memory-idx: startNode is nil") return nil, errors.NewInternal("hit an empty path in the index") } @@ -1010,11 +1079,18 @@ func (m *MemoryIdx) find(orgId uint32, pattern string) ([]*Node, error) { var grandChildren []*Node for _, c := range children { if !c.HasChildren() { - log.Debug("memory-idx: end of branch reached at %s with no match found for %s", c.Path, pattern) + log.WithFields(log.Fields{ + "path": c.Path, + "pattern": pattern, + }).Debug("memory-idx: end of branch reached with no match found") // expecting a branch continue } - log.Debug("memory-idx: searching %d children of %s that match %s", len(c.Children), c.Path, nodes[i]) + log.WithFields(log.Fields{ + "num.children": len(c.Children), + "path": c.Path, + "pattern": nodes[i], + }).Debug("memory-idx: searching children for match") matches := matcher(c.Children) for _, m := range matches { newBranch := c.Path + "." + m @@ -1024,7 +1100,13 @@ func (m *MemoryIdx) find(orgId uint32, pattern string) ([]*Node, error) { grandChild := tree.Items[newBranch] if grandChild == nil { corruptIndex.Inc() - log.Error(3, "memory-idx: grandChild is nil. org=%d,patt=%q,i=%d,pos=%d,p=%q,path=%q", orgId, pattern, i, pos, p, newBranch) + log.WithFields(log.Fields{ + "org.id": orgId, + "pattern": pattern, + "position": pos, + "p": p, + "path": newBranch, + }).Error("memory-idx: grandChild is nil") return nil, errors.NewInternal("hit an empty path in the index") } @@ -1038,7 +1120,9 @@ func (m *MemoryIdx) find(orgId uint32, pattern string) ([]*Node, error) { } } - log.Debug("memory-idx: reached pattern length. 
%d nodes matched", len(children)) + log.WithFields(log.Fields{ + "num.matched.nodes": len(children), + }).Error("memory-idx: reached pattern length") return children, nil } @@ -1151,16 +1235,23 @@ func (m *MemoryIdx) delete(orgId uint32, n *Node, deleteEmptyParents, deleteChil tree := m.tree[orgId] deletedDefs := make([]idx.Archive, 0) if deleteChildren && n.HasChildren() { - log.Debug("memory-idx: deleting branch %s", n.Path) + log.WithFields(log.Fields{ + "branch": n.Path, + }).Error("memory-idx: deleting branch") // walk up the tree to find all leaf nodes and delete them. for _, child := range n.Children { node, ok := tree.Items[n.Path+"."+child] if !ok { corruptIndex.Inc() - log.Error(3, "memory-idx: node %q missing. Index is corrupt.", n.Path+"."+child) + log.WithFields(log.Fields{ + "node": n.Path + "." + child, + }).Error("memory-idx: node missing, Index is corrupt") continue } - log.Debug("memory-idx: deleting child %s from branch %s", node.Path, n.Path) + log.WithFields(log.Fields{ + "child": node.Path, + "branch": n.Path, + }).Error("memory-idx: deleting child from branch") deleted := m.delete(orgId, node, false, true) deletedDefs = append(deletedDefs, deleted...) 
} @@ -1169,7 +1260,9 @@ func (m *MemoryIdx) delete(orgId uint32, n *Node, deleteEmptyParents, deleteChil // delete the metricDefs for _, id := range n.Defs { - log.Debug("memory-idx: deleting %s from index", id) + log.WithFields(log.Fields{ + "id": id, + }).Debug("memory-idx: deleting id from index") deletedDefs = append(deletedDefs, *m.defById[id]) delete(m.defById, id) } @@ -1195,11 +1288,16 @@ func (m *MemoryIdx) delete(orgId uint32, n *Node, deleteEmptyParents, deleteChil nodes := strings.Split(n.Path, ".") for i := len(nodes) - 1; i >= 0; i-- { branch := strings.Join(nodes[:i], ".") - log.Debug("memory-idx: removing %s from branch %s", nodes[i], branch) + log.WithFields(log.Fields{ + "node": nodes[i], + "branch": branch, + }).Debug("memory-idx: removing node from branch") bNode, ok := tree.Items[branch] if !ok { corruptIndex.Inc() - log.Error(3, "memory-idx: node %s missing. Index is corrupt.", branch) + log.WithFields(log.Fields{ + "node": branch, + }).Error("memory-idx: node missing, Index is corrupt") continue } if len(bNode.Children) > 1 { @@ -1208,11 +1306,16 @@ if child != nodes[i] { newChildren = append(newChildren, child) } else { - log.Debug("memory-idx: %s removed from children list of branch %s", child, bNode.Path) + log.WithFields(log.Fields{ + "child": child, + "branch": bNode.Path, + }).Debug("memory-idx: child removed from branch") } } bNode.Children = newChildren - log.Debug("memory-idx: branch %s has other children. 
Leaving it in place", bNode.Path) + log.WithFields(log.Fields{ + "branch": bNode.Path, + }).Error("memory-idx: branch has other children, leaving it in place") // no need to delete any parents as they are needed by this node and its // remaining children break @@ -1220,21 +1323,31 @@ func (m *MemoryIdx) delete(orgId uint32, n *Node, deleteEmptyParents, deleteChil if len(bNode.Children) == 0 { corruptIndex.Inc() - log.Error(3, "memory-idx: branch %s has no children while trying to delete %s. Index is corrupt", branch, nodes[i]) + log.WithFields(log.Fields{ + "branch": branch, + "node": nodes[i], + }).Error("memory-idx: branch has no children while trying to delete node, index is corrupt") break } if bNode.Children[0] != nodes[i] { corruptIndex.Inc() - log.Error(3, "memory-idx: %s not in children list for branch %s. Index is corrupt", nodes[i], branch) + log.WithFields(log.Fields{ + "child": nodes[i], + "branch": branch, + }).Error("memory-idx: child is not in children list for branch, index is corrupt") break } bNode.Children = nil if bNode.Leaf() { - log.Debug("memory-idx: branch %s is also a leaf node, keeping it.", branch) + log.WithFields(log.Fields{ + "branch": branch, + }).Error("memory-idx: branch is also a leaf node, keeping it") break } - log.Debug("memory-idx: branch %s has no children and is not a leaf node, deleting it.", branch) + log.WithFields(log.Fields{ + "branch": branch, + }).Error("memory-idx: branch has no children and is not a leaf node, deleting it") delete(tree.Items, branch) } @@ -1334,11 +1447,17 @@ DEFS: n, ok := tree.Items[path] if !ok { m.Unlock() - log.Debug("memory-idx: series %s for orgId:%d was identified for pruning but cannot be found.", path, org) + log.WithFields(log.Fields{ + "series": path, + "org.id": org, + }).Debug("memory-idx: series for orgId was identified for pruning but cannot be found") continue } - log.Debug("memory-idx: series %s for orgId:%d is stale. 
pruning it.", n.Path, org) + log.WithFields(log.Fields{ + "series": n.Path, + "org.id": org, + }).Debug("memory-idx: series for orgId is stale, pruning it") defs := m.delete(org, n, true, false) pruned = append(pruned, defs...) } @@ -1347,7 +1466,9 @@ DEFS: statMetricsActive.Add(-1 * len(pruned)) - log.Info("memory-idx: pruning stale metricDefs from memory for all orgs took %s", time.Since(pre).String()) + log.WithFields(log.Fields{ + "time.taken": time.Since(pre).String(), + }).Info("memory-idx: pruning stale metricDefs from memory for all orgs") statPruneDuration.Value(time.Since(pre)) return pruned, nil @@ -1375,7 +1496,10 @@ func getMatcher(path string) (func([]string) []string, error) { for _, p := range patterns { r, err := regexp.Compile(toRegexp(p)) if err != nil { - log.Debug("memory-idx: regexp failed to compile. %s - %s", p, err) + log.WithFields(log.Fields{ + "pattern": p, + "error": err.Error(), + }).Debug("memory-idx: regexp failed to compile") return nil, errors.NewBadRequest(err.Error()) } regexes = append(regexes, r) @@ -1386,7 +1510,10 @@ func getMatcher(path string) (func([]string) []string, error) { for _, r := range regexes { for _, c := range children { if r.MatchString(c) { - log.Debug("memory-idx: %s =~ %s", c, r.String()) + log.WithFields(log.Fields{ + "pattern": r.String(), + "child": c, + }).Debug("memory-idx: child matches") matches = append(matches, c) } } @@ -1401,7 +1528,10 @@ func getMatcher(path string) (func([]string) []string, error) { for _, p := range patterns { for _, c := range children { if c == p { - log.Debug("memory-idx: %s matches %s", c, p) + log.WithFields(log.Fields{ + "pattern": p, + "child": c, + }).Debug("memory-0idx: child matches") results = append(results, c) break } diff --git a/idx/memory/memory_find_test.go b/idx/memory/memory_find_test.go index 5e23962de8..a1042c9b39 100644 --- a/idx/memory/memory_find_test.go +++ b/idx/memory/memory_find_test.go @@ -10,6 +10,7 @@ import ( "github.com/grafana/metrictank/idx" 
"github.com/raintank/schema" + log "github.com/sirupsen/logrus" ) var ( @@ -690,7 +691,9 @@ func ixFind(b *testing.B, org uint32, q int) { b.Helper() nodes, err := ix.Find(org, queries[q].Pattern, 0) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("error") } if len(nodes) != queries[q].ExpectedResults { for _, n := range nodes { @@ -764,7 +767,9 @@ func BenchmarkConcurrent8Find(b *testing.B) { func ixFindByTag(b *testing.B, org uint32, q int) { series, err := ix.FindByTag(org, tagQueries[q].Expressions, 0) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("error") } if len(series) != tagQueries[q].ExpectedResults { for _, s := range series { diff --git a/idx/memory/tag_query.go b/idx/memory/tag_query.go index 920fd6ad9b..17fe7f2ce4 100644 --- a/idx/memory/tag_query.go +++ b/idx/memory/tag_query.go @@ -12,7 +12,7 @@ import ( "github.com/raintank/schema" "github.com/grafana/metrictank/idx" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) var ( @@ -626,7 +626,10 @@ func (q *TagQuery) testByTagMatch(def *idx.Archive) bool { equal := strings.Index(tag, "=") if equal < 0 { corruptIndex.Inc() - log.Error(3, "memory-idx: ID %q has tag %q in index without '=' sign", def.Id, tag) + log.WithFields(log.Fields{ + "id": def.Id, + "tag": tag, + }).Error("memory-idx: id has tag in index without '=' sign") continue } key := tag[:equal] @@ -733,7 +736,9 @@ func (q *TagQuery) filterIdsFromChan(idCh, resCh chan schema.MKey) { // should never happen because every ID in the tag index // must be present in the byId lookup table corruptIndex.Inc() - log.Error(3, "memory-idx: ID %q is in tag index but not in the byId lookup table", id) + log.WithFields(log.Fields{ + "id": id, + }).Error("memory-idx: id is in tag index but not in the byId lookup table") continue } @@ -852,7 +857,9 @@ IDS: // should never happen because every ID in the tag index // must be present in the 
byId lookup table corruptIndex.Inc() - log.Error(3, "memory-idx: ID %q is in tag index but not in the byId lookup table", id) + log.WithFields(log.Fields{ + "id": id, + }).Error("memory-idx: id is in tag index but not in the byId lookup table") continue } @@ -863,7 +870,10 @@ IDS: equal := strings.Index(tag, "=") if equal < 0 { corruptIndex.Inc() - log.Error(3, "memory-idx: ID %q has tag %q in index without '=' sign", id, tag) + log.WithFields(log.Fields{ + "id": id, + "tag": tag, + }).Error("memory-idx: id has tag in index without '=' sign") continue } diff --git a/input/carbon/carbon.go b/input/carbon/carbon.go index 8c6d80ffdb..8e9d33bc33 100644 --- a/input/carbon/carbon.go +++ b/input/carbon/carbon.go @@ -15,8 +15,8 @@ import ( "github.com/grafana/metrictank/stats" "github.com/metrics20/go-metrics20/carbon20" "github.com/raintank/schema" - "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" + log "github.com/sirupsen/logrus" ) // metric input.carbon.metrics_per_message is how many metrics per message were seen. in carbon's case this is always 1. 
@@ -93,7 +93,9 @@ func ConfigProcess() { func New() *Carbon { addrT, err := net.ResolveTCPAddr("tcp", addr) if err != nil { - log.Fatal(4, "carbon-in: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("carbon-in: error") } return &Carbon{ addrStr: addr, @@ -110,11 +112,15 @@ func (c *Carbon) Start(handler input.Handler, fatal chan struct{}) error { c.Handler = handler l, err := net.ListenTCP("tcp", c.addr) if nil != err { - log.Error(4, "carbon-in: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("carbon-in: error") return err } c.listener = l - log.Info("carbon-in: listening on %v/tcp", c.addr) + log.WithFields(log.Fields{ + "tcp.addr": c.addr.String(), + }).Info("carbon-in: listening on tcp address") c.quit = make(chan struct{}) go c.accept() return nil @@ -140,7 +146,9 @@ func (c *Carbon) accept() { return default: } - log.Error(4, "carbon-in: Accept Error: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("carbon-in: Accept Error") return } c.handlerWaitGroup.Add(1) @@ -150,7 +158,7 @@ func (c *Carbon) accept() { } func (c *Carbon) Stop() { - log.Info("carbon-in: shutting down.") + log.Info("carbon-in: shutting down") close(c.quit) c.listener.Close() c.connTrack.CloseAll() @@ -176,7 +184,9 @@ func (c *Carbon) handle(conn net.Conn) { default: } if io.EOF != err { - log.Error(4, "carbon-in: Recv error: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("carbon-in: Recv error") } break } @@ -185,7 +195,9 @@ func (c *Carbon) handle(conn net.Conn) { key, val, ts, err := carbon20.ValidatePacket(buf, carbon20.MediumLegacy, carbon20.NoneM20) if err != nil { metricsDecodeErr.Inc() - log.Error(4, "carbon-in: invalid metric: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("carbon-in: invalid metric") continue } nameSplits := strings.Split(string(key), ";") diff --git a/input/input.go b/input/input.go index 
f0a4640dcd..689e2bce7c 100644 --- a/input/input.go +++ b/input/input.go @@ -11,7 +11,7 @@ import ( "github.com/grafana/metrictank/idx" "github.com/grafana/metrictank/mdata" "github.com/grafana/metrictank/stats" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) type Handler interface { @@ -64,7 +64,9 @@ func (in DefaultHandler) ProcessMetricPoint(point schema.MetricPoint, format msg } if !point.Valid() { in.invalidMP.Inc() - log.Debug("in: Invalid metric %v", point) + log.WithFields(log.Fields{ + "point": point, + }).Debug("in: invalid metric") return } @@ -86,18 +88,26 @@ func (in DefaultHandler) ProcessMetricData(md *schema.MetricData, partition int3 err := md.Validate() if err != nil { in.invalidMD.Inc() - log.Debug("in: Invalid metric %v: %s", md, err) + log.WithFields(log.Fields{ + "metric.data": md, + "error": err.Error(), + }).Debug("in: invalid metric") return } if md.Time == 0 { in.invalidMD.Inc() - log.Warn("in: invalid metric. metric.Time is 0. 
%s", md.Id) + log.WithFields(log.Fields{ + "id": md.Id, + }).Warn("in: invalid metric, metric.Time is 0") return } mkey, err := schema.MKeyFromString(md.Id) if err != nil { - log.Error(3, "in: Invalid metric %v: could not parse ID: %s", md, err) + log.WithFields(log.Fields{ + "metric.data": md, + "error": err.Error(), + }).Error("in: invalid metric, could not parse id") return } diff --git a/input/kafkamdm/kafkamdm.go b/input/kafkamdm/kafkamdm.go index c29d34003e..83cc77725e 100644 --- a/input/kafkamdm/kafkamdm.go +++ b/input/kafkamdm/kafkamdm.go @@ -12,8 +12,8 @@ import ( "github.com/raintank/schema/msg" "github.com/Shopify/sarama" - "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" + log "github.com/sirupsen/logrus" "github.com/grafana/metrictank/cluster" "github.com/grafana/metrictank/input" @@ -94,13 +94,13 @@ func ConfigProcess(instance string) { } if offsetCommitInterval == 0 { - log.Fatal(4, "kafkamdm: offset-commit-interval must be greater then 0") + log.Fatal("kafkamdm: offset-commit-interval must be greater than 0") } if consumerMaxWaitTime == 0 { - log.Fatal(4, "kafkamdm: consumer-max-wait-time must be greater then 0") + log.Fatal("kafkamdm: consumer-max-wait-time must be greater than 0") } if consumerMaxProcessingTime == 0 { - log.Fatal(4, "kafkamdm: consumer-max-processing-time must be greater then 0") + log.Fatal("kafkamdm: consumer-max-processing-time must be greater than 0") } var err error switch offsetStr { @@ -110,13 +110,17 @@ default: offsetDuration, err = time.ParseDuration(offsetStr) if err != nil { - log.Fatal(4, "kafkamdm: invalid offest format. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafkamdm: invalid offset format") } } offsetMgr, err = kafka.NewOffsetMgr(DataDir) if err != nil { - log.Fatal(4, "kafka-mdm couldnt create offsetMgr. 
%s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafkamdm: couldn't create offsetMgr") } brokers = strings.Split(brokerStr, ",") topics = strings.Split(topicStr, ",") @@ -133,20 +137,28 @@ func ConfigProcess(instance string) { config.Version = sarama.V0_10_0_0 err = config.Validate() if err != nil { - log.Fatal(2, "kafka-mdm invalid config: %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafkamdm: invalid config") } // validate our partitions client, err := sarama.NewClient(brokers, config) if err != nil { - log.Fatal(4, "kafka-mdm failed to create client. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafkamdm: failed to create client") } defer client.Close() availParts, err := kafka.GetPartitions(client, topics) if err != nil { - log.Fatal(4, "kafka-mdm: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafkamdm: GetPartitions error") } - log.Info("kafka-mdm: available partitions %v", availParts) + log.WithFields(log.Fields{ + "available.partitions": availParts, + }).Info("kafkamdm: partitions available") if partitionStr == "*" { partitions = availParts } else { @@ -154,13 +166,17 @@ func ConfigProcess(instance string) { for _, part := range parts { i, err := strconv.Atoi(part) if err != nil { - log.Fatal(4, "could not parse partition %q. partitions must be '*' or a comma separated list of id's", part) + log.WithFields(log.Fields{ + "partition": part, + }).Fatal("kafkamdm: could not parse partition, partitions must be '*' or a comma separated list of id's") } partitions = append(partitions, int32(i)) } missing := kafka.DiffPartitions(partitions, availParts) if len(missing) > 0 { - log.Fatal(4, "kafka-mdm: configured partitions not in list of available partitions. 
missing %v", missing) + log.WithFields(log.Fields{ + "missing.partitions": missing, + }).Fatal("kafkamdm: configured partitions not in list of available partitions") } } // record our partitions so others (MetricIdx) can use the partitioning information. @@ -186,13 +202,17 @@ func ConfigProcess(instance string) { func New() *KafkaMdm { client, err := sarama.NewClient(brokers, config) if err != nil { - log.Fatal(4, "kafka-mdm failed to create client. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafkamdm: failed to create client") } consumer, err := sarama.NewConsumerFromClient(client) if err != nil { - log.Fatal(2, "kafka-mdm failed to create consumer: %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafkamdm: failed to create consumer") } - log.Info("kafka-mdm consumer created without error") + log.Info("kafkamdm: consumer created without error") k := KafkaMdm{ consumer: consumer, client: client, @@ -218,14 +238,22 @@ func (k *KafkaMdm) Start(handler input.Handler, fatal chan struct{}) error { case "last": offset, err = offsetMgr.Last(topic, partition) if err != nil { - log.Error(4, "kafka-mdm: Failed to get %q duration offset for %s:%d. 
%q", offsetStr, topic, partition, err) + log.WithFields(log.Fields{ + "offset": offsetStr, + "topic": topic, + "partition": partition, + "error": err.Error(), + }).Error("kafkamdm: failed to get duration offset") return err } default: offset, err = k.client.GetOffset(topic, partition, time.Now().Add(-1*offsetDuration).UnixNano()/int64(time.Millisecond)) if err != nil { offset = sarama.OffsetOldest - log.Warn("kafka-mdm failed to get offset %s: %s -> will use oldest instead", offsetDuration, err) + log.WithFields(log.Fields{ + "offset.duration": offsetDuration, + "error": err.Error(), + }).Warn("kafkamdm: failed to get offset, will use oldest instead") } } k.wg.Add(1) @@ -263,7 +291,9 @@ func (k *KafkaMdm) tryGetOffset(topic string, partition int32, offset int64, att if attempt == attempts { break } - log.Warn("kafka-mdm %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Warn("kafkamdm: error") attempt += 1 time.Sleep(sleep) } @@ -281,7 +311,9 @@ func (k *KafkaMdm) consumePartition(topic string, partition int32, currentOffset // determine the pos of the topic and the initial offset of our consumer newest, err := k.tryGetOffset(topic, partition, sarama.OffsetNewest, 7, time.Second*10) if err != nil { - log.Error(3, "kafka-mdm %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("kafkamdm: failed to get offset") close(k.fatal) return } @@ -290,7 +322,9 @@ func (k *KafkaMdm) consumePartition(topic string, partition int32, currentOffset } else if currentOffset == sarama.OffsetOldest { currentOffset, err = k.tryGetOffset(topic, partition, sarama.OffsetOldest, 7, time.Second*10) if err != nil { - log.Error(3, "kafka-mdm %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("kafkamdm: failed to get offset") close(k.fatal) return } @@ -300,10 +334,18 @@ func (k *KafkaMdm) consumePartition(topic string, partition int32, currentOffset partitionLogSizeMetric.Set(int(newest)) partitionLagMetric.Set(int(newest - 
currentOffset)) - log.Info("kafka-mdm: consuming from %s:%d from offset %d", topic, partition, currentOffset) + log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + "current.offset": currentOffset, + }).Info("kafkamdm: consuming") pc, err := k.consumer.ConsumePartition(topic, partition, currentOffset) if err != nil { - log.Error(4, "kafka-mdm: failed to start partitionConsumer for %s:%d. %s", topic, partition, err) + log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + "error": err.Error(), + }).Error("kafkamdm: failed to start partitionConsumer") close(k.fatal) return } @@ -314,26 +356,42 @@ func (k *KafkaMdm) consumePartition(topic string, partition int32, currentOffset case msg, ok := <-messages: // https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions#why-am-i-getting-a-nil-message-from-the-sarama-consumer if !ok { - log.Error(3, "kafka-mdm: kafka consumer for %s:%d has shutdown. stop consuming", topic, partition) + log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + }).Error("kafkamdm: kafka consumer has shutdown, stop consuming") if err := offsetMgr.Commit(topic, partition, currentOffset); err != nil { - log.Error(3, "kafka-mdm failed to commit offset for %s:%d, %s", topic, partition, err) + log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + "error": err.Error(), + }).Error("kafkamdm: failed to commit offset") } close(k.fatal) return } - if LogLevel < 2 { - log.Debug("kafka-mdm received message: Topic %s, Partition: %d, Offset: %d, Key: %x", msg.Topic, msg.Partition, msg.Offset, msg.Key) - } + log.WithFields(log.Fields{ + "topic": msg.Topic, + "partition": msg.Partition, + "offset": msg.Offset, + "key": msg.Key, + }).Debug("kafkamdm: received message") k.handleMsg(msg.Value, partition) currentOffset = msg.Offset case ts := <-ticker.C: if err := offsetMgr.Commit(topic, partition, currentOffset); err != nil { - log.Error(3, "kafka-mdm failed to commit offset for 
%s:%d, %s", topic, partition, err) + log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + "error": err.Error(), + }).Error("kafkamdm: failed to commit offset") } k.lagMonitor.StoreOffset(partition, currentOffset, ts) newest, err := k.tryGetOffset(topic, partition, sarama.OffsetNewest, 1, 0) if err != nil { - log.Error(3, "kafka-mdm %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("kafkamdm: failed to get offset") } else { partitionLogSizeMetric.Set(int(newest)) } @@ -347,9 +405,16 @@ func (k *KafkaMdm) consumePartition(topic string, partition int32, currentOffset case <-k.stopConsuming: pc.Close() if err := offsetMgr.Commit(topic, partition, currentOffset); err != nil { - log.Error(3, "kafka-mdm failed to commit offset for %s:%d, %s", topic, partition, err) + log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + "error": err.Error(), + }).Error("kafkamdm: failed to commit offset") } - log.Info("kafka-mdm consumer for %s:%d ended.", topic, partition) + log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + }).Info("kafkamdm: consumer ended") return } } @@ -361,7 +426,9 @@ func (k *KafkaMdm) handleMsg(data []byte, partition int32) { _, point, err := msg.ReadPointMsg(data, uint32(orgId)) if err != nil { metricsDecodeErr.Inc() - log.Error(3, "kafka-mdm decode error, skipping message. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("kafkamdm: decode error, skipping message") return } k.Handler.ProcessMetricPoint(point, format, partition) @@ -372,7 +439,9 @@ func (k *KafkaMdm) handleMsg(data []byte, partition int32) { _, err := md.UnmarshalMsg(data) if err != nil { metricsDecodeErr.Inc() - log.Error(3, "kafka-mdm decode error, skipping message. 
%s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("kafkamdm: decode error, skipping message") return } metricsPerMessage.ValueUint32(1) diff --git a/input/prometheus/prometheus.go b/input/prometheus/prometheus.go index 2f65c5e0b1..e04e77dcb4 100644 --- a/input/prometheus/prometheus.go +++ b/input/prometheus/prometheus.go @@ -13,8 +13,8 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/prompb" "github.com/raintank/schema" - "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" + log "github.com/sirupsen/logrus" ) var ( @@ -71,14 +71,18 @@ func (p *prometheusWriteHandler) handle(w http.ResponseWriter, req *http.Request if err != nil { w.WriteHeader(400) w.Write([]byte(fmt.Sprintf("Read Error, %v", err))) - log.Error(3, "Read Error, %v", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("prometheus-in: read error") return } reqBuf, err := snappy.Decode(nil, compressed) if err != nil { w.WriteHeader(400) w.Write([]byte(fmt.Sprintf("Decode Error, %v", err))) - log.Error(3, "Decode Error, %v", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("prometheus-in: decode error") return } @@ -86,7 +90,9 @@ func (p *prometheusWriteHandler) handle(w http.ResponseWriter, req *http.Request if err := proto.Unmarshal(reqBuf, &req); err != nil { w.WriteHeader(400) w.Write([]byte(fmt.Sprintf("Unmarshal Error, %v", err))) - log.Error(3, "Unmarshal Error, %v", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("prometheus-in: unmarshal error") return } @@ -119,7 +125,9 @@ func (p *prometheusWriteHandler) handle(w http.ResponseWriter, req *http.Request } else { w.WriteHeader(400) w.Write([]byte("invalid metric received: __name__ label can not equal \"\"")) - log.Warn("prometheus metric received with empty name: %v", ts.String()) + log.WithFields(log.Fields{ + "timeseries": ts.String(), + }).Warn("prometheus-in: metric received with empty name") return } } 
diff --git a/kafka/offsetMgr.go b/kafka/offsetMgr.go index 9c6eee4c92..70827d5e67 100644 --- a/kafka/offsetMgr.go +++ b/kafka/offsetMgr.go @@ -8,7 +8,7 @@ import ( "path/filepath" "sync" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/storage" @@ -56,7 +56,7 @@ func NewOffsetMgr(dir string) (*OffsetMgr, error) { db, err := leveldb.OpenFile(dbFile, &opt.Options{}) if err != nil { if _, ok := err.(*storage.ErrCorrupted); ok { - log.Warn("partitionOffsets.db is corrupt. Recovering.") + log.Warn("partitionOffsets.db is corrupt, recovering") db, err = leveldb.RecoverFile(dbFile, &opt.Options{}) if err != nil { return nil, err @@ -65,7 +65,9 @@ return nil, err } } - log.Info("Opened %s", dbFile) + log.WithFields(log.Fields{ + "file": dbFile, + }).Info("opened file") mgr := &OffsetMgr{ path: dbFile, db: db, @@ -89,7 +91,7 @@ func (o *OffsetMgr) Close() { o.Lock() o.users-- if o.users == 0 { - log.Info("Closing partitionsOffset DB.") + log.Info("closing partitionsOffset DB") o.db.Close() // remove the mgr from the registry @@ -106,7 +108,11 @@ func (o *OffsetMgr) Commit(topic string, partition int32, offset int64) error { if err := binary.Write(data, binary.LittleEndian, offset); err != nil { return err } - log.Debug("committing offset %d for %s:%d to partitionsOffset.db", offset, topic, partition) + log.WithFields(log.Fields{ + "offset": offset, + "topic": topic, + "partition": partition, + }).Debug("committing offset to partitionsOffset.db") return o.db.Put(key.Bytes(), data.Bytes(), &opt.WriteOptions{Sync: true}) } @@ -116,7 +122,10 @@ func (o *OffsetMgr) Last(topic string, partition int32) (int64, error) { data, err := o.db.Get(key.Bytes(), nil) if err != nil { if err == leveldb.ErrNotFound { - log.Debug("no offset recorded for %s:%d", topic, partition) + 
log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + }).Debug("no offset recorded") return -1, nil } return 0, err @@ -126,6 +135,10 @@ func (o *OffsetMgr) Last(topic string, partition int32) (int64, error) { if err != nil { return 0, err } - log.Debug("found saved offset %d for %s:%d", offset, topic, partition) + log.WithFields(log.Fields{ + "offset": offset, + "topic": topic, + "partition": partition, + }).Debug("found saved offset") return offset, nil } diff --git a/logger/logger.go b/logger/logger.go new file mode 100644 index 0000000000..a85d0fa06d --- /dev/null +++ b/logger/logger.go @@ -0,0 +1,154 @@ +// Package logger has a custom TextFormatter for use with logrus +package logger + +import ( + "bytes" + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/sirupsen/logrus" +) + +const defaultTimestampFormat = time.RFC3339 + +// TextFormatter formats logs into text +type TextFormatter struct { + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps + DisableTimestamp bool + + // Disable the conversion of the log levels to uppercase + DisableUppercase bool + + // Timestamp format to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output + DisableSorting bool + + // Wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Can be set to the override the default quoting character " + // with something else. For example: ', or `. + QuoteCharacter string + + // The name of the module (webserver-5, redis-2, cluster-kafka-2, etc...) 
+ // prints before the log message + ModuleName string + + sync.Once +} + +func (f *TextFormatter) init(entry *logrus.Entry) { + if len(f.QuoteCharacter) == 0 { + f.QuoteCharacter = "\"" + } +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *logrus.Entry) ([]byte, error) { + var b *bytes.Buffer + + keys := make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + lastKeyIdx := len(keys) - 1 + + if !f.DisableSorting { + sort.Strings(keys) + } + + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + f.Do(func() { f.init(entry) }) + + if !f.DisableTimestamp { + b.WriteString(entry.Time.Format(timestampFormat)) + b.WriteByte(' ') + } + + if !f.DisableUppercase { + b.WriteString(strings.ToUpper(entry.Level.String())) + } else { + b.WriteString(entry.Level.String()) + } + b.WriteByte(' ') + + if f.ModuleName != "" { + b.WriteString(f.ModuleName) + b.WriteByte(' ') + } + + if entry.Message != "" { + b.WriteString(entry.Message) + if lastKeyIdx >= 0 { + b.WriteByte(' ') + } + } + + for i, key := range keys { + f.appendKeyValue(b, key, entry.Data[key], lastKeyIdx != i) + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}, appendSpace bool) { + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) + + if appendSpace { + b.WriteByte(' ') + } +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + switch value := value.(type) { + 
case string: + if !f.needsQuoting(value) { + b.WriteString(value) + } else { + fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter) + } + case error: + errmsg := value.Error() + if !f.needsQuoting(errmsg) { + b.WriteString(errmsg) + } else { + fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter) + } + default: + fmt.Fprint(b, value) + } +} diff --git a/mdata/aggmetric.go b/mdata/aggmetric.go index b5f69d588d..01e02b1014 100644 --- a/mdata/aggmetric.go +++ b/mdata/aggmetric.go @@ -13,7 +13,7 @@ import ( "github.com/grafana/metrictank/mdata/cache" "github.com/grafana/metrictank/mdata/chunk" "github.com/raintank/schema" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) var ErrInvalidRange = errors.New("AggMetric: invalid range: from must be less than to") @@ -88,9 +88,10 @@ func (a *AggMetric) SyncChunkSaveState(ts uint32) { if ts > a.lastSaveStart { a.lastSaveStart = ts } - if LogLevel < 2 { - log.Debug("AM metric %s at chunk T0=%d has been saved.", a.Key, ts) - } + log.WithFields(log.Fields{ + "key": a.Key, + "chunk.t0": ts, + }).Debug("AM: metric has been saved") } // Sync the saved state of a chunk by its T0. @@ -100,9 +101,9 @@ func (a *AggMetric) SyncAggregatedChunkSaveState(ts uint32, consolidator consoli if a.span == aggSpan { switch consolidator { case consolidation.None: - panic("cannot get an archive for no consolidation") + log.Panic("AM: cannot get an archive for no consolidation") case consolidation.Avg: - panic("avg consolidator has no matching Archive(). you need sum and cnt") + log.Panic("AM: avg consolidator has no matching Archive(). 
you need sum and cnt") case consolidation.Cnt: if a.cntMetric != nil { a.cntMetric.SyncChunkSaveState(ts) @@ -129,7 +130,10 @@ func (a *AggMetric) SyncAggregatedChunkSaveState(ts uint32, consolidator consoli } return default: - panic(fmt.Sprintf("internal error: no such consolidator %q with span %d", consolidator, aggSpan)) + log.WithFields(log.Fields{ + "consolidator": consolidator, + "span": aggSpan, + }).Panic("AM: internal error: no such consolidator with span") } } } @@ -137,7 +141,11 @@ func (a *AggMetric) SyncAggregatedChunkSaveState(ts uint32, consolidator consoli func (a *AggMetric) getChunk(pos int) *chunk.Chunk { if pos < 0 || pos >= len(a.Chunks) { - panic(fmt.Sprintf("aggmetric %s queried for chunk %d out of %d chunks", a.Key, pos, len(a.Chunks))) + log.WithFields(log.Fields{ + "key": a.Key, + "chunk": pos, + "total.chunks": len(a.Chunks), + }).Panic("AM: aggmetric queried for chunk") } return a.Chunks[pos] } @@ -150,12 +158,16 @@ func (a *AggMetric) GetAggregated(consolidator consolidation.Consolidator, aggSp switch consolidator { case consolidation.None: err := errors.New("internal error: AggMetric.GetAggregated(): cannot get an archive for no consolidation") - log.Error(3, "AM: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("AM: no consolidation") badConsolidator.Inc() return Result{}, err case consolidation.Avg: err := errors.New("internal error: AggMetric.GetAggregated(): avg consolidator has no matching Archive(). 
you need sum and cnt") - log.Error(3, "AM: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("AM: average consolidation") badConsolidator.Inc() return Result{}, err case consolidation.Cnt: @@ -170,7 +182,9 @@ func (a *AggMetric) GetAggregated(consolidator consolidation.Consolidator, aggSp agg = a.sumMetric default: err := fmt.Errorf("internal error: AggMetric.GetAggregated(): unknown consolidator %q", consolidator) - log.Error(3, "AM: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("AM: error") badConsolidator.Inc() return Result{}, err } @@ -181,7 +195,9 @@ func (a *AggMetric) GetAggregated(consolidator consolidation.Consolidator, aggSp } } err := fmt.Errorf("internal error: AggMetric.GetAggregated(): unknown aggSpan %d", aggSpan) - log.Error(3, "AM: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("AM: unknown aggSpan") badAggSpan.Inc() return Result{}, err } @@ -194,9 +210,14 @@ func (a *AggMetric) GetAggregated(consolidator consolidation.Consolidator, aggSp // * oldest point we have, so that if your query needs data before it, the caller knows when to query the store func (a *AggMetric) Get(from, to uint32) (Result, error) { pre := time.Now() - if LogLevel < 2 { - log.Debug("AM %s Get(): %d - %d (%s - %s) span:%ds", a.Key, from, to, TS(from), TS(to), to-from-1) - } + log.WithFields(log.Fields{ + "key": a.Key, + "from": from, + "to": to, + "timestamp.from": TS(from), + "timestamp.to": TS(to), + "span": to - from - 1, + }).Debug("AM: Get()") if from >= to { return Result{}, ErrInvalidRange } @@ -219,9 +240,9 @@ func (a *AggMetric) Get(from, to uint32) (Result, error) { if len(a.Chunks) == 0 { // we dont have any data yet. 
- if LogLevel < 2 { - log.Debug("AM %s Get(): no data for requested range.", a.Key) - } + log.WithFields(log.Fields{ + "key": a.Key, + }).Debug("AM: Get(), no data for requested range") return result, nil } @@ -239,9 +260,9 @@ func (a *AggMetric) Get(from, to uint32) (Result, error) { // only aware of older data and not the newer data in cassandra. this is unlikely // and it's better to not serve this scenario well in favor of the above case. // seems like a fair tradeoff anyway that you have to refill all the way first. - if LogLevel < 2 { - log.Debug("AM %s Get(): no data for requested range.", a.Key) - } + log.WithFields(log.Fields{ + "key": a.Key, + }).Debug("AM: Get(), no data for requested range") result.Oldest = from return result, nil } @@ -264,15 +285,17 @@ func (a *AggMetric) Get(from, to uint32) (Result, error) { oldestChunk := a.getChunk(oldestPos) if oldestChunk == nil { - log.Error(3, "%s", ErrNilChunk) + log.WithFields(log.Fields{ + "error": ErrNilChunk.Error(), + }).Error("AM: failed to get oldest chunk") return result, ErrNilChunk } if to <= oldestChunk.T0 { // the requested time range ends before any data we have. 
- if LogLevel < 2 { - log.Debug("AM %s Get(): no data for requested range", a.Key) - } + log.WithFields(log.Fields{ + "key": a.Key, + }).Debug("AM: Get(), no data for requested range") if oldestChunk.First { result.Oldest = a.firstTs } else { @@ -291,7 +314,9 @@ func (a *AggMetric) Get(from, to uint32) (Result, error) { oldestChunk = a.getChunk(oldestPos) if oldestChunk == nil { result.Oldest = to - log.Error(3, "%s", ErrNilChunk) + log.WithFields(log.Fields{ + "error": ErrNilChunk.Error(), + }).Error("AM: failed to get oldest chunk") return result, ErrNilChunk } } @@ -311,7 +336,9 @@ func (a *AggMetric) Get(from, to uint32) (Result, error) { newestChunk = a.getChunk(newestPos) if newestChunk == nil { result.Oldest = to - log.Error(3, "%s", ErrNilChunk) + log.WithFields(log.Fields{ + "error": ErrNilChunk.Error(), + }).Error("AM: failed to get newest chunk") return result, ErrNilChunk } } @@ -344,9 +371,12 @@ func (a *AggMetric) Get(from, to uint32) (Result, error) { // this function must only be called while holding the lock func (a *AggMetric) addAggregators(ts uint32, val float64) { for _, agg := range a.aggregators { - if LogLevel < 2 { - log.Debug("AM %s pushing %d,%f to aggregator %d", a.Key, ts, val, agg.span) - } + log.WithFields(log.Fields{ + "key": a.Key, + "timestamp": ts, + "value": val, + "aggregator": agg.span, + }).Debug("AM: pushing to aggregator") agg.Add(ts, val) } } @@ -378,7 +408,7 @@ func (a *AggMetric) persist(pos int) { // b) a primary failed and this node was promoted to be primary but metric consuming is lagging. 
// c) chunk was persisted by GC (stale) and then new data triggered another persist call // d) dropFirstChunk is enabled and this is the first chunk - log.Debug("AM persist(): duplicate persist call for chunk.") + log.Debug("AM: persist(), duplicate persist call for chunk.") return } @@ -403,9 +433,10 @@ func (a *AggMetric) persist(pos int) { } previousChunk := a.Chunks[previousPos] for (previousChunk.T0 < chunk.T0) && (a.lastSaveStart < previousChunk.T0) { - if LogLevel < 2 { - log.Debug("AM persist(): old chunk needs saving. Adding %s:%d to writeQueue", a.Key, previousChunk.T0) - } + log.WithFields(log.Fields{ + "key": a.Key, + "chunk": previousChunk.T0, + }).Debug("AM: persist(), old chunk needs saving, adding to write queue") pending = append(pending, &ChunkWriteRequest{ Metric: a, Key: a.Key, @@ -424,9 +455,9 @@ func (a *AggMetric) persist(pos int) { // Every chunk with a T0 <= this chunks' T0 is now either saved, or in the writeQueue. a.lastSaveStart = chunk.T0 - if LogLevel < 2 { - log.Debug("AM persist(): sending %d chunks to write queue", len(pending)) - } + log.WithFields(log.Fields{ + "num.chunks": len(pending), + }).Debug("AM: persist(), sending chunks to write queue") pendingChunk := len(pending) - 1 @@ -438,9 +469,12 @@ func (a *AggMetric) persist(pos int) { // last-to-first ensuring that older data is added to the store // before newer data. 
for pendingChunk >= 0 { - if LogLevel < 2 { - log.Debug("AM persist(): sealing chunk %d/%d (%s:%d) and adding to write queue.", pendingChunk, len(pending), a.Key, chunk.T0) - } + log.WithFields(log.Fields{ + "pending.chunk": pendingChunk, + "total.chunks": len(pending), + "current.key": a.Key, + "current.chunk": chunk.T0, + }).Debug("AM: persist(), sealing chunk and adding to write queue") a.store.Add(pending[pendingChunk]) pendingChunk-- } @@ -484,10 +518,17 @@ func (a *AggMetric) add(ts uint32, val float64) { a.firstTs = ts if err := a.Chunks[0].Push(ts, val); err != nil { - panic(fmt.Sprintf("FATAL ERROR: this should never happen. Pushing initial value <%d,%f> to new chunk at pos 0 failed: %q", ts, val, err)) + log.WithFields(log.Fields{ + "timestamp": ts, + "value": val, + "error": err.Error(), + }).Panic("AM: this should never happen, pushing initial value to new chunk at pos 0 failed") } - log.Debug("AM %s Add(): created first chunk with first point: %v", a.Key, a.Chunks[0]) + log.WithFields(log.Fields{ + "key": a.Key, + "first.point": a.Chunks[0], + }).Debug("AM: add(), created first chunk") a.lastWrite = uint32(time.Now().Unix()) if a.dropFirstChunk { a.lastSaveStart = t0 @@ -509,14 +550,25 @@ func (a *AggMetric) add(ts uint32, val float64) { } if err := currentChunk.Push(ts, val); err != nil { - log.Debug("AM failed to add metric to chunk for %s. %s", a.Key, err) + log.WithFields(log.Fields{ + "key": a.Key, + "error": err.Error(), + }).Debug("AM: failed to add metric to chunk") metricsTooOld.Inc() return } a.lastWrite = uint32(time.Now().Unix()) - log.Debug("AM %s Add(): pushed new value to last chunk: %v", a.Key, a.Chunks[0]) + log.WithFields(log.Fields{ + "key": a.Key, + "last.chunk": a.Chunks[0], + }).Debug("AM: add(), pushed new value to last chunk") } else if t0 < currentChunk.T0 { - log.Debug("AM Point at %d has t0 %d, goes back into previous chunk. 
CurrentChunk t0: %d, LastTs: %d", ts, t0, currentChunk.T0, currentChunk.LastTs) + log.WithFields(log.Fields{ + "point.timestamp": ts, + "point.t0": t0, + "current.chunk.t0": currentChunk.T0, + "current.chunk.last.timestamp": currentChunk.LastTs, + }).Debug("AM: point goes back into previous chunk") metricsTooOld.Inc() return } else { @@ -530,9 +582,10 @@ func (a *AggMetric) add(ts uint32, val float64) { a.pushToCache(currentChunk) // If we are a primary node, then add the chunk to the write queue to be saved to Cassandra if cluster.Manager.IsPrimary() { - if LogLevel < 2 { - log.Debug("AM persist(): node is primary, saving chunk. %s T0: %d", a.Key, currentChunk.T0) - } + log.WithFields(log.Fields{ + "key": a.Key, + "chunk": currentChunk.T0, + }).Debug("AM: persist(), node is primary, saving chunk") // persist the chunk. If the writeQueue is full, then this will block. a.persist(a.CurrentChunkPos) } @@ -546,17 +599,36 @@ func (a *AggMetric) add(ts uint32, val float64) { if len(a.Chunks) < int(a.NumChunks) { a.Chunks = append(a.Chunks, chunk.New(t0)) if err := a.Chunks[a.CurrentChunkPos].Push(ts, val); err != nil { - panic(fmt.Sprintf("FATAL ERROR: this should never happen. Pushing initial value <%d,%f> to new chunk at pos %d failed: %q", ts, val, a.CurrentChunkPos, err)) + log.WithFields(log.Fields{ + "timestamp": ts, + "value": val, + "chunk.position": a.CurrentChunkPos, + "error": err.Error(), + }).Panic("AM: this should never happen, pushing initial value to new chunk failed") } - log.Debug("AM %s Add(): added new chunk to buffer. now %d chunks. 
and added the new point: %s", a.Key, a.CurrentChunkPos+1, a.Chunks[a.CurrentChunkPos]) + log.WithFields(log.Fields{ + "key": a.Key, + "num.chunks": a.CurrentChunkPos + 1, + "point": a.Chunks[a.CurrentChunkPos], + }).Debug("AM: add(), added new chunk to buffer and added new point") } else { chunkClear.Inc() a.Chunks[a.CurrentChunkPos].Clear() a.Chunks[a.CurrentChunkPos] = chunk.New(t0) if err := a.Chunks[a.CurrentChunkPos].Push(ts, val); err != nil { - panic(fmt.Sprintf("FATAL ERROR: this should never happen. Pushing initial value <%d,%f> to new chunk at pos %d failed: %q", ts, val, a.CurrentChunkPos, err)) + log.WithFields(log.Fields{ + "timestamp": ts, + "value": val, + "chunk.position": a.CurrentChunkPos, + "error": err.Error(), + }).Panic("AM: this should never happen, pushing initial value to new chunk failed") } - log.Debug("AM %s Add(): cleared chunk at %d of %d and replaced with new. and added the new point: %s", a.Key, a.CurrentChunkPos, len(a.Chunks), a.Chunks[a.CurrentChunkPos]) + log.WithFields(log.Fields{ + "key": a.Key, + "current.chunk": a.CurrentChunkPos, + "total.chunks": len(a.Chunks), + "point": a.Chunks[a.CurrentChunkPos], + }).Debug("AM: add(), cleared chunk and replaced with new, then added new point") } a.lastWrite = uint32(time.Now().Unix()) @@ -643,12 +715,16 @@ func (a *AggMetric) GC(now, chunkMinTs, metricMinTs uint32) bool { } else { // chunk hasn't been written to in a while, and is not yet closed. Let's close it and persist it if // we are a primary - log.Debug("Found stale Chunk, adding end-of-stream bytes. key: %v T0: %d", a.Key, currentChunk.T0) + log.WithFields(log.Fields{ + "key": a.Key, + "chunk.t0": currentChunk.T0, + }).Debug("AM: found stale chunk, adding end-of-stream bytes") currentChunk.Finish() if cluster.Manager.IsPrimary() { - if LogLevel < 2 { - log.Debug("AM persist(): node is primary, saving chunk. 
%v T0: %d", a.Key, currentChunk.T0) - } + log.WithFields(log.Fields{ + "key": a.Key, + "chunk.t0": currentChunk.T0, + }).Debug("AM: persist(), node is primary, saving chunk") // persist the chunk. If the writeQueue is full, then this will block. a.persist(a.CurrentChunkPos) } diff --git a/mdata/aggmetrics.go b/mdata/aggmetrics.go index 8d4df49a7c..01852b03f5 100644 --- a/mdata/aggmetrics.go +++ b/mdata/aggmetrics.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/metrictank/mdata/cache" "github.com/raintank/schema" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) // AggMetrics is an in-memory store of AggMetric objects @@ -48,7 +48,7 @@ func (ms *AggMetrics) GC() { unix := time.Duration(time.Now().UnixNano()) diff := ms.gcInterval - (unix % ms.gcInterval) time.Sleep(diff + time.Minute) - log.Info("checking for stale chunks that need persisting.") + log.Info("checking for stale chunks that need persisting") now := uint32(time.Now().Unix()) chunkMinTs := now - uint32(ms.chunkMaxStale) metricMinTs := now - uint32(ms.metricMaxStale) @@ -68,7 +68,9 @@ func (ms *AggMetrics) GC() { a := ms.Metrics[key] ms.RUnlock() if a.GC(now, chunkMinTs, metricMinTs) { - log.Debug("metric %s is stale. Purging data from memory.", key) + log.WithFields(log.Fields{ + "metric": key, + }).Debug("metric is stale, purging data from memory") ms.Lock() delete(ms.Metrics, key) metricsActive.Set(len(ms.Metrics)) diff --git a/mdata/aggregator.go b/mdata/aggregator.go index 1b65c3cc3e..3c45d53084 100644 --- a/mdata/aggregator.go +++ b/mdata/aggregator.go @@ -4,6 +4,7 @@ import ( "github.com/grafana/metrictank/conf" "github.com/grafana/metrictank/mdata/cache" "github.com/raintank/schema" + log "github.com/sirupsen/logrus" ) // AggBoundary returns ts if it is a boundary, or the next boundary otherwise. 
@@ -31,7 +32,7 @@ type Aggregator struct { func NewAggregator(store Store, cachePusher cache.CachePusher, key schema.AMKey, ret conf.Retention, agg conf.Aggregation, dropFirstChunk bool) *Aggregator { if len(agg.AggregationMethod) == 0 { - panic("NewAggregator called without aggregations. this should never happen") + log.Panic("NewAggregator called without aggregations. this should never happen") } span := uint32(ret.SecondsPerPoint) aggregator := &Aggregator{ @@ -112,7 +113,7 @@ func (agg *Aggregator) Add(ts uint32, val float64) { agg.currentBoundary = boundary agg.agg.Add(val) } else { - panic("aggregator: boundary < agg.currentBoundary. ts > lastSeen should already have been asserted") + log.Panic("aggregator: boundary < agg.currentBoundary. ts > lastSeen should already have been asserted") } } diff --git a/mdata/cache/ccache.go b/mdata/cache/ccache.go index cac3c2d5e3..62a547c0cc 100644 --- a/mdata/cache/ccache.go +++ b/mdata/cache/ccache.go @@ -13,8 +13,8 @@ import ( "github.com/grafana/metrictank/tracing" opentracing "github.com/opentracing/opentracing-go" "github.com/raintank/schema" - "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" + log "github.com/sirupsen/logrus" ) var ( @@ -240,7 +240,10 @@ func (c *CCache) evict(target *accnt.EvictTarget) { return } - log.Debug("CCache evict: evicting chunk %d on metric %s\n", target.Ts, target.Metric) + log.WithFields(log.Fields{ + "chunk": target.Ts, + "metric": target.Metric, + }).Debug("CCache: evict(), evicting chunk on metric") length := c.metricCache[target.Metric].Del(target.Ts) if length == 0 { delete(c.metricCache, target.Metric) diff --git a/mdata/cache/ccache_metric.go b/mdata/cache/ccache_metric.go index a6236800c6..9a40c43d73 100644 --- a/mdata/cache/ccache_metric.go +++ b/mdata/cache/ccache_metric.go @@ -9,7 +9,7 @@ import ( "github.com/grafana/metrictank/mdata/chunk" opentracing "github.com/opentracing/opentracing-go" "github.com/raintank/schema" - 
"github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) // CCacheMetric caches data chunks @@ -106,15 +106,22 @@ func (mc *CCacheMetric) AddRange(prev uint32, itergens []chunk.IterGen) { if prev == 0 { prev = res } else if prev != res { - log.Warn("CCacheMetric AddRange: 'prev' param disagrees with seek: key = %s, prev = %d, seek = %d", - mc.MKey.String(), prev, res) + log.WithFields(log.Fields{ + "key": mc.MKey.String(), + "prev": prev, + "seek": res, + }).Warn("CCacheMetric: AddRange(), 'prev' param disagrees with seek") } } // if the previous chunk is cached, link it if prev != 0 && (ts-prev) != (itergens[1].Ts-ts) { - log.Warn("CCacheMetric AddRange: Bad prev begin used: key = %s, prev = %d, itergens[0].Ts = %d, itergens[1].Ts = %d", - mc.MKey.String(), prev, itergens[0].Ts, itergens[1].Ts) + log.WithFields(log.Fields{ + "key": mc.MKey.String(), + "prev": prev, + "itergens.0.ts": itergens[0].Ts, + "itergens.1.ts": itergens[1].Ts, + }).Warn("CCacheMetric: AddRange(), bad prev being used") prev = 0 } else if _, ok := mc.chunks[prev]; ok { mc.chunks[prev].Next = ts @@ -217,7 +224,10 @@ func (mc *CCacheMetric) Add(prev uint32, itergen chunk.IterGen) { nextTs := mc.nextTs(ts) - log.Debug("CCacheMetric Add: caching chunk ts %d, nextTs %d", ts, nextTs) + log.WithFields(log.Fields{ + "timestamp": ts, + "next.timestamp": nextTs, + }).Debug("CCacheMetric: Add(), caching chunk") // if previous chunk has not been passed we try to be smart and figure it out. 
// this is common in a scenario where a metric continuously gets queried @@ -227,8 +237,11 @@ func (mc *CCacheMetric) Add(prev uint32, itergen chunk.IterGen) { if prev == 0 { prev = res } else if prev != res { - log.Warn("CCacheMetric Add: 'prev' param disagrees with seek: key = %s, prev = %d, seek = %d", - mc.MKey.String(), prev, res) + log.WithFields(log.Fields{ + "key": mc.MKey.String(), + "prev": prev, + "seek": res, + }).Warn("CCacheMetric: Add(), 'prev' param disagrees with seek") } } @@ -314,16 +327,23 @@ func (mc *CCacheMetric) lastTs() uint32 { // if not found or can't be sure returns 0, false // assumes we already have at least a read lock func (mc *CCacheMetric) seekAsc(ts uint32) (uint32, bool) { - log.Debug("CCacheMetric seekAsc: seeking for %d in the keys %+d", ts, mc.keys) + log.WithFields(log.Fields{ + "timestamp": ts, + "keys": mc.keys, + }).Debug("CCacheMetric: seekAsc(), seeking for timestamp in the keys") for i := 0; i < len(mc.keys) && mc.keys[i] <= ts; i++ { if mc.nextTs(mc.keys[i]) > ts { - log.Debug("CCacheMetric seekAsc: seek found ts %d is between %d and %d", ts, mc.keys[i], mc.nextTs(mc.keys[i])) + log.WithFields(log.Fields{ + "timestamp": ts, + "first.key": mc.keys[i], + "last.key": mc.nextTs(mc.keys[i]), + }).Debug("CCacheMetric: seekAsc(), seek found ts between keys") return mc.keys[i], true } } - log.Debug("CCacheMetric seekAsc: seekAsc unsuccessful") + log.Debug("CCacheMetric: seekAsc(), unsuccessful") return 0, false } @@ -331,16 +351,23 @@ func (mc *CCacheMetric) seekAsc(ts uint32) (uint32, bool) { // if not found or can't be sure returns 0, false // assumes we already have at least a read lock func (mc *CCacheMetric) seekDesc(ts uint32) (uint32, bool) { - log.Debug("CCacheMetric seekDesc: seeking for %d in the keys %+d", ts, mc.keys) + log.WithFields(log.Fields{ + "timestamp": ts, + "keys": mc.keys, + }).Debug("CCacheMetric: seekDesc(), seeking for timestamp in keys") for i := len(mc.keys) - 1; i >= 0 && mc.nextTs(mc.keys[i]) > 
ts; i-- { if mc.keys[i] <= ts { - log.Debug("CCacheMetric seekDesc: seek found ts %d is between %d and %d", ts, mc.keys[i], mc.nextTs(mc.keys[i])) + log.WithFields(log.Fields{ + "timestamp": ts, + "first.key": mc.keys[i], + "last.key": mc.nextTs(mc.keys[i]), + }).Debug("CCacheMetric: seekDesc(), seek found timestamp between keys") return mc.keys[i], true } } - log.Debug("CCacheMetric seekDesc: seekDesc unsuccessful") + log.Debug("CCacheMetric: seekDesc() unsuccessful") return 0, false } @@ -352,7 +379,9 @@ func (mc *CCacheMetric) searchForward(ctx context.Context, metric schema.AMKey, // add all consecutive chunks to search results, starting at the one containing "from" for ; ts != 0; ts = mc.chunks[ts].Next { - log.Debug("CCacheMetric searchForward: forward search adds chunk ts %d to start", ts) + log.WithFields(log.Fields{ + "timestamp": ts, + }).Debug("CCacheMetric: searchForward(), forward search adds chunk to start") res.Start = append(res.Start, mc.chunks[ts].Itgen) nextTs := mc.nextTs(ts) res.From = nextTs @@ -362,7 +391,13 @@ func (mc *CCacheMetric) searchForward(ctx context.Context, metric schema.AMKey, break } if mc.chunks[ts].Next != 0 && ts >= mc.chunks[ts].Next { - log.Warn("CCacheMetric: suspected bug suppressed. 
searchForward(%q, %d, %d, res) ts is %d while Next is %d", metric, from, until, ts, mc.chunks[ts].Next) + log.WithFields(log.Fields{ + "metric": metric, + "from": from, + "until": until, + "current.timestamp": ts, + "next.timestamp": mc.chunks[ts].Next, + }).Warn("CCacheMetric: searchForward(), suspected bug suppressed") span := opentracing.SpanFromContext(ctx) span.SetTag("searchForwardBug", true) searchFwdBug.Inc() @@ -382,7 +417,9 @@ func (mc *CCacheMetric) searchBackward(from, until uint32, res *CCSearchResult) break } - log.Debug("CCacheMetric searchBackward: backward search adds chunk ts %d to end", ts) + log.WithFields(log.Fields{ + "timestamp": ts, + }).Debug("CCacheMetric: searchBackward(), backward search adds chunk to end") res.End = append(res.End, mc.chunks[ts].Itgen) res.Until = ts } @@ -417,7 +454,11 @@ func (mc *CCacheMetric) Search(ctx context.Context, metric schema.AMKey, res *CC } if !res.Complete && res.From > res.Until { - log.Warn("CCacheMetric Search: Found from > until (%d/%d), key = %s, printing chunks\n", res.From, res.Until, mc.MKey.String()) + log.WithFields(log.Fields{ + "from": res.From, + "until": res.Until, + "key": mc.MKey.String(), + }).Warn("CCacheMetric: Search(), found from > until, printing chunks") mc.debugMetric(from-7200, until+7200) res.Complete = false res.Start = res.Start[:0] @@ -428,11 +469,18 @@ func (mc *CCacheMetric) Search(ctx context.Context, metric schema.AMKey, res *CC } func (mc *CCacheMetric) debugMetric(from, until uint32) { - log.Warn("CCacheMetric debugMetric: --- debugging metric between %d and %d ---\n", from, until) + log.WithFields(log.Fields{ + "from": from, + "until": until, + }).Warn("CCacheMetric: debugMetric(), debugging metric between timestamps") for _, key := range mc.keys { if key >= from && key <= until { - log.Warn("CCacheMetric debugMetric: ts %d; prev %d; next %d\n", key, mc.chunks[key].Prev, mc.chunks[key].Next) + log.WithFields(log.Fields{ + "key": key, + "prev": mc.chunks[key].Prev, + 
"next": mc.chunks[key].Next, + }).Warn("CCacheMetric: debugMetric()") } } - log.Warn("CCacheMetric debugMetric: ------------------------\n") + log.Warn("CCacheMetric: debugMetric() ------------------------\n") } diff --git a/mdata/init.go b/mdata/init.go index 910438e19d..cf8ac3a3ee 100644 --- a/mdata/init.go +++ b/mdata/init.go @@ -11,8 +11,8 @@ import ( "github.com/grafana/metrictank/stats" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" + log "github.com/sirupsen/logrus" ) var ( @@ -93,7 +93,10 @@ func ConfigProcess() { Schemas, err = conf.ReadSchemas(schemasFile) if err != nil { - log.Fatal(3, "can't read schemas file %q: %s", schemasFile, err.Error()) + log.WithFields(log.Fields{ + "file": schemasFile, + "error": err.Error(), + }).Fatal("can't read schemas file") } // === read storage-aggregation.conf === @@ -108,10 +111,16 @@ func ConfigProcess() { if err == nil { Aggregations, err = conf.ReadAggregations(aggFile) if err != nil { - log.Fatal(3, "can't read storage-aggregation file %q: %s", aggFile, err.Error()) + log.WithFields(log.Fields{ + "file": aggFile, + "error": err.Error(), + }).Fatal("can't read storage-aggregation file") } } else { - log.Info("Could not read %s: %s: using defaults", aggFile, err) + log.WithFields(log.Fields{ + "file": aggFile, + "error": err.Error(), + }).Info("could not read file using defaults") Aggregations = conf.NewAggregations() } } diff --git a/mdata/notifier.go b/mdata/notifier.go index 883e1066f6..8b71448474 100644 --- a/mdata/notifier.go +++ b/mdata/notifier.go @@ -8,7 +8,7 @@ import ( "github.com/grafana/metrictank/consolidation" "github.com/grafana/metrictank/idx" "github.com/grafana/metrictank/stats" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) var ( @@ -54,21 +54,28 @@ func Handle(metrics Metrics, data []byte, idx idx.MetricIndex) { batch := 
PersistMessageBatch{} err := json.Unmarshal(data[1:], &batch) if err != nil { - log.Error(3, "failed to unmarsh batch message. skipping.", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("notifier: failed to unmarshal batch message, skipping") return } messagesReceived.Add(len(batch.SavedChunks)) for _, c := range batch.SavedChunks { amkey, err := schema.AMKeyFromString(c.Key) if err != nil { - log.Error(3, "notifier: failed to convert %q to AMKey: %s -- skipping", c.Key, err) + log.WithFields(log.Fields{ + "key": c.Key, + "error": err.Error(), + }).Error("notifier: failed to convert key to AMKey, skipping") continue } // we only need to handle saves for series that we know about. // if the series is not in the index, then we dont need to worry about it. def, ok := idx.Get(amkey.MKey) if !ok { - log.Debug("notifier: skipping metric with MKey %s as it is not in the index", amkey.MKey) + log.WithFields(log.Fields{ + "mkey": amkey.MKey, + }).Debug("notifier: skipping metric for mkey as it is not in the index") continue } agg := metrics.GetOrCreate(amkey.MKey, def.SchemaId, def.AggId) @@ -81,7 +88,9 @@ func Handle(metrics Metrics, data []byte, idx idx.MetricIndex) { } } } else { - log.Error(3, "notifier: unknown version %d", version) + log.WithFields(log.Fields{ + "version": version, + }).Error("notifier: unknown version") } return } diff --git a/mdata/notifierKafka/cfg.go b/mdata/notifierKafka/cfg.go index 67e7ea76d8..7b04661b42 100644 --- a/mdata/notifierKafka/cfg.go +++ b/mdata/notifierKafka/cfg.go @@ -10,8 +10,8 @@ import ( "github.com/Shopify/sarama" "github.com/grafana/metrictank/kafka" "github.com/grafana/metrictank/stats" - "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" + log "github.com/sirupsen/logrus" ) var Enabled bool @@ -63,7 +63,9 @@ func ConfigProcess(instance string) { default: offsetDuration, err = time.ParseDuration(offsetStr) if err != nil { - log.Fatal(4, "kafka-cluster: invalid offest format. 
%s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafka-cluster: invalid offset format") } } brokers = strings.Split(brokerStr, ",") @@ -78,12 +80,16 @@ func ConfigProcess(instance string) { config.Producer.Partitioner = sarama.NewManualPartitioner err = config.Validate() if err != nil { - log.Fatal(2, "kafka-cluster invalid consumer config: %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafka-cluster: invalid consumer config") } backlogProcessTimeout, err = time.ParseDuration(backlogProcessTimeoutStr) if err != nil { - log.Fatal(4, "kafka-cluster: unable to parse backlog-process-timeout. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafka-cluster: unable to parse backlog-process-timeout") } if partitionStr != "*" { @@ -91,7 +97,9 @@ func ConfigProcess(instance string) { for _, part := range parts { i, err := strconv.Atoi(part) if err != nil { - log.Fatal(4, "kafka-cluster: could not parse partition %q. partitions must be '*' or a comma separated list of id's", part) + log.WithFields(log.Fields{ + "partition": part, + }).Fatal("kafka-cluster: could not parse partition, partitions must be '*' or a comma separated list of id's") } partitions = append(partitions, int32(i)) } @@ -99,20 +107,26 @@ func ConfigProcess(instance string) { // validate our partitions client, err := sarama.NewClient(brokers, config) if err != nil { - log.Fatal(4, "kafka-cluster failed to create client. 
%s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafka-cluster: failed to create client") } defer client.Close() availParts, err := kafka.GetPartitions(client, []string{topic}) if err != nil { - log.Fatal(4, "kafka-cluster: %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafka-cluster: failed to get partitions") } if partitionStr == "*" { partitions = availParts } else { missing := kafka.DiffPartitions(partitions, availParts) if len(missing) > 0 { - log.Fatal(4, "kafka-cluster: configured partitions not in list of available partitions. missing %v", missing) + log.WithFields(log.Fields{ + "missing": missing, + }).Fatal("kafka-cluster: configured partitions not in list of available partitions") } } @@ -128,7 +142,11 @@ func ConfigProcess(instance string) { for _, part := range partitions { offset, err := client.GetOffset(topic, part, sarama.OffsetNewest) if err != nil { - log.Fatal(4, "kakfa-cluster: failed to get newest offset for topic %s part %d: %s", topic, part, err) + log.WithFields(log.Fields{ + "topic": topic, + "partition": part, + "error": err.Error(), + }).Fatal("kafka-cluster: failed to get newest offset") } bootTimeOffsets[part] = offset // metric cluster.notifier.kafka.partition.%d.offset is the current offset for the partition (%d) that we have consumed @@ -139,5 +157,7 @@ func ConfigProcess(instance string) { // partition (%d) that we have not yet consumed. 
partitionLag[part] = stats.NewGauge64(fmt.Sprintf("cluster.notifier.kafka.partition.%d.lag", part)) } - log.Info("kafka-cluster: consuming from partitions %v", partitions) + log.WithFields(log.Fields{ + "partitions": partitions, + }).Info("kafka-cluster: consuming from partitions") } diff --git a/mdata/notifierKafka/notifierKafka.go b/mdata/notifierKafka/notifierKafka.go index 9946cbd9f8..d04d312545 100644 --- a/mdata/notifierKafka/notifierKafka.go +++ b/mdata/notifierKafka/notifierKafka.go @@ -14,7 +14,7 @@ import ( "github.com/grafana/metrictank/kafka" "github.com/grafana/metrictank/mdata" "github.com/grafana/metrictank/util" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) type NotifierKafka struct { @@ -38,22 +38,30 @@ type NotifierKafka struct { func New(instance string, metrics mdata.Metrics, idx idx.MetricIndex) *NotifierKafka { client, err := sarama.NewClient(brokers, config) if err != nil { - log.Fatal(2, "kafka-cluster failed to start client: %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafka-cluster: failed to start client") } consumer, err := sarama.NewConsumerFromClient(client) if err != nil { - log.Fatal(2, "kafka-cluster failed to initialize consumer: %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafka-cluster: failed to initialize consumer") } - log.Info("kafka-cluster consumer initialized without error") + log.Info("kafka-cluster: consumer initialized without error") producer, err := sarama.NewSyncProducerFromClient(client) if err != nil { - log.Fatal(2, "kafka-cluster failed to initialize producer: %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafka-cluster: failed to initialize producer") } offsetMgr, err := kafka.NewOffsetMgr(dataDir) if err != nil { - log.Fatal(2, "kafka-cluster couldnt create offsetMgr. 
%s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("kafka-cluster: couldn't create offset manager") } c := NotifierKafka{ @@ -90,13 +98,21 @@ func (c *NotifierKafka) start() { case "last": offset, err = c.offsetMgr.Last(topic, partition) if err != nil { - log.Fatal(4, "kafka-cluster: Failed to get %q duration offset for %s:%d. %q", offsetStr, topic, partition, err) + log.WithFields(log.Fields{ + "offset": offsetStr, + "topic": topic, + "partition": partition, + "error": err.Error(), + }).Fatal("kafka-cluster: failed to get offset duration") } default: offset, err = c.client.GetOffset(topic, partition, time.Now().Add(-1*offsetDuration).UnixNano()/int64(time.Millisecond)) if err != nil { offset = sarama.OffsetOldest - log.Warn("kafka-cluster failed to get offset %s: %s -> will use oldest instead", offsetDuration, err) + log.WithFields(log.Fields{ + "offset.duration": offsetDuration, + "error": err.Error(), + }).Warn("kafka-cluster: failed to get offset, will use oldest instead") } } partitionLogSize[partition].Set(int(bootTimeOffsets[partition])) @@ -120,9 +136,13 @@ func (c *NotifierKafka) start() { select { case <-time.After(backlogProcessTimeout): - log.Warn("kafka-cluster: Processing metricPersist backlog has taken too long, giving up lock after %s.", backlogProcessTimeout) + log.WithFields(log.Fields{ + "backlog.timeout": backlogProcessTimeout, + }).Warn("kafka-cluster: processing metricPersist backlog has taken too long, giving up lock") case <-backlogProcessed: - log.Info("kafka-cluster: metricPersist backlog processed in %s.", time.Since(pre)) + log.WithFields(log.Fields{ + "time.taken": time.Since(pre), + }).Info("kafka-cluster: metricPersist backlog processed") } } @@ -133,9 +153,17 @@ func (c *NotifierKafka) consumePartition(topic string, partition int32, currentO pc, err := c.consumer.ConsumePartition(topic, partition, currentOffset) if err != nil { - log.Fatal(4, "kafka-cluster: failed to start partitionConsumer for %s:%d. 
%s", topic, partition, err) + log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + "error": err.Error(), + }).Fatal("kafka-cluster: failed to start partitionConsumer") } - log.Info("kafka-cluster: consuming from %s:%d from offset %d", topic, partition, currentOffset) + log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + "offset": currentOffset, + }).Info("kafka-cluster: consuming from offset") messages := pc.Messages() ticker := time.NewTicker(offsetCommitInterval) @@ -149,14 +177,21 @@ func (c *NotifierKafka) consumePartition(topic string, partition int32, currentO for { select { case msg := <-messages: - if mdata.LogLevel < 2 { - log.Debug("kafka-cluster received message: Topic %s, Partition: %d, Offset: %d, Key: %x", msg.Topic, msg.Partition, msg.Offset, msg.Key) - } + log.WithFields(log.Fields{ + "topic": msg.Topic, + "partition": msg.Partition, + "offset": msg.Offset, + "key": msg.Key, + }).Debug("kafka-cluster: received message") mdata.Handle(c.metrics, msg.Value, c.idx) currentOffset = msg.Offset case <-ticker.C: if err := c.offsetMgr.Commit(topic, partition, currentOffset); err != nil { - log.Error(3, "kafka-cluster failed to commit offset for %s:%d, %s", topic, partition, err) + log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + "error": err.Error(), + }).Error("kafka-cluster: failed to commit offset") } if startingUp && currentOffset >= bootTimeOffset { processBacklog.Done() @@ -164,7 +199,11 @@ func (c *NotifierKafka) consumePartition(topic string, partition int32, currentO } offset, err := c.client.GetOffset(topic, partition, sarama.OffsetNewest) if err != nil { - log.Error(3, "kafka-mdm failed to get log-size of partition %s:%d. 
%s", topic, partition, err) + log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + "error": err.Error(), + }).Error("kafka-mdm: failed to get log-size of partition") } else { partitionLogSizeMetric.Set(int(offset)) } @@ -179,9 +218,16 @@ func (c *NotifierKafka) consumePartition(topic string, partition int32, currentO case <-c.stopConsuming: pc.Close() if err := c.offsetMgr.Commit(topic, partition, currentOffset); err != nil { - log.Error(3, "kafka-cluster failed to commit offset for %s:%d, %s", topic, partition, err) + log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + "error": err.Error(), + }).Error("kafka-cluster: failed to commit offset") } - log.Info("kafka-cluster consumer for %s:%d ended.", topic, partition) + log.WithFields(log.Fields{ + "topic": topic, + "partition": partition, + }).Info("kafka-cluster: consumer ended") return } } @@ -235,13 +281,17 @@ func (c *NotifierKafka) flush() { for i, msg := range c.buf { amkey, err := schema.AMKeyFromString(msg.Key) if err != nil { - log.Error(3, "kafka-cluster: failed to parse key %q", msg.Key) + log.WithFields(log.Fields{ + "key": msg.Key, + }).Error("kafka-cluster: failed to parse key") continue } def, ok := c.idx.Get(amkey.MKey) if !ok { - log.Error(3, "kafka-cluster: failed to lookup metricDef with id %s", msg.Key) + log.WithFields(log.Fields{ + "key": msg.Key, + }).Error("kafka-cluster: failed to lookup metricDef") continue } buf := bytes.NewBuffer(c.bPool.Get()) @@ -250,7 +300,7 @@ func (c *NotifierKafka) flush() { pMsg = mdata.PersistMessageBatch{Instance: c.instance, SavedChunks: c.buf[i : i+1]} err = encoder.Encode(&pMsg) if err != nil { - log.Fatal(4, "kafka-cluster failed to marshal persistMessage to json.") + log.Fatal("kafka-cluster: failed to marshal persistMessage to json.") } messagesSize.Value(buf.Len()) kafkaMsg := &sarama.ProducerMessage{ @@ -264,12 +314,16 @@ func (c *NotifierKafka) flush() { c.buf = nil go func() { - log.Debug("kafka-cluster sending %d 
batch metricPersist messages", len(payload)) + log.WithFields(log.Fields{ + "num.messages": len(payload), + }).Debug("kafka-cluster: sending batch metricPersist messages") sent := false for !sent { err := c.producer.SendMessages(payload) if err != nil { - log.Warn("kafka-cluster publisher %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Warn("kafka-cluster: publisher") } else { sent = true } diff --git a/mdata/notifierNsq/cfg.go b/mdata/notifierNsq/cfg.go index 085d5666cf..20a106e7b7 100644 --- a/mdata/notifierNsq/cfg.go +++ b/mdata/notifierNsq/cfg.go @@ -8,9 +8,10 @@ package notifierNsq import ( "flag" - "log" "strings" + log "github.com/sirupsen/logrus" + "github.com/grafana/metrictank/stats" "github.com/nsqio/go-nsq" "github.com/raintank/misc/app" @@ -52,7 +53,7 @@ func ConfigProcess() { return } if topic == "" { - log.Fatal(4, "topic for nsq-cluster cannot be empty") + log.Fatal("nsq-cluster: topic cannot be empty") } nsqdAdds = strings.Split(nsqdTCPAddrs, ",") @@ -70,7 +71,9 @@ func ConfigProcess() { pCfg.UserAgent = "metrictank-cluster" err := app.ParseOpts(pCfg, producerOpts) if err != nil { - log.Fatal(4, "nsq-cluster: failed to parse nsq producer options. %s", err) + log.WithFields(log.Fields{ + "error": err, + }).Fatal("nsq-cluster: failed to parse nsq producer options") } // consumer @@ -78,7 +81,9 @@ func ConfigProcess() { cCfg.UserAgent = "metrictank-cluster" err = app.ParseOpts(cCfg, consumerOpts) if err != nil { - log.Fatal(4, "nsq-cluster: failed to parse nsq consumer options. 
%s", err) + log.WithFields(log.Fields{ + "error": err, + }).Fatal("nsq-cluster: failed to parse nsq consumer options") } cCfg.MaxInFlight = maxInFlight } diff --git a/mdata/notifierNsq/notifierNsq.go b/mdata/notifierNsq/notifierNsq.go index e275b036ac..5715d7d833 100644 --- a/mdata/notifierNsq/notifierNsq.go +++ b/mdata/notifierNsq/notifierNsq.go @@ -12,7 +12,7 @@ import ( "github.com/grafana/metrictank/mdata/notifierNsq/instrumented_nsq" "github.com/grafana/metrictank/stats" "github.com/nsqio/go-nsq" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) var ( @@ -40,7 +40,9 @@ func New(instance string, metrics mdata.Metrics, idx idx.MetricIndex) *NotifierN for _, addr := range nsqdAdds { producer, err := nsq.NewProducer(addr, pCfg) if err != nil { - log.Fatal(4, "nsq-cluster failed creating producer %s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("nsq-cluster: failed creating producer") } producers[addr] = producer } @@ -48,7 +50,9 @@ func New(instance string, metrics mdata.Metrics, idx idx.MetricIndex) *NotifierN // consumers consumer, err := insq.NewConsumer(topic, channel, cCfg, "cluster.notifier.nsq.metric_persist.%s") if err != nil { - log.Fatal(4, "nsq-cluster failed to create NSQ consumer. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("nsq-cluster: failed to create nsq consumer") } c := &NotifierNSQ{ instance: instance, @@ -60,13 +64,17 @@ func New(instance string, metrics mdata.Metrics, idx idx.MetricIndex) *NotifierN err = consumer.ConnectToNSQDs(nsqdAdds) if err != nil { - log.Fatal(4, "nsq-cluster failed to connect to NSQDs. 
%s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("nsq-cluster: failed to connect to nsqd") } - log.Info("nsq-cluster persist consumer connected to nsqd") + log.Info("nsq-cluster: persist consumer connected to nsqd") err = consumer.ConnectToNSQLookupds(lookupdAdds) if err != nil { - log.Fatal(4, "nsq-cluster failed to connect to NSQLookupds. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("nsq-cluster: failed to connect to NSQLookupds") } go c.run() return c @@ -107,11 +115,13 @@ func (c *NotifierNSQ) flush() { c.buf = nil go func() { - log.Debug("CLU nsq-cluster sending %d batch metricPersist messages", len(msg.SavedChunks)) + log.WithFields(log.Fields{ + "num.messages": len(msg.SavedChunks), + }).Debug("CLU nsq-cluster: sending batch metricPersist messages") data, err := json.Marshal(&msg) if err != nil { - log.Fatal(4, "CLU nsq-cluster failed to marshal persistMessage to json.") + log.Fatal("CLU nsq-cluster: failed to marshal persistMessage to json.") } buf := new(bytes.Buffer) binary.Write(buf, binary.LittleEndian, uint8(mdata.PersistMessageBatchV1)) @@ -130,7 +140,10 @@ func (c *NotifierNSQ) flush() { // successfully, then sending a nil error will mark the host as alive again.
hostPoolResponse.Mark(err) if err != nil { - log.Warn("CLU nsq-cluster publisher marking host %s as faulty due to %s", hostPoolResponse.Host(), err) + log.WithFields(log.Fields{ + "host": hostPoolResponse.Host(), + "error": err.Error(), + }).Warn("CLU nsq-cluster: publisher marking host as faulty") } else { sent = true } diff --git a/mdata/reorder_buffer_test.go b/mdata/reorder_buffer_test.go index 6f3db36d47..2df54bed2f 100644 --- a/mdata/reorder_buffer_test.go +++ b/mdata/reorder_buffer_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/raintank/schema" + log "github.com/sirupsen/logrus" ) func testAddAndGet(t *testing.T, reorderWindow uint32, testData, expectedData []schema.Point, expectAdded, expectAddFail, expectReordered uint32) []schema.Point { @@ -371,7 +372,7 @@ func benchmarkROBAdd(b *testing.B, window, shufgroup int) { out, _ = rob.Add(data[i].Ts, data[i].Val) } if len(out) > 1000 { - panic("this clause should never fire. only exists for compiler not to optimize away the results") + log.Panic("this clause should never fire. 
only exists for compiler not to optimize away the results") } } diff --git a/stacktest/docker/docker.go b/stacktest/docker/docker.go index 991903f52a..660cf74801 100644 --- a/stacktest/docker/docker.go +++ b/stacktest/docker/docker.go @@ -8,6 +8,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/client" "github.com/grafana/metrictank/stacktest/track" + log "github.com/sirupsen/logrus" ) var cli *client.Client @@ -16,7 +17,9 @@ func init() { var err error cli, err = client.NewEnvClient() if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to create new client") } } diff --git a/stacktest/docker/path.go b/stacktest/docker/path.go index 67d89fd095..5a9d776f44 100644 --- a/stacktest/docker/path.go +++ b/stacktest/docker/path.go @@ -4,8 +4,10 @@ import ( "os" "strings" - homedir "github.com/mitchellh/go-homedir" p "path" + + homedir "github.com/mitchellh/go-homedir" + log "github.com/sirupsen/logrus" ) // path takes a relative path within the metrictank repository and returns the full absolute filepath, @@ -16,7 +18,10 @@ func Path(dst string) string { var err error gopath, err = homedir.Expand("~/go") if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + "gopath": gopath, + }).Panic("failed to get path") } } firstPath := strings.Split(gopath, ":")[0] diff --git a/stacktest/fakemetrics/fakemetrics.go b/stacktest/fakemetrics/fakemetrics.go index bed79f8157..6ae3fc068e 100644 --- a/stacktest/fakemetrics/fakemetrics.go +++ b/stacktest/fakemetrics/fakemetrics.go @@ -2,9 +2,10 @@ package fakemetrics import ( "fmt" - "log" "time" + log "github.com/sirupsen/logrus" + "github.com/grafana/metrictank/clock" "github.com/grafana/metrictank/stacktest/fakemetrics/out" "github.com/grafana/metrictank/stacktest/fakemetrics/out/carbon" @@ -55,7 +56,9 @@ func NewKafka(num int) *FakeMetrics { stats, _ := helper.New(false, "", "standard", "", "") out, err := kafkamdm.New("mdm", 
[]string{"localhost:9092"}, "none", stats, "lastNum") if err != nil { - log.Fatal(4, "failed to create kafka-mdm output. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to create kafka-mdm output") } return NewFakeMetrics(generateMetrics(num), out, stats) } @@ -64,7 +67,9 @@ func NewCarbon(num int) *FakeMetrics { stats, _ := helper.New(false, "", "standard", "", "") out, err := carbon.New("localhost:2003", stats) if err != nil { - log.Fatal(4, "failed to create kafka-mdm output. %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to create carbon output") } return NewFakeMetrics(generateMetrics(num), out, stats) } @@ -94,7 +99,9 @@ func (f *FakeMetrics) run() { } err := f.o.Flush(f.metrics) if err != nil { - panic(fmt.Sprintf("failed to send data to output: %s", err)) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to send data to output") } } } diff --git a/stacktest/fakemetrics/out/kafkamdm/kafkamdm.go b/stacktest/fakemetrics/out/kafkamdm/kafkamdm.go index 7ab443b334..58f65a7669 100644 --- a/stacktest/fakemetrics/out/kafkamdm/kafkamdm.go +++ b/stacktest/fakemetrics/out/kafkamdm/kafkamdm.go @@ -13,7 +13,7 @@ import ( "github.com/grafana/metrictank/stacktest/fakemetrics/out" "github.com/raintank/met" "github.com/raintank/schema" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) type KafkaMdm struct { @@ -162,7 +162,11 @@ func (k *KafkaMdm) Flush(metrics []*schema.MetricData) error { k.PublishErrors.Inc(1) if errors, ok := err.(sarama.ProducerErrors); ok { for i := 0; i < 10 && i < len(errors); i++ { - log.Error(4, "ProducerError %d/%d: %s", i, len(errors), errors[i].Error()) + log.WithFields(log.Fields{ + "current.error": i, + "total.errors": len(errors), + "error": errors[i].Error(), + }).Error("ProducerError") } } return err diff --git a/stacktest/fakemetrics/out/saramahelper.go b/stacktest/fakemetrics/out/saramahelper.go index 
60f0f62823..226d9d7f2f 100644 --- a/stacktest/fakemetrics/out/saramahelper.go +++ b/stacktest/fakemetrics/out/saramahelper.go @@ -2,7 +2,7 @@ package out import ( "github.com/Shopify/sarama" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) func GetCompression(codec string) sarama.CompressionCodec { @@ -14,7 +14,9 @@ func GetCompression(codec string) sarama.CompressionCodec { case "snappy": return sarama.CompressionSnappy default: - log.Fatal(5, "unknown compression codec %q", codec) + log.WithFields(log.Fields{ + "codec": codec, + }).Fatal("unknown compression codec") return 0 // make go compiler happy, needs a return *roll eyes* } } diff --git a/stacktest/grafana/grafana.go b/stacktest/grafana/grafana.go index 786ab2d3b5..ee91d62bef 100644 --- a/stacktest/grafana/grafana.go +++ b/stacktest/grafana/grafana.go @@ -3,9 +3,10 @@ package grafana import ( "bytes" "encoding/json" - "fmt" "net/http" "time" + + log "github.com/sirupsen/logrus" ) var grafanaClient *http.Client @@ -31,21 +32,29 @@ func PostAnnotation(msg string) { } b, err := json.Marshal(a) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to marshal data") } var reader *bytes.Reader reader = bytes.NewReader(b) req, err := http.NewRequest("POST", "http://admin:admin@localhost:3000/api/annotations", reader) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("request failed") } req.Header.Add("Content-Type", "application/json") resp, err := grafanaClient.Do(req) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("response failed") } if resp.StatusCode != 200 { - panic(fmt.Sprintf("grafana annotation post response %s", resp.Status)) + log.WithFields(log.Fields{ + "response": resp.Status, + }).Panic("grafana annotation post response") } }() } diff --git a/stacktest/graphite/graphite.go b/stacktest/graphite/graphite.go index 
f64abc13a9..981f6cd9ef 100644 --- a/stacktest/graphite/graphite.go +++ b/stacktest/graphite/graphite.go @@ -8,6 +8,8 @@ import ( "net/url" "sync" "time" + + log "github.com/sirupsen/logrus" ) var renderClient *http.Client @@ -25,7 +27,9 @@ func renderQuery(base, target, from string) Response { url := fmt.Sprintf("%s/render?target=%s&format=json&from=%s", base, target, from) req, err := http.NewRequest("GET", url, nil) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("request failed") } req.Header.Add("X-Org-Id", "1") // only really needed for MT, not for graphite. oh well... //fmt.Println("requesting", url) @@ -157,7 +161,10 @@ func CheckMT(endpoints []int, query, from string, dur time.Duration, reqs int, v // note: could take 2 seconds longer than foreseen due to the client timeout, but anything longer may be a problem, wg.Wait() if time.Since(pre) > (110*dur/100)+2*time.Second { - panic(fmt.Sprintf("checkMT ran too long for some reason. expected %s. took actually %s. 
system overloaded?", dur, time.Since(pre))) + log.WithFields(log.Fields{ + "expected.duration": dur, + "actual.duration": time.Since(pre), + }).Panic("checkMT ran too long, system overloaded?") } return CheckResults{ Valid: ret.valid, diff --git a/stacktest/tests/chaos_cluster/chaos_cluster_test.go b/stacktest/tests/chaos_cluster/chaos_cluster_test.go index 31eb16df0e..0d74fea21f 100644 --- a/stacktest/tests/chaos_cluster/chaos_cluster_test.go +++ b/stacktest/tests/chaos_cluster/chaos_cluster_test.go @@ -3,13 +3,14 @@ package chaos_cluster import ( "context" "fmt" - "log" "os" "os/exec" "reflect" "testing" "time" + log "github.com/sirupsen/logrus" + "github.com/davecgh/go-spew/spew" "github.com/grafana/metrictank/stacktest/docker" "github.com/grafana/metrictank/stacktest/fakemetrics" @@ -33,7 +34,9 @@ func TestMain(m *testing.M) { cmd.Dir = docker.Path("docker/docker-chaos") err := cmd.Start() if err != nil { - log.Fatal(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("docker-compose down failed") } fmt.Println("launching docker-chaos stack...") @@ -42,12 +45,16 @@ func TestMain(m *testing.M) { tracker, err = track.NewTracker(cmd, false, false, "launch-stdout", "launch-stderr") if err != nil { - log.Fatal(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to create new tracker") } err = cmd.Start() if err != nil { - log.Fatal(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("docker-compose up failed") } retcode := m.Run() @@ -56,7 +63,9 @@ func TestMain(m *testing.M) { fmt.Println("stopping docker-compose stack...") cancelFunc() if err := cmd.Wait(); err != nil { - log.Printf("ERROR: could not cleanly shutdown running docker-compose command: %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Info("could not cleanly shutdown running docker-compose command") retcode = 1 } diff --git a/stacktest/tests/end2end_carbon/end2end_carbon_test.go 
b/stacktest/tests/end2end_carbon/end2end_carbon_test.go index a050b2f36f..57211b35d1 100644 --- a/stacktest/tests/end2end_carbon/end2end_carbon_test.go +++ b/stacktest/tests/end2end_carbon/end2end_carbon_test.go @@ -1,13 +1,14 @@ package end2end_carbon import ( - "log" "os" "os/exec" "syscall" "testing" "time" + log "github.com/sirupsen/logrus" + "github.com/davecgh/go-spew/spew" "github.com/grafana/metrictank/stacktest/docker" "github.com/grafana/metrictank/stacktest/fakemetrics" @@ -24,13 +25,17 @@ var fm *fakemetrics.FakeMetrics const metricsPerSecond = 1000 func TestMain(m *testing.M) { - log.Println("launching docker-dev stack...") + log.Info("launching docker-dev stack...") version := exec.Command("docker-compose", "version") output, err := version.CombinedOutput() if err != nil { - log.Fatal(err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to CombinedOutput() from version") } - log.Println(string(output)) + log.WithFields(log.Fields{ + "output": string(output), + }).Info("version CombinedOutput()") // TODO: should probably use -V flag here. 
// introduced here https://github.com/docker/compose/releases/tag/1.19.0 @@ -40,18 +45,22 @@ func TestMain(m *testing.M) { tracker, err = track.NewTracker(cmd, false, false, "launch-stdout", "launch-stderr") if err != nil { - log.Fatal(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("failed to create new tracker") } err = cmd.Start() if err != nil { - log.Fatal(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("docker-compose up failed") } retcode := m.Run() fm.Close() - log.Println("stopping docker-compose stack...") + log.Info("stopping docker-compose stack...") cmd.Process.Signal(syscall.SIGINT) // note: even when we don't care about the output, it's best to consume it before calling cmd.Wait() // even though the cmd.Wait docs say it will wait for stdout/stderr copying to complete @@ -61,10 +70,12 @@ func TestMain(m *testing.M) { // 130 means ctrl-C (interrupt) which is what we want if err != nil && err.Error() != "exit status 130" { - log.Printf("ERROR: could not cleanly shutdown running docker-compose command: %s", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Info("could not cleanly shutdown running docker-compose command") retcode = 1 } else { - log.Println("docker-compose stack is shut down") + log.Info("docker-compose stack is shut down") } os.Exit(retcode) @@ -77,8 +88,8 @@ func TestStartup(t *testing.T) { } select { case <-tracker.Match(matchers): - log.Println("stack now running.") - log.Println("Go to http://localhost:3000 (and login as admin:admin) to see what's going on") + log.Info("stack now running.") + log.Info("Go to http://localhost:3000 (and login as admin:admin) to see what's going on") case <-time.After(time.Second * 70): grafana.PostAnnotation("TestStartup:FAIL") t.Fatal("timed out while waiting for all metrictank instances to come up") @@ -97,7 +108,11 @@ func TestBaseIngestWorkload(t *testing.T) { a := graphite.ValidateTargets(exp)(resp) b := graphite.ValidatorLenNulls(1, 8)(resp) c 
:= graphite.ValidatorAvgWindowed(8, graphite.Ge(metricsPerSecond))(resp) - log.Printf("condition target names %t - condition len & nulls %t - condition avg value %t", a, b, c) + log.WithFields(log.Fields{ + "condition.target.names": a, + "condition.len.nulls": b, + "condition.average.value": c, + }).Info("conditions") return a && b && c }) if !suc6 { diff --git a/stacktest/track/tracker.go b/stacktest/track/tracker.go index a5266a2941..0c58129231 100644 --- a/stacktest/track/tracker.go +++ b/stacktest/track/tracker.go @@ -7,6 +7,8 @@ import ( "os/exec" "regexp" "sync" + + log "github.com/sirupsen/logrus" ) // Tracker allows to track stdout and stderr of running commands @@ -116,7 +118,9 @@ func (t *Tracker) manage(logStdout, logStderr bool) { } matcherCtx = tmp case err := <-t.errChan: - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("received error") } if doneStdout && doneStderr { t.wg.Done() diff --git a/stats/config/init.go b/stats/config/init.go index cdcefafe15..5fa367e3e3 100644 --- a/stats/config/init.go +++ b/stats/config/init.go @@ -6,8 +6,8 @@ import ( "time" "github.com/grafana/metrictank/stats" - "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" + log "github.com/sirupsen/logrus" ) var enabled bool @@ -42,11 +42,13 @@ func Start() { _, err := stats.NewProcessReporter() if err != nil { - log.Fatal(2, "stats: could not initialize process reporter: %v", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Fatal("stats: could not initialize process reporter") } stats.NewGraphite(prefix, addr, interval, bufferSize, timeout) } else { stats.NewDevnull() - log.Warn("running metrictank without instrumentation.") + log.Warn("stats: running metrictank without instrumentation") } } diff --git a/stats/out_graphite.go b/stats/out_graphite.go index 44b3482935..04e6dda115 100644 --- a/stats/out_graphite.go +++ b/stats/out_graphite.go @@ -7,7 +7,7 @@ import ( "sync" "time" - 
"github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) var ( @@ -56,7 +56,9 @@ func NewGraphite(prefix, addr string, interval, bufferSize int, timeout time.Dur func (g *Graphite) reporter(interval int) { ticker := tick(time.Duration(interval) * time.Second) for now := range ticker { - log.Debug("stats flushing for", now, "to graphite") + log.WithFields(log.Fields{ + "ticker": now, + }).Debug("stats flushing to graphite") queueItems.Value(len(g.toGraphite)) if cap(g.toGraphite) != 0 && len(g.toGraphite) == cap(g.toGraphite) { // no space in buffer, no use in doing any work @@ -95,11 +97,16 @@ func (g *Graphite) writer() { time.Sleep(time.Second) conn, err = net.Dial("tcp", g.addr) if err == nil { - log.Info("stats now connected to %s", g.addr) + log.WithFields(log.Fields{ + "tcp.addr": g.addr, + }).Info("stats now connected to graphite") wg.Add(1) go g.checkEOF(conn, &wg) } else { - log.Warn("stats dialing %s failed: %s. will retry", g.addr, err.Error()) + log.WithFields(log.Fields{ + "tcp.addr": g.addr, + "error": err.Error(), + }).Warn("stats dialing failed, will retry") } connected.Set(conn != nil) } @@ -117,7 +124,10 @@ func (g *Graphite) writer() { ok = true flushDuration.Value(time.Since(pre)) } else { - log.Warn("stats failed to write to graphite: %s (took %s). will retry...", err, time.Now().Sub(pre)) + log.WithFields(log.Fields{ + "time.elapsed": time.Now().Sub(pre), + "error": err.Error(), + }).Warn("stats failed to write to graphite, will retry") conn.Close() wg.Wait() conn = nil @@ -136,19 +146,23 @@ func (g *Graphite) checkEOF(conn net.Conn, wg *sync.WaitGroup) { for { num, err := conn.Read(b) if err == io.EOF { - log.Info("Graphite.checkEOF: remote closed conn. 
closing conn") + log.Info("Graphite.checkEOF: remote closed conn, closing conn") conn.Close() return } // in case the remote behaves badly (out of spec for carbon protocol) if num != 0 { - log.Warn("Graphite.checkEOF: read unexpected data from peer: %s\n", b[:num]) + log.WithFields(log.Fields{ + "data": b[:num], + }).Warn("Graphite.checkEOF: read unexpected data from peer") continue } if err != io.EOF { - log.Warn("Graphite.checkEOF: %s. closing conn\n", err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Warn("Graphite.checkEOF: error, closing conn") conn.Close() return } diff --git a/stats/registry.go b/stats/registry.go index 95fbcff15e..8eb0574f69 100644 --- a/stats/registry.go +++ b/stats/registry.go @@ -1,12 +1,11 @@ package stats import ( - "fmt" "reflect" "sync" -) -var errFmtMetricExists = "fatal: metric %q already exists as type %T" + log "github.com/sirupsen/logrus" +) // Registry tracks metrics and reporters type Registry struct { @@ -31,7 +30,10 @@ func (r *Registry) getOrAdd(name string, metric GraphiteMetric) GraphiteMetric { r.Unlock() return existing } - panic(fmt.Sprintf(errFmtMetricExists, name, existing)) + log.WithFields(log.Fields{ + "existing.metric.name": name, + "existing.metric.type": existing, + }).Panic("metric already exists") } r.metrics[name] = metric r.Unlock() diff --git a/store/cassandra/cassandra.go b/store/cassandra/cassandra.go index ba8c75c64e..8012493e44 100644 --- a/store/cassandra/cassandra.go +++ b/store/cassandra/cassandra.go @@ -23,7 +23,7 @@ import ( "github.com/hailocab/go-hostpool" opentracing "github.com/opentracing/opentracing-go" tags "github.com/opentracing/opentracing-go/ext" - "github.com/raintank/worldping-api/pkg/log" + log "github.com/sirupsen/logrus" ) // write aggregated data to cassandra. 
@@ -102,7 +102,9 @@ func PrepareChunkData(span uint32, data []byte) []byte { spanCode, ok := chunk.RevChunkSpans[span] if !ok { // it's probably better to panic than to persist the chunk with a wrong length - panic(fmt.Sprintf("Chunk span invalid: %d", span)) + log.WithFields(log.Fields{ + "span": span, + }).Panic("cassandra_store: chunk span invalid") } binary.Write(buf, binary.LittleEndian, spanCode) buf.Write(data) @@ -118,7 +120,9 @@ func ConvertTimeout(timeout string, defaultUnit time.Duration) time.Duration { } timeoutD, err := time.ParseDuration(timeout) if err != nil { - log.Fatal(1, "cassandra_store: invalid duration value %q", timeout) + log.WithFields(log.Fields{ + "duration": timeout, + }).Fatal("cassandra_store: invalid duration value") } return timeoutD } @@ -150,7 +154,9 @@ func NewCassandraStore(config *StoreConfig, ttls []uint32) (*CassandraStore, err var err error tmpSession, err := cluster.CreateSession() if err != nil { - log.Error(3, "cassandra_store: failed to create cassandra session. 
%s", err.Error()) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("cassandra_store: failed to create cassandra session") return nil, err } @@ -161,13 +167,17 @@ func NewCassandraStore(config *StoreConfig, ttls []uint32) (*CassandraStore, err // create or verify the metrictank keyspace if config.CreateKeyspace { - log.Info("cassandra_store: ensuring that keyspace %s exists.", config.Keyspace) + log.WithFields(log.Fields{ + "keyspace": config.Keyspace, + }).Info("cassandra_store: ensuring that keyspace exists") err = tmpSession.Query(fmt.Sprintf(schemaKeyspace, config.Keyspace)).Exec() if err != nil { return nil, err } for _, table := range ttlTables { - log.Info("cassandra_store: ensuring that table %s exists.", table.Name) + log.WithFields(log.Fields{ + "table": table.Name, + }).Info("cassandra_store: ensuring that table exists") err := tmpSession.Query(fmt.Sprintf(schemaTable, config.Keyspace, table.Name, table.WindowSize, table.WindowSize*60*60)).Exec() if err != nil { return nil, err @@ -184,7 +194,9 @@ func NewCassandraStore(config *StoreConfig, ttls []uint32) (*CassandraStore, err for attempt := 1; attempt > 0; attempt++ { keyspaceMetadata, err = tmpSession.KeyspaceMetadata(config.Keyspace) if err != nil { - log.Warn("cassandra keyspace not found; attempt: %v", attempt) + log.WithFields(log.Fields{ + "attempt": attempt, + }).Warn("cassandra_store: keyspace not found") if attempt >= 5 { return nil, err } @@ -192,7 +204,10 @@ func NewCassandraStore(config *StoreConfig, ttls []uint32) (*CassandraStore, err } else { for _, table := range ttlTables { if _, ok := keyspaceMetadata.Tables[table.Name]; !ok { - log.Warn("cassandra table %s not found; attempt: %v", table.Name, attempt) + log.WithFields(log.Fields{ + "table": table.Name, + "attempt": attempt, + }).Warn("cassandra_store: table not found") if attempt >= 5 { return nil, err } @@ -240,7 +255,9 @@ func NewCassandraStore(config *StoreConfig, ttls []uint32) (*CassandraStore, err if err != nil { 
return nil, err } - log.Debug("CS: created session with config %+v", config) + log.WithFields(log.Fields{ + "config": config, + }).Debug("cassandra_store: created session with config") c := &CassandraStore{ Session: session, writeQueues: make([]chan *mdata.ChunkWriteRequest, config.WriteConcurrency), @@ -327,7 +344,11 @@ func (c *CassandraStore) processWriteQueue(queue chan *mdata.ChunkWriteRequest, meter.Value(len(queue)) case cwr := <-queue: meter.Value(len(queue)) - log.Debug("CS: starting to save %s:%d %v", cwr.Key, cwr.Chunk.T0, cwr.Chunk) + log.WithFields(log.Fields{ + "key": cwr.Key, + "chunk.t0": cwr.Chunk.T0, + "chunk": cwr.Chunk, + }).Debug("cassandra_store: starting to save") //log how long the chunk waited in the queue before we attempted to save to cassandra cassPutWaitDuration.Value(time.Now().Sub(cwr.Timestamp)) @@ -342,12 +363,20 @@ func (c *CassandraStore) processWriteQueue(queue chan *mdata.ChunkWriteRequest, success = true cwr.Metric.SyncChunkSaveState(cwr.Chunk.T0) mdata.SendPersistMessage(keyStr, cwr.Chunk.T0) - log.Debug("CS: save complete. %s:%d %v", keyStr, cwr.Chunk.T0, cwr.Chunk) + log.WithFields(log.Fields{ + "key": keyStr, + "chunk.t0": cwr.Chunk.T0, + "chunk": cwr.Chunk, + }).Debug("cassandra_store: save complete") chunkSaveOk.Inc() } else { errmetrics.Inc(err) if (attempts % 20) == 0 { - log.Warn("CS: failed to save chunk to cassandra after %d attempts. 
%v, %s", attempts+1, cwr.Chunk, err) + log.WithFields(log.Fields{ + "attempts": attempts + 1, + "chunk": cwr.Chunk, + "error": err.Error(), + }).Warn("cassandra_store: failed to save chunk to cassandra") } chunkSaveFail.Inc() sleepTime := 100 * attempts diff --git a/test/key.go b/test/key.go index 2f00d6466c..ce9e3b01ad 100644 --- a/test/key.go +++ b/test/key.go @@ -5,6 +5,7 @@ import ( "reflect" "github.com/raintank/schema" + log "github.com/sirupsen/logrus" ) func GetAMKey(suffix int) schema.AMKey { @@ -23,7 +24,9 @@ func GetMKey(suffix int) schema.MKey { func MustMKeyFromString(id string) schema.MKey { mkey, err := schema.MKeyFromString(id) if err != nil { - panic(err) + log.WithFields(log.Fields{ + "error": err.Error(), + }).Panic("failed to get mkey from id") } return mkey } diff --git a/util/template.go b/util/template.go index a9765b207b..7a91f7b226 100644 --- a/util/template.go +++ b/util/template.go @@ -2,7 +2,7 @@ package util import ( "github.com/pelletier/go-toml" - "log" + log "github.com/sirupsen/logrus" ) var tomlFiles = make(map[string]*toml.Tree) @@ -14,7 +14,10 @@ func readTomlFile(TomlFilename string) *toml.Tree { } tree, err := toml.LoadFile(TomlFilename) if err != nil { - log.Fatalf("Error decoding file %q:\n%s\n", TomlFilename, err) + log.WithFields(log.Fields{ + "file": TomlFilename, + "error": err.Error(), + }).Fatal("error decoding file") } tomlFiles[TomlFilename] = tree return tree @@ -24,7 +27,10 @@ func ReadEntry(TomlFilename string, EntryName string) interface{} { tree := readTomlFile(TomlFilename) val := tree.Get(EntryName) if val == nil { - log.Fatalf("Error %q does not exist in %q", EntryName, TomlFilename) + log.WithFields(log.Fields{ + "entry": EntryName, + "file": TomlFilename, + }).Fatal("could not find entry in file") } return val } diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index dcc4f1d9fd..0000000000 --- 
a/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,40 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. - TimestampFormat string -} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - if f.TimestampFormat == "" { - f.TimestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(f.TimestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go deleted file mode 100644 index da928a3750..0000000000 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ /dev/null @@ -1,203 +0,0 @@ -package logrus - -import ( - "io" - "os" - "sync" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stdout`. You can also set this to - // something more adventorous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks levelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. 
In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in - Level Level - // Used to sync writing to the log. - mu sync.Mutex -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(levelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. -func New() *Logger { - return &Logger{ - Out: os.Stdout, - Formatter: new(TextFormatter), - Hooks: make(levelHooks), - Level: InfoLevel, - } -} - -// Adds a field to the log entry, note that you it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// Ff you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - return NewEntry(logger).WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - return NewEntry(logger).WithFields(fields) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugf(format, args...) - } -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infof(format, args...) 
- } -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - NewEntry(logger).Printf(format, args...) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorf(format, args...) - } -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalf(format, args...) - } -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicf(format, args...) - } -} - -func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debug(args...) - } -} - -func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Info(args...) - } -} - -func (logger *Logger) Print(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Error(args...) - } -} - -func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatal(args...) - } -} - -func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panic(args...) 
- } -} - -func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugln(args...) - } -} - -func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infoln(args...) - } -} - -func (logger *Logger) Println(args ...interface{}) { - NewEntry(logger).Println(args...) -} - -func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorln(args...) - } -} - -func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalln(args...) - } -} - -func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicln(args...) - } -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go b/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go deleted file mode 100644 index 0428ee5d52..0000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. -*/ -package logrus - -import ( - "syscall" -) - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index b8bebc13ee..0000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go b/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go deleted file mode 100644 index af609a53d6..0000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 2e09f6f7e3..0000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func IsTerminal() bool { - fd := syscall.Stdout - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 612417ff9c..0000000000 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,149 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 - gray = 37 -) - -var ( - baseTimestamp time.Time - isTerminal bool -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. 
- DisableSorting bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var keys []string = make([]string, 0, len(entry.Data)) - for k := range entry.Data { - keys = append(keys, k) - } - - if !f.DisableSorting { - sort.Strings(keys) - } - - b := &bytes.Buffer{} - - prefixFieldClashes(entry.Data) - - isColored := (f.ForceColors || isTerminal) && !f.DisableColors - - if f.TimestampFormat == "" { - f.TimestampFormat = DefaultTimestampFormat - } - if isColored { - f.printColored(b, entry, keys) - } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - f.appendKeyValue(b, "msg", entry.Message) - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) { - var levelColor int - switch entry.Level { - case DebugLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message) - } - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) - } -} - -func needsQuoting(text string) bool { - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { - return false - } - } - return true -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) { - switch value.(type) { - case string: - if needsQuoting(value.(string)) { - 
fmt.Fprintf(b, "%v=%s ", key, value) - } else { - fmt.Fprintf(b, "%v=%q ", key, value) - } - case error: - if needsQuoting(value.(error).Error()) { - fmt.Fprintf(b, "%v=%s ", key, value) - } else { - fmt.Fprintf(b, "%v=%q ", key, value) - } - default: - fmt.Fprintf(b, "%v=%v ", key, value) - } -} diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go deleted file mode 100644 index 1e30b1c753..0000000000 --- a/vendor/github.com/Sirupsen/logrus/writer.go +++ /dev/null @@ -1,31 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() *io.PipeWriter { - reader, writer := io.Pipe() - - go logger.writerScanner(reader) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (logger *Logger) writerScanner(reader *io.PipeReader) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - logger.Print(scanner.Text()) - } - if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE similarity index 100% rename from vendor/github.com/Sirupsen/logrus/LICENSE rename to vendor/github.com/sirupsen/logrus/LICENSE diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go new file mode 100644 index 0000000000..8af90637a9 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -0,0 +1,64 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka . 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke +// all handlers. The handlers will also be invoked when any Fatal log entry is +// made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. 
+func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go new file mode 100644 index 0000000000..da67aba06d --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go similarity index 62% rename from vendor/github.com/Sirupsen/logrus/entry.go rename to vendor/github.com/sirupsen/logrus/entry.go index 17fe6f707b..473bd1a0d3 100644 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -3,11 +3,24 @@ package logrus import ( "bytes" "fmt" - "io" "os" + "sync" "time" ) +var bufferPool *sync.Pool + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + // An entry is the final or intermediate Logrus logging entry. It contains all // the fields passed with WithField{,s}. It's finally logged when Debug, Info, // Warn, Error, Fatal or Panic is called on it. 
These objects can be reused and @@ -22,35 +35,38 @@ type Entry struct { Time time.Time // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. Level Level // Message passed to Debug, Info, Warn, Error, Fatal or Panic Message string + + // When formatter is called in entry.log(), an Buffer may be set to entry + Buffer *bytes.Buffer } func NewEntry(logger *Logger) *Entry { return &Entry{ Logger: logger, - // Default is three fields, give a little extra room + // Default is five fields, give a little extra room Data: make(Fields, 5), } } -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - // Returns the string representation from the reader and ultimately the // formatter. func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() + serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { return "", err } + str := string(serialized) + return str, nil +} - return reader.String(), err +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) } // Add a single field to the Entry. @@ -60,52 +76,82 @@ func (entry *Entry) WithField(key string, value interface{}) *Entry { // Add a map of fields to the Entry. func (entry *Entry) WithFields(fields Fields) *Entry { - data := Fields{} + data := make(Fields, len(entry.Data)+len(fields)) for k, v := range entry.Data { data[k] = v } for k, v := range fields { data[k] = v } - return &Entry{Logger: entry.Logger, Data: data} + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time} +} + +// Overrides the time of the Entry. 
+func (entry *Entry) WithTime(t time.Time) *Entry { + return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t} } -func (entry *Entry) log(level Level, msg string) { - entry.Time = time.Now() +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + + // Default to now, but allow users to override if they want. + // + // We don't have to worry about polluting future calls to Entry#log() + // with this assignment because this function is declared with a + // non-pointer receiver. + if entry.Time.IsZero() { + entry.Time = time.Now() + } + entry.Level = level entry.Message = msg - if err := entry.Logger.Hooks.Fire(level, entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } + entry.fireHooks() - reader, err := entry.Reader() - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() - } + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() + entry.write() - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } + entry.Buffer = nil // To avoid Entry#log() returning a value that only would make sense for // panic() to use in Entry#Panic(), we avoid the allocation by checking // directly here. 
if level <= PanicLevel { - panic(entry) + panic(&entry) + } +} + +func (entry *Entry) fireHooks() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + err := entry.Logger.Hooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + serialized, err := entry.Logger.Formatter.Format(entry) + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + } else { + _, err = entry.Logger.Out.Write(serialized) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } } } func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.log(DebugLevel, fmt.Sprint(args...)) } } @@ -115,13 +161,13 @@ func (entry *Entry) Print(args ...interface{}) { } func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.log(InfoLevel, fmt.Sprint(args...)) } } func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.log(WarnLevel, fmt.Sprint(args...)) } } @@ -131,20 +177,20 @@ func (entry *Entry) Warning(args ...interface{}) { } func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { entry.log(ErrorLevel, fmt.Sprint(args...)) } } func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.log(FatalLevel, fmt.Sprint(args...)) } - os.Exit(1) + Exit(1) } func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.log(PanicLevel, fmt.Sprint(args...)) } panic(fmt.Sprint(args...)) @@ -153,13 +199,13 @@ func (entry *Entry) Panic(args ...interface{}) { // 
Entry Printf family functions func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.Debug(fmt.Sprintf(format, args...)) } } func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.Info(fmt.Sprintf(format, args...)) } } @@ -169,7 +215,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) { } func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.Warn(fmt.Sprintf(format, args...)) } } @@ -179,19 +225,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) { } func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { entry.Error(fmt.Sprintf(format, args...)) } } func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.Fatal(fmt.Sprintf(format, args...)) } + Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.Panic(fmt.Sprintf(format, args...)) } } @@ -199,13 +246,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) { // Entry Println family functions func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.Debug(entry.sprintlnn(args...)) } } func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.Info(entry.sprintlnn(args...)) } } @@ -215,7 +262,7 @@ func (entry *Entry) Println(args ...interface{}) { } func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + 
if entry.Logger.level() >= WarnLevel { entry.Warn(entry.sprintlnn(args...)) } } @@ -225,19 +272,20 @@ func (entry *Entry) Warningln(args ...interface{}) { } func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { entry.Error(entry.sprintlnn(args...)) } } func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.Fatal(entry.sprintlnn(args...)) } + Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.Panic(entry.sprintlnn(args...)) } } diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go similarity index 83% rename from vendor/github.com/Sirupsen/logrus/exported.go rename to vendor/github.com/sirupsen/logrus/exported.go index a67e1b802d..eb612a6f3e 100644 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -2,6 +2,7 @@ package logrus import ( "io" + "time" ) var ( @@ -15,9 +16,7 @@ func StandardLogger() *Logger { // SetOutput sets the standard logger output. func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out + std.SetOutput(out) } // SetFormatter sets the standard logger formatter. @@ -31,14 +30,14 @@ func SetFormatter(formatter Formatter) { func SetLevel(level Level) { std.mu.Lock() defer std.mu.Unlock() - std.Level = level + std.SetLevel(level) } // GetLevel returns the standard logger level. func GetLevel() Level { std.mu.Lock() defer std.mu.Unlock() - return std.Level + return std.level() } // AddHook adds a hook to the standard logger hooks. @@ -48,6 +47,11 @@ func AddHook(hook Hook) { std.Hooks.Add(hook) } +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. 
+func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + // WithField creates an entry from the standard logger and adds a field to // it. If you want multiple fields, use `WithFields`. // @@ -67,6 +71,15 @@ func WithFields(fields Fields) *Entry { return std.WithFields(fields) } +// WithTime creats an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + // Debug logs a message at level Debug on the standard logger. func Debug(args ...interface{}) { std.Debug(args...) @@ -102,7 +115,7 @@ func Panic(args ...interface{}) { std.Panic(args...) } -// Fatal logs a message at level Fatal on the standard logger. +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatal(args ...interface{}) { std.Fatal(args...) } @@ -142,7 +155,7 @@ func Panicf(format string, args ...interface{}) { std.Panicf(format, args...) } -// Fatalf logs a message at level Fatal on the standard logger. +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatalf(format string, args ...interface{}) { std.Fatalf(format, args...) } @@ -182,7 +195,7 @@ func Panicln(args ...interface{}) { std.Panicln(args...) } -// Fatalln logs a message at level Fatal on the standard logger. +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatalln(args ...interface{}) { std.Fatalln(args...) 
} diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go similarity index 69% rename from vendor/github.com/Sirupsen/logrus/formatter.go rename to vendor/github.com/sirupsen/logrus/formatter.go index 104d689f18..83c74947be 100644 --- a/vendor/github.com/Sirupsen/logrus/formatter.go +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -2,7 +2,7 @@ package logrus import "time" -const DefaultTimestampFormat = time.RFC3339 +const defaultTimestampFormat = time.RFC3339 // The Formatter interface is used to implement a custom Formatter. It takes an // `Entry`. It exposes all the fields, including the default ones: @@ -30,19 +30,22 @@ type Formatter interface { // // It's not exported because it's still using Data in an opinionated way. It's to // avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields) { - _, ok := data["time"] - if ok { - data["fields.time"] = data["time"] +func prefixFieldClashes(data Fields, fieldMap FieldMap) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) } - _, ok = data["msg"] - if ok { - data["fields.msg"] = data["msg"] + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) } - _, ok = data["level"] - if ok { - data["fields.level"] = data["level"] + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) } } diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go similarity index 87% rename from vendor/github.com/Sirupsen/logrus/hooks.go rename to vendor/github.com/sirupsen/logrus/hooks.go index 0da2b3653f..3f151cdc39 100644 --- a/vendor/github.com/Sirupsen/logrus/hooks.go +++ b/vendor/github.com/sirupsen/logrus/hooks.go @@ -11,11 +11,11 @@ type Hook interface { } // Internal 
type for storing the hooks on a logger instance. -type levelHooks map[Level][]Hook +type LevelHooks map[Level][]Hook // Add a hook to an instance of logger. This is called with // `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks levelHooks) Add(hook Hook) { +func (hooks LevelHooks) Add(hook Hook) { for _, level := range hook.Levels() { hooks[level] = append(hooks[level], hook) } @@ -23,7 +23,7 @@ func (hooks levelHooks) Add(hook Hook) { // Fire all the hooks for the passed level. Used by `entry.log` to fire // appropriate hooks for a log entry. -func (hooks levelHooks) Fire(level Level, entry *Entry) error { +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { for _, hook := range hooks[level] { if err := hook.Fire(entry); err != nil { return err diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000000..dab17610f1 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -0,0 +1,89 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +// Default key names for the default fields +const ( + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" +) + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + + // FieldMap allows users to customize the names of keys for default fields. 
+ // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // }, + // } + FieldMap FieldMap +} + +// Format renders a single log entry +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go new file mode 100644 index 0000000000..342f7977d8 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -0,0 +1,337 @@ +package logrus + +import ( + "io" + "os" + "sync" + "sync/atomic" + "time" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. 
For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool +} + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. 
+func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + logger.entryPool.Put(entry) +} + +// Adds a field to the log entry, note that it doesn't log until you call +// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debugf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Infof(format, args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Errorf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatalf(format, args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panicf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debug(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Error(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatal(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panic(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debugln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Infoln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Errorln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatalln(args...) 
+ logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panicln(args...) + logger.releaseEntry(entry) + } +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} + +func (logger *Logger) SetOutput(out io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = out +} + +func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go similarity index 63% rename from vendor/github.com/Sirupsen/logrus/logrus.go rename to vendor/github.com/sirupsen/logrus/logrus.go index 43ee12e90e..dd38999741 100644 --- a/vendor/github.com/Sirupsen/logrus/logrus.go +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -3,13 +3,14 @@ package logrus import ( "fmt" "log" + "strings" ) // Fields type, used to pass to `WithFields`. type Fields map[string]interface{} // Level type -type Level uint8 +type Level uint32 // Convert the Level to a string. E.g. PanicLevel becomes "panic". func (level Level) String() string { @@ -33,7 +34,7 @@ func (level Level) String() string { // ParseLevel takes a string level and returns the Logrus log level constant. 
func ParseLevel(lvl string) (Level, error) { - switch lvl { + switch strings.ToLower(lvl) { case "panic": return PanicLevel, nil case "fatal": @@ -52,6 +53,16 @@ func ParseLevel(lvl string) (Level, error) { return l, fmt.Errorf("not a valid logrus Level: %q", lvl) } +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, +} + // These are the different logging levels. You can set the logging level to log // on your instance of logger, obtained with `logrus.New()`. const ( @@ -74,7 +85,11 @@ const ( ) // Won't compile if StdLogger can't be realized by a log.Logger -var _ StdLogger = &log.Logger{} +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) // StdLogger is what your logrus-enabled library should take, that way // it'll accept a stdlib logger and a logrus logger. There's no standard @@ -92,3 +107,37 @@ type StdLogger interface { Panicf(string, ...interface{}) Panicln(...interface{}) } + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args 
...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go new file mode 100644 index 0000000000..4880d13d26 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,10 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine,!gopherjs + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +type Termios unix.Termios diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 0000000000..3de08e802f --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine gopherjs + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 0000000000..067047a123 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,19 @@ +// +build !appengine,!gopherjs + +package logrus + +import ( + "io" + "os" + + "golang.org/x/crypto/ssh/terminal" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return terminal.IsTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go similarity index 61% rename from vendor/github.com/Sirupsen/logrus/terminal_linux.go rename to vendor/github.com/sirupsen/logrus/terminal_linux.go index a2c0b40db6..f29a0097c8 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_linux.go +++ b/vendor/github.com/sirupsen/logrus/terminal_linux.go @@ -3,10 +3,12 
@@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !appengine,!gopherjs + package logrus -import "syscall" +import "golang.org/x/sys/unix" -const ioctlReadTermios = syscall.TCGETS +const ioctlReadTermios = unix.TCGETS -type Termios syscall.Termios +type Termios unix.Termios diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go new file mode 100644 index 0000000000..3e55040304 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -0,0 +1,195 @@ +package logrus + +import ( + "bytes" + "fmt" + "sort" + "strings" + "sync" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 36 + gray = 37 +) + +var ( + baseTimestamp time.Time + emptyFieldMap FieldMap +) + +func init() { + baseTimestamp = time.Now() +} + +// TextFormatter formats logs into text +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // Disables the truncation of the level text to 4 characters. 
+ DisableLevelTruncation bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + sync.Once +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + } +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + prefixFieldClashes(entry.Data, f.FieldMap) + + keys := make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + f.Do(func() { f.init(entry) }) + + isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyTime), entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyLevel), entry.Level.String()) + if entry.Message != "" { + f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyMsg), entry.Message) + } + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case 
ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation { + levelText = levelText[0:4] + } + + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + if b.Len() > 0 { + b.WriteByte(' ') + } + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go new file mode 100644 index 0000000000..7bdebedc60 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -0,0 +1,62 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { 
+ entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS new file mode 100644 index 0000000000..2b00ddba0d --- /dev/null +++ b/vendor/golang.org/x/crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS new file mode 100644 index 0000000000..1fbd3e976f --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go new file mode 100644 index 0000000000..9a887598ff --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -0,0 +1,951 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import ( + "bytes" + "io" + "sync" + "unicode/utf8" +) + +// EscapeCodes contains escape sequences that can be written to the terminal in +// order to achieve different styles of text. +type EscapeCodes struct { + // Foreground colors + Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte + + // Reset all attributes + Reset []byte +} + +var vt100EscapeCodes = EscapeCodes{ + Black: []byte{keyEscape, '[', '3', '0', 'm'}, + Red: []byte{keyEscape, '[', '3', '1', 'm'}, + Green: []byte{keyEscape, '[', '3', '2', 'm'}, + Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, + Blue: []byte{keyEscape, '[', '3', '4', 'm'}, + Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, + Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, + White: []byte{keyEscape, '[', '3', '7', 'm'}, + + Reset: []byte{keyEscape, '[', '0', 'm'}, +} + +// Terminal contains the state for running a VT100 terminal that is capable of +// reading lines of input. 
+type Terminal struct { + // AutoCompleteCallback, if non-null, is called for each keypress with + // the full input line and the current position of the cursor (in + // bytes, as an index into |line|). If it returns ok=false, the key + // press is processed normally. Otherwise it returns a replacement line + // and the new cursor position. + AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) + + // Escape contains a pointer to the escape codes for this terminal. + // It's always a valid pointer, although the escape codes themselves + // may be empty if the terminal doesn't support them. + Escape *EscapeCodes + + // lock protects the terminal and the state in this object from + // concurrent processing of a key press and a Write() call. + lock sync.Mutex + + c io.ReadWriter + prompt []rune + + // line is the current line being entered. + line []rune + // pos is the logical position of the cursor in line + pos int + // echo is true if local echo is enabled + echo bool + // pasteActive is true iff there is a bracketed paste operation in + // progress. + pasteActive bool + + // cursorX contains the current X value of the cursor where the left + // edge is 0. cursorY contains the row number where the first row of + // the current line is 0. + cursorX, cursorY int + // maxLine is the greatest value of cursorY so far. + maxLine int + + termWidth, termHeight int + + // outBuf contains the terminal data to be sent. + outBuf []byte + // remainder contains the remainder of any partial key sequences after + // a read. It aliases into inBuf. + remainder []byte + inBuf [256]byte + + // history contains previously entered commands so that they can be + // accessed with the up and down keys. + history stRingBuffer + // historyIndex stores the currently accessed history entry, where zero + // means the immediately previous entry. 
+ historyIndex int + // When navigating up and down the history it's possible to return to + // the incomplete, initial line. That value is stored in + // historyPending. + historyPending string +} + +// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is +// a local terminal, that terminal must first have been put into raw mode. +// prompt is a string that is written at the start of each input line (i.e. +// "> "). +func NewTerminal(c io.ReadWriter, prompt string) *Terminal { + return &Terminal{ + Escape: &vt100EscapeCodes, + c: c, + prompt: []rune(prompt), + termWidth: 80, + termHeight: 24, + echo: true, + historyIndex: -1, + } +} + +const ( + keyCtrlD = 4 + keyCtrlU = 21 + keyEnter = '\r' + keyEscape = 27 + keyBackspace = 127 + keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota + keyUp + keyDown + keyLeft + keyRight + keyAltLeft + keyAltRight + keyHome + keyEnd + keyDeleteWord + keyDeleteLine + keyClearScreen + keyPasteStart + keyPasteEnd +) + +var ( + crlf = []byte{'\r', '\n'} + pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} + pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} +) + +// bytesToKey tries to parse a key sequence from b. If successful, it returns +// the key and the remainder of the input. Otherwise it returns utf8.RuneError. 
+func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { + if len(b) == 0 { + return utf8.RuneError, nil + } + + if !pasteActive { + switch b[0] { + case 1: // ^A + return keyHome, b[1:] + case 5: // ^E + return keyEnd, b[1:] + case 8: // ^H + return keyBackspace, b[1:] + case 11: // ^K + return keyDeleteLine, b[1:] + case 12: // ^L + return keyClearScreen, b[1:] + case 23: // ^W + return keyDeleteWord, b[1:] + } + } + + if b[0] != keyEscape { + if !utf8.FullRune(b) { + return utf8.RuneError, b + } + r, l := utf8.DecodeRune(b) + return r, b[l:] + } + + if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { + switch b[2] { + case 'A': + return keyUp, b[3:] + case 'B': + return keyDown, b[3:] + case 'C': + return keyRight, b[3:] + case 'D': + return keyLeft, b[3:] + case 'H': + return keyHome, b[3:] + case 'F': + return keyEnd, b[3:] + } + } + + if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { + switch b[5] { + case 'C': + return keyAltRight, b[6:] + case 'D': + return keyAltLeft, b[6:] + } + } + + if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { + return keyPasteStart, b[6:] + } + + if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { + return keyPasteEnd, b[6:] + } + + // If we get here then we have a key that we don't recognise, or a + // partial sequence. It's not clear how one should find the end of a + // sequence without knowing them all, but it seems that [a-zA-Z~] only + // appears at the end of a sequence. + for i, c := range b[0:] { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { + return keyUnknown, b[i+1:] + } + } + + return utf8.RuneError, b +} + +// queue appends data to the end of t.outBuf +func (t *Terminal) queue(data []rune) { + t.outBuf = append(t.outBuf, []byte(string(data))...) 
+} + +var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} +var space = []rune{' '} + +func isPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// moveCursorToPos appends data to t.outBuf which will move the cursor to the +// given, logical position in the text. +func (t *Terminal) moveCursorToPos(pos int) { + if !t.echo { + return + } + + x := visualLength(t.prompt) + pos + y := x / t.termWidth + x = x % t.termWidth + + up := 0 + if y < t.cursorY { + up = t.cursorY - y + } + + down := 0 + if y > t.cursorY { + down = y - t.cursorY + } + + left := 0 + if x < t.cursorX { + left = t.cursorX - x + } + + right := 0 + if x > t.cursorX { + right = x - t.cursorX + } + + t.cursorX = x + t.cursorY = y + t.move(up, down, left, right) +} + +func (t *Terminal) move(up, down, left, right int) { + movement := make([]rune, 3*(up+down+left+right)) + m := movement + for i := 0; i < up; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'A' + m = m[3:] + } + for i := 0; i < down; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'B' + m = m[3:] + } + for i := 0; i < left; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'D' + m = m[3:] + } + for i := 0; i < right; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'C' + m = m[3:] + } + + t.queue(movement) +} + +func (t *Terminal) clearLineToRight() { + op := []rune{keyEscape, '[', 'K'} + t.queue(op) +} + +const maxLineLength = 4096 + +func (t *Terminal) setLine(newLine []rune, newPos int) { + if t.echo { + t.moveCursorToPos(0) + t.writeLine(newLine) + for i := len(newLine); i < len(t.line); i++ { + t.writeLine(space) + } + t.moveCursorToPos(newPos) + } + t.line = newLine + t.pos = newPos +} + +func (t *Terminal) advanceCursor(places int) { + t.cursorX += places + t.cursorY += t.cursorX / t.termWidth + if t.cursorY > t.maxLine { + t.maxLine = t.cursorY + } + t.cursorX = t.cursorX % t.termWidth + + if places > 0 && t.cursorX == 0 { + // Normally terminals will advance 
the current position + // when writing a character. But that doesn't happen + // for the last character in a line. However, when + // writing a character (except a new line) that causes + // a line wrap, the position will be advanced two + // places. + // + // So, if we are stopping at the end of a line, we + // need to write a newline so that our cursor can be + // advanced to the next line. + t.outBuf = append(t.outBuf, '\r', '\n') + } +} + +func (t *Terminal) eraseNPreviousChars(n int) { + if n == 0 { + return + } + + if t.pos < n { + n = t.pos + } + t.pos -= n + t.moveCursorToPos(t.pos) + + copy(t.line[t.pos:], t.line[n+t.pos:]) + t.line = t.line[:len(t.line)-n] + if t.echo { + t.writeLine(t.line[t.pos:]) + for i := 0; i < n; i++ { + t.queue(space) + } + t.advanceCursor(n) + t.moveCursorToPos(t.pos) + } +} + +// countToLeftWord returns then number of characters from the cursor to the +// start of the previous word. +func (t *Terminal) countToLeftWord() int { + if t.pos == 0 { + return 0 + } + + pos := t.pos - 1 + for pos > 0 { + if t.line[pos] != ' ' { + break + } + pos-- + } + for pos > 0 { + if t.line[pos] == ' ' { + pos++ + break + } + pos-- + } + + return t.pos - pos +} + +// countToRightWord returns then number of characters from the cursor to the +// start of the next word. +func (t *Terminal) countToRightWord() int { + pos := t.pos + for pos < len(t.line) { + if t.line[pos] == ' ' { + break + } + pos++ + } + for pos < len(t.line) { + if t.line[pos] != ' ' { + break + } + pos++ + } + return pos - t.pos +} + +// visualLength returns the number of visible glyphs in s. 
+func visualLength(runes []rune) int { + inEscapeSeq := false + length := 0 + + for _, r := range runes { + switch { + case inEscapeSeq: + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { + inEscapeSeq = false + } + case r == '\x1b': + inEscapeSeq = true + default: + length++ + } + } + + return length +} + +// handleKey processes the given key and, optionally, returns a line of text +// that the user has entered. +func (t *Terminal) handleKey(key rune) (line string, ok bool) { + if t.pasteActive && key != keyEnter { + t.addKeyToLine(key) + return + } + + switch key { + case keyBackspace: + if t.pos == 0 { + return + } + t.eraseNPreviousChars(1) + case keyAltLeft: + // move left by a word. + t.pos -= t.countToLeftWord() + t.moveCursorToPos(t.pos) + case keyAltRight: + // move right by a word. + t.pos += t.countToRightWord() + t.moveCursorToPos(t.pos) + case keyLeft: + if t.pos == 0 { + return + } + t.pos-- + t.moveCursorToPos(t.pos) + case keyRight: + if t.pos == len(t.line) { + return + } + t.pos++ + t.moveCursorToPos(t.pos) + case keyHome: + if t.pos == 0 { + return + } + t.pos = 0 + t.moveCursorToPos(t.pos) + case keyEnd: + if t.pos == len(t.line) { + return + } + t.pos = len(t.line) + t.moveCursorToPos(t.pos) + case keyUp: + entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) + if !ok { + return "", false + } + if t.historyIndex == -1 { + t.historyPending = string(t.line) + } + t.historyIndex++ + runes := []rune(entry) + t.setLine(runes, len(runes)) + case keyDown: + switch t.historyIndex { + case -1: + return + case 0: + runes := []rune(t.historyPending) + t.setLine(runes, len(runes)) + t.historyIndex-- + default: + entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) + if ok { + t.historyIndex-- + runes := []rune(entry) + t.setLine(runes, len(runes)) + } + } + case keyEnter: + t.moveCursorToPos(len(t.line)) + t.queue([]rune("\r\n")) + line = string(t.line) + ok = true + t.line = t.line[:0] + t.pos = 0 + t.cursorX = 0 + t.cursorY = 0 + 
t.maxLine = 0 + case keyDeleteWord: + // Delete zero or more spaces and then one or more characters. + t.eraseNPreviousChars(t.countToLeftWord()) + case keyDeleteLine: + // Delete everything from the current cursor position to the + // end of line. + for i := t.pos; i < len(t.line); i++ { + t.queue(space) + t.advanceCursor(1) + } + t.line = t.line[:t.pos] + t.moveCursorToPos(t.pos) + case keyCtrlD: + // Erase the character under the current position. + // The EOF case when the line is empty is handled in + // readLine(). + if t.pos < len(t.line) { + t.pos++ + t.eraseNPreviousChars(1) + } + case keyCtrlU: + t.eraseNPreviousChars(t.pos) + case keyClearScreen: + // Erases the screen and moves the cursor to the home position. + t.queue([]rune("\x1b[2J\x1b[H")) + t.queue(t.prompt) + t.cursorX, t.cursorY = 0, 0 + t.advanceCursor(visualLength(t.prompt)) + t.setLine(t.line, t.pos) + default: + if t.AutoCompleteCallback != nil { + prefix := string(t.line[:t.pos]) + suffix := string(t.line[t.pos:]) + + t.lock.Unlock() + newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) + t.lock.Lock() + + if completeOk { + t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) + return + } + } + if !isPrintable(key) { + return + } + if len(t.line) == maxLineLength { + return + } + t.addKeyToLine(key) + } + return +} + +// addKeyToLine inserts the given key at the current position in the current +// line. 
+func (t *Terminal) addKeyToLine(key rune) { + if len(t.line) == cap(t.line) { + newLine := make([]rune, len(t.line), 2*(1+len(t.line))) + copy(newLine, t.line) + t.line = newLine + } + t.line = t.line[:len(t.line)+1] + copy(t.line[t.pos+1:], t.line[t.pos:]) + t.line[t.pos] = key + if t.echo { + t.writeLine(t.line[t.pos:]) + } + t.pos++ + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) writeLine(line []rune) { + for len(line) != 0 { + remainingOnLine := t.termWidth - t.cursorX + todo := len(line) + if todo > remainingOnLine { + todo = remainingOnLine + } + t.queue(line[:todo]) + t.advanceCursor(visualLength(line[:todo])) + line = line[todo:] + } +} + +// writeWithCRLF writes buf to w but replaces all occurrences of \n with \r\n. +func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) { + for len(buf) > 0 { + i := bytes.IndexByte(buf, '\n') + todo := len(buf) + if i >= 0 { + todo = i + } + + var nn int + nn, err = w.Write(buf[:todo]) + n += nn + if err != nil { + return n, err + } + buf = buf[todo:] + + if i >= 0 { + if _, err = w.Write(crlf); err != nil { + return n, err + } + n++ + buf = buf[1:] + } + } + + return n, nil +} + +func (t *Terminal) Write(buf []byte) (n int, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + if t.cursorX == 0 && t.cursorY == 0 { + // This is the easy case: there's nothing on the screen that we + // have to move out of the way. + return writeWithCRLF(t.c, buf) + } + + // We have a prompt and possibly user input on the screen. We + // have to clear it first. 
+ t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) + t.cursorX = 0 + t.clearLineToRight() + + for t.cursorY > 0 { + t.move(1 /* up */, 0, 0, 0) + t.cursorY-- + t.clearLineToRight() + } + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + + if n, err = writeWithCRLF(t.c, buf); err != nil { + return + } + + t.writeLine(t.prompt) + if t.echo { + t.writeLine(t.line) + } + + t.moveCursorToPos(t.pos) + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + return +} + +// ReadPassword temporarily changes the prompt and reads a password, without +// echo, from the terminal. +func (t *Terminal) ReadPassword(prompt string) (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + oldPrompt := t.prompt + t.prompt = []rune(prompt) + t.echo = false + + line, err = t.readLine() + + t.prompt = oldPrompt + t.echo = true + + return +} + +// ReadLine returns a line of input from the terminal. +func (t *Terminal) ReadLine() (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + return t.readLine() +} + +func (t *Terminal) readLine() (line string, err error) { + // t.lock must be held at this point + + if t.cursorX == 0 && t.cursorY == 0 { + t.writeLine(t.prompt) + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + } + + lineIsPasted := t.pasteActive + + for { + rest := t.remainder + lineOk := false + for !lineOk { + var key rune + key, rest = bytesToKey(rest, t.pasteActive) + if key == utf8.RuneError { + break + } + if !t.pasteActive { + if key == keyCtrlD { + if len(t.line) == 0 { + return "", io.EOF + } + } + if key == keyPasteStart { + t.pasteActive = true + if len(t.line) == 0 { + lineIsPasted = true + } + continue + } + } else if key == keyPasteEnd { + t.pasteActive = false + continue + } + if !t.pasteActive { + lineIsPasted = false + } + line, lineOk = t.handleKey(key) + } + if len(rest) > 0 { + n := copy(t.inBuf[:], rest) + t.remainder = t.inBuf[:n] + } else 
{ + t.remainder = nil + } + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + if lineOk { + if t.echo { + t.historyIndex = -1 + t.history.Add(line) + } + if lineIsPasted { + err = ErrPasteIndicator + } + return + } + + // t.remainder is a slice at the beginning of t.inBuf + // containing a partial key sequence + readBuf := t.inBuf[len(t.remainder):] + var n int + + t.lock.Unlock() + n, err = t.c.Read(readBuf) + t.lock.Lock() + + if err != nil { + return + } + + t.remainder = t.inBuf[:n+len(t.remainder)] + } +} + +// SetPrompt sets the prompt to be used when reading subsequent lines. +func (t *Terminal) SetPrompt(prompt string) { + t.lock.Lock() + defer t.lock.Unlock() + + t.prompt = []rune(prompt) +} + +func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { + // Move cursor to column zero at the start of the line. + t.move(t.cursorY, 0, t.cursorX, 0) + t.cursorX, t.cursorY = 0, 0 + t.clearLineToRight() + for t.cursorY < numPrevLines { + // Move down a line + t.move(0, 1, 0, 0) + t.cursorY++ + t.clearLineToRight() + } + // Move back to beginning. + t.move(t.cursorY, 0, 0, 0) + t.cursorX, t.cursorY = 0, 0 + + t.queue(t.prompt) + t.advanceCursor(visualLength(t.prompt)) + t.writeLine(t.line) + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) SetSize(width, height int) error { + t.lock.Lock() + defer t.lock.Unlock() + + if width == 0 { + width = 1 + } + + oldWidth := t.termWidth + t.termWidth, t.termHeight = width, height + + switch { + case width == oldWidth: + // If the width didn't change then nothing else needs to be + // done. + return nil + case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0: + // If there is nothing on current line and no prompt printed, + // just do nothing + return nil + case width < oldWidth: + // Some terminals (e.g. xterm) will truncate lines that were + // too long when shinking. Others, (e.g. gnome-terminal) will + // attempt to wrap them. 
For the former, repainting t.maxLine + // works great, but that behaviour goes badly wrong in the case + // of the latter because they have doubled every full line. + + // We assume that we are working on a terminal that wraps lines + // and adjust the cursor position based on every previous line + // wrapping and turning into two. This causes the prompt on + // xterms to move upwards, which isn't great, but it avoids a + // huge mess with gnome-terminal. + if t.cursorX >= t.termWidth { + t.cursorX = t.termWidth - 1 + } + t.cursorY *= 2 + t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) + case width > oldWidth: + // If the terminal expands then our position calculations will + // be wrong in the future because we think the cursor is + // |t.pos| chars into the string, but there will be a gap at + // the end of any wrapped line. + // + // But the position will actually be correct until we move, so + // we can move back to the beginning and repaint everything. + t.clearAndRepaintLinePlusNPrevious(t.maxLine) + } + + _, err := t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + return err +} + +type pasteIndicatorError struct{} + +func (pasteIndicatorError) Error() string { + return "terminal: ErrPasteIndicator not correctly handled" +} + +// ErrPasteIndicator may be returned from ReadLine as the error, in addition +// to valid line data. It indicates that bracketed paste mode is enabled and +// that the returned line consists only of pasted data. Programs may wish to +// interpret pasted data more literally than typed data. +var ErrPasteIndicator = pasteIndicatorError{} + +// SetBracketedPasteMode requests that the terminal bracket paste operations +// with markers. Not all terminals support this but, if it is supported, then +// enabling this mode will stop any autocomplete callback from running due to +// pastes. Additionally, any lines that are completely pasted will be returned +// from ReadLine with the error set to ErrPasteIndicator. 
+func (t *Terminal) SetBracketedPasteMode(on bool) { + if on { + io.WriteString(t.c, "\x1b[?2004h") + } else { + io.WriteString(t.c, "\x1b[?2004l") + } +} + +// stRingBuffer is a ring buffer of strings. +type stRingBuffer struct { + // entries contains max elements. + entries []string + max int + // head contains the index of the element most recently added to the ring. + head int + // size contains the number of elements in the ring. + size int +} + +func (s *stRingBuffer) Add(a string) { + if s.entries == nil { + const defaultNumEntries = 100 + s.entries = make([]string, defaultNumEntries) + s.max = defaultNumEntries + } + + s.head = (s.head + 1) % s.max + s.entries[s.head] = a + if s.size < s.max { + s.size++ + } +} + +// NthPreviousEntry returns the value passed to the nth previous call to Add. +// If n is zero then the immediately prior value is returned, if one, then the +// next most recent, and so on. If such an element doesn't exist then ok is +// false. +func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { + if n >= s.size { + return "", false + } + index := s.head - n + if index < 0 { + index += s.max + } + return s.entries[index], true +} + +// readPasswordLine reads from reader until it finds \n or io.EOF. +// The slice returned does not include the \n. +// readPasswordLine also ignores any \r it finds. 
+func readPasswordLine(reader io.Reader) ([]byte, error) { + var buf [1]byte + var ret []byte + + for { + n, err := reader.Read(buf[:]) + if n > 0 { + switch buf[0] { + case '\n': + return ret, nil + case '\r': + // remove \r from passwords on Windows + default: + ret = append(ret, buf[0]) + } + continue + } + if err != nil { + if err == io.EOF && len(ret) > 0 { + return ret, nil + } + return ret, err + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go new file mode 100644 index 0000000000..731c89a284 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -0,0 +1,114 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "golang.org/x/sys/unix" +) + +// State contains the state of a terminal. +type State struct { + termios unix.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + oldState := State{termios: *termios} + + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil { + return nil, err + } + + return &oldState, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.termios) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return -1, -1, err + } + return int(ws.Col), int(ws.Row), nil +} + +// passwordReader is an io.Reader that reads from a specific file descriptor. +type passwordReader int + +func (r passwordReader) Read(buf []byte) (int, error) { + return unix.Read(int(r), buf) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
+func ReadPassword(fd int) ([]byte, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + newState := *termios + newState.Lflag &^= unix.ECHO + newState.Lflag |= unix.ICANON | unix.ISIG + newState.Iflag |= unix.ICRNL + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newState); err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios) + + return readPasswordLine(passwordReader(fd)) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go new file mode 100644 index 0000000000..cb23a59049 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package terminal + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA +const ioctlWriteTermios = unix.TIOCSETA diff --git a/vendor/github.com/Sirupsen/logrus/terminal_darwin.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go similarity index 55% rename from vendor/github.com/Sirupsen/logrus/terminal_darwin.go rename to vendor/golang.org/x/crypto/ssh/terminal/util_linux.go index 8fe02a4aec..5fadfe8a1d 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_darwin.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go @@ -1,12 +1,10 @@ -// Based on ssh/terminal: // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package logrus +package terminal -import "syscall" +import "golang.org/x/sys/unix" -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go new file mode 100644 index 0000000000..799f049f04 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go @@ -0,0 +1,58 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "fmt" + "runtime" +) + +type State struct{} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + return false +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. 
+func Restore(fd int, state *State) error { + return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go new file mode 100644 index 0000000000..9e41b9f43f --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -0,0 +1,124 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "golang.org/x/sys/unix" + "io" + "syscall" +) + +// State contains the state of a terminal. +type State struct { + termios unix.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
+func ReadPassword(fd int) ([]byte, error) { + // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c + val, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + oldState := *val + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState) + if err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState) + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +// see http://cr.illumos.org/~webrev/andy_js/1060/ +func MakeRaw(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + + oldState := State{termios: *termios} + + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil { + return nil, err + } + + return &oldState, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. 
+func Restore(fd int, oldState *State) error { + return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return 0, 0, err + } + return int(ws.Col), int(ws.Row), nil +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go new file mode 100644 index 0000000000..8618955df7 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -0,0 +1,103 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "os" + + "golang.org/x/sys/windows" +) + +type State struct { + mode uint32 +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var st uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &st) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd int) (*State, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { + return nil, err + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return windows.SetConsoleMode(windows.Handle(fd), state.mode) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var info windows.ConsoleScreenBufferInfo + if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { + return 0, 0, err + } + return int(info.Size.X), int(info.Size.Y), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
+func ReadPassword(fd int) ([]byte, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + old := st + + st &^= (windows.ENABLE_ECHO_INPUT) + st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil { + return nil, err + } + + defer windows.SetConsoleMode(windows.Handle(fd), old) + + var h windows.Handle + p, _ := windows.GetCurrentProcess() + if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, windows.DUPLICATE_SAME_ACCESS); err != nil { + return nil, err + } + + f := os.NewFile(uintptr(h), "stdin") + defer f.Close() + return readPasswordLine(f) +}