From 648b2be2f2740af0611541c9d88f448d6284e944 Mon Sep 17 00:00:00 2001 From: Abhinav Gupta Date: Sun, 23 Jan 2022 09:38:44 -0800 Subject: [PATCH 1/7] Benchmark for AddCaller+AddStacktrace Adds a benchmark that tests the performance of a Zap logger with both the `AddCaller` and `AddStacktrace` options enabled. --- logger_bench_test.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/logger_bench_test.go b/logger_bench_test.go index 71c6b5911..41e661577 100644 --- a/logger_bench_test.go +++ b/logger_bench_test.go @@ -179,6 +179,24 @@ func BenchmarkAddCallerHook(b *testing.B) { }) } +func BenchmarkAddCallerAndStacktrace(b *testing.B) { + logger := New( + zapcore.NewCore( + zapcore.NewJSONEncoder(NewProductionConfig().EncoderConfig), + &ztest.Discarder{}, + InfoLevel, + ), + AddCaller(), + AddStacktrace(WarnLevel), + ) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + logger.Warn("Caller and stacktrace.") + } + }) +} + func Benchmark10Fields(b *testing.B) { withBenchedLogger(b, func(log *Logger) { log.Info("Ten fields, passed at the log site.", From de178a2dfd73d062bf4a1ae520ac2b0ca62f718b Mon Sep 17 00:00:00 2001 From: Abhinav Gupta Date: Sun, 23 Jan 2022 09:39:00 -0800 Subject: [PATCH 2/7] Optimize AddCaller+AddStacktrace by sharing stacks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we use the `AddCaller(true)` option for a logger with `AddStacktrace(level)`, which captures a stack trace for the specified level or higher, Zap currently inspects the stack twice: - `getCallerFrame` retrieves information about the immediate caller of the log entry - `StackSkip` calls `takeStacktrace` to capture the entire stack trace and build a string from it For the case where both caller and stack trace are requested, we can avoid redundant work by sharing information about the call stack between the logic for the two.
To accomplish this, the following high-level pieces were added: type stacktrace captureStacktrace(skip int, depth stacktraceDepth) *stacktrace func (*stacktrace) Next() (runtime.Frame, bool) type stackFormatter func newStackFormatter(*buffer.Buffer) func (*stackFormatter) FormatStack(*stacktrace) func (*stackFormatter) FormatFrame(runtime.Frame) `captureStacktrace` captures stack traces (either just one frame or the entire stack) that can then be inspected manually (to extract caller information) and formatted into a string representation either one-frame-at-a-time or wholesale with `stackFormatter`. This allows us to reorganize the logic that applies the AddCaller and AddStacktrace options so that it can: - capture as much of the stack as it needs - extract caller information from the first frame if we need it - format that frame and the remaining frames into a stack trace --- Benchmarking: I ran the included benchmark before and after the patch with the following flags and compared the results with benchstat. ``` % go test -run '^$' -bench AddCallerAndStack -benchmem -count 10 [...] % benchstat before.txt after.txt name old time/op new time/op delta AddCallerAndStacktrace-2 4.68µs ± 7% 3.63µs ± 7% -22.35% (p=0.000 n=10+10) name old alloc/op new alloc/op delta AddCallerAndStacktrace-2 632B ± 0% 416B ± 0% -34.18% (p=0.000 n=10+10) name old allocs/op new allocs/op delta AddCallerAndStacktrace-2 5.00 ± 0% 3.00 ± 0% -40.00% (p=0.000 n=10+10) ``` Allocations for this code path are down from 5 to 3, and CPU down by ~20%. To check for regressions, I also ran all existing benchmarks with "Caller" or "Stack" in their names: ``` % go test -run '^$' -bench 'Caller|Stack' -benchmem -count 10 [...]
% benchstat before.txt after.txt name old time/op new time/op delta StackField-2 3.28µs ± 2% 3.49µs ± 2% +6.38% (p=0.000 n=9+9) AddCallerHook-2 1.89µs ± 2% 1.92µs ± 3% ~ (p=0.055 n=10+9) TakeStacktrace-2 3.17µs ± 1% 3.60µs ± 8% +13.63% (p=0.000 n=8+9) name old alloc/op new alloc/op delta StackField-2 528B ± 0% 528B ± 0% ~ (all equal) AddCallerHook-2 248B ± 0% 240B ± 0% -3.23% (p=0.000 n=10+10) TakeStacktrace-2 416B ± 0% 416B ± 0% ~ (all equal) name old allocs/op new allocs/op delta StackField-2 3.00 ± 0% 3.00 ± 0% ~ (all equal) AddCallerHook-2 3.00 ± 0% 2.00 ± 0% -33.33% (p=0.000 n=10+10) TakeStacktrace-2 2.00 ± 0% 2.00 ± 0% ~ (all equal) ``` AddCaller costs one less allocation, but the cost of capturing a stack trace is *slightly* higher. I think that may be worth it for the 20%+ improvement on Caller+Stack and reduced allocations. --- logger.go | 67 +++++++++++-------- stacktrace.go | 175 ++++++++++++++++++++++++++++++++++++++------------ 2 files changed, 174 insertions(+), 68 deletions(-) diff --git a/logger.go b/logger.go index c5948a6cf..12204e189 100644 --- a/logger.go +++ b/logger.go @@ -24,9 +24,9 @@ import ( "fmt" "io/ioutil" "os" - "runtime" "strings" + "go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/zapcore" ) @@ -259,8 +259,10 @@ func (log *Logger) clone() *Logger { } func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { - // check must always be called directly by a method in the Logger interface - // (e.g., Check, Info, Fatal). + // Logger.check must always be called directly by a method in the + // Logger interface (e.g., Check, Info, Fatal). + // This skipps Logger.check and the Info/Fatal/Check/etc. method that + // called it. const callerSkipOffset = 2 // Check the level first to reduce the cost of disabled log calls. @@ -307,42 +309,55 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Thread the error output through to the CheckedEntry. 
ce.ErrorOutput = log.errorOutput - if log.addCaller { - frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset) - if !defined { + + addStack := log.addStack.Enabled(ce.Level) + if !log.addCaller && !addStack { + return ce + } + + // Adding the caller or stack trace requires capturing the callers of + // this function. We'll share information between these two. + stackDepth := stacktraceFirst + if addStack { + stackDepth = stacktraceFull + } + stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth) + defer stack.Free() + + if stack.Count() == 0 { + if log.addCaller { fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) log.errorOutput.Sync() } + return ce + } + frame, more := stack.Next() + + if log.addCaller { ce.Caller = zapcore.EntryCaller{ - Defined: defined, + Defined: frame.PC != 0, PC: frame.PC, File: frame.File, Line: frame.Line, Function: frame.Function, } } - if log.addStack.Enabled(ce.Level) { - ce.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String - } - return ce -} + if addStack { + buffer := bufferpool.Get() + defer buffer.Free() -// getCallerFrame gets caller frame. The argument skip is the number of stack -// frames to ascend, with 0 identifying the caller of getCallerFrame. The -// boolean ok is false if it was not possible to recover the information. -// -// Note: This implementation is similar to runtime.Caller, but it returns the whole frame. -func getCallerFrame(skip int) (frame runtime.Frame, ok bool) { - const skipOffset = 2 // skip getCallerFrame and Callers - - pc := make([]uintptr, 1) - numFrames := runtime.Callers(skip+skipOffset, pc) - if numFrames < 1 { - return + stackfmt := newStackFormatter(buffer) + + // We've already extracted the first frame, so format that + // separately and defer to stackfmt for the rest. 
+ stackfmt.FormatFrame(frame) + if more { + stackfmt.FormatStack(stack) + } + ce.Stack = buffer.String() } - frame, _ = runtime.CallersFrames(pc).Next() - return frame, frame.PC != 0 + return ce } diff --git a/stacktrace.go b/stacktrace.go index 0cf8c1ddf..2b2cbb88c 100644 --- a/stacktrace.go +++ b/stacktrace.go @@ -24,62 +24,153 @@ import ( "runtime" "sync" + "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" ) -var ( - _stacktracePool = sync.Pool{ - New: func() interface{} { - return newProgramCounters(64) - }, - } +var _stacktracePool = sync.Pool{ + New: func() interface{} { + return &stacktrace{ + storage: make([]uintptr, 64), + } + }, +} + +type stacktrace struct { + pcs []uintptr // program counters; always a subslice of storage + frames *runtime.Frames + + // The size of pcs varies depending on requirements: + // it will be one if the only the first frame was requested, + // and otherwise it will reflect the depth of the call stack. + // + // storage decouples the slice we need (pcs) from the slice we pool. + // We will always allocate a reasonably large storage, but we'll use + // only as much of it as we need. + storage []uintptr +} + +// stacktraceDepth specifies how deep of a stack trace should be captured. +type stacktraceDepth int + +const ( + // stacktraceFirst captures only the first frame. + stacktraceFirst stacktraceDepth = iota + + // stacktraceFull captures the entire call stack, allocating more + // storage for it if needed. + stacktraceFull ) -func takeStacktrace(skip int) string { - buffer := bufferpool.Get() - defer buffer.Free() - programCounters := _stacktracePool.Get().(*programCounters) - defer _stacktracePool.Put(programCounters) - - var numFrames int - for { - // Skip the call to runtime.Callers and takeStacktrace so that the - // program counters start at the caller of takeStacktrace. 
- numFrames = runtime.Callers(skip+2, programCounters.pcs) - if numFrames < len(programCounters.pcs) { - break - } - // Don't put the too-short counter slice back into the pool; this lets - // the pool adjust if we consistently take deep stacktraces. - programCounters = newProgramCounters(len(programCounters.pcs) * 2) +// captureStacktrace captures a stack trace of the specified depth, skipping +// the provided number of frames. skip=0 identifies the caller of +// captureStacktrace. +// +// The caller must call Free on the returned stacktrace after using it. +func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { + stack := _stacktracePool.Get().(*stacktrace) + + switch depth { + case stacktraceFirst: + stack.pcs = stack.storage[:1] + case stacktraceFull: + stack.pcs = stack.storage } - i := 0 - frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) + // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers + // itself. +2 to skip captureStacktrace and runtime.Callers. + numFrames := runtime.Callers( + skip+2, + stack.pcs, + ) - // Note: On the last iteration, frames.Next() returns false, with a valid - // frame, but we ignore this frame. The last frame is a a runtime frame which - // adds noise, since it's only either runtime.main or runtime.goexit. - for frame, more := frames.Next(); more; frame, more = frames.Next() { - if i != 0 { - buffer.AppendByte('\n') + // runtime.Callers truncates the recorded stacktrace if there is no + // room in the provided slice. For the full stack trace, keep expanding + // storage until there are fewer frames than there is room. 
+ if depth == stacktraceFull { + pcs := stack.pcs + for numFrames == len(pcs) { + pcs = make([]uintptr, len(pcs)*2) + numFrames = runtime.Callers(skip+2, pcs) } - i++ - buffer.AppendString(frame.Function) - buffer.AppendByte('\n') - buffer.AppendByte('\t') - buffer.AppendString(frame.File) - buffer.AppendByte(':') - buffer.AppendInt(int64(frame.Line)) + + // Discard old storage instead of returning it to the pool. + // This will adjust the pool size over time if stack traces are + // consistently very deep. + stack.storage = pcs + stack.pcs = pcs[:numFrames] + } else { + stack.pcs = stack.pcs[:numFrames] } + stack.frames = runtime.CallersFrames(stack.pcs) + return stack +} + +// Free releases resources associated with this stacktrace +// and returns it back to the pool. +func (st *stacktrace) Free() { + st.frames = nil + st.pcs = nil + _stacktracePool.Put(st) +} + +// Count reports the total number of frames in this stacktrace. +// Count DOES NOT change as Next is called. +func (st *stacktrace) Count() int { + return len(st.pcs) +} + +// Next returns the next frame in the stack trace, +// and a boolean indicating whether there are more after it. +func (st *stacktrace) Next() (_ runtime.Frame, more bool) { + return st.frames.Next() +} + +func takeStacktrace(skip int) string { + stack := captureStacktrace(skip+1, stacktraceFull) + defer stack.Free() + + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := newStackFormatter(buffer) + stackfmt.FormatStack(stack) return buffer.String() } -type programCounters struct { - pcs []uintptr +// stackFormatter formats a stack trace into a readable string representation. +type stackFormatter struct { + i int // number of frames already written + b *buffer.Buffer +} + +// newStackFormatter builds a new stackFormatter. 
+func newStackFormatter(b *buffer.Buffer) stackFormatter { + return stackFormatter{b: b} +} + +// FormatStack formats all remaining frames in the provided stacktrace -- minus +// the final runtime.main/runtime.goexit frame. +func (sf *stackFormatter) FormatStack(stack *stacktrace) { + // Note: On the last iteration, frames.Next() returns false, with a valid + // frame, but we ignore this frame. The last frame is a a runtime frame which + // adds noise, since it's only either runtime.main or runtime.goexit. + for frame, more := stack.Next(); more; frame, more = stack.Next() { + sf.FormatFrame(frame) + } } -func newProgramCounters(size int) *programCounters { - return &programCounters{make([]uintptr, size)} +// FormatFrame formats the given frame. +func (sf *stackFormatter) FormatFrame(frame runtime.Frame) { + if sf.i != 0 { + sf.b.AppendByte('\n') + } + sf.i++ + sf.b.AppendString(frame.Function) + sf.b.AppendByte('\n') + sf.b.AppendByte('\t') + sf.b.AppendString(frame.File) + sf.b.AppendByte(':') + sf.b.AppendInt(int64(frame.Line)) } From b09d4022452e0190b6a431fe081455ceecff934e Mon Sep 17 00:00:00 2001 From: Abhinav Gupta Date: Mon, 24 Jan 2022 18:10:30 -0800 Subject: [PATCH 3/7] Test takeStacktrace with a deep stack --- stacktrace_test.go | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/stacktrace_test.go b/stacktrace_test.go index d473029ee..82b6af38e 100644 --- a/stacktrace_test.go +++ b/stacktrace_test.go @@ -21,6 +21,7 @@ package zap import ( + "bytes" "strings" "testing" @@ -67,8 +68,39 @@ func TestTakeStacktraceWithSkipInnerFunc(t *testing.T) { ) } +func TestTakeStacktraceDeepStack(t *testing.T) { + const ( + N = 500 + withStackDepthName = "go.uber.org/zap.withStackDepth" + ) + withStackDepth(N, func() { + trace := takeStacktrace(0) + for found := 0; found < N; found++ { + i := strings.Index(trace, withStackDepthName) + if i < 0 { + t.Fatalf(`expected %v occurrences of %q, found %d`, + N, withStackDepthName, found) + } + 
trace = trace[i+len(withStackDepthName):] + } + }) +} + func BenchmarkTakeStacktrace(b *testing.B) { for i := 0; i < b.N; i++ { takeStacktrace(0) } } + +func withStackDepth(depth int, f func()) { + var recurse func(rune) rune + recurse = func(r rune) rune { + if r > 0 { + bytes.Map(recurse, []byte(string([]rune{r - 1}))) + } else { + f() + } + return 0 + } + recurse(rune(depth)) +} From 9cbe14e29c9455a97b951f7b372ae7f487a80535 Mon Sep 17 00:00:00 2001 From: Prashant Varanasi Date: Wed, 26 Jan 2022 11:01:19 -0800 Subject: [PATCH 4/7] Avoid CallersFrames from escaping (#1053) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The `runtime.CallersFrames` method doesn't do much other than returning a struct pointer wrapping the passed in `pcs`. Calling this method twice ends up simplifying some of the logic (not having to format the first stack separately from the remaining), and ends up bringing the performance of `TakeStacktrace` similar to `master`. ``` # benchstat comparing 3 runs of master vs this branch, each run using benchtime=10s name old time/op new time/op delta TakeStacktrace 1.47µs ± 2% 1.48µs ± 2% ~ (p=1.000 n=3+3) name old alloc/op new alloc/op delta TakeStacktrace 496B ± 0% 496B ± 0% ~ (all equal) name old allocs/op new allocs/op delta TakeStacktrace 2.00 ± 0% 2.00 ± 0% ~ (all equal) ``` --- logger.go | 10 ++-------- stacktrace.go | 18 ++++++++---------- 2 files changed, 10 insertions(+), 18 deletions(-) diff --git a/logger.go b/logger.go index 12204e189..d01c0745c 100644 --- a/logger.go +++ b/logger.go @@ -332,7 +332,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return ce } - frame, more := stack.Next() + frame := stack.First() if log.addCaller { ce.Caller = zapcore.EntryCaller{ @@ -349,13 +349,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { defer buffer.Free() stackfmt := newStackFormatter(buffer) - - // We've already extracted the first
frame, so format that - // separately and defer to stackfmt for the rest. - stackfmt.FormatFrame(frame) - if more { - stackfmt.FormatStack(stack) - } + stackfmt.FormatStack(stack) ce.Stack = buffer.String() } diff --git a/stacktrace.go b/stacktrace.go index 2b2cbb88c..9c5cd8fda 100644 --- a/stacktrace.go +++ b/stacktrace.go @@ -37,8 +37,7 @@ var _stacktracePool = sync.Pool{ } type stacktrace struct { - pcs []uintptr // program counters; always a subslice of storage - frames *runtime.Frames + pcs []uintptr // program counters; always a subslice of storage // The size of pcs varies depending on requirements: // it will be one if the only the first frame was requested, @@ -103,14 +102,12 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { stack.pcs = stack.pcs[:numFrames] } - stack.frames = runtime.CallersFrames(stack.pcs) return stack } // Free releases resources associated with this stacktrace // and returns it back to the pool. func (st *stacktrace) Free() { - st.frames = nil st.pcs = nil _stacktracePool.Put(st) } @@ -121,10 +118,10 @@ func (st *stacktrace) Count() int { return len(st.pcs) } -// Next returns the next frame in the stack trace, -// and a boolean indicating whether there are more after it. -func (st *stacktrace) Next() (_ runtime.Frame, more bool) { - return st.frames.Next() +// First returns the first frame captured as part of the stacktrace. +func (st *stacktrace) First() runtime.Frame { + frame, _ := runtime.CallersFrames(st.pcs).Next() + return frame } func takeStacktrace(skip int) string { @@ -150,13 +147,14 @@ func newStackFormatter(b *buffer.Buffer) stackFormatter { return stackFormatter{b: b} } -// FormatStack formats all remaining frames in the provided stacktrace -- minus +// FormatStack formats all frames in the provided stacktrace -- minus // the final runtime.main/runtime.goexit frame. 
func (sf *stackFormatter) FormatStack(stack *stacktrace) { // Note: On the last iteration, frames.Next() returns false, with a valid // frame, but we ignore this frame. The last frame is a a runtime frame which // adds noise, since it's only either runtime.main or runtime.goexit. - for frame, more := stack.Next(); more; frame, more = stack.Next() { + frames := runtime.CallersFrames(stack.pcs) + for frame, more := frames.Next(); more; frame, more = frames.Next() { sf.FormatFrame(frame) } } From 440d5987307471eefcdee8b59c18997e3d1b375a Mon Sep 17 00:00:00 2001 From: Abhinav Gupta Date: Wed, 26 Jan 2022 13:07:17 -0800 Subject: [PATCH 5/7] Revert "Avoid CallersFrames from escaping (#1053)" This reverts commit 9cbe14e29c9455a97b951f7b372ae7f487a80535. --- logger.go | 10 ++++++++-- stacktrace.go | 18 ++++++++++-------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/logger.go b/logger.go index d01c0745c..12204e189 100644 --- a/logger.go +++ b/logger.go @@ -332,7 +332,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return ce } - frame := stack.First() + frame, more := stack.Next() if log.addCaller { ce.Caller = zapcore.EntryCaller{ @@ -349,7 +349,13 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { defer buffer.Free() stackfmt := newStackFormatter(buffer) - stackfmt.FormatStack(stack) + + // We've already extracted the first frame, so format that + // separately and defer to stackfmt for the rest. 
+ stackfmt.FormatFrame(frame) + if more { + stackfmt.FormatStack(stack) + } ce.Stack = buffer.String() } diff --git a/stacktrace.go b/stacktrace.go index 9c5cd8fda..2b2cbb88c 100644 --- a/stacktrace.go +++ b/stacktrace.go @@ -37,7 +37,8 @@ var _stacktracePool = sync.Pool{ } type stacktrace struct { - pcs []uintptr // program counters; always a subslice of storage + pcs []uintptr // program counters; always a subslice of storage + frames *runtime.Frames // The size of pcs varies depending on requirements: // it will be one if the only the first frame was requested, @@ -102,12 +103,14 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { stack.pcs = stack.pcs[:numFrames] } + stack.frames = runtime.CallersFrames(stack.pcs) return stack } // Free releases resources associated with this stacktrace // and returns it back to the pool. func (st *stacktrace) Free() { + st.frames = nil st.pcs = nil _stacktracePool.Put(st) } @@ -118,10 +121,10 @@ func (st *stacktrace) Count() int { return len(st.pcs) } -// First returns the first frame captured as part of the stacktrace. -func (st *stacktrace) First() runtime.Frame { - frame, _ := runtime.CallersFrames(st.pcs).Next() - return frame +// Next returns the next frame in the stack trace, +// and a boolean indicating whether there are more after it. +func (st *stacktrace) Next() (_ runtime.Frame, more bool) { + return st.frames.Next() } func takeStacktrace(skip int) string { @@ -147,14 +150,13 @@ func newStackFormatter(b *buffer.Buffer) stackFormatter { return stackFormatter{b: b} } -// FormatStack formats all frames in the provided stacktrace -- minus +// FormatStack formats all remaining frames in the provided stacktrace -- minus // the final runtime.main/runtime.goexit frame. func (sf *stackFormatter) FormatStack(stack *stacktrace) { // Note: On the last iteration, frames.Next() returns false, with a valid // frame, but we ignore this frame. 
The last frame is a a runtime frame which adds noise, since it's only either runtime.main or runtime.goexit. - frames := runtime.CallersFrames(stack.pcs) - for frame, more := frames.Next(); more; frame, more = frames.Next() { + for frame, more := stack.Next(); more; frame, more = stack.Next() { sf.FormatFrame(frame) } } From 83f6340a9ad56b74d651d58b9d4ce4cdd25f2399 Mon Sep 17 00:00:00 2001 From: Abhinav Gupta Date: Thu, 27 Jan 2022 08:47:19 -0800 Subject: [PATCH 6/7] typo in logger.go Co-authored-by: Sung Yoon Whang --- logger.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/logger.go b/logger.go index 12204e189..087c74222 100644 --- a/logger.go +++ b/logger.go @@ -261,7 +261,7 @@ func (log *Logger) clone() *Logger { func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Logger.check must always be called directly by a method in the // Logger interface (e.g., Check, Info, Fatal). - // This skipps Logger.check and the Info/Fatal/Check/etc. method that + // This skips Logger.check and the Info/Fatal/Check/etc. method that // called it. const callerSkipOffset = 2 From 4de4fc5c28613f22141669b973bc1201b956ebbc Mon Sep 17 00:00:00 2001 From: Abhinav Gupta Date: Thu, 27 Jan 2022 08:49:36 -0800 Subject: [PATCH 7/7] stackFormatter: Replace counter with a bool Track whether we've written at least one frame, and use that to decide if we need the prefix. --- stacktrace.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stacktrace.go b/stacktrace.go index 2b2cbb88c..3d187fa56 100644 --- a/stacktrace.go +++ b/stacktrace.go @@ -141,8 +141,8 @@ func takeStacktrace(skip int) string { // stackFormatter formats a stack trace into a readable string representation. type stackFormatter struct { - i int // number of frames already written - b *buffer.Buffer + b *buffer.Buffer + nonEmpty bool // whether we've written at least one frame already } // newStackFormatter builds a new stackFormatter. 
@@ -163,10 +163,10 @@ func (sf *stackFormatter) FormatStack(stack *stacktrace) { // FormatFrame formats the given frame. func (sf *stackFormatter) FormatFrame(frame runtime.Frame) { - if sf.i != 0 { + if sf.nonEmpty { sf.b.AppendByte('\n') } - sf.i++ + sf.nonEmpty = true sf.b.AppendString(frame.Function) sf.b.AppendByte('\n') sf.b.AppendByte('\t')