// Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

// Package stacktrace provides support for gathering stack traces
// efficiently.
package stacktrace

import (
	"runtime"

	"go.uber.org/zap/buffer"
	"go.uber.org/zap/internal/bufferpool"
	"go.uber.org/zap/internal/pool"
)

var _stackPool = pool.New(func() *Stack {
	return &Stack{
		storage: make([]uintptr, 64),
	}
})

// Stack is a captured stack trace.
type Stack struct {
	pcs    []uintptr // program counters; always a subslice of storage
	frames *runtime.Frames

	// The size of pcs varies depending on requirements:
	// it will be one if only the first frame was requested,
	// and otherwise it will reflect the depth of the call stack.
	//
	// storage decouples the slice we need (pcs) from the slice we pool.
	// We will always allocate a reasonably large storage, but we'll use
	// only as much of it as we need.
	storage []uintptr
}

// Depth specifies how deep of a stack trace should be captured.
type Depth int

const (
	// First captures only the first frame.
	First Depth = iota

	// Full captures the entire call stack, allocating more
	// storage for it if needed.
	Full
)

// Capture captures a stack trace of the specified depth, skipping
// the provided number of frames. skip=0 identifies the caller of
// Capture.
//
// The caller must call Free on the returned stacktrace after using it.
func Capture(skip int, depth Depth) *Stack {
	stack := _stackPool.Get()

	switch depth {
	case First:
		stack.pcs = stack.storage[:1]
	case Full:
		stack.pcs = stack.storage
	}

	// Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
	// itself. +2 to skip Capture and runtime.Callers.
	numFrames := runtime.Callers(
		skip+2,
		stack.pcs,
	)

	// runtime.Callers truncates the recorded stacktrace if there is no
	// room in the provided slice. For the full stack trace, keep expanding
	// storage until there are fewer frames than there is room.
	if depth == Full {
		pcs := stack.pcs
		for numFrames == len(pcs) {
			pcs = make([]uintptr, len(pcs)*2)
			numFrames = runtime.Callers(skip+2, pcs)
		}

		// Discard old storage instead of returning it to the pool.
		// This will adjust the pool size over time if stack traces are
		// consistently very deep.
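		//
		// For example (illustrative numbers): with the default 64-entry
		// storage allocated above, a stack that is 80 frames deep grows pcs
		// to 128 entries on the first pass of the loop, at which point every
		// frame fits and the loop exits.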
		stack.storage = pcs
		stack.pcs = pcs[:numFrames]
	} else {
		stack.pcs = stack.pcs[:numFrames]
	}

	stack.frames = runtime.CallersFrames(stack.pcs)
	return stack
}

// Free releases resources associated with this stacktrace
// and returns it back to the pool.
func (st *Stack) Free() {
	st.frames = nil
	st.pcs = nil
	_stackPool.Put(st)
}

// Count reports the total number of frames in this stacktrace.
// Count DOES NOT change as Next is called.
func (st *Stack) Count() int {
	return len(st.pcs)
}

// Next returns the next frame in the stack trace,
// and a boolean indicating whether there are more after it.
func (st *Stack) Next() (_ runtime.Frame, more bool) {
	return st.frames.Next()
}

// Take returns a string representation of the current stacktrace.
//
// skip is the number of frames to skip before recording the stack trace.
// skip=0 identifies the caller of Take.
func Take(skip int) string {
	stack := Capture(skip+1, Full)
	defer stack.Free()

	buffer := bufferpool.Get()
	defer buffer.Free()

	stackfmt := NewFormatter(buffer)
	stackfmt.FormatStack(stack)
	return buffer.String()
}

// Formatter formats a stack trace into a readable string representation.
type Formatter struct {
	b        *buffer.Buffer
	nonEmpty bool // whether we've written at least one frame already
}

// NewFormatter builds a new Formatter.
func NewFormatter(b *buffer.Buffer) Formatter {
	return Formatter{b: b}
}

// FormatStack formats all remaining frames in the provided stacktrace -- minus
// the final runtime.main/runtime.goexit frame.
func (sf *Formatter) FormatStack(stack *Stack) {
	// Note: On the last iteration, frames.Next() returns false, with a valid
	// frame, but we ignore this frame. The last frame is a runtime frame which
	// adds noise, since it's only either runtime.main or runtime.goexit.
	for frame, more := stack.Next(); more; frame, more = stack.Next() {
		sf.FormatFrame(frame)
	}
}

// FormatFrame formats the given frame.
func (sf *Formatter) FormatFrame(frame runtime.Frame) {
	if sf.nonEmpty {
		sf.b.AppendByte('\n')
	}
	sf.nonEmpty = true
	sf.b.AppendString(frame.Function)
	sf.b.AppendByte('\n')
	sf.b.AppendByte('\t')
	sf.b.AppendString(frame.File)
	sf.b.AppendByte(':')
	sf.b.AppendInt(int64(frame.Line))
}
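
// callerFrame is an illustrative sketch, not part of the package API: a
// hypothetical helper showing how Capture with the First depth yields only
// the immediate caller's frame. skip=0 identifies the caller of callerFrame.
func callerFrame(skip int) runtime.Frame {
	stack := Capture(skip+1, First) // +1 to skip callerFrame itself.
	defer stack.Free()

	frame, _ := stack.Next()
	return frame
}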