// Package testkit provides helpers for generating execution trace test files:
// each test is a txtar archive pairing raw trace data with the result expected
// from parsing it.
package testkit

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"os"
	"regexp"
	"strings"

	"golang.org/x/exp/trace"
	"golang.org/x/exp/trace/internal/event"
	"golang.org/x/exp/trace/internal/event/go122"
	"golang.org/x/exp/trace/internal/raw"
	"golang.org/x/exp/trace/internal/version"
	"golang.org/x/tools/txtar"
)
// Main is the entry point for a test trace generator program. It runs the
// generator f against a fresh Trace and writes the resulting test file to
// the path named by the first command-line argument.
func Main(f func(*Trace)) {
	// Create the output file.
	out, err := os.Create(os.Args[1])
	if err != nil {
		panic(err.Error())
	}
	defer out.Close()

	// Create a new trace.
	trace := NewTrace()

	// Run the generator.
	f(trace)

	// Write out the generator's state.
	if _, err := out.Write(trace.Generate()); err != nil {
		panic(err.Error())
	}
}
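
// A generator program built on Main might look like the sketch below. This is
// illustrative only, not a prescribed shape; the generator name "gen" is
// arbitrary, and the sketch uses only helpers defined in this file plus the
// go122.EvFrequency event that writeEventsTo also emits.
//
//	func main() {
//		testkit.Main(gen)
//	}
//
//	func gen(t *testkit.Trace) {
//		t.ExpectSuccess()
//		g := t.Generation(1)
//		b := g.Batch(trace.NoThread, 0)
//		b.RawEvent(go122.EvFrequency, nil, 15625000)
//	}
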
// Trace is a helper for building a test trace together with the result
// expected when parsing it.
type Trace struct {
	// Trace data state.
	ver             version.Version
	names           map[string]event.Type
	specs           []event.Spec
	events          []raw.Event
	gens            []*Generation
	validTimestamps bool

	// Expectation state.
	bad      bool
	badMatch *regexp.Regexp
}
// NewTrace creates a new Trace for the Go 1.22 trace version.
func NewTrace() *Trace {
	ver := version.Go122
	return &Trace{
		names:           event.Names(ver.Specs()),
		specs:           ver.Specs(),
		validTimestamps: true,
	}
}
// ExpectFailure records that parsing the trace should fail with an error
// matching the provided regexp pattern.
func (t *Trace) ExpectFailure(pattern string) {
	t.bad = true
	t.badMatch = regexp.MustCompile(pattern)
}
// ExpectSuccess records that the trace should parse successfully.
func (t *Trace) ExpectSuccess() {
	t.bad = false
}
// RawEvent emits an event directly into the trace, outside of any generation
// or batch.
func (t *Trace) RawEvent(typ event.Type, data []byte, args ...uint64) {
	t.events = append(t.events, t.createEvent(typ, data, args...))
}
// DisableTimestamps causes batches and timed events created after this call
// to be written with a zero timestamp.
func (t *Trace) DisableTimestamps() {
	t.validTimestamps = false
}
// Generation creates a new trace generation with the given generation number.
func (t *Trace) Generation(gen uint64) *Generation {
	g := &Generation{
		trace:   t,
		gen:     gen,
		strings: make(map[string]uint64),
		stacks:  make(map[stack]uint64),
	}
	t.gens = append(t.gens, g)
	return g
}
// Generate produces the test file contents as a txtar archive containing an
// "expect" file and a "trace" file.
func (t *Trace) Generate() []byte {
	// Trace file contents.
	var buf bytes.Buffer
	tw, err := raw.NewTextWriter(&buf, version.Go122)
	if err != nil {
		panic(err.Error())
	}

	// Write raw top-level events.
	for _, e := range t.events {
		tw.WriteEvent(e)
	}

	// Write generations.
	for _, g := range t.gens {
		g.writeEventsTo(tw)
	}

	// Expectation file contents.
	expect := []byte("SUCCESS\n")
	if t.bad {
		expect = []byte(fmt.Sprintf("FAILURE %q\n", t.badMatch))
	}

	// Create the test file's contents.
	return txtar.Format(&txtar.Archive{
		Files: []txtar.File{
			{Name: "expect", Data: expect},
			{Name: "trace", Data: buf.Bytes()},
		},
	})
}
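
// For reference, the archive returned by Generate has the following shape
// (sketch; the trace section holds whatever text form raw.TextWriter wrote):
//
//	-- expect --
//	SUCCESS
//	-- trace --
//	...
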
// createEvent constructs a raw event, checking the argument count against the
// event's spec. Stack events are exempt because their arguments are variadic.
func (t *Trace) createEvent(ev event.Type, data []byte, args ...uint64) raw.Event {
	spec := t.specs[ev]
	if ev != go122.EvStack {
		if arity := len(spec.Args); len(args) != arity {
			panic(fmt.Sprintf("expected %d args for %s, got %d", arity, spec.Name, len(args)))
		}
	}
	return raw.Event{
		Version: version.Go122,
		Ev:      ev,
		Args:    args,
		Data:    data,
	}
}
type stack struct {
	stk [32]trace.StackFrame
	len int
}

var (
	NoString = ""
	NoStack  = []trace.StackFrame{}
)
// Generation is a single generation in the trace.
type Generation struct {
	trace   *Trace
	gen     uint64
	batches []*Batch
	strings map[string]uint64
	stacks  map[stack]uint64

	// Options applied when the trace is generated.
	ignoreStringBatchSizeLimit bool
	ignoreStackBatchSizeLimit  bool
}
// Batch starts a new event batch in the generation for the given thread and
// timestamp. If timestamps are disabled on the trace, the timestamp is zeroed.
func (g *Generation) Batch(thread trace.ThreadID, time Time) *Batch {
	if !g.trace.validTimestamps {
		time = 0
	}
	b := &Batch{
		gen:       g,
		thread:    thread,
		timestamp: time,
	}
	g.batches = append(g.batches, b)
	return b
}
// String registers a string with the generation and returns its ID, interning
// duplicates. The empty string always has ID 0.
func (g *Generation) String(s string) uint64 {
	if len(s) == 0 {
		return 0
	}
	if id, ok := g.strings[s]; ok {
		return id
	}
	id := uint64(len(g.strings) + 1)
	g.strings[s] = id
	return id
}
// Stack registers a stack with the generation and returns its ID, interning
// duplicates. An empty stack always has ID 0. Stacks are limited to 32 frames.
func (g *Generation) Stack(stk []trace.StackFrame) uint64 {
	if len(stk) == 0 {
		return 0
	}
	if len(stk) > 32 {
		panic("stack too big for test")
	}
	var stkc stack
	copy(stkc.stk[:], stk)
	stkc.len = len(stk)
	if id, ok := g.stacks[stkc]; ok {
		return id
	}
	id := uint64(len(g.stacks) + 1)
	g.stacks[stkc] = id
	return id
}
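
// As an illustrative sketch (the frame values below are made up), registering
// identical stacks yields the same ID; the Func and File strings are only
// interned via String when the generation is written out:
//
//	frames := []trace.StackFrame{{PC: 1, Func: "main.main", File: "main.go", Line: 10}}
//	id1 := g.Stack(frames)
//	id2 := g.Stack(frames)
//	// id1 == id2
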
// writeEventsTo flushes the generation's batches, followed by its frequency,
// stack, and string table events, to tw.
func (g *Generation) writeEventsTo(tw *raw.TextWriter) {
	// Write event batches for the generation.
	for _, b := range g.batches {
		b.writeEventsTo(tw)
	}

	// Write frequency.
	b := g.newStructuralBatch()
	b.RawEvent(go122.EvFrequency, nil, 15625000)
	b.writeEventsTo(tw)

	// Write stacks.
	b = g.newStructuralBatch()
	b.RawEvent(go122.EvStacks, nil)
	for stk, id := range g.stacks {
		stk := stk.stk[:stk.len]
		args := []uint64{id}
		for _, f := range stk {
			args = append(args, f.PC, g.String(f.Func), g.String(f.File), f.Line)
		}
		b.RawEvent(go122.EvStack, nil, args...)

		// Flush the batch if it gets too big.
		if !g.ignoreStackBatchSizeLimit && b.size > go122.MaxBatchSize/2 {
			b.writeEventsTo(tw)
			b = g.newStructuralBatch()
		}
	}
	b.writeEventsTo(tw)

	// Write strings.
	b = g.newStructuralBatch()
	b.RawEvent(go122.EvStrings, nil)
	for s, id := range g.strings {
		b.RawEvent(go122.EvString, []byte(s), id)

		// Flush the batch if it gets too big.
		if !g.ignoreStringBatchSizeLimit && b.size > go122.MaxBatchSize/2 {
			b.writeEventsTo(tw)
			b = g.newStructuralBatch()
		}
	}
	b.writeEventsTo(tw)
}
// newStructuralBatch creates a batch attached to no thread, used for the
// frequency, stack table, and string table events.
func (g *Generation) newStructuralBatch() *Batch {
	return &Batch{gen: g, thread: trace.NoThread}
}
// Batch is a collection of events written out together, preceded by a batch
// header event.
type Batch struct {
	gen       *Generation
	thread    trace.ThreadID
	timestamp Time
	size      uint64
	events    []raw.Event
}
// Event emits an event into the batch by name. The name must be one of the
// event names in the trace version's specs, and args must match the spec's
// arguments; for timed events a placeholder timestamp (1, or 0 when
// timestamps are disabled) is prepended automatically.
func (b *Batch) Event(name string, args ...any) {
	ev, ok := b.gen.trace.names[name]
	if !ok {
		panic(fmt.Sprintf("invalid or unknown event %s", name))
	}
	var uintArgs []uint64
	argOff := 0
	if b.gen.trace.specs[ev].IsTimedEvent {
		if b.gen.trace.validTimestamps {
			uintArgs = []uint64{1}
		} else {
			uintArgs = []uint64{0}
		}
		argOff = 1
	}
	spec := b.gen.trace.specs[ev]
	if arity := len(spec.Args) - argOff; len(args) != arity {
		panic(fmt.Sprintf("expected %d args for %s, got %d", arity, spec.Name, len(args)))
	}
	for i, arg := range args {
		uintArgs = append(uintArgs, b.uintArgFor(arg, spec.Args[i+argOff]))
	}
	b.RawEvent(ev, nil, uintArgs...)
}
// uintArgFor converts a single Event argument to its uint64 encoding,
// dispatching on the type suffix of the argument's spec name (for example,
// a spec of the form "name_g" is treated as a goroutine ID).
func (b *Batch) uintArgFor(arg any, argSpec string) uint64 {
	components := strings.SplitN(argSpec, "_", 2)
	typStr := components[0]
	if len(components) == 2 {
		typStr = components[1]
	}
	var u uint64
	switch typStr {
	case "value":
		u = arg.(uint64)
	case "stack":
		u = b.gen.Stack(arg.([]trace.StackFrame))
	case "seq":
		u = uint64(arg.(Seq))
	case "pstatus":
		u = uint64(arg.(go122.ProcStatus))
	case "gstatus":
		u = uint64(arg.(go122.GoStatus))
	case "g":
		u = uint64(arg.(trace.GoID))
	case "m":
		u = uint64(arg.(trace.ThreadID))
	case "p":
		u = uint64(arg.(trace.ProcID))
	case "string":
		u = b.gen.String(arg.(string))
	case "task":
		u = uint64(arg.(trace.TaskID))
	default:
		panic(fmt.Sprintf("unsupported arg type %q for spec %q", typStr, argSpec))
	}
	return u
}
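
// For example (sketch; the spec names below are hypothetical, the real ones
// come from the go122 event specs):
//
//	b.uintArgFor(trace.GoID(4), "gid_g")      // suffix "g": expects trace.GoID
//	b.uintArgFor(uint64(42), "value")         // bare "value": expects uint64
//	b.uintArgFor("stop reason", "str_string") // suffix "string": interned via g.String
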
// RawEvent emits an event with raw uint64 arguments into the batch and
// accounts for its encoded size in the batch's size total.
func (b *Batch) RawEvent(typ event.Type, data []byte, args ...uint64) {
	ev := b.gen.trace.createEvent(typ, data, args...)

	// Compute the encoded size of the event and add it to the batch's size.
	b.size += 1 // one byte for the event type
	var buf [binary.MaxVarintLen64]byte
	for _, arg := range args {
		b.size += uint64(binary.PutUvarint(buf[:], arg))
	}
	if len(data) != 0 {
		b.size += uint64(binary.PutUvarint(buf[:], uint64(len(data))))
		b.size += uint64(len(data))
	}

	// Add the event.
	b.events = append(b.events, ev)
}
// writeEventsTo writes the batch header event followed by the batch's events.
func (b *Batch) writeEventsTo(tw *raw.TextWriter) {
	tw.WriteEvent(raw.Event{
		Version: version.Go122,
		Ev:      go122.EvEventBatch,
		Args:    []uint64{b.gen.gen, uint64(b.thread), uint64(b.timestamp), b.size},
	})
	for _, e := range b.events {
		tw.WriteEvent(e)
	}
}
// Seq represents a sequence counter.
type Seq uint64

// Time represents a low-level trace timestamp, as written into the trace data
// (not necessarily nanoseconds).
type Time uint64