Source file
src/runtime/trace.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace is captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // gcController.heapLive change [timestamp, heap_alloc]
	traceEvHeapGoal          = 34 // gcController.heapGoal() change [timestamp, heap goal in bytes]
	traceEvTimerGoroutine    = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // not currently used; denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewTask [timestamp, internal task id, internal parent task id, name string, stack]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), name string, stack]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCPUSample         = 49 // CPU profiling sample [timestamp, real timestamp, real P id (-1 when absent), goroutine id, stack]
	traceEvCount             = 50
	// Byte is used but only 6 bits are available for event type.
	// The remaining 2 bits are used to specify the number of arguments.
	// That means, the max event type value is 63.
)
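
// Note: the first byte of each event packs the event type in the low 6 bits
// and the argument count in the top 2 bits (see traceEventLocked below). For
// example, a traceEvGoCreate event carrying three or more arguments begins
// with the byte 13 | 3<<traceArgCountShift = 0xcd.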

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in less number of bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	traceTickDiv = 16 + 48*(goarch.Is386|goarch.IsAmd64)

	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128

	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1

	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10

	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6

	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for end user.
	traceFutileWakeup byte = 128
)
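
// For example, on amd64 goarch.IsAmd64 is 1 and goarch.Is386 is 0, so
// traceTickDiv is 16+48 = 64 and event timestamps are cputicks()/64; on most
// other architectures traceTickDiv is 16.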

// trace is global tracing context.
var trace struct {
	// trace.lock must only be acquired on the system stack where
	// stack splits cannot happen while it is held.
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock locks
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	stackTab      traceStackTable // maps stack traces to unique ids

	// cpuLogRead accepts CPU profile samples from the signal handler where
	// they're generated. It uses a two-word header to hold the IDs of the P and
	// G (respectively) that were active at the time of the sample. Because
	// profBuf uses a record with all zeros in its header to indicate overflow,
	// we make sure to make the P field always non-zero: The ID of a real P will
	// start at bit 1, and bit 0 will be set. Samples that arrive while no P is
	// running (such as near syscalls) will set the first header field to 0b10.
	// This careful handling ensures that profBuf has no alignment issues, and
	// the metadata in the profBuf is enough to identify samples.
	cpuLogRead *profBuf

	// cpuLogBuf is a trace buffer to hold profile events converted to the
	// trace format.
	cpuLogBuf traceBufPtr

	reader atomic.Pointer[g] // goroutine that called ReadTrace, or nil

	signalLock  atomic.Uint32 // protects use of the following member, only usable in signal handlers
	cpuLogWrite *profBuf      // copy of cpuLogRead for use in signal handlers, set without signalLock

	// Dictionary for traceEvString.
	// Protected by stringsLock.
	stringsLock mutex
	strings     map[string]uint64
	stringSeq   uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full
	lastTicks uint64                  // when we wrote the last event
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
type traceBuf struct {
	_ sys.NotInHeap
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}
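
// Note that a traceBuf is therefore exactly 64 KiB: the header plus an arr
// sized to fill the remainder of the 64 KiB allocation made in traceFlush.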

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf is now embedded runtime/internal/sys.NotInHeap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing flag
// -test.trace instead of calling StartTrace directly.
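//
// A minimal sketch of direct use (runtime/trace.Start wraps roughly this
// pattern; w stands in for any io.Writer):
//
//	if err := runtime.StartTrace(); err != nil {
//		return err
//	}
//	go func() {
//		for {
//			data := runtime.ReadTrace() // returns nil only after StopTrace
//			if data == nil {
//				break
//			}
//			w.Write(data)
//		}
//	}()
//	// ... workload ...
//	runtime.StopTrace()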
func StartTrace() error {
	// Stop the world so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	// Do not stop the world during GC so we ensure we always see
	// a consistent view of GC-related events (e.g. a start is always
	// paired with an end).
	stopTheWorldGC("start tracing")

	// Prevent sysmon from running any code that could generate events.
	lock(&sched.sysmonlock)

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// Instead, mark this M as starting a trace; traceEvent lets such an M
	// write the initial snapshot events below even though trace.enabled is
	// still false.
	mp := getg().m
	mp.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)

	profBuf := newProfBuf(2, profBufWordCount, profBufTagCount) // after the timestamp, header is [pp.id, gp.goid]
	trace.cpuLogRead = profBuf

	// We must not acquire trace.signalLock outside of a signal handler: a
	// profiling signal may arrive at any time and try to acquire it, leading to
	// deadlock. Because we can't use that lock to protect updates to
	// trace.cpuLogWrite (only use of the structure it references), reads and
	// writes of the pointer must be atomic.
	atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), unsafe.Pointer(profBuf))

	// World is stopped, no need to lock.
	forEachGRace(func(gp *g) {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{startPCforTrace(gp.startpc) + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, gp.goid, uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, gp.goid)
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, gp.goid)
		} else if status == _Gdead && gp.m != nil && gp.m.isextra {
			// Trigger two trace events for the dead g in the extra m,
			// since the next event of the g will be traceEvGoSysExit in exitsyscall,
			// while calling from C thread to Go.
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{startPCforTrace(0) + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, gp.goid, uint64(id), stackID)
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, gp.goid)
		} else {
			gp.sysblocktraced = false
		}
	})
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
	// It will lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	mp.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()
	return nil
}

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorldGC("stop tracing")

	// See the comment in StartTrace.
	lock(&sched.sysmonlock)

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return
	}

	traceGoSched()

	atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), nil)
	trace.cpuLogRead.close()
	traceReadCPU()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}
	if trace.cpuLogBuf != 0 {
		buf := trace.cpuLogBuf
		trace.cpuLogBuf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	systemstack(func() {
		// trace.lock must only be acquired on the system stack.
		lock(&trace.lock)
		for _, p := range allp[:cap(allp)] {
			if p.tracebuf != 0 {
				throw("trace: non-empty trace buffer in proc")
			}
		}
		if trace.buf != 0 {
			throw("trace: non-empty global trace buffer")
		}
		if trace.fullHead != 0 || trace.fullTail != 0 {
			throw("trace: non-empty full trace buffer")
		}
		if trace.reading != 0 || trace.reader.Load() != nil {
			throw("trace: reading after shutdown")
		}
		for trace.empty != 0 {
			buf := trace.empty
			trace.empty = buf.ptr().link
			sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
		}
		trace.strings = nil
		trace.shutdown = false
		trace.cpuLogRead = nil
		unlock(&trace.lock)
	})
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
top:
	var buf []byte
	var park bool
	systemstack(func() {
		buf, park = readTrace0()
	})
	if park {
		gopark(func(gp *g, _ unsafe.Pointer) bool {
			if !trace.reader.CompareAndSwapNoWB(nil, gp) {
				// We're racing with another reader.
				// Wake up and handle this case.
				return false
			}

			if g2 := traceReader(); gp == g2 {
				// New data arrived between unlocking
				// and the CAS and we won the wake-up
				// race, so wake up directly.
				return false
			} else if g2 != nil {
				printlock()
				println("runtime: got trace reader", g2, g2.goid)
				throw("unexpected trace reader")
			}

			return true
		}, nil, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		goto top
	}

	return buf
}

// readTrace0 is ReadTrace's continuation on g0. This must run on the
// system stack because it acquires trace.lock.
//
//go:systemstack
func readTrace0() (buf []byte, park bool) {
	if raceenabled {
		// g0 doesn't have a race context. Borrow the user G's.
		if getg().racectx != 0 {
			throw("expected racectx == 0")
		}
		getg().racectx = getg().m.curg.racectx
		// (The defer resets the borrowed context when this function
		// returns, including on the early returns below.)
		defer func() { getg().racectx = 0 }()
	}

	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg().m.curg

	if trace.reader.Load() != nil {
		// More than one goroutine reads trace. This is bad.
		// But we rather do not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil, false
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}

	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.19 trace\x00\x00\x00"), false
	}

	// Optimistically look for CPU profile samples. This may write new stack
	// records, and may write new tracing buffers.
	if !trace.footerWritten && !trace.shutdown {
		traceReadCPU()
	}

	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		// We don't simply use a note because the scheduler
		// executes this goroutine directly when it wakes up
		// (also a note would consume an M).
		trace.lockOwner = nil
		unlock(&trace.lock)
		return nil, true
	}
newFull:
	assertLockHeld(&trace.lock)
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos], false
	}

	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		if freq <= 0 {
			throw("trace: ReadTrace got invalid frequency")
		}
		trace.lockOwner = nil
		unlock(&trace.lock)

		// Write frequency event.
		bufp := traceFlush(0, 0)
		buf := bufp.ptr()
		buf.byte(traceEvFrequency | 0<<traceArgCountShift)
		buf.varint(uint64(freq))

		// Dump stack table.
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		bufp = trace.stackTab.dump(bufp)

		// Flush buffers.
		lock(&trace.lock)
		traceFullQueue(bufp)
		goto newFull // trace.lock should be held at newFull
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil, false
	}

	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil, false
}

// traceReader returns the trace reader that should be woken up, if any.
// Callers should first check that trace.enabled or trace.shutdown is set.
//
// This must run on the system stack because it acquires trace.lock.
//
//go:systemstack
func traceReader() *g {
	// Optimistic check first
	if traceReaderAvailable() == nil {
		return nil
	}
	lock(&trace.lock)
	gp := traceReaderAvailable()
	if gp == nil || !trace.reader.CompareAndSwapNoWB(gp, nil) {
		unlock(&trace.lock)
		return nil
	}
	unlock(&trace.lock)
	return gp
}

// traceReaderAvailable returns the trace reader if it is not currently
// scheduled and should be. Callers should first check that trace.enabled
// or trace.shutdown is set.
func traceReaderAvailable() *g {
	if trace.fullHead != 0 || trace.shutdown {
		return trace.reader.Load()
	}
	return nil
}

// traceProcFree frees trace buffer associated with pp.
//
// This must run on the system stack because it acquires trace.lock.
//
//go:systemstack
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}

// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.
	//
	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceAcquireBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceAcquireBuffer.
	//
	// Note trace_userTaskCreate runs the same check.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, 0, skip, args...)
	traceReleaseBuffer(pid)
}

// traceEventLocked writes a single event of type ev to the trace buffer bufp,
// flushing the buffer if necessary. pid is the id of the current P, or
// traceGlobProc if we're tracing without a real P.
//
// Preemption is disabled, and if running without a real P the global tracing
// buffer is locked.
//
// Events types that do not include a stack set skip to -1. Event types that
// include a stack may explicitly reference a stackID from the trace.stackTab
// (obtained dually to traceStackID). Without an explicit stackID, this
// function will automatically capture the stack of the goroutine currently
// running on mp, skipping skip top frames or, if skip is 0, writing out an
// empty stack record.
//
// It records the event's args to the traceBuf, and also makes an effort to
// reserve extraBytes bytes of additional space immediately following the
// event, in the same traceBuf.
func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, stackID uint32, skip int, args ...uint64) {
	buf := bufp.ptr()
	// TODO: test on non-zero extraBytes param.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two add params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		systemstack(func() {
			buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		})
		bufp.set(buf)
	}

	// NOTE: ticks might be same after tick division, although the real cputicks is
	// linear growth.
	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	if tickDiff == 0 {
		ticks = buf.lastTicks + 1
		tickDiff = 1
	}

	buf.lastTicks = ticks
	narg := byte(len(args))
	if stackID != 0 || skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if stackID != 0 {
		buf.varint(uint64(stackID))
	} else if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}
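
// Putting the pieces above together, each event record produced by
// traceEventLocked has the layout
//
//	[ev | narg<<6] [length byte, only when narg == 3] [tickDiff] [args...] [stack id, if any]
//
// where every field after the first byte is a base-128 varint.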

// traceCPUSample writes a CPU profile sample stack to the execution tracer's
// profiling buffer. It is called from a signal handler, so is limited in what
// it can do.
func traceCPUSample(gp *g, pp *p, stk []uintptr) {
	if !trace.enabled {
		// Tracing is usually turned off; don't spend time acquiring the signal
		// lock unless it's on.
		return
	}

	// Match the clock used in traceEventLocked
	now := cputicks()
	// The "header" here is the ID of the P that was running the profiled code,
	// followed by the ID of the goroutine. (For normal CPU profiling, it's
	// usually the number of samples with the given stack.) Near syscalls, pp
	// may be nil.
	var hdr [2]uint64
	if pp != nil {
		// Overflow records in profBuf have all header values set to zero. Make
		// sure that real headers have at least one bit set.
		hdr[0] = uint64(pp.id)<<1 | 0b1
	} else {
		hdr[0] = 0b10
	}
	if gp != nil {
		hdr[1] = gp.goid
	}

	// Allow only one writer at a time
	for !trace.signalLock.CompareAndSwap(0, 1) {
		// TODO: Is it safe to osyield here? https://go.dev/issue/52672
		osyield()
	}

	if log := (*profBuf)(atomic.Loadp(unsafe.Pointer(&trace.cpuLogWrite))); log != nil {
		// Note: we don't pass a tag pointer here (how should profiling tags
		// interact with the execution tracer?), but if we did we'd need to be
		// careful about write barriers. See the long comment in profBuf.write.
		log.write(nil, now, hdr[:], stk)
	}

	trace.signalLock.Store(0)
}

// traceReadCPU attempts to add any new CPU profile samples to the trace.
func traceReadCPU() {
	bufp := &trace.cpuLogBuf

	for {
		data, tags, _ := trace.cpuLogRead.read(profBufNonBlocking)
		if len(data) == 0 {
			break
		}
		for len(data) > 0 {
			if len(data) < 4 || data[0] > uint64(len(data)) {
				break // truncated profile
			}
			if data[0] < 4 || tags != nil && len(tags) < 1 {
				break // malformed profile
			}
			if len(tags) < 1 {
				break // mismatched profile records and tags
			}
			timestamp := data[1]
			ppid := data[2] >> 1
			if hasP := (data[2] & 0b1) != 0; !hasP {
				ppid = ^uint64(0)
			}
			goid := data[3]
			stk := data[4:data[0]]
			empty := len(stk) == 1 && data[2] == 0 && data[3] == 0
			data = data[data[0]:]
			// No support here for reporting goroutine tags at the moment; if
			// that information is to be part of the execution trace, we'd
			// probably want to see when the tags are applied and when they
			// change, instead of only seeing them when we get a CPU sample.
			tags = tags[1:]

			if empty {
				// Looks like an overflow record from the profBuf. Not much to
				// do here, we only want to report full records.
				//
				// TODO: should we start a goroutine to drain the profBuf,
				// rather than relying on a high-enough volume of tracing events
				// to keep ReadTrace busy? https://go.dev/issue/52674
				continue
			}

			buf := bufp.ptr()
			if buf == nil {
				systemstack(func() {
					*bufp = traceFlush(*bufp, 0)
				})
				buf = bufp.ptr()
			}
			for i := range stk {
				if i >= len(buf.stk) {
					break
				}
				buf.stk[i] = uintptr(stk[i])
			}
			stackID := trace.stackTab.put(buf.stk[:len(stk)])

			traceEventLocked(0, nil, 0, bufp, traceEvCPUSample, stackID, 1, timestamp/traceTickDiv, ppid, goid)
		}
	}
}

func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
	gp := getg()
	curgp := mp.curg
	var nstk int
	if curgp == gp {
		nstk = callers(skip+1, buf)
	} else if curgp != nil {
		nstk = gcallers(curgp, skip, buf)
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && curgp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}

// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	// Any time we acquire a buffer, we may end up flushing it,
	// but flushes are rare. Record the lock edge even if it
	// doesn't happen this time.
	lockRankMayTraceFlush()

	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// lockRankMayTraceFlush records the lock ranking effects of a
// potential call to traceFlush.
func lockRankMayTraceFlush() {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
	}
}

// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
//
// This must run on the system stack because it acquires trace.lock.
//
//go:systemstack
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ticks := uint64(cputicks()) / traceTickDiv
	if ticks == bufp.lastTicks {
		ticks = bufp.lastTicks + 1
	}
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceString adds a string to the trace.strings and returns the id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// memory allocation in above may trigger tracing and
	// cause *bufp changes. Following code now works with *bufp,
	// so there must be no memory allocation or any activities
	// that causes tracing after this point.
	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		systemstack(func() {
			buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
			bufp.set(buf)
		})
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// double-check the string and the length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}
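
// For example, varint(300) appends 0xac 0x02: the low seven bits (0101100)
// go first with the continuation bit set, then the remaining bits (10) in a
// final byte with the continuation bit clear.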

// varintAt writes varint v at byte position pos in buf. This always
// consumes traceBytesPerNumber bytes. This is intended for when the
// caller needs to reserve space for a varint but can't populate it
// until later.
func (buf *traceBuf) varintAt(pos int, v uint64) {
	for i := 0; i < traceBytesPerNumber; i++ {
		if i < traceBytesPerNumber-1 {
			buf.arr[pos] = 0x80 | byte(v)
		} else {
			buf.arr[pos] = byte(v)
		}
		v >>= 7
		pos++
	}
}
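
// For example, varintAt(pos, 5) writes 0x85 followed by eight 0x80
// continuation bytes and a final 0x00: a padded, but still valid, varint
// that decodes to 5 and always occupies traceBytesPerNumber bytes.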

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}

// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex // Must be acquired on the system stack
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	// Switch to the system stack so we can acquire tab.lock
	var id uint32
	systemstack(func() {
		lock(&tab.lock)
		if id = tab.find(pcs, hash); id != 0 {
			unlock(&tab.lock)
			return
		}
		// Create new record.
		tab.seq++
		stk := tab.newStack(len(pcs))
		stk.hash = hash
		stk.id = tab.seq
		id = stk.id
		stk.n = len(pcs)
		stkpc := stk.stack()
		for i, pc := range pcs {
			stkpc[i] = pc
		}
		part := int(hash % uintptr(len(tab.tab)))
		stk.link = tab.tab[part]
		atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
		unlock(&tab.lock)
	})
	return id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}
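
// find can run without tab.lock because put only ever prepends to a bucket
// chain and publishes the new head with atomicstorep, so a concurrent reader
// sees either the old chain or the fully initialized new record.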

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
}

// traceFrames returns the frames corresponding to pcs. It may
// allocate and may emit trace events.
func traceFrames(bufp traceBufPtr, pcs []uintptr) ([]traceFrame, traceBufPtr) {
	frames := make([]traceFrame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		var frame traceFrame
		f, more := ci.Next()
		frame, bufp = traceFrameForPC(bufp, 0, f)
		frames = append(frames, frame)
		if !more {
			return frames, bufp
		}
	}
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
//
// This must run on the system stack because it calls traceFlush.
//
//go:systemstack
func (tab *traceStackTable) dump(bufp traceBufPtr) traceBufPtr {
	for i := range tab.tab {
		stk := tab.tab[i].ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			var frames []traceFrame
			frames, bufp = traceFrames(bufp, stk.stack())

			// Estimate the size of this record. This
			// bound is pretty loose, but avoids counting
			// lots of varint sizes.
			maxSize := 1 + traceBytesPerNumber + (2+4*len(frames))*traceBytesPerNumber
			// Make sure we have enough buffer space.
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < maxSize {
				bufp = traceFlush(bufp, 0)
			}

			// Emit header, with space reserved for length.
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			lenPos := buf.pos
			buf.pos += traceBytesPerNumber

			// Emit body.
			recPos := buf.pos
			buf.varint(uint64(stk.id))
			buf.varint(uint64(len(frames)))
			for _, frame := range frames {
				buf.varint(uint64(frame.PC))
				buf.varint(frame.funcID)
				buf.varint(frame.fileID)
				buf.varint(frame.line)
			}

			// Fill in size header.
			buf.varintAt(lenPos, uint64(buf.pos-recPos))
		}
	}

	tab.mem.drop()
	*tab = traceStackTable{}
	lockInit(&((*tab).lock), lockRankTraceStackTab)

	return bufp
}

type traceFrame struct {
	PC     uintptr
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame
	frame.PC = f.PC

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
type traceAllocBlock struct {
	_    sys.NotInHeap
	next traceAllocBlockPtr
	data [64<<10 - goarch.PtrSize]byte
}

// TODO: Since traceAllocBlock is now embedded runtime/internal/sys.NotInHeap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = alignUp(n, goarch.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}
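
// alloc is a simple bump allocator: requests are rounded up to pointer size
// and carved sequentially out of 64 KiB blocks, and memory is only ever
// reclaimed all at once by drop.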

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
	// to handle this we temporary employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called at least once.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	pp := getg().m.p.ptr()
	if pp.traceSweep {
		throw("double traceGCSweepStart")
	}
	pp.traceSweep, pp.traceSwept, pp.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	pp := getg().m.p.ptr()
	if pp.traceSweep {
		if pp.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		pp.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	pp := getg().m.p.ptr()
	if !pp.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if pp.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(pp.traceSwept), uint64(pp.traceReclaimed))
	}
	pp.traceSweep = false
}

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{startPCforTrace(pc) + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, newg.goid, uint64(id))
}

func traceGoStart() {
	gp := getg().m.curg
	pp := gp.m.p
	gp.traceseq++
	if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		traceEvent(traceEvGoStartLabel, -1, gp.goid, gp.traceseq, trace.markWorkerLabels[pp.ptr().gcMarkWorkerMode])
	} else if gp.tracelastp == pp {
		traceEvent(traceEvGoStartLocal, -1, gp.goid)
	} else {
		gp.tracelastp = pp
		traceEvent(traceEvGoStart, -1, gp.goid, gp.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	gp := getg()
	gp.tracelastp = gp.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	gp := getg()
	gp.tracelastp = gp.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}

func traceGoUnpark(gp *g, skip int) {
	pp := getg().m.p
	gp.traceseq++
	if gp.tracelastp == pp {
		traceEvent(traceEvGoUnblockLocal, skip, gp.goid)
	} else {
		gp.tracelastp = pp
		traceEvent(traceEvGoUnblock, skip, gp.goid, gp.traceseq)
	}
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assume that the time till now was actually
		// spent in the syscall (otherwise the start time of the current
		// goroutine is wrong).
		ts = 0
	}
	gp := getg().m.curg
	gp.traceseq++
	gp.tracelastp = gp.m.p
	traceEvent(traceEvGoSysExit, -1, gp.goid, gp.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked,
	// to handle this we temporary employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc(live uint64) {
	traceEvent(traceEvHeapAlloc, -1, live)
}

func traceHeapGoal() {
	heapGoal := gcController.heapGoal()
	if heapGoal == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvHeapGoal, -1, 0)
	} else {
		traceEvent(traceEvHeapGoal, -1, heapGoal)
	}
}

// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 0, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}

//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 0, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	// The log message is recorded after all of the normal trace event
	// arguments, so we ask traceEventLocked to reserve extra space for the
	// message's length and the message itself.
	extraSpace := traceBytesPerNumber + len(message)
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 0, 3, id, categoryID)
	buf := bufp.ptr()

	// double-check the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}

// startPCforTrace returns the start PC of a goroutine for tracing purposes.
// If pc is a wrapper, it returns the PC of the wrapped function. Otherwise
// it returns pc.
func startPCforTrace(pc uintptr) uintptr {
	f := findfunc(pc)
	if !f.valid() {
		return pc // may happen for locked g in extra M since its pc is 0.
	}
	w := funcdata(f, _FUNCDATA_WrapInfo)
	if w == nil {
		return pc // not a wrapper
	}
	return f.datap.textAddr(*(*uint32)(w))
}