Source file
src/runtime/mprof.go
// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.

package runtime

import (
	"internal/abi"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var (
	// profInsertLock protects changes to the start of all *bucket linked lists.
	profInsertLock mutex
	// profBlockLock protects the contents of every blockRecord struct.
	profBlockLock mutex
	// profMemActiveLock protects the active field of every memRecord struct.
	profMemActiveLock mutex
	// profMemFutureLock is a set of locks that protect the respective elements
	// of the future array of every memRecord struct.
	profMemFutureLock [len(memRecord{}.future)]mutex
)

// NOTE(rsc): Everything here could use cas if contention became an issue.
const (
	// profile types
	memProfile bucketType = 1 + iota
	blockProfile
	mutexProfile

	// size of bucket hash table
	buckHashSize = 179999

	// maximum number of stack frames recorded per bucket
	maxStack = 32
)

type bucketType int

// A bucket holds per-call-stack profiling information.
// This struct defines the bucket header. It is followed in
// memory by the stack words and then the actual record
// data, either a memRecord or a blockRecord.
//
// Lookup is by hashing the call stack into a linked-list hash table.
//
// None of the fields in this bucket header are modified after
// creation, including its next and allnext links.
//
// No heap pointers.
type bucket struct {
	_       sys.NotInHeap
	next    *bucket
	allnext *bucket
	typ     bucketType // memProfile, blockProfile, or mutexProfile
	hash    uintptr
	size    uintptr
	nstk    uintptr
}

// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
type memRecord struct {
	// A multi-stage scheme of stats accumulation is required to obtain a
	// consistent picture of mallocs and frees for some point in time.
	//
	// The problem is that mallocs come in real time, while frees come only
	// after a GC during concurrent sweeping. Naively counting both as they
	// happen would skew the profile toward allocations that have not yet had
	// a chance to be freed. Instead, allocations are recorded two cycles
	// ahead (future[(cycle+2)%3], see mProf_Malloc) and frees one cycle
	// ahead (future[(cycle+1)%3], see mProf_Free), and a cycle's counts are
	// folded into active only once the matching GC has finished (see
	// mProf_Flush and mProf_PostSweep).

	// active is the currently published profile. A profiling
	// cycle can be accumulated into active once it is complete.
	active memRecordCycle

	// future records the profile events we're counting for cycles
	// that have not yet been published. This is a ring buffer
	// indexed by the global heap profile cycle C and stores cycles
	// C, C+1, and C+2. Unlike active, these counts are only for a
	// single cycle; they are not cumulative across cycles.
	//
	// We store cycle C here because there's a window between when
	// C becomes the active cycle and when we've flushed it to
	// active.
	future [3]memRecordCycle
}

// memRecordCycle is the set of counters for a single heap profile cycle.
type memRecordCycle struct {
	allocs, frees           uintptr
	alloc_bytes, free_bytes uintptr
}

// add accumulates b into a. It does not zero b.
func (a *memRecordCycle) add(b *memRecordCycle) {
	a.allocs += b.allocs
	a.frees += b.frees
	a.alloc_bytes += b.alloc_bytes
	a.free_bytes += b.free_bytes
}

// A blockRecord is the bucket data for a bucket of type blockProfile,
// which is used in blocking and mutex profiles.
type blockRecord struct {
	count  float64
	cycles int64
}

var (
	mbuckets atomic.UnsafePointer // *bucket, memory profile buckets
	bbuckets atomic.UnsafePointer // *bucket, blocking profile buckets
	xbuckets atomic.UnsafePointer // *bucket, mutex profile buckets
	buckhash atomic.UnsafePointer // *buckhashArray

	mProfCycle mProfCycleHolder
)

type buckhashArray [buckHashSize]atomic.UnsafePointer

const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)

// mProfCycleHolder holds the global heap profile cycle number (wrapped at
// mProfCycleWrap, stored starting at bit 1), and a flag (stored at bit 0) to
// indicate whether future[cycle] in all buckets has been queued to flush into
// the active profile.
type mProfCycleHolder struct {
	value atomic.Uint32
}

// read returns the current cycle count.
func (c *mProfCycleHolder) read() (cycle uint32) {
	v := c.value.Load()
	cycle = v >> 1
	return cycle
}

// setFlushed sets the flushed flag. It returns the current cycle count and the
// previous value of the flushed flag.
func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool) {
	for {
		prev := c.value.Load()
		cycle = prev >> 1
		alreadyFlushed = (prev & 0x1) != 0
		next := prev | 0x1
		if c.value.CompareAndSwap(prev, next) {
			return cycle, alreadyFlushed
		}
	}
}

// increment increases the cycle count by one, wrapping the value at
// mProfCycleWrap. It clears the flushed flag.
func (c *mProfCycleHolder) increment() {
	// We explicitly wrap the cycle rather than depending on
	// uint wraparound because the memRecord.future ring does not
	// itself wrap at a power of two.
	for {
		prev := c.value.Load()
		cycle := prev >> 1
		cycle = (cycle + 1) % mProfCycleWrap
		next := cycle << 1
		if c.value.CompareAndSwap(prev, next) {
			break
		}
	}
}
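
// The holder packs the cycle number and the flushed flag into one uint32 so
// both can be updated with a single compare-and-swap: bit 0 is the flag, the
// remaining bits hold the cycle. A minimal standalone sketch of that encoding
// (hypothetical pack/unpack helpers, not part of the runtime):
//
//	package main
//
//	import "fmt"
//
//	// pack stores the cycle in the upper bits and the flushed flag in bit 0.
//	func pack(cycle uint32, flushed bool) uint32 {
//		v := cycle << 1
//		if flushed {
//			v |= 1
//		}
//		return v
//	}
//
//	// unpack reverses pack.
//	func unpack(v uint32) (cycle uint32, flushed bool) {
//		return v >> 1, v&1 != 0
//	}
//
//	func main() {
//		v := pack(41, false)
//		v |= 1 // setFlushed: mark the current cycle as queued for flushing
//		cycle, flushed := unpack(v)
//		fmt.Println(cycle, flushed) // prints: 41 true
//	}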

// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
	switch typ {
	default:
		throw("invalid profile bucket type")
	case memProfile:
		size += unsafe.Sizeof(memRecord{})
	case blockProfile, mutexProfile:
		size += unsafe.Sizeof(blockRecord{})
	}

	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
	b.typ = typ
	b.nstk = uintptr(nstk)
	return b
}

// stk returns the slice in b holding the stack.
func (b *bucket) stk() []uintptr {
	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
	return stk[:b.nstk:b.nstk]
}

// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile {
		throw("bad use of bucket.mp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*memRecord)(data)
}

// bp returns the blockRecord associated with the blockProfile or mutexProfile bucket b.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile && b.typ != mutexProfile {
		throw("bad use of bucket.bp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*blockRecord)(data)
}

// stkbucket returns the bucket for stk[0:nstk], allocating a new bucket if needed.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
	bh := (*buckhashArray)(buckhash.Load())
	if bh == nil {
		lock(&profInsertLock)
		// check again under the lock
		bh = (*buckhashArray)(buckhash.Load())
		if bh == nil {
			bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
			if bh == nil {
				throw("runtime: cannot allocate memory")
			}
			buckhash.StoreNoWB(unsafe.Pointer(bh))
		}
		unlock(&profInsertLock)
	}

	// Hash stack.
	var h uintptr
	for _, pc := range stk {
		h += pc
		h += h << 10
		h ^= h >> 6
	}
	// hash in size
	h += size
	h += h << 10
	h ^= h >> 6
	// finalize
	h += h << 3
	h ^= h >> 11

	i := int(h % buckHashSize)
	// first check optimistically, without the lock
	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			return b
		}
	}

	if !alloc {
		return nil
	}

	lock(&profInsertLock)
	// check again under the insertion lock
	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			unlock(&profInsertLock)
			return b
		}
	}

	// Create new bucket.
	b := newBucket(typ, len(stk))
	copy(b.stk(), stk)
	b.hash = h
	b.size = size

	var allnext *atomic.UnsafePointer
	if typ == memProfile {
		allnext = &mbuckets
	} else if typ == mutexProfile {
		allnext = &xbuckets
	} else {
		allnext = &bbuckets
	}

	b.next = (*bucket)(bh[i].Load())
	b.allnext = (*bucket)(allnext.Load())

	bh[i].StoreNoWB(unsafe.Pointer(b))
	allnext.StoreNoWB(unsafe.Pointer(b))

	unlock(&profInsertLock)
	return b
}

func eqslice(x, y []uintptr) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xi := range x {
		if xi != y[i] {
			return false
		}
	}
	return true
}

// mProf_NextCycle publishes the next heap profile cycle and creates a
// fresh heap profile cycle. This operation is fast and can be done
// during STW. The caller must call mProf_Flush before calling
// mProf_NextCycle again.
//
// This is called by mark termination during STW so allocations and
// frees after the world is started again count towards a new heap
// profiling cycle.
func mProf_NextCycle() {
	mProfCycle.increment()
}

// mProf_Flush flushes the events from the current heap profiling
// cycle into the active profile. After this it is safe to start a new
// heap profiling cycle with mProf_NextCycle.
//
// This is called by GC after mark termination starts the world. In
// contrast with mProf_NextCycle, this is somewhat expensive, but safe
// to do concurrently.
func mProf_Flush() {
	cycle, alreadyFlushed := mProfCycle.setFlushed()
	if alreadyFlushed {
		return
	}

	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	unlock(&profMemActiveLock)
}

// mProf_FlushLocked flushes the events from the heap profiling cycle at index
// into the active profile. The caller must hold the lock for the active
// profile (profMemActiveLock) and for the profiling cycle at index
// (profMemFutureLock[index]).
func mProf_FlushLocked(index uint32) {
	assertLockHeld(&profMemActiveLock)
	assertLockHeld(&profMemFutureLock[index])
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()

		// Flush the cycle's counts into the published profile and
		// clear the slot for reuse.
		mpc := &mp.future[index]
		mp.active.add(mpc)
		*mpc = memRecordCycle{}
	}
}

// mProf_PostSweep records that all sweep frees for this GC cycle have
// completed. This has the effect of publishing the heap profile
// snapshot as of the last mark termination without advancing the heap
// profile cycle.
func mProf_PostSweep() {
	// Flush cycle C+1 to the active profile so everything as of
	// the last mark termination becomes visible. *Don't* advance
	// the cycle, since we're still accumulating allocs in cycle
	// C+2, which have to become C+1 in the next cycle.
	//
	// This can race with mProf_NextCycle and mProf_Flush, which is fine.
	cycle := mProfCycle.read() + 1

	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	unlock(&profMemActiveLock)
}

// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
	var stk [maxStack]uintptr
	nstk := callers(4, stk[:])

	index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))

	b := stkbucket(memProfile, size, stk[:nstk], true)
	mp := b.mp()
	mpc := &mp.future[index]

	lock(&profMemFutureLock[index])
	mpc.allocs++
	mpc.alloc_bytes += size
	unlock(&profMemFutureLock[index])

	// Setprofilebucket locks a bunch of other mutexes, so we call it outside of
	// the profiler locks. This reduces potential contention and chances of
	// deadlocks. Since the object must be alive during the call to
	// mProf_Malloc, it's fine to do this non-atomically.
	systemstack(func() {
		setprofilebucket(p, b)
	})
}

// Called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr) {
	index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future))

	mp := b.mp()
	mpc := &mp.future[index]

	lock(&profMemFutureLock[index])
	mpc.frees++
	mpc.free_bytes += size
	unlock(&profMemFutureLock[index])
}

var blockprofilerate uint64 // in CPU ticks

// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
	var r int64
	if rate <= 0 {
		r = 0 // disable profiling
	} else if rate == 1 {
		r = 1 // profile everything
	} else {
		// convert ns to cycles, use float64 to prevent overflow during multiplication
		r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
		if r == 0 {
			r = 1
		}
	}

	atomic.Store64(&blockprofilerate, uint64(r))
}
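
// As a usage illustration (not part of this file), a program typically enables
// the block profile once near startup and later dumps it through the
// runtime/pprof package; the rate of 1 and the file name below are arbitrary
// example choices:
//
//	package main
//
//	import (
//		"os"
//		"runtime"
//		"runtime/pprof"
//	)
//
//	func main() {
//		runtime.SetBlockProfileRate(1) // record every blocking event
//		defer runtime.SetBlockProfileRate(0)
//
//		ch := make(chan int)
//		go func() { ch <- 1 }()
//		<-ch // channel receives show up as blocking events
//
//		f, err := os.Create("block.pprof")
//		if err != nil {
//			panic(err)
//		}
//		defer f.Close()
//		if err := pprof.Lookup("block").WriteTo(f, 0); err != nil {
//			panic(err)
//		}
//	}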

func blockevent(cycles int64, skip int) {
	if cycles <= 0 {
		cycles = 1
	}

	rate := int64(atomic.Load64(&blockprofilerate))
	if blocksampled(cycles, rate) {
		saveblockevent(cycles, rate, skip+1, blockProfile)
	}
}

// blocksampled returns true for all events where cycles >= rate. Shorter
// events have a cycles/rate random chance of returning true.
func blocksampled(cycles, rate int64) bool {
	if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
		return false
	}
	return true
}

func saveblockevent(cycles, rate int64, skip int, which bucketType) {
	gp := getg()
	var nstk int
	var stk [maxStack]uintptr
	if gp.m.curg == nil || gp.m.curg == gp {
		nstk = callers(skip, stk[:])
	} else {
		nstk = gcallers(gp.m.curg, skip, stk[:])
	}
	b := stkbucket(which, 0, stk[:nstk], true)
	bp := b.bp()

	lock(&profBlockLock)
	// We want to up-scale the count and cycles according to the
	// probability that the event was sampled. For block profile events,
	// the sample probability is 1 if cycles >= rate, and cycles/rate
	// otherwise. For mutex profile events, the sample probability is 1/rate.
	// We scale the events by 1 / (probability the event was sampled).
	if which == blockProfile && cycles < rate {
		// Remove sampling bias, see discussion on http://golang.org/cl/299991.
		bp.count += float64(rate) / float64(cycles)
		bp.cycles += rate
	} else if which == mutexProfile {
		bp.count += float64(rate)
		bp.cycles += rate * cycles
	} else {
		bp.count++
		bp.cycles += cycles
	}
	unlock(&profBlockLock)
}

var mutexprofilerate uint64 // fraction sampled

// SetMutexProfileFraction controls the fraction of mutex contention events
// that are reported in the mutex profile. On average 1/rate events are
// reported. The previous rate is returned.
//
// To turn off profiling entirely, pass rate 0.
// To just read the current rate, pass rate < 0.
func SetMutexProfileFraction(rate int) int {
	if rate < 0 {
		return int(mutexprofilerate)
	}
	old := mutexprofilerate
	atomic.Store64(&mutexprofilerate, uint64(rate))
	return int(old)
}
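
// For illustration only (not part of this file): callers usually set the
// fraction for the duration of some work and restore the previous value
// afterwards. The fraction 5 below is an arbitrary example:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//		"sync"
//	)
//
//	func main() {
//		prev := runtime.SetMutexProfileFraction(5) // sample ~1 in 5 contention events
//		defer runtime.SetMutexProfileFraction(prev)
//
//		var mu sync.Mutex
//		var wg sync.WaitGroup
//		for i := 0; i < 4; i++ {
//			wg.Add(1)
//			go func() {
//				defer wg.Done()
//				for j := 0; j < 1000; j++ {
//					mu.Lock()
//					mu.Unlock() // contended unlocks feed mutexevent below
//				}
//			}()
//		}
//		wg.Wait()
//
//		// A negative argument reads the current setting without changing it.
//		fmt.Println("mutex profile fraction:", runtime.SetMutexProfileFraction(-1))
//	}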

// mutexevent records a mutex contention event that lasted the given number of cycles.
func mutexevent(cycles int64, skip int) {
	if cycles < 0 {
		cycles = 0
	}
	rate := int64(atomic.Load64(&mutexprofilerate))
	// On average, sample one event out of every rate.
	if rate > 0 && int64(fastrand())%rate == 0 {
		saveblockevent(cycles, rate, skip+1, mutexProfile)
	}
}

// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024

// disableMemoryProfiling is set by the linker if runtime.MemProfile
// is not used and the link type guarantees nobody else could use it
// elsewhere.
var disableMemoryProfiling bool

// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}

// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }

// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
	return r.AllocObjects - r.FreeObjects
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// The returned profile may be up to two garbage collection cycles old.
// This is to avoid skewing the profile toward allocations; because
// allocations happen in real time but frees are delayed until the garbage
// collector performs sweeping, the profile only accounts for allocations
// that have had a chance to be freed by the garbage collector.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
	cycle := mProfCycle.read()
	// If we're between mProf_NextCycle and mProf_Flush, take care
	// of flushing to the active profile so we only have to look
	// at the active profile below.
	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	clear := true
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()
		if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
			n++
		}
		if mp.active.allocs != 0 || mp.active.frees != 0 {
			clear = false
		}
	}
	if clear {
		// Absolutely no data, suggesting that a garbage collection
		// has not yet happened. In order to allow profiling when
		// garbage collection is disabled from the beginning of execution,
		// accumulate all of the cycles, and recount buckets.
		n = 0
		for b := head; b != nil; b = b.allnext {
			mp := b.mp()
			for c := range mp.future {
				lock(&profMemFutureLock[c])
				mp.active.add(&mp.future[c])
				mp.future[c] = memRecordCycle{}
				unlock(&profMemFutureLock[c])
			}
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				n++
			}
		}
	}
	if n <= len(p) {
		ok = true
		idx := 0
		for b := head; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				record(&p[idx], b)
				idx++
			}
		}
	}
	unlock(&profMemActiveLock)
	return
}
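
// Most programs read this data through runtime/pprof, but the n, ok contract
// above can also be used directly. A sketch of the usual pattern (grow the
// slice until one call reports a complete copy, since the profile can gain
// records between calls):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		var records []runtime.MemProfileRecord
//		n, ok := runtime.MemProfile(nil, true)
//		for !ok {
//			records = make([]runtime.MemProfileRecord, n+50) // headroom for new records
//			n, ok = runtime.MemProfile(records, true)
//		}
//		records = records[:n]
//
//		var inUse int64
//		for i := range records {
//			inUse += records[i].InUseBytes()
//		}
//		fmt.Printf("%d records, %d bytes in use\n", n, inUse)
//	}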

// Write b's data to r.
func record(r *MemProfileRecord, b *bucket) {
	mp := b.mp()
	r.AllocBytes = int64(mp.active.alloc_bytes)
	r.FreeBytes = int64(mp.active.free_bytes)
	r.AllocObjects = int64(mp.active.allocs)
	r.FreeObjects = int64(mp.active.frees)
	if raceenabled {
		racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	if asanenabled {
		asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	copy(r.Stack0[:], b.stk())
	for i := int(b.nstk); i < len(r.Stack0); i++ {
		r.Stack0[i] = 0
	}
}

func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
	lock(&profMemActiveLock)
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()
		fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
	}
	unlock(&profMemActiveLock)
}

// BlockProfileRecord describes blocking events originated
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64
	Cycles int64
	StackRecord
}

// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&profBlockLock)
	head := (*bucket)(bbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := head; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			// Prevent callers from having to worry about division by zero errors.
			// See discussion on http://golang.org/cl/299991.
			if r.Count == 0 {
				r.Count = 1
			}
			r.Cycles = bp.cycles
			if raceenabled {
				racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
			}
			if msanenabled {
				msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			if asanenabled {
				asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&profBlockLock)
	return
}

// MutexProfile returns n, the number of records in the current mutex profile.
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
// Otherwise, MutexProfile does not change p, and returns n, false.
//
// Most clients should use the runtime/pprof package
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&profBlockLock)
	head := (*bucket)(xbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := head; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			r.Cycles = bp.cycles
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&profBlockLock)
	return
}

// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
	for mp := first; mp != nil; mp = mp.alllink {
		n++
	}
	if n <= len(p) {
		ok = true
		i := 0
		for mp := first; mp != nil; mp = mp.alllink {
			p[i].Stack0 = mp.createstack
			i++
		}
	}
	return
}

//go:linkname runtime_goroutineProfileWithLabels runtime/pprof.runtime_goroutineProfileWithLabels
func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	return goroutineProfileWithLabels(p, labels)
}

const go119ConcurrentGoroutineProfile = true

// labels may be nil. If labels is non-nil, it must have the same length as p.
func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	if labels != nil && len(labels) != len(p) {
		labels = nil
	}

	if go119ConcurrentGoroutineProfile {
		return goroutineProfileWithLabelsConcurrent(p, labels)
	}
	return goroutineProfileWithLabelsSync(p, labels)
}

var goroutineProfile = struct {
	sema    uint32
	active  bool
	offset  atomic.Int64
	records []StackRecord
	labels  []unsafe.Pointer
}{
	sema: 1,
}

// goroutineProfileState indicates the status of a goroutine's stack for the
// current in-progress goroutine profile. Goroutines' stacks are initially
// "Absent" from the profile, and end up "Satisfied" by the time the profile is
// complete. While a goroutine's stack is being captured, its
// goroutineProfileState will be "InProgress" and it will not be able to run
// until the capture completes and the state moves to "Satisfied".
//
// Some goroutines (the finalizer goroutine, which at various times can be
// either a "system" or a "user" goroutine, and the goroutine that is
// coordinating the profile, any goroutine it calls) move directly to the
// "Satisfied" state.
type goroutineProfileState uint32

const (
	goroutineProfileAbsent goroutineProfileState = iota
	goroutineProfileInProgress
	goroutineProfileSatisfied
)

type goroutineProfileStateHolder atomic.Uint32

func (p *goroutineProfileStateHolder) Load() goroutineProfileState {
	return goroutineProfileState((*atomic.Uint32)(p).Load())
}

func (p *goroutineProfileStateHolder) Store(value goroutineProfileState) {
	(*atomic.Uint32)(p).Store(uint32(value))
}

func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileState) bool {
	return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
}

func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	semacquire(&goroutineProfile.sema)

	ourg := getg()

	stopTheWorld("profile")
	// Using gcount while the world is stopped should give us a consistent view
	// of the number of live goroutines, minus the number of goroutines that are
	// alive and permanently marked as "system". But to make this count agree
	// with what we'd get from isSystemGoroutine, we need the finalizer
	// goroutine to be counted while it is running a user finalizer.
	n = int(gcount())
	if fingStatus.Load()&fingRunningFinalizer != 0 {
		n++
	}

	if n > len(p) {
		// There's not enough space in p to store the whole profile, so (per the
		// contract of runtime.GoroutineProfile) we're not allowed to write to p
		// at all and must return n, false.
		startTheWorld()
		semrelease(&goroutineProfile.sema)
		return n, false
	}

	// Save current goroutine.
	sp := getcallersp()
	pc := getcallerpc()
	systemstack(func() {
		saveg(pc, sp, ourg, &p[0])
	})
	ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
	goroutineProfile.offset.Store(1)

	// Prepare for all other goroutines to enter the profile. Aside from ourg,
	// every goroutine struct in the allgs list has its goroutineProfiled field
	// cleared. Any goroutine created from this point on (while
	// goroutineProfile.active is true) will start with its goroutineProfiled
	// field set to goroutineProfileSatisfied.
	goroutineProfile.active = true
	goroutineProfile.records = p
	goroutineProfile.labels = labels
	// The finalizer goroutine needs special handling because it can vary over
	// time between being a "user" goroutine (eligible for this profile) and a
	// "system" goroutine (to be excluded). Pick one before restarting the world.
	if fing != nil {
		fing.goroutineProfiled.Store(goroutineProfileSatisfied)
		if readgstatus(fing) != _Gdead && !isSystemGoroutine(fing, false) {
			doRecordGoroutineProfile(fing)
		}
	}
	startTheWorld()

	// Visit each goroutine that existed as of the startTheWorld call above.
	//
	// New goroutines may not be in this list, but we didn't want to know about
	// them anyway. Any goroutine that the scheduler tries to execute
	// concurrently with this call will start by adding itself to the profile
	// (before the act of executing can cause any changes to its stack).
	forEachGRace(func(gp1 *g) {
		tryRecordGoroutineProfile(gp1, Gosched)
	})

	stopTheWorld("profile cleanup")
	endOffset := goroutineProfile.offset.Swap(0)
	goroutineProfile.active = false
	goroutineProfile.records = nil
	goroutineProfile.labels = nil
	startTheWorld()

	// Restore the invariant that every goroutine struct in allgs has its
	// goroutineProfiled field cleared.
	forEachGRace(func(gp1 *g) {
		gp1.goroutineProfiled.Store(goroutineProfileAbsent)
	})

	if raceenabled {
		raceacquire(unsafe.Pointer(&labelSync))
	}

	if n != int(endOffset) {
		// It's a big surprise that the number of goroutines changed while we
		// were collecting the profile. But probably better to return a
		// truncated profile than to crash the whole process.
	}

	semrelease(&goroutineProfile.sema)
	return n, true
}

// tryRecordGoroutineProfileWB asserts that write barriers are allowed and calls
// tryRecordGoroutineProfile.
func tryRecordGoroutineProfileWB(gp1 *g) {
	if getg().m.p.ptr() == nil {
		throw("no P available, write barriers are forbidden")
	}
	tryRecordGoroutineProfile(gp1, osyield)
}

// tryRecordGoroutineProfile ensures that gp1 has the appropriate representation
// in the current goroutine profile: either that it should not be profiled, or
// that a snapshot of its call stack and labels is now in the profile.
func tryRecordGoroutineProfile(gp1 *g, yield func()) {
	if readgstatus(gp1) == _Gdead {
		// Dead goroutines should not appear in the profile. Goroutines that
		// start while profile collection is active will get their
		// goroutineProfiled field set to goroutineProfileSatisfied before
		// transitioning out of _Gdead, so here we check _Gdead first.
		return
	}
	if isSystemGoroutine(gp1, true) {
		// System goroutines should not appear in the profile. (The finalizer
		// goroutine is marked as "already profiled" by the coordinator.)
		return
	}

	for {
		prev := gp1.goroutineProfiled.Load()
		if prev == goroutineProfileSatisfied {
			// This goroutine is already in the profile (or started after
			// collection began, so shouldn't appear in the profile).
			break
		}
		if prev == goroutineProfileInProgress {
			// Something else is adding gp1 to the goroutine profile right now.
			// Give that a moment to finish.
			yield()
			continue
		}

		// While we have gp1.goroutineProfiled set to
		// goroutineProfileInProgress, gp1 may appear _Grunnable but will not
		// actually be able to run. Disable preemption for ourselves, to make
		// sure we finish profiling gp1 right away instead of leaving it stuck
		// in this limbo.
		mp := acquirem()
		if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) {
			doRecordGoroutineProfile(gp1)
			gp1.goroutineProfiled.Store(goroutineProfileSatisfied)
		}
		releasem(mp)
	}
}

// doRecordGoroutineProfile writes gp1's call stack and labels to an in-progress
// goroutine profile. Preemption is disabled.
//
// This may be called via tryRecordGoroutineProfile in two ways: by the
// goroutine that is coordinating the goroutine profile (running on its own
// stack), or from the scheduler in preparation to execute gp1 (running on the
// system stack).
func doRecordGoroutineProfile(gp1 *g) {
	if readgstatus(gp1) == _Grunning {
		print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
		throw("cannot read stack of running goroutine")
	}

	offset := int(goroutineProfile.offset.Add(1)) - 1

	if offset >= len(goroutineProfile.records) {
		// Should be impossible, but better to return a truncated profile than
		// to crash the entire process at this point. Instead, deal with it in
		// goroutineProfileWithLabelsConcurrent where we have more context.
		return
	}

	// gp1 is prevented from running while we copy its stack, and
	// saveg's traceback must run on the system stack.
	systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset]) })

	if goroutineProfile.labels != nil {
		goroutineProfile.labels[offset] = gp1.labels
	}
}

func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	gp := getg()

	isOK := func(gp1 *g) bool {
		// Checking isSystemGoroutine here makes GoroutineProfile
		// consistent with both NumGoroutine and Stack.
		return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
	}

	stopTheWorld("profile")

	// World is stopped, no locking required.
	n = 1
	forEachGRace(func(gp1 *g) {
		if isOK(gp1) {
			n++
		}
	})

	if n <= len(p) {
		ok = true
		r, lbl := p, labels

		// Save current goroutine.
		sp := getcallersp()
		pc := getcallerpc()
		systemstack(func() {
			saveg(pc, sp, gp, &r[0])
		})
		r = r[1:]

		// If we have a place to put our goroutine labelmap, insert it there.
		if labels != nil {
			lbl[0] = gp.labels
			lbl = lbl[1:]
		}

		// Save other goroutines.
		forEachGRace(func(gp1 *g) {
			if !isOK(gp1) {
				return
			}

			if len(r) == 0 {
				// Should be impossible, but better to return a
				// truncated profile than to crash the entire process.
				return
			}

			// The world is stopped, so gp1 cannot change; saveg's
			// traceback must run on the system stack.
			systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) })
			if labels != nil {
				lbl[0] = gp1.labels
				lbl = lbl[1:]
			}
			r = r[1:]
		})
	}

	if raceenabled {
		raceacquire(unsafe.Pointer(&labelSync))
	}

	startTheWorld()
	return n, ok
}

// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
	return goroutineProfileWithLabels(p, nil)
}
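
// For illustration only (not part of this file): because goroutines can be
// created between the sizing call and the copying call, callers normally retry
// with a slightly larger slice until the n, ok contract reports success. The
// snapshotGoroutines helper below is hypothetical:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	// snapshotGoroutines grows its buffer until runtime.GoroutineProfile
//	// reports a complete copy.
//	func snapshotGoroutines() []runtime.StackRecord {
//		n := runtime.NumGoroutine()
//		for {
//			records := make([]runtime.StackRecord, n+10) // headroom for new goroutines
//			got, ok := runtime.GoroutineProfile(records)
//			if ok {
//				return records[:got]
//			}
//			n = got
//		}
//	}
//
//	func main() {
//		for _, rec := range snapshotGoroutines() {
//			// Stack() returns the non-zero prefix of Stack0.
//			fmt.Println(len(rec.Stack()), "frames")
//		}
//	}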

// saveg saves a stack trace for gp into r, terminating it with a 0 entry
// if the trace does not fill Stack0.
func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
	n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
	if n < len(r.Stack0) {
		r.Stack0[n] = 0
	}
}

// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
	if all {
		stopTheWorld("stack trace")
	}

	n := 0
	if len(buf) > 0 {
		gp := getg()
		sp := getcallersp()
		pc := getcallerpc()
		systemstack(func() {
			g0 := getg()
			// Force traceback=1 to override GOTRACEBACK setting,
			// so that Stack's results are consistent.
			// GOTRACEBACK is only about crash dumps.
			g0.m.traceback = 1
			g0.writebuf = buf[0:0:len(buf)]
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
			if all {
				tracebackothers(gp)
			}
			g0.m.traceback = 0
			n = len(g0.writebuf)
			g0.writebuf = nil
		})
	}

	if all {
		startTheWorld()
	}
	return n
}
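
// A sketch of the usual calling pattern (illustrative only): because Stack
// truncates its output to len(buf), callers that want the full dump typically
// double the buffer until the result fits. The allStacks helper is hypothetical:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	// allStacks grows the buffer until runtime.Stack has room for every
//	// goroutine's trace.
//	func allStacks() []byte {
//		buf := make([]byte, 64<<10)
//		for {
//			n := runtime.Stack(buf, true) // true: include every goroutine
//			if n < len(buf) {
//				return buf[:n]
//			}
//			buf = make([]byte, 2*len(buf))
//		}
//	}
//
//	func main() {
//		fmt.Printf("%s", allStacks())
//	}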

// Tracing of alloc/free/gc.

var tracelock mutex

func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	if typ == nil {
		print("tracealloc(", p, ", ", hex(size), ")\n")
	} else {
		print("tracealloc(", p, ", ", hex(size), ", ", typ.string(), ")\n")
	}
	if gp.m.curg == nil || gp == gp.m.curg {
		goroutineheader(gp)
		pc := getcallerpc()
		sp := getcallersp()
		systemstack(func() {
			traceback(pc, sp, 0, gp)
		})
	} else {
		goroutineheader(gp.m.curg)
		traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
	}
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracefree(p unsafe.Pointer, size uintptr) {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracefree(", p, ", ", hex(size), ")\n")
	goroutineheader(gp)
	pc := getcallerpc()
	sp := getcallersp()
	systemstack(func() {
		traceback(pc, sp, 0, gp)
	})
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}

func tracegc() {
	lock(&tracelock)
	gp := getg()
	gp.m.traceback = 2
	print("tracegc()\n")
	// running on m->g0 stack; show all non-g0 goroutines
	tracebackothers(gp)
	print("end tracegc\n")
	print("\n")
	gp.m.traceback = 0
	unlock(&tracelock)
}