// src/runtime/stack.go

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Stack layout parameters.
//
// The per-goroutine g.stackguard0 is set to point _StackGuard bytes above
// the bottom of the stack. Each function compares its stack pointer against
// g.stackguard0 to check for overflow. To save an instruction in functions
// with tiny frames, the stack is allowed to protrude _StackSmall bytes below
// the guard. Functions with large frames don't bother with the check and
// always call morestack.

const (
	// _StackSystem is extra space added to each stack below the usual guard
	// area, for OS-specific purposes such as signal handling. It is nonzero
	// on Windows, Plan 9, and iOS/ARM64.
	_StackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024

	// The minimum size of stack used by Go code.
	_StackMin = 2048

	// The minimum stack size to allocate: _StackMin plus the system extra,
	// rounded up to a power of 2 by the bit-smearing below.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra instruction
	// to do the stack split check, to avoid wraparound in case
	// SP - framesize goes below zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the bottom of the
	// stack. It leaves enough room for a chain of NOSPLIT calls plus the
	// system-specific extra.
	_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this many bytes
	// below the stack guard. This saves an instruction in the checking
	// sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
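
// Worked example (illustrative, not from the original source): on a platform
// where _StackSystem is 0 (e.g. linux/amd64), _FixedStack0 = 2048, so
// _FixedStack1 = 2047 = 0x7ff, the OR/shift smearing leaves it at 0x7ff, and
// _FixedStack = 0x7ff + 1 = 2048. On windows/amd64, _StackSystem = 512*8, so
// _FixedStack0 = 6144 and the same smearing rounds it up to _FixedStack = 8192.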

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stacks with a known bad value on copy to detect missed copies
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*goarch.PtrSize) - 1

	// The values below can be stored to g.stackguard0 to force
	// a stack growth check. They are checked in morestack.
	//
	// stackPreempt signals a goroutine preemption request. It must be
	// greater than any real SP so that the stack check always fails.
	stackPreempt = uintptrMask & -1314 // 0xfff...fade

	// stackFork is stored into g.stackguard0 while the thread is forking.
	// It must be greater than any real SP.
	stackFork = uintptrMask & -1234 // 0xfff...fb2e

	// stackForceMove forces a stack movement. Used for debugging.
	// It must be greater than any real SP.
	stackForceMove = uintptrMask & -275 // 0xfff...feed

	// stackPoisonMin is the lowest allowed stack poison value.
	stackPoisonMin = uintptrMask & -4096 // 0xfff...f000
)
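
// Worked example (illustrative, assuming a 64-bit platform): uintptrMask is
// 0xffffffffffffffff, so stackPreempt = uintptrMask & -1314 = 0xfffffffffffffade,
// stackFork = 0xfffffffffffffb2e, stackForceMove = 0xfffffffffffffeed, and
// stackPoisonMin = 0xfffffffffffff000. All of these are far larger than any
// real stack pointer, so a stack-bound check against them always fails and
// control enters morestack.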

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size:
//
//	order = log_2(size/FixedStack)
//
// There is a free list for each order.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
}

type stackpoolItem struct {
	_    sys.NotInHeap
	mu   mutex
	span mSpanList
}

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
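
// Worked example (illustrative): stacklog2(1) = 0, stacklog2(4) = 2, and
// stacklog2(5) = 2, i.e. the result is ⌊log_2(n)⌋, which is how large-stack
// spans are bucketed into stackLarge.free below.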

// stackpoolalloc allocates a stack from the free pool.
// Must be called with stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// stackpoolfree adds stack x to the free pool.
// Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, the free is delayed until the end of GC:
		// a span whose stacks were recently copied away must not be
		// reused while the GC may still try to mark pointers into it.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}

// stackcacherefill/stackcacherelease implement a global pool
// of stack segments. The pool is required to prevent
// unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to leave room for a transfer).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

// stackcacherelease returns part of a per-P stack cache to the global pool.
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

// stackcache_clear returns all cached stacks in c to the global pool.
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// Larger stacks are allocated from dedicated spans.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
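
// Illustrative note (not from the original source): with the linux/amd64
// values _FixedStack = 2048, _NumStackOrders = 4, and _StackCacheSize = 32768,
// the small-stack path above maps 2 KiB stacks to order 0, 4 KiB to order 1,
// 8 KiB to order 2, and 16 KiB to order 3; requests of 32 KiB or more take
// the large-stack span path instead.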

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a stack span to the heap
			// because it could be reused as a heap span, and this state
			// change would race with GC. Add it to the large stack cache
			// instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout on x86, from high to low addresses: the arguments from
// the caller sit at frame.argp, followed by the return address, the saved
// caller BP (when frame pointers are enabled), the locals below frame.varp,
// and finally the arguments to callees down at frame.sp. Link-register
// architectures differ slightly in where return addresses are saved.

// adjustinfo records the old stack bounds and the delta that must be added
// to every pointer into the old stack so that it points into the new stack
// after a copy.
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// adjustpointer checks whether *vpp is in the old stack described by
// adjinfo. If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}
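
// Worked example (illustrative): for i = 11, ptrbit loads byte i/8 = 1 of
// bv.bytedata and returns bit i%8 = 3 of it, so a byte value of 0x08 at that
// position marks slot 11 as a pointer.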

// adjustpointers adjusts the pointers described by bv in the block of memory
// starting at scanp: every pointer into the old stack is moved by
// adjinfo.delta so that it points into the new stack.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this region might contain channel receive slots (it lies below
	// adjinfo.sghi), concurrent channel operations may be writing to it,
	// so the updates must be done with CAS (see syncadjustsudogs).
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.TrailingZeros8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis is imperfect, so sometimes we can see a junk value in a pointer slot.
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	if frame.continpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't have full GC info for it (because it is written in asm).
		return true
	}

	locals, args, objs := frame.getStackMap(&adjinfo.cache, true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if goarch.ArchFamily == goarch.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			ptrdata := obj.ptrdata()
			gcdata := obj.gcdata()
			var s *mspan
			if obj.useGCProg() {
				// Decode the GC program into a temporary bitmap so we can walk it below.
				s = materializeGCProg(ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
				if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}

	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// adjustpointer only changes pointers that fall inside the old stack,
	// so defers and fields that live in the heap are left alone.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, since a deadlock can't
			// occur because the G involved must already be suspended.
			// So, we get a special hchan lock rank here that is lower
			// than gscan, but doesn't allow acquiring any other locks
			// other than hchan.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the channel locks to prevent concurrent
	// channel operations.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp
	// Update the GC controller's notion of total scannable stack by the
	// size difference before allocating the new stack.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if needed.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && gp.parkingOnChan.Load() {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
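
// Worked example (illustrative): round2(1) = 1, round2(5) = 8, and
// round2(2048) = 2048; gcComputeStartingStackSize below uses this to round
// the average scanned stack size up to a power of 2.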

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g.atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
func newstack() {
	thisg := getg()

	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call to morestack cost a word.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + _StackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy
	// since the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

// nilfunc intentionally faults. It is used by gostartcallfn below when the
// goroutine's function value is nil.
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then stopped before the first instruction in fn.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack.
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	//
	// We also can't copy the stack if we're at an asynchronous
	// safe-point because we don't have precise pointer maps for
	// all frames.
	//
	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and
	// gp.activeStackChans being set.
	return gp.syscallsp == 0 && !gp.asyncSafePoint && !gp.parkingOnChan.Load()
}

// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have pointers
	// into the stack disguised as uintptrs, but these code paths should
	// all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for why).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}
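
// Worked example (illustrative, assuming _StackSystem = 0 and a non-race
// build where sys.StackGuardMultiplier = 1, so _StackLimit = 928-128 = 800):
// for an 8192-byte stack, avail/4 = 2048, so shrinkstack halves the stack to
// 4096 bytes only when gp.stack.hi - gp.sched.sp is less than 2048-800 = 1248.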

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off       int32
	size      int32
	_ptrdata  int32  // ptrdata, or -ptrdata if a GC prog is used
	gcdataoff uint32 // offset to gc data from moduledata.rodata
}

func (r *stackObjectRecord) useGCProg() bool {
	return r._ptrdata < 0
}

func (r *stackObjectRecord) ptrdata() uintptr {
	x := r._ptrdata
	if x < 0 {
		return uintptr(-x)
	}
	return uintptr(x)
}

// gcdata returns pointer map or GC prog of the type.
func (r *stackObjectRecord) gcdata() *byte {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	// If you get a panic here due to a nil mod,
	// you may have made a copy of a stackObjectRecord.
	// You must use the original pointer.
	res := mod.rodata + uintptr(r.gcdataoff)
	return (*byte)(unsafe.Pointer(res))
}

// This is exported as ABI0 via linkname so obj can call it.
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}

// startingStackSize is the amount of stack that new goroutines start with.
// It is a power of 2, and between _FixedStack and maxstacksize, inclusive.
// startingStackSize is updated every GC by tracking the average size of
// stacks scanned during the GC.
var startingStackSize uint32 = _FixedStack

func gcComputeStartingStackSize() {
	if debug.adaptivestackstart == 0 {
		return
	}
	// The starting stack size for new goroutines is adapted to the average
	// size of the stacks scanned during the last GC cycle: sum the per-P
	// scan statistics, take the average, pad it with _StackGuard, clamp it,
	// and round it up to a power of 2.
	var scannedStackSize uint64
	var scannedStacks uint64
	for _, p := range allp {
		scannedStackSize += p.scannedStackSize
		scannedStacks += p.scannedStacks
		// Reset for next time
		p.scannedStackSize = 0
		p.scannedStacks = 0
	}
	if scannedStacks == 0 {
		startingStackSize = _FixedStack
		return
	}
	avg := scannedStackSize/scannedStacks + _StackGuard
	// Note: we add _StackGuard to ensure that a goroutine that
	// uses the average space will not trigger a growth.
	if avg > uint64(maxstacksize) {
		avg = uint64(maxstacksize)
	}
	if avg < _FixedStack {
		avg = _FixedStack
	}

	startingStackSize = uint32(round2(int32(avg)))
}