Source file
src/runtime/proc.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/runtime/sys"
15 "internal/stringslite"
16 "unsafe"
17 )
18
19
20 var modinfo string
21
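// Goroutine scheduler.
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
// The main concepts are G (goroutine), M (worker thread, or machine), and
// P (processor, a resource required to execute Go code). An M must have an
// associated P to run Go code, though it may be blocked or in a syscall without one.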
116 var (
117 m0 m
118 g0 g
119 mcache0 *mcache
120 raceprocctx0 uintptr
121 raceFiniLock mutex
122 )
123
124
125
126 var runtime_inittasks []*initTask
127
128
129
130
131
132 var main_init_done chan bool
133
134
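// main_main is main.main from the main package, wired up by the linker.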
135 func main_main()
136
137
138 var mainStarted bool
139
140
141 var runtimeInitTime int64
142
143
144 var initSigmask sigset
145
146
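// main is the goroutine that runs once the runtime has bootstrapped. It finishes
// runtime initialization, runs all init functions, and then calls main.main.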
147 func main() {
148 mp := getg().m
149
150
151
152 mp.g0.racectx = 0
153
154
155
156
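// Set the maximum stack size: 1 GB on 64-bit systems, 250 MB on 32-bit.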
157 if goarch.PtrSize == 8 {
158 maxstacksize = 1000000000
159 } else {
160 maxstacksize = 250000000
161 }
162
163
164
165
166 maxstackceiling = 2 * maxstacksize
167
168
169 mainStarted = true
170
171 if haveSysmon {
172 systemstack(func() {
173 newm(sysmon, nil, -1)
174 })
175 }
176
177
178
179
180
181
182
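// Lock the main goroutine onto this, the main OS thread, during initialization.
// Most programs won't care, but a few require certain calls to be made by the main thread.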
183 lockOSThread()
184
185 if mp != &m0 {
186 throw("runtime.main not on m0")
187 }
188
189
190
191 runtimeInitTime = nanotime()
192 if runtimeInitTime == 0 {
193 throw("nanotime returning zero")
194 }
195
196 if debug.inittrace != 0 {
197 inittrace.id = getg().goid
198 inittrace.active = true
199 }
200
201 doInit(runtime_inittasks)
202
203
204 needUnlock := true
205 defer func() {
206 if needUnlock {
207 unlockOSThread()
208 }
209 }()
210
211 gcenable()
212
213 main_init_done = make(chan bool)
214 if iscgo {
215 if _cgo_pthread_key_created == nil {
216 throw("_cgo_pthread_key_created missing")
217 }
218
219 if _cgo_thread_start == nil {
220 throw("_cgo_thread_start missing")
221 }
222 if GOOS != "windows" {
223 if _cgo_setenv == nil {
224 throw("_cgo_setenv missing")
225 }
226 if _cgo_unsetenv == nil {
227 throw("_cgo_unsetenv missing")
228 }
229 }
230 if _cgo_notify_runtime_init_done == nil {
231 throw("_cgo_notify_runtime_init_done missing")
232 }
233
234
235 if set_crosscall2 == nil {
236 throw("set_crosscall2 missing")
237 }
238 set_crosscall2()
239
240
241
242 startTemplateThread()
243 cgocall(_cgo_notify_runtime_init_done, nil)
244 }
245
246
247
248
249
250
251
252
253 for m := &firstmoduledata; m != nil; m = m.next {
254 doInit(m.inittasks)
255 }
256
257
258
259 inittrace.active = false
260
261 close(main_init_done)
262
263 needUnlock = false
264 unlockOSThread()
265
266 if isarchive || islibrary {
267
268
269 if GOARCH == "wasm" {
270
271
272
273
274
275
276
277 pause(sys.GetCallerSP() - 16)
278 panic("unreachable")
279 }
280 return
281 }
282 fn := main_main
283 fn()
284 if raceenabled {
285 runExitHooks(0)
286 racefini()
287 }
288
289
290
291
292
293 if runningPanicDefers.Load() != 0 {
294
295 for c := 0; c < 1000; c++ {
296 if runningPanicDefers.Load() == 0 {
297 break
298 }
299 Gosched()
300 }
301 }
302 if panicking.Load() != 0 {
303 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
304 }
305 runExitHooks(0)
306
307 exit(0)
308 for {
309 var x *int32
310 *x = 0
311 }
312 }
313
314
315
316
317 func os_beforeExit(exitCode int) {
318 runExitHooks(exitCode)
319 if exitCode == 0 && raceenabled {
320 racefini()
321 }
322 }
323
324 func init() {
325 exithook.Gosched = Gosched
326 exithook.Goid = func() uint64 { return getg().goid }
327 exithook.Throw = throw
328 }
329
330 func runExitHooks(code int) {
331 exithook.Run(code)
332 }
333
334
335 func init() {
336 go forcegchelper()
337 }
338
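// forcegchelper is the goroutine that performs forced (periodic) GCs. It parks on
// forcegc.lock until sysmon decides a GC is overdue and wakes it.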
339 func forcegchelper() {
340 forcegc.g = getg()
341 lockInit(&forcegc.lock, lockRankForcegc)
342 for {
343 lock(&forcegc.lock)
344 if forcegc.idle.Load() {
345 throw("forcegc: phase error")
346 }
347 forcegc.idle.Store(true)
348 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
349
350 if debug.gctrace > 0 {
351 println("GC forced")
352 }
353
354 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
355 }
356 }
357
358
359
360
361
362 func Gosched() {
363 checkTimeouts()
364 mcall(gosched_m)
365 }
366
367
368
369
370
371 func goschedguarded() {
372 mcall(goschedguarded_m)
373 }
374
375
376
377
378
379
380 func goschedIfBusy() {
381 gp := getg()
382
383
384 if !gp.preempt && sched.npidle.Load() > 0 {
385 return
386 }
387 mcall(gosched_m)
388 }
389
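// gopark puts the current goroutine into a waiting state and calls unlockf on the
// system stack. If unlockf returns false the goroutine is resumed immediately.
// unlockf must not access this G's stack. reason explains why the goroutine is
// parked and is shown in stack traces and heap dumps.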
418 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
419 if reason != waitReasonSleep {
420 checkTimeouts()
421 }
422 mp := acquirem()
423 gp := mp.curg
424 status := readgstatus(gp)
425 if status != _Grunning && status != _Gscanrunning {
426 throw("gopark: bad g status")
427 }
428 mp.waitlock = lock
429 mp.waitunlockf = unlockf
430 gp.waitreason = reason
431 mp.waitTraceBlockReason = traceReason
432 mp.waitTraceSkip = traceskip
433 releasem(mp)
434
435 mcall(park_m)
436 }
437
438
439
440 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
441 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
442 }
443
444
445
446
447
448
449
450
451
452
453
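// goready marks gp runnable from the system stack, queueing it to run next on the
// current P.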
454 func goready(gp *g, traceskip int) {
455 systemstack(func() {
456 ready(gp, traceskip, true)
457 })
458 }
459
460
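// acquireSudog returns a sudog from the per-P cache, refilling the cache from the
// central sched.sudogcache list or allocating a new sudog if both are empty.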
461 func acquireSudog() *sudog {
462
463
464
465
466
467
468
469
470 mp := acquirem()
471 pp := mp.p.ptr()
472 if len(pp.sudogcache) == 0 {
473 lock(&sched.sudoglock)
474
475 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
476 s := sched.sudogcache
477 sched.sudogcache = s.next
478 s.next = nil
479 pp.sudogcache = append(pp.sudogcache, s)
480 }
481 unlock(&sched.sudoglock)
482
483 if len(pp.sudogcache) == 0 {
484 pp.sudogcache = append(pp.sudogcache, new(sudog))
485 }
486 }
487 n := len(pp.sudogcache)
488 s := pp.sudogcache[n-1]
489 pp.sudogcache[n-1] = nil
490 pp.sudogcache = pp.sudogcache[:n-1]
491 if s.elem != nil {
492 throw("acquireSudog: found s.elem != nil in cache")
493 }
494 releasem(mp)
495 return s
496 }
497
498
499 func releaseSudog(s *sudog) {
500 if s.elem != nil {
501 throw("runtime: sudog with non-nil elem")
502 }
503 if s.isSelect {
504 throw("runtime: sudog with non-false isSelect")
505 }
506 if s.next != nil {
507 throw("runtime: sudog with non-nil next")
508 }
509 if s.prev != nil {
510 throw("runtime: sudog with non-nil prev")
511 }
512 if s.waitlink != nil {
513 throw("runtime: sudog with non-nil waitlink")
514 }
515 if s.c != nil {
516 throw("runtime: sudog with non-nil c")
517 }
518 gp := getg()
519 if gp.param != nil {
520 throw("runtime: releaseSudog with non-nil gp.param")
521 }
522 mp := acquirem()
523 pp := mp.p.ptr()
524 if len(pp.sudogcache) == cap(pp.sudogcache) {
525
526 var first, last *sudog
527 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
528 n := len(pp.sudogcache)
529 p := pp.sudogcache[n-1]
530 pp.sudogcache[n-1] = nil
531 pp.sudogcache = pp.sudogcache[:n-1]
532 if first == nil {
533 first = p
534 } else {
535 last.next = p
536 }
537 last = p
538 }
539 lock(&sched.sudoglock)
540 last.next = sched.sudogcache
541 sched.sudogcache = first
542 unlock(&sched.sudoglock)
543 }
544 pp.sudogcache = append(pp.sudogcache, s)
545 releasem(mp)
546 }
547
548
549 func badmcall(fn func(*g)) {
550 throw("runtime: mcall called on m->g0 stack")
551 }
552
553 func badmcall2(fn func(*g)) {
554 throw("runtime: mcall function returned")
555 }
556
557 func badreflectcall() {
558 panic(plainError("arg size to reflect.call more than 1GB"))
559 }
560
561
562
563 func badmorestackg0() {
564 if !crashStackImplemented {
565 writeErrStr("fatal: morestack on g0\n")
566 return
567 }
568
569 g := getg()
570 switchToCrashStack(func() {
571 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
572 g.m.traceback = 2
573 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
574 print("\n")
575
576 throw("morestack on g0")
577 })
578 }
579
580
581
582 func badmorestackgsignal() {
583 writeErrStr("fatal: morestack on gsignal\n")
584 }
585
586
587 func badctxt() {
588 throw("ctxt != 0")
589 }
590
591
592
593 var gcrash g
594
595 var crashingG atomic.Pointer[g]
596
597
598
599
600
601
602
603
604
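// switchToCrashStack switches to the dedicated crash stack (gcrash) and calls fn,
// which must not return. Only one goroutine may win the race to use the crash
// stack; recursive or concurrent attempts abort.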
605 func switchToCrashStack(fn func()) {
606 me := getg()
607 if crashingG.CompareAndSwapNoWB(nil, me) {
608 switchToCrashStack0(fn)
609 abort()
610 }
611 if crashingG.Load() == me {
612
613 writeErrStr("fatal: recursive switchToCrashStack\n")
614 abort()
615 }
616
617 usleep_no_g(100)
618 writeErrStr("fatal: concurrent switchToCrashStack\n")
619 abort()
620 }
621
622
623
624
625 const crashStackImplemented = GOOS != "windows"
626
627
628 func switchToCrashStack0(fn func())
629
630 func lockedOSThread() bool {
631 gp := getg()
632 return gp.lockedm != 0 && gp.m.lockedg != 0
633 }
634
635 var (
636
637
638
639
640
641
642 allglock mutex
643 allgs []*g
644
645
646
647
648
649
650
651
652
653
654
655
656
657 allglen uintptr
658 allgptr **g
659 )
660
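// allgadd appends gp to allgs, publishing the updated pointer and length so that
// atomicAllG readers never observe an inconsistent slice.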
661 func allgadd(gp *g) {
662 if readgstatus(gp) == _Gidle {
663 throw("allgadd: bad status Gidle")
664 }
665
666 lock(&allglock)
667 allgs = append(allgs, gp)
668 if &allgs[0] != allgptr {
669 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
670 }
671 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
672 unlock(&allglock)
673 }
674
675
676
677
678 func allGsSnapshot() []*g {
679 assertWorldStoppedOrLockHeld(&allglock)
680
681
682
683
684
685
686 return allgs[:len(allgs):len(allgs)]
687 }
688
689
690 func atomicAllG() (**g, uintptr) {
691 length := atomic.Loaduintptr(&allglen)
692 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
693 return ptr, length
694 }
695
696
697 func atomicAllGIndex(ptr **g, i uintptr) *g {
698 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
699 }
700
701
702
703
704 func forEachG(fn func(gp *g)) {
705 lock(&allglock)
706 for _, gp := range allgs {
707 fn(gp)
708 }
709 unlock(&allglock)
710 }
711
712
713
714
715
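// forEachGRace calls fn on every G without taking allglock; it may miss Gs that
// are created concurrently.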
716 func forEachGRace(fn func(gp *g)) {
717 ptr, length := atomicAllG()
718 for i := uintptr(0); i < length; i++ {
719 gp := atomicAllGIndex(ptr, i)
720 fn(gp)
721 }
722 return
723 }
724
725 const (
726
727
728 _GoidCacheBatch = 16
729 )
730
731
732
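// cpuinit initializes CPU feature detection (internal/cpu) using the GODEBUG value
// in env, and copies the relevant feature flags into runtime variables used by
// assembly and compiler intrinsics.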
733 func cpuinit(env string) {
734 switch GOOS {
735 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
736 cpu.DebugOptions = true
737 }
738 cpu.Initialize(env)
739
740
741
742 switch GOARCH {
743 case "386", "amd64":
744 x86HasPOPCNT = cpu.X86.HasPOPCNT
745 x86HasSSE41 = cpu.X86.HasSSE41
746 x86HasFMA = cpu.X86.HasFMA
747
748 case "arm":
749 armHasVFPv4 = cpu.ARM.HasVFPv4
750
751 case "arm64":
752 arm64HasATOMICS = cpu.ARM64.HasATOMICS
753
754 case "loong64":
755 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
756 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
757 loong64HasLSX = cpu.Loong64.HasLSX
758 }
759 }
760
761
762
763
764 func getGodebugEarly() string {
765 const prefix = "GODEBUG="
766 var env string
767 switch GOOS {
768 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
769
770
771
772 n := int32(0)
773 for argv_index(argv, argc+1+n) != nil {
774 n++
775 }
776
777 for i := int32(0); i < n; i++ {
778 p := argv_index(argv, argc+1+i)
779 s := unsafe.String(p, findnull(p))
780
781 if stringslite.HasPrefix(s, prefix) {
782 env = gostring(p)[len(prefix):]
783 break
784 }
785 }
786 }
787 return env
788 }
789
790
791
792
793
794
795
796
797
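// schedinit initializes the scheduler. The bootstrap sequence is: call osinit,
// call schedinit, make & queue the new main G, then call runtime·mstart; the new
// G calls runtime·main.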
798 func schedinit() {
799 lockInit(&sched.lock, lockRankSched)
800 lockInit(&sched.sysmonlock, lockRankSysmon)
801 lockInit(&sched.deferlock, lockRankDefer)
802 lockInit(&sched.sudoglock, lockRankSudog)
803 lockInit(&deadlock, lockRankDeadlock)
804 lockInit(&paniclk, lockRankPanic)
805 lockInit(&allglock, lockRankAllg)
806 lockInit(&allpLock, lockRankAllp)
807 lockInit(&reflectOffs.lock, lockRankReflectOffs)
808 lockInit(&finlock, lockRankFin)
809 lockInit(&cpuprof.lock, lockRankCpuprof)
810 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
811 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
812 traceLockInit()
813
814
815
816 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
817
818 lockVerifyMSize()
819
820
821
822 gp := getg()
823 if raceenabled {
824 gp.racectx, raceprocctx0 = raceinit()
825 }
826
827 sched.maxmcount = 10000
828 crashFD.Store(^uintptr(0))
829
830
831 worldStopped()
832
833 ticks.init()
834 moduledataverify()
835 stackinit()
836 mallocinit()
837 godebug := getGodebugEarly()
838 cpuinit(godebug)
839 randinit()
840 alginit()
841 mcommoninit(gp.m, -1)
842 modulesinit()
843 typelinksinit()
844 itabsinit()
845 stkobjinit()
846
847 sigsave(&gp.m.sigmask)
848 initSigmask = gp.m.sigmask
849
850 goargs()
851 goenvs()
852 secure()
853 checkfds()
854 parsedebugvars()
855 gcinit()
856
857
858
859 gcrash.stack = stackalloc(16384)
860 gcrash.stackguard0 = gcrash.stack.lo + 1000
861 gcrash.stackguard1 = gcrash.stack.lo + 1000
862
863
864
865
866
867 if disableMemoryProfiling {
868 MemProfileRate = 0
869 }
870
871
872 mProfStackInit(gp.m)
873
874 lock(&sched.lock)
875 sched.lastpoll.Store(nanotime())
876 procs := ncpu
877 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
878 procs = n
879 }
880 if procresize(procs) != nil {
881 throw("unknown runnable goroutine during bootstrap")
882 }
883 unlock(&sched.lock)
884
885
886 worldStarted()
887
888 if buildVersion == "" {
889
890
891 buildVersion = "unknown"
892 }
893 if len(modinfo) == 1 {
894
895
896 modinfo = ""
897 }
898 }
899
900 func dumpgstatus(gp *g) {
901 thisg := getg()
902 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
903 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
904 }
905
906
907 func checkmcount() {
908 assertLockHeld(&sched.lock)
909
910
911
912
913
914
915
916
917
918 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
919 if count > sched.maxmcount {
920 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
921 throw("thread exhaustion")
922 }
923 }
924
925
926
927
928
929 func mReserveID() int64 {
930 assertLockHeld(&sched.lock)
931
932 if sched.mnext+1 < sched.mnext {
933 throw("runtime: thread ID overflow")
934 }
935 id := sched.mnext
936 sched.mnext++
937 checkmcount()
938 return id
939 }
940
941
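// mcommoninit performs OS-independent initialization common to all Ms.
// id may be a pre-reserved M ID, or -1 to reserve a new one.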
942 func mcommoninit(mp *m, id int64) {
943 gp := getg()
944
945
946 if gp != gp.m.g0 {
947 callers(1, mp.createstack[:])
948 }
949
950 lock(&sched.lock)
951
952 if id >= 0 {
953 mp.id = id
954 } else {
955 mp.id = mReserveID()
956 }
957
958 mrandinit(mp)
959
960 mpreinit(mp)
961 if mp.gsignal != nil {
962 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
963 }
964
965
966
967 mp.alllink = allm
968
969
970
971 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
972 unlock(&sched.lock)
973
974
975 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
976 mp.cgoCallers = new(cgoCallers)
977 }
978 mProfStackInit(mp)
979 }
980
981
982
983
984
985 func mProfStackInit(mp *m) {
986 if debug.profstackdepth == 0 {
987
988
989 return
990 }
991 mp.profStack = makeProfStackFP()
992 mp.mLockProfile.stack = makeProfStackFP()
993 }
994
995
996
997
998 func makeProfStackFP() []uintptr {
999
1000
1001
1002
1003
1004
1005 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1006 }
1007
1008
1009
1010 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1011
1012
1013 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1014
1015 func (mp *m) becomeSpinning() {
1016 mp.spinning = true
1017 sched.nmspinning.Add(1)
1018 sched.needspinning.Store(0)
1019 }
1020
1021 func (mp *m) hasCgoOnStack() bool {
1022 return mp.ncgo > 0 || mp.isextra
1023 }
1024
1025 const (
1026
1027
1028 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1029
1030
1031
1032 osHasLowResClockInt = goos.IsWindows
1033
1034
1035
1036 osHasLowResClock = osHasLowResClockInt > 0
1037 )
1038
1039
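// ready marks gp (which must be in _Gwaiting) runnable, puts it on the current
// P's run queue, and wakes another P if one might be needed.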
1040 func ready(gp *g, traceskip int, next bool) {
1041 status := readgstatus(gp)
1042
1043
1044 mp := acquirem()
1045 if status&^_Gscan != _Gwaiting {
1046 dumpgstatus(gp)
1047 throw("bad g->status in ready")
1048 }
1049
1050
1051 trace := traceAcquire()
1052 casgstatus(gp, _Gwaiting, _Grunnable)
1053 if trace.ok() {
1054 trace.GoUnpark(gp, traceskip)
1055 traceRelease(trace)
1056 }
1057 runqput(mp.p.ptr(), gp, next)
1058 wakep()
1059 releasem(mp)
1060 }
1061
1062
1063
1064 const freezeStopWait = 0x7fffffff
1065
1066
1067
1068 var freezing atomic.Bool
1069
1070
1071
1072
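// freezetheworld is a best-effort stop of all running goroutines, used while
// crashing so that tracebacks see a reasonably consistent world. Unlike
// stopTheWorld it has no reverse operation and may be called multiple times.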
1073 func freezetheworld() {
1074 freezing.Store(true)
1075 if debug.dontfreezetheworld > 0 {
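// GODEBUG=dontfreezetheworld=1: skip forcibly stopping the Ps and just pause
// briefly before returning, leaving goroutines running (a debugging aid).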
1100 usleep(1000)
1101 return
1102 }
1103
1104
1105
1106
1107 for i := 0; i < 5; i++ {
1108
1109 sched.stopwait = freezeStopWait
1110 sched.gcwaiting.Store(true)
1111
1112 if !preemptall() {
1113 break
1114 }
1115 usleep(1000)
1116 }
1117
1118 usleep(1000)
1119 preemptall()
1120 usleep(1000)
1121 }
1122
1123
1124
1125
1126
1127 func readgstatus(gp *g) uint32 {
1128 return gp.atomicstatus.Load()
1129 }
1130
1131
1132
1133
1134
1135 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1136 success := false
1137
1138
1139 switch oldval {
1140 default:
1141 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1142 dumpgstatus(gp)
1143 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1144 case _Gscanrunnable,
1145 _Gscanwaiting,
1146 _Gscanrunning,
1147 _Gscansyscall,
1148 _Gscanpreempted:
1149 if newval == oldval&^_Gscan {
1150 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1151 }
1152 }
1153 if !success {
1154 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1155 dumpgstatus(gp)
1156 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1157 }
1158 releaseLockRankAndM(lockRankGscan)
1159 }
1160
1161
1162
1163 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1164 switch oldval {
1165 case _Grunnable,
1166 _Grunning,
1167 _Gwaiting,
1168 _Gsyscall:
1169 if newval == oldval|_Gscan {
1170 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1171 if r {
1172 acquireLockRankAndM(lockRankGscan)
1173 }
1174 return r
1175
1176 }
1177 }
1178 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1179 throw("castogscanstatus")
1180 panic("not reached")
1181 }
1182
1183
1184
1185 var casgstatusAlwaysTrack = false
1186
1187
1188
1189
1190
1191
1192
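// casgstatus transitions gp's status from oldval to newval. Neither value may
// carry the _Gscan bit; if the status is currently being scanned, casgstatus
// loops until the scan completes. It also maintains scheduling-latency tracking.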
1193 func casgstatus(gp *g, oldval, newval uint32) {
1194 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1195 systemstack(func() {
1196
1197
1198 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1199 throw("casgstatus: bad incoming values")
1200 })
1201 }
1202
1203 lockWithRankMayAcquire(nil, lockRankGscan)
1204
1205
1206 const yieldDelay = 5 * 1000
1207 var nextYield int64
1208
1209
1210
1211 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1212 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1213 systemstack(func() {
1214
1215
1216 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1217 })
1218 }
1219 if i == 0 {
1220 nextYield = nanotime() + yieldDelay
1221 }
1222 if nanotime() < nextYield {
1223 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1224 procyield(1)
1225 }
1226 } else {
1227 osyield()
1228 nextYield = nanotime() + yieldDelay/2
1229 }
1230 }
1231
1232 if gp.syncGroup != nil {
1233 systemstack(func() {
1234 gp.syncGroup.changegstatus(gp, oldval, newval)
1235 })
1236 }
1237
1238 if oldval == _Grunning {
1239
1240 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1241 gp.tracking = true
1242 }
1243 gp.trackingSeq++
1244 }
1245 if !gp.tracking {
1246 return
1247 }
1248
1249
1250
1251
1252
1253
1254 switch oldval {
1255 case _Grunnable:
1256
1257
1258
1259 now := nanotime()
1260 gp.runnableTime += now - gp.trackingStamp
1261 gp.trackingStamp = 0
1262 case _Gwaiting:
1263 if !gp.waitreason.isMutexWait() {
1264
1265 break
1266 }
1267
1268
1269
1270
1271
1272 now := nanotime()
1273 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1274 gp.trackingStamp = 0
1275 }
1276 switch newval {
1277 case _Gwaiting:
1278 if !gp.waitreason.isMutexWait() {
1279
1280 break
1281 }
1282
1283 now := nanotime()
1284 gp.trackingStamp = now
1285 case _Grunnable:
1286
1287
1288 now := nanotime()
1289 gp.trackingStamp = now
1290 case _Grunning:
1291
1292
1293
1294 gp.tracking = false
1295 sched.timeToRun.record(gp.runnableTime)
1296 gp.runnableTime = 0
1297 }
1298 }
1299
1300
1301
1302
1303 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1304
1305 gp.waitreason = reason
1306 casgstatus(gp, old, _Gwaiting)
1307 }
1308
1309
1310
1311
1312
1313 func casGToWaitingForGC(gp *g, old uint32, reason waitReason) {
1314 if !reason.isWaitingForGC() {
1315 throw("casGToWaitingForGC with non-isWaitingForGC wait reason")
1316 }
1317 casGToWaiting(gp, old, reason)
1318 }
1319
1320
1321
1322
1323
1324 func casGToPreemptScan(gp *g, old, new uint32) {
1325 if old != _Grunning || new != _Gscan|_Gpreempted {
1326 throw("bad g transition")
1327 }
1328 acquireLockRankAndM(lockRankGscan)
1329 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1330 }
1331
1332
1333
1334
1335
1336
1337 }
1338
1339
1340
1341
1342 func casGFromPreempted(gp *g, old, new uint32) bool {
1343 if old != _Gpreempted || new != _Gwaiting {
1344 throw("bad g transition")
1345 }
1346 gp.waitreason = waitReasonPreempted
1347 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1348 return false
1349 }
1350 if sg := gp.syncGroup; sg != nil {
1351 sg.changegstatus(gp, _Gpreempted, _Gwaiting)
1352 }
1353 return true
1354 }
1355
1356
1357 type stwReason uint8
1358
1359
1360
1361
1362 const (
1363 stwUnknown stwReason = iota
1364 stwGCMarkTerm
1365 stwGCSweepTerm
1366 stwWriteHeapDump
1367 stwGoroutineProfile
1368 stwGoroutineProfileCleanup
1369 stwAllGoroutinesStack
1370 stwReadMemStats
1371 stwAllThreadsSyscall
1372 stwGOMAXPROCS
1373 stwStartTrace
1374 stwStopTrace
1375 stwForTestCountPagesInUse
1376 stwForTestReadMetricsSlow
1377 stwForTestReadMemStatsSlow
1378 stwForTestPageCachePagesLeaked
1379 stwForTestResetDebugLog
1380 )
1381
1382 func (r stwReason) String() string {
1383 return stwReasonStrings[r]
1384 }
1385
1386 func (r stwReason) isGC() bool {
1387 return r == stwGCMarkTerm || r == stwGCSweepTerm
1388 }
1389
1390
1391
1392
1393 var stwReasonStrings = [...]string{
1394 stwUnknown: "unknown",
1395 stwGCMarkTerm: "GC mark termination",
1396 stwGCSweepTerm: "GC sweep termination",
1397 stwWriteHeapDump: "write heap dump",
1398 stwGoroutineProfile: "goroutine profile",
1399 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1400 stwAllGoroutinesStack: "all goroutines stack trace",
1401 stwReadMemStats: "read mem stats",
1402 stwAllThreadsSyscall: "AllThreadsSyscall",
1403 stwGOMAXPROCS: "GOMAXPROCS",
1404 stwStartTrace: "start trace",
1405 stwStopTrace: "stop trace",
1406 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1407 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1408 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1409 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1410 stwForTestResetDebugLog: "ResetDebugLog (test)",
1411 }
1412
1413
1414
1415 type worldStop struct {
1416 reason stwReason
1417 startedStopping int64
1418 finishedStopping int64
1419 stoppingCPUTime int64
1420 }
1421
1422
1423
1424
1425 var stopTheWorldContext worldStop
1426
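// stopTheWorld stops all Ps from executing goroutines, interrupting goroutines at
// GC safe points, and returns timing bookkeeping for the stop. It acquires
// worldsema; the caller must eventually call startTheWorld to release it.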
1444 func stopTheWorld(reason stwReason) worldStop {
1445 semacquire(&worldsema)
1446 gp := getg()
1447 gp.m.preemptoff = reason.String()
1448 systemstack(func() {
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463 casGToWaitingForGC(gp, _Grunning, waitReasonStoppingTheWorld)
1464 stopTheWorldContext = stopTheWorldWithSema(reason)
1465 casgstatus(gp, _Gwaiting, _Grunning)
1466 })
1467 return stopTheWorldContext
1468 }
1469
1470
1471
1472
1473 func startTheWorld(w worldStop) {
1474 systemstack(func() { startTheWorldWithSema(0, w) })
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491 mp := acquirem()
1492 mp.preemptoff = ""
1493 semrelease1(&worldsema, true, 0)
1494 releasem(mp)
1495 }
1496
1497
1498
1499
1500 func stopTheWorldGC(reason stwReason) worldStop {
1501 semacquire(&gcsema)
1502 return stopTheWorld(reason)
1503 }
1504
1505
1506
1507
1508 func startTheWorldGC(w worldStop) {
1509 startTheWorld(w)
1510 semrelease(&gcsema)
1511 }
1512
1513
1514 var worldsema uint32 = 1
1515
1516
1517
1518
1519
1520
1521
1522 var gcsema uint32 = 1
1523
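// stopTheWorldWithSema is the core implementation of stopTheWorld. The caller is
// responsible for acquiring worldsema and disabling preemption first, and must
// call startTheWorldWithSema when done. It runs on the system stack.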
1554 func stopTheWorldWithSema(reason stwReason) worldStop {
1555 trace := traceAcquire()
1556 if trace.ok() {
1557 trace.STWStart(reason)
1558 traceRelease(trace)
1559 }
1560 gp := getg()
1561
1562
1563
1564 if gp.m.locks > 0 {
1565 throw("stopTheWorld: holding locks")
1566 }
1567
1568 lock(&sched.lock)
1569 start := nanotime()
1570 sched.stopwait = gomaxprocs
1571 sched.gcwaiting.Store(true)
1572 preemptall()
1573
1574 gp.m.p.ptr().status = _Pgcstop
1575 gp.m.p.ptr().gcStopTime = start
1576 sched.stopwait--
1577
1578 trace = traceAcquire()
1579 for _, pp := range allp {
1580 s := pp.status
1581 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1582 if trace.ok() {
1583 trace.ProcSteal(pp, false)
1584 }
1585 pp.syscalltick++
1586 pp.gcStopTime = nanotime()
1587 sched.stopwait--
1588 }
1589 }
1590 if trace.ok() {
1591 traceRelease(trace)
1592 }
1593
1594
1595 now := nanotime()
1596 for {
1597 pp, _ := pidleget(now)
1598 if pp == nil {
1599 break
1600 }
1601 pp.status = _Pgcstop
1602 pp.gcStopTime = nanotime()
1603 sched.stopwait--
1604 }
1605 wait := sched.stopwait > 0
1606 unlock(&sched.lock)
1607
1608
1609 if wait {
1610 for {
1611
1612 if notetsleep(&sched.stopnote, 100*1000) {
1613 noteclear(&sched.stopnote)
1614 break
1615 }
1616 preemptall()
1617 }
1618 }
1619
1620 finish := nanotime()
1621 startTime := finish - start
1622 if reason.isGC() {
1623 sched.stwStoppingTimeGC.record(startTime)
1624 } else {
1625 sched.stwStoppingTimeOther.record(startTime)
1626 }
1627
1628
1629
1630
1631
1632 stoppingCPUTime := int64(0)
1633 bad := ""
1634 if sched.stopwait != 0 {
1635 bad = "stopTheWorld: not stopped (stopwait != 0)"
1636 } else {
1637 for _, pp := range allp {
1638 if pp.status != _Pgcstop {
1639 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1640 }
1641 if pp.gcStopTime == 0 && bad == "" {
1642 bad = "stopTheWorld: broken CPU time accounting"
1643 }
1644 stoppingCPUTime += finish - pp.gcStopTime
1645 pp.gcStopTime = 0
1646 }
1647 }
1648 if freezing.Load() {
1649
1650
1651
1652
1653 lock(&deadlock)
1654 lock(&deadlock)
1655 }
1656 if bad != "" {
1657 throw(bad)
1658 }
1659
1660 worldStopped()
1661
1662 return worldStop{
1663 reason: reason,
1664 startedStopping: start,
1665 finishedStopping: finish,
1666 stoppingCPUTime: stoppingCPUTime,
1667 }
1668 }
1669
1670
1671
1672
1673
1674
1675
1676 func startTheWorldWithSema(now int64, w worldStop) int64 {
1677 assertWorldStopped()
1678
1679 mp := acquirem()
1680 if netpollinited() {
1681 list, delta := netpoll(0)
1682 injectglist(&list)
1683 netpollAdjustWaiters(delta)
1684 }
1685 lock(&sched.lock)
1686
1687 procs := gomaxprocs
1688 if newprocs != 0 {
1689 procs = newprocs
1690 newprocs = 0
1691 }
1692 p1 := procresize(procs)
1693 sched.gcwaiting.Store(false)
1694 if sched.sysmonwait.Load() {
1695 sched.sysmonwait.Store(false)
1696 notewakeup(&sched.sysmonnote)
1697 }
1698 unlock(&sched.lock)
1699
1700 worldStarted()
1701
1702 for p1 != nil {
1703 p := p1
1704 p1 = p1.link.ptr()
1705 if p.m != 0 {
1706 mp := p.m.ptr()
1707 p.m = 0
1708 if mp.nextp != 0 {
1709 throw("startTheWorld: inconsistent mp->nextp")
1710 }
1711 mp.nextp.set(p)
1712 notewakeup(&mp.park)
1713 } else {
1714
1715 newm(nil, p, -1)
1716 }
1717 }
1718
1719
1720 if now == 0 {
1721 now = nanotime()
1722 }
1723 totalTime := now - w.startedStopping
1724 if w.reason.isGC() {
1725 sched.stwTotalTimeGC.record(totalTime)
1726 } else {
1727 sched.stwTotalTimeOther.record(totalTime)
1728 }
1729 trace := traceAcquire()
1730 if trace.ok() {
1731 trace.STWDone()
1732 traceRelease(trace)
1733 }
1734
1735
1736
1737
1738 wakep()
1739
1740 releasem(mp)
1741
1742 return now
1743 }
1744
1745
1746
1747 func usesLibcall() bool {
1748 switch GOOS {
1749 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1750 return true
1751 case "openbsd":
1752 return GOARCH != "mips64"
1753 }
1754 return false
1755 }
1756
1757
1758
1759 func mStackIsSystemAllocated() bool {
1760 switch GOOS {
1761 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1762 return true
1763 case "openbsd":
1764 return GOARCH != "mips64"
1765 }
1766 return false
1767 }
1768
1769
1770
1771 func mstart()
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
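// mstart0 is the Go entry point for new Ms. It establishes g0's stack bounds
// (for OS-allocated stacks, using an estimate based on the current SP) and calls
// mstart1; if mstart1 returns, the thread is torn down via mexit.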
1782 func mstart0() {
1783 gp := getg()
1784
1785 osStack := gp.stack.lo == 0
1786 if osStack {
1787
1788
1789
1790
1791
1792
1793
1794
1795 size := gp.stack.hi
1796 if size == 0 {
1797 size = 16384 * sys.StackGuardMultiplier
1798 }
1799 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1800 gp.stack.lo = gp.stack.hi - size + 1024
1801 }
1802
1803
1804 gp.stackguard0 = gp.stack.lo + stackGuard
1805
1806
1807 gp.stackguard1 = gp.stackguard0
1808 mstart1()
1809
1810
1811 if mStackIsSystemAllocated() {
1812
1813
1814
1815 osStack = true
1816 }
1817 mexit(osStack)
1818 }
1819
1820
1821
1822
1823
1824 func mstart1() {
1825 gp := getg()
1826
1827 if gp != gp.m.g0 {
1828 throw("bad runtime·mstart")
1829 }
1830
1831
1832
1833
1834
1835
1836
1837 gp.sched.g = guintptr(unsafe.Pointer(gp))
1838 gp.sched.pc = sys.GetCallerPC()
1839 gp.sched.sp = sys.GetCallerSP()
1840
1841 asminit()
1842 minit()
1843
1844
1845
1846 if gp.m == &m0 {
1847 mstartm0()
1848 }
1849
1850 if debug.dataindependenttiming == 1 {
1851 sys.EnableDIT()
1852 }
1853
1854 if fn := gp.m.mstartfn; fn != nil {
1855 fn()
1856 }
1857
1858 if gp.m != &m0 {
1859 acquirep(gp.m.nextp.ptr())
1860 gp.m.nextp = 0
1861 }
1862 schedule()
1863 }
1864
1865
1866
1867
1868
1869
1870
1871 func mstartm0() {
1872
1873
1874
1875 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1876 cgoHasExtraM = true
1877 newextram()
1878 }
1879 initsig(false)
1880 }
1881
1882
1883
1884
1885 func mPark() {
1886 gp := getg()
1887 notesleep(&gp.m.park)
1888 noteclear(&gp.m.park)
1889 }
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
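// mexit tears down and exits the current thread; it is entered holding a P, which
// it releases and hands off. The main thread (m0) cannot exit, so it is parked
// forever instead. osStack reports whether the g0 stack was allocated by the OS
// and therefore must not be freed here.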
1901 func mexit(osStack bool) {
1902 mp := getg().m
1903
1904 if mp == &m0 {
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916 handoffp(releasep())
1917 lock(&sched.lock)
1918 sched.nmfreed++
1919 checkdead()
1920 unlock(&sched.lock)
1921 mPark()
1922 throw("locked m0 woke up")
1923 }
1924
1925 sigblock(true)
1926 unminit()
1927
1928
1929 if mp.gsignal != nil {
1930 stackfree(mp.gsignal.stack)
1931
1932
1933
1934
1935 mp.gsignal = nil
1936 }
1937
1938
1939 vgetrandomDestroy(mp)
1940
1941
1942 lock(&sched.lock)
1943 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1944 if *pprev == mp {
1945 *pprev = mp.alllink
1946 goto found
1947 }
1948 }
1949 throw("m not found in allm")
1950 found:
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965 mp.freeWait.Store(freeMWait)
1966 mp.freelink = sched.freem
1967 sched.freem = mp
1968 unlock(&sched.lock)
1969
1970 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
1971 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
1972
1973
1974 handoffp(releasep())
1975
1976
1977
1978
1979
1980 lock(&sched.lock)
1981 sched.nmfreed++
1982 checkdead()
1983 unlock(&sched.lock)
1984
1985 if GOOS == "darwin" || GOOS == "ios" {
1986
1987
1988 if mp.signalPending.Load() != 0 {
1989 pendingPreemptSignals.Add(-1)
1990 }
1991 }
1992
1993
1994
1995 mdestroy(mp)
1996
1997 if osStack {
1998
1999 mp.freeWait.Store(freeMRef)
2000
2001
2002
2003 return
2004 }
2005
2006
2007
2008
2009
2010 exitThread(&mp.freeWait)
2011 }
2012
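// forEachP calls fn(p) for every P at a GC safe point, using the scheduler's
// safePointFn mechanism, and does not return until every P has run fn.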
2023 func forEachP(reason waitReason, fn func(*p)) {
2024 systemstack(func() {
2025 gp := getg().m.curg
2026
2027
2028
2029
2030
2031
2032
2033
2034 casGToWaitingForGC(gp, _Grunning, reason)
2035 forEachPInternal(fn)
2036 casgstatus(gp, _Gwaiting, _Grunning)
2037 })
2038 }
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049 func forEachPInternal(fn func(*p)) {
2050 mp := acquirem()
2051 pp := getg().m.p.ptr()
2052
2053 lock(&sched.lock)
2054 if sched.safePointWait != 0 {
2055 throw("forEachP: sched.safePointWait != 0")
2056 }
2057 sched.safePointWait = gomaxprocs - 1
2058 sched.safePointFn = fn
2059
2060
2061 for _, p2 := range allp {
2062 if p2 != pp {
2063 atomic.Store(&p2.runSafePointFn, 1)
2064 }
2065 }
2066 preemptall()
2067
2068
2069
2070
2071
2072
2073
2074 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2075 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2076 fn(p)
2077 sched.safePointWait--
2078 }
2079 }
2080
2081 wait := sched.safePointWait > 0
2082 unlock(&sched.lock)
2083
2084
2085 fn(pp)
2086
2087
2088
2089 for _, p2 := range allp {
2090 s := p2.status
2091
2092
2093
2094 trace := traceAcquire()
2095 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
2096 if trace.ok() {
2097
2098 trace.ProcSteal(p2, false)
2099 traceRelease(trace)
2100 }
2101 p2.syscalltick++
2102 handoffp(p2)
2103 } else if trace.ok() {
2104 traceRelease(trace)
2105 }
2106 }
2107
2108
2109 if wait {
2110 for {
2111
2112
2113
2114
2115 if notetsleep(&sched.safePointNote, 100*1000) {
2116 noteclear(&sched.safePointNote)
2117 break
2118 }
2119 preemptall()
2120 }
2121 }
2122 if sched.safePointWait != 0 {
2123 throw("forEachP: not done")
2124 }
2125 for _, p2 := range allp {
2126 if p2.runSafePointFn != 0 {
2127 throw("forEachP: P did not run fn")
2128 }
2129 }
2130
2131 lock(&sched.lock)
2132 sched.safePointFn = nil
2133 unlock(&sched.lock)
2134 releasem(mp)
2135 }
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148 func runSafePointFn() {
2149 p := getg().m.p.ptr()
2150
2151
2152
2153 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2154 return
2155 }
2156 sched.safePointFn(p)
2157 lock(&sched.lock)
2158 sched.safePointWait--
2159 if sched.safePointWait == 0 {
2160 notewakeup(&sched.safePointNote)
2161 }
2162 unlock(&sched.lock)
2163 }
2164
2165
2166
2167
2168 var cgoThreadStart unsafe.Pointer
2169
2170 type cgothreadstart struct {
2171 g guintptr
2172 tls *uint64
2173 fn unsafe.Pointer
2174 }
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
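// allocm allocates a new m unassociated with any OS thread. fn is recorded as the
// new m's mstartfn; pp, if needed, is temporarily acquired for allocation; id is a
// pre-reserved M ID, or -1 to reserve a new one. It also frees g0 stacks of
// previously exited Ms queued on sched.freem.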
2185 func allocm(pp *p, fn func(), id int64) *m {
2186 allocmLock.rlock()
2187
2188
2189
2190
2191 acquirem()
2192
2193 gp := getg()
2194 if gp.m.p == 0 {
2195 acquirep(pp)
2196 }
2197
2198
2199
2200 if sched.freem != nil {
2201 lock(&sched.lock)
2202 var newList *m
2203 for freem := sched.freem; freem != nil; {
2204
2205 wait := freem.freeWait.Load()
2206 if wait == freeMWait {
2207 next := freem.freelink
2208 freem.freelink = newList
2209 newList = freem
2210 freem = next
2211 continue
2212 }
2213
2214
2215
2216 if traceEnabled() || traceShuttingDown() {
2217 traceThreadDestroy(freem)
2218 }
2219
2220
2221
2222 if wait == freeMStack {
2223
2224
2225
2226 systemstack(func() {
2227 stackfree(freem.g0.stack)
2228 })
2229 }
2230 freem = freem.freelink
2231 }
2232 sched.freem = newList
2233 unlock(&sched.lock)
2234 }
2235
2236 mp := new(m)
2237 mp.mstartfn = fn
2238 mcommoninit(mp, id)
2239
2240
2241
2242 if iscgo || mStackIsSystemAllocated() {
2243 mp.g0 = malg(-1)
2244 } else {
2245 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2246 }
2247 mp.g0.m = mp
2248
2249 if pp == gp.m.p.ptr() {
2250 releasep()
2251 }
2252
2253 releasem(gp.m)
2254 allocmLock.runlock()
2255 return mp
2256 }
2257
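// needm is called on a thread not created by Go (e.g. a C thread making a cgo
// callback, or a signal handler on such a thread). It takes an m from the extra-M
// list and installs it as the current thread's m so that Go code can run; dropm
// undoes this when the callback returns.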
2297 func needm(signal bool) {
2298 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2299
2300
2301
2302
2303
2304
2305 writeErrStr("fatal error: cgo callback before cgo call\n")
2306 exit(1)
2307 }
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317 var sigmask sigset
2318 sigsave(&sigmask)
2319 sigblock(false)
2320
2321
2322
2323
2324 mp, last := getExtraM()
2325
2326
2327
2328
2329
2330
2331
2332
2333 mp.needextram = last
2334
2335
2336 mp.sigmask = sigmask
2337
2338
2339
2340 osSetupTLS(mp)
2341
2342
2343
2344 setg(mp.g0)
2345 sp := sys.GetCallerSP()
2346 callbackUpdateSystemStack(mp, sp, signal)
2347
2348
2349
2350
2351 mp.isExtraInC = false
2352
2353
2354 asminit()
2355 minit()
2356
2357
2358
2359
2360
2361
2362 var trace traceLocker
2363 if !signal {
2364 trace = traceAcquire()
2365 }
2366
2367
2368 casgstatus(mp.curg, _Gdead, _Gsyscall)
2369 sched.ngsys.Add(-1)
2370
2371 if !signal {
2372 if trace.ok() {
2373 trace.GoCreateSyscall(mp.curg)
2374 traceRelease(trace)
2375 }
2376 }
2377 mp.isExtraInSig = signal
2378 }
2379
2380
2381
2382
2383 func needAndBindM() {
2384 needm(false)
2385
2386 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2387 cgoBindM()
2388 }
2389 }
2390
2391
2392
2393
2394 func newextram() {
2395 c := extraMWaiters.Swap(0)
2396 if c > 0 {
2397 for i := uint32(0); i < c; i++ {
2398 oneNewExtraM()
2399 }
2400 } else if extraMLength.Load() == 0 {
2401
2402 oneNewExtraM()
2403 }
2404 }
2405
2406
2407 func oneNewExtraM() {
2408
2409
2410
2411
2412
2413 mp := allocm(nil, nil, -1)
2414 gp := malg(4096)
2415 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2416 gp.sched.sp = gp.stack.hi
2417 gp.sched.sp -= 4 * goarch.PtrSize
2418 gp.sched.lr = 0
2419 gp.sched.g = guintptr(unsafe.Pointer(gp))
2420 gp.syscallpc = gp.sched.pc
2421 gp.syscallsp = gp.sched.sp
2422 gp.stktopsp = gp.sched.sp
2423
2424
2425
2426
2427 casgstatus(gp, _Gidle, _Gdead)
2428 gp.m = mp
2429 mp.curg = gp
2430 mp.isextra = true
2431
2432 mp.isExtraInC = true
2433 mp.lockedInt++
2434 mp.lockedg.set(gp)
2435 gp.lockedm.set(mp)
2436 gp.goid = sched.goidgen.Add(1)
2437 if raceenabled {
2438 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2439 }
2440
2441 allgadd(gp)
2442
2443
2444
2445
2446
2447 sched.ngsys.Add(1)
2448
2449
2450 addExtraM(mp)
2451 }
2452
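// dropm undoes needm: it saves and clears per-thread state, then returns the
// current extra m to the extra-M list so it can be reused by a later cgo callback.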
2486 func dropm() {
2487
2488
2489
2490 mp := getg().m
2491
2492
2493
2494
2495
2496 var trace traceLocker
2497 if !mp.isExtraInSig {
2498 trace = traceAcquire()
2499 }
2500
2501
2502 casgstatus(mp.curg, _Gsyscall, _Gdead)
2503 mp.curg.preemptStop = false
2504 sched.ngsys.Add(1)
2505
2506 if !mp.isExtraInSig {
2507 if trace.ok() {
2508 trace.GoDestroySyscall()
2509 traceRelease(trace)
2510 }
2511 }
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526 mp.syscalltick--
2527
2528
2529
2530 mp.curg.trace.reset()
2531
2532
2533
2534
2535 if traceEnabled() || traceShuttingDown() {
2536
2537
2538
2539
2540
2541
2542
2543 lock(&sched.lock)
2544 traceThreadDestroy(mp)
2545 unlock(&sched.lock)
2546 }
2547 mp.isExtraInSig = false
2548
2549
2550
2551
2552
2553 sigmask := mp.sigmask
2554 sigblock(false)
2555 unminit()
2556
2557 setg(nil)
2558
2559
2560
2561 g0 := mp.g0
2562 g0.stack.hi = 0
2563 g0.stack.lo = 0
2564 g0.stackguard0 = 0
2565 g0.stackguard1 = 0
2566 mp.g0StackAccurate = false
2567
2568 putExtraM(mp)
2569
2570 msigrestore(sigmask)
2571 }
2572
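// cgoBindM stores g0 of the current m into a C thread-local variable via
// _cgo_bindm, binding this m to the current C thread so later callbacks on the
// same thread reuse it instead of taking an m from the extra-M list.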
2593 func cgoBindM() {
2594 if GOOS == "windows" || GOOS == "plan9" {
2595 fatal("bindm in unexpected GOOS")
2596 }
2597 g := getg()
2598 if g.m.g0 != g {
2599 fatal("the current g is not g0")
2600 }
2601 if _cgo_bindm != nil {
2602 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2603 }
2604 }
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617 func getm() uintptr {
2618 return uintptr(unsafe.Pointer(getg().m))
2619 }
2620
2621 var (
2622
2623
2624
2625
2626
2627
2628 extraM atomic.Uintptr
2629
2630 extraMLength atomic.Uint32
2631
2632 extraMWaiters atomic.Uint32
2633
2634
2635 extraMInUse atomic.Uint32
2636 )
2637
2638
2639
2640
2641
2642
2643
2644
2645 func lockextra(nilokay bool) *m {
2646 const locked = 1
2647
2648 incr := false
2649 for {
2650 old := extraM.Load()
2651 if old == locked {
2652 osyield_no_g()
2653 continue
2654 }
2655 if old == 0 && !nilokay {
2656 if !incr {
2657
2658
2659
2660 extraMWaiters.Add(1)
2661 incr = true
2662 }
2663 usleep_no_g(1)
2664 continue
2665 }
2666 if extraM.CompareAndSwap(old, locked) {
2667 return (*m)(unsafe.Pointer(old))
2668 }
2669 osyield_no_g()
2670 continue
2671 }
2672 }
2673
2674
2675 func unlockextra(mp *m, delta int32) {
2676 extraMLength.Add(delta)
2677 extraM.Store(uintptr(unsafe.Pointer(mp)))
2678 }
2679
2680
2681
2682
2683
2684
2685
2686
2687 func getExtraM() (mp *m, last bool) {
2688 mp = lockextra(false)
2689 extraMInUse.Add(1)
2690 unlockextra(mp.schedlink.ptr(), -1)
2691 return mp, mp.schedlink.ptr() == nil
2692 }
2693
2694
2695
2696
2697
2698 func putExtraM(mp *m) {
2699 extraMInUse.Add(-1)
2700 addExtraM(mp)
2701 }
2702
2703
2704
2705
2706 func addExtraM(mp *m) {
2707 mnext := lockextra(true)
2708 mp.schedlink.set(mnext)
2709 unlockextra(mp, 1)
2710 }
2711
2712 var (
2713
2714
2715
2716 allocmLock rwmutex
2717
2718
2719
2720
2721 execLock rwmutex
2722 )
2723
2724
2725
2726 const (
2727 failthreadcreate = "runtime: failed to create new OS thread\n"
2728 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2729 )
2730
2731
2732
2733
2734 var newmHandoff struct {
2735 lock mutex
2736
2737
2738
2739 newm muintptr
2740
2741
2742
2743 waiting bool
2744 wake note
2745
2746
2747
2748
2749 haveTemplateThread uint32
2750 }
2751
2752
2753
2754
2755
2756
2757
2758
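// newm creates a new m that starts by running fn (or the scheduler if fn is nil),
// with pp as the P to hand it. If the calling thread is locked to a goroutine or
// is in a cgo call, thread creation is delegated to the template thread so the new
// OS thread starts from a clean state.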
2759 func newm(fn func(), pp *p, id int64) {
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770 acquirem()
2771
2772 mp := allocm(pp, fn, id)
2773 mp.nextp.set(pp)
2774 mp.sigmask = initSigmask
2775 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787 lock(&newmHandoff.lock)
2788 if newmHandoff.haveTemplateThread == 0 {
2789 throw("on a locked thread with no template thread")
2790 }
2791 mp.schedlink = newmHandoff.newm
2792 newmHandoff.newm.set(mp)
2793 if newmHandoff.waiting {
2794 newmHandoff.waiting = false
2795 notewakeup(&newmHandoff.wake)
2796 }
2797 unlock(&newmHandoff.lock)
2798
2799
2800
2801 releasem(getg().m)
2802 return
2803 }
2804 newm1(mp)
2805 releasem(getg().m)
2806 }
2807
2808 func newm1(mp *m) {
2809 if iscgo {
2810 var ts cgothreadstart
2811 if _cgo_thread_start == nil {
2812 throw("_cgo_thread_start missing")
2813 }
2814 ts.g.set(mp.g0)
2815 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2816 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2817 if msanenabled {
2818 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2819 }
2820 if asanenabled {
2821 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2822 }
2823 execLock.rlock()
2824 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2825 execLock.runlock()
2826 return
2827 }
2828 execLock.rlock()
2829 newosproc(mp)
2830 execLock.runlock()
2831 }
2832
2833
2834
2835
2836
2837 func startTemplateThread() {
2838 if GOARCH == "wasm" {
2839 return
2840 }
2841
2842
2843
2844 mp := acquirem()
2845 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2846 releasem(mp)
2847 return
2848 }
2849 newm(templateThread, nil, -1)
2850 releasem(mp)
2851 }
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
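// templateThread runs on an m with a known-good thread state and creates new OS
// threads on behalf of ms that cannot safely do so themselves (see newm). It
// never returns.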
2865 func templateThread() {
2866 lock(&sched.lock)
2867 sched.nmsys++
2868 checkdead()
2869 unlock(&sched.lock)
2870
2871 for {
2872 lock(&newmHandoff.lock)
2873 for newmHandoff.newm != 0 {
2874 newm := newmHandoff.newm.ptr()
2875 newmHandoff.newm = 0
2876 unlock(&newmHandoff.lock)
2877 for newm != nil {
2878 next := newm.schedlink.ptr()
2879 newm.schedlink = 0
2880 newm1(newm)
2881 newm = next
2882 }
2883 lock(&newmHandoff.lock)
2884 }
2885 newmHandoff.waiting = true
2886 noteclear(&newmHandoff.wake)
2887 unlock(&newmHandoff.lock)
2888 notesleep(&newmHandoff.wake)
2889 }
2890 }
2891
2892
2893
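// stopm puts the current m on the idle m list and parks it until some other m
// hands it a P to run; it returns with that P acquired.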
2894 func stopm() {
2895 gp := getg()
2896
2897 if gp.m.locks != 0 {
2898 throw("stopm holding locks")
2899 }
2900 if gp.m.p != 0 {
2901 throw("stopm holding p")
2902 }
2903 if gp.m.spinning {
2904 throw("stopm spinning")
2905 }
2906
2907 lock(&sched.lock)
2908 mput(gp.m)
2909 unlock(&sched.lock)
2910 mPark()
2911 acquirep(gp.m.nextp.ptr())
2912 gp.m.nextp = 0
2913 }
2914
2915 func mspinning() {
2916
2917 getg().m.spinning = true
2918 }
2919
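// startm schedules some m to run pp (creating a new m if necessary). If pp is nil
// it tries to grab an idle P, and returns doing nothing if there isn't one.
// spinning indicates the new m is counted as spinning; lockheld indicates the
// caller already holds sched.lock.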
2937 func startm(pp *p, spinning, lockheld bool) {
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954 mp := acquirem()
2955 if !lockheld {
2956 lock(&sched.lock)
2957 }
2958 if pp == nil {
2959 if spinning {
2960
2961
2962
2963 throw("startm: P required for spinning=true")
2964 }
2965 pp, _ = pidleget(0)
2966 if pp == nil {
2967 if !lockheld {
2968 unlock(&sched.lock)
2969 }
2970 releasem(mp)
2971 return
2972 }
2973 }
2974 nmp := mget()
2975 if nmp == nil {
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990 id := mReserveID()
2991 unlock(&sched.lock)
2992
2993 var fn func()
2994 if spinning {
2995
2996 fn = mspinning
2997 }
2998 newm(fn, pp, id)
2999
3000 if lockheld {
3001 lock(&sched.lock)
3002 }
3003
3004
3005 releasem(mp)
3006 return
3007 }
3008 if !lockheld {
3009 unlock(&sched.lock)
3010 }
3011 if nmp.spinning {
3012 throw("startm: m is spinning")
3013 }
3014 if nmp.nextp != 0 {
3015 throw("startm: m has p")
3016 }
3017 if spinning && !runqempty(pp) {
3018 throw("startm: p has runnable gs")
3019 }
3020
3021 nmp.spinning = spinning
3022 nmp.nextp.set(pp)
3023 notewakeup(&nmp.park)
3024
3025
3026 releasem(mp)
3027 }
3028
3029
3030
3031
3032
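// handoffp hands off ownership of pp from a syscall or a locked m. It starts an m
// to run pp if pp has local or global work, GC work, or if no other m is spinning;
// otherwise it parks pp on the idle list (waking the netpoller if pp has timers).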
3033 func handoffp(pp *p) {
3034
3035
3036
3037
3038 if !runqempty(pp) || sched.runqsize != 0 {
3039 startm(pp, false, false)
3040 return
3041 }
3042
3043 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3044 startm(pp, false, false)
3045 return
3046 }
3047
3048 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
3049 startm(pp, false, false)
3050 return
3051 }
3052
3053
3054 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3055 sched.needspinning.Store(0)
3056 startm(pp, true, false)
3057 return
3058 }
3059 lock(&sched.lock)
3060 if sched.gcwaiting.Load() {
3061 pp.status = _Pgcstop
3062 pp.gcStopTime = nanotime()
3063 sched.stopwait--
3064 if sched.stopwait == 0 {
3065 notewakeup(&sched.stopnote)
3066 }
3067 unlock(&sched.lock)
3068 return
3069 }
3070 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3071 sched.safePointFn(pp)
3072 sched.safePointWait--
3073 if sched.safePointWait == 0 {
3074 notewakeup(&sched.safePointNote)
3075 }
3076 }
3077 if sched.runqsize != 0 {
3078 unlock(&sched.lock)
3079 startm(pp, false, false)
3080 return
3081 }
3082
3083
3084 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3085 unlock(&sched.lock)
3086 startm(pp, false, false)
3087 return
3088 }
3089
3090
3091
3092 when := pp.timers.wakeTime()
3093 pidleput(pp, 0)
3094 unlock(&sched.lock)
3095
3096 if when != 0 {
3097 wakeNetPoller(when)
3098 }
3099 }
3100
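// wakep tries to add one more spinning m to run pending work. It is called when a
// goroutine is made runnable, and does nothing if enough ms are already spinning
// or no P is idle.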
3114 func wakep() {
3115
3116
3117 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3118 return
3119 }
3120
3121
3122
3123
3124
3125
3126 mp := acquirem()
3127
3128 var pp *p
3129 lock(&sched.lock)
3130 pp, _ = pidlegetSpinning(0)
3131 if pp == nil {
3132 if sched.nmspinning.Add(-1) < 0 {
3133 throw("wakep: negative nmspinning")
3134 }
3135 unlock(&sched.lock)
3136 releasem(mp)
3137 return
3138 }
3139
3140
3141
3142
3143 unlock(&sched.lock)
3144
3145 startm(pp, true, false)
3146
3147 releasem(mp)
3148 }
3149
3150
3151
3152 func stoplockedm() {
3153 gp := getg()
3154
3155 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3156 throw("stoplockedm: inconsistent locking")
3157 }
3158 if gp.m.p != 0 {
3159
3160 pp := releasep()
3161 handoffp(pp)
3162 }
3163 incidlelocked(1)
3164
3165 mPark()
3166 status := readgstatus(gp.m.lockedg.ptr())
3167 if status&^_Gscan != _Grunnable {
3168 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3169 dumpgstatus(gp.m.lockedg.ptr())
3170 throw("stoplockedm: not runnable")
3171 }
3172 acquirep(gp.m.nextp.ptr())
3173 gp.m.nextp = 0
3174 }
3175
3176
3177
3178
3179
3180 func startlockedm(gp *g) {
3181 mp := gp.lockedm.ptr()
3182 if mp == getg().m {
3183 throw("startlockedm: locked to me")
3184 }
3185 if mp.nextp != 0 {
3186 throw("startlockedm: m has p")
3187 }
3188
3189 incidlelocked(-1)
3190 pp := releasep()
3191 mp.nextp.set(pp)
3192 notewakeup(&mp.park)
3193 stopm()
3194 }
3195
3196
3197
3198 func gcstopm() {
3199 gp := getg()
3200
3201 if !sched.gcwaiting.Load() {
3202 throw("gcstopm: not waiting for gc")
3203 }
3204 if gp.m.spinning {
3205 gp.m.spinning = false
3206
3207
3208 if sched.nmspinning.Add(-1) < 0 {
3209 throw("gcstopm: negative nmspinning")
3210 }
3211 }
3212 pp := releasep()
3213 lock(&sched.lock)
3214 pp.status = _Pgcstop
3215 pp.gcStopTime = nanotime()
3216 sched.stopwait--
3217 if sched.stopwait == 0 {
3218 notewakeup(&sched.stopnote)
3219 }
3220 unlock(&sched.lock)
3221 stopm()
3222 }
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
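// execute runs gp on the current m: it marks gp _Grunning, charges the time slice
// unless inheritTime is set, and jumps to gp's saved context with gogo. It never
// returns.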
3233 func execute(gp *g, inheritTime bool) {
3234 mp := getg().m
3235
3236 if goroutineProfile.active {
3237
3238
3239
3240 tryRecordGoroutineProfile(gp, nil, osyield)
3241 }
3242
3243
3244
3245 mp.curg = gp
3246 gp.m = mp
3247 casgstatus(gp, _Grunnable, _Grunning)
3248 gp.waitsince = 0
3249 gp.preempt = false
3250 gp.stackguard0 = gp.stack.lo + stackGuard
3251 if !inheritTime {
3252 mp.p.ptr().schedtick++
3253 }
3254
3255
3256 hz := sched.profilehz
3257 if mp.profilehz != hz {
3258 setThreadCPUProfiler(hz)
3259 }
3260
3261 trace := traceAcquire()
3262 if trace.ok() {
3263 trace.GoStart()
3264 traceRelease(trace)
3265 }
3266
3267 gogo(&gp.sched)
3268 }
3269
3270
3271
3272
3273
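// findRunnable finds a runnable goroutine to execute, blocking (parking the m) if
// necessary. It consults, roughly in order: the trace reader, GC mark workers, the
// local and global run queues, netpoll, and work stealing from other Ps. tryWakeP
// indicates the returned goroutine is special (e.g. a GC worker or the trace
// reader) so the caller should wake an additional P.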
3274 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3275 mp := getg().m
3276
3277
3278
3279
3280
3281 top:
3282 pp := mp.p.ptr()
3283 if sched.gcwaiting.Load() {
3284 gcstopm()
3285 goto top
3286 }
3287 if pp.runSafePointFn != 0 {
3288 runSafePointFn()
3289 }
3290
3291
3292
3293
3294
3295 now, pollUntil, _ := pp.timers.check(0)
3296
3297
3298 if traceEnabled() || traceShuttingDown() {
3299 gp := traceReader()
3300 if gp != nil {
3301 trace := traceAcquire()
3302 casgstatus(gp, _Gwaiting, _Grunnable)
3303 if trace.ok() {
3304 trace.GoUnpark(gp, 0)
3305 traceRelease(trace)
3306 }
3307 return gp, false, true
3308 }
3309 }
3310
3311
3312 if gcBlackenEnabled != 0 {
3313 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3314 if gp != nil {
3315 return gp, false, true
3316 }
3317 now = tnow
3318 }
3319
3320
3321
3322
3323 if pp.schedtick%61 == 0 && sched.runqsize > 0 {
3324 lock(&sched.lock)
3325 gp := globrunqget(pp, 1)
3326 unlock(&sched.lock)
3327 if gp != nil {
3328 return gp, false, false
3329 }
3330 }
3331
3332
3333 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3334 if gp := wakefing(); gp != nil {
3335 ready(gp, 0, true)
3336 }
3337 }
3338 if *cgo_yield != nil {
3339 asmcgocall(*cgo_yield, nil)
3340 }
3341
3342
3343 if gp, inheritTime := runqget(pp); gp != nil {
3344 return gp, inheritTime, false
3345 }
3346
3347
3348 if sched.runqsize != 0 {
3349 lock(&sched.lock)
3350 gp := globrunqget(pp, 0)
3351 unlock(&sched.lock)
3352 if gp != nil {
3353 return gp, false, false
3354 }
3355 }
3356
3357
3358
3359
3360
3361
3362
3363
3364 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3365 if list, delta := netpoll(0); !list.empty() {
3366 gp := list.pop()
3367 injectglist(&list)
3368 netpollAdjustWaiters(delta)
3369 trace := traceAcquire()
3370 casgstatus(gp, _Gwaiting, _Grunnable)
3371 if trace.ok() {
3372 trace.GoUnpark(gp, 0)
3373 traceRelease(trace)
3374 }
3375 return gp, false, false
3376 }
3377 }
3378
3379
3380
3381
3382
3383
3384 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3385 if !mp.spinning {
3386 mp.becomeSpinning()
3387 }
3388
3389 gp, inheritTime, tnow, w, newWork := stealWork(now)
3390 if gp != nil {
3391
3392 return gp, inheritTime, false
3393 }
3394 if newWork {
3395
3396
3397 goto top
3398 }
3399
3400 now = tnow
3401 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3402
3403 pollUntil = w
3404 }
3405 }
3406
3407
3408
3409
3410
3411 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
3412 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3413 if node != nil {
3414 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3415 gp := node.gp.ptr()
3416
3417 trace := traceAcquire()
3418 casgstatus(gp, _Gwaiting, _Grunnable)
3419 if trace.ok() {
3420 trace.GoUnpark(gp, 0)
3421 traceRelease(trace)
3422 }
3423 return gp, false, false
3424 }
3425 gcController.removeIdleMarkWorker()
3426 }
3427
3428
3429
3430
3431
3432 gp, otherReady := beforeIdle(now, pollUntil)
3433 if gp != nil {
3434 trace := traceAcquire()
3435 casgstatus(gp, _Gwaiting, _Grunnable)
3436 if trace.ok() {
3437 trace.GoUnpark(gp, 0)
3438 traceRelease(trace)
3439 }
3440 return gp, false, false
3441 }
3442 if otherReady {
3443 goto top
3444 }
3445
3446
3447
3448
3449
3450 allpSnapshot := allp
3451
3452
3453 idlepMaskSnapshot := idlepMask
3454 timerpMaskSnapshot := timerpMask
3455
3456
3457 lock(&sched.lock)
3458 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3459 unlock(&sched.lock)
3460 goto top
3461 }
3462 if sched.runqsize != 0 {
3463 gp := globrunqget(pp, 0)
3464 unlock(&sched.lock)
3465 return gp, false, false
3466 }
3467 if !mp.spinning && sched.needspinning.Load() == 1 {
3468
3469 mp.becomeSpinning()
3470 unlock(&sched.lock)
3471 goto top
3472 }
3473 if releasep() != pp {
3474 throw("findrunnable: wrong p")
3475 }
3476 now = pidleput(pp, now)
3477 unlock(&sched.lock)
3478
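// Delicate dance: we have dropped our P and are about to go from spinning to
// non-spinning, which can race with new work being submitted. Before parking, the
// checks below therefore re-examine the global run queue, per-P run queues, idle
// GC work, and timers one more time.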
3515 wasSpinning := mp.spinning
3516 if mp.spinning {
3517 mp.spinning = false
3518 if sched.nmspinning.Add(-1) < 0 {
3519 throw("findrunnable: negative nmspinning")
3520 }
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533 lock(&sched.lock)
3534 if sched.runqsize != 0 {
3535 pp, _ := pidlegetSpinning(0)
3536 if pp != nil {
3537 gp := globrunqget(pp, 0)
3538 if gp == nil {
3539 throw("global runq empty with non-zero runqsize")
3540 }
3541 unlock(&sched.lock)
3542 acquirep(pp)
3543 mp.becomeSpinning()
3544 return gp, false, false
3545 }
3546 }
3547 unlock(&sched.lock)
3548
3549 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3550 if pp != nil {
3551 acquirep(pp)
3552 mp.becomeSpinning()
3553 goto top
3554 }
3555
3556
3557 pp, gp := checkIdleGCNoP()
3558 if pp != nil {
3559 acquirep(pp)
3560 mp.becomeSpinning()
3561
3562
3563 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3564 trace := traceAcquire()
3565 casgstatus(gp, _Gwaiting, _Grunnable)
3566 if trace.ok() {
3567 trace.GoUnpark(gp, 0)
3568 traceRelease(trace)
3569 }
3570 return gp, false, false
3571 }
3572
3573
3574
3575
3576
3577
3578
3579 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3580 }
3581
3582
3583 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3584 sched.pollUntil.Store(pollUntil)
3585 if mp.p != 0 {
3586 throw("findrunnable: netpoll with p")
3587 }
3588 if mp.spinning {
3589 throw("findrunnable: netpoll with spinning")
3590 }
3591 delay := int64(-1)
3592 if pollUntil != 0 {
3593 if now == 0 {
3594 now = nanotime()
3595 }
3596 delay = pollUntil - now
3597 if delay < 0 {
3598 delay = 0
3599 }
3600 }
3601 if faketime != 0 {
3602
3603 delay = 0
3604 }
3605 list, delta := netpoll(delay)
3606
3607 now = nanotime()
3608 sched.pollUntil.Store(0)
3609 sched.lastpoll.Store(now)
3610 if faketime != 0 && list.empty() {
3611
3612
3613 stopm()
3614 goto top
3615 }
3616 lock(&sched.lock)
3617 pp, _ := pidleget(now)
3618 unlock(&sched.lock)
3619 if pp == nil {
3620 injectglist(&list)
3621 netpollAdjustWaiters(delta)
3622 } else {
3623 acquirep(pp)
3624 if !list.empty() {
3625 gp := list.pop()
3626 injectglist(&list)
3627 netpollAdjustWaiters(delta)
3628 trace := traceAcquire()
3629 casgstatus(gp, _Gwaiting, _Grunnable)
3630 if trace.ok() {
3631 trace.GoUnpark(gp, 0)
3632 traceRelease(trace)
3633 }
3634 return gp, false, false
3635 }
3636 if wasSpinning {
3637 mp.becomeSpinning()
3638 }
3639 goto top
3640 }
3641 } else if pollUntil != 0 && netpollinited() {
3642 pollerPollUntil := sched.pollUntil.Load()
3643 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3644 netpollBreak()
3645 }
3646 }
3647 stopm()
3648 goto top
3649 }
3650
3651
3652
3653
3654
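// pollWork reports whether there is non-background work this P could
// be doing. This is a fairly lightweight check to be used for
// background work loops, like idle GC. It checks a subset of the
// conditions checked by the actual scheduler.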
3655 func pollWork() bool {
3656 if sched.runqsize != 0 {
3657 return true
3658 }
3659 p := getg().m.p.ptr()
3660 if !runqempty(p) {
3661 return true
3662 }
3663 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3664 if list, delta := netpoll(0); !list.empty() {
3665 injectglist(&list)
3666 netpollAdjustWaiters(delta)
3667 return true
3668 }
3669 }
3670 return false
3671 }
3672
3673
3674
3675
3676
3677
3678
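// stealWork attempts to steal a runnable goroutine or timer from any P.
//
// If newWork is true, new work may have been readied.
//
// If now is not 0 it is the current time. stealWork returns the passed time
// or the current time if now was passed as 0.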
3679 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3680 pp := getg().m.p.ptr()
3681
3682 ranTimer := false
3683
3684 const stealTries = 4
3685 for i := 0; i < stealTries; i++ {
3686 stealTimersOrRunNextG := i == stealTries-1
3687
3688 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3689 if sched.gcwaiting.Load() {
3690
3691 return nil, false, now, pollUntil, true
3692 }
3693 p2 := allp[enum.position()]
3694 if pp == p2 {
3695 continue
3696 }
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3712 tnow, w, ran := p2.timers.check(now)
3713 now = tnow
3714 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3715 pollUntil = w
3716 }
3717 if ran {
3718
3719
3720
3721
3722
3723
3724
3725
3726 if gp, inheritTime := runqget(pp); gp != nil {
3727 return gp, inheritTime, now, pollUntil, ranTimer
3728 }
3729 ranTimer = true
3730 }
3731 }
3732
3733
3734 if !idlepMask.read(enum.position()) {
3735 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3736 return gp, false, now, pollUntil, ranTimer
3737 }
3738 }
3739 }
3740 }
3741
3742
3743
3744
3745 return nil, false, now, pollUntil, ranTimer
3746 }
3747
3748
3749
3750
3751
3752
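// Check all Ps for a runnable G to steal.
//
// On entry we have no P. If a G is available to steal and a P is available,
// the P is returned which the caller should acquire and attempt to steal the
// work to.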
3753 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3754 for id, p2 := range allpSnapshot {
3755 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3756 lock(&sched.lock)
3757 pp, _ := pidlegetSpinning(0)
3758 if pp == nil {
3759
3760 unlock(&sched.lock)
3761 return nil
3762 }
3763 unlock(&sched.lock)
3764 return pp
3765 }
3766 }
3767
3768
3769 return nil
3770 }
3771
3772
3773
3774
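// Check all Ps for a timer expiring sooner than pollUntil.
//
// Returns updated pollUntil value.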
3775 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3776 for id, p2 := range allpSnapshot {
3777 if timerpMaskSnapshot.read(uint32(id)) {
3778 w := p2.timers.wakeTime()
3779 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3780 pollUntil = w
3781 }
3782 }
3783 }
3784
3785 return pollUntil
3786 }
3787
3788
3789
3790
3791
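// Check for idle-priority GC work, without a P on entry.
//
// If some GC work, a P, and a worker G are all available, the P and G will be
// returned. The returned P has not been wired yet.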
3792 func checkIdleGCNoP() (*p, *g) {
3793
3794
3795
3796
3797
3798
3799 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3800 return nil, nil
3801 }
3802 if !gcMarkWorkAvailable(nil) {
3803 return nil, nil
3804 }
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823 lock(&sched.lock)
3824 pp, now := pidlegetSpinning(0)
3825 if pp == nil {
3826 unlock(&sched.lock)
3827 return nil, nil
3828 }
3829
3830
3831 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3832 pidleput(pp, now)
3833 unlock(&sched.lock)
3834 return nil, nil
3835 }
3836
3837 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3838 if node == nil {
3839 pidleput(pp, now)
3840 unlock(&sched.lock)
3841 gcController.removeIdleMarkWorker()
3842 return nil, nil
3843 }
3844
3845 unlock(&sched.lock)
3846
3847 return pp, node.gp.ptr()
3848 }
3849
3850
3851
3852
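// wakeNetPoller wakes up the thread sleeping in the network poller if it isn't
// going to wake up before the when argument; or it wakes an idle P to service
// timers and the network poller if there isn't one already.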
3853 func wakeNetPoller(when int64) {
3854 if sched.lastpoll.Load() == 0 {
3855
3856
3857
3858
3859 pollerPollUntil := sched.pollUntil.Load()
3860 if pollerPollUntil == 0 || pollerPollUntil > when {
3861 netpollBreak()
3862 }
3863 } else {
3864
3865
3866 if GOOS != "plan9" {
3867 wakep()
3868 }
3869 }
3870 }
3871
3872 func resetspinning() {
3873 gp := getg()
3874 if !gp.m.spinning {
3875 throw("resetspinning: not a spinning m")
3876 }
3877 gp.m.spinning = false
3878 nmspinning := sched.nmspinning.Add(-1)
3879 if nmspinning < 0 {
3880 throw("findrunnable: negative nmspinning")
3881 }
3882
3883
3884
3885 wakep()
3886 }
3887
3888
3889
3890
3891
3892
3893
3894
3895
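// injectglist adds each runnable G on the list to some run queue,
// and clears glist. If there is no current P, they are added to the
// global queue, and up to npidle M's are started to run them.
// Otherwise, for each idle P, this adds a G to the global queue
// and starts an M. Any remaining G's are added to the current P's
// local run queue.
// This may temporarily acquire sched.lock.
// Can run concurrently with GC.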
3896 func injectglist(glist *gList) {
3897 if glist.empty() {
3898 return
3899 }
3900
3901
3902
3903 head := glist.head.ptr()
3904 var tail *g
3905 qsize := 0
3906 trace := traceAcquire()
3907 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3908 tail = gp
3909 qsize++
3910 casgstatus(gp, _Gwaiting, _Grunnable)
3911 if trace.ok() {
3912 trace.GoUnpark(gp, 0)
3913 }
3914 }
3915 if trace.ok() {
3916 traceRelease(trace)
3917 }
3918
3919
3920 var q gQueue
3921 q.head.set(head)
3922 q.tail.set(tail)
3923 *glist = gList{}
3924
3925 startIdle := func(n int) {
3926 for i := 0; i < n; i++ {
3927 mp := acquirem()
3928 lock(&sched.lock)
3929
3930 pp, _ := pidlegetSpinning(0)
3931 if pp == nil {
3932 unlock(&sched.lock)
3933 releasem(mp)
3934 break
3935 }
3936
3937 startm(pp, false, true)
3938 unlock(&sched.lock)
3939 releasem(mp)
3940 }
3941 }
3942
3943 pp := getg().m.p.ptr()
3944 if pp == nil {
3945 lock(&sched.lock)
3946 globrunqputbatch(&q, int32(qsize))
3947 unlock(&sched.lock)
3948 startIdle(qsize)
3949 return
3950 }
3951
3952 npidle := int(sched.npidle.Load())
3953 var (
3954 globq gQueue
3955 n int
3956 )
3957 for n = 0; n < npidle && !q.empty(); n++ {
3958 g := q.pop()
3959 globq.pushBack(g)
3960 }
3961 if n > 0 {
3962 lock(&sched.lock)
3963 globrunqputbatch(&globq, int32(n))
3964 unlock(&sched.lock)
3965 startIdle(n)
3966 qsize -= n
3967 }
3968
3969 if !q.empty() {
3970 runqputbatch(pp, &q, qsize)
3971 }
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986 wakep()
3987 }
3988
3989
3990
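// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.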
3991 func schedule() {
3992 mp := getg().m
3993
3994 if mp.locks != 0 {
3995 throw("schedule: holding locks")
3996 }
3997
3998 if mp.lockedg != 0 {
3999 stoplockedm()
4000 execute(mp.lockedg.ptr(), false)
4001 }
4002
4003
4004
4005 if mp.incgo {
4006 throw("schedule: in cgo")
4007 }
4008
4009 top:
4010 pp := mp.p.ptr()
4011 pp.preempt = false
4012
4013
4014
4015
4016 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4017 throw("schedule: spinning with local work")
4018 }
4019
4020 gp, inheritTime, tryWakeP := findRunnable()
4021
4022 if debug.dontfreezetheworld > 0 && freezing.Load() {
4023
4024
4025
4026
4027
4028
4029
4030 lock(&deadlock)
4031 lock(&deadlock)
4032 }
4033
4034
4035
4036
4037 if mp.spinning {
4038 resetspinning()
4039 }
4040
4041 if sched.disable.user && !schedEnabled(gp) {
4042
4043
4044
4045 lock(&sched.lock)
4046 if schedEnabled(gp) {
4047
4048
4049 unlock(&sched.lock)
4050 } else {
4051 sched.disable.runnable.pushBack(gp)
4052 sched.disable.n++
4053 unlock(&sched.lock)
4054 goto top
4055 }
4056 }
4057
4058
4059
4060 if tryWakeP {
4061 wakep()
4062 }
4063 if gp.lockedm != 0 {
4064
4065
4066 startlockedm(gp)
4067 goto top
4068 }
4069
4070 execute(gp, inheritTime)
4071 }
4072
4073
4074
4075
4076
4077
4078
4079
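// dropg removes the association between m and the current goroutine m->curg (gp for short).
// Typically a caller sets gp's status away from Grunning and then
// immediately calls dropg to finish the job. The caller is also responsible
// for arranging that gp will be restarted using ready at an
// appropriate time.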
4080 func dropg() {
4081 gp := getg()
4082
4083 setMNoWB(&gp.m.curg.m, nil)
4084 setGNoWB(&gp.m.curg, nil)
4085 }
4086
4087 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4088 unlock((*mutex)(lock))
4089 return true
4090 }
4091
4092
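// park continuation on g0.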
4093 func park_m(gp *g) {
4094 mp := getg().m
4095
4096 trace := traceAcquire()
4097
4098
4099
4100
4101
4102 sg := gp.syncGroup
4103 if sg != nil {
4104 sg.incActive()
4105 }
4106
4107 if trace.ok() {
4108
4109
4110
4111 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4112 }
4113
4114
4115 casgstatus(gp, _Grunning, _Gwaiting)
4116 if trace.ok() {
4117 traceRelease(trace)
4118 }
4119
4120 dropg()
4121
4122 if fn := mp.waitunlockf; fn != nil {
4123 ok := fn(gp, mp.waitlock)
4124 mp.waitunlockf = nil
4125 mp.waitlock = nil
4126 if !ok {
4127 trace := traceAcquire()
4128 casgstatus(gp, _Gwaiting, _Grunnable)
4129 if sg != nil {
4130 sg.decActive()
4131 }
4132 if trace.ok() {
4133 trace.GoUnpark(gp, 2)
4134 traceRelease(trace)
4135 }
4136 execute(gp, true)
4137 }
4138 }
4139
4140 if sg != nil {
4141 sg.decActive()
4142 }
4143
4144 schedule()
4145 }
4146
4147 func goschedImpl(gp *g, preempted bool) {
4148 trace := traceAcquire()
4149 status := readgstatus(gp)
4150 if status&^_Gscan != _Grunning {
4151 dumpgstatus(gp)
4152 throw("bad g status")
4153 }
4154 if trace.ok() {
4155
4156
4157
4158 if preempted {
4159 trace.GoPreempt()
4160 } else {
4161 trace.GoSched()
4162 }
4163 }
4164 casgstatus(gp, _Grunning, _Grunnable)
4165 if trace.ok() {
4166 traceRelease(trace)
4167 }
4168
4169 dropg()
4170 lock(&sched.lock)
4171 globrunqput(gp)
4172 unlock(&sched.lock)
4173
4174 if mainStarted {
4175 wakep()
4176 }
4177
4178 schedule()
4179 }
4180
4181
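// Gosched continuation on g0.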
4182 func gosched_m(gp *g) {
4183 goschedImpl(gp, false)
4184 }
4185
4186
4187 func goschedguarded_m(gp *g) {
4188 if !canPreemptM(gp.m) {
4189 gogo(&gp.sched)
4190 }
4191 goschedImpl(gp, false)
4192 }
4193
4194 func gopreempt_m(gp *g) {
4195 goschedImpl(gp, true)
4196 }
4197
4198
4199
4200
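// preemptPark parks gp and puts it in _Gpreempted.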
4201 func preemptPark(gp *g) {
4202 status := readgstatus(gp)
4203 if status&^_Gscan != _Grunning {
4204 dumpgstatus(gp)
4205 throw("bad g status")
4206 }
4207
4208 if gp.asyncSafePoint {
4209
4210
4211
4212 f := findfunc(gp.sched.pc)
4213 if !f.valid() {
4214 throw("preempt at unknown pc")
4215 }
4216 if f.flag&abi.FuncFlagSPWrite != 0 {
4217 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4218 throw("preempt SPWRITE")
4219 }
4220 }
4221
4222
4223
4224
4225
4226
4227
4228 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4229 dropg()
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246 trace := traceAcquire()
4247 if trace.ok() {
4248 trace.GoPark(traceBlockPreempted, 0)
4249 }
4250 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4251 if trace.ok() {
4252 traceRelease(trace)
4253 }
4254 schedule()
4255 }
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
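// goyield is like Gosched, but it:
//   - emits a GoPreempt trace event instead of a GoSched trace event
//   - puts the current G on the runq of the current P instead of the globrunq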
4271 func goyield() {
4272 checkTimeouts()
4273 mcall(goyield_m)
4274 }
4275
4276 func goyield_m(gp *g) {
4277 trace := traceAcquire()
4278 pp := gp.m.p.ptr()
4279 if trace.ok() {
4280
4281
4282
4283 trace.GoPreempt()
4284 }
4285 casgstatus(gp, _Grunning, _Grunnable)
4286 if trace.ok() {
4287 traceRelease(trace)
4288 }
4289 dropg()
4290 runqput(pp, gp, false)
4291 schedule()
4292 }
4293
4294
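// Finishes execution of the current goroutine.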
4295 func goexit1() {
4296 if raceenabled {
4297 if gp := getg(); gp.syncGroup != nil {
4298 racereleasemergeg(gp, gp.syncGroup.raceaddr())
4299 }
4300 racegoend()
4301 }
4302 trace := traceAcquire()
4303 if trace.ok() {
4304 trace.GoEnd()
4305 traceRelease(trace)
4306 }
4307 mcall(goexit0)
4308 }
4309
4310
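// goexit continuation on g0.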
4311 func goexit0(gp *g) {
4312 gdestroy(gp)
4313 schedule()
4314 }
4315
4316 func gdestroy(gp *g) {
4317 mp := getg().m
4318 pp := mp.p.ptr()
4319
4320 casgstatus(gp, _Grunning, _Gdead)
4321 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4322 if isSystemGoroutine(gp, false) {
4323 sched.ngsys.Add(-1)
4324 }
4325 gp.m = nil
4326 locked := gp.lockedm != 0
4327 gp.lockedm = 0
4328 mp.lockedg = 0
4329 gp.preemptStop = false
4330 gp.paniconfault = false
4331 gp._defer = nil
4332 gp._panic = nil
4333 gp.writebuf = nil
4334 gp.waitreason = waitReasonZero
4335 gp.param = nil
4336 gp.labels = nil
4337 gp.timer = nil
4338 gp.syncGroup = nil
4339
4340 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4341
4342
4343
4344 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4345 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4346 gcController.bgScanCredit.Add(scanCredit)
4347 gp.gcAssistBytes = 0
4348 }
4349
4350 dropg()
4351
4352 if GOARCH == "wasm" {
4353 gfput(pp, gp)
4354 return
4355 }
4356
4357 if locked && mp.lockedInt != 0 {
4358 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4359 if mp.isextra {
4360 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4361 }
4362 throw("exited a goroutine internally locked to the OS thread")
4363 }
4364 gfput(pp, gp)
4365 if locked {
4366
4367
4368
4369
4370
4371
4372 if GOOS != "plan9" {
4373 gogo(&mp.g0.sched)
4374 } else {
4375
4376
4377 mp.lockedExt = 0
4378 }
4379 }
4380 }
4381
4382
4383
4384
4385
4386
4387
4388
4389
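// save updates getg().sched to refer to pc and sp so that a following
// gogo will restore pc/sp.
//
// save must not have write barriers because invoking a write barrier
// can clobber getg().sched.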
4390 func save(pc, sp, bp uintptr) {
4391 gp := getg()
4392
4393 if gp == gp.m.g0 || gp == gp.m.gsignal {
4394
4395
4396
4397
4398
4399 throw("save on system g not allowed")
4400 }
4401
4402 gp.sched.pc = pc
4403 gp.sched.sp = sp
4404 gp.sched.lr = 0
4405 gp.sched.ret = 0
4406 gp.sched.bp = bp
4407
4408
4409
4410 if gp.sched.ctxt != nil {
4411 badctxt()
4412 }
4413 }
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
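// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the save must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
//
// Nothing entersyscall calls can split the stack either.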
4439 func reentersyscall(pc, sp, bp uintptr) {
4440 trace := traceAcquire()
4441 gp := getg()
4442
4443
4444
4445 gp.m.locks++
4446
4447
4448
4449
4450
4451 gp.stackguard0 = stackPreempt
4452 gp.throwsplit = true
4453
4454
4455 save(pc, sp, bp)
4456 gp.syscallsp = sp
4457 gp.syscallpc = pc
4458 gp.syscallbp = bp
4459 casgstatus(gp, _Grunning, _Gsyscall)
4460 if staticLockRanking {
4461
4462
4463 save(pc, sp, bp)
4464 }
4465 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4466 systemstack(func() {
4467 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4468 throw("entersyscall")
4469 })
4470 }
4471 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4472 systemstack(func() {
4473 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4474 throw("entersyscall")
4475 })
4476 }
4477
4478 if trace.ok() {
4479 systemstack(func() {
4480 trace.GoSysCall()
4481 traceRelease(trace)
4482 })
4483
4484
4485
4486 save(pc, sp, bp)
4487 }
4488
4489 if sched.sysmonwait.Load() {
4490 systemstack(entersyscall_sysmon)
4491 save(pc, sp, bp)
4492 }
4493
4494 if gp.m.p.ptr().runSafePointFn != 0 {
4495
4496 systemstack(runSafePointFn)
4497 save(pc, sp, bp)
4498 }
4499
4500 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4501 pp := gp.m.p.ptr()
4502 pp.m = 0
4503 gp.m.oldp.set(pp)
4504 gp.m.p = 0
4505 atomic.Store(&pp.status, _Psyscall)
4506 if sched.gcwaiting.Load() {
4507 systemstack(entersyscall_gcwait)
4508 save(pc, sp, bp)
4509 }
4510
4511 gp.m.locks--
4512 }
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
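// Standard syscall entry used by the go syscall library and normal cgo calls.
//
// This is exported via linkname to assembly in the syscall package and x/sys.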
4528 func entersyscall() {
4529
4530
4531
4532
4533 fp := getcallerfp()
4534 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4535 }
4536
4537 func entersyscall_sysmon() {
4538 lock(&sched.lock)
4539 if sched.sysmonwait.Load() {
4540 sched.sysmonwait.Store(false)
4541 notewakeup(&sched.sysmonnote)
4542 }
4543 unlock(&sched.lock)
4544 }
4545
4546 func entersyscall_gcwait() {
4547 gp := getg()
4548 pp := gp.m.oldp.ptr()
4549
4550 lock(&sched.lock)
4551 trace := traceAcquire()
4552 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4553 if trace.ok() {
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563 trace.ProcSteal(pp, true)
4564 traceRelease(trace)
4565 }
4566 pp.gcStopTime = nanotime()
4567 pp.syscalltick++
4568 if sched.stopwait--; sched.stopwait == 0 {
4569 notewakeup(&sched.stopnote)
4570 }
4571 } else if trace.ok() {
4572 traceRelease(trace)
4573 }
4574 unlock(&sched.lock)
4575 }
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587
4588
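// The same as entersyscall(), but with a hint that the syscall is blocking.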
4589 func entersyscallblock() {
4590 gp := getg()
4591
4592 gp.m.locks++
4593 gp.throwsplit = true
4594 gp.stackguard0 = stackPreempt
4595 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4596 gp.m.p.ptr().syscalltick++
4597
4598
4599 pc := sys.GetCallerPC()
4600 sp := sys.GetCallerSP()
4601 bp := getcallerfp()
4602 save(pc, sp, bp)
4603 gp.syscallsp = gp.sched.sp
4604 gp.syscallpc = gp.sched.pc
4605 gp.syscallbp = gp.sched.bp
4606 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4607 sp1 := sp
4608 sp2 := gp.sched.sp
4609 sp3 := gp.syscallsp
4610 systemstack(func() {
4611 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4612 throw("entersyscallblock")
4613 })
4614 }
4615 casgstatus(gp, _Grunning, _Gsyscall)
4616 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4617 systemstack(func() {
4618 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4619 throw("entersyscallblock")
4620 })
4621 }
4622 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4623 systemstack(func() {
4624 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4625 throw("entersyscallblock")
4626 })
4627 }
4628
4629 systemstack(entersyscallblock_handoff)
4630
4631
4632 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4633
4634 gp.m.locks--
4635 }
4636
4637 func entersyscallblock_handoff() {
4638 trace := traceAcquire()
4639 if trace.ok() {
4640 trace.GoSysCall()
4641 traceRelease(trace)
4642 }
4643 handoffp(releasep())
4644 }
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
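// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
//
// Write barriers are not allowed because our P may have been stolen.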
4666 func exitsyscall() {
4667 gp := getg()
4668
4669 gp.m.locks++
4670 if sys.GetCallerSP() > gp.syscallsp {
4671 throw("exitsyscall: syscall frame is no longer valid")
4672 }
4673
4674 gp.waitsince = 0
4675 oldp := gp.m.oldp.ptr()
4676 gp.m.oldp = 0
4677 if exitsyscallfast(oldp) {
4678
4679
4680 if goroutineProfile.active {
4681
4682
4683
4684 systemstack(func() {
4685 tryRecordGoroutineProfileWB(gp)
4686 })
4687 }
4688 trace := traceAcquire()
4689 if trace.ok() {
4690 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4691 systemstack(func() {
4692
4693
4694
4695
4696 trace.GoSysExit(lostP)
4697 if lostP {
4698
4699
4700
4701
4702 trace.GoStart()
4703 }
4704 })
4705 }
4706
4707 gp.m.p.ptr().syscalltick++
4708
4709 casgstatus(gp, _Gsyscall, _Grunning)
4710 if trace.ok() {
4711 traceRelease(trace)
4712 }
4713
4714
4715
4716 gp.syscallsp = 0
4717 gp.m.locks--
4718 if gp.preempt {
4719
4720 gp.stackguard0 = stackPreempt
4721 } else {
4722
4723 gp.stackguard0 = gp.stack.lo + stackGuard
4724 }
4725 gp.throwsplit = false
4726
4727 if sched.disable.user && !schedEnabled(gp) {
4728
4729 Gosched()
4730 }
4731
4732 return
4733 }
4734
4735 gp.m.locks--
4736
4737
4738 mcall(exitsyscall0)
4739
4740
4741
4742
4743
4744
4745
4746 gp.syscallsp = 0
4747 gp.m.p.ptr().syscalltick++
4748 gp.throwsplit = false
4749 }
4750
4751
4752 func exitsyscallfast(oldp *p) bool {
4753
4754 if sched.stopwait == freezeStopWait {
4755 return false
4756 }
4757
4758
4759 trace := traceAcquire()
4760 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4761
4762 wirep(oldp)
4763 exitsyscallfast_reacquired(trace)
4764 if trace.ok() {
4765 traceRelease(trace)
4766 }
4767 return true
4768 }
4769 if trace.ok() {
4770 traceRelease(trace)
4771 }
4772
4773
4774 if sched.pidle != 0 {
4775 var ok bool
4776 systemstack(func() {
4777 ok = exitsyscallfast_pidle()
4778 })
4779 if ok {
4780 return true
4781 }
4782 }
4783 return false
4784 }
4785
4786
4787
4788
4789
4790
4791 func exitsyscallfast_reacquired(trace traceLocker) {
4792 gp := getg()
4793 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4794 if trace.ok() {
4795
4796
4797
4798 systemstack(func() {
4799
4800
4801 trace.ProcSteal(gp.m.p.ptr(), true)
4802 trace.ProcStart()
4803 })
4804 }
4805 gp.m.p.ptr().syscalltick++
4806 }
4807 }
4808
4809 func exitsyscallfast_pidle() bool {
4810 lock(&sched.lock)
4811 pp, _ := pidleget(0)
4812 if pp != nil && sched.sysmonwait.Load() {
4813 sched.sysmonwait.Store(false)
4814 notewakeup(&sched.sysmonnote)
4815 }
4816 unlock(&sched.lock)
4817 if pp != nil {
4818 acquirep(pp)
4819 return true
4820 }
4821 return false
4822 }
4823
4824
4825
4826
4827
4828
4829
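// exitsyscall slow path on g0.
// Failed to acquire P, enqueue gp as runnable.
//
// Called via mcall, so gp is the calling g from this M.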
4830 func exitsyscall0(gp *g) {
4831 var trace traceLocker
4832 traceExitingSyscall()
4833 trace = traceAcquire()
4834 casgstatus(gp, _Gsyscall, _Grunnable)
4835 traceExitedSyscall()
4836 if trace.ok() {
4837
4838
4839
4840
4841 trace.GoSysExit(true)
4842 traceRelease(trace)
4843 }
4844 dropg()
4845 lock(&sched.lock)
4846 var pp *p
4847 if schedEnabled(gp) {
4848 pp, _ = pidleget(0)
4849 }
4850 var locked bool
4851 if pp == nil {
4852 globrunqput(gp)
4853
4854
4855
4856
4857
4858
4859 locked = gp.lockedm != 0
4860 } else if sched.sysmonwait.Load() {
4861 sched.sysmonwait.Store(false)
4862 notewakeup(&sched.sysmonnote)
4863 }
4864 unlock(&sched.lock)
4865 if pp != nil {
4866 acquirep(pp)
4867 execute(gp, false)
4868 }
4869 if locked {
4870
4871
4872
4873
4874 stoplockedm()
4875 execute(gp, false)
4876 }
4877 stopm()
4878 schedule()
4879 }
4880
4881
4882
4883
4884
4885
4886
4887
4888
4889
4890
4891
4892
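// Called from syscall package before fork.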
4893 func syscall_runtime_BeforeFork() {
4894 gp := getg().m.curg
4895
4896
4897
4898
4899 gp.m.locks++
4900 sigsave(&gp.m.sigmask)
4901 sigblock(false)
4902
4903
4904
4905
4906
4907 gp.stackguard0 = stackFork
4908 }
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919
4920
4921
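// Called from syscall package after fork in parent.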
4922 func syscall_runtime_AfterFork() {
4923 gp := getg().m.curg
4924
4925
4926 gp.stackguard0 = gp.stack.lo + stackGuard
4927
4928 msigrestore(gp.m.sigmask)
4929
4930 gp.m.locks--
4931 }
4932
4933
4934
4935 var inForkedChild bool
4936
4937
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947
4948
4949
4950
4951
4952
4953
4954
4955
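// Called from syscall package after fork in child.
// It resets non-sigignored signals to the default handler, and
// restores the signal mask in preparation for the exec.
//
// Because this might be called during a vfork, and therefore may be
// temporarily sharing address space with the parent process, this must
// not change any global variables or calling into C code that may do so.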
4956 func syscall_runtime_AfterForkInChild() {
4957
4958
4959
4960
4961 inForkedChild = true
4962
4963 clearSignalHandlers()
4964
4965
4966
4967 msigrestore(getg().m.sigmask)
4968
4969 inForkedChild = false
4970 }
4971
4972
4973
4974
4975 var pendingPreemptSignals atomic.Int32
4976
4977
4978
4979
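// Called from syscall package before Exec.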
4980 func syscall_runtime_BeforeExec() {
4981
4982 execLock.lock()
4983
4984
4985
4986 if GOOS == "darwin" || GOOS == "ios" {
4987 for pendingPreemptSignals.Load() > 0 {
4988 osyield()
4989 }
4990 }
4991 }
4992
4993
4994
4995
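// Called from syscall package after Exec.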
4996 func syscall_runtime_AfterExec() {
4997 execLock.unlock()
4998 }
4999
5000
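// Allocate a new g, with a stack big enough for stacksize bytes.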
5001 func malg(stacksize int32) *g {
5002 newg := new(g)
5003 if stacksize >= 0 {
5004 stacksize = round2(stackSystem + stacksize)
5005 systemstack(func() {
5006 newg.stack = stackalloc(uint32(stacksize))
5007 })
5008 newg.stackguard0 = newg.stack.lo + stackGuard
5009 newg.stackguard1 = ^uintptr(0)
5010
5011
5012 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5013 }
5014 return newg
5015 }
5016
5017
5018
5019
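// Create a new g running fn.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.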
5020 func newproc(fn *funcval) {
5021 gp := getg()
5022 pc := sys.GetCallerPC()
5023 systemstack(func() {
5024 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5025
5026 pp := getg().m.p.ptr()
5027 runqput(pp, newg, true)
5028
5029 if mainStarted {
5030 wakep()
5031 }
5032 })
5033 }
5034
5035
5036
5037
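// Create a new g in state _Grunnable (or _Gwaiting if parked is true), starting at fn.
// callerpc is the address of the go statement that created this. The caller is responsible
// for adding the new g to the scheduler. If parked is true, waitreason must be non-zero.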
5038 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5039 if fn == nil {
5040 fatal("go of nil func value")
5041 }
5042
5043 mp := acquirem()
5044 pp := mp.p.ptr()
5045 newg := gfget(pp)
5046 if newg == nil {
5047 newg = malg(stackMin)
5048 casgstatus(newg, _Gidle, _Gdead)
5049 allgadd(newg)
5050 }
5051 if newg.stack.hi == 0 {
5052 throw("newproc1: newg missing stack")
5053 }
5054
5055 if readgstatus(newg) != _Gdead {
5056 throw("newproc1: new g is not Gdead")
5057 }
5058
5059 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5060 totalSize = alignUp(totalSize, sys.StackAlign)
5061 sp := newg.stack.hi - totalSize
5062 if usesLR {
5063
5064 *(*uintptr)(unsafe.Pointer(sp)) = 0
5065 prepGoExitFrame(sp)
5066 }
5067 if GOARCH == "arm64" {
5068
5069 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5070 }
5071
5072 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5073 newg.sched.sp = sp
5074 newg.stktopsp = sp
5075 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5076 newg.sched.g = guintptr(unsafe.Pointer(newg))
5077 gostartcallfn(&newg.sched, fn)
5078 newg.parentGoid = callergp.goid
5079 newg.gopc = callerpc
5080 newg.ancestors = saveAncestors(callergp)
5081 newg.startpc = fn.fn
5082 if isSystemGoroutine(newg, false) {
5083 sched.ngsys.Add(1)
5084 } else {
5085
5086 newg.syncGroup = callergp.syncGroup
5087 if mp.curg != nil {
5088 newg.labels = mp.curg.labels
5089 }
5090 if goroutineProfile.active {
5091
5092
5093
5094
5095
5096 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5097 }
5098 }
5099
5100 newg.trackingSeq = uint8(cheaprand())
5101 if newg.trackingSeq%gTrackingPeriod == 0 {
5102 newg.tracking = true
5103 }
5104 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5105
5106
5107 trace := traceAcquire()
5108 var status uint32 = _Grunnable
5109 if parked {
5110 status = _Gwaiting
5111 newg.waitreason = waitreason
5112 }
5113 if pp.goidcache == pp.goidcacheend {
5114
5115
5116
5117 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5118 pp.goidcache -= _GoidCacheBatch - 1
5119 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5120 }
5121 newg.goid = pp.goidcache
5122 casgstatus(newg, _Gdead, status)
5123 pp.goidcache++
5124 newg.trace.reset()
5125 if trace.ok() {
5126 trace.GoCreate(newg, newg.startpc, parked)
5127 traceRelease(trace)
5128 }
5129
5130
5131 if raceenabled {
5132 newg.racectx = racegostart(callerpc)
5133 newg.raceignore = 0
5134 if newg.labels != nil {
5135
5136
5137 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5138 }
5139 }
5140 releasem(mp)
5141
5142 return newg
5143 }
5144
5145
5146
5147
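// saveAncestors copies previous ancestors of the given caller g and
// includes info for the current caller into a new set of tracebacks for
// a g being created.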
5148 func saveAncestors(callergp *g) *[]ancestorInfo {
5149
5150 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5151 return nil
5152 }
5153 var callerAncestors []ancestorInfo
5154 if callergp.ancestors != nil {
5155 callerAncestors = *callergp.ancestors
5156 }
5157 n := int32(len(callerAncestors)) + 1
5158 if n > debug.tracebackancestors {
5159 n = debug.tracebackancestors
5160 }
5161 ancestors := make([]ancestorInfo, n)
5162 copy(ancestors[1:], callerAncestors)
5163
5164 var pcs [tracebackInnerFrames]uintptr
5165 npcs := gcallers(callergp, 0, pcs[:])
5166 ipcs := make([]uintptr, npcs)
5167 copy(ipcs, pcs[:])
5168 ancestors[0] = ancestorInfo{
5169 pcs: ipcs,
5170 goid: callergp.goid,
5171 gopc: callergp.gopc,
5172 }
5173
5174 ancestorsp := new([]ancestorInfo)
5175 *ancestorsp = ancestors
5176 return ancestorsp
5177 }
5178
5179
5180
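// Put on gfree list.
// If local list is too long, transfer a batch to the global list.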
5181 func gfput(pp *p, gp *g) {
5182 if readgstatus(gp) != _Gdead {
5183 throw("gfput: bad status (not Gdead)")
5184 }
5185
5186 stksize := gp.stack.hi - gp.stack.lo
5187
5188 if stksize != uintptr(startingStackSize) {
5189
5190 stackfree(gp.stack)
5191 gp.stack.lo = 0
5192 gp.stack.hi = 0
5193 gp.stackguard0 = 0
5194 }
5195
5196 pp.gFree.push(gp)
5197 pp.gFree.n++
5198 if pp.gFree.n >= 64 {
5199 var (
5200 inc int32
5201 stackQ gQueue
5202 noStackQ gQueue
5203 )
5204 for pp.gFree.n >= 32 {
5205 gp := pp.gFree.pop()
5206 pp.gFree.n--
5207 if gp.stack.lo == 0 {
5208 noStackQ.push(gp)
5209 } else {
5210 stackQ.push(gp)
5211 }
5212 inc++
5213 }
5214 lock(&sched.gFree.lock)
5215 sched.gFree.noStack.pushAll(noStackQ)
5216 sched.gFree.stack.pushAll(stackQ)
5217 sched.gFree.n += inc
5218 unlock(&sched.gFree.lock)
5219 }
5220 }
5221
5222
5223
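// Get from gfree list.
// If local list is empty, grab a batch from global list.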
5224 func gfget(pp *p) *g {
5225 retry:
5226 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5227 lock(&sched.gFree.lock)
5228
5229 for pp.gFree.n < 32 {
5230
5231 gp := sched.gFree.stack.pop()
5232 if gp == nil {
5233 gp = sched.gFree.noStack.pop()
5234 if gp == nil {
5235 break
5236 }
5237 }
5238 sched.gFree.n--
5239 pp.gFree.push(gp)
5240 pp.gFree.n++
5241 }
5242 unlock(&sched.gFree.lock)
5243 goto retry
5244 }
5245 gp := pp.gFree.pop()
5246 if gp == nil {
5247 return nil
5248 }
5249 pp.gFree.n--
5250 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5251
5252
5253
5254 systemstack(func() {
5255 stackfree(gp.stack)
5256 gp.stack.lo = 0
5257 gp.stack.hi = 0
5258 gp.stackguard0 = 0
5259 })
5260 }
5261 if gp.stack.lo == 0 {
5262
5263 systemstack(func() {
5264 gp.stack = stackalloc(startingStackSize)
5265 })
5266 gp.stackguard0 = gp.stack.lo + stackGuard
5267 } else {
5268 if raceenabled {
5269 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5270 }
5271 if msanenabled {
5272 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5273 }
5274 if asanenabled {
5275 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5276 }
5277 }
5278 return gp
5279 }
5280
5281
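// Purge all cached G's from gfree list to the global list.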
5282 func gfpurge(pp *p) {
5283 var (
5284 inc int32
5285 stackQ gQueue
5286 noStackQ gQueue
5287 )
5288 for !pp.gFree.empty() {
5289 gp := pp.gFree.pop()
5290 pp.gFree.n--
5291 if gp.stack.lo == 0 {
5292 noStackQ.push(gp)
5293 } else {
5294 stackQ.push(gp)
5295 }
5296 inc++
5297 }
5298 lock(&sched.gFree.lock)
5299 sched.gFree.noStack.pushAll(noStackQ)
5300 sched.gFree.stack.pushAll(stackQ)
5301 sched.gFree.n += inc
5302 unlock(&sched.gFree.lock)
5303 }
5304
5305
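// Breakpoint executes a breakpoint trap.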
5306 func Breakpoint() {
5307 breakpoint()
5308 }
5309
5310
5311
5312
5313
5314
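// dolockOSThread is called by LockOSThread and lockOSThread below
// after they modify m.locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.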
5315 func dolockOSThread() {
5316 if GOARCH == "wasm" {
5317 return
5318 }
5319 gp := getg()
5320 gp.m.lockedg.set(gp)
5321 gp.lockedm.set(gp.m)
5322 }
5323
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
5336
5337
5338
5339
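// LockOSThread wires the calling goroutine to its current operating system thread.
// The calling goroutine will always execute in that thread,
// and no other goroutine will execute in it,
// until the calling goroutine has made as many calls to
// UnlockOSThread as to LockOSThread.
// If the calling goroutine exits without unlocking the thread,
// the thread will be terminated.
//
// All init functions are run on the startup thread. Calling LockOSThread
// from an init function will cause the main function to be invoked on
// that thread.
//
// A goroutine should call LockOSThread before calling OS services or
// non-Go library functions that depend on per-thread state.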
5340 func LockOSThread() {
5341 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5342
5343
5344
5345 startTemplateThread()
5346 }
5347 gp := getg()
5348 gp.m.lockedExt++
5349 if gp.m.lockedExt == 0 {
5350 gp.m.lockedExt--
5351 panic("LockOSThread nesting overflow")
5352 }
5353 dolockOSThread()
5354 }
5355
5356
5357 func lockOSThread() {
5358 getg().m.lockedInt++
5359 dolockOSThread()
5360 }
5361
5362
5363
5364
5365
5366
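// dounlockOSThread is called by UnlockOSThread and unlockOSThread below
// after they update m->locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.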
5367 func dounlockOSThread() {
5368 if GOARCH == "wasm" {
5369 return
5370 }
5371 gp := getg()
5372 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5373 return
5374 }
5375 gp.m.lockedg = 0
5376 gp.lockedm = 0
5377 }
5378
5379
5380
5381
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392
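// UnlockOSThread undoes an earlier call to LockOSThread.
// If this drops the number of active LockOSThread calls on the
// calling goroutine to zero, it unwires the calling goroutine from
// its fixed operating system thread.
// If there are no active LockOSThread calls, this is a no-op.
//
// Before calling UnlockOSThread, the caller must ensure that the OS
// thread is suitable for running other goroutines. If the caller made
// any permanent changes to the state of the thread that would affect
// other goroutines, it should not call this function and thus leave
// the goroutine locked to the OS thread until the goroutine (and
// hence the thread) exits.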
5393 func UnlockOSThread() {
5394 gp := getg()
5395 if gp.m.lockedExt == 0 {
5396 return
5397 }
5398 gp.m.lockedExt--
5399 dounlockOSThread()
5400 }
5401
5402
5403 func unlockOSThread() {
5404 gp := getg()
5405 if gp.m.lockedInt == 0 {
5406 systemstack(badunlockosthread)
5407 }
5408 gp.m.lockedInt--
5409 dounlockOSThread()
5410 }
5411
5412 func badunlockosthread() {
5413 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5414 }
5415
5416 func gcount() int32 {
5417 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
5418 for _, pp := range allp {
5419 n -= pp.gFree.n
5420 }
5421
5422
5423
5424 if n < 1 {
5425 n = 1
5426 }
5427 return n
5428 }
5429
5430 func mcount() int32 {
5431 return int32(sched.mnext - sched.nmfreed)
5432 }
5433
5434 var prof struct {
5435 signalLock atomic.Uint32
5436
5437
5438
5439 hz atomic.Int32
5440 }
5441
5442 func _System() { _System() }
5443 func _ExternalCode() { _ExternalCode() }
5444 func _LostExternalCode() { _LostExternalCode() }
5445 func _GC() { _GC() }
5446 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5447 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5448 func _VDSO() { _VDSO() }
5449
5450
5451
5452
5453
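// Called if we receive a SIGPROF signal.
// Called by the signal handler, may run during STW.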
5454 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5455 if prof.hz.Load() == 0 {
5456 return
5457 }
5458
5459
5460
5461
5462 if mp != nil && mp.profilehz == 0 {
5463 return
5464 }
5465
5466
5467
5468
5469
5470
5471
5472 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5473 if f := findfunc(pc); f.valid() {
5474 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5475 cpuprof.lostAtomic++
5476 return
5477 }
5478 }
5479 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5480
5481
5482
5483 cpuprof.lostAtomic++
5484 return
5485 }
5486 }
5487
5488
5489
5490
5491
5492
5493
5494 getg().m.mallocing++
5495
5496 var u unwinder
5497 var stk [maxCPUProfStack]uintptr
5498 n := 0
5499 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5500 cgoOff := 0
5501
5502
5503
5504
5505
5506 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5507 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5508 cgoOff++
5509 }
5510 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5511 mp.cgoCallers[0] = 0
5512 }
5513
5514
5515 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5516 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5517
5518
5519 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5520 } else if mp != nil && mp.vdsoSP != 0 {
5521
5522
5523 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5524 } else {
5525 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5526 }
5527 n += tracebackPCs(&u, 0, stk[n:])
5528
5529 if n <= 0 {
5530
5531
5532 n = 2
5533 if inVDSOPage(pc) {
5534 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5535 } else if pc > firstmoduledata.etext {
5536
5537 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5538 }
5539 stk[0] = pc
5540 if mp.preemptoff != "" {
5541 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5542 } else {
5543 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5544 }
5545 }
5546
5547 if prof.hz.Load() != 0 {
5548
5549
5550
5551 var tagPtr *unsafe.Pointer
5552 if gp != nil && gp.m != nil && gp.m.curg != nil {
5553 tagPtr = &gp.m.curg.labels
5554 }
5555 cpuprof.add(tagPtr, stk[:n])
5556
5557 gprof := gp
5558 var mp *m
5559 var pp *p
5560 if gp != nil && gp.m != nil {
5561 if gp.m.curg != nil {
5562 gprof = gp.m.curg
5563 }
5564 mp = gp.m
5565 pp = gp.m.p.ptr()
5566 }
5567 traceCPUSample(gprof, mp, pp, stk[:n])
5568 }
5569 getg().m.mallocing--
5570 }
5571
5572
5573
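// setcpuprofilerate sets the CPU profiling rate to hz times per second.
// If hz <= 0, setcpuprofilerate turns off CPU profiling.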
5574 func setcpuprofilerate(hz int32) {
5575
5576 if hz < 0 {
5577 hz = 0
5578 }
5579
5580
5581
5582 gp := getg()
5583 gp.m.locks++
5584
5585
5586
5587
5588 setThreadCPUProfiler(0)
5589
5590 for !prof.signalLock.CompareAndSwap(0, 1) {
5591 osyield()
5592 }
5593 if prof.hz.Load() != hz {
5594 setProcessCPUProfiler(hz)
5595 prof.hz.Store(hz)
5596 }
5597 prof.signalLock.Store(0)
5598
5599 lock(&sched.lock)
5600 sched.profilehz = hz
5601 unlock(&sched.lock)
5602
5603 if hz != 0 {
5604 setThreadCPUProfiler(hz)
5605 }
5606
5607 gp.m.locks--
5608 }
5609
5610
5611
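// init initializes pp, which may be a freshly allocated p or a
// previously destroyed p, and transitions it to status _Pgcstop.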
5612 func (pp *p) init(id int32) {
5613 pp.id = id
5614 pp.status = _Pgcstop
5615 pp.sudogcache = pp.sudogbuf[:0]
5616 pp.deferpool = pp.deferpoolbuf[:0]
5617 pp.wbBuf.reset()
5618 if pp.mcache == nil {
5619 if id == 0 {
5620 if mcache0 == nil {
5621 throw("missing mcache?")
5622 }
5623
5624
5625 pp.mcache = mcache0
5626 } else {
5627 pp.mcache = allocmcache()
5628 }
5629 }
5630 if raceenabled && pp.raceprocctx == 0 {
5631 if id == 0 {
5632 pp.raceprocctx = raceprocctx0
5633 raceprocctx0 = 0
5634 } else {
5635 pp.raceprocctx = raceproccreate()
5636 }
5637 }
5638 lockInit(&pp.timers.mu, lockRankTimers)
5639
5640
5641
5642 timerpMask.set(id)
5643
5644
5645 idlepMask.clear(id)
5646 }
5647
5648
5649
5650
5651
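// destroy releases all of the resources associated with pp and
// transitions it to status _Pdead.
//
// sched.lock must be held and the world must be stopped.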
5652 func (pp *p) destroy() {
5653 assertLockHeld(&sched.lock)
5654 assertWorldStopped()
5655
5656
5657 for pp.runqhead != pp.runqtail {
5658
5659 pp.runqtail--
5660 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5661
5662 globrunqputhead(gp)
5663 }
5664 if pp.runnext != 0 {
5665 globrunqputhead(pp.runnext.ptr())
5666 pp.runnext = 0
5667 }
5668
5669
5670 getg().m.p.ptr().timers.take(&pp.timers)
5671
5672
5673 if gcphase != _GCoff {
5674 wbBufFlush1(pp)
5675 pp.gcw.dispose()
5676 }
5677 for i := range pp.sudogbuf {
5678 pp.sudogbuf[i] = nil
5679 }
5680 pp.sudogcache = pp.sudogbuf[:0]
5681 pp.pinnerCache = nil
5682 for j := range pp.deferpoolbuf {
5683 pp.deferpoolbuf[j] = nil
5684 }
5685 pp.deferpool = pp.deferpoolbuf[:0]
5686 systemstack(func() {
5687 for i := 0; i < pp.mspancache.len; i++ {
5688
5689 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5690 }
5691 pp.mspancache.len = 0
5692 lock(&mheap_.lock)
5693 pp.pcache.flush(&mheap_.pages)
5694 unlock(&mheap_.lock)
5695 })
5696 freemcache(pp.mcache)
5697 pp.mcache = nil
5698 gfpurge(pp)
5699 if raceenabled {
5700 if pp.timers.raceCtx != 0 {
5701
5702
5703
5704
5705
5706 mp := getg().m
5707 phold := mp.p.ptr()
5708 mp.p.set(pp)
5709
5710 racectxend(pp.timers.raceCtx)
5711 pp.timers.raceCtx = 0
5712
5713 mp.p.set(phold)
5714 }
5715 raceprocdestroy(pp.raceprocctx)
5716 pp.raceprocctx = 0
5717 }
5718 pp.gcAssistTime = 0
5719 pp.status = _Pdead
5720 }
5721
5722
5723
5724
5725
5726
5727
5728
5729
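// Change number of processors.
//
// sched.lock must be held, and the world must be stopped.
//
// gcworkbufs must not be being modified by either the GC or the write barrier
// code, so the GC must not be running if the number of Ps actually changes.
//
// Returns list of Ps with local work, they need to be scheduled by the caller.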
5730 func procresize(nprocs int32) *p {
5731 assertLockHeld(&sched.lock)
5732 assertWorldStopped()
5733
5734 old := gomaxprocs
5735 if old < 0 || nprocs <= 0 {
5736 throw("procresize: invalid arg")
5737 }
5738 trace := traceAcquire()
5739 if trace.ok() {
5740 trace.Gomaxprocs(nprocs)
5741 traceRelease(trace)
5742 }
5743
5744
5745 now := nanotime()
5746 if sched.procresizetime != 0 {
5747 sched.totaltime += int64(old) * (now - sched.procresizetime)
5748 }
5749 sched.procresizetime = now
5750
5751 maskWords := (nprocs + 31) / 32
5752
5753
5754 if nprocs > int32(len(allp)) {
5755
5756
5757 lock(&allpLock)
5758 if nprocs <= int32(cap(allp)) {
5759 allp = allp[:nprocs]
5760 } else {
5761 nallp := make([]*p, nprocs)
5762
5763
5764 copy(nallp, allp[:cap(allp)])
5765 allp = nallp
5766 }
5767
5768 if maskWords <= int32(cap(idlepMask)) {
5769 idlepMask = idlepMask[:maskWords]
5770 timerpMask = timerpMask[:maskWords]
5771 } else {
5772 nidlepMask := make([]uint32, maskWords)
5773
5774 copy(nidlepMask, idlepMask)
5775 idlepMask = nidlepMask
5776
5777 ntimerpMask := make([]uint32, maskWords)
5778 copy(ntimerpMask, timerpMask)
5779 timerpMask = ntimerpMask
5780 }
5781 unlock(&allpLock)
5782 }
5783
5784
5785 for i := old; i < nprocs; i++ {
5786 pp := allp[i]
5787 if pp == nil {
5788 pp = new(p)
5789 }
5790 pp.init(i)
5791 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5792 }
5793
5794 gp := getg()
5795 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5796
5797 gp.m.p.ptr().status = _Prunning
5798 gp.m.p.ptr().mcache.prepareForSweep()
5799 } else {
5800
5801
5802
5803
5804
5805 if gp.m.p != 0 {
5806 trace := traceAcquire()
5807 if trace.ok() {
5808
5809
5810
5811 trace.GoSched()
5812 trace.ProcStop(gp.m.p.ptr())
5813 traceRelease(trace)
5814 }
5815 gp.m.p.ptr().m = 0
5816 }
5817 gp.m.p = 0
5818 pp := allp[0]
5819 pp.m = 0
5820 pp.status = _Pidle
5821 acquirep(pp)
5822 trace := traceAcquire()
5823 if trace.ok() {
5824 trace.GoStart()
5825 traceRelease(trace)
5826 }
5827 }
5828
5829
5830 mcache0 = nil
5831
5832
5833 for i := nprocs; i < old; i++ {
5834 pp := allp[i]
5835 pp.destroy()
5836
5837 }
5838
5839
5840 if int32(len(allp)) != nprocs {
5841 lock(&allpLock)
5842 allp = allp[:nprocs]
5843 idlepMask = idlepMask[:maskWords]
5844 timerpMask = timerpMask[:maskWords]
5845 unlock(&allpLock)
5846 }
5847
5848 var runnablePs *p
5849 for i := nprocs - 1; i >= 0; i-- {
5850 pp := allp[i]
5851 if gp.m.p.ptr() == pp {
5852 continue
5853 }
5854 pp.status = _Pidle
5855 if runqempty(pp) {
5856 pidleput(pp, now)
5857 } else {
5858 pp.m.set(mget())
5859 pp.link.set(runnablePs)
5860 runnablePs = pp
5861 }
5862 }
5863 stealOrder.reset(uint32(nprocs))
5864 var int32p *int32 = &gomaxprocs
5865 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5866 if old != nprocs {
5867
5868 gcCPULimiter.resetCapacity(now, nprocs)
5869 }
5870 return runnablePs
5871 }
5872
5873
5874
5875
5876
5877
5878
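// Associate p and the current m.
//
// This function is allowed to have write barriers even if the caller
// isn't because it immediately acquires pp.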
5879 func acquirep(pp *p) {
5880
5881 wirep(pp)
5882
5883
5884
5885
5886
5887 pp.mcache.prepareForSweep()
5888
5889 trace := traceAcquire()
5890 if trace.ok() {
5891 trace.ProcStart()
5892 traceRelease(trace)
5893 }
5894 }
5895
5896
5897
5898
5899
5900
5901
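// wirep is the first step of acquirep, which actually associates the
// current M to pp. This is broken out so we can disallow write
// barriers for this part, since we don't yet have a P.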
5902 func wirep(pp *p) {
5903 gp := getg()
5904
5905 if gp.m.p != 0 {
5906
5907
5908 systemstack(func() {
5909 throw("wirep: already in go")
5910 })
5911 }
5912 if pp.m != 0 || pp.status != _Pidle {
5913
5914
5915 systemstack(func() {
5916 id := int64(0)
5917 if pp.m != 0 {
5918 id = pp.m.ptr().id
5919 }
5920 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
5921 throw("wirep: invalid p state")
5922 })
5923 }
5924 gp.m.p.set(pp)
5925 pp.m.set(gp.m)
5926 pp.status = _Prunning
5927 }
5928
5929
5930 func releasep() *p {
5931 trace := traceAcquire()
5932 if trace.ok() {
5933 trace.ProcStop(getg().m.p.ptr())
5934 traceRelease(trace)
5935 }
5936 return releasepNoTrace()
5937 }
5938
5939
5940 func releasepNoTrace() *p {
5941 gp := getg()
5942
5943 if gp.m.p == 0 {
5944 throw("releasep: invalid arg")
5945 }
5946 pp := gp.m.p.ptr()
5947 if pp.m.ptr() != gp.m || pp.status != _Prunning {
5948 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
5949 throw("releasep: invalid p state")
5950 }
5951 gp.m.p = 0
5952 pp.m = 0
5953 pp.status = _Pidle
5954 return pp
5955 }
5956
5957 func incidlelocked(v int32) {
5958 lock(&sched.lock)
5959 sched.nmidlelocked += v
5960 if v > 0 {
5961 checkdead()
5962 }
5963 unlock(&sched.lock)
5964 }
5965
5966
5967
5968
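// Check for deadlock situation.
// The check is based on number of running M's, if 0 -> deadlock.
// sched.lock must be held.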
5969 func checkdead() {
5970 assertLockHeld(&sched.lock)
5971
5972
5973
5974
5975
5976
5977 if (islibrary || isarchive) && GOARCH != "wasm" {
5978 return
5979 }
5980
5981
5982
5983
5984
5985 if panicking.Load() > 0 {
5986 return
5987 }
5988
5989
5990
5991
5992
5993 var run0 int32
5994 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
5995 run0 = 1
5996 }
5997
5998 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
5999 if run > run0 {
6000 return
6001 }
6002 if run < 0 {
6003 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6004 unlock(&sched.lock)
6005 throw("checkdead: inconsistent counts")
6006 }
6007
6008 grunning := 0
6009 forEachG(func(gp *g) {
6010 if isSystemGoroutine(gp, false) {
6011 return
6012 }
6013 s := readgstatus(gp)
6014 switch s &^ _Gscan {
6015 case _Gwaiting,
6016 _Gpreempted:
6017 grunning++
6018 case _Grunnable,
6019 _Grunning,
6020 _Gsyscall:
6021 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6022 unlock(&sched.lock)
6023 throw("checkdead: runnable g")
6024 }
6025 })
6026 if grunning == 0 {
6027 unlock(&sched.lock)
6028 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6029 }
6030
6031
6032 if faketime != 0 {
6033 if when := timeSleepUntil(); when < maxWhen {
6034 faketime = when
6035
6036
6037 pp, _ := pidleget(faketime)
6038 if pp == nil {
6039
6040
6041 unlock(&sched.lock)
6042 throw("checkdead: no p for timer")
6043 }
6044 mp := mget()
6045 if mp == nil {
6046
6047
6048 unlock(&sched.lock)
6049 throw("checkdead: no m for timer")
6050 }
6051
6052
6053
6054 sched.nmspinning.Add(1)
6055 mp.spinning = true
6056 mp.nextp.set(pp)
6057 notewakeup(&mp.park)
6058 return
6059 }
6060 }
6061
6062
6063 for _, pp := range allp {
6064 if len(pp.timers.heap) > 0 {
6065 return
6066 }
6067 }
6068
6069 unlock(&sched.lock)
6070 fatal("all goroutines are asleep - deadlock!")
6071 }
6072
6073
6074
6075
6076
6077
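// forcegcperiod is the maximum time in nanoseconds between garbage
// collections. If we go this long without a garbage collection, one
// is forced to run.
//
// This is a variable for testing purposes. It normally doesn't change.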
6078 var forcegcperiod int64 = 2 * 60 * 1e9
6079
6080
6081
6082 var needSysmonWorkaround bool = false
6083
6084
6085
6086
6087 const haveSysmon = GOARCH != "wasm"
6088
6089
6090
6091
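// sysmon runs on a dedicated OS thread as the system monitor: it retakes
// Ps blocked in syscalls, preempts long-running goroutines, polls the
// network when no one else is, and wakes the scavenger and forced GC.
// Always runs without a P, so write barriers are not allowed.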
6092 func sysmon() {
6093 lock(&sched.lock)
6094 sched.nmsys++
6095 checkdead()
6096 unlock(&sched.lock)
6097
6098 lasttrace := int64(0)
6099 idle := 0
6100 delay := uint32(0)
6101
6102 for {
6103 if idle == 0 {
6104 delay = 20
6105 } else if idle > 50 {
6106 delay *= 2
6107 }
6108 if delay > 10*1000 {
6109 delay = 10 * 1000
6110 }
6111 usleep(delay)
6112
6113
6114
6115
6116
6117
6118
6119
6120
6121
6122
6123
6124
6125
6126
6127
6128 now := nanotime()
6129 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6130 lock(&sched.lock)
6131 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6132 syscallWake := false
6133 next := timeSleepUntil()
6134 if next > now {
6135 sched.sysmonwait.Store(true)
6136 unlock(&sched.lock)
6137
6138
6139 sleep := forcegcperiod / 2
6140 if next-now < sleep {
6141 sleep = next - now
6142 }
6143 shouldRelax := sleep >= osRelaxMinNS
6144 if shouldRelax {
6145 osRelax(true)
6146 }
6147 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6148 if shouldRelax {
6149 osRelax(false)
6150 }
6151 lock(&sched.lock)
6152 sched.sysmonwait.Store(false)
6153 noteclear(&sched.sysmonnote)
6154 }
6155 if syscallWake {
6156 idle = 0
6157 delay = 20
6158 }
6159 }
6160 unlock(&sched.lock)
6161 }
6162
6163 lock(&sched.sysmonlock)
6164
6165
6166 now = nanotime()
6167
6168
6169 if *cgo_yield != nil {
6170 asmcgocall(*cgo_yield, nil)
6171 }
6172
6173 lastpoll := sched.lastpoll.Load()
6174 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6175 sched.lastpoll.CompareAndSwap(lastpoll, now)
6176 list, delta := netpoll(0)
6177 if !list.empty() {
6178
6179
6180
6181
6182
6183
6184
6185 incidlelocked(-1)
6186 injectglist(&list)
6187 incidlelocked(1)
6188 netpollAdjustWaiters(delta)
6189 }
6190 }
6191 if GOOS == "netbsd" && needSysmonWorkaround {
6192
6193
6194
6195
6196
6197
6198
6199
6200
6201
6202
6203
6204
6205
6206
6207 if next := timeSleepUntil(); next < now {
6208 startm(nil, false, false)
6209 }
6210 }
6211 if scavenger.sysmonWake.Load() != 0 {
6212
6213 scavenger.wake()
6214 }
6215
6216
6217 if retake(now) != 0 {
6218 idle = 0
6219 } else {
6220 idle++
6221 }
6222
6223 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6224 lock(&forcegc.lock)
6225 forcegc.idle.Store(false)
6226 var list gList
6227 list.push(forcegc.g)
6228 injectglist(&list)
6229 unlock(&forcegc.lock)
6230 }
6231 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6232 lasttrace = now
6233 schedtrace(debug.scheddetail > 0)
6234 }
6235 unlock(&sched.sysmonlock)
6236 }
6237 }
6238
6239 type sysmontick struct {
6240 schedtick uint32
6241 syscalltick uint32
6242 schedwhen int64
6243 syscallwhen int64
6244 }
6245
6246
6247
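// forcePreemptNS is the time slice given to a G before it is
// preempted.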
6248 const forcePreemptNS = 10 * 1000 * 1000
6249
6250 func retake(now int64) uint32 {
6251 n := 0
6252
6253
6254 lock(&allpLock)
6255
6256
6257
6258 for i := 0; i < len(allp); i++ {
6259 pp := allp[i]
6260 if pp == nil {
6261
6262
6263 continue
6264 }
6265 pd := &pp.sysmontick
6266 s := pp.status
6267 sysretake := false
6268 if s == _Prunning || s == _Psyscall {
6269
6270
6271
6272
6273 t := int64(pp.schedtick)
6274 if int64(pd.schedtick) != t {
6275 pd.schedtick = uint32(t)
6276 pd.schedwhen = now
6277 } else if pd.schedwhen+forcePreemptNS <= now {
6278 preemptone(pp)
6279
6280
6281 sysretake = true
6282 }
6283 }
6284 if s == _Psyscall {
6285
6286 t := int64(pp.syscalltick)
6287 if !sysretake && int64(pd.syscalltick) != t {
6288 pd.syscalltick = uint32(t)
6289 pd.syscallwhen = now
6290 continue
6291 }
6292
6293
6294
6295 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6296 continue
6297 }
6298
6299 unlock(&allpLock)
6300
6301
6302
6303
6304 incidlelocked(-1)
6305 trace := traceAcquire()
6306 if atomic.Cas(&pp.status, s, _Pidle) {
6307 if trace.ok() {
6308 trace.ProcSteal(pp, false)
6309 traceRelease(trace)
6310 }
6311 n++
6312 pp.syscalltick++
6313 handoffp(pp)
6314 } else if trace.ok() {
6315 traceRelease(trace)
6316 }
6317 incidlelocked(1)
6318 lock(&allpLock)
6319 }
6320 }
6321 unlock(&allpLock)
6322 return uint32(n)
6323 }
6324
6325
6326
6327
6328
6329
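// Tell all goroutines that they have been preempted and they should stop.
// This function is purely best-effort. It can fail to inform a goroutine if a
// processor just started running it.
// No locks need to be held.
// Returns true if preemption request was issued to at least one goroutine.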
6330 func preemptall() bool {
6331 res := false
6332 for _, pp := range allp {
6333 if pp.status != _Prunning {
6334 continue
6335 }
6336 if preemptone(pp) {
6337 res = true
6338 }
6339 }
6340 return res
6341 }
6342
6343
6344
6345
6346
6347
6348
6349
6350
6351
6352
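// Tell the goroutine running on processor P to stop.
// This function is purely best-effort. It can incorrectly fail to inform the
// goroutine. It can inform the wrong goroutine. Even if it informs the
// correct goroutine, that goroutine might ignore the request if it is
// simultaneously executing newstack.
// No lock needs to be held.
// Returns true if preemption request was issued.
// The actual preemption will happen at some point in the future
// and will be indicated by the gp->status no longer being
// Grunning.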
6353 func preemptone(pp *p) bool {
6354 mp := pp.m.ptr()
6355 if mp == nil || mp == getg().m {
6356 return false
6357 }
6358 gp := mp.curg
6359 if gp == nil || gp == mp.g0 {
6360 return false
6361 }
6362
6363 gp.preempt = true
6364
6365
6366
6367
6368
6369 gp.stackguard0 = stackPreempt
6370
6371
6372 if preemptMSupported && debug.asyncpreemptoff == 0 {
6373 pp.preempt = true
6374 preemptM(mp)
6375 }
6376
6377 return true
6378 }
6379
6380 var starttime int64
6381
6382 func schedtrace(detailed bool) {
6383 now := nanotime()
6384 if starttime == 0 {
6385 starttime = now
6386 }
6387
6388 lock(&sched.lock)
6389 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
6390 if detailed {
6391 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6392 }
6393
6394
6395
6396 for i, pp := range allp {
6397 mp := pp.m.ptr()
6398 h := atomic.Load(&pp.runqhead)
6399 t := atomic.Load(&pp.runqtail)
6400 if detailed {
6401 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6402 if mp != nil {
6403 print(mp.id)
6404 } else {
6405 print("nil")
6406 }
6407 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers.heap), "\n")
6408 } else {
6409
6410
6411 print(" ")
6412 if i == 0 {
6413 print("[")
6414 }
6415 print(t - h)
6416 if i == len(allp)-1 {
6417 print("]\n")
6418 }
6419 }
6420 }
6421
6422 if !detailed {
6423 unlock(&sched.lock)
6424 return
6425 }
6426
6427 for mp := allm; mp != nil; mp = mp.alllink {
6428 pp := mp.p.ptr()
6429 print(" M", mp.id, ": p=")
6430 if pp != nil {
6431 print(pp.id)
6432 } else {
6433 print("nil")
6434 }
6435 print(" curg=")
6436 if mp.curg != nil {
6437 print(mp.curg.goid)
6438 } else {
6439 print("nil")
6440 }
6441 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6442 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6443 print(lockedg.goid)
6444 } else {
6445 print("nil")
6446 }
6447 print("\n")
6448 }
6449
6450 forEachG(func(gp *g) {
6451 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6452 if gp.m != nil {
6453 print(gp.m.id)
6454 } else {
6455 print("nil")
6456 }
6457 print(" lockedm=")
6458 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6459 print(lockedm.id)
6460 } else {
6461 print("nil")
6462 }
6463 print("\n")
6464 })
6465 unlock(&sched.lock)
6466 }
6467
6468
6469
6470
6471
6472
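// schedEnableUser enables or disables the scheduling of user
// goroutines.
//
// This does not stop already running user goroutines, so the caller
// should first stop the world when disabling user goroutines.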
6473 func schedEnableUser(enable bool) {
6474 lock(&sched.lock)
6475 if sched.disable.user == !enable {
6476 unlock(&sched.lock)
6477 return
6478 }
6479 sched.disable.user = !enable
6480 if enable {
6481 n := sched.disable.n
6482 sched.disable.n = 0
6483 globrunqputbatch(&sched.disable.runnable, n)
6484 unlock(&sched.lock)
6485 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6486 startm(nil, false, false)
6487 }
6488 } else {
6489 unlock(&sched.lock)
6490 }
6491 }
6492
6493
6494
6495
6496
6497 func schedEnabled(gp *g) bool {
6498 assertLockHeld(&sched.lock)
6499
6500 if sched.disable.user {
6501 return isSystemGoroutine(gp, true)
6502 }
6503 return true
6504 }
6505
6506
6507
6508
6509
6510
6511 func mput(mp *m) {
6512 assertLockHeld(&sched.lock)
6513
6514 mp.schedlink = sched.midle
6515 sched.midle.set(mp)
6516 sched.nmidle++
6517 checkdead()
6518 }
6519
6520
6521
6522
6523
6524
6525 func mget() *m {
6526 assertLockHeld(&sched.lock)
6527
6528 mp := sched.midle.ptr()
6529 if mp != nil {
6530 sched.midle = mp.schedlink
6531 sched.nmidle--
6532 }
6533 return mp
6534 }
6535
6536
6537
6538
6539
6540
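// Put gp on the global runnable queue.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.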
6541 func globrunqput(gp *g) {
6542 assertLockHeld(&sched.lock)
6543
6544 sched.runq.pushBack(gp)
6545 sched.runqsize++
6546 }
6547
6548
6549
6550
6551
6552
6553 func globrunqputhead(gp *g) {
6554 assertLockHeld(&sched.lock)
6555
6556 sched.runq.push(gp)
6557 sched.runqsize++
6558 }
6559
6560
6561
6562
6563
6564
6565
6566 func globrunqputbatch(batch *gQueue, n int32) {
6567 assertLockHeld(&sched.lock)
6568
6569 sched.runq.pushBackAll(*batch)
6570 sched.runqsize += n
6571 *batch = gQueue{}
6572 }
6573
6574
6575
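// Try get a batch of G's from the global runnable queue.
// sched.lock must be held.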
6576 func globrunqget(pp *p, max int32) *g {
6577 assertLockHeld(&sched.lock)
6578
6579 if sched.runqsize == 0 {
6580 return nil
6581 }
6582
6583 n := sched.runqsize/gomaxprocs + 1
6584 if n > sched.runqsize {
6585 n = sched.runqsize
6586 }
6587 if max > 0 && n > max {
6588 n = max
6589 }
6590 if n > int32(len(pp.runq))/2 {
6591 n = int32(len(pp.runq)) / 2
6592 }
6593
6594 sched.runqsize -= n
6595
6596 gp := sched.runq.pop()
6597 n--
6598 for ; n > 0; n-- {
6599 gp1 := sched.runq.pop()
6600 runqput(pp, gp1, false)
6601 }
6602 return gp
6603 }
6604
6605
6606 type pMask []uint32
6607
6608
6609 func (p pMask) read(id uint32) bool {
6610 word := id / 32
6611 mask := uint32(1) << (id % 32)
6612 return (atomic.Load(&p[word]) & mask) != 0
6613 }
6614
6615
6616 func (p pMask) set(id int32) {
6617 word := id / 32
6618 mask := uint32(1) << (id % 32)
6619 atomic.Or(&p[word], mask)
6620 }
6621
6622
6623 func (p pMask) clear(id int32) {
6624 word := id / 32
6625 mask := uint32(1) << (id % 32)
6626 atomic.And(&p[word], ^mask)
6627 }
6628
6629
6630
6631
6632
6633
6634
6635
6636
6637
6638
6639
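// pidleput puts p on the _Pidle list. now must be a relatively recent call
// to nanotime or zero. Returns now or the current time if now was zero.
//
// This releases ownership of p. Once sched.lock is released it is no longer
// safe to use p.
//
// sched.lock must be held.
//
// May run during STW, so write barriers are not allowed.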
6640 func pidleput(pp *p, now int64) int64 {
6641 assertLockHeld(&sched.lock)
6642
6643 if !runqempty(pp) {
6644 throw("pidleput: P has non-empty run queue")
6645 }
6646 if now == 0 {
6647 now = nanotime()
6648 }
6649 if pp.timers.len.Load() == 0 {
6650 timerpMask.clear(pp.id)
6651 }
6652 idlepMask.set(pp.id)
6653 pp.link = sched.pidle
6654 sched.pidle.set(pp)
6655 sched.npidle.Add(1)
6656 if !pp.limiterEvent.start(limiterEventIdle, now) {
6657 throw("must be able to track idle limiter event")
6658 }
6659 return now
6660 }
6661
6662
6663
6664
6665
6666
6667
6668
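// pidleget tries to get a p from the _Pidle list, acquiring ownership.
//
// sched.lock must be held.
//
// May run during STW, so write barriers are not allowed.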
6669 func pidleget(now int64) (*p, int64) {
6670 assertLockHeld(&sched.lock)
6671
6672 pp := sched.pidle.ptr()
6673 if pp != nil {
6674
6675 if now == 0 {
6676 now = nanotime()
6677 }
6678 timerpMask.set(pp.id)
6679 idlepMask.clear(pp.id)
6680 sched.pidle = pp.link
6681 sched.npidle.Add(-1)
6682 pp.limiterEvent.stop(limiterEventIdle, now)
6683 }
6684 return pp, now
6685 }
6686
6687
6688
6689
6690
6691
6692
6693
6694
6695
6696
6697 func pidlegetSpinning(now int64) (*p, int64) {
6698 assertLockHeld(&sched.lock)
6699
6700 pp, now := pidleget(now)
6701 if pp == nil {
6702
6703
6704
6705 sched.needspinning.Store(1)
6706 return nil, now
6707 }
6708
6709 return pp, now
6710 }
6711
6712
6713
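// runqempty reports whether pp has no Gs on its local run queue.
// It never returns true spuriously.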
6714 func runqempty(pp *p) bool {
6715
6716
6717
6718
6719 for {
6720 head := atomic.Load(&pp.runqhead)
6721 tail := atomic.Load(&pp.runqtail)
6722 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
6723 if tail == atomic.Load(&pp.runqtail) {
6724 return head == tail && runnext == 0
6725 }
6726 }
6727 }
6728
6729
6730
6731
6732
6733
6734
6735
6736
6737
6738 const randomizeScheduler = raceenabled
6739
6740
6741
6742
6743
6744
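// runqput tries to put g on the local runnable queue.
// If next is false, runqput adds g to the tail of the runnable queue.
// If next is true, runqput puts g in the pp.runnext slot.
// If the run queue is full, runnext puts g on the global queue.
// Executed only by the owner P.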
6745 func runqput(pp *p, gp *g, next bool) {
6746 if !haveSysmon && next {
6747
6748
6749
6750
6751
6752
6753
6754
6755 next = false
6756 }
6757 if randomizeScheduler && next && randn(2) == 0 {
6758 next = false
6759 }
6760
6761 if next {
6762 retryNext:
6763 oldnext := pp.runnext
6764 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
6765 goto retryNext
6766 }
6767 if oldnext == 0 {
6768 return
6769 }
6770
6771 gp = oldnext.ptr()
6772 }
6773
6774 retry:
6775 h := atomic.LoadAcq(&pp.runqhead)
6776 t := pp.runqtail
6777 if t-h < uint32(len(pp.runq)) {
6778 pp.runq[t%uint32(len(pp.runq))].set(gp)
6779 atomic.StoreRel(&pp.runqtail, t+1)
6780 return
6781 }
6782 if runqputslow(pp, gp, h, t) {
6783 return
6784 }
6785
6786 goto retry
6787 }
6788
6789
6790
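// runqputslow moves half of pp's local run queue, plus gp, onto the
// global runnable queue in a single batch. It reports false if the
// local queue changed concurrently and the caller should retry.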
6791 func runqputslow(pp *p, gp *g, h, t uint32) bool {
6792 var batch [len(pp.runq)/2 + 1]*g
6793
6794
6795 n := t - h
6796 n = n / 2
6797 if n != uint32(len(pp.runq)/2) {
6798 throw("runqputslow: queue is not full")
6799 }
6800 for i := uint32(0); i < n; i++ {
6801 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6802 }
6803 if !atomic.CasRel(&pp.runqhead, h, h+n) {
6804 return false
6805 }
6806 batch[n] = gp
6807
6808 if randomizeScheduler {
6809 for i := uint32(1); i <= n; i++ {
6810 j := cheaprandn(i + 1)
6811 batch[i], batch[j] = batch[j], batch[i]
6812 }
6813 }
6814
6815
6816 for i := uint32(0); i < n; i++ {
6817 batch[i].schedlink.set(batch[i+1])
6818 }
6819 var q gQueue
6820 q.head.set(batch[0])
6821 q.tail.set(batch[n])
6822
6823
6824 lock(&sched.lock)
6825 globrunqputbatch(&q, int32(n+1))
6826 unlock(&sched.lock)
6827 return true
6828 }
6829
6830
6831
6832
6833
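// runqputbatch tries to put all Gs from q onto pp's local runnable
// queue; whatever does not fit is pushed onto the global queue.
// Executed only by the owner P.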
6834 func runqputbatch(pp *p, q *gQueue, qsize int) {
6835 h := atomic.LoadAcq(&pp.runqhead)
6836 t := pp.runqtail
6837 n := uint32(0)
6838 for !q.empty() && t-h < uint32(len(pp.runq)) {
6839 gp := q.pop()
6840 pp.runq[t%uint32(len(pp.runq))].set(gp)
6841 t++
6842 n++
6843 }
6844 qsize -= int(n)
6845
6846 if randomizeScheduler {
6847 off := func(o uint32) uint32 {
6848 return (pp.runqtail + o) % uint32(len(pp.runq))
6849 }
6850 for i := uint32(1); i < n; i++ {
6851 j := cheaprandn(i + 1)
6852 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
6853 }
6854 }
6855
6856 atomic.StoreRel(&pp.runqtail, t)
6857 if !q.empty() {
6858 lock(&sched.lock)
6859 globrunqputbatch(q, int32(qsize))
6860 unlock(&sched.lock)
6861 }
6862 }
6863
6864
6865
6866
6867
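// runqget gets a g from pp's local runnable queue, preferring runnext.
// inheritTime is true when the g comes from runnext and should inherit
// the remaining time slice rather than starting a new one.
// Executed only by the owner P.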
6868 func runqget(pp *p) (gp *g, inheritTime bool) {
6869
6870 next := pp.runnext
6871
6872
6873
6874 if next != 0 && pp.runnext.cas(next, 0) {
6875 return next.ptr(), true
6876 }
6877
6878 for {
6879 h := atomic.LoadAcq(&pp.runqhead)
6880 t := pp.runqtail
6881 if t == h {
6882 return nil, false
6883 }
6884 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
6885 if atomic.CasRel(&pp.runqhead, h, h+1) {
6886 return gp, false
6887 }
6888 }
6889 }
6890
6891
6892
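// runqdrain removes all Gs from pp's local runnable queue, including
// runnext, and returns them as a gQueue along with their count.
// Executed only by the owner P.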
6893 func runqdrain(pp *p) (drainQ gQueue, n uint32) {
6894 oldNext := pp.runnext
6895 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
6896 drainQ.pushBack(oldNext.ptr())
6897 n++
6898 }
6899
6900 retry:
6901 h := atomic.LoadAcq(&pp.runqhead)
6902 t := pp.runqtail
6903 qn := t - h
6904 if qn == 0 {
6905 return
6906 }
6907 if qn > uint32(len(pp.runq)) {
6908 goto retry
6909 }
6910
6911 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
6912 goto retry
6913 }
6914
6915
6916
6917
6918
6919
6920
6921
6922 for i := uint32(0); i < qn; i++ {
6923 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6924 drainQ.pushBack(gp)
6925 n++
6926 }
6927 return
6928 }
6929
6930
6931
6932
6933
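// runqgrab grabs roughly half of the Gs from pp's local runnable queue
// into batch, starting at batchHead, and returns how many were taken.
// If the queue is empty and stealRunNextG is set, it may steal
// pp.runnext instead. Can be executed by any P.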
6934 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
6935 for {
6936 h := atomic.LoadAcq(&pp.runqhead)
6937 t := atomic.LoadAcq(&pp.runqtail)
6938 n := t - h
6939 n = n - n/2
6940 if n == 0 {
6941 if stealRunNextG {
6942
6943 if next := pp.runnext; next != 0 {
6944 if pp.status == _Prunning {
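// pp is running and may be just about to run its runnext g itself.
// Back off briefly (usleep, or osyield where OS timers are too coarse)
// before stealing it.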
6945
6946
6947
6948
6949
6950
6951
6952
6953
6954
6955 if !osHasLowResTimer {
6956 usleep(3)
6957 } else {
6958
6959
6960
6961 osyield()
6962 }
6963 }
6964 if !pp.runnext.cas(next, 0) {
6965 continue
6966 }
6967 batch[batchHead%uint32(len(batch))] = next
6968 return 1
6969 }
6970 }
6971 return 0
6972 }
6973 if n > uint32(len(pp.runq)/2) {
6974 continue
6975 }
6976 for i := uint32(0); i < n; i++ {
6977 g := pp.runq[(h+i)%uint32(len(pp.runq))]
6978 batch[(batchHead+i)%uint32(len(batch))] = g
6979 }
6980 if atomic.CasRel(&pp.runqhead, h, h+n) {
6981 return n
6982 }
6983 }
6984 }
6985
6986
6987
6988
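// runqsteal steals half of the elements from p2's local runnable queue
// and puts them onto pp's local queue. It returns one of the stolen
// elements, or nil if the steal failed.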
6989 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
6990 t := pp.runqtail
6991 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
6992 if n == 0 {
6993 return nil
6994 }
6995 n--
6996 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
6997 if n == 0 {
6998 return gp
6999 }
7000 h := atomic.LoadAcq(&pp.runqhead)
7001 if t-h+n >= uint32(len(pp.runq)) {
7002 throw("runqsteal: runq overflow")
7003 }
7004 atomic.StoreRel(&pp.runqtail, t+n)
7005 return gp
7006 }
7007
7008
7009
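// A gQueue is a queue of Gs linked through g.schedlink, supporting
// push at the head and at the tail. A g can be on only one gQueue or
// gList at a time.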
7010 type gQueue struct {
7011 head guintptr
7012 tail guintptr
7013 }
7014
7015
7016 func (q *gQueue) empty() bool {
7017 return q.head == 0
7018 }
7019
7020
7021 func (q *gQueue) push(gp *g) {
7022 gp.schedlink = q.head
7023 q.head.set(gp)
7024 if q.tail == 0 {
7025 q.tail.set(gp)
7026 }
7027 }
7028
7029
7030 func (q *gQueue) pushBack(gp *g) {
7031 gp.schedlink = 0
7032 if q.tail != 0 {
7033 q.tail.ptr().schedlink.set(gp)
7034 } else {
7035 q.head.set(gp)
7036 }
7037 q.tail.set(gp)
7038 }
7039
7040
7041
7042 func (q *gQueue) pushBackAll(q2 gQueue) {
7043 if q2.tail == 0 {
7044 return
7045 }
7046 q2.tail.ptr().schedlink = 0
7047 if q.tail != 0 {
7048 q.tail.ptr().schedlink = q2.head
7049 } else {
7050 q.head = q2.head
7051 }
7052 q.tail = q2.tail
7053 }
7054
7055
7056
7057 func (q *gQueue) pop() *g {
7058 gp := q.head.ptr()
7059 if gp != nil {
7060 q.head = gp.schedlink
7061 if q.head == 0 {
7062 q.tail = 0
7063 }
7064 }
7065 return gp
7066 }
7067
7068
7069 func (q *gQueue) popList() gList {
7070 stack := gList{q.head}
7071 *q = gQueue{}
7072 return stack
7073 }
7074
7075
7076
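// A gList is a singly linked list of Gs through g.schedlink.
// A g can be on only one gQueue or gList at a time.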
7077 type gList struct {
7078 head guintptr
7079 }
7080
7081
7082 func (l *gList) empty() bool {
7083 return l.head == 0
7084 }
7085
7086
7087 func (l *gList) push(gp *g) {
7088 gp.schedlink = l.head
7089 l.head.set(gp)
7090 }
7091
7092
7093 func (l *gList) pushAll(q gQueue) {
7094 if !q.empty() {
7095 q.tail.ptr().schedlink = l.head
7096 l.head = q.head
7097 }
7098 }
7099
7100
7101 func (l *gList) pop() *g {
7102 gp := l.head.ptr()
7103 if gp != nil {
7104 l.head = gp.schedlink
7105 }
7106 return gp
7107 }
7108
7109
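// setMaxThreads updates the limit on the number of OS threads (Ms) the
// runtime may use, returning the previous limit. Used to implement
// runtime/debug.SetMaxThreads.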
7110 func setMaxThreads(in int) (out int) {
7111 lock(&sched.lock)
7112 out = int(sched.maxmcount)
7113 if in > 0x7fffffff {
7114 sched.maxmcount = 0x7fffffff
7115 } else {
7116 sched.maxmcount = int32(in)
7117 }
7118 checkmcount()
7119 unlock(&sched.lock)
7120 return
7121 }
7122
7123
7124
7125
7126
7127
7128
7129
7130
7131
7132
7133
7134
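// procPin pins the calling goroutine to its current P by incrementing
// m.locks (which disables preemption) and returns the P's id.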
7135 func procPin() int {
7136 gp := getg()
7137 mp := gp.m
7138
7139 mp.locks++
7140 return int(mp.p.ptr().id)
7141 }
7142
7143
7144
7145
7146
7147
7148
7149
7150
7151
7152
7153
7154
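// procUnpin undoes a prior procPin by decrementing m.locks.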
7155 func procUnpin() {
7156 gp := getg()
7157 gp.m.locks--
7158 }
7159
7160
7161
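// The wrappers below expose procPin/procUnpin to the sync and
// sync/atomic packages (wired up via go:linkname).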
7162 func sync_runtime_procPin() int {
7163 return procPin()
7164 }
7165
7166
7167
7168 func sync_runtime_procUnpin() {
7169 procUnpin()
7170 }
7171
7172
7173
7174 func sync_atomic_runtime_procPin() int {
7175 return procPin()
7176 }
7177
7178
7179
7180 func sync_atomic_runtime_procUnpin() {
7181 procUnpin()
7182 }
7183
7184
7185
7186
7187
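// internal_sync_runtime_canSpin reports whether spinning is worthwhile
// in mutex slow paths: only for the first few iterations, on multicore
// machines with other Ps running, and only while the local run queue
// is empty.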
7188 func internal_sync_runtime_canSpin(i int) bool {
7189
7190
7191
7192
7193
7194 if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7195 return false
7196 }
7197 if p := getg().m.p.ptr(); !runqempty(p) {
7198 return false
7199 }
7200 return true
7201 }
7202
7203
7204
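// internal_sync_runtime_doSpin performs one spin iteration by executing
// a short burst of CPU pause instructions (procyield).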
7205 func internal_sync_runtime_doSpin() {
7206 procyield(active_spin_cnt)
7207 }
7208
7209
7210
7211
7212
7213
7214
7215
7216
7217
7218
7219
7220
7221
7222
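// sync_runtime_canSpin and sync_runtime_doSpin forward to the
// internal_sync variants above; they are kept for external packages
// that reach into the runtime via go:linkname.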
7223 func sync_runtime_canSpin(i int) bool {
7224 return internal_sync_runtime_canSpin(i)
7225 }
7226
7227
7228
7229
7230
7231
7232
7233
7234
7235
7236
7237
7238
7239 func sync_runtime_doSpin() {
7240 internal_sync_runtime_doSpin()
7241 }
7242
7243 var stealOrder randomOrder
7244
7245
7246
7247
7248
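// randomOrder/randomEnum are helpers for randomized work stealing.
// They enumerate all Ps in pseudo-random orders without repetition:
// each enumeration starts at a random offset and steps by a stride
// that is coprime with the number of Ps, so every P is visited once.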
7249 type randomOrder struct {
7250 count uint32
7251 coprimes []uint32
7252 }
7253
7254 type randomEnum struct {
7255 i uint32
7256 count uint32
7257 pos uint32
7258 inc uint32
7259 }
7260
7261 func (ord *randomOrder) reset(count uint32) {
7262 ord.count = count
7263 ord.coprimes = ord.coprimes[:0]
7264 for i := uint32(1); i <= count; i++ {
7265 if gcd(i, count) == 1 {
7266 ord.coprimes = append(ord.coprimes, i)
7267 }
7268 }
7269 }
7270
7271 func (ord *randomOrder) start(i uint32) randomEnum {
7272 return randomEnum{
7273 count: ord.count,
7274 pos: i % ord.count,
7275 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7276 }
7277 }
7278
7279 func (enum *randomEnum) done() bool {
7280 return enum.i == enum.count
7281 }
7282
7283 func (enum *randomEnum) next() {
7284 enum.i++
7285 enum.pos = (enum.pos + enum.inc) % enum.count
7286 }
7287
7288 func (enum *randomEnum) position() uint32 {
7289 return enum.pos
7290 }
7291
7292 func gcd(a, b uint32) uint32 {
7293 for b != 0 {
7294 a, b = b, a%b
7295 }
7296 return a
7297 }
7298
7299
7300
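// An initTask represents the set of package init functions to run.
// state is 0 (not started), 1 (running), or 2 (done); the struct is
// followed in memory by nfns uintptr-sized init function PCs.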
7301 type initTask struct {
7302 state uint32
7303 nfns uint32
7304
7305 }
7306
7307
7308
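// inittrace holds the statistics printed for init tracing
// (GODEBUG=inittrace=1): whether tracing is active, the goroutine id
// of the init goroutine, and cumulative allocation counts and bytes.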
7309 var inittrace tracestat
7310
7311 type tracestat struct {
7312 active bool
7313 id uint64
7314 allocs uint64
7315 bytes uint64
7316 }
7317
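// doInit runs each init task in ts, in order.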
7318 func doInit(ts []*initTask) {
7319 for _, t := range ts {
7320 doInit1(t)
7321 }
7322 }
7323
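// doInit1 runs a single init task: it marks the task as running, calls
// each of its nfns functions in order, and marks it done. When init
// tracing is active it also prints timing and allocation statistics.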
7324 func doInit1(t *initTask) {
7325 switch t.state {
7326 case 2:
7327 return
7328 case 1:
7329 throw("recursive call during initialization - linker skew")
7330 default:
7331 t.state = 1
7332
7333 var (
7334 start int64
7335 before tracestat
7336 )
7337
7338 if inittrace.active {
7339 start = nanotime()
7340
7341 before = inittrace
7342 }
7343
7344 if t.nfns == 0 {
7345
7346 throw("inittask with no functions")
7347 }
7348
7349 firstFunc := add(unsafe.Pointer(t), 8)
7350 for i := uint32(0); i < t.nfns; i++ {
7351 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7352 f := *(*func())(unsafe.Pointer(&p))
7353 f()
7354 }
7355
7356 if inittrace.active {
7357 end := nanotime()
7358
7359 after := inittrace
7360
7361 f := *(*func())(unsafe.Pointer(&firstFunc))
7362 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7363
7364 var sbuf [24]byte
7365 print("init ", pkg, " @")
7366 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7367 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7368 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7369 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7370 print("\n")
7371 }
7372
7373 t.state = 2
7374 }
7375 }
7376