Source file: src/runtime/proc.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "runtime/internal/atomic"
12 "runtime/internal/sys"
13 "unsafe"
14 )
15
16
17 var modinfo string
18
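// Scheduler overview (editorial summary of the code below): goroutines (G's)
// are multiplexed onto OS threads (M's), and an M must hold a P (a processor:
// a scheduling context with a local run queue) to execute Go code. Runnable
// G's come from per-P run queues, the global run queue, the netpoller, or are
// stolen from other P's (see findRunnable); idle M's park in mPark, and
// "spinning" M's keep searching for work before parking so that ready
// goroutines are not stranded.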
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113 var (
114 m0 m
115 g0 g
116 mcache0 *mcache
117 raceprocctx0 uintptr
118 )
119
120
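// runtime_inittask and main_inittask describe the package-initialization work
// for the runtime and the main program; they are filled in by the linker.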
121 var runtime_inittask initTask
122
123
124 var main_inittask initTask
125
126
127
128
129
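// main_init_done is closed once the main package's init functions have run;
// cgo callbacks that arrive earlier wait on it before running Go code.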
130 var main_init_done chan bool
131
132
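// main_main is the program's main.main, made visible here via go:linkname.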
133 func main_main()
134
135
136 var mainStarted bool
137
138
139 var runtimeInitTime int64
140
141
142 var initSigmask sigset
143
144
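// main is the body of the main goroutine: it finishes runtime initialization,
// runs package init functions, calls main.main, and finally exits the process.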
145 func main() {
146 mp := getg().m
147
148
149
150 mp.g0.racectx = 0
151
152
153
154
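// Limit the maximum goroutine stack to 1 GB on 64-bit systems and 250 MB on
// 32-bit systems.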
155 if goarch.PtrSize == 8 {
156 maxstacksize = 1000000000
157 } else {
158 maxstacksize = 250000000
159 }
160
161
162
163
164 maxstackceiling = 2 * maxstacksize
165
166
167 mainStarted = true
168
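// Start the system monitor (sysmon) on a dedicated thread; wasm has no
// threads, so it is skipped there.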
169 if GOARCH != "wasm" {
170 systemstack(func() {
171 newm(sysmon, nil, -1)
172 })
173 }
174
175
176
177
178
179
180
181 lockOSThread()
182
183 if mp != &m0 {
184 throw("runtime.main not on m0")
185 }
186
187
188
189 runtimeInitTime = nanotime()
190 if runtimeInitTime == 0 {
191 throw("nanotime returning zero")
192 }
193
194 if debug.inittrace != 0 {
195 inittrace.id = getg().goid
196 inittrace.active = true
197 }
198
199 doInit(&runtime_inittask)
200
201
202 needUnlock := true
203 defer func() {
204 if needUnlock {
205 unlockOSThread()
206 }
207 }()
208
209 gcenable()
210
211 main_init_done = make(chan bool)
212 if iscgo {
213 if _cgo_thread_start == nil {
214 throw("_cgo_thread_start missing")
215 }
216 if GOOS != "windows" {
217 if _cgo_setenv == nil {
218 throw("_cgo_setenv missing")
219 }
220 if _cgo_unsetenv == nil {
221 throw("_cgo_unsetenv missing")
222 }
223 }
224 if _cgo_notify_runtime_init_done == nil {
225 throw("_cgo_notify_runtime_init_done missing")
226 }
227
228
229 startTemplateThread()
230 cgocall(_cgo_notify_runtime_init_done, nil)
231 }
232
233 doInit(&main_inittask)
234
235
236
237 inittrace.active = false
238
239 close(main_init_done)
240
241 needUnlock = false
242 unlockOSThread()
243
244 if isarchive || islibrary {
245
246
247 return
248 }
249 fn := main_main
250 fn()
251 if raceenabled {
252 runExitHooks(0)
253 racefini()
254 }
255
256
257
258
259
260 if runningPanicDefers.Load() != 0 {
261
262 for c := 0; c < 1000; c++ {
263 if runningPanicDefers.Load() == 0 {
264 break
265 }
266 Gosched()
267 }
268 }
269 if panicking.Load() != 0 {
270 gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
271 }
272 runExitHooks(0)
273
274 exit(0)
275 for {
276 var x *int32
277 *x = 0
278 }
279 }
280
281
282
283
284 func os_beforeExit(exitCode int) {
285 runExitHooks(exitCode)
286 if exitCode == 0 && raceenabled {
287 racefini()
288 }
289 }
290
291
292 func init() {
293 go forcegchelper()
294 }
295
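// forcegchelper is the goroutine that runs forced (periodic) GCs. It parks on
// forcegc.lock until sysmon notices a GC is overdue and readies it, then
// starts a gcTriggerTime cycle.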
296 func forcegchelper() {
297 forcegc.g = getg()
298 lockInit(&forcegc.lock, lockRankForcegc)
299 for {
300 lock(&forcegc.lock)
301 if forcegc.idle.Load() {
302 throw("forcegc: phase error")
303 }
304 forcegc.idle.Store(true)
305 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1)
306
307 if debug.gctrace > 0 {
308 println("GC forced")
309 }
310
311 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
312 }
313 }
314
315
316
317
318
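// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.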
319 func Gosched() {
320 checkTimeouts()
321 mcall(gosched_m)
322 }
323
324
325
326
327
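// goschedguarded yields like Gosched, but only when the yield is safe; if the
// M cannot be preempted (see canPreemptM in goschedguarded_m) it returns
// without yielding.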
328 func goschedguarded() {
329 mcall(goschedguarded_m)
330 }
331
332
333
334
335
336
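// goschedIfBusy yields like Gosched, but skips the yield when there are idle
// P's and no preemption has been requested, so callers in busy loops do not
// give up the CPU unnecessarily.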
337 func goschedIfBusy() {
338 gp := getg()
339
340
341 if !gp.preempt && sched.npidle.Load() > 0 {
342 return
343 }
344 mcall(gosched_m)
345 }
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
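// gopark parks the current goroutine in a waiting state with the given reason
// and calls unlockf(gp, lock) on the system stack. If unlockf returns false
// the goroutine is resumed immediately; otherwise it stays parked until
// something makes it runnable again (see goready).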
364 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
365 if reason != waitReasonSleep {
366 checkTimeouts()
367 }
368 mp := acquirem()
369 gp := mp.curg
370 status := readgstatus(gp)
371 if status != _Grunning && status != _Gscanrunning {
372 throw("gopark: bad g status")
373 }
374 mp.waitlock = lock
375 mp.waitunlockf = unlockf
376 gp.waitreason = reason
377 mp.waittraceev = traceEv
378 mp.waittraceskip = traceskip
379 releasem(mp)
380
381 mcall(park_m)
382 }
383
384
385
386 func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
387 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
388 }
389
390 func goready(gp *g, traceskip int) {
391 systemstack(func() {
392 ready(gp, traceskip, true)
393 })
394 }
395
396
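// acquireSudog returns a sudog for the current goroutine, refilling the per-P
// sudogcache from the central sched.sudogcache list, or allocating a new one,
// when the local cache is empty.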
397 func acquireSudog() *sudog {
398
399
400
401
402
403
404
405
406 mp := acquirem()
407 pp := mp.p.ptr()
408 if len(pp.sudogcache) == 0 {
409 lock(&sched.sudoglock)
410
411 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
412 s := sched.sudogcache
413 sched.sudogcache = s.next
414 s.next = nil
415 pp.sudogcache = append(pp.sudogcache, s)
416 }
417 unlock(&sched.sudoglock)
418
419 if len(pp.sudogcache) == 0 {
420 pp.sudogcache = append(pp.sudogcache, new(sudog))
421 }
422 }
423 n := len(pp.sudogcache)
424 s := pp.sudogcache[n-1]
425 pp.sudogcache[n-1] = nil
426 pp.sudogcache = pp.sudogcache[:n-1]
427 if s.elem != nil {
428 throw("acquireSudog: found s.elem != nil in cache")
429 }
430 releasem(mp)
431 return s
432 }
433
434
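// releaseSudog checks that s has been fully reset and returns it to the per-P
// cache, spilling half of a full cache back to the central list.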
435 func releaseSudog(s *sudog) {
436 if s.elem != nil {
437 throw("runtime: sudog with non-nil elem")
438 }
439 if s.isSelect {
440 throw("runtime: sudog with non-false isSelect")
441 }
442 if s.next != nil {
443 throw("runtime: sudog with non-nil next")
444 }
445 if s.prev != nil {
446 throw("runtime: sudog with non-nil prev")
447 }
448 if s.waitlink != nil {
449 throw("runtime: sudog with non-nil waitlink")
450 }
451 if s.c != nil {
452 throw("runtime: sudog with non-nil c")
453 }
454 gp := getg()
455 if gp.param != nil {
456 throw("runtime: releaseSudog with non-nil gp.param")
457 }
458 mp := acquirem()
459 pp := mp.p.ptr()
460 if len(pp.sudogcache) == cap(pp.sudogcache) {
461
462 var first, last *sudog
463 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
464 n := len(pp.sudogcache)
465 p := pp.sudogcache[n-1]
466 pp.sudogcache[n-1] = nil
467 pp.sudogcache = pp.sudogcache[:n-1]
468 if first == nil {
469 first = p
470 } else {
471 last.next = p
472 }
473 last = p
474 }
475 lock(&sched.sudoglock)
476 last.next = sched.sudogcache
477 sched.sudogcache = first
478 unlock(&sched.sudoglock)
479 }
480 pp.sudogcache = append(pp.sudogcache, s)
481 releasem(mp)
482 }
483
484
485 func badmcall(fn func(*g)) {
486 throw("runtime: mcall called on m->g0 stack")
487 }
488
489 func badmcall2(fn func(*g)) {
490 throw("runtime: mcall function returned")
491 }
492
493 func badreflectcall() {
494 panic(plainError("arg size to reflect.call more than 1GB"))
495 }
496
497
498
499 func badmorestackg0() {
500 writeErrStr("fatal: morestack on g0\n")
501 }
502
503
504
505 func badmorestackgsignal() {
506 writeErrStr("fatal: morestack on gsignal\n")
507 }
508
509
510 func badctxt() {
511 throw("ctxt != 0")
512 }
513
514 func lockedOSThread() bool {
515 gp := getg()
516 return gp.lockedm != 0 && gp.m.lockedg != 0
517 }
518
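// allgs holds every G ever created (including dead G's, which are reused) and
// is protected by allglock. allglen and allgptr mirror its length and backing
// array so readers that cannot take the lock (see forEachGRace) can still
// walk a consistent prefix.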
519 var (
520
521
522
523
524
525
526 allglock mutex
527 allgs []*g
528
529
530
531
532
533
534
535
536
537
538
539
540
541 allglen uintptr
542 allgptr **g
543 )
544
545 func allgadd(gp *g) {
546 if readgstatus(gp) == _Gidle {
547 throw("allgadd: bad status Gidle")
548 }
549
550 lock(&allglock)
551 allgs = append(allgs, gp)
552 if &allgs[0] != allgptr {
553 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
554 }
555 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
556 unlock(&allglock)
557 }
558
559
560
561
562 func allGsSnapshot() []*g {
563 assertWorldStoppedOrLockHeld(&allglock)
564
565
566
567
568
569
570 return allgs[:len(allgs):len(allgs)]
571 }
572
573
574 func atomicAllG() (**g, uintptr) {
575 length := atomic.Loaduintptr(&allglen)
576 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
577 return ptr, length
578 }
579
580
581 func atomicAllGIndex(ptr **g, i uintptr) *g {
582 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
583 }
584
585
586
587
588 func forEachG(fn func(gp *g)) {
589 lock(&allglock)
590 for _, gp := range allgs {
591 fn(gp)
592 }
593 unlock(&allglock)
594 }
595
596
597
598
599
600 func forEachGRace(fn func(gp *g)) {
601 ptr, length := atomicAllG()
602 for i := uintptr(0); i < length; i++ {
603 gp := atomicAllGIndex(ptr, i)
604 fn(gp)
605 }
606 return
607 }
608
609 const (
610
611
612 _GoidCacheBatch = 16
613 )
614
615
616
617 func cpuinit(env string) {
618 switch GOOS {
619 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
620 cpu.DebugOptions = true
621 }
622 cpu.Initialize(env)
623
624
625
626 switch GOARCH {
627 case "386", "amd64":
628 x86HasPOPCNT = cpu.X86.HasPOPCNT
629 x86HasSSE41 = cpu.X86.HasSSE41
630 x86HasFMA = cpu.X86.HasFMA
631
632 case "arm":
633 armHasVFPv4 = cpu.ARM.HasVFPv4
634
635 case "arm64":
636 arm64HasATOMICS = cpu.ARM64.HasATOMICS
637 }
638 }
639
640
641
642
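// getGodebugEarly returns the GODEBUG value before goenvs has run. On
// Unix-like systems the environment strings follow argv in memory, so they
// can be scanned directly here.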
643 func getGodebugEarly() string {
644 const prefix = "GODEBUG="
645 var env string
646 switch GOOS {
647 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
648
649
650
651 n := int32(0)
652 for argv_index(argv, argc+1+n) != nil {
653 n++
654 }
655
656 for i := int32(0); i < n; i++ {
657 p := argv_index(argv, argc+1+i)
658 s := unsafe.String(p, findnull(p))
659
660 if hasPrefix(s, prefix) {
661 env = gostring(p)[len(prefix):]
662 break
663 }
664 }
665 }
666 return env
667 }
668
669
670
671
672
673
674
675
676
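// schedinit bootstraps the scheduler. The bootstrap sequence is: call osinit,
// call schedinit, make and queue the main goroutine, call mstart; the new
// goroutine then runs runtime.main.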
677 func schedinit() {
678 lockInit(&sched.lock, lockRankSched)
679 lockInit(&sched.sysmonlock, lockRankSysmon)
680 lockInit(&sched.deferlock, lockRankDefer)
681 lockInit(&sched.sudoglock, lockRankSudog)
682 lockInit(&deadlock, lockRankDeadlock)
683 lockInit(&paniclk, lockRankPanic)
684 lockInit(&allglock, lockRankAllg)
685 lockInit(&allpLock, lockRankAllp)
686 lockInit(&reflectOffs.lock, lockRankReflectOffs)
687 lockInit(&finlock, lockRankFin)
688 lockInit(&trace.bufLock, lockRankTraceBuf)
689 lockInit(&trace.stringsLock, lockRankTraceStrings)
690 lockInit(&trace.lock, lockRankTrace)
691 lockInit(&cpuprof.lock, lockRankCpuprof)
692 lockInit(&trace.stackTab.lock, lockRankTraceStackTab)
693
694
695
696 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
697
698
699
700 gp := getg()
701 if raceenabled {
702 gp.racectx, raceprocctx0 = raceinit()
703 }
704
705 sched.maxmcount = 10000
706
707
708 worldStopped()
709
710 moduledataverify()
711 stackinit()
712 mallocinit()
713 godebug := getGodebugEarly()
714 initPageTrace(godebug)
715 cpuinit(godebug)
716 alginit()
717 fastrandinit()
718 mcommoninit(gp.m, -1)
719 modulesinit()
720 typelinksinit()
721 itabsinit()
722 stkobjinit()
723
724 sigsave(&gp.m.sigmask)
725 initSigmask = gp.m.sigmask
726
727 goargs()
728 goenvs()
729 parsedebugvars()
730 gcinit()
731
732
733
734
735
736 if disableMemoryProfiling {
737 MemProfileRate = 0
738 }
739
740 lock(&sched.lock)
741 sched.lastpoll.Store(nanotime())
742 procs := ncpu
743 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
744 procs = n
745 }
746 if procresize(procs) != nil {
747 throw("unknown runnable goroutine during bootstrap")
748 }
749 unlock(&sched.lock)
750
751
752 worldStarted()
753
754
755
756
757 if debug.cgocheck > 1 {
758 writeBarrier.cgo = true
759 writeBarrier.enabled = true
760 for _, pp := range allp {
761 pp.wbBuf.reset()
762 }
763 }
764
765 if buildVersion == "" {
766
767
768 buildVersion = "unknown"
769 }
770 if len(modinfo) == 1 {
771
772
773 modinfo = ""
774 }
775 }
776
777 func dumpgstatus(gp *g) {
778 thisg := getg()
779 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
780 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
781 }
782
783
784 func checkmcount() {
785 assertLockHeld(&sched.lock)
786
787 if mcount() > sched.maxmcount {
788 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
789 throw("thread exhaustion")
790 }
791 }
792
793
794
795
796
797 func mReserveID() int64 {
798 assertLockHeld(&sched.lock)
799
800 if sched.mnext+1 < sched.mnext {
801 throw("runtime: thread ID overflow")
802 }
803 id := sched.mnext
804 sched.mnext++
805 checkmcount()
806 return id
807 }
808
809
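// mcommoninit does the architecture-independent part of M initialization:
// assigning an ID, seeding fastrand, preparing the signal g, and linking mp
// onto allm.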
810 func mcommoninit(mp *m, id int64) {
811 gp := getg()
812
813
814 if gp != gp.m.g0 {
815 callers(1, mp.createstack[:])
816 }
817
818 lock(&sched.lock)
819
820 if id >= 0 {
821 mp.id = id
822 } else {
823 mp.id = mReserveID()
824 }
825
826 lo := uint32(int64Hash(uint64(mp.id), fastrandseed))
827 hi := uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
828 if lo|hi == 0 {
829 hi = 1
830 }
831
832
833 if goarch.BigEndian {
834 mp.fastrand = uint64(lo)<<32 | uint64(hi)
835 } else {
836 mp.fastrand = uint64(hi)<<32 | uint64(lo)
837 }
838
839 mpreinit(mp)
840 if mp.gsignal != nil {
841 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
842 }
843
844
845
846 mp.alllink = allm
847
848
849
850 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
851 unlock(&sched.lock)
852
853
854 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
855 mp.cgoCallers = new(cgoCallers)
856 }
857 }
858
859 func (mp *m) becomeSpinning() {
860 mp.spinning = true
861 sched.nmspinning.Add(1)
862 sched.needspinning.Store(0)
863 }
864
865 var fastrandseed uintptr
866
867 func fastrandinit() {
868 s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
869 getRandomData(s)
870 }
871
872
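// ready marks gp (which must be in _Gwaiting) runnable and puts it on the
// current P's run queue, waking another P if one is idle.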
873 func ready(gp *g, traceskip int, next bool) {
874 if trace.enabled {
875 traceGoUnpark(gp, traceskip)
876 }
877
878 status := readgstatus(gp)
879
880
881 mp := acquirem()
882 if status&^_Gscan != _Gwaiting {
883 dumpgstatus(gp)
884 throw("bad g->status in ready")
885 }
886
887
888 casgstatus(gp, _Gwaiting, _Grunnable)
889 runqput(mp.p.ptr(), gp, next)
890 wakep()
891 releasem(mp)
892 }
893
894
895
896 const freezeStopWait = 0x7fffffff
897
898
899
900 var freezing atomic.Bool
901
902
903
904
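// freezetheworld is a best-effort stop-the-world used while the runtime is
// crashing: it repeatedly requests that all P's stop, without taking locks or
// waiting for acknowledgement, and has no corresponding start operation.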
905 func freezetheworld() {
906 freezing.Store(true)
907
908
909
910 for i := 0; i < 5; i++ {
911
912 sched.stopwait = freezeStopWait
913 sched.gcwaiting.Store(true)
914
915 if !preemptall() {
916 break
917 }
918 usleep(1000)
919 }
920
921 usleep(1000)
922 preemptall()
923 usleep(1000)
924 }
925
926
927
928
929
930 func readgstatus(gp *g) uint32 {
931 return gp.atomicstatus.Load()
932 }
933
934
935
936
937
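// casfrom_Gscanstatus clears the _Gscan bit from gp's status, releasing the
// scan "lock" taken by castogscanstatus; oldval must be a _Gscan* status and
// newval the same status without the scan bit.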
938 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
939 success := false
940
941
942 switch oldval {
943 default:
944 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
945 dumpgstatus(gp)
946 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
947 case _Gscanrunnable,
948 _Gscanwaiting,
949 _Gscanrunning,
950 _Gscansyscall,
951 _Gscanpreempted:
952 if newval == oldval&^_Gscan {
953 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
954 }
955 }
956 if !success {
957 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
958 dumpgstatus(gp)
959 throw("casfrom_Gscanstatus: gp->status is not in scan state")
960 }
961 releaseLockRank(lockRankGscan)
962 }
963
964
965
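// castogscanstatus tries to set the _Gscan bit on gp's status, which acts as
// a lock keeping the G from changing state while its stack is scanned; it
// reports whether the swap succeeded.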
966 func castogscanstatus(gp *g, oldval, newval uint32) bool {
967 switch oldval {
968 case _Grunnable,
969 _Grunning,
970 _Gwaiting,
971 _Gsyscall:
972 if newval == oldval|_Gscan {
973 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
974 if r {
975 acquireLockRank(lockRankGscan)
976 }
977 return r
978
979 }
980 }
981 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
982 throw("castogscanstatus")
983 panic("not reached")
984 }
985
986
987
988 var casgstatusAlwaysTrack = false
989
990
991
992
993
994
995
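// casgstatus atomically transitions gp from oldval to newval, spinning while
// a concurrent scan holds the _Gscan bit. It also records the runnable and
// mutex-wait time samples used by the scheduler latency metrics.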
996 func casgstatus(gp *g, oldval, newval uint32) {
997 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
998 systemstack(func() {
999 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1000 throw("casgstatus: bad incoming values")
1001 })
1002 }
1003
1004 acquireLockRank(lockRankGscan)
1005 releaseLockRank(lockRankGscan)
1006
1007
1008 const yieldDelay = 5 * 1000
1009 var nextYield int64
1010
1011
1012
1013 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1014 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1015 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1016 }
1017 if i == 0 {
1018 nextYield = nanotime() + yieldDelay
1019 }
1020 if nanotime() < nextYield {
1021 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1022 procyield(1)
1023 }
1024 } else {
1025 osyield()
1026 nextYield = nanotime() + yieldDelay/2
1027 }
1028 }
1029
1030 if oldval == _Grunning {
1031
1032 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1033 gp.tracking = true
1034 }
1035 gp.trackingSeq++
1036 }
1037 if !gp.tracking {
1038 return
1039 }
1040
1041
1042
1043
1044
1045
1046 switch oldval {
1047 case _Grunnable:
1048
1049
1050
1051 now := nanotime()
1052 gp.runnableTime += now - gp.trackingStamp
1053 gp.trackingStamp = 0
1054 case _Gwaiting:
1055 if !gp.waitreason.isMutexWait() {
1056
1057 break
1058 }
1059
1060
1061
1062
1063
1064 now := nanotime()
1065 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1066 gp.trackingStamp = 0
1067 }
1068 switch newval {
1069 case _Gwaiting:
1070 if !gp.waitreason.isMutexWait() {
1071
1072 break
1073 }
1074
1075 now := nanotime()
1076 gp.trackingStamp = now
1077 case _Grunnable:
1078
1079
1080 now := nanotime()
1081 gp.trackingStamp = now
1082 case _Grunning:
1083
1084
1085
1086 gp.tracking = false
1087 sched.timeToRun.record(gp.runnableTime)
1088 gp.runnableTime = 0
1089 }
1090 }
1091
1092
1093
1094
1095 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1096
1097 gp.waitreason = reason
1098 casgstatus(gp, old, _Gwaiting)
1099 }
1100
1101
1102
1103
1104
1105
1106
1107
1108 func casgcopystack(gp *g) uint32 {
1109 for {
1110 oldstatus := readgstatus(gp) &^ _Gscan
1111 if oldstatus != _Gwaiting && oldstatus != _Grunnable {
1112 throw("copystack: bad status, not Gwaiting or Grunnable")
1113 }
1114 if gp.atomicstatus.CompareAndSwap(oldstatus, _Gcopystack) {
1115 return oldstatus
1116 }
1117 }
1118 }
1119
1120
1121
1122
1123
1124 func casGToPreemptScan(gp *g, old, new uint32) {
1125 if old != _Grunning || new != _Gscan|_Gpreempted {
1126 throw("bad g transition")
1127 }
1128 acquireLockRank(lockRankGscan)
1129 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1130 }
1131 }
1132
1133
1134
1135
1136 func casGFromPreempted(gp *g, old, new uint32) bool {
1137 if old != _Gpreempted || new != _Gwaiting {
1138 throw("bad g transition")
1139 }
1140 gp.waitreason = waitReasonPreempted
1141 return gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting)
1142 }
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
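// stopTheWorld stops all P's from executing goroutines, bringing the world to
// a halt at safe points, and records reason on the M. The caller must
// eventually call startTheWorld; the actual work happens on the system stack
// in stopTheWorldWithSema.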
1158 func stopTheWorld(reason string) {
1159 semacquire(&worldsema)
1160 gp := getg()
1161 gp.m.preemptoff = reason
1162 systemstack(func() {
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174 casGToWaiting(gp, _Grunning, waitReasonStoppingTheWorld)
1175 stopTheWorldWithSema()
1176 casgstatus(gp, _Gwaiting, _Grunning)
1177 })
1178 }
1179
1180
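// startTheWorld undoes the effects of stopTheWorld and releases worldsema.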
1181 func startTheWorld() {
1182 systemstack(func() { startTheWorldWithSema(false) })
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199 mp := acquirem()
1200 mp.preemptoff = ""
1201 semrelease1(&worldsema, true, 0)
1202 releasem(mp)
1203 }
1204
1205
1206
1207
1208 func stopTheWorldGC(reason string) {
1209 semacquire(&gcsema)
1210 stopTheWorld(reason)
1211 }
1212
1213
1214 func startTheWorldGC() {
1215 startTheWorld()
1216 semrelease(&gcsema)
1217 }
1218
1219
1220 var worldsema uint32 = 1
1221
1222
1223
1224
1225
1226
1227
1228 var gcsema uint32 = 1
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
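// stopTheWorldWithSema is the core of stopTheWorld; the caller must hold
// worldsema. It preempts running goroutines, claims syscall and idle P's
// directly, and sleeps on stopnote until every P has reached _Pgcstop.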
1252 func stopTheWorldWithSema() {
1253 gp := getg()
1254
1255
1256
1257 if gp.m.locks > 0 {
1258 throw("stopTheWorld: holding locks")
1259 }
1260
1261 lock(&sched.lock)
1262 sched.stopwait = gomaxprocs
1263 sched.gcwaiting.Store(true)
1264 preemptall()
1265
1266 gp.m.p.ptr().status = _Pgcstop
1267 sched.stopwait--
1268
1269 for _, pp := range allp {
1270 s := pp.status
1271 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1272 if trace.enabled {
1273 traceGoSysBlock(pp)
1274 traceProcStop(pp)
1275 }
1276 pp.syscalltick++
1277 sched.stopwait--
1278 }
1279 }
1280
1281 now := nanotime()
1282 for {
1283 pp, _ := pidleget(now)
1284 if pp == nil {
1285 break
1286 }
1287 pp.status = _Pgcstop
1288 sched.stopwait--
1289 }
1290 wait := sched.stopwait > 0
1291 unlock(&sched.lock)
1292
1293
1294 if wait {
1295 for {
1296
1297 if notetsleep(&sched.stopnote, 100*1000) {
1298 noteclear(&sched.stopnote)
1299 break
1300 }
1301 preemptall()
1302 }
1303 }
1304
1305
1306 bad := ""
1307 if sched.stopwait != 0 {
1308 bad = "stopTheWorld: not stopped (stopwait != 0)"
1309 } else {
1310 for _, pp := range allp {
1311 if pp.status != _Pgcstop {
1312 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1313 }
1314 }
1315 }
1316 if freezing.Load() {
1317
1318
1319
1320
1321 lock(&deadlock)
1322 lock(&deadlock)
1323 }
1324 if bad != "" {
1325 throw(bad)
1326 }
1327
1328 worldStopped()
1329 }
1330
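// startTheWorldWithSema restarts the world after stopTheWorldWithSema: it
// injects goroutines readied by the netpoller, resizes the P's if GOMAXPROCS
// changed, and hands each P with work to an M (creating M's as needed). It
// returns the time at which the world restarted.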
1331 func startTheWorldWithSema(emitTraceEvent bool) int64 {
1332 assertWorldStopped()
1333
1334 mp := acquirem()
1335 if netpollinited() {
1336 list := netpoll(0)
1337 injectglist(&list)
1338 }
1339 lock(&sched.lock)
1340
1341 procs := gomaxprocs
1342 if newprocs != 0 {
1343 procs = newprocs
1344 newprocs = 0
1345 }
1346 p1 := procresize(procs)
1347 sched.gcwaiting.Store(false)
1348 if sched.sysmonwait.Load() {
1349 sched.sysmonwait.Store(false)
1350 notewakeup(&sched.sysmonnote)
1351 }
1352 unlock(&sched.lock)
1353
1354 worldStarted()
1355
1356 for p1 != nil {
1357 p := p1
1358 p1 = p1.link.ptr()
1359 if p.m != 0 {
1360 mp := p.m.ptr()
1361 p.m = 0
1362 if mp.nextp != 0 {
1363 throw("startTheWorld: inconsistent mp->nextp")
1364 }
1365 mp.nextp.set(p)
1366 notewakeup(&mp.park)
1367 } else {
1368
1369 newm(nil, p, -1)
1370 }
1371 }
1372
1373
1374 startTime := nanotime()
1375 if emitTraceEvent {
1376 traceGCSTWDone()
1377 }
1378
1379
1380
1381
1382 wakep()
1383
1384 releasem(mp)
1385
1386 return startTime
1387 }
1388
1389
1390
1391 func usesLibcall() bool {
1392 switch GOOS {
1393 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1394 return true
1395 case "openbsd":
1396 return GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64"
1397 }
1398 return false
1399 }
1400
1401
1402
1403 func mStackIsSystemAllocated() bool {
1404 switch GOOS {
1405 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1406 return true
1407 case "openbsd":
1408 switch GOARCH {
1409 case "386", "amd64", "arm", "arm64":
1410 return true
1411 }
1412 }
1413 return false
1414 }
1415
1416
1417
1418 func mstart()
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
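// mstart0 is the Go entry point for new M's (reached from the assembly
// routine mstart). It establishes g0's stack bounds and calls mstart1; if
// control ever returns here, for example when a locked goroutine exits, the
// thread is torn down via mexit.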
1429 func mstart0() {
1430 gp := getg()
1431
1432 osStack := gp.stack.lo == 0
1433 if osStack {
1434
1435
1436
1437
1438
1439
1440
1441
1442 size := gp.stack.hi
1443 if size == 0 {
1444 size = 8192 * sys.StackGuardMultiplier
1445 }
1446 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1447 gp.stack.lo = gp.stack.hi - size + 1024
1448 }
1449
1450
1451 gp.stackguard0 = gp.stack.lo + _StackGuard
1452
1453
1454 gp.stackguard1 = gp.stackguard0
1455 mstart1()
1456
1457
1458 if mStackIsSystemAllocated() {
1459
1460
1461
1462 osStack = true
1463 }
1464 mexit(osStack)
1465 }
1466
1467
1468
1469
1470
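// mstart1 finishes M startup on g0: it saves a scheduling context (pc/sp) so
// the M can re-enter mstart0, runs minit and the M's start function, acquires
// the P handed to it, and enters the scheduler.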
1471 func mstart1() {
1472 gp := getg()
1473
1474 if gp != gp.m.g0 {
1475 throw("bad runtime·mstart")
1476 }
1477
1478
1479
1480
1481
1482
1483
1484 gp.sched.g = guintptr(unsafe.Pointer(gp))
1485 gp.sched.pc = getcallerpc()
1486 gp.sched.sp = getcallersp()
1487
1488 asminit()
1489 minit()
1490
1491
1492
1493 if gp.m == &m0 {
1494 mstartm0()
1495 }
1496
1497 if fn := gp.m.mstartfn; fn != nil {
1498 fn()
1499 }
1500
1501 if gp.m != &m0 {
1502 acquirep(gp.m.nextp.ptr())
1503 gp.m.nextp = 0
1504 }
1505 schedule()
1506 }
1507
1508
1509
1510
1511
1512
1513
1514 func mstartm0() {
1515
1516
1517
1518 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1519 cgoHasExtraM = true
1520 newextram()
1521 }
1522 initsig(false)
1523 }
1524
1525
1526
1527
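// mPark puts the current M to sleep on its park note until another thread
// wakes it with notewakeup.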
1528 func mPark() {
1529 gp := getg()
1530 notesleep(&gp.m.park)
1531 noteclear(&gp.m.park)
1532 }
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
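// mexit tears down the calling M and never returns. m0 cannot be torn down,
// so it hands off its P and parks forever instead. osStack reports whether
// the g0 stack was allocated by the OS and therefore must not be freed here.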
1544 func mexit(osStack bool) {
1545 mp := getg().m
1546
1547 if mp == &m0 {
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559 handoffp(releasep())
1560 lock(&sched.lock)
1561 sched.nmfreed++
1562 checkdead()
1563 unlock(&sched.lock)
1564 mPark()
1565 throw("locked m0 woke up")
1566 }
1567
1568 sigblock(true)
1569 unminit()
1570
1571
1572 if mp.gsignal != nil {
1573 stackfree(mp.gsignal.stack)
1574
1575
1576
1577
1578 mp.gsignal = nil
1579 }
1580
1581
1582 lock(&sched.lock)
1583 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1584 if *pprev == mp {
1585 *pprev = mp.alllink
1586 goto found
1587 }
1588 }
1589 throw("m not found in allm")
1590 found:
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600 mp.freeWait.Store(freeMWait)
1601 mp.freelink = sched.freem
1602 sched.freem = mp
1603 unlock(&sched.lock)
1604
1605 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
1606
1607
1608 handoffp(releasep())
1609
1610
1611
1612
1613
1614 lock(&sched.lock)
1615 sched.nmfreed++
1616 checkdead()
1617 unlock(&sched.lock)
1618
1619 if GOOS == "darwin" || GOOS == "ios" {
1620
1621
1622 if mp.signalPending.Load() != 0 {
1623 pendingPreemptSignals.Add(-1)
1624 }
1625 }
1626
1627
1628
1629 mdestroy(mp)
1630
1631 if osStack {
1632
1633 mp.freeWait.Store(freeMRef)
1634
1635
1636
1637 return
1638 }
1639
1640
1641
1642
1643
1644 exitThread(&mp.freeWait)
1645 }
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
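// forEachP runs fn(p) on every P at a GC safe point: it runs fn on the
// current P directly, asks running P's to call it via runSafePointFn, claims
// idle and syscall P's itself, and waits on safePointNote until all are done.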
1658 func forEachP(fn func(*p)) {
1659 mp := acquirem()
1660 pp := getg().m.p.ptr()
1661
1662 lock(&sched.lock)
1663 if sched.safePointWait != 0 {
1664 throw("forEachP: sched.safePointWait != 0")
1665 }
1666 sched.safePointWait = gomaxprocs - 1
1667 sched.safePointFn = fn
1668
1669
1670 for _, p2 := range allp {
1671 if p2 != pp {
1672 atomic.Store(&p2.runSafePointFn, 1)
1673 }
1674 }
1675 preemptall()
1676
1677
1678
1679
1680
1681
1682
1683 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1684 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1685 fn(p)
1686 sched.safePointWait--
1687 }
1688 }
1689
1690 wait := sched.safePointWait > 0
1691 unlock(&sched.lock)
1692
1693
1694 fn(pp)
1695
1696
1697
1698 for _, p2 := range allp {
1699 s := p2.status
1700 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
1701 if trace.enabled {
1702 traceGoSysBlock(p2)
1703 traceProcStop(p2)
1704 }
1705 p2.syscalltick++
1706 handoffp(p2)
1707 }
1708 }
1709
1710
1711 if wait {
1712 for {
1713
1714
1715
1716
1717 if notetsleep(&sched.safePointNote, 100*1000) {
1718 noteclear(&sched.safePointNote)
1719 break
1720 }
1721 preemptall()
1722 }
1723 }
1724 if sched.safePointWait != 0 {
1725 throw("forEachP: not done")
1726 }
1727 for _, p2 := range allp {
1728 if p2.runSafePointFn != 0 {
1729 throw("forEachP: P did not run fn")
1730 }
1731 }
1732
1733 lock(&sched.lock)
1734 sched.safePointFn = nil
1735 unlock(&sched.lock)
1736 releasem(mp)
1737 }
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750 func runSafePointFn() {
1751 p := getg().m.p.ptr()
1752
1753
1754
1755 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1756 return
1757 }
1758 sched.safePointFn(p)
1759 lock(&sched.lock)
1760 sched.safePointWait--
1761 if sched.safePointWait == 0 {
1762 notewakeup(&sched.safePointNote)
1763 }
1764 unlock(&sched.lock)
1765 }
1766
1767
1768
1769
1770 var cgoThreadStart unsafe.Pointer
1771
1772 type cgothreadstart struct {
1773 g guintptr
1774 tls *uint64
1775 fn unsafe.Pointer
1776 }
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
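// allocm allocates a new M (not yet associated with an OS thread) with fn as
// its start function, giving it a runtime-allocated g0 stack unless cgo or
// the platform requires an OS-allocated one. pp may be temporarily acquired
// for allocation if the caller has no P. It also drops M's on sched.freem
// whose threads have finished exiting.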
1787 func allocm(pp *p, fn func(), id int64) *m {
1788 allocmLock.rlock()
1789
1790
1791
1792
1793 acquirem()
1794
1795 gp := getg()
1796 if gp.m.p == 0 {
1797 acquirep(pp)
1798 }
1799
1800
1801
1802 if sched.freem != nil {
1803 lock(&sched.lock)
1804 var newList *m
1805 for freem := sched.freem; freem != nil; {
1806 wait := freem.freeWait.Load()
1807 if wait == freeMWait {
1808 next := freem.freelink
1809 freem.freelink = newList
1810 newList = freem
1811 freem = next
1812 continue
1813 }
1814
1815
1816
1817 if wait == freeMStack {
1818
1819
1820
1821 systemstack(func() {
1822 stackfree(freem.g0.stack)
1823 })
1824 }
1825 freem = freem.freelink
1826 }
1827 sched.freem = newList
1828 unlock(&sched.lock)
1829 }
1830
1831 mp := new(m)
1832 mp.mstartfn = fn
1833 mcommoninit(mp, id)
1834
1835
1836
1837 if iscgo || mStackIsSystemAllocated() {
1838 mp.g0 = malg(-1)
1839 } else {
1840 mp.g0 = malg(8192 * sys.StackGuardMultiplier)
1841 }
1842 mp.g0.m = mp
1843
1844 if pp == gp.m.p.ptr() {
1845 releasep()
1846 }
1847
1848 releasem(gp.m)
1849 allocmLock.runlock()
1850 return mp
1851 }
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
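// needm is called on a non-Go thread (a cgo callback or C-created thread)
// that needs an M to run Go code: it takes an M from the extra-M list,
// installs it as the current thread's M, and marks its g as in a syscall.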
1888 func needm() {
1889 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1890
1891
1892
1893
1894
1895
1896 writeErrStr("fatal error: cgo callback before cgo call\n")
1897 exit(1)
1898 }
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908 var sigmask sigset
1909 sigsave(&sigmask)
1910 sigblock(false)
1911
1912
1913
1914
1915
1916 mp := lockextra(false)
1917
1918
1919
1920
1921
1922
1923
1924
1925 mp.needextram = mp.schedlink == 0
1926 extraMCount--
1927 unlockextra(mp.schedlink.ptr())
1928
1929
1930 mp.sigmask = sigmask
1931
1932
1933
1934 osSetupTLS(mp)
1935
1936
1937
1938
1939
1940
1941 setg(mp.g0)
1942 gp := getg()
1943 gp.stack.hi = getcallersp() + 1024
1944 gp.stack.lo = getcallersp() - 32*1024
1945 gp.stackguard0 = gp.stack.lo + _StackGuard
1946
1947
1948 asminit()
1949 minit()
1950
1951
1952 casgstatus(mp.curg, _Gdead, _Gsyscall)
1953 sched.ngsys.Add(-1)
1954 }
1955
1956
1957
1958
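// newextram allocates M's and puts them on the extra-M list for needm,
// creating one per waiter recorded in extraMWaiters, or a single one if the
// list is simply empty.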
1959 func newextram() {
1960 c := extraMWaiters.Swap(0)
1961 if c > 0 {
1962 for i := uint32(0); i < c; i++ {
1963 oneNewExtraM()
1964 }
1965 } else {
1966
1967 mp := lockextra(true)
1968 unlockextra(mp)
1969 if mp == nil {
1970 oneNewExtraM()
1971 }
1972 }
1973 }
1974
1975
1976 func oneNewExtraM() {
1977
1978
1979
1980
1981
1982 mp := allocm(nil, nil, -1)
1983 gp := malg(4096)
1984 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
1985 gp.sched.sp = gp.stack.hi
1986 gp.sched.sp -= 4 * goarch.PtrSize
1987 gp.sched.lr = 0
1988 gp.sched.g = guintptr(unsafe.Pointer(gp))
1989 gp.syscallpc = gp.sched.pc
1990 gp.syscallsp = gp.sched.sp
1991 gp.stktopsp = gp.sched.sp
1992
1993
1994
1995
1996 casgstatus(gp, _Gidle, _Gdead)
1997 gp.m = mp
1998 mp.curg = gp
1999 mp.isextra = true
2000 mp.lockedInt++
2001 mp.lockedg.set(gp)
2002 gp.lockedm.set(mp)
2003 gp.goid = sched.goidgen.Add(1)
2004 gp.sysblocktraced = true
2005 if raceenabled {
2006 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2007 }
2008 if trace.enabled {
2009
2010
2011
2012 traceGoCreate(gp, 0)
2013 gp.traceseq++
2014 traceEvent(traceEvGoInSyscall, -1, gp.goid)
2015 }
2016
2017 allgadd(gp)
2018
2019
2020
2021
2022
2023 sched.ngsys.Add(1)
2024
2025
2026 mnext := lockextra(true)
2027 mp.schedlink.set(mnext)
2028 extraMCount++
2029 unlockextra(mp)
2030 }
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
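// dropm is the counterpart of needm: when a cgo callback finishes, it puts
// the current M back on the extra-M list and clears the thread's g, so the
// thread returns to C with no Go state attached.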
2055 func dropm() {
2056
2057
2058
2059 mp := getg().m
2060
2061
2062 casgstatus(mp.curg, _Gsyscall, _Gdead)
2063 mp.curg.preemptStop = false
2064 sched.ngsys.Add(1)
2065
2066
2067
2068
2069
2070 sigmask := mp.sigmask
2071 sigblock(false)
2072 unminit()
2073
2074 mnext := lockextra(true)
2075 extraMCount++
2076 mp.schedlink.set(mnext)
2077
2078 setg(nil)
2079
2080
2081 unlockextra(mp)
2082
2083 msigrestore(sigmask)
2084 }
2085
2086
2087 func getm() uintptr {
2088 return uintptr(unsafe.Pointer(getg().m))
2089 }
2090
2091 var extram atomic.Uintptr
2092 var extraMCount uint32
2093 var extraMWaiters atomic.Uint32
2094
2095
2096
2097
2098
2099
2100
2101
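// lockextra locks the extra-M list by swapping the sentinel value 1 into
// extram and returns the previous list head. It spins (possibly with no g)
// while another thread holds the lock and, unless nilokay is set, also waits
// for the list to become non-empty.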
2102 func lockextra(nilokay bool) *m {
2103 const locked = 1
2104
2105 incr := false
2106 for {
2107 old := extram.Load()
2108 if old == locked {
2109 osyield_no_g()
2110 continue
2111 }
2112 if old == 0 && !nilokay {
2113 if !incr {
2114
2115
2116
2117 extraMWaiters.Add(1)
2118 incr = true
2119 }
2120 usleep_no_g(1)
2121 continue
2122 }
2123 if extram.CompareAndSwap(old, locked) {
2124 return (*m)(unsafe.Pointer(old))
2125 }
2126 osyield_no_g()
2127 continue
2128 }
2129 }
2130
2131
2132 func unlockextra(mp *m) {
2133 extram.Store(uintptr(unsafe.Pointer(mp)))
2134 }
2135
2136 var (
2137
2138
2139
2140 allocmLock rwmutex
2141
2142
2143
2144
2145 execLock rwmutex
2146 )
2147
2148
2149
2150 const (
2151 failthreadcreate = "runtime: failed to create new OS thread\n"
2152 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2153 )
2154
2155
2156
2157
2158 var newmHandoff struct {
2159 lock mutex
2160
2161
2162
2163 newm muintptr
2164
2165
2166
2167 waiting bool
2168 wake note
2169
2170
2171
2172
2173 haveTemplateThread uint32
2174 }
2175
2176
2177
2178
2179
2180
2181
2182
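// newm creates a new M that starts by running fn (or just the scheduler if fn
// is nil) with pp as its first P. If the calling thread is locked to a
// goroutine or owned by C, thread creation is delegated to the template
// thread so the new thread inherits a known-good state.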
2183 func newm(fn func(), pp *p, id int64) {
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194 acquirem()
2195
2196 mp := allocm(pp, fn, id)
2197 mp.nextp.set(pp)
2198 mp.sigmask = initSigmask
2199 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211 lock(&newmHandoff.lock)
2212 if newmHandoff.haveTemplateThread == 0 {
2213 throw("on a locked thread with no template thread")
2214 }
2215 mp.schedlink = newmHandoff.newm
2216 newmHandoff.newm.set(mp)
2217 if newmHandoff.waiting {
2218 newmHandoff.waiting = false
2219 notewakeup(&newmHandoff.wake)
2220 }
2221 unlock(&newmHandoff.lock)
2222
2223
2224
2225 releasem(getg().m)
2226 return
2227 }
2228 newm1(mp)
2229 releasem(getg().m)
2230 }
2231
2232 func newm1(mp *m) {
2233 if iscgo {
2234 var ts cgothreadstart
2235 if _cgo_thread_start == nil {
2236 throw("_cgo_thread_start missing")
2237 }
2238 ts.g.set(mp.g0)
2239 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2240 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2241 if msanenabled {
2242 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2243 }
2244 if asanenabled {
2245 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2246 }
2247 execLock.rlock()
2248 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2249 execLock.runlock()
2250 return
2251 }
2252 execLock.rlock()
2253 newosproc(mp)
2254 execLock.runlock()
2255 }
2256
2257
2258
2259
2260
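// startTemplateThread starts the template thread if it is not already
// running; it is a no-op on wasm, which cannot create threads.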
2261 func startTemplateThread() {
2262 if GOARCH == "wasm" {
2263 return
2264 }
2265
2266
2267
2268 mp := acquirem()
2269 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2270 releasem(mp)
2271 return
2272 }
2273 newm(templateThread, nil, -1)
2274 releasem(mp)
2275 }
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
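// templateThread is a thread kept in a known-good state solely so that new
// threads can be created from it when the calling thread is unfit for cloning
// (locked to a goroutine, or started by C code).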
2289 func templateThread() {
2290 lock(&sched.lock)
2291 sched.nmsys++
2292 checkdead()
2293 unlock(&sched.lock)
2294
2295 for {
2296 lock(&newmHandoff.lock)
2297 for newmHandoff.newm != 0 {
2298 newm := newmHandoff.newm.ptr()
2299 newmHandoff.newm = 0
2300 unlock(&newmHandoff.lock)
2301 for newm != nil {
2302 next := newm.schedlink.ptr()
2303 newm.schedlink = 0
2304 newm1(newm)
2305 newm = next
2306 }
2307 lock(&newmHandoff.lock)
2308 }
2309 newmHandoff.waiting = true
2310 noteclear(&newmHandoff.wake)
2311 unlock(&newmHandoff.lock)
2312 notesleep(&newmHandoff.wake)
2313 }
2314 }
2315
2316
2317
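// stopm puts the current M on the idle M list and parks it until another
// thread hands it a P via nextp and wakes it.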
2318 func stopm() {
2319 gp := getg()
2320
2321 if gp.m.locks != 0 {
2322 throw("stopm holding locks")
2323 }
2324 if gp.m.p != 0 {
2325 throw("stopm holding p")
2326 }
2327 if gp.m.spinning {
2328 throw("stopm spinning")
2329 }
2330
2331 lock(&sched.lock)
2332 mput(gp.m)
2333 unlock(&sched.lock)
2334 mPark()
2335 acquirep(gp.m.nextp.ptr())
2336 gp.m.nextp = 0
2337 }
2338
2339 func mspinning() {
2340
2341 getg().m.spinning = true
2342 }
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
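// startm schedules some M to run pp, creating a new M if none is idle. If pp
// is nil it tries to take an idle P and returns without doing anything if
// there is none. spinning indicates the caller already incremented
// nmspinning, and the chosen M starts out spinning.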
2356 func startm(pp *p, spinning bool) {
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373 mp := acquirem()
2374 lock(&sched.lock)
2375 if pp == nil {
2376 if spinning {
2377
2378
2379
2380 throw("startm: P required for spinning=true")
2381 }
2382 pp, _ = pidleget(0)
2383 if pp == nil {
2384 unlock(&sched.lock)
2385 releasem(mp)
2386 return
2387 }
2388 }
2389 nmp := mget()
2390 if nmp == nil {
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403 id := mReserveID()
2404 unlock(&sched.lock)
2405
2406 var fn func()
2407 if spinning {
2408
2409 fn = mspinning
2410 }
2411 newm(fn, pp, id)
2412
2413
2414 releasem(mp)
2415 return
2416 }
2417 unlock(&sched.lock)
2418 if nmp.spinning {
2419 throw("startm: m is spinning")
2420 }
2421 if nmp.nextp != 0 {
2422 throw("startm: m has p")
2423 }
2424 if spinning && !runqempty(pp) {
2425 throw("startm: p has runnable gs")
2426 }
2427
2428 nmp.spinning = spinning
2429 nmp.nextp.set(pp)
2430 notewakeup(&nmp.park)
2431
2432
2433 releasem(mp)
2434 }
2435
2436
2437
2438
2439
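// handoffp hands off ownership of pp from a syscall-bound or dying M. It
// starts an M to run pp if pp has local work, trace or GC work, or the
// scheduler needs a spinning M; otherwise it puts pp on the idle list, waking
// the netpoller if pp owns the earliest timer.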
2440 func handoffp(pp *p) {
2441
2442
2443
2444
2445 if !runqempty(pp) || sched.runqsize != 0 {
2446 startm(pp, false)
2447 return
2448 }
2449
2450 if (trace.enabled || trace.shutdown) && traceReaderAvailable() != nil {
2451 startm(pp, false)
2452 return
2453 }
2454
2455 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
2456 startm(pp, false)
2457 return
2458 }
2459
2460
2461 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
2462 sched.needspinning.Store(0)
2463 startm(pp, true)
2464 return
2465 }
2466 lock(&sched.lock)
2467 if sched.gcwaiting.Load() {
2468 pp.status = _Pgcstop
2469 sched.stopwait--
2470 if sched.stopwait == 0 {
2471 notewakeup(&sched.stopnote)
2472 }
2473 unlock(&sched.lock)
2474 return
2475 }
2476 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
2477 sched.safePointFn(pp)
2478 sched.safePointWait--
2479 if sched.safePointWait == 0 {
2480 notewakeup(&sched.safePointNote)
2481 }
2482 }
2483 if sched.runqsize != 0 {
2484 unlock(&sched.lock)
2485 startm(pp, false)
2486 return
2487 }
2488
2489
2490 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
2491 unlock(&sched.lock)
2492 startm(pp, false)
2493 return
2494 }
2495
2496
2497
2498 when := nobarrierWakeTime(pp)
2499 pidleput(pp, 0)
2500 unlock(&sched.lock)
2501
2502 if when != 0 {
2503 wakeNetPoller(when)
2504 }
2505 }
2506
2507
2508
2509
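// wakep tries to add one spinning M to run pending work: if no M is already
// spinning it takes an idle P and starts a spinning M on it.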
2510 func wakep() {
2511
2512
2513 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
2514 return
2515 }
2516
2517
2518
2519
2520
2521
2522 mp := acquirem()
2523
2524 var pp *p
2525 lock(&sched.lock)
2526 pp, _ = pidlegetSpinning(0)
2527 if pp == nil {
2528 if sched.nmspinning.Add(-1) < 0 {
2529 throw("wakep: negative nmspinning")
2530 }
2531 unlock(&sched.lock)
2532 releasem(mp)
2533 return
2534 }
2535
2536
2537
2538
2539 unlock(&sched.lock)
2540
2541 startm(pp, true)
2542
2543 releasem(mp)
2544 }
2545
2546
2547
2548 func stoplockedm() {
2549 gp := getg()
2550
2551 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
2552 throw("stoplockedm: inconsistent locking")
2553 }
2554 if gp.m.p != 0 {
2555
2556 pp := releasep()
2557 handoffp(pp)
2558 }
2559 incidlelocked(1)
2560
2561 mPark()
2562 status := readgstatus(gp.m.lockedg.ptr())
2563 if status&^_Gscan != _Grunnable {
2564 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
2565 dumpgstatus(gp.m.lockedg.ptr())
2566 throw("stoplockedm: not runnable")
2567 }
2568 acquirep(gp.m.nextp.ptr())
2569 gp.m.nextp = 0
2570 }
2571
2572
2573
2574
2575
2576 func startlockedm(gp *g) {
2577 mp := gp.lockedm.ptr()
2578 if mp == getg().m {
2579 throw("startlockedm: locked to me")
2580 }
2581 if mp.nextp != 0 {
2582 throw("startlockedm: m has p")
2583 }
2584
2585 incidlelocked(-1)
2586 pp := releasep()
2587 mp.nextp.set(pp)
2588 notewakeup(&mp.park)
2589 stopm()
2590 }
2591
2592
2593
2594 func gcstopm() {
2595 gp := getg()
2596
2597 if !sched.gcwaiting.Load() {
2598 throw("gcstopm: not waiting for gc")
2599 }
2600 if gp.m.spinning {
2601 gp.m.spinning = false
2602
2603
2604 if sched.nmspinning.Add(-1) < 0 {
2605 throw("gcstopm: negative nmspinning")
2606 }
2607 }
2608 pp := releasep()
2609 lock(&sched.lock)
2610 pp.status = _Pgcstop
2611 sched.stopwait--
2612 if sched.stopwait == 0 {
2613 notewakeup(&sched.stopnote)
2614 }
2615 unlock(&sched.lock)
2616 stopm()
2617 }
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
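// execute switches the current M to run gp, which must be _Grunnable, and
// never returns. inheritTime is true if gp inherits the remainder of the
// current time slice instead of starting a new one.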
2628 func execute(gp *g, inheritTime bool) {
2629 mp := getg().m
2630
2631 if goroutineProfile.active {
2632
2633
2634
2635 tryRecordGoroutineProfile(gp, osyield)
2636 }
2637
2638
2639
2640 mp.curg = gp
2641 gp.m = mp
2642 casgstatus(gp, _Grunnable, _Grunning)
2643 gp.waitsince = 0
2644 gp.preempt = false
2645 gp.stackguard0 = gp.stack.lo + _StackGuard
2646 if !inheritTime {
2647 mp.p.ptr().schedtick++
2648 }
2649
2650
2651 hz := sched.profilehz
2652 if mp.profilehz != hz {
2653 setThreadCPUProfiler(hz)
2654 }
2655
2656 if trace.enabled {
2657
2658
2659 if gp.syscallsp != 0 && gp.sysblocktraced {
2660 traceGoSysExit(gp.sysexitticks)
2661 }
2662 traceGoStart()
2663 }
2664
2665 gogo(&gp.sched)
2666 }
2667
2668
2669
2670
2671
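// findRunnable blocks until it finds a goroutine for the current M to run.
// It checks, roughly in order: the trace reader, GC workers, the global queue
// (occasionally, for fairness), the local run queue, the global queue, the
// netpoller, work stealing from other P's, and idle-priority GC marking,
// parking the M when nothing is found. tryWakeP tells the caller to wake an
// extra P because the returned goroutine is special (trace reader or GC
// worker).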
2672 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
2673 mp := getg().m
2674
2675
2676
2677
2678
2679 top:
2680 pp := mp.p.ptr()
2681 if sched.gcwaiting.Load() {
2682 gcstopm()
2683 goto top
2684 }
2685 if pp.runSafePointFn != 0 {
2686 runSafePointFn()
2687 }
2688
2689
2690
2691
2692
2693 now, pollUntil, _ := checkTimers(pp, 0)
2694
2695
2696 if trace.enabled || trace.shutdown {
2697 gp := traceReader()
2698 if gp != nil {
2699 casgstatus(gp, _Gwaiting, _Grunnable)
2700 traceGoUnpark(gp, 0)
2701 return gp, false, true
2702 }
2703 }
2704
2705
2706 if gcBlackenEnabled != 0 {
2707 gp, tnow := gcController.findRunnableGCWorker(pp, now)
2708 if gp != nil {
2709 return gp, false, true
2710 }
2711 now = tnow
2712 }
2713
2714
2715
2716
2717 if pp.schedtick%61 == 0 && sched.runqsize > 0 {
2718 lock(&sched.lock)
2719 gp := globrunqget(pp, 1)
2720 unlock(&sched.lock)
2721 if gp != nil {
2722 return gp, false, false
2723 }
2724 }
2725
2726
2727 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
2728 if gp := wakefing(); gp != nil {
2729 ready(gp, 0, true)
2730 }
2731 }
2732 if *cgo_yield != nil {
2733 asmcgocall(*cgo_yield, nil)
2734 }
2735
2736
2737 if gp, inheritTime := runqget(pp); gp != nil {
2738 return gp, inheritTime, false
2739 }
2740
2741
2742 if sched.runqsize != 0 {
2743 lock(&sched.lock)
2744 gp := globrunqget(pp, 0)
2745 unlock(&sched.lock)
2746 if gp != nil {
2747 return gp, false, false
2748 }
2749 }
2750
2751
2752
2753
2754
2755
2756
2757
2758 if netpollinited() && netpollWaiters.Load() > 0 && sched.lastpoll.Load() != 0 {
2759 if list := netpoll(0); !list.empty() {
2760 gp := list.pop()
2761 injectglist(&list)
2762 casgstatus(gp, _Gwaiting, _Grunnable)
2763 if trace.enabled {
2764 traceGoUnpark(gp, 0)
2765 }
2766 return gp, false, false
2767 }
2768 }
2769
2770
2771
2772
2773
2774
2775 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
2776 if !mp.spinning {
2777 mp.becomeSpinning()
2778 }
2779
2780 gp, inheritTime, tnow, w, newWork := stealWork(now)
2781 if gp != nil {
2782
2783 return gp, inheritTime, false
2784 }
2785 if newWork {
2786
2787
2788 goto top
2789 }
2790
2791 now = tnow
2792 if w != 0 && (pollUntil == 0 || w < pollUntil) {
2793
2794 pollUntil = w
2795 }
2796 }
2797
2798
2799
2800
2801
2802 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
2803 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
2804 if node != nil {
2805 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
2806 gp := node.gp.ptr()
2807 casgstatus(gp, _Gwaiting, _Grunnable)
2808 if trace.enabled {
2809 traceGoUnpark(gp, 0)
2810 }
2811 return gp, false, false
2812 }
2813 gcController.removeIdleMarkWorker()
2814 }
2815
2816
2817
2818
2819
2820 gp, otherReady := beforeIdle(now, pollUntil)
2821 if gp != nil {
2822 casgstatus(gp, _Gwaiting, _Grunnable)
2823 if trace.enabled {
2824 traceGoUnpark(gp, 0)
2825 }
2826 return gp, false, false
2827 }
2828 if otherReady {
2829 goto top
2830 }
2831
2832
2833
2834
2835
2836 allpSnapshot := allp
2837
2838
2839 idlepMaskSnapshot := idlepMask
2840 timerpMaskSnapshot := timerpMask
2841
2842
2843 lock(&sched.lock)
2844 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
2845 unlock(&sched.lock)
2846 goto top
2847 }
2848 if sched.runqsize != 0 {
2849 gp := globrunqget(pp, 0)
2850 unlock(&sched.lock)
2851 return gp, false, false
2852 }
2853 if !mp.spinning && sched.needspinning.Load() == 1 {
2854
2855 mp.becomeSpinning()
2856 unlock(&sched.lock)
2857 goto top
2858 }
2859 if releasep() != pp {
2860 throw("findrunnable: wrong p")
2861 }
2862 now = pidleput(pp, now)
2863 unlock(&sched.lock)
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901 wasSpinning := mp.spinning
2902 if mp.spinning {
2903 mp.spinning = false
2904 if sched.nmspinning.Add(-1) < 0 {
2905 throw("findrunnable: negative nmspinning")
2906 }
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
2919 if pp != nil {
2920 acquirep(pp)
2921 mp.becomeSpinning()
2922 goto top
2923 }
2924
2925
2926 pp, gp := checkIdleGCNoP()
2927 if pp != nil {
2928 acquirep(pp)
2929 mp.becomeSpinning()
2930
2931
2932 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
2933 casgstatus(gp, _Gwaiting, _Grunnable)
2934 if trace.enabled {
2935 traceGoUnpark(gp, 0)
2936 }
2937 return gp, false, false
2938 }
2939
2940
2941
2942
2943
2944
2945
2946 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
2947 }
2948
2949
2950 if netpollinited() && (netpollWaiters.Load() > 0 || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
2951 sched.pollUntil.Store(pollUntil)
2952 if mp.p != 0 {
2953 throw("findrunnable: netpoll with p")
2954 }
2955 if mp.spinning {
2956 throw("findrunnable: netpoll with spinning")
2957 }
2958
2959 now = nanotime()
2960 delay := int64(-1)
2961 if pollUntil != 0 {
2962 delay = pollUntil - now
2963 if delay < 0 {
2964 delay = 0
2965 }
2966 }
2967 if faketime != 0 {
2968
2969 delay = 0
2970 }
2971 list := netpoll(delay)
2972 sched.pollUntil.Store(0)
2973 sched.lastpoll.Store(now)
2974 if faketime != 0 && list.empty() {
2975
2976
2977 stopm()
2978 goto top
2979 }
2980 lock(&sched.lock)
2981 pp, _ := pidleget(now)
2982 unlock(&sched.lock)
2983 if pp == nil {
2984 injectglist(&list)
2985 } else {
2986 acquirep(pp)
2987 if !list.empty() {
2988 gp := list.pop()
2989 injectglist(&list)
2990 casgstatus(gp, _Gwaiting, _Grunnable)
2991 if trace.enabled {
2992 traceGoUnpark(gp, 0)
2993 }
2994 return gp, false, false
2995 }
2996 if wasSpinning {
2997 mp.becomeSpinning()
2998 }
2999 goto top
3000 }
3001 } else if pollUntil != 0 && netpollinited() {
3002 pollerPollUntil := sched.pollUntil.Load()
3003 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3004 netpollBreak()
3005 }
3006 }
3007 stopm()
3008 goto top
3009 }
3010
3011
3012
3013
3014
3015 func pollWork() bool {
3016 if sched.runqsize != 0 {
3017 return true
3018 }
3019 p := getg().m.p.ptr()
3020 if !runqempty(p) {
3021 return true
3022 }
3023 if netpollinited() && netpollWaiters.Load() > 0 && sched.lastpoll.Load() != 0 {
3024 if list := netpoll(0); !list.empty() {
3025 injectglist(&list)
3026 return true
3027 }
3028 }
3029 return false
3030 }
3031
3032
3033
3034
3035
3036
3037
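// stealWork makes a bounded number of passes over the other P's, trying to
// steal runnable goroutines and, on the final pass, to run their timers or
// steal runnext; it also reports whether new work appeared and the earliest
// timer it observed.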
3038 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3039 pp := getg().m.p.ptr()
3040
3041 ranTimer := false
3042
3043 const stealTries = 4
3044 for i := 0; i < stealTries; i++ {
3045 stealTimersOrRunNextG := i == stealTries-1
3046
3047 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
3048 if sched.gcwaiting.Load() {
3049
3050 return nil, false, now, pollUntil, true
3051 }
3052 p2 := allp[enum.position()]
3053 if pp == p2 {
3054 continue
3055 }
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3071 tnow, w, ran := checkTimers(p2, now)
3072 now = tnow
3073 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3074 pollUntil = w
3075 }
3076 if ran {
3077
3078
3079
3080
3081
3082
3083
3084
3085 if gp, inheritTime := runqget(pp); gp != nil {
3086 return gp, inheritTime, now, pollUntil, ranTimer
3087 }
3088 ranTimer = true
3089 }
3090 }
3091
3092
3093 if !idlepMask.read(enum.position()) {
3094 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3095 return gp, false, now, pollUntil, ranTimer
3096 }
3097 }
3098 }
3099 }
3100
3101
3102
3103
3104 return nil, false, now, pollUntil, ranTimer
3105 }
3106
3107
3108
3109
3110
3111
3112 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3113 for id, p2 := range allpSnapshot {
3114 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3115 lock(&sched.lock)
3116 pp, _ := pidlegetSpinning(0)
3117 if pp == nil {
3118
3119 unlock(&sched.lock)
3120 return nil
3121 }
3122 unlock(&sched.lock)
3123 return pp
3124 }
3125 }
3126
3127
3128 return nil
3129 }
3130
3131
3132
3133
3134 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3135 for id, p2 := range allpSnapshot {
3136 if timerpMaskSnapshot.read(uint32(id)) {
3137 w := nobarrierWakeTime(p2)
3138 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3139 pollUntil = w
3140 }
3141 }
3142 }
3143
3144 return pollUntil
3145 }
3146
3147
3148
3149
3150
3151 func checkIdleGCNoP() (*p, *g) {
3152
3153
3154
3155
3156
3157
3158 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3159 return nil, nil
3160 }
3161 if !gcMarkWorkAvailable(nil) {
3162 return nil, nil
3163 }
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182 lock(&sched.lock)
3183 pp, now := pidlegetSpinning(0)
3184 if pp == nil {
3185 unlock(&sched.lock)
3186 return nil, nil
3187 }
3188
3189
3190 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3191 pidleput(pp, now)
3192 unlock(&sched.lock)
3193 return nil, nil
3194 }
3195
3196 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3197 if node == nil {
3198 pidleput(pp, now)
3199 unlock(&sched.lock)
3200 gcController.removeIdleMarkWorker()
3201 return nil, nil
3202 }
3203
3204 unlock(&sched.lock)
3205
3206 return pp, node.gp.ptr()
3207 }
3208
3209
3210
3211
3212 func wakeNetPoller(when int64) {
3213 if sched.lastpoll.Load() == 0 {
3214
3215
3216
3217
3218 pollerPollUntil := sched.pollUntil.Load()
3219 if pollerPollUntil == 0 || pollerPollUntil > when {
3220 netpollBreak()
3221 }
3222 } else {
3223
3224
3225 if GOOS != "plan9" {
3226 wakep()
3227 }
3228 }
3229 }
3230
3231 func resetspinning() {
3232 gp := getg()
3233 if !gp.m.spinning {
3234 throw("resetspinning: not a spinning m")
3235 }
3236 gp.m.spinning = false
3237 nmspinning := sched.nmspinning.Add(-1)
3238 if nmspinning < 0 {
3239 throw("findrunnable: negative nmspinning")
3240 }
3241
3242
3243
3244 wakep()
3245 }
3246
3247
3248
3249
3250
3251
3252
3253
3254
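// injectglist marks every goroutine on glist runnable and queues it: one per
// idle P goes to the global queue (and those P's are started), the rest go
// onto the current P's local queue, or all to the global queue if this M has
// no P. It acquires sched.lock, so the caller must not hold it.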
3255 func injectglist(glist *gList) {
3256 if glist.empty() {
3257 return
3258 }
3259 if trace.enabled {
3260 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
3261 traceGoUnpark(gp, 0)
3262 }
3263 }
3264
3265
3266
3267 head := glist.head.ptr()
3268 var tail *g
3269 qsize := 0
3270 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3271 tail = gp
3272 qsize++
3273 casgstatus(gp, _Gwaiting, _Grunnable)
3274 }
3275
3276
3277 var q gQueue
3278 q.head.set(head)
3279 q.tail.set(tail)
3280 *glist = gList{}
3281
3282 startIdle := func(n int) {
3283 for i := 0; i < n; i++ {
3284 mp := acquirem()
3285 lock(&sched.lock)
3286
3287 pp, _ := pidlegetSpinning(0)
3288 if pp == nil {
3289 unlock(&sched.lock)
3290 releasem(mp)
3291 break
3292 }
3293
3294 unlock(&sched.lock)
3295 startm(pp, false)
3296 releasem(mp)
3297 }
3298 }
3299
3300 pp := getg().m.p.ptr()
3301 if pp == nil {
3302 lock(&sched.lock)
3303 globrunqputbatch(&q, int32(qsize))
3304 unlock(&sched.lock)
3305 startIdle(qsize)
3306 return
3307 }
3308
3309 npidle := int(sched.npidle.Load())
3310 var globq gQueue
3311 var n int
3312 for n = 0; n < npidle && !q.empty(); n++ {
3313 g := q.pop()
3314 globq.pushBack(g)
3315 }
3316 if n > 0 {
3317 lock(&sched.lock)
3318 globrunqputbatch(&globq, int32(n))
3319 unlock(&sched.lock)
3320 startIdle(n)
3321 qsize -= n
3322 }
3323
3324 if !q.empty() {
3325 runqputbatch(pp, &q, qsize)
3326 }
3327 }
3328
3329
3330
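// schedule runs one round of the scheduler: find a runnable goroutine and
// execute it. It never returns; control continues in the chosen goroutine.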
3331 func schedule() {
3332 mp := getg().m
3333
3334 if mp.locks != 0 {
3335 throw("schedule: holding locks")
3336 }
3337
3338 if mp.lockedg != 0 {
3339 stoplockedm()
3340 execute(mp.lockedg.ptr(), false)
3341 }
3342
3343
3344
3345 if mp.incgo {
3346 throw("schedule: in cgo")
3347 }
3348
3349 top:
3350 pp := mp.p.ptr()
3351 pp.preempt = false
3352
3353
3354
3355
3356 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
3357 throw("schedule: spinning with local work")
3358 }
3359
3360 gp, inheritTime, tryWakeP := findRunnable()
3361
3362
3363
3364
3365 if mp.spinning {
3366 resetspinning()
3367 }
3368
3369 if sched.disable.user && !schedEnabled(gp) {
3370
3371
3372
3373 lock(&sched.lock)
3374 if schedEnabled(gp) {
3375
3376
3377 unlock(&sched.lock)
3378 } else {
3379 sched.disable.runnable.pushBack(gp)
3380 sched.disable.n++
3381 unlock(&sched.lock)
3382 goto top
3383 }
3384 }
3385
3386
3387
3388 if tryWakeP {
3389 wakep()
3390 }
3391 if gp.lockedm != 0 {
3392
3393
3394 startlockedm(gp)
3395 goto top
3396 }
3397
3398 execute(gp, inheritTime)
3399 }
3400
3401
3402
3403
3404
3405
3406
3407
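// dropg severs the association between the current M and its curg, leaving
// the goroutine in a state where another M may pick it up.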
3408 func dropg() {
3409 gp := getg()
3410
3411 setMNoWB(&gp.m.curg.m, nil)
3412 setGNoWB(&gp.m.curg, nil)
3413 }
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
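// checkTimers runs any of pp's timers that are ready. It returns the current
// time, the time the next timer fires (0 if there is none), and whether it
// ran a timer. The fast path consults timer0When and timerModifiedEarliest
// without taking timersLock.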
3425 func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
3426
3427
3428 next := pp.timer0When.Load()
3429 nextAdj := pp.timerModifiedEarliest.Load()
3430 if next == 0 || (nextAdj != 0 && nextAdj < next) {
3431 next = nextAdj
3432 }
3433
3434 if next == 0 {
3435
3436 return now, 0, false
3437 }
3438
3439 if now == 0 {
3440 now = nanotime()
3441 }
3442 if now < next {
3443
3444
3445
3446
3447 if pp != getg().m.p.ptr() || int(pp.deletedTimers.Load()) <= int(pp.numTimers.Load()/4) {
3448 return now, next, false
3449 }
3450 }
3451
3452 lock(&pp.timersLock)
3453
3454 if len(pp.timers) > 0 {
3455 adjusttimers(pp, now)
3456 for len(pp.timers) > 0 {
3457
3458
3459 if tw := runtimer(pp, now); tw != 0 {
3460 if tw > 0 {
3461 pollUntil = tw
3462 }
3463 break
3464 }
3465 ran = true
3466 }
3467 }
3468
3469
3470
3471
3472 if pp == getg().m.p.ptr() && int(pp.deletedTimers.Load()) > len(pp.timers)/4 {
3473 clearDeletedTimers(pp)
3474 }
3475
3476 unlock(&pp.timersLock)
3477
3478 return now, pollUntil, ran
3479 }
3480
3481 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
3482 unlock((*mutex)(lock))
3483 return true
3484 }
3485
3486
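// park_m continues gopark on g0: it moves gp to _Gwaiting, runs the caller's
// unlock function (resuming gp immediately if it returns false), and then
// schedules.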
3487 func park_m(gp *g) {
3488 mp := getg().m
3489
3490 if trace.enabled {
3491 traceGoPark(mp.waittraceev, mp.waittraceskip)
3492 }
3493
3494
3495
3496 casgstatus(gp, _Grunning, _Gwaiting)
3497 dropg()
3498
3499 if fn := mp.waitunlockf; fn != nil {
3500 ok := fn(gp, mp.waitlock)
3501 mp.waitunlockf = nil
3502 mp.waitlock = nil
3503 if !ok {
3504 if trace.enabled {
3505 traceGoUnpark(gp, 2)
3506 }
3507 casgstatus(gp, _Gwaiting, _Grunnable)
3508 execute(gp, true)
3509 }
3510 }
3511 schedule()
3512 }
3513
3514 func goschedImpl(gp *g) {
3515 status := readgstatus(gp)
3516 if status&^_Gscan != _Grunning {
3517 dumpgstatus(gp)
3518 throw("bad g status")
3519 }
3520 casgstatus(gp, _Grunning, _Grunnable)
3521 dropg()
3522 lock(&sched.lock)
3523 globrunqput(gp)
3524 unlock(&sched.lock)
3525
3526 schedule()
3527 }
3528
3529
3530 func gosched_m(gp *g) {
3531 if trace.enabled {
3532 traceGoSched()
3533 }
3534 goschedImpl(gp)
3535 }
3536
3537
3538 func goschedguarded_m(gp *g) {
3539
3540 if !canPreemptM(gp.m) {
3541 gogo(&gp.sched)
3542 }
3543
3544 if trace.enabled {
3545 traceGoSched()
3546 }
3547 goschedImpl(gp)
3548 }
3549
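// gopreempt_m is the continuation used when a goroutine honors a synchronous
// preemption request: it traces the preemption and reschedules via goschedImpl.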
3550 func gopreempt_m(gp *g) {
3551 if trace.enabled {
3552 traceGoPreempt()
3553 }
3554 goschedImpl(gp)
3555 }
3556
3557
3558 // preemptPark parks gp and puts it in _Gpreempted.
3559 // It is called on the g0 stack when an asynchronous preemption stops gp.
3560 func preemptPark(gp *g) {
3561 if trace.enabled {
3562 traceGoPark(traceEvGoBlock, 0)
3563 }
3564 status := readgstatus(gp)
3565 if status&^_Gscan != _Grunning {
3566 dumpgstatus(gp)
3567 throw("bad g status")
3568 }
3569
3570 if gp.asyncSafePoint {
3571
3572
3573
3574 f := findfunc(gp.sched.pc)
3575 if !f.valid() {
3576 throw("preempt at unknown pc")
3577 }
3578 if f.flag&funcFlag_SPWRITE != 0 {
3579 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
3580 throw("preempt SPWRITE")
3581 }
3582 }
3583
3584
3585
3586
3587
3588
3589
3590 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
3591 dropg()
3592 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
3593 schedule()
3594 }
3595
3596
3597 // goyield is like Gosched, but it emits a GoPreempt trace event instead of a
3598 // GoSched event and puts the current G on the local run queue instead of the global queue.
3599 func goyield() {
3600 checkTimeouts()
3601 mcall(goyield_m)
3602 }
3603
3604 func goyield_m(gp *g) {
3605 if trace.enabled {
3606 traceGoPreempt()
3607 }
3608 pp := gp.m.p.ptr()
3609 casgstatus(gp, _Grunning, _Grunnable)
3610 dropg()
3611 runqput(pp, gp, false)
3612 schedule()
3613 }
3614
3615 // goexit1 finishes execution of the current goroutine.
3616 func goexit1() {
3617 if raceenabled {
3618 racegoend()
3619 }
3620 if trace.enabled {
3621 traceGoEnd()
3622 }
3623 mcall(goexit0)
3624 }
3625
3626 // goexit0 is the goexit continuation, running on the g0 stack; it recycles the dead g and reschedules.
3627 func goexit0(gp *g) {
3628 mp := getg().m
3629 pp := mp.p.ptr()
3630
3631 casgstatus(gp, _Grunning, _Gdead)
3632 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
3633 if isSystemGoroutine(gp, false) {
3634 sched.ngsys.Add(-1)
3635 }
3636 gp.m = nil
3637 locked := gp.lockedm != 0
3638 gp.lockedm = 0
3639 mp.lockedg = 0
3640 gp.preemptStop = false
3641 gp.paniconfault = false
3642 gp._defer = nil
3643 gp._panic = nil
3644 gp.writebuf = nil
3645 gp.waitreason = waitReasonZero
3646 gp.param = nil
3647 gp.labels = nil
3648 gp.timer = nil
3649
3650 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
3651
3652
3653
3654 assistWorkPerByte := gcController.assistWorkPerByte.Load()
3655 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
3656 gcController.bgScanCredit.Add(scanCredit)
3657 gp.gcAssistBytes = 0
3658 }
3659
3660 dropg()
3661
3662 if GOARCH == "wasm" {
3663 gfput(pp, gp)
3664 schedule()
3665 }
3666
3667 if mp.lockedInt != 0 {
3668 print("invalid m->lockedInt = ", mp.lockedInt, "\n")
3669 throw("internal lockOSThread error")
3670 }
3671 gfput(pp, gp)
3672 if locked {
3673
3674
3675
3676
3677
3678
3679 if GOOS != "plan9" {
3680 gogo(&mp.g0.sched)
3681 } else {
3682
3683
3684 mp.lockedExt = 0
3685 }
3686 }
3687 schedule()
3688 }
3689
3690
3691
3692
3693
3694 // save updates getg().sched to refer to pc and sp so that a following gogo
3695 // will restore pc and sp.
3696 // save must not have write barriers because invoking a write barrier can
3697 // clobber getg().sched.
3698 func save(pc, sp uintptr) {
3699 gp := getg()
3700
3701 if gp == gp.m.g0 || gp == gp.m.gsignal {
3702
3703
3704
3705
3706
3707 throw("save on system g not allowed")
3708 }
3709
3710 gp.sched.pc = pc
3711 gp.sched.sp = sp
3712 gp.sched.lr = 0
3713 gp.sched.ret = 0
3714
3715
3716
3717 if gp.sched.ctxt != nil {
3718 badctxt()
3719 }
3720 }
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754 // reentersyscall is the implementation of entersyscall: the goroutine is
3755 // about to enter a system call, and its P may be handed off to another M
3756 // while it is blocked. It saves the caller's pc and sp so the stack can be
3757 // traced and scanned during the syscall, then moves the goroutine to
3758 // _Gsyscall; nothing in between may split the stack or allocate.
3759 func reentersyscall(pc, sp uintptr) {
3760 gp := getg()
3761
3762
3763
3764 gp.m.locks++
3765
3766
3767
3768
3769
3770 gp.stackguard0 = stackPreempt
3771 gp.throwsplit = true
3772
3773
3774 save(pc, sp)
3775 gp.syscallsp = sp
3776 gp.syscallpc = pc
3777 casgstatus(gp, _Grunning, _Gsyscall)
3778 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
3779 systemstack(func() {
3780 print("entersyscall inconsistent ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
3781 throw("entersyscall")
3782 })
3783 }
3784
3785 if trace.enabled {
3786 systemstack(traceGoSysCall)
3787
3788
3789
3790 save(pc, sp)
3791 }
3792
3793 if sched.sysmonwait.Load() {
3794 systemstack(entersyscall_sysmon)
3795 save(pc, sp)
3796 }
3797
3798 if gp.m.p.ptr().runSafePointFn != 0 {
3799
3800 systemstack(runSafePointFn)
3801 save(pc, sp)
3802 }
3803
3804 gp.m.syscalltick = gp.m.p.ptr().syscalltick
3805 gp.sysblocktraced = true
3806 pp := gp.m.p.ptr()
3807 pp.m = 0
3808 gp.m.oldp.set(pp)
3809 gp.m.p = 0
3810 atomic.Store(&pp.status, _Psyscall)
3811 if sched.gcwaiting.Load() {
3812 systemstack(entersyscall_gcwait)
3813 save(pc, sp)
3814 }
3815
3816 gp.m.locks--
3817 }
3818
3819
3820
3821
3822 // entersyscall is the standard syscall entry used by the go syscall library
3823 // and normal cgo calls. It records the caller's pc and sp and calls
3824 // reentersyscall; it must not split the stack.
3825 func entersyscall() {
3826 reentersyscall(getcallerpc(), getcallersp())
3827 }
3828
3829 func entersyscall_sysmon() {
3830 lock(&sched.lock)
3831 if sched.sysmonwait.Load() {
3832 sched.sysmonwait.Store(false)
3833 notewakeup(&sched.sysmonnote)
3834 }
3835 unlock(&sched.lock)
3836 }
3837
3838 func entersyscall_gcwait() {
3839 gp := getg()
3840 pp := gp.m.oldp.ptr()
3841
3842 lock(&sched.lock)
3843 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
3844 if trace.enabled {
3845 traceGoSysBlock(pp)
3846 traceProcStop(pp)
3847 }
3848 pp.syscalltick++
3849 if sched.stopwait--; sched.stopwait == 0 {
3850 notewakeup(&sched.stopnote)
3851 }
3852 }
3853 unlock(&sched.lock)
3854 }
3855
3856
3857 // entersyscallblock is the same as entersyscall, but with a hint that the
3858 // syscall is blocking, so the P is handed off to another M immediately.
3859 func entersyscallblock() {
3860 gp := getg()
3861
3862 gp.m.locks++
3863 gp.throwsplit = true
3864 gp.stackguard0 = stackPreempt
3865 gp.m.syscalltick = gp.m.p.ptr().syscalltick
3866 gp.sysblocktraced = true
3867 gp.m.p.ptr().syscalltick++
3868
3869
3870 pc := getcallerpc()
3871 sp := getcallersp()
3872 save(pc, sp)
3873 gp.syscallsp = gp.sched.sp
3874 gp.syscallpc = gp.sched.pc
3875 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
3876 sp1 := sp
3877 sp2 := gp.sched.sp
3878 sp3 := gp.syscallsp
3879 systemstack(func() {
3880 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
3881 throw("entersyscallblock")
3882 })
3883 }
3884 casgstatus(gp, _Grunning, _Gsyscall)
3885 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
3886 systemstack(func() {
3887 print("entersyscallblock inconsistent ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
3888 throw("entersyscallblock")
3889 })
3890 }
3891
3892 systemstack(entersyscallblock_handoff)
3893
3894
3895 save(getcallerpc(), getcallersp())
3896
3897 gp.m.locks--
3898 }
3899
3900 func entersyscallblock_handoff() {
3901 if trace.enabled {
3902 traceGoSysCall()
3903 traceGoSysBlock(getg().m.p.ptr())
3904 }
3905 handoffp(releasep())
3906 }
3907
3908
3909
3910
3911
3912
3913
3914
3915 // exitsyscall is called when the goroutine g exits its system call.
3916 // It arranges for g to run on a CPU again: first the fast path (reacquire
3917 // the old P or grab an idle P), otherwise the slow path via exitsyscall0.
3918 // Write barriers are not allowed because our P may have been stolen.
3919 // It is called only from the go syscall library, not from low-level runtime syscalls.
3920 func exitsyscall() {
3921 gp := getg()
3922
3923 gp.m.locks++
3924 if getcallersp() > gp.syscallsp {
3925 throw("exitsyscall: syscall frame is no longer valid")
3926 }
3927
3928 gp.waitsince = 0
3929 oldp := gp.m.oldp.ptr()
3930 gp.m.oldp = 0
3931 if exitsyscallfast(oldp) {
3932
3933
3934 if goroutineProfile.active {
3935
3936
3937
3938 systemstack(func() {
3939 tryRecordGoroutineProfileWB(gp)
3940 })
3941 }
3942 if trace.enabled {
3943 if oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick {
3944 systemstack(traceGoStart)
3945 }
3946 }
3947
3948 gp.m.p.ptr().syscalltick++
3949
3950 casgstatus(gp, _Gsyscall, _Grunning)
3951
3952
3953
3954 gp.syscallsp = 0
3955 gp.m.locks--
3956 if gp.preempt {
3957
3958 gp.stackguard0 = stackPreempt
3959 } else {
3960
3961 gp.stackguard0 = gp.stack.lo + _StackGuard
3962 }
3963 gp.throwsplit = false
3964
3965 if sched.disable.user && !schedEnabled(gp) {
3966
3967 Gosched()
3968 }
3969
3970 return
3971 }
3972
3973 gp.sysexitticks = 0
3974 if trace.enabled {
3975
3976
3977 for oldp != nil && oldp.syscalltick == gp.m.syscalltick {
3978 osyield()
3979 }
3980
3981
3982
3983
3984 gp.sysexitticks = cputicks()
3985 }
3986
3987 gp.m.locks--
3988
3989
3990 mcall(exitsyscall0)
3991
3992
3993
3994
3995
3996
3997
3998 gp.syscallsp = 0
3999 gp.m.p.ptr().syscalltick++
4000 gp.throwsplit = false
4001 }
4002 // exitsyscallfast is the fast path of exitsyscall: it tries, without blocking,
4003 // to reacquire the P this goroutine last ran on, and failing that an idle P.
4004 func exitsyscallfast(oldp *p) bool {
4005 gp := getg()
4006
4007
4008 if sched.stopwait == freezeStopWait {
4009 return false
4010 }
4011
4012
4013 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4014
4015 wirep(oldp)
4016 exitsyscallfast_reacquired()
4017 return true
4018 }
4019
4020
4021 if sched.pidle != 0 {
4022 var ok bool
4023 systemstack(func() {
4024 ok = exitsyscallfast_pidle()
4025 if ok && trace.enabled {
4026 if oldp != nil {
4027
4028
4029 for oldp.syscalltick == gp.m.syscalltick {
4030 osyield()
4031 }
4032 }
4033 traceGoSysExit(0)
4034 }
4035 })
4036 if ok {
4037 return true
4038 }
4039 }
4040 return false
4041 }
4042
4043
4044
4045
4046
4047
4048 func exitsyscallfast_reacquired() {
4049 gp := getg()
4050 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4051 if trace.enabled {
4052
4053
4054
4055 systemstack(func() {
4056
4057 traceGoSysBlock(gp.m.p.ptr())
4058
4059 traceGoSysExit(0)
4060 })
4061 }
4062 gp.m.p.ptr().syscalltick++
4063 }
4064 }
4065
4066 func exitsyscallfast_pidle() bool {
4067 lock(&sched.lock)
4068 pp, _ := pidleget(0)
4069 if pp != nil && sched.sysmonwait.Load() {
4070 sched.sysmonwait.Store(false)
4071 notewakeup(&sched.sysmonnote)
4072 }
4073 unlock(&sched.lock)
4074 if pp != nil {
4075 acquirep(pp)
4076 return true
4077 }
4078 return false
4079 }
4080
4081
4082
4083 // exitsyscall0 is the exitsyscall slow path, running on the g0 stack.
4084 // We failed to reacquire a P quickly, so make gp runnable: run it on an idle
4085 // P if scheduling is enabled and one is free, otherwise put it on the global
4086 // run queue and stop this M. Called via mcall, so gp is the calling g of this M.
4087 func exitsyscall0(gp *g) {
4088 casgstatus(gp, _Gsyscall, _Grunnable)
4089 dropg()
4090 lock(&sched.lock)
4091 var pp *p
4092 if schedEnabled(gp) {
4093 pp, _ = pidleget(0)
4094 }
4095 var locked bool
4096 if pp == nil {
4097 globrunqput(gp)
4098
4099
4100
4101
4102
4103
4104 locked = gp.lockedm != 0
4105 } else if sched.sysmonwait.Load() {
4106 sched.sysmonwait.Store(false)
4107 notewakeup(&sched.sysmonnote)
4108 }
4109 unlock(&sched.lock)
4110 if pp != nil {
4111 acquirep(pp)
4112 execute(gp, false)
4113 }
4114 if locked {
4115
4116
4117
4118
4119 stoplockedm()
4120 execute(gp, false)
4121 }
4122 stopm()
4123 schedule()
4124 }
4125
4126
4127
4128
4129
4130 func syscall_runtime_BeforeFork() {
4131 gp := getg().m.curg
4132
4133
4134
4135
4136 gp.m.locks++
4137 sigsave(&gp.m.sigmask)
4138 sigblock(false)
4139
4140
4141
4142
4143
4144 gp.stackguard0 = stackFork
4145 }
4146
4147
4148
4149
4150
4151 func syscall_runtime_AfterFork() {
4152 gp := getg().m.curg
4153
4154
4155 gp.stackguard0 = gp.stack.lo + _StackGuard
4156
4157 msigrestore(gp.m.sigmask)
4158
4159 gp.m.locks--
4160 }
4161
4162
4163
4164 var inForkedChild bool
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177 func syscall_runtime_AfterForkInChild() {
4178
4179
4180
4181
4182 inForkedChild = true
4183
4184 clearSignalHandlers()
4185
4186
4187
4188 msigrestore(getg().m.sigmask)
4189
4190 inForkedChild = false
4191 }
4192
4193
4194
4195
4196 var pendingPreemptSignals atomic.Int32
4197
4198
4199
4200
4201 func syscall_runtime_BeforeExec() {
4202
4203 execLock.lock()
4204
4205
4206
4207 if GOOS == "darwin" || GOOS == "ios" {
4208 for pendingPreemptSignals.Load() > 0 {
4209 osyield()
4210 }
4211 }
4212 }
4213
4214
4215
4216
4217 func syscall_runtime_AfterExec() {
4218 execLock.unlock()
4219 }
4220
4221 // malg allocates a new g, with a stack big enough for stacksize bytes.
4222 func malg(stacksize int32) *g {
4223 newg := new(g)
4224 if stacksize >= 0 {
4225 stacksize = round2(_StackSystem + stacksize)
4226 systemstack(func() {
4227 newg.stack = stackalloc(uint32(stacksize))
4228 })
4229 newg.stackguard0 = newg.stack.lo + _StackGuard
4230 newg.stackguard1 = ^uintptr(0)
4231
4232
4233 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
4234 }
4235 return newg
4236 }
4237
4238
4239 // newproc creates a new g running fn and puts it on the run queue.
4240 // The compiler turns a go statement into a call to this.
4241 func newproc(fn *funcval) {
4242 gp := getg()
4243 pc := getcallerpc()
4244 systemstack(func() {
4245 newg := newproc1(fn, gp, pc)
4246
4247 pp := getg().m.p.ptr()
4248 runqput(pp, newg, true)
4249
4250 if mainStarted {
4251 wakep()
4252 }
4253 })
4254 }
4255
4256 // newproc1 creates a new g in state _Grunnable, starting at fn.
4257 // callerpc is the address of the go statement that created this goroutine.
4258 // The caller is responsible for adding the new g to the scheduler.
4259 func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
4260 if fn == nil {
4261 fatal("go of nil func value")
4262 }
4263
4264 mp := acquirem()
4265 pp := mp.p.ptr()
4266 newg := gfget(pp)
4267 if newg == nil {
4268 newg = malg(_StackMin)
4269 casgstatus(newg, _Gidle, _Gdead)
4270 allgadd(newg)
4271 }
4272 if newg.stack.hi == 0 {
4273 throw("newproc1: newg missing stack")
4274 }
4275
4276 if readgstatus(newg) != _Gdead {
4277 throw("newproc1: new g is not Gdead")
4278 }
4279
4280 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
4281 totalSize = alignUp(totalSize, sys.StackAlign)
4282 sp := newg.stack.hi - totalSize
4283 spArg := sp
4284 if usesLR {
4285
4286 *(*uintptr)(unsafe.Pointer(sp)) = 0
4287 prepGoExitFrame(sp)
4288 spArg += sys.MinFrameSize
4289 }
4290
4291 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
4292 newg.sched.sp = sp
4293 newg.stktopsp = sp
4294 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
4295 newg.sched.g = guintptr(unsafe.Pointer(newg))
4296 gostartcallfn(&newg.sched, fn)
4297 newg.gopc = callerpc
4298 newg.ancestors = saveAncestors(callergp)
4299 newg.startpc = fn.fn
4300 if isSystemGoroutine(newg, false) {
4301 sched.ngsys.Add(1)
4302 } else {
4303
4304 if mp.curg != nil {
4305 newg.labels = mp.curg.labels
4306 }
4307 if goroutineProfile.active {
4308
4309
4310
4311
4312
4313 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
4314 }
4315 }
4316
4317 newg.trackingSeq = uint8(fastrand())
4318 if newg.trackingSeq%gTrackingPeriod == 0 {
4319 newg.tracking = true
4320 }
4321 casgstatus(newg, _Gdead, _Grunnable)
4322 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
4323
4324 if pp.goidcache == pp.goidcacheend {
4325
4326
4327
4328 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
4329 pp.goidcache -= _GoidCacheBatch - 1
4330 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
4331 }
4332 newg.goid = pp.goidcache
4333 pp.goidcache++
4334 if raceenabled {
4335 newg.racectx = racegostart(callerpc)
4336 if newg.labels != nil {
4337
4338
4339 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
4340 }
4341 }
4342 if trace.enabled {
4343 traceGoCreate(newg, newg.startpc)
4344 }
4345 releasem(mp)
4346
4347 return newg
4348 }
4349
4350
4351
4352
4353 func saveAncestors(callergp *g) *[]ancestorInfo {
4354
4355 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
4356 return nil
4357 }
4358 var callerAncestors []ancestorInfo
4359 if callergp.ancestors != nil {
4360 callerAncestors = *callergp.ancestors
4361 }
4362 n := int32(len(callerAncestors)) + 1
4363 if n > debug.tracebackancestors {
4364 n = debug.tracebackancestors
4365 }
4366 ancestors := make([]ancestorInfo, n)
4367 copy(ancestors[1:], callerAncestors)
4368
4369 var pcs [_TracebackMaxFrames]uintptr
4370 npcs := gcallers(callergp, 0, pcs[:])
4371 ipcs := make([]uintptr, npcs)
4372 copy(ipcs, pcs[:])
4373 ancestors[0] = ancestorInfo{
4374 pcs: ipcs,
4375 goid: callergp.goid,
4376 gopc: callergp.gopc,
4377 }
4378
4379 ancestorsp := new([]ancestorInfo)
4380 *ancestorsp = ancestors
4381 return ancestorsp
4382 }
4383
4384
4385 // gfput puts gp on pp's free-g list; if the local list grows too long, a batch is moved to the global list.
4386 func gfput(pp *p, gp *g) {
4387 if readgstatus(gp) != _Gdead {
4388 throw("gfput: bad status (not Gdead)")
4389 }
4390
4391 stksize := gp.stack.hi - gp.stack.lo
4392
4393 if stksize != uintptr(startingStackSize) {
4394
4395 stackfree(gp.stack)
4396 gp.stack.lo = 0
4397 gp.stack.hi = 0
4398 gp.stackguard0 = 0
4399 }
4400
4401 pp.gFree.push(gp)
4402 pp.gFree.n++
4403 if pp.gFree.n >= 64 {
4404 var (
4405 inc int32
4406 stackQ gQueue
4407 noStackQ gQueue
4408 )
4409 for pp.gFree.n >= 32 {
4410 gp := pp.gFree.pop()
4411 pp.gFree.n--
4412 if gp.stack.lo == 0 {
4413 noStackQ.push(gp)
4414 } else {
4415 stackQ.push(gp)
4416 }
4417 inc++
4418 }
4419 lock(&sched.gFree.lock)
4420 sched.gFree.noStack.pushAll(noStackQ)
4421 sched.gFree.stack.pushAll(stackQ)
4422 sched.gFree.n += inc
4423 unlock(&sched.gFree.lock)
4424 }
4425 }
4426
4427
4428 // gfget gets a g from pp's free-g list, grabbing a batch from the global list if the local list is empty.
4429 func gfget(pp *p) *g {
4430 retry:
4431 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
4432 lock(&sched.gFree.lock)
4433
4434 for pp.gFree.n < 32 {
4435
4436 gp := sched.gFree.stack.pop()
4437 if gp == nil {
4438 gp = sched.gFree.noStack.pop()
4439 if gp == nil {
4440 break
4441 }
4442 }
4443 sched.gFree.n--
4444 pp.gFree.push(gp)
4445 pp.gFree.n++
4446 }
4447 unlock(&sched.gFree.lock)
4448 goto retry
4449 }
4450 gp := pp.gFree.pop()
4451 if gp == nil {
4452 return nil
4453 }
4454 pp.gFree.n--
4455 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
4456
4457
4458
4459 systemstack(func() {
4460 stackfree(gp.stack)
4461 gp.stack.lo = 0
4462 gp.stack.hi = 0
4463 gp.stackguard0 = 0
4464 })
4465 }
4466 if gp.stack.lo == 0 {
4467
4468 systemstack(func() {
4469 gp.stack = stackalloc(startingStackSize)
4470 })
4471 gp.stackguard0 = gp.stack.lo + _StackGuard
4472 } else {
4473 if raceenabled {
4474 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4475 }
4476 if msanenabled {
4477 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4478 }
4479 if asanenabled {
4480 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4481 }
4482 }
4483 return gp
4484 }
4485
4486 // gfpurge purges all cached G's from pp's free list to the global free list.
4487 func gfpurge(pp *p) {
4488 var (
4489 inc int32
4490 stackQ gQueue
4491 noStackQ gQueue
4492 )
4493 for !pp.gFree.empty() {
4494 gp := pp.gFree.pop()
4495 pp.gFree.n--
4496 if gp.stack.lo == 0 {
4497 noStackQ.push(gp)
4498 } else {
4499 stackQ.push(gp)
4500 }
4501 inc++
4502 }
4503 lock(&sched.gFree.lock)
4504 sched.gFree.noStack.pushAll(noStackQ)
4505 sched.gFree.stack.pushAll(stackQ)
4506 sched.gFree.n += inc
4507 unlock(&sched.gFree.lock)
4508 }
4509
4510
4511 func Breakpoint() {
4512 breakpoint()
4513 }
4514
4515
4516
4517 // dolockOSThread is called by LockOSThread and lockOSThread below after they
4518 // modify the m's lock counts. Preemption must not be allowed during this
4519 // call, or else the m might be different in this function than in the caller.
4520 func dolockOSThread() {
4521 if GOARCH == "wasm" {
4522 return
4523 }
4524 gp := getg()
4525 gp.m.lockedg.set(gp)
4526 gp.lockedm.set(gp.m)
4527 }
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539 // LockOSThread wires the calling goroutine to its current operating system thread.
4540 // The calling goroutine will always execute in that thread, and no other
4541 // goroutine will execute in it, until the calling goroutine has made as many
4542 // calls to UnlockOSThread as to LockOSThread.
4543 // If the calling goroutine exits without unlocking the thread, the thread
4544 // will be terminated.
4545 func LockOSThread() {
4546 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
4547
4548
4549
4550 startTemplateThread()
4551 }
4552 gp := getg()
4553 gp.m.lockedExt++
4554 if gp.m.lockedExt == 0 {
4555 gp.m.lockedExt--
4556 panic("LockOSThread nesting overflow")
4557 }
4558 dolockOSThread()
4559 }
4560
4561
4562 func lockOSThread() {
4563 getg().m.lockedInt++
4564 dolockOSThread()
4565 }
4566
4567
4568
4569
4570
4571
4572 func dounlockOSThread() {
4573 if GOARCH == "wasm" {
4574 return
4575 }
4576 gp := getg()
4577 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
4578 return
4579 }
4580 gp.m.lockedg = 0
4581 gp.lockedm = 0
4582 }
4583
4584
4585
4586
4587
4588
4589
4590
4591
4592 // UnlockOSThread undoes an earlier call to LockOSThread.
4593 // If this drops the number of active LockOSThread calls on the calling
4594 // goroutine to zero, it unwires the calling goroutine from its fixed
4595 // operating system thread. If there are no active LockOSThread calls, this is a no-op.
4596 // Before calling UnlockOSThread, the caller must ensure that the OS thread
4597 // is suitable for running other goroutines.
4598 func UnlockOSThread() {
4599 gp := getg()
4600 if gp.m.lockedExt == 0 {
4601 return
4602 }
4603 gp.m.lockedExt--
4604 dounlockOSThread()
4605 }
4606
4607
4608 func unlockOSThread() {
4609 gp := getg()
4610 if gp.m.lockedInt == 0 {
4611 systemstack(badunlockosthread)
4612 }
4613 gp.m.lockedInt--
4614 dounlockOSThread()
4615 }
4616
4617 func badunlockosthread() {
4618 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
4619 }
4620
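// gcount returns an approximate count of live user goroutines; its inputs can
// change concurrently, so the result may be slightly stale.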
4621 func gcount() int32 {
4622 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
4623 for _, pp := range allp {
4624 n -= pp.gFree.n
4625 }
4626
4627
4628
4629 if n < 1 {
4630 n = 1
4631 }
4632 return n
4633 }
4634
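// mcount returns the number of Ms that have been created and not yet freed.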
4635 func mcount() int32 {
4636 return int32(sched.mnext - sched.nmfreed)
4637 }
4638
4639 var prof struct {
4640 signalLock atomic.Uint32
4641
4642
4643
4644 hz atomic.Int32
4645 }
4646
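// The following dummy functions exist only to provide recognizable PCs that
// label special cases (system, external code, GC, VDSO, lost samples) in CPU profiles.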
4647 func _System() { _System() }
4648 func _ExternalCode() { _ExternalCode() }
4649 func _LostExternalCode() { _LostExternalCode() }
4650 func _GC() { _GC() }
4651 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
4652 func _VDSO() { _VDSO() }
4653
4654
4655 // sigprof is called if we receive a SIGPROF signal.
4656 // It is called by the signal handler and may run during STW, so it must not
4657 // allocate; it records a CPU profile sample for the interrupted code.
4658 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
4659 if prof.hz.Load() == 0 {
4660 return
4661 }
4662
4663
4664
4665
4666 if mp != nil && mp.profilehz == 0 {
4667 return
4668 }
4669
4670
4671
4672
4673
4674
4675
4676 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
4677 if f := findfunc(pc); f.valid() {
4678 if hasPrefix(funcname(f), "runtime/internal/atomic") {
4679 cpuprof.lostAtomic++
4680 return
4681 }
4682 }
4683 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
4684
4685
4686
4687 cpuprof.lostAtomic++
4688 return
4689 }
4690 }
4691
4692
4693
4694
4695
4696
4697
4698 getg().m.mallocing++
4699
4700 var stk [maxCPUProfStack]uintptr
4701 n := 0
4702 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
4703 cgoOff := 0
4704
4705
4706
4707
4708
4709 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
4710 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
4711 cgoOff++
4712 }
4713 copy(stk[:], mp.cgoCallers[:cgoOff])
4714 mp.cgoCallers[0] = 0
4715 }
4716
4717
4718 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
4719 if n > 0 {
4720 n += cgoOff
4721 }
4722 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
4723
4724
4725 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[n], len(stk[n:]), nil, nil, 0)
4726 } else if mp != nil && mp.vdsoSP != 0 {
4727
4728
4729 n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[n], len(stk[n:]), nil, nil, _TraceJumpStack)
4730 } else {
4731 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
4732 }
4733
4734 if n <= 0 {
4735
4736
4737 n = 2
4738 if inVDSOPage(pc) {
4739 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
4740 } else if pc > firstmoduledata.etext {
4741
4742 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
4743 }
4744 stk[0] = pc
4745 if mp.preemptoff != "" {
4746 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
4747 } else {
4748 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
4749 }
4750 }
4751
4752 if prof.hz.Load() != 0 {
4753
4754
4755
4756 var tagPtr *unsafe.Pointer
4757 if gp != nil && gp.m != nil && gp.m.curg != nil {
4758 tagPtr = &gp.m.curg.labels
4759 }
4760 cpuprof.add(tagPtr, stk[:n])
4761
4762 gprof := gp
4763 var pp *p
4764 if gp != nil && gp.m != nil {
4765 if gp.m.curg != nil {
4766 gprof = gp.m.curg
4767 }
4768 pp = gp.m.p.ptr()
4769 }
4770 traceCPUSample(gprof, pp, stk[:n])
4771 }
4772 getg().m.mallocing--
4773 }
4774
4775 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
4776 // If hz <= 0, setcpuprofilerate turns off CPU profiling.
4777 func setcpuprofilerate(hz int32) {
4778
4779 if hz < 0 {
4780 hz = 0
4781 }
4782
4783
4784
4785 gp := getg()
4786 gp.m.locks++
4787
4788
4789
4790
4791 setThreadCPUProfiler(0)
4792
4793 for !prof.signalLock.CompareAndSwap(0, 1) {
4794 osyield()
4795 }
4796 if prof.hz.Load() != hz {
4797 setProcessCPUProfiler(hz)
4798 prof.hz.Store(hz)
4799 }
4800 prof.signalLock.Store(0)
4801
4802 lock(&sched.lock)
4803 sched.profilehz = hz
4804 unlock(&sched.lock)
4805
4806 if hz != 0 {
4807 setThreadCPUProfiler(hz)
4808 }
4809
4810 gp.m.locks--
4811 }
4812
4813 // init initializes pp, which may be a freshly allocated p or a p previously
4814 // destroyed by a call to destroy, and transitions it to status _Pgcstop.
4815 func (pp *p) init(id int32) {
4816 pp.id = id
4817 pp.status = _Pgcstop
4818 pp.sudogcache = pp.sudogbuf[:0]
4819 pp.deferpool = pp.deferpoolbuf[:0]
4820 pp.wbBuf.reset()
4821 if pp.mcache == nil {
4822 if id == 0 {
4823 if mcache0 == nil {
4824 throw("missing mcache?")
4825 }
4826
4827
4828 pp.mcache = mcache0
4829 } else {
4830 pp.mcache = allocmcache()
4831 }
4832 }
4833 if raceenabled && pp.raceprocctx == 0 {
4834 if id == 0 {
4835 pp.raceprocctx = raceprocctx0
4836 raceprocctx0 = 0
4837 } else {
4838 pp.raceprocctx = raceproccreate()
4839 }
4840 }
4841 lockInit(&pp.timersLock, lockRankTimers)
4842
4843
4844
4845 timerpMask.set(id)
4846
4847
4848 idlepMask.clear(id)
4849 }
4850
4851
4852 // destroy releases all of the resources associated with pp and transitions
4853 // it to status _Pdead.
4854 // sched.lock must be held and the world must be stopped.
4855 func (pp *p) destroy() {
4856 assertLockHeld(&sched.lock)
4857 assertWorldStopped()
4858
4859
4860 for pp.runqhead != pp.runqtail {
4861
4862 pp.runqtail--
4863 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
4864
4865 globrunqputhead(gp)
4866 }
4867 if pp.runnext != 0 {
4868 globrunqputhead(pp.runnext.ptr())
4869 pp.runnext = 0
4870 }
4871 if len(pp.timers) > 0 {
4872 plocal := getg().m.p.ptr()
4873
4874
4875
4876
4877 lock(&plocal.timersLock)
4878 lock(&pp.timersLock)
4879 moveTimers(plocal, pp.timers)
4880 pp.timers = nil
4881 pp.numTimers.Store(0)
4882 pp.deletedTimers.Store(0)
4883 pp.timer0When.Store(0)
4884 unlock(&pp.timersLock)
4885 unlock(&plocal.timersLock)
4886 }
4887
4888 if gcphase != _GCoff {
4889 wbBufFlush1(pp)
4890 pp.gcw.dispose()
4891 }
4892 for i := range pp.sudogbuf {
4893 pp.sudogbuf[i] = nil
4894 }
4895 pp.sudogcache = pp.sudogbuf[:0]
4896 for j := range pp.deferpoolbuf {
4897 pp.deferpoolbuf[j] = nil
4898 }
4899 pp.deferpool = pp.deferpoolbuf[:0]
4900 systemstack(func() {
4901 for i := 0; i < pp.mspancache.len; i++ {
4902
4903 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
4904 }
4905 pp.mspancache.len = 0
4906 lock(&mheap_.lock)
4907 pp.pcache.flush(&mheap_.pages)
4908 unlock(&mheap_.lock)
4909 })
4910 freemcache(pp.mcache)
4911 pp.mcache = nil
4912 gfpurge(pp)
4913 traceProcFree(pp)
4914 if raceenabled {
4915 if pp.timerRaceCtx != 0 {
4916
4917
4918
4919
4920
4921 mp := getg().m
4922 phold := mp.p.ptr()
4923 mp.p.set(pp)
4924
4925 racectxend(pp.timerRaceCtx)
4926 pp.timerRaceCtx = 0
4927
4928 mp.p.set(phold)
4929 }
4930 raceprocdestroy(pp.raceprocctx)
4931 pp.raceprocctx = 0
4932 }
4933 pp.gcAssistTime = 0
4934 pp.status = _Pdead
4935 }
4936
4937
4938
4939
4940 // procresize changes the number of processors to nprocs.
4941 // sched.lock must be held and the world must be stopped; the GC must not be
4942 // running if the number of Ps actually changes, since gcworkbufs must not be
4943 // modified by either the GC or the write barrier code during the resize.
4944 // It returns the list of Ps with local work; the caller must schedule them.
4945 func procresize(nprocs int32) *p {
4946 assertLockHeld(&sched.lock)
4947 assertWorldStopped()
4948
4949 old := gomaxprocs
4950 if old < 0 || nprocs <= 0 {
4951 throw("procresize: invalid arg")
4952 }
4953 if trace.enabled {
4954 traceGomaxprocs(nprocs)
4955 }
4956
4957
4958 now := nanotime()
4959 if sched.procresizetime != 0 {
4960 sched.totaltime += int64(old) * (now - sched.procresizetime)
4961 }
4962 sched.procresizetime = now
4963
4964 maskWords := (nprocs + 31) / 32
4965
4966
4967 if nprocs > int32(len(allp)) {
4968
4969
4970 lock(&allpLock)
4971 if nprocs <= int32(cap(allp)) {
4972 allp = allp[:nprocs]
4973 } else {
4974 nallp := make([]*p, nprocs)
4975
4976
4977 copy(nallp, allp[:cap(allp)])
4978 allp = nallp
4979 }
4980
4981 if maskWords <= int32(cap(idlepMask)) {
4982 idlepMask = idlepMask[:maskWords]
4983 timerpMask = timerpMask[:maskWords]
4984 } else {
4985 nidlepMask := make([]uint32, maskWords)
4986
4987 copy(nidlepMask, idlepMask)
4988 idlepMask = nidlepMask
4989
4990 ntimerpMask := make([]uint32, maskWords)
4991 copy(ntimerpMask, timerpMask)
4992 timerpMask = ntimerpMask
4993 }
4994 unlock(&allpLock)
4995 }
4996
4997
4998 for i := old; i < nprocs; i++ {
4999 pp := allp[i]
5000 if pp == nil {
5001 pp = new(p)
5002 }
5003 pp.init(i)
5004 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5005 }
5006
5007 gp := getg()
5008 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5009
5010 gp.m.p.ptr().status = _Prunning
5011 gp.m.p.ptr().mcache.prepareForSweep()
5012 } else {
5013
5014
5015
5016
5017
5018 if gp.m.p != 0 {
5019 if trace.enabled {
5020
5021
5022
5023 traceGoSched()
5024 traceProcStop(gp.m.p.ptr())
5025 }
5026 gp.m.p.ptr().m = 0
5027 }
5028 gp.m.p = 0
5029 pp := allp[0]
5030 pp.m = 0
5031 pp.status = _Pidle
5032 acquirep(pp)
5033 if trace.enabled {
5034 traceGoStart()
5035 }
5036 }
5037
5038
5039 mcache0 = nil
5040
5041
5042 for i := nprocs; i < old; i++ {
5043 pp := allp[i]
5044 pp.destroy()
5045
5046 }
5047
5048
5049 if int32(len(allp)) != nprocs {
5050 lock(&allpLock)
5051 allp = allp[:nprocs]
5052 idlepMask = idlepMask[:maskWords]
5053 timerpMask = timerpMask[:maskWords]
5054 unlock(&allpLock)
5055 }
5056
5057 var runnablePs *p
5058 for i := nprocs - 1; i >= 0; i-- {
5059 pp := allp[i]
5060 if gp.m.p.ptr() == pp {
5061 continue
5062 }
5063 pp.status = _Pidle
5064 if runqempty(pp) {
5065 pidleput(pp, now)
5066 } else {
5067 pp.m.set(mget())
5068 pp.link.set(runnablePs)
5069 runnablePs = pp
5070 }
5071 }
5072 stealOrder.reset(uint32(nprocs))
5073 var int32p *int32 = &gomaxprocs
5074 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5075 if old != nprocs {
5076
5077 gcCPULimiter.resetCapacity(now, nprocs)
5078 }
5079 return runnablePs
5080 }
5081
5082
5083
5084
5085 // acquirep associates pp with the current m.
5086 // This function is allowed to have write barriers even if the caller
5087 // isn't, because it immediately acquires pp.
5088 func acquirep(pp *p) {
5089
5090 wirep(pp)
5091
5092
5093
5094
5095
5096 pp.mcache.prepareForSweep()
5097
5098 if trace.enabled {
5099 traceProcStart()
5100 }
5101 }
5102
5103
5104
5105
5106 // wirep is the first step of acquirep: it associates the current M with pp.
5107 // It is broken out so that write barriers can be disallowed for this part,
5108 // since we do not yet have a P.
5109 func wirep(pp *p) {
5110 gp := getg()
5111
5112 if gp.m.p != 0 {
5113 throw("wirep: already in go")
5114 }
5115 if pp.m != 0 || pp.status != _Pidle {
5116 id := int64(0)
5117 if pp.m != 0 {
5118 id = pp.m.ptr().id
5119 }
5120 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
5121 throw("wirep: invalid p state")
5122 }
5123 gp.m.p.set(pp)
5124 pp.m.set(gp.m)
5125 pp.status = _Prunning
5126 }
5127
5128 // releasep disassociates the current m from its p and returns that p.
5129 func releasep() *p {
5130 gp := getg()
5131
5132 if gp.m.p == 0 {
5133 throw("releasep: invalid arg")
5134 }
5135 pp := gp.m.p.ptr()
5136 if pp.m.ptr() != gp.m || pp.status != _Prunning {
5137 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
5138 throw("releasep: invalid p state")
5139 }
5140 if trace.enabled {
5141 traceProcStop(gp.m.p.ptr())
5142 }
5143 gp.m.p = 0
5144 pp.m = 0
5145 pp.status = _Pidle
5146 return pp
5147 }
5148
5149 func incidlelocked(v int32) {
5150 lock(&sched.lock)
5151 sched.nmidlelocked += v
5152 if v > 0 {
5153 checkdead()
5154 }
5155 unlock(&sched.lock)
5156 }
5157
5158 // checkdead checks for a deadlock situation.
5159 // The check is based on the number of running M's: if it drops to zero, there is a deadlock.
5160 // sched.lock must be held.
5161 func checkdead() {
5162 assertLockHeld(&sched.lock)
5163
5164
5165
5166
5167 if islibrary || isarchive {
5168 return
5169 }
5170
5171
5172
5173
5174
5175 if panicking.Load() > 0 {
5176 return
5177 }
5178
5179
5180
5181
5182
5183 var run0 int32
5184 if !iscgo && cgoHasExtraM {
5185 mp := lockextra(true)
5186 haveExtraM := extraMCount > 0
5187 unlockextra(mp)
5188 if haveExtraM {
5189 run0 = 1
5190 }
5191 }
5192
5193 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
5194 if run > run0 {
5195 return
5196 }
5197 if run < 0 {
5198 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
5199 throw("checkdead: inconsistent counts")
5200 }
5201
5202 grunning := 0
5203 forEachG(func(gp *g) {
5204 if isSystemGoroutine(gp, false) {
5205 return
5206 }
5207 s := readgstatus(gp)
5208 switch s &^ _Gscan {
5209 case _Gwaiting,
5210 _Gpreempted:
5211 grunning++
5212 case _Grunnable,
5213 _Grunning,
5214 _Gsyscall:
5215 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
5216 throw("checkdead: runnable g")
5217 }
5218 })
5219 if grunning == 0 {
5220 unlock(&sched.lock)
5221 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
5222 }
5223
5224
5225 if faketime != 0 {
5226 if when := timeSleepUntil(); when < maxWhen {
5227 faketime = when
5228
5229
5230 pp, _ := pidleget(faketime)
5231 if pp == nil {
5232
5233
5234 throw("checkdead: no p for timer")
5235 }
5236 mp := mget()
5237 if mp == nil {
5238
5239
5240 throw("checkdead: no m for timer")
5241 }
5242
5243
5244
5245 sched.nmspinning.Add(1)
5246 mp.spinning = true
5247 mp.nextp.set(pp)
5248 notewakeup(&mp.park)
5249 return
5250 }
5251 }
5252
5253
5254 for _, pp := range allp {
5255 if len(pp.timers) > 0 {
5256 return
5257 }
5258 }
5259
5260 unlock(&sched.lock)
5261 fatal("all goroutines are asleep - deadlock!")
5262 }
5263
5264
5265
5266 // forcegcperiod is the maximum time in nanoseconds between garbage
5267 // collections. If we go this long without a garbage collection, one is
5268 // forced to run. It is a variable for testing purposes.
5269 var forcegcperiod int64 = 2 * 60 * 1e9
5270
5271
5272
5273 var needSysmonWorkaround bool = false
5274
5275 // sysmon is the system monitor, running without a P on its own thread:
5276 // it retakes Ps blocked in syscalls, preempts long-running goroutines,
5277 // polls the network, and triggers forced GCs. Write barriers are not allowed.
5278 func sysmon() {
5279 lock(&sched.lock)
5280 sched.nmsys++
5281 checkdead()
5282 unlock(&sched.lock)
5283
5284 lasttrace := int64(0)
5285 idle := 0
5286 delay := uint32(0)
5287
5288 for {
5289 if idle == 0 {
5290 delay = 20
5291 } else if idle > 50 {
5292 delay *= 2
5293 }
5294 if delay > 10*1000 {
5295 delay = 10 * 1000
5296 }
5297 usleep(delay)
5298
5299
5300
5301
5302
5303
5304
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314 now := nanotime()
5315 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
5316 lock(&sched.lock)
5317 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
5318 syscallWake := false
5319 next := timeSleepUntil()
5320 if next > now {
5321 sched.sysmonwait.Store(true)
5322 unlock(&sched.lock)
5323
5324
5325 sleep := forcegcperiod / 2
5326 if next-now < sleep {
5327 sleep = next - now
5328 }
5329 shouldRelax := sleep >= osRelaxMinNS
5330 if shouldRelax {
5331 osRelax(true)
5332 }
5333 syscallWake = notetsleep(&sched.sysmonnote, sleep)
5334 if shouldRelax {
5335 osRelax(false)
5336 }
5337 lock(&sched.lock)
5338 sched.sysmonwait.Store(false)
5339 noteclear(&sched.sysmonnote)
5340 }
5341 if syscallWake {
5342 idle = 0
5343 delay = 20
5344 }
5345 }
5346 unlock(&sched.lock)
5347 }
5348
5349 lock(&sched.sysmonlock)
5350
5351
5352 now = nanotime()
5353
5354
5355 if *cgo_yield != nil {
5356 asmcgocall(*cgo_yield, nil)
5357 }
5358
5359 lastpoll := sched.lastpoll.Load()
5360 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
5361 sched.lastpoll.CompareAndSwap(lastpoll, now)
5362 list := netpoll(0)
5363 if !list.empty() {
5364
5365
5366
5367
5368
5369
5370
5371 incidlelocked(-1)
5372 injectglist(&list)
5373 incidlelocked(1)
5374 }
5375 }
5376 if GOOS == "netbsd" && needSysmonWorkaround {
5377
5378
5379
5380
5381
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392 if next := timeSleepUntil(); next < now {
5393 startm(nil, false)
5394 }
5395 }
5396 if scavenger.sysmonWake.Load() != 0 {
5397
5398 scavenger.wake()
5399 }
5400
5401
5402 if retake(now) != 0 {
5403 idle = 0
5404 } else {
5405 idle++
5406 }
5407
5408 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
5409 lock(&forcegc.lock)
5410 forcegc.idle.Store(false)
5411 var list gList
5412 list.push(forcegc.g)
5413 injectglist(&list)
5414 unlock(&forcegc.lock)
5415 }
5416 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
5417 lasttrace = now
5418 schedtrace(debug.scheddetail > 0)
5419 }
5420 unlock(&sched.sysmonlock)
5421 }
5422 }
5423
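// sysmontick records, per P, the scheduler and syscall ticks (and the times
// they were last observed) seen by sysmon; retake uses it to detect
// long-running goroutines and long syscalls.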
5424 type sysmontick struct {
5425 schedtick uint32
5426 schedwhen int64
5427 syscalltick uint32
5428 syscallwhen int64
5429 }
5430
5431
5432
5433 const forcePreemptNS = 10 * 1000 * 1000
5434
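// retake preempts goroutines that have run on the same P for longer than
// forcePreemptNS and retakes Ps that have been blocked in a syscall for at
// least one sysmon tick, handing them off. It returns the number of Ps retaken.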
5435 func retake(now int64) uint32 {
5436 n := 0
5437
5438
5439 lock(&allpLock)
5440
5441
5442
5443 for i := 0; i < len(allp); i++ {
5444 pp := allp[i]
5445 if pp == nil {
5446
5447
5448 continue
5449 }
5450 pd := &pp.sysmontick
5451 s := pp.status
5452 sysretake := false
5453 if s == _Prunning || s == _Psyscall {
5454
5455 t := int64(pp.schedtick)
5456 if int64(pd.schedtick) != t {
5457 pd.schedtick = uint32(t)
5458 pd.schedwhen = now
5459 } else if pd.schedwhen+forcePreemptNS <= now {
5460 preemptone(pp)
5461
5462
5463 sysretake = true
5464 }
5465 }
5466 if s == _Psyscall {
5467
5468 t := int64(pp.syscalltick)
5469 if !sysretake && int64(pd.syscalltick) != t {
5470 pd.syscalltick = uint32(t)
5471 pd.syscallwhen = now
5472 continue
5473 }
5474
5475
5476
5477 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
5478 continue
5479 }
5480
5481 unlock(&allpLock)
5482
5483
5484
5485
5486 incidlelocked(-1)
5487 if atomic.Cas(&pp.status, s, _Pidle) {
5488 if trace.enabled {
5489 traceGoSysBlock(pp)
5490 traceProcStop(pp)
5491 }
5492 n++
5493 pp.syscalltick++
5494 handoffp(pp)
5495 }
5496 incidlelocked(1)
5497 lock(&allpLock)
5498 }
5499 }
5500 unlock(&allpLock)
5501 return uint32(n)
5502 }
5503
5504 // preemptall tells all goroutines that they have been preempted and should stop.
5505 // This function is purely best-effort. It can fail to inform a goroutine if a
5506 // processor just started running it.
5507 // No locks need to be held.
5508 // Returns true if a preemption request was issued to at least one goroutine.
5509 func preemptall() bool {
5510 res := false
5511 for _, pp := range allp {
5512 if pp.status != _Prunning {
5513 continue
5514 }
5515 if preemptone(pp) {
5516 res = true
5517 }
5518 }
5519 return res
5520 }
5521
5522
5523
5524
5525
5526 // preemptone tells the goroutine running on processor pp to stop.
5527 // This function is purely best-effort. It can incorrectly fail to inform the
5528 // goroutine, or inform the wrong goroutine; even if it informs the correct
5529 // goroutine, that goroutine might ignore the request.
5530 // No lock needs to be held. Returns true if a preemption request was issued;
5531 // the actual preemption will happen at some point in the future.
5532 func preemptone(pp *p) bool {
5533 mp := pp.m.ptr()
5534 if mp == nil || mp == getg().m {
5535 return false
5536 }
5537 gp := mp.curg
5538 if gp == nil || gp == mp.g0 {
5539 return false
5540 }
5541
5542 gp.preempt = true
5543
5544
5545
5546
5547
5548 gp.stackguard0 = stackPreempt
5549
5550
5551 if preemptMSupported && debug.asyncpreemptoff == 0 {
5552 pp.preempt = true
5553 preemptM(mp)
5554 }
5555
5556 return true
5557 }
5558
5559 var starttime int64
5560
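// schedtrace prints a one-line summary of scheduler state, plus per-P, per-M,
// and per-G detail when detailed is true; sysmon calls it periodically when
// the GODEBUG schedtrace setting is enabled.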
5561 func schedtrace(detailed bool) {
5562 now := nanotime()
5563 if starttime == 0 {
5564 starttime = now
5565 }
5566
5567 lock(&sched.lock)
5568 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
5569 if detailed {
5570 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
5571 }
5572
5573
5574
5575 for i, pp := range allp {
5576 mp := pp.m.ptr()
5577 h := atomic.Load(&pp.runqhead)
5578 t := atomic.Load(&pp.runqtail)
5579 if detailed {
5580 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
5581 if mp != nil {
5582 print(mp.id)
5583 } else {
5584 print("nil")
5585 }
5586 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers), "\n")
5587 } else {
5588
5589
5590 print(" ")
5591 if i == 0 {
5592 print("[")
5593 }
5594 print(t - h)
5595 if i == len(allp)-1 {
5596 print("]\n")
5597 }
5598 }
5599 }
5600
5601 if !detailed {
5602 unlock(&sched.lock)
5603 return
5604 }
5605
5606 for mp := allm; mp != nil; mp = mp.alllink {
5607 pp := mp.p.ptr()
5608 print(" M", mp.id, ": p=")
5609 if pp != nil {
5610 print(pp.id)
5611 } else {
5612 print("nil")
5613 }
5614 print(" curg=")
5615 if mp.curg != nil {
5616 print(mp.curg.goid)
5617 } else {
5618 print("nil")
5619 }
5620 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
5621 if lockedg := mp.lockedg.ptr(); lockedg != nil {
5622 print(lockedg.goid)
5623 } else {
5624 print("nil")
5625 }
5626 print("\n")
5627 }
5628
5629 forEachG(func(gp *g) {
5630 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
5631 if gp.m != nil {
5632 print(gp.m.id)
5633 } else {
5634 print("nil")
5635 }
5636 print(" lockedm=")
5637 if lockedm := gp.lockedm.ptr(); lockedm != nil {
5638 print(lockedm.id)
5639 } else {
5640 print("nil")
5641 }
5642 print("\n")
5643 })
5644 unlock(&sched.lock)
5645 }
5646
5647
5648
5649 // schedEnableUser enables or disables the scheduling of user goroutines.
5650 // This does not stop already running user goroutines, so the caller should
5651 // first stop the world when disabling user goroutines.
5652 func schedEnableUser(enable bool) {
5653 lock(&sched.lock)
5654 if sched.disable.user == !enable {
5655 unlock(&sched.lock)
5656 return
5657 }
5658 sched.disable.user = !enable
5659 if enable {
5660 n := sched.disable.n
5661 sched.disable.n = 0
5662 globrunqputbatch(&sched.disable.runnable, n)
5663 unlock(&sched.lock)
5664 for ; n != 0 && sched.npidle.Load() != 0; n-- {
5665 startm(nil, false)
5666 }
5667 } else {
5668 unlock(&sched.lock)
5669 }
5670 }
5671
5672
5673 // schedEnabled reports whether gp should be scheduled; it returns false
5674 // when scheduling of gp is disabled.
5675 // sched.lock must be held.
5676 func schedEnabled(gp *g) bool {
5677 assertLockHeld(&sched.lock)
5678
5679 if sched.disable.user {
5680 return isSystemGoroutine(gp, true)
5681 }
5682 return true
5683 }
5684
5685
5686
5687
5688
5689
5690 func mput(mp *m) {
5691 assertLockHeld(&sched.lock)
5692
5693 mp.schedlink = sched.midle
5694 sched.midle.set(mp)
5695 sched.nmidle++
5696 checkdead()
5697 }
5698
5699
5700
5701
5702
5703
5704 func mget() *m {
5705 assertLockHeld(&sched.lock)
5706
5707 mp := sched.midle.ptr()
5708 if mp != nil {
5709 sched.midle = mp.schedlink
5710 sched.nmidle--
5711 }
5712 return mp
5713 }
5714
5715
5716
5717
5718
5719
5720 func globrunqput(gp *g) {
5721 assertLockHeld(&sched.lock)
5722
5723 sched.runq.pushBack(gp)
5724 sched.runqsize++
5725 }
5726
5727
5728
5729
5730
5731
5732 func globrunqputhead(gp *g) {
5733 assertLockHeld(&sched.lock)
5734
5735 sched.runq.push(gp)
5736 sched.runqsize++
5737 }
5738
5739
5740
5741
5742
5743
5744
5745 func globrunqputbatch(batch *gQueue, n int32) {
5746 assertLockHeld(&sched.lock)
5747
5748 sched.runq.pushBackAll(*batch)
5749 sched.runqsize += n
5750 *batch = gQueue{}
5751 }
5752
5753
5754
5755 func globrunqget(pp *p, max int32) *g {
5756 assertLockHeld(&sched.lock)
5757
5758 if sched.runqsize == 0 {
5759 return nil
5760 }
5761
5762 n := sched.runqsize/gomaxprocs + 1
5763 if n > sched.runqsize {
5764 n = sched.runqsize
5765 }
5766 if max > 0 && n > max {
5767 n = max
5768 }
5769 if n > int32(len(pp.runq))/2 {
5770 n = int32(len(pp.runq)) / 2
5771 }
5772
5773 sched.runqsize -= n
5774
5775 gp := sched.runq.pop()
5776 n--
5777 for ; n > 0; n-- {
5778 gp1 := sched.runq.pop()
5779 runqput(pp, gp1, false)
5780 }
5781 return gp
5782 }
5783
5784
5785 type pMask []uint32
5786
5787
5788 func (p pMask) read(id uint32) bool {
5789 word := id / 32
5790 mask := uint32(1) << (id % 32)
5791 return (atomic.Load(&p[word]) & mask) != 0
5792 }
5793
5794
5795 func (p pMask) set(id int32) {
5796 word := id / 32
5797 mask := uint32(1) << (id % 32)
5798 atomic.Or(&p[word], mask)
5799 }
5800
5801
5802 func (p pMask) clear(id int32) {
5803 word := id / 32
5804 mask := uint32(1) << (id % 32)
5805 atomic.And(&p[word], ^mask)
5806 }
5807
5808
5809
5810
5811
5812
5813
5814
5815
5816
5817
5818
5819
5820
5821
5822
5823
5824
5825
5826
5827
5828
5829
5830
5831
5832
5833 func updateTimerPMask(pp *p) {
5834 if pp.numTimers.Load() > 0 {
5835 return
5836 }
5837
5838
5839
5840
5841 lock(&pp.timersLock)
5842 if pp.numTimers.Load() == 0 {
5843 timerpMask.clear(pp.id)
5844 }
5845 unlock(&pp.timersLock)
5846 }
5847
5848
5849
5850
5851
5852
5853
5854
5855 // pidleput puts pp on the _Pidle list.
5856 // now must be a relatively recent call to nanotime or zero; it returns now
5857 // or the current time if now was zero. This releases ownership of pp.
5858 // sched.lock must be held. May run during STW, so write barriers are not allowed.
5859 func pidleput(pp *p, now int64) int64 {
5860 assertLockHeld(&sched.lock)
5861
5862 if !runqempty(pp) {
5863 throw("pidleput: P has non-empty run queue")
5864 }
5865 if now == 0 {
5866 now = nanotime()
5867 }
5868 updateTimerPMask(pp)
5869 idlepMask.set(pp.id)
5870 pp.link = sched.pidle
5871 sched.pidle.set(pp)
5872 sched.npidle.Add(1)
5873 if !pp.limiterEvent.start(limiterEventIdle, now) {
5874 throw("must be able to track idle limiter event")
5875 }
5876 return now
5877 }
5878
5879
5880
5881
5882
5883 // pidleget tries to get a P from the _Pidle list, acquiring ownership.
5884 // now is returned, or the current time if now was zero.
5885 // sched.lock must be held. May run during STW, so write barriers are not allowed.
5886 func pidleget(now int64) (*p, int64) {
5887 assertLockHeld(&sched.lock)
5888
5889 pp := sched.pidle.ptr()
5890 if pp != nil {
5891
5892 if now == 0 {
5893 now = nanotime()
5894 }
5895 timerpMask.set(pp.id)
5896 idlepMask.clear(pp.id)
5897 sched.pidle = pp.link
5898 sched.npidle.Add(-1)
5899 pp.limiterEvent.stop(limiterEventIdle, now)
5900 }
5901 return pp, now
5902 }
5903
5904
5905
5906
5907
5908
5909
5910
5911
5912
5913
5914 func pidlegetSpinning(now int64) (*p, int64) {
5915 assertLockHeld(&sched.lock)
5916
5917 pp, now := pidleget(now)
5918 if pp == nil {
5919
5920
5921
5922 sched.needspinning.Store(1)
5923 return nil, now
5924 }
5925
5926 return pp, now
5927 }
5928
5929
5930
5931 func runqempty(pp *p) bool {
5932
5933
5934
5935
5936 for {
5937 head := atomic.Load(&pp.runqhead)
5938 tail := atomic.Load(&pp.runqtail)
5939 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
5940 if tail == atomic.Load(&pp.runqtail) {
5941 return head == tail && runnext == 0
5942 }
5943 }
5944 }
5945
5946
5947
5948
5949
5950
5951
5952
5953
5954
5955 const randomizeScheduler = raceenabled
5956
5957 // runqput tries to put g on the local runnable queue.
5958 // If next is false, runqput adds g to the tail of the runnable queue.
5959 // If next is true, runqput puts g in the pp.runnext slot.
5960 // If the run queue is full, g is put on the global queue instead.
5961 // Executed only by the owner P.
5962 func runqput(pp *p, gp *g, next bool) {
5963 if randomizeScheduler && next && fastrandn(2) == 0 {
5964 next = false
5965 }
5966
5967 if next {
5968 retryNext:
5969 oldnext := pp.runnext
5970 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
5971 goto retryNext
5972 }
5973 if oldnext == 0 {
5974 return
5975 }
5976
5977 gp = oldnext.ptr()
5978 }
5979
5980 retry:
5981 h := atomic.LoadAcq(&pp.runqhead)
5982 t := pp.runqtail
5983 if t-h < uint32(len(pp.runq)) {
5984 pp.runq[t%uint32(len(pp.runq))].set(gp)
5985 atomic.StoreRel(&pp.runqtail, t+1)
5986 return
5987 }
5988 if runqputslow(pp, gp, h, t) {
5989 return
5990 }
5991
5992 goto retry
5993 }
5994
5995
5996
5997 func runqputslow(pp *p, gp *g, h, t uint32) bool {
5998 var batch [len(pp.runq)/2 + 1]*g
5999
6000
6001 n := t - h
6002 n = n / 2
6003 if n != uint32(len(pp.runq)/2) {
6004 throw("runqputslow: queue is not full")
6005 }
6006 for i := uint32(0); i < n; i++ {
6007 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6008 }
6009 if !atomic.CasRel(&pp.runqhead, h, h+n) {
6010 return false
6011 }
6012 batch[n] = gp
6013
6014 if randomizeScheduler {
6015 for i := uint32(1); i <= n; i++ {
6016 j := fastrandn(i + 1)
6017 batch[i], batch[j] = batch[j], batch[i]
6018 }
6019 }
6020
6021
6022 for i := uint32(0); i < n; i++ {
6023 batch[i].schedlink.set(batch[i+1])
6024 }
6025 var q gQueue
6026 q.head.set(batch[0])
6027 q.tail.set(batch[n])
6028
6029
6030 lock(&sched.lock)
6031 globrunqputbatch(&q, int32(n+1))
6032 unlock(&sched.lock)
6033 return true
6034 }
6035
6036
6037
6038
6039
6040 func runqputbatch(pp *p, q *gQueue, qsize int) {
6041 h := atomic.LoadAcq(&pp.runqhead)
6042 t := pp.runqtail
6043 n := uint32(0)
6044 for !q.empty() && t-h < uint32(len(pp.runq)) {
6045 gp := q.pop()
6046 pp.runq[t%uint32(len(pp.runq))].set(gp)
6047 t++
6048 n++
6049 }
6050 qsize -= int(n)
6051
6052 if randomizeScheduler {
6053 off := func(o uint32) uint32 {
6054 return (pp.runqtail + o) % uint32(len(pp.runq))
6055 }
6056 for i := uint32(1); i < n; i++ {
6057 j := fastrandn(i + 1)
6058 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
6059 }
6060 }
6061
6062 atomic.StoreRel(&pp.runqtail, t)
6063 if !q.empty() {
6064 lock(&sched.lock)
6065 globrunqputbatch(q, int32(qsize))
6066 unlock(&sched.lock)
6067 }
6068 }
6069
6070
6071 // runqget gets a g from the local runnable queue.
6072 // If inheritTime is true, gp should inherit the remaining time in the current
6073 // time slice; otherwise it should start a new time slice. Executed only by the owner P.
6074 func runqget(pp *p) (gp *g, inheritTime bool) {
6075
6076 next := pp.runnext
6077
6078
6079
6080 if next != 0 && pp.runnext.cas(next, 0) {
6081 return next.ptr(), true
6082 }
6083
6084 for {
6085 h := atomic.LoadAcq(&pp.runqhead)
6086 t := pp.runqtail
6087 if t == h {
6088 return nil, false
6089 }
6090 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
6091 if atomic.CasRel(&pp.runqhead, h, h+1) {
6092 return gp, false
6093 }
6094 }
6095 }
6096
6097
6098
6099 func runqdrain(pp *p) (drainQ gQueue, n uint32) {
6100 oldNext := pp.runnext
6101 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
6102 drainQ.pushBack(oldNext.ptr())
6103 n++
6104 }
6105
6106 retry:
6107 h := atomic.LoadAcq(&pp.runqhead)
6108 t := pp.runqtail
6109 qn := t - h
6110 if qn == 0 {
6111 return
6112 }
6113 if qn > uint32(len(pp.runq)) {
6114 goto retry
6115 }
6116
6117 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
6118 goto retry
6119 }
6120
6121
6122
6123
6124
6125
6126
6127
6128 for i := uint32(0); i < qn; i++ {
6129 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6130 drainQ.pushBack(gp)
6131 n++
6132 }
6133 return
6134 }
6135
6136
6137
6138
6139
6140 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
6141 for {
6142 h := atomic.LoadAcq(&pp.runqhead)
6143 t := atomic.LoadAcq(&pp.runqtail)
6144 n := t - h
6145 n = n - n/2
6146 if n == 0 {
6147 if stealRunNextG {
6148
6149 if next := pp.runnext; next != 0 {
6150 if pp.status == _Prunning {
6151
6152
6153
6154
6155
6156
6157
6158
6159
6160
6161 if GOOS != "windows" && GOOS != "openbsd" && GOOS != "netbsd" {
6162 usleep(3)
6163 } else {
6164
6165
6166
6167 osyield()
6168 }
6169 }
6170 if !pp.runnext.cas(next, 0) {
6171 continue
6172 }
6173 batch[batchHead%uint32(len(batch))] = next
6174 return 1
6175 }
6176 }
6177 return 0
6178 }
6179 if n > uint32(len(pp.runq)/2) {
6180 continue
6181 }
6182 for i := uint32(0); i < n; i++ {
6183 g := pp.runq[(h+i)%uint32(len(pp.runq))]
6184 batch[(batchHead+i)%uint32(len(batch))] = g
6185 }
6186 if atomic.CasRel(&pp.runqhead, h, h+n) {
6187 return n
6188 }
6189 }
6190 }
6191
6192
6193 // runqsteal steals half of the elements from the local runnable queue of p2
6194 // and puts them onto the local runnable queue of pp, returning one of the stolen elements (or nil if it failed).
6195 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
6196 t := pp.runqtail
6197 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
6198 if n == 0 {
6199 return nil
6200 }
6201 n--
6202 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
6203 if n == 0 {
6204 return gp
6205 }
6206 h := atomic.LoadAcq(&pp.runqhead)
6207 if t-h+n >= uint32(len(pp.runq)) {
6208 throw("runqsteal: runq overflow")
6209 }
6210 atomic.StoreRel(&pp.runqtail, t+n)
6211 return gp
6212 }
6213
6214 // A gQueue is a dequeue of Gs linked through g.schedlink.
6215 // A G can only be on one gQueue or gList at a time.
6216 type gQueue struct {
6217 head guintptr
6218 tail guintptr
6219 }
6220
6221
6222 func (q *gQueue) empty() bool {
6223 return q.head == 0
6224 }
6225
6226
6227 func (q *gQueue) push(gp *g) {
6228 gp.schedlink = q.head
6229 q.head.set(gp)
6230 if q.tail == 0 {
6231 q.tail.set(gp)
6232 }
6233 }
6234
6235
6236 func (q *gQueue) pushBack(gp *g) {
6237 gp.schedlink = 0
6238 if q.tail != 0 {
6239 q.tail.ptr().schedlink.set(gp)
6240 } else {
6241 q.head.set(gp)
6242 }
6243 q.tail.set(gp)
6244 }
6245
6246
6247
6248 func (q *gQueue) pushBackAll(q2 gQueue) {
6249 if q2.tail == 0 {
6250 return
6251 }
6252 q2.tail.ptr().schedlink = 0
6253 if q.tail != 0 {
6254 q.tail.ptr().schedlink = q2.head
6255 } else {
6256 q.head = q2.head
6257 }
6258 q.tail = q2.tail
6259 }
6260
6261
6262
6263 func (q *gQueue) pop() *g {
6264 gp := q.head.ptr()
6265 if gp != nil {
6266 q.head = gp.schedlink
6267 if q.head == 0 {
6268 q.tail = 0
6269 }
6270 }
6271 return gp
6272 }
6273
6274
6275 func (q *gQueue) popList() gList {
6276 stack := gList{q.head}
6277 *q = gQueue{}
6278 return stack
6279 }
6280
6281 // A gList is a list of Gs linked through g.schedlink.
6282 // A G can only be on one gQueue or gList at a time.
6283 type gList struct {
6284 head guintptr
6285 }
6286
6287
6288 func (l *gList) empty() bool {
6289 return l.head == 0
6290 }
6291
6292
6293 func (l *gList) push(gp *g) {
6294 gp.schedlink = l.head
6295 l.head.set(gp)
6296 }
6297
6298
6299 func (l *gList) pushAll(q gQueue) {
6300 if !q.empty() {
6301 q.tail.ptr().schedlink = l.head
6302 l.head = q.head
6303 }
6304 }
6305
6306
6307 func (l *gList) pop() *g {
6308 gp := l.head.ptr()
6309 if gp != nil {
6310 l.head = gp.schedlink
6311 }
6312 return gp
6313 }
6314
6315
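// setMaxThreads updates sched.maxmcount (the thread limit enforced by
// checkmcount) and returns the previous limit; it backs runtime/debug.SetMaxThreads.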
6316 func setMaxThreads(in int) (out int) {
6317 lock(&sched.lock)
6318 out = int(sched.maxmcount)
6319 if in > 0x7fffffff {
6320 sched.maxmcount = 0x7fffffff
6321 } else {
6322 sched.maxmcount = int32(in)
6323 }
6324 checkmcount()
6325 unlock(&sched.lock)
6326 return
6327 }
6328
6329
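// procPin disables preemption for the current goroutine and returns the id of
// the P it is pinned to; procUnpin (below) re-enables preemption.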
6330 func procPin() int {
6331 gp := getg()
6332 mp := gp.m
6333
6334 mp.locks++
6335 return int(mp.p.ptr().id)
6336 }
6337
6338
6339 func procUnpin() {
6340 gp := getg()
6341 gp.m.locks--
6342 }
6343
6344
6345
6346 func sync_runtime_procPin() int {
6347 return procPin()
6348 }
6349
6350
6351
6352 func sync_runtime_procUnpin() {
6353 procUnpin()
6354 }
6355
6356
6357
6358 func sync_atomic_runtime_procPin() int {
6359 return procPin()
6360 }
6361
6362
6363
6364 func sync_atomic_runtime_procUnpin() {
6365 procUnpin()
6366 }
6367
6368
6369
6370
6371
6372 func sync_runtime_canSpin(i int) bool {
6373
6374
6375
6376
6377
6378 if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
6379 return false
6380 }
6381 if p := getg().m.p.ptr(); !runqempty(p) {
6382 return false
6383 }
6384 return true
6385 }
6386
6387
6388
6389 func sync_runtime_doSpin() {
6390 procyield(active_spin_cnt)
6391 }
6392
6393 var stealOrder randomOrder
6394
6395
6396
6397
6398
6399 type randomOrder struct {
6400 count uint32
6401 coprimes []uint32
6402 }
6403
6404 type randomEnum struct {
6405 i uint32
6406 count uint32
6407 pos uint32
6408 inc uint32
6409 }
6410
6411 func (ord *randomOrder) reset(count uint32) {
6412 ord.count = count
6413 ord.coprimes = ord.coprimes[:0]
6414 for i := uint32(1); i <= count; i++ {
6415 if gcd(i, count) == 1 {
6416 ord.coprimes = append(ord.coprimes, i)
6417 }
6418 }
6419 }
6420
6421 func (ord *randomOrder) start(i uint32) randomEnum {
6422 return randomEnum{
6423 count: ord.count,
6424 pos: i % ord.count,
6425 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
6426 }
6427 }
6428
6429 func (enum *randomEnum) done() bool {
6430 return enum.i == enum.count
6431 }
6432
6433 func (enum *randomEnum) next() {
6434 enum.i++
6435 enum.pos = (enum.pos + enum.inc) % enum.count
6436 }
6437
6438 func (enum *randomEnum) position() uint32 {
6439 return enum.pos
6440 }
6441
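// gcd returns the greatest common divisor of a and b (Euclid's algorithm).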
6442 func gcd(a, b uint32) uint32 {
6443 for b != 0 {
6444 a, b = b, a%b
6445 }
6446 return a
6447 }
6448
6449
6450
6451 type initTask struct {
6452
6453 state uintptr
6454 ndeps uintptr
6455 nfns uintptr
6456
6457
6458 }
6459
6460
6461
6462 var inittrace tracestat
6463
6464 type tracestat struct {
6465 active bool
6466 id uint64
6467 allocs uint64
6468 bytes uint64
6469 }
6470
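// doInit runs the initialization task t: it first runs the init tasks of every
// package t depends on, then t's own init functions, recording allocation and
// timing statistics when inittrace is active.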
6471 func doInit(t *initTask) {
6472 switch t.state {
6473 case 2:
6474 return
6475 case 1:
6476 throw("recursive call during initialization - linker skew")
6477 default:
6478 t.state = 1
6479
6480 for i := uintptr(0); i < t.ndeps; i++ {
6481 p := add(unsafe.Pointer(t), (3+i)*goarch.PtrSize)
6482 t2 := *(**initTask)(p)
6483 doInit(t2)
6484 }
6485
6486 if t.nfns == 0 {
6487 t.state = 2
6488 return
6489 }
6490
6491 var (
6492 start int64
6493 before tracestat
6494 )
6495
6496 if inittrace.active {
6497 start = nanotime()
6498
6499 before = inittrace
6500 }
6501
6502 firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*goarch.PtrSize)
6503 for i := uintptr(0); i < t.nfns; i++ {
6504 p := add(firstFunc, i*goarch.PtrSize)
6505 f := *(*func())(unsafe.Pointer(&p))
6506 f()
6507 }
6508
6509 if inittrace.active {
6510 end := nanotime()
6511
6512 after := inittrace
6513
6514 f := *(*func())(unsafe.Pointer(&firstFunc))
6515 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
6516
6517 var sbuf [24]byte
6518 print("init ", pkg, " @")
6519 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
6520 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
6521 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
6522 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
6523 print("\n")
6524 }
6525
6526 t.state = 2
6527 }
6528 }
6529