Source file
src/runtime/proc.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// set using cmd/go/internal/modload.ModInfoProg
var modinfo string

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.
//
// Worker thread parking/unparking.
// We need to balance between keeping enough running worker threads to utilize
// available hardware parallelism and parking excessive running worker threads
// to conserve CPU resources and power.
var (
	m0           m
	g0           g
	mcache0      *mcache
	raceprocctx0 uintptr
	raceFiniLock mutex
)

// This slice records the initializing tasks that need to be
// done to start up the runtime. It is built by the linker.
var runtime_inittasks []*initTask

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

//go:linkname main_main main.main
func main_main()

// mainStarted indicates that the main M has started.
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// Value to use for signal mask for newly created M's.
var initSigmask sigset

// The main goroutine.
func main() {
	mp := getg().m

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	mp.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if goarch.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// An upper limit for max stack size. Used to avoid random crashes
	// after calling SetMaxStack and trying to allocate a stack that is too big,
	// since stackalloc works with 32-bit sizes.
	maxstackceiling = 2 * maxstacksize

	// Allow newproc to start new Ms.
	mainStarted = true

	if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
		systemstack(func() {
			newm(sysmon, nil, -1)
		})
	}

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if mp != &m0 {
		throw("runtime.main not on m0")
	}

	// Record when the world started.
	// Must be before doInit for tracing init.
	runtimeInitTime = nanotime()
	if runtimeInitTime == 0 {
		throw("nanotime returning zero")
	}

	if debug.inittrace != 0 {
		inittrace.id = getg().goid
		inittrace.active = true
	}

	doInit(runtime_inittasks) // Must be before defer.

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	gcenable()

	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_pthread_key_created == nil {
			throw("_cgo_pthread_key_created missing")
		}

		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}

		if set_crosscall2 == nil {
			throw("set_crosscall2 missing")
		}
		set_crosscall2()

		// Start the template thread in case we enter Go from
		// a C-created thread and need to create a new thread.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	// Run the initializing tasks. Depending on build mode this
	// list can arrive in one of two ways, but it will always
	// contain the init tasks computed by the linker for all the
	// packages in the program (excluding those added at runtime
	// by package plugin).
	for _, m := range activeModules() {
		doInit(m.inittasks)
	}

	// Disable init tracing after main init done to avoid overhead
	// of collecting statistics in malloc and newproc.
	inittrace.active = false

	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}
	fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	if raceenabled {
		runExitHooks(0) // run hooks now, since racefini does not return
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issues 3934 and 20018.
	if runningPanicDefers.Load() != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if runningPanicDefers.Load() == 0 {
				break
			}
			Gosched()
		}
	}
	if panicking.Load() != 0 {
		gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
	}
	runExitHooks(0)

	exit(0)
	for {
		var x *int32
		*x = 0
	}
}

// os_beforeExit is called from os.Exit(0).
//
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit(exitCode int) {
	runExitHooks(exitCode)
	if exitCode == 0 && raceenabled {
		racefini()
	}
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

func forcegchelper() {
	forcegc.g = getg()
	lockInit(&forcegc.lock, lockRankForcegc)
	for {
		lock(&forcegc.lock)
		if forcegc.idle.Load() {
			throw("forcegc: phase error")
		}
		forcegc.idle.Store(true)
		goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		// Time-triggered, fully concurrent.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
//
//go:nosplit
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}
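
// Example (illustrative, not part of the runtime): user code reaches the
// mcall(gosched_m) path above via runtime.Gosched. A minimal, runnable
// sketch using only the standard library:
//
//	package main
//
//	import "runtime"
//
//	func main() {
//		done := make(chan struct{})
//		go func() { close(done) }()
//		for {
//			select {
//			case <-done:
//				return
//			default:
//				runtime.Gosched() // yield instead of spinning hot
//			}
//		}
//	}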

// goschedguarded yields the processor like gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
//
//go:nosplit
func goschedguarded() {
	mcall(goschedguarded_m)
}

// goschedIfBusy yields the processor like gosched, but only does so if
// there are no idle Ps or if we're on the only P and there's nothing in
// the run queue. In both cases, there is freely available idle time.
//
//go:nosplit
func goschedIfBusy() {
	gp := getg()
	// Call gosched if gp.preempt is set; we may be in a tight loop that
	// doesn't otherwise yield.
	if !gp.preempt && sched.npidle.Load() > 0 {
		return
	}
	mcall(gosched_m)
}

// Puts the current goroutine into a waiting state and calls unlockf on the
// system stack.
//
// If unlockf returns false, the goroutine is resumed.
//
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
//
// Note that because unlockf is called after putting the G into a waiting
// state, the G may have already been readied by the time unlockf is called
// unless there is external synchronization preventing the G from being
// readied. If unlockf returns false, it must guarantee that the G cannot be
// externally readied.
//
// Reason explains why the goroutine has been parked. It is displayed in stack
// traces and heap dumps. Reasons should be unique and descriptive. Do not
// re-use reasons, add new ones.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
	}
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	mp.waitTraceBlockReason = traceReason
	mp.waitTraceSkip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
}

func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}
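
// Sketch (illustrative, not runtime code): gopark/goready form the
// block/wake primitive beneath channels, timers, and sync. A hypothetical
// wait point would pair them roughly like this; l and waiterG are assumed
// names, not real runtime variables:
//
//	// Waiter, on some goroutine:
//	lock(&l)
//	waiterG = getg()
//	goparkunlock(&l, waitReasonZero, traceBlockGeneric, 1) // sleep; l released atomically
//
//	// Waker, on another goroutine:
//	lock(&l)
//	gp := waiterG
//	waiterG = nil
//	unlock(&l)
//	goready(gp, 1) // mark gp runnable; wakep() may start a new M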

//go:nosplit
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}

//go:nosplit
func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of local cache to the central cache.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}
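
// Sketch (illustrative, not runtime code): the sudog cache above is a
// two-level pool — a per-P slice refilled from, and spilled to, a shared
// lock-protected free list. The same shape in ordinary Go (all names
// hypothetical):
//
//	type node struct{ next *node }
//
//	var (
//		centralMu sync.Mutex
//		central   *node // shared free list
//	)
//
//	func get(local *[]*node) *node {
//		if len(*local) == 0 {
//			centralMu.Lock()
//			for len(*local) < cap(*local)/2 && central != nil {
//				n := central
//				central, n.next = n.next, nil
//				*local = append(*local, n)
//			}
//			centralMu.Unlock()
//			if len(*local) == 0 {
//				*local = append(*local, new(node))
//			}
//		}
//		n := (*local)[len(*local)-1]
//		*local = (*local)[:len(*local)-1]
//		return n
//	}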

func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}

//go:nosplit
//go:nowritebarrierrec
func badmorestackg0() {
	writeErrStr("fatal: morestack on g0\n")
}

//go:nosplit
//go:nowritebarrierrec
func badmorestackgsignal() {
	writeErrStr("fatal: morestack on gsignal\n")
}

//go:nosplit
func badctxt() {
	throw("ctxt != 0")
}

func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != 0 && gp.m.lockedg != 0
}

var (
	// allgs contains all Gs ever created (including dead Gs), and thus
	// never shrinks.
	//
	// Access via the slice is protected by allglock or stop-the-world.
	// Readers that cannot take the lock may (carefully!) use the atomic
	// variables below.
	allglock mutex
	allgs    []*g

	// allglen and allgptr are atomic variables that contain len(allgs) and
	// &allgs[0] respectively. Proper ordering depends on totally-ordered
	// loads and stores. Writes are protected by allglock.
	//
	// allgptr is updated before allglen. Readers should read allglen
	// before allgptr to ensure that allglen is always <= len(allgptr). New
	// Gs appended during the race can be missed. For a consistent view of
	// all Gs, allglock must be held.
	//
	// allgptr copies should always be stored as a concrete type or
	// unsafe.Pointer, not uintptr, to ensure that GC can still reach it
	// even if it points to a stale array.
	allglen uintptr
	allgptr **g
)

func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	if &allgs[0] != allgptr {
		atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
	}
	atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
	unlock(&allglock)
}

// allGsSnapshot returns a snapshot of the slice of all Gs.
//
// The world must be stopped or allglock must be held.
func allGsSnapshot() []*g {
	assertWorldStoppedOrLockHeld(&allglock)

	// Because the world is stopped or allglock is held, allgadd
	// cannot happen concurrently with this. allgs grows
	// monotonically and existing entries never change, so we can
	// simply return a copy of the slice header. For added safety,
	// we trim everything past len because that can still change.
	return allgs[:len(allgs):len(allgs)]
}

// atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
func atomicAllG() (**g, uintptr) {
	length := atomic.Loaduintptr(&allglen)
	ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
	return ptr, length
}

// atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
func atomicAllGIndex(ptr **g, i uintptr) *g {
	return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
}

// forEachG calls fn on every G from allgs.
//
// forEachG takes a lock to exclude concurrent addition of new Gs.
func forEachG(fn func(gp *g)) {
	lock(&allglock)
	for _, gp := range allgs {
		fn(gp)
	}
	unlock(&allglock)
}

// forEachGRace calls fn on every G from allgs.
//
// forEachGRace avoids locking, but does not exclude addition of new Gs during
// execution, which may be missed.
func forEachGRace(fn func(gp *g)) {
	ptr, length := atomicAllG()
	for i := uintptr(0); i < length; i++ {
		gp := atomicAllGIndex(ptr, i)
		fn(gp)
	}
	return
}
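
// Sketch (illustrative, not runtime code): the allgs machinery publishes a
// grow-only array so racy readers stay safe. A simpler copy-on-write
// variant with sync/atomic (hypothetical names; item stands in for *g):
//
//	type item struct{ id int }
//
//	var snap atomic.Pointer[[]*item] // immutable snapshot slice
//
//	func add(it *item) { // writers serialize externally, like allglock
//		old := *snap.Load()
//		s := make([]*item, len(old), len(old)+1)
//		copy(s, old)
//		s = append(s, it)
//		snap.Store(&s) // readers see either the old or the new snapshot
//	}
//
//	func forEach(fn func(*item)) { // no lock; may miss concurrent adds
//		for _, it := range *snap.Load() {
//			fn(it)
//		}
//	}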

const (
	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
	_GoidCacheBatch = 16
)

// cpuinit sets up CPU feature flags and calls internal/cpu.Initialize. env should be the complete
// value of the GODEBUG environment variable.
func cpuinit(env string) {
	switch GOOS {
	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		cpu.DebugOptions = true
	}
	cpu.Initialize(env)

	// Support cpu feature variables are used in code generated by the compiler
	// to guard execution of instructions that can not be assumed to be always supported.
	switch GOARCH {
	case "386", "amd64":
		x86HasPOPCNT = cpu.X86.HasPOPCNT
		x86HasSSE41 = cpu.X86.HasSSE41
		x86HasFMA = cpu.X86.HasFMA

	case "arm":
		armHasVFPv4 = cpu.ARM.HasVFPv4

	case "arm64":
		arm64HasATOMICS = cpu.ARM64.HasATOMICS
	}
}

// getGodebugEarly extracts the environment variable GODEBUG from the environment on
// Unix-like operating systems and returns it. This function exists to extract GODEBUG
// early before much of the runtime is initialized.
func getGodebugEarly() string {
	const prefix = "GODEBUG="
	var env string
	switch GOOS {
	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		// Similar to goenv_unix but extracts the environment value for
		// GODEBUG directly.
		// TODO(moehrmann): remove when general goenvs() can be called before cpuinit()
		n := int32(0)
		for argv_index(argv, argc+1+n) != nil {
			n++
		}

		for i := int32(0); i < n; i++ {
			p := argv_index(argv, argc+1+i)
			s := unsafe.String(p, findnull(p))

			if hasPrefix(s, prefix) {
				env = gostring(p)[len(prefix):]
				break
			}
		}
	}
	return env
}

// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
	lockInit(&sched.lock, lockRankSched)
	lockInit(&sched.sysmonlock, lockRankSysmon)
	lockInit(&sched.deferlock, lockRankDefer)
	lockInit(&sched.sudoglock, lockRankSudog)
	lockInit(&deadlock, lockRankDeadlock)
	lockInit(&paniclk, lockRankPanic)
	lockInit(&allglock, lockRankAllg)
	lockInit(&allpLock, lockRankAllp)
	lockInit(&reflectOffs.lock, lockRankReflectOffs)
	lockInit(&finlock, lockRankFin)
	lockInit(&cpuprof.lock, lockRankCpuprof)
	traceLockInit()
	// Enforce that this lock is always a leaf lock.
	// All of this lock's critical sections should be
	// extremely short.
	lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)

	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	gp := getg()
	if raceenabled {
		gp.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000

	// The world starts stopped.
	worldStopped()

	moduledataverify()
	stackinit()
	mallocinit()
	godebug := getGodebugEarly()
	initPageTrace(godebug) // must run after mallocinit but before anything allocates
	cpuinit(godebug)       // must run before alginit
	alginit()              // maps, hash, fastrand must not be used before this call
	fastrandinit()         // must run before mcommoninit
	mcommoninit(gp.m, -1)
	modulesinit()   // provides activeModules
	typelinksinit() // uses maps, activeModules
	itabsinit()     // uses activeModules
	stkobjinit()    // must run before GC starts

	sigsave(&gp.m.sigmask)
	initSigmask = gp.m.sigmask

	goargs()
	goenvs()
	secure()
	parsedebugvars()
	gcinit()

	// If disableMemoryProfiling is set, update MemProfileRate to 0 to turn off memprofile.
	// Note: parsedebugvars may update MemProfileRate, but when disableMemoryProfiling is
	// set to true by the linker, it means that nothing is consuming the profile, it is
	// safe to set MemProfileRate to 0.
	if disableMemoryProfiling {
		MemProfileRate = 0
	}

	lock(&sched.lock)
	sched.lastpoll.Store(nanotime())
	procs := ncpu
	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
		procs = n
	}
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}
	unlock(&sched.lock)

	// World is effectively started now, as P's can run.
	worldStarted()

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
	if len(modinfo) == 1 {
		// Condition should never trigger. This code just serves
		// to ensure runtime·modinfo is kept in the resulting binary.
		modinfo = ""
	}
}
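
// Example (illustrative, not part of the runtime): the GOMAXPROCS handling
// above is what makes the environment variable observable from user code.
// A runnable sketch using only the standard library:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		// Passing 0 queries without changing the setting; schedinit
//		// seeded it from ncpu or $GOMAXPROCS via procresize.
//		fmt.Println("procs:", runtime.GOMAXPROCS(0))
//	}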

func dumpgstatus(gp *g) {
	thisg := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
}

// checkmcount checks that the active thread count (excluding Ms reserved
// for cgo callbacks) has not exceeded sched.maxmcount.
//
// sched.lock must be held.
func checkmcount() {
	assertLockHeld(&sched.lock)

	// Exclude extra M's, which are used for cgo callbacks from threads
	// created in C: they aren't running Go code and so shouldn't count
	// against the limit on threads executing Go code.
	count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
	if count > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}

// mReserveID returns the next ID to use for a new m. This new m is immediately
// considered 'running' by checkdead.
//
// sched.lock must be held.
func mReserveID() int64 {
	assertLockHeld(&sched.lock)

	if sched.mnext+1 < sched.mnext {
		throw("runtime: thread ID overflow")
	}
	id := sched.mnext
	sched.mnext++
	checkmcount()
	return id
}

// Pre-allocated ID may be passed as 'id', or omitted by passing -1.
func mcommoninit(mp *m, id int64) {
	gp := getg()

	// g0 stack won't make sense for user (and is not necessary unwindable).
	if gp != gp.m.g0 {
		callers(1, mp.createstack[:])
	}

	lock(&sched.lock)

	if id >= 0 {
		mp.id = id
	} else {
		mp.id = mReserveID()
	}

	lo := uint32(int64Hash(uint64(mp.id), fastrandseed))
	hi := uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
	if lo|hi == 0 {
		hi = 1
	}
	// Same behavior as for 1.17.
	// TODO: Simplify this.
	if goarch.BigEndian {
		mp.fastrand = uint64(lo)<<32 | uint64(hi)
	} else {
		mp.fastrand = uint64(hi)<<32 | uint64(lo)
	}

	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall() iterates over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate memory to hold a cgo traceback if the cgo call crashes.
	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
}
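
// Sketch (illustrative, not runtime code): the allm publication above —
// set the new node's link first, then atomically store the list head —
// is the classic prepend for lock-free readers. In ordinary Go
// (hypothetical names):
//
//	type rec struct{ next *rec }
//
//	var head atomic.Pointer[rec]
//
//	func publish(node *rec) { // writers serialize externally, like sched.lock
//		node.next = head.Load() // link before publish
//		head.Store(node)        // readers now reach node safely
//	}
//
//	func visit(fn func(*rec)) { // reader, no lock, like NumCgoCall over allm
//		for n := head.Load(); n != nil; n = n.next {
//			fn(n)
//		}
//	}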

func (mp *m) becomeSpinning() {
	mp.spinning = true
	sched.nmspinning.Add(1)
	sched.needspinning.Store(0)
}

func (mp *m) hasCgoOnStack() bool {
	return mp.ncgo > 0 || mp.isextra
}

var fastrandseed uintptr

func fastrandinit() {
	s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
	getRandomData(s)
}

// Mark gp ready to run.
func ready(gp *g, traceskip int, next bool) {
	if traceEnabled() {
		traceGoUnpark(gp, traceskip)
	}

	status := readgstatus(gp)

	// Mark runnable.
	mp := acquirem() // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	casgstatus(gp, _Gwaiting, _Grunnable)
	runqput(mp.p.ptr(), gp, next)
	wakep()
	releasem(mp)
}

// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff

// freezing is set to non-zero if the runtime is trying to freeze the
// world.
var freezing atomic.Bool

// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation, used during crashing.
// This function must not lock any mutexes.
func freezetheworld() {
	freezing.Store(true)
	if debug.dontfreezetheworld > 0 {
		// Under dontfreezetheworld, leave running goroutines alone so
		// their stacks remain inspectable at crash time; only sleep
		// briefly to let in-flight preemptions and scheduling settle.
		usleep(1000)
		return
	}

	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		sched.gcwaiting.Store(true)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}

// All reads and writes of g's status go through readgstatus, casgstatus
// castogscanstatus, casfrom_Gscanstatus.
//
//go:nosplit
func readgstatus(gp *g) uint32 {
	return gp.atomicstatus.Load()
}

// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall,
		_Gscanpreempted:
		if newval == oldval&^_Gscan {
			success = gp.atomicstatus.CompareAndSwap(oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
	releaseLockRank(lockRankGscan)
}

// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfromgstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gsyscall:
		if newval == oldval|_Gscan {
			r := gp.atomicstatus.CompareAndSwap(oldval, newval)
			if r {
				acquireLockRank(lockRankGscan)
			}
			return r
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}

// casgstatusAlwaysTrack is a debug flag that causes casgstatus to always track
// various latencies on every transition instead of sampling them.
var casgstatusAlwaysTrack = false

// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	acquireLockRank(lockRankGscan)
	releaseLockRank(lockRankGscan)

	// See https://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 5 * 1000
	var nextYield int64

	// loop if gp->atomicstatus is in a scan state giving
	// GC time to finish and change the state to oldval.
	for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
			throw("casgstatus: waiting for Gwaiting but is Grunnable")
		}
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
				procyield(1)
			}
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}

	if oldval == _Grunning {
		// Track every gTrackingPeriod time a goroutine transitions out of running.
		if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
			gp.tracking = true
		}
		gp.trackingSeq++
	}
	if !gp.tracking {
		return
	}

	// Handle various kinds of tracking.
	//
	// Currently:
	// - Time spent in runnable.
	// - Time spent blocked on a sync.Mutex or sync.RWMutex.
	switch oldval {
	case _Grunnable:
		// We transitioned out of runnable, so measure how much
		// time we spent in this state and add it to
		// runnableTime.
		now := nanotime()
		gp.runnableTime += now - gp.trackingStamp
		gp.trackingStamp = 0
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a lock.
			break
		}
		// Blocking on a lock, measure it. Note that because we're
		// sampling, we have to multiply by our sampling period to get
		// an estimate of the absolute amount of blocking.
		now := nanotime()
		sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
		gp.trackingStamp = 0
	}
	switch newval {
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a lock.
			break
		}
		// Blocking on a lock. Write down the timestamp.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunnable:
		// We just transitioned into runnable, so record what
		// time that happened.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunning:
		// We're transitioning into running, so turn off
		// tracking and record how much time we spent in
		// runnable.
		gp.tracking = false
		sched.timeToRun.record(gp.runnableTime)
		gp.runnableTime = 0
	}
}
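
// Sketch (illustrative, not runtime code): the retry loop above is a
// bounded-spin-then-yield CAS pattern. Outside the runtime the same shape
// looks like this, with procyield/osyield replaced by portable cousins:
//
//	var status atomic.Uint32
//
//	func transition(old, new uint32) {
//		for i := 0; !status.CompareAndSwap(old, new); i++ {
//			if i < 10 {
//				continue // brief spin: the owner is likely mid-update
//			}
//			runtime.Gosched() // yield instead of burning CPU
//		}
//	}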

// casGToWaiting transitions gp from old to _Gwaiting, and sets the wait reason.
//
// Use this over casgstatus when possible to ensure that a waitreason is set.
func casGToWaiting(gp *g, old uint32, reason waitReason) {
	// Set the wait reason before calling casgstatus, because casgstatus will use it.
	gp.waitreason = reason
	casgstatus(gp, old, _Gwaiting)
}

// casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
// Returns old status. Cannot call casgstatus directly, because we are racing with an
// async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
// it might have become Grunnable by the time we get to the cas. If we called casgstatus,
// it would loop waiting for the status to go back to Gwaiting, which it never will.
//
//go:nosplit
func casgcopystack(gp *g) uint32 {
	for {
		oldstatus := readgstatus(gp) &^ _Gscan
		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
			throw("copystack: bad status, not Gwaiting or Grunnable")
		}
		if gp.atomicstatus.CompareAndSwap(oldstatus, _Gcopystack) {
			return oldstatus
		}
	}
}

// casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted.
func casGToPreemptScan(gp *g, old, new uint32) {
	if old != _Grunning || new != _Gscan|_Gpreempted {
		throw("bad g transition")
	}
	acquireLockRank(lockRankGscan)
	for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
	}
}

// casGFromPreempted attempts to transition gp from _Gpreempted to
// _Gwaiting. If successful, the caller is responsible for
// re-scheduling gp.
func casGFromPreempted(gp *g, old, new uint32) bool {
	if old != _Gpreempted || new != _Gwaiting {
		throw("bad g transition")
	}
	gp.waitreason = waitReasonPreempted
	return gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting)
}

// stwReason is an enumeration of reasons the world is stopping.
type stwReason uint8

// Reasons to stop-the-world.
//
// Avoid reusing reasons and add new ones instead.
const (
	stwUnknown stwReason = iota
	stwGCMarkTerm
	stwGCSweepTerm
	stwWriteHeapDump
	stwGoroutineProfile
	stwGoroutineProfileCleanup
	stwAllGoroutinesStack
	stwReadMemStats
	stwAllThreadsSyscall
	stwGOMAXPROCS
	stwStartTrace
	stwStopTrace
	stwForTestCountPagesInUse
	stwForTestReadMetricsSlow
	stwForTestReadMemStatsSlow
	stwForTestPageCachePagesLeaked
	stwForTestResetDebugLog
)

func (r stwReason) String() string {
	return stwReasonStrings[r]
}

// stwReasonStrings maps each stwReason to its human-readable label,
// used by trace output and debug prints.
var stwReasonStrings = [...]string{
	stwUnknown:                     "unknown",
	stwGCMarkTerm:                  "GC mark termination",
	stwGCSweepTerm:                 "GC sweep termination",
	stwWriteHeapDump:               "write heap dump",
	stwGoroutineProfile:            "goroutine profile",
	stwGoroutineProfileCleanup:     "goroutine profile cleanup",
	stwAllGoroutinesStack:          "all goroutines stack trace",
	stwReadMemStats:                "read mem stats",
	stwAllThreadsSyscall:           "AllThreadsSyscall",
	stwGOMAXPROCS:                  "GOMAXPROCS",
	stwStartTrace:                  "start trace",
	stwStopTrace:                   "stop trace",
	stwForTestCountPagesInUse:      "CountPagesInUse (test)",
	stwForTestReadMetricsSlow:      "ReadMetricsSlow (test)",
	stwForTestReadMemStatsSlow:     "ReadMemStatsSlow (test)",
	stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
	stwForTestResetDebugLog:        "ResetDebugLog (test)",
}

// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points and records reason as the reason for
// the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume executing.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
func stopTheWorld(reason stwReason) {
	semacquire(&worldsema)
	gp := getg()
	gp.m.preemptoff = reason.String()
	systemstack(func() {
		// Mark the goroutine which called stopTheWorld preemptible so its
		// stack may be scanned.
		// This lets a mark worker scan us while we try to stop the world
		// since otherwise we could get in a mutual preemption deadlock.
		// We must not modify anything on the G stack. However, stack
		// shrinking is disallowed for preemptible goroutines, so it is safe
		// to read the G stack.
		casGToWaiting(gp, _Grunning, waitReasonStoppingTheWorld)
		stopTheWorldWithSema(reason)
		casgstatus(gp, _Gwaiting, _Grunning)
	})
}
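
// Sketch (illustrative, added commentary): runtime facilities bracket work
// with a stop/start pair, e.g. a ReadMemStats-style caller does, in effect:
//
//	stopTheWorld(stwReadMemStats)
//	// ... read or mutate state that must not race with running Ps ...
//	startTheWorld()
//
// The stwReason both labels the pause in traces and keeps the set of
// reasons enumerable.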

// startTheWorld undoes the effects of stopTheWorld.
func startTheWorld() {
	systemstack(func() { startTheWorldWithSema() })

	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	//
	// Release worldsema with direct handoff to the next waiter, but
	// acquirem so that semrelease1 doesn't try to yield our time.
	//
	// Otherwise if e.g. ReadMemStats is being called in a loop,
	// it might stomp on other attempts to stop the world, such as
	// for starting or ending GC. The operation this blocks is
	// so heavy-weight that we should just try to be as fair as
	// possible here.
	//
	// We don't want to just allow us to get preempted between now
	// and releasing the semaphore because then we keep everyone
	// (including, for example, GCs) waiting longer.
	mp := acquirem()
	mp.preemptoff = ""
	semrelease1(&worldsema, true, 0)
	releasem(mp)
}

// stopTheWorldGC has the same effect as stopTheWorld, but blocks
// until the GC is not running. It also blocks a GC from starting
// until startTheWorldGC is called.
func stopTheWorldGC(reason stwReason) {
	semacquire(&gcsema)
	stopTheWorld(reason)
}

// startTheWorldGC undoes the effects of stopTheWorldGC.
func startTheWorldGC() {
	startTheWorld()
	semrelease(&gcsema)
}

// Holding worldsema grants an M the right to try to stop the world.
var worldsema uint32 = 1

// Holding gcsema grants the M the right to block a GC, and blocks
// until the current GC is done. In particular, it prevents gomaxprocs
// from changing concurrently.
//
// TODO(mknyszek): Once gomaxprocs and the execution tracer can handle
// being changed/enabled during a GC, remove this.
var gcsema uint32 = 1

// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should stopTheWorldWithSema on the system
// stack:
//
//	semacquire(&worldsema, 0)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
1351 func stopTheWorldWithSema(reason stwReason) {
1352 if traceEnabled() {
1353 traceSTWStart(reason)
1354 }
1355 gp := getg()
1356
1357
1358
1359 if gp.m.locks > 0 {
1360 throw("stopTheWorld: holding locks")
1361 }
1362
1363 lock(&sched.lock)
1364 sched.stopwait = gomaxprocs
1365 sched.gcwaiting.Store(true)
1366 preemptall()
1367
1368 gp.m.p.ptr().status = _Pgcstop
1369 sched.stopwait--
1370
1371 for _, pp := range allp {
1372 s := pp.status
1373 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1374 if traceEnabled() {
1375 traceGoSysBlock(pp)
1376 traceProcStop(pp)
1377 }
1378 pp.syscalltick++
1379 sched.stopwait--
1380 }
1381 }
1382
1383 now := nanotime()
1384 for {
1385 pp, _ := pidleget(now)
1386 if pp == nil {
1387 break
1388 }
1389 pp.status = _Pgcstop
1390 sched.stopwait--
1391 }
1392 wait := sched.stopwait > 0
1393 unlock(&sched.lock)
1394
1395
1396 if wait {
1397 for {
1398
1399 if notetsleep(&sched.stopnote, 100*1000) {
1400 noteclear(&sched.stopnote)
1401 break
1402 }
1403 preemptall()
1404 }
1405 }
1406
1407
1408 bad := ""
1409 if sched.stopwait != 0 {
1410 bad = "stopTheWorld: not stopped (stopwait != 0)"
1411 } else {
1412 for _, pp := range allp {
1413 if pp.status != _Pgcstop {
1414 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1415 }
1416 }
1417 }
1418 if freezing.Load() {
1419
1420
1421
1422
1423 lock(&deadlock)
1424 lock(&deadlock)
1425 }
1426 if bad != "" {
1427 throw(bad)
1428 }
1429
1430 worldStopped()
1431 }
1432
1433 func startTheWorldWithSema() int64 {
1434 assertWorldStopped()
1435
1436 mp := acquirem()
1437 if netpollinited() {
1438 list := netpoll(0)
1439 injectglist(&list)
1440 }
1441 lock(&sched.lock)
1442
1443 procs := gomaxprocs
1444 if newprocs != 0 {
1445 procs = newprocs
1446 newprocs = 0
1447 }
1448 p1 := procresize(procs)
1449 sched.gcwaiting.Store(false)
1450 if sched.sysmonwait.Load() {
1451 sched.sysmonwait.Store(false)
1452 notewakeup(&sched.sysmonnote)
1453 }
1454 unlock(&sched.lock)
1455
1456 worldStarted()
1457
1458 for p1 != nil {
1459 p := p1
1460 p1 = p1.link.ptr()
1461 if p.m != 0 {
1462 mp := p.m.ptr()
1463 p.m = 0
1464 if mp.nextp != 0 {
1465 throw("startTheWorld: inconsistent mp->nextp")
1466 }
1467 mp.nextp.set(p)
1468 notewakeup(&mp.park)
1469 } else {
1470
1471 newm(nil, p, -1)
1472 }
1473 }
1474
1475
1476 startTime := nanotime()
1477 if traceEnabled() {
1478 traceSTWDone()
1479 }
1480
1481
1482
1483
1484 wakep()
1485
1486 releasem(mp)
1487
1488 return startTime
1489 }

// usesLibcall indicates whether this runtime performs system calls
// via libcall.
func usesLibcall() bool {
	switch GOOS {
	case "aix", "darwin", "illumos", "ios", "solaris", "windows":
		return true
	case "openbsd":
		return GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64"
	}
	return false
}

// mStackIsSystemAllocated indicates whether this runtime starts on a
// system-allocated stack.
func mStackIsSystemAllocated() bool {
	switch GOOS {
	case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
		return true
	case "openbsd":
		switch GOARCH {
		case "386", "amd64", "arm", "arm64":
			return true
		}
	}
	return false
}
1517
1518
1519
1520 func mstart()
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531 func mstart0() {
1532 gp := getg()
1533
1534 osStack := gp.stack.lo == 0
1535 if osStack {
1536
1537
1538
1539
1540
1541
1542
1543
1544 size := gp.stack.hi
1545 if size == 0 {
1546 size = 16384 * sys.StackGuardMultiplier
1547 }
1548 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1549 gp.stack.lo = gp.stack.hi - size + 1024
1550 }
1551
1552
1553 gp.stackguard0 = gp.stack.lo + stackGuard
1554
1555
1556 gp.stackguard1 = gp.stackguard0
1557 mstart1()
1558
1559
1560 if mStackIsSystemAllocated() {
1561
1562
1563
1564 osStack = true
1565 }
1566 mexit(osStack)
1567 }
1568
1569
1570
1571
1572
1573 func mstart1() {
1574 gp := getg()
1575
1576 if gp != gp.m.g0 {
1577 throw("bad runtime·mstart")
1578 }
1579
1580
1581
1582
1583
1584
1585
1586 gp.sched.g = guintptr(unsafe.Pointer(gp))
1587 gp.sched.pc = getcallerpc()
1588 gp.sched.sp = getcallersp()
1589
1590 asminit()
1591 minit()
1592
1593
1594
1595 if gp.m == &m0 {
1596 mstartm0()
1597 }
1598
1599 if fn := gp.m.mstartfn; fn != nil {
1600 fn()
1601 }
1602
1603 if gp.m != &m0 {
1604 acquirep(gp.m.nextp.ptr())
1605 gp.m.nextp = 0
1606 }
1607 schedule()
1608 }
1609
1610
1611
1612
1613
1614
1615
1616 func mstartm0() {
1617
1618
1619
1620 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1621 cgoHasExtraM = true
1622 newextram()
1623 }
1624 initsig(false)
1625 }

// mPark causes a thread to park itself, returning once woken.
//
//go:nosplit
func mPark() {
	gp := getg()
	notesleep(&gp.m.park)
	noteclear(&gp.m.park)
}
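
// Sketch (illustrative, not runtime code): mPark pairs with
// notewakeup(&mp.park) from another thread — a one-shot binary semaphore.
// A portable analogue with channels (hypothetical names):
//
//	type parker struct{ ch chan struct{} } // ch = make(chan struct{}, 1)
//
//	func (p *parker) park() { <-p.ch } // notesleep; noteclear is implicit
//
//	func (p *parker) unpark() { // notewakeup: wake at most once
//		select {
//		case p.ch <- struct{}{}:
//		default: // already woken
//		}
//	}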
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646 func mexit(osStack bool) {
1647 mp := getg().m
1648
1649 if mp == &m0 {
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661 handoffp(releasep())
1662 lock(&sched.lock)
1663 sched.nmfreed++
1664 checkdead()
1665 unlock(&sched.lock)
1666 mPark()
1667 throw("locked m0 woke up")
1668 }
1669
1670 sigblock(true)
1671 unminit()
1672
1673
1674 if mp.gsignal != nil {
1675 stackfree(mp.gsignal.stack)
1676
1677
1678
1679
1680 mp.gsignal = nil
1681 }
1682
1683
1684 lock(&sched.lock)
1685 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1686 if *pprev == mp {
1687 *pprev = mp.alllink
1688 goto found
1689 }
1690 }
1691 throw("m not found in allm")
1692 found:
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702 mp.freeWait.Store(freeMWait)
1703 mp.freelink = sched.freem
1704 sched.freem = mp
1705 unlock(&sched.lock)
1706
1707 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
1708
1709
1710 handoffp(releasep())
1711
1712
1713
1714
1715
1716 lock(&sched.lock)
1717 sched.nmfreed++
1718 checkdead()
1719 unlock(&sched.lock)
1720
1721 if GOOS == "darwin" || GOOS == "ios" {
1722
1723
1724 if mp.signalPending.Load() != 0 {
1725 pendingPreemptSignals.Add(-1)
1726 }
1727 }
1728
1729
1730
1731 mdestroy(mp)
1732
1733 if osStack {
1734
1735 mp.freeWait.Store(freeMRef)
1736
1737
1738
1739 return
1740 }
1741
1742
1743
1744
1745
1746 exitThread(&mp.freeWait)
1747 }
1760 func forEachP(fn func(*p)) {
1761 mp := acquirem()
1762 pp := getg().m.p.ptr()
1763
1764 lock(&sched.lock)
1765 if sched.safePointWait != 0 {
1766 throw("forEachP: sched.safePointWait != 0")
1767 }
1768 sched.safePointWait = gomaxprocs - 1
1769 sched.safePointFn = fn
1770
1771
1772 for _, p2 := range allp {
1773 if p2 != pp {
1774 atomic.Store(&p2.runSafePointFn, 1)
1775 }
1776 }
1777 preemptall()
1778
1779
1780
1781
1782
1783
1784
1785 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1786 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1787 fn(p)
1788 sched.safePointWait--
1789 }
1790 }
1791
1792 wait := sched.safePointWait > 0
1793 unlock(&sched.lock)
1794
1795
1796 fn(pp)
1797
1798
1799
1800 for _, p2 := range allp {
1801 s := p2.status
1802 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
1803 if traceEnabled() {
1804 traceGoSysBlock(p2)
1805 traceProcStop(p2)
1806 }
1807 p2.syscalltick++
1808 handoffp(p2)
1809 }
1810 }
1811
1812
1813 if wait {
1814 for {
1815
1816
1817
1818
1819 if notetsleep(&sched.safePointNote, 100*1000) {
1820 noteclear(&sched.safePointNote)
1821 break
1822 }
1823 preemptall()
1824 }
1825 }
1826 if sched.safePointWait != 0 {
1827 throw("forEachP: not done")
1828 }
1829 for _, p2 := range allp {
1830 if p2.runSafePointFn != 0 {
1831 throw("forEachP: P did not run fn")
1832 }
1833 }
1834
1835 lock(&sched.lock)
1836 sched.safePointFn = nil
1837 unlock(&sched.lock)
1838 releasem(mp)
1839 }
1852 func runSafePointFn() {
1853 p := getg().m.p.ptr()
1854
1855
1856
1857 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1858 return
1859 }
1860 sched.safePointFn(p)
1861 lock(&sched.lock)
1862 sched.safePointWait--
1863 if sched.safePointWait == 0 {
1864 notewakeup(&sched.safePointNote)
1865 }
1866 unlock(&sched.lock)
1867 }
1868
1869
1870
1871
1872 var cgoThreadStart unsafe.Pointer
1873
1874 type cgothreadstart struct {
1875 g guintptr
1876 tls *uint64
1877 fn unsafe.Pointer
1878 }
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889 func allocm(pp *p, fn func(), id int64) *m {
1890 allocmLock.rlock()
1891
1892
1893
1894
1895 acquirem()
1896
1897 gp := getg()
1898 if gp.m.p == 0 {
1899 acquirep(pp)
1900 }
1901
1902
1903
1904 if sched.freem != nil {
1905 lock(&sched.lock)
1906 var newList *m
1907 for freem := sched.freem; freem != nil; {
1908 wait := freem.freeWait.Load()
1909 if wait == freeMWait {
1910 next := freem.freelink
1911 freem.freelink = newList
1912 newList = freem
1913 freem = next
1914 continue
1915 }
1916
1917
1918
1919 if wait == freeMStack {
1920
1921
1922
1923 systemstack(func() {
1924 stackfree(freem.g0.stack)
1925 })
1926 }
1927 freem = freem.freelink
1928 }
1929 sched.freem = newList
1930 unlock(&sched.lock)
1931 }
1932
1933 mp := new(m)
1934 mp.mstartfn = fn
1935 mcommoninit(mp, id)
1936
1937
1938
1939 if iscgo || mStackIsSystemAllocated() {
1940 mp.g0 = malg(-1)
1941 } else {
1942 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
1943 }
1944 mp.g0.m = mp
1945
1946 if pp == gp.m.p.ptr() {
1947 releasep()
1948 }
1949
1950 releasem(gp.m)
1951 allocmLock.runlock()
1952 return mp
1953 }
1994 func needm(signal bool) {
1995 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1996
1997
1998
1999
2000
2001
2002 writeErrStr("fatal error: cgo callback before cgo call\n")
2003 exit(1)
2004 }
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014 var sigmask sigset
2015 sigsave(&sigmask)
2016 sigblock(false)
2017
2018
2019
2020
2021 mp, last := getExtraM()
2022
2023
2024
2025
2026
2027
2028
2029
2030 mp.needextram = last
2031
2032
2033 mp.sigmask = sigmask
2034
2035
2036
2037 osSetupTLS(mp)
2038
2039
2040
2041
2042
2043
2044
2045 setg(mp.g0)
2046 gp := getg()
2047 gp.stack.hi = getcallersp() + 1024
2048 gp.stack.lo = getcallersp() - 32*1024
2049 if !signal && _cgo_getstackbound != nil {
2050
2051
2052
2053
2054
2055 var bounds [2]uintptr
2056 asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds))
2057
2058 if bounds[0] != 0 {
2059 gp.stack.lo = bounds[0]
2060 gp.stack.hi = bounds[1]
2061 }
2062 }
2063 gp.stackguard0 = gp.stack.lo + stackGuard
2064
2065
2066
2067
2068 mp.isExtraInC = false
2069
2070
2071 asminit()
2072 minit()
2073
2074
2075 casgstatus(mp.curg, _Gdead, _Gsyscall)
2076 sched.ngsys.Add(-1)
2077 }
2078
2079
2080
2081
2082 func needAndBindM() {
2083 needm(false)
2084
2085 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2086 cgoBindM()
2087 }
2088 }
2089
2090
2091
2092
2093 func newextram() {
2094 c := extraMWaiters.Swap(0)
2095 if c > 0 {
2096 for i := uint32(0); i < c; i++ {
2097 oneNewExtraM()
2098 }
2099 } else if extraMLength.Load() == 0 {
2100
2101 oneNewExtraM()
2102 }
2103 }
2104
2105
2106 func oneNewExtraM() {
2107
2108
2109
2110
2111
2112 mp := allocm(nil, nil, -1)
2113 gp := malg(4096)
2114 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2115 gp.sched.sp = gp.stack.hi
2116 gp.sched.sp -= 4 * goarch.PtrSize
2117 gp.sched.lr = 0
2118 gp.sched.g = guintptr(unsafe.Pointer(gp))
2119 gp.syscallpc = gp.sched.pc
2120 gp.syscallsp = gp.sched.sp
2121 gp.stktopsp = gp.sched.sp
2122
2123
2124
2125
2126 casgstatus(gp, _Gidle, _Gdead)
2127 gp.m = mp
2128 mp.curg = gp
2129 mp.isextra = true
2130
2131 mp.isExtraInC = true
2132 mp.lockedInt++
2133 mp.lockedg.set(gp)
2134 gp.lockedm.set(mp)
2135 gp.goid = sched.goidgen.Add(1)
2136 if raceenabled {
2137 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2138 }
2139 if traceEnabled() {
2140 traceOneNewExtraM(gp)
2141 }
2142
2143 allgadd(gp)
2144
2145
2146
2147
2148
2149 sched.ngsys.Add(1)
2150
2151
2152 addExtraM(mp)
2153 }
2183 func dropm() {
2184
2185
2186
2187 mp := getg().m
2188
2189
2190 casgstatus(mp.curg, _Gsyscall, _Gdead)
2191 mp.curg.preemptStop = false
2192 sched.ngsys.Add(1)
2193
2194
2195
2196
2197
2198 sigmask := mp.sigmask
2199 sigblock(false)
2200 unminit()
2201
2202 setg(nil)
2203
2204 putExtraM(mp)
2205
2206 msigrestore(sigmask)
2207 }
2229 func cgoBindM() {
2230 if GOOS == "windows" || GOOS == "plan9" {
2231 fatal("bindm in unexpected GOOS")
2232 }
2233 g := getg()
2234 if g.m.g0 != g {
2235 fatal("the current g is not g0")
2236 }
2237 if _cgo_bindm != nil {
2238 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2239 }
2240 }

//go:nosplit
func getm() uintptr {
	return uintptr(unsafe.Pointer(getg().m))
}

var (
	// Locking linked list of extra M's, via mp.schedlink. Must be accessed
	// only via lockextra/unlockextra.
	//
	// Can't be atomic.Pointer[m] because we use an invalid pointer as a
	// "locked" sentinel value. M's on this list remain visible to the GC
	// because their mp.curg is on allgs.
	extraM atomic.Uintptr
	// Number of M's in the extraM list.
	extraMLength atomic.Uint32
	// Number of waiters in lockextra.
	extraMWaiters atomic.Uint32

	// Number of extra M's in use by threads.
	extraMInUse atomic.Uint32
)

// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extraM. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
//
//go:nosplit
func lockextra(nilokay bool) *m {
	const locked = 1

	incr := false
	for {
		old := extraM.Load()
		if old == locked {
			osyield_no_g()
			continue
		}
		if old == 0 && !nilokay {
			if !incr {
				// Add 1 to the number of threads
				// waiting for an M.
				// This is cleared by newextram.
				extraMWaiters.Add(1)
				incr = true
			}
			usleep_no_g(1)
			continue
		}
		if extraM.CompareAndSwap(old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		osyield_no_g()
		continue
	}
}

//go:nosplit
func unlockextra(mp *m, delta int32) {
	extraMLength.Add(delta)
	extraM.Store(uintptr(unsafe.Pointer(mp)))
}

// Return an M from the extra M list. Returns last == true if the list
// becomes empty because of this call.
//
// Spins waiting for an extra M, so caller must ensure that the list always
// has or will soon have at least one M.
//
//go:nosplit
func getExtraM() (mp *m, last bool) {
	mp = lockextra(false)
	extraMInUse.Add(1)
	unlockextra(mp.schedlink.ptr(), -1)
	return mp, mp.schedlink.ptr() == nil
}

// Returns an extra M back to the list. The extra M is no longer in use.
//
//go:nosplit
func putExtraM(mp *m) {
	extraMInUse.Add(-1)
	addExtraM(mp)
}

// Adds a newly allocated M to the extra M list.
//
//go:nosplit
func addExtraM(mp *m) {
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	unlockextra(mp, 1)
}
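
// Sketch (illustrative, not runtime code): lockextra treats a single
// pointer-sized word as both the list head and its lock, with 1 as the
// "locked" sentinel. The same trick in ordinary Go (hypothetical names):
//
//	const locked = 1
//
//	type node struct{ next *node }
//
//	var head atomic.Uintptr // 0 = empty, 1 = locked, else *node
//
//	func acquire() *node {
//		for {
//			old := head.Load()
//			if old == locked {
//				runtime.Gosched() // osyield_no_g in the runtime
//				continue
//			}
//			if head.CompareAndSwap(old, locked) {
//				return (*node)(unsafe.Pointer(old)) // may be nil
//			}
//		}
//	}
//
//	func release(n *node) {
//		head.Store(uintptr(unsafe.Pointer(n))) // unlock by publishing head
//	}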
2337
2338 var (
2339
2340
2341
2342 allocmLock rwmutex
2343
2344
2345
2346
2347 execLock rwmutex
2348 )
2349
2350
2351
2352 const (
2353 failthreadcreate = "runtime: failed to create new OS thread\n"
2354 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2355 )
2356
2357
2358
2359
2360 var newmHandoff struct {
2361 lock mutex
2362
2363
2364
2365 newm muintptr
2366
2367
2368
2369 waiting bool
2370 wake note
2371
2372
2373
2374
2375 haveTemplateThread uint32
2376 }
2377
2378
2379
2380
2381
2382
2383
2384
2385 func newm(fn func(), pp *p, id int64) {
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396 acquirem()
2397
2398 mp := allocm(pp, fn, id)
2399 mp.nextp.set(pp)
2400 mp.sigmask = initSigmask
2401 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413 lock(&newmHandoff.lock)
2414 if newmHandoff.haveTemplateThread == 0 {
2415 throw("on a locked thread with no template thread")
2416 }
2417 mp.schedlink = newmHandoff.newm
2418 newmHandoff.newm.set(mp)
2419 if newmHandoff.waiting {
2420 newmHandoff.waiting = false
2421 notewakeup(&newmHandoff.wake)
2422 }
2423 unlock(&newmHandoff.lock)
2424
2425
2426
2427 releasem(getg().m)
2428 return
2429 }
2430 newm1(mp)
2431 releasem(getg().m)
2432 }
2433
2434 func newm1(mp *m) {
2435 if iscgo {
2436 var ts cgothreadstart
2437 if _cgo_thread_start == nil {
2438 throw("_cgo_thread_start missing")
2439 }
2440 ts.g.set(mp.g0)
2441 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2442 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2443 if msanenabled {
2444 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2445 }
2446 if asanenabled {
2447 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2448 }
2449 execLock.rlock()
2450 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2451 execLock.runlock()
2452 return
2453 }
2454 execLock.rlock()
2455 newosproc(mp)
2456 execLock.runlock()
2457 }

// startTemplateThread starts the template thread if it is not already
// running.
//
// The calling thread must itself be in a known-good state.
func startTemplateThread() {
	if GOARCH == "wasm" { // no threads on wasm yet
		return
	}

	// Disable preemption to guarantee that the template thread will be
	// created before a park once haveTemplateThread is set.
	mp := acquirem()
	if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
		releasem(mp)
		return
	}
	newm(templateThread, nil, -1)
	releasem(mp)
}
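
// Sketch (illustrative, not runtime code): the haveTemplateThread guard is
// a CAS-based "once" — exactly one caller wins the 0->1 transition and
// starts the singleton thread. User code would usually spell this
// sync.Once, or with the same atomic shape (singletonLoop is hypothetical):
//
//	var started atomic.Uint32
//
//	func ensureStarted() {
//		if !started.CompareAndSwap(0, 1) {
//			return // someone else already started it
//		}
//		go singletonLoop()
//	}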
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491 func templateThread() {
2492 lock(&sched.lock)
2493 sched.nmsys++
2494 checkdead()
2495 unlock(&sched.lock)
2496
2497 for {
2498 lock(&newmHandoff.lock)
2499 for newmHandoff.newm != 0 {
2500 newm := newmHandoff.newm.ptr()
2501 newmHandoff.newm = 0
2502 unlock(&newmHandoff.lock)
2503 for newm != nil {
2504 next := newm.schedlink.ptr()
2505 newm.schedlink = 0
2506 newm1(newm)
2507 newm = next
2508 }
2509 lock(&newmHandoff.lock)
2510 }
2511 newmHandoff.waiting = true
2512 noteclear(&newmHandoff.wake)
2513 unlock(&newmHandoff.lock)
2514 notesleep(&newmHandoff.wake)
2515 }
2516 }

// Stops execution of the current m until new work is available.
// Returns with acquired P.
func stopm() {
	gp := getg()

	if gp.m.locks != 0 {
		throw("stopm holding locks")
	}
	if gp.m.p != 0 {
		throw("stopm holding p")
	}
	if gp.m.spinning {
		throw("stopm spinning")
	}

	lock(&sched.lock)
	mput(gp.m)
	unlock(&sched.lock)
	mPark()
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}

func mspinning() {
	// startm's caller incremented nmspinning. Set the new M's spinning.
	getg().m.spinning = true
}
2563 func startm(pp *p, spinning, lockheld bool) {
2580 mp := acquirem()
2581 if !lockheld {
2582 lock(&sched.lock)
2583 }
2584 if pp == nil {
2585 if spinning {
2586
2587
2588
2589 throw("startm: P required for spinning=true")
2590 }
2591 pp, _ = pidleget(0)
2592 if pp == nil {
2593 if !lockheld {
2594 unlock(&sched.lock)
2595 }
2596 releasem(mp)
2597 return
2598 }
2599 }
2600 nmp := mget()
2601 if nmp == nil {
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616 id := mReserveID()
2617 unlock(&sched.lock)
2618
2619 var fn func()
2620 if spinning {
2621
2622 fn = mspinning
2623 }
2624 newm(fn, pp, id)
2625
2626 if lockheld {
2627 lock(&sched.lock)
2628 }
2629
2630
2631 releasem(mp)
2632 return
2633 }
2634 if !lockheld {
2635 unlock(&sched.lock)
2636 }
2637 if nmp.spinning {
2638 throw("startm: m is spinning")
2639 }
2640 if nmp.nextp != 0 {
2641 throw("startm: m has p")
2642 }
2643 if spinning && !runqempty(pp) {
2644 throw("startm: p has runnable gs")
2645 }
2646
2647 nmp.spinning = spinning
2648 nmp.nextp.set(pp)
2649 notewakeup(&nmp.park)
2650
2651
2652 releasem(mp)
2653 }
2654
2655
2656
2657
2658
2659 func handoffp(pp *p) {
2660
2661
2662
2663
2664 if !runqempty(pp) || sched.runqsize != 0 {
2665 startm(pp, false, false)
2666 return
2667 }
2668
2669 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
2670 startm(pp, false, false)
2671 return
2672 }
2673
2674 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
2675 startm(pp, false, false)
2676 return
2677 }
2678
2679
2680 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
2681 sched.needspinning.Store(0)
2682 startm(pp, true, false)
2683 return
2684 }
2685 lock(&sched.lock)
2686 if sched.gcwaiting.Load() {
2687 pp.status = _Pgcstop
2688 sched.stopwait--
2689 if sched.stopwait == 0 {
2690 notewakeup(&sched.stopnote)
2691 }
2692 unlock(&sched.lock)
2693 return
2694 }
2695 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
2696 sched.safePointFn(pp)
2697 sched.safePointWait--
2698 if sched.safePointWait == 0 {
2699 notewakeup(&sched.safePointNote)
2700 }
2701 }
2702 if sched.runqsize != 0 {
2703 unlock(&sched.lock)
2704 startm(pp, false, false)
2705 return
2706 }
2707
2708
2709 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
2710 unlock(&sched.lock)
2711 startm(pp, false, false)
2712 return
2713 }
2714
2715
2716
2717 when := nobarrierWakeTime(pp)
2718 pidleput(pp, 0)
2719 unlock(&sched.lock)
2720
2721 if when != 0 {
2722 wakeNetPoller(when)
2723 }
2724 }

// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
// Must be called with a P.
func wakep() {
	// Be conservative about spinning threads, only start one if none exist
	// already.
	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
		return
	}

	// Disable preemption until ownership of pp transfers to the next M in
	// startm. Otherwise preemption here would leave pp stuck waiting to
	// enter _Pgcstop.
	mp := acquirem()

	var pp *p
	lock(&sched.lock)
	pp, _ = pidlegetSpinning(0)
	if pp == nil {
		if sched.nmspinning.Add(-1) < 0 {
			throw("wakep: negative nmspinning")
		}
		unlock(&sched.lock)
		releasem(mp)
		return
	}
	// Since we always have a P, the race in the "No M is available"
	// comment in startm doesn't apply during the small window between the
	// unlock here and lock in startm. A checkdead in between will always
	// see at least one running M (ours).
	unlock(&sched.lock)

	startm(pp, true, false)

	releasem(mp)
}
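
// Sketch (illustrative, not runtime code): wakep's guard — "only wake a
// worker if no one is already spinning" — caps wake storms with one CAS.
// The same shape for a user-level worker pool (hypothetical names):
//
//	var spinning atomic.Int32
//
//	func maybeWake(wake func()) {
//		if spinning.Load() != 0 || !spinning.CompareAndSwap(0, 1) {
//			return // an idle worker is already searching for work
//		}
//		wake() // the woken worker must eventually do spinning.Add(-1)
//	}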
2764
2765
2766
2767 func stoplockedm() {
2768 gp := getg()
2769
2770 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
2771 throw("stoplockedm: inconsistent locking")
2772 }
2773 if gp.m.p != 0 {
2774
2775 pp := releasep()
2776 handoffp(pp)
2777 }
2778 incidlelocked(1)
2779
2780 mPark()
2781 status := readgstatus(gp.m.lockedg.ptr())
2782 if status&^_Gscan != _Grunnable {
2783 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
2784 dumpgstatus(gp.m.lockedg.ptr())
2785 throw("stoplockedm: not runnable")
2786 }
2787 acquirep(gp.m.nextp.ptr())
2788 gp.m.nextp = 0
2789 }
2790
2791
2792
2793
2794
2795 func startlockedm(gp *g) {
2796 mp := gp.lockedm.ptr()
2797 if mp == getg().m {
2798 throw("startlockedm: locked to me")
2799 }
2800 if mp.nextp != 0 {
2801 throw("startlockedm: m has p")
2802 }
2803
2804 incidlelocked(-1)
2805 pp := releasep()
2806 mp.nextp.set(pp)
2807 notewakeup(&mp.park)
2808 stopm()
2809 }
2810
2811
2812
2813 func gcstopm() {
2814 gp := getg()
2815
2816 if !sched.gcwaiting.Load() {
2817 throw("gcstopm: not waiting for gc")
2818 }
2819 if gp.m.spinning {
2820 gp.m.spinning = false
2821
2822
2823 if sched.nmspinning.Add(-1) < 0 {
2824 throw("gcstopm: negative nmspinning")
2825 }
2826 }
2827 pp := releasep()
2828 lock(&sched.lock)
2829 pp.status = _Pgcstop
2830 sched.stopwait--
2831 if sched.stopwait == 0 {
2832 notewakeup(&sched.stopnote)
2833 }
2834 unlock(&sched.lock)
2835 stopm()
2836 }

// Schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
// Never returns.
//
// Write barriers are allowed because this is called immediately after
// acquiring a P in several places.
//
//go:yeswritebarrierrec
func execute(gp *g, inheritTime bool) {
	mp := getg().m

	if goroutineProfile.active {
		// Make sure that gp has had its stack written out to the goroutine
		// profile, exactly as it was when the goroutine profiler first
		// stopped the world.
		tryRecordGoroutineProfile(gp, osyield)
	}

	// Assign gp.m before entering _Grunning so running Gs have an
	// M.
	mp.curg = gp
	gp.m = mp
	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + stackGuard
	if !inheritTime {
		mp.p.ptr().schedtick++
	}

	// Check whether the profiler needs to be turned on or off.
	hz := sched.profilehz
	if mp.profilehz != hz {
		setThreadCPUProfiler(hz)
	}

	if traceEnabled() {
		// GoSysExit has to happen when we have a P, but before GoStart.
		// So we emit it here.
		if gp.syscallsp != 0 {
			traceGoSysExit()
		}
		traceGoStart()
	}

	gogo(&gp.sched)
}
2886
2887
2888
2889
2890
2891 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
2892 mp := getg().m
2893
2894
2895
2896
2897
2898 top:
2899 pp := mp.p.ptr()
2900 if sched.gcwaiting.Load() {
2901 gcstopm()
2902 goto top
2903 }
2904 if pp.runSafePointFn != 0 {
2905 runSafePointFn()
2906 }
2907
2908
2909
2910
2911
2912 now, pollUntil, _ := checkTimers(pp, 0)
2913
2914
2915 if traceEnabled() || traceShuttingDown() {
2916 gp := traceReader()
2917 if gp != nil {
2918 casgstatus(gp, _Gwaiting, _Grunnable)
2919 traceGoUnpark(gp, 0)
2920 return gp, false, true
2921 }
2922 }
2923
2924
2925 if gcBlackenEnabled != 0 {
2926 gp, tnow := gcController.findRunnableGCWorker(pp, now)
2927 if gp != nil {
2928 return gp, false, true
2929 }
2930 now = tnow
2931 }
2932
2933
2934
2935
2936 if pp.schedtick%61 == 0 && sched.runqsize > 0 {
2937 lock(&sched.lock)
2938 gp := globrunqget(pp, 1)
2939 unlock(&sched.lock)
2940 if gp != nil {
2941 return gp, false, false
2942 }
2943 }
2944
2945
2946 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
2947 if gp := wakefing(); gp != nil {
2948 ready(gp, 0, true)
2949 }
2950 }
2951 if *cgo_yield != nil {
2952 asmcgocall(*cgo_yield, nil)
2953 }
2954
2955
2956 if gp, inheritTime := runqget(pp); gp != nil {
2957 return gp, inheritTime, false
2958 }
2959
2960
2961 if sched.runqsize != 0 {
2962 lock(&sched.lock)
2963 gp := globrunqget(pp, 0)
2964 unlock(&sched.lock)
2965 if gp != nil {
2966 return gp, false, false
2967 }
2968 }
2969
2970
2971
2972
2973
2974
2975
2976
2977 if netpollinited() && netpollWaiters.Load() > 0 && sched.lastpoll.Load() != 0 {
2978 if list := netpoll(0); !list.empty() {
2979 gp := list.pop()
2980 injectglist(&list)
2981 casgstatus(gp, _Gwaiting, _Grunnable)
2982 if traceEnabled() {
2983 traceGoUnpark(gp, 0)
2984 }
2985 return gp, false, false
2986 }
2987 }
2988
2989
2990
2991
2992
2993
2994 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
2995 if !mp.spinning {
2996 mp.becomeSpinning()
2997 }
2998
2999 gp, inheritTime, tnow, w, newWork := stealWork(now)
3000 if gp != nil {
3001
3002 return gp, inheritTime, false
3003 }
3004 if newWork {
3005
3006
3007 goto top
3008 }
3009
3010 now = tnow
3011 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3012
3013 pollUntil = w
3014 }
3015 }
3016
3017
3018
3019
3020
3021 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
3022 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3023 if node != nil {
3024 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3025 gp := node.gp.ptr()
3026 casgstatus(gp, _Gwaiting, _Grunnable)
3027 if traceEnabled() {
3028 traceGoUnpark(gp, 0)
3029 }
3030 return gp, false, false
3031 }
3032 gcController.removeIdleMarkWorker()
3033 }
3034
3035
3036
3037
3038
3039 gp, otherReady := beforeIdle(now, pollUntil)
3040 if gp != nil {
3041 casgstatus(gp, _Gwaiting, _Grunnable)
3042 if traceEnabled() {
3043 traceGoUnpark(gp, 0)
3044 }
3045 return gp, false, false
3046 }
3047 if otherReady {
3048 goto top
3049 }
3050
3051
3052
3053
3054
3055 allpSnapshot := allp
3056
3057
3058 idlepMaskSnapshot := idlepMask
3059 timerpMaskSnapshot := timerpMask
3060
3061
3062 lock(&sched.lock)
3063 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3064 unlock(&sched.lock)
3065 goto top
3066 }
3067 if sched.runqsize != 0 {
3068 gp := globrunqget(pp, 0)
3069 unlock(&sched.lock)
3070 return gp, false, false
3071 }
3072 if !mp.spinning && sched.needspinning.Load() == 1 {
3073
3074 mp.becomeSpinning()
3075 unlock(&sched.lock)
3076 goto top
3077 }
3078 if releasep() != pp {
3079 throw("findrunnable: wrong p")
3080 }
3081 now = pidleput(pp, now)
3082 unlock(&sched.lock)
3120 wasSpinning := mp.spinning
3121 if mp.spinning {
3122 mp.spinning = false
3123 if sched.nmspinning.Add(-1) < 0 {
3124 throw("findrunnable: negative nmspinning")
3125 }
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3138 if pp != nil {
3139 acquirep(pp)
3140 mp.becomeSpinning()
3141 goto top
3142 }
3143
3144
3145 pp, gp := checkIdleGCNoP()
3146 if pp != nil {
3147 acquirep(pp)
3148 mp.becomeSpinning()
3149
3150
3151 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3152 casgstatus(gp, _Gwaiting, _Grunnable)
3153 if traceEnabled() {
3154 traceGoUnpark(gp, 0)
3155 }
3156 return gp, false, false
3157 }
3158
3159
3160
3161
3162
3163
3164
3165 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3166 }
3167
3168
3169 if netpollinited() && (netpollWaiters.Load() > 0 || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3170 sched.pollUntil.Store(pollUntil)
3171 if mp.p != 0 {
3172 throw("findrunnable: netpoll with p")
3173 }
3174 if mp.spinning {
3175 throw("findrunnable: netpoll with spinning")
3176 }
3177 delay := int64(-1)
3178 if pollUntil != 0 {
3179 if now == 0 {
3180 now = nanotime()
3181 }
3182 delay = pollUntil - now
3183 if delay < 0 {
3184 delay = 0
3185 }
3186 }
3187 if faketime != 0 {
3188
3189 delay = 0
3190 }
3191 list := netpoll(delay)
3192
3193 now = nanotime()
3194 sched.pollUntil.Store(0)
3195 sched.lastpoll.Store(now)
3196 if faketime != 0 && list.empty() {
3197
3198
3199 stopm()
3200 goto top
3201 }
3202 lock(&sched.lock)
3203 pp, _ := pidleget(now)
3204 unlock(&sched.lock)
3205 if pp == nil {
3206 injectglist(&list)
3207 } else {
3208 acquirep(pp)
3209 if !list.empty() {
3210 gp := list.pop()
3211 injectglist(&list)
3212 casgstatus(gp, _Gwaiting, _Grunnable)
3213 if traceEnabled() {
3214 traceGoUnpark(gp, 0)
3215 }
3216 return gp, false, false
3217 }
3218 if wasSpinning {
3219 mp.becomeSpinning()
3220 }
3221 goto top
3222 }
3223 } else if pollUntil != 0 && netpollinited() {
3224 pollerPollUntil := sched.pollUntil.Load()
3225 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3226 netpollBreak()
3227 }
3228 }
3229 stopm()
3230 goto top
3231 }
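
// Summary (added commentary, not original text): findRunnable above looks
// for work in roughly this order — trace reader and GC mark workers, an
// occasional global-queue check for fairness (schedtick%61), the finalizer
// goroutine, the local run queue, the global queue, a non-blocking
// netpoll, then work stealing from other Ps; failing all that it re-checks
// idle-priority GC marking, takes one last race-free scan over a snapshot
// of allp, releases its P, and finally blocks in netpoll or stopm.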
3232
3233
3234
3235
3236
3237 func pollWork() bool {
3238 if sched.runqsize != 0 {
3239 return true
3240 }
3241 p := getg().m.p.ptr()
3242 if !runqempty(p) {
3243 return true
3244 }
3245 if netpollinited() && netpollWaiters.Load() > 0 && sched.lastpoll.Load() != 0 {
3246 if list := netpoll(0); !list.empty() {
3247 injectglist(&list)
3248 return true
3249 }
3250 }
3251 return false
3252 }
3253
3254
3255
3256
3257
3258
3259
3260 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3261 pp := getg().m.p.ptr()
3262
3263 ranTimer := false
3264
3265 const stealTries = 4
3266 for i := 0; i < stealTries; i++ {
3267 stealTimersOrRunNextG := i == stealTries-1
3268
3269 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
3270 if sched.gcwaiting.Load() {
3271
3272 return nil, false, now, pollUntil, true
3273 }
3274 p2 := allp[enum.position()]
3275 if pp == p2 {
3276 continue
3277 }
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3293 tnow, w, ran := checkTimers(p2, now)
3294 now = tnow
3295 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3296 pollUntil = w
3297 }
3298 if ran {
3299
3300
3301
3302
3303
3304
3305
3306
3307 if gp, inheritTime := runqget(pp); gp != nil {
3308 return gp, inheritTime, now, pollUntil, ranTimer
3309 }
3310 ranTimer = true
3311 }
3312 }
3313
3314
3315 if !idlepMask.read(enum.position()) {
3316 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3317 return gp, false, now, pollUntil, ranTimer
3318 }
3319 }
3320 }
3321 }
3322
3323
3324
3325
3326 return nil, false, now, pollUntil, ranTimer
3327 }
3328
3329
3330
3331
3332
3333
3334 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3335 for id, p2 := range allpSnapshot {
3336 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3337 lock(&sched.lock)
3338 pp, _ := pidlegetSpinning(0)
3339 if pp == nil {
3340
3341 unlock(&sched.lock)
3342 return nil
3343 }
3344 unlock(&sched.lock)
3345 return pp
3346 }
3347 }
3348
3349
3350 return nil
3351 }
3352
3353
3354
3355
3356 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3357 for id, p2 := range allpSnapshot {
3358 if timerpMaskSnapshot.read(uint32(id)) {
3359 w := nobarrierWakeTime(p2)
3360 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3361 pollUntil = w
3362 }
3363 }
3364 }
3365
3366 return pollUntil
3367 }
3368
3369
3370
3371
3372
3373 func checkIdleGCNoP() (*p, *g) {
3374
3375
3376
3377
3378
3379
3380 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3381 return nil, nil
3382 }
3383 if !gcMarkWorkAvailable(nil) {
3384 return nil, nil
3385 }
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404 lock(&sched.lock)
3405 pp, now := pidlegetSpinning(0)
3406 if pp == nil {
3407 unlock(&sched.lock)
3408 return nil, nil
3409 }
3410
3411
3412 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3413 pidleput(pp, now)
3414 unlock(&sched.lock)
3415 return nil, nil
3416 }
3417
3418 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3419 if node == nil {
3420 pidleput(pp, now)
3421 unlock(&sched.lock)
3422 gcController.removeIdleMarkWorker()
3423 return nil, nil
3424 }
3425
3426 unlock(&sched.lock)
3427
3428 return pp, node.gp.ptr()
3429 }
3430
3431
3432
3433
3434 func wakeNetPoller(when int64) {
3435 if sched.lastpoll.Load() == 0 {
3436
3437
3438
3439
3440 pollerPollUntil := sched.pollUntil.Load()
3441 if pollerPollUntil == 0 || pollerPollUntil > when {
3442 netpollBreak()
3443 }
3444 } else {
3445
3446
3447 if GOOS != "plan9" {
3448 wakep()
3449 }
3450 }
3451 }

func resetspinning() {
	gp := getg()
	if !gp.m.spinning {
		throw("resetspinning: not a spinning m")
	}
	gp.m.spinning = false
	nmspinning := sched.nmspinning.Add(-1)
	if nmspinning < 0 {
		throw("findrunnable: negative nmspinning")
	}
	// M wakeup policy is deliberately somewhat conservative, so check if we
	// need to wakeup another P here. See "Worker thread parking/unparking"
	// comment at the top of the file for details.
	wakep()
}
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477 func injectglist(glist *gList) {
3478 if glist.empty() {
3479 return
3480 }
3481 if traceEnabled() {
3482 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
3483 traceGoUnpark(gp, 0)
3484 }
3485 }
3486
3487
3488
3489 head := glist.head.ptr()
3490 var tail *g
3491 qsize := 0
3492 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3493 tail = gp
3494 qsize++
3495 casgstatus(gp, _Gwaiting, _Grunnable)
3496 }
3497
3498
3499 var q gQueue
3500 q.head.set(head)
3501 q.tail.set(tail)
3502 *glist = gList{}
3503
3504 startIdle := func(n int) {
3505 for i := 0; i < n; i++ {
3506 mp := acquirem()
3507 lock(&sched.lock)
3508
3509 pp, _ := pidlegetSpinning(0)
3510 if pp == nil {
3511 unlock(&sched.lock)
3512 releasem(mp)
3513 break
3514 }
3515
3516 startm(pp, false, true)
3517 unlock(&sched.lock)
3518 releasem(mp)
3519 }
3520 }
3521
3522 pp := getg().m.p.ptr()
3523 if pp == nil {
3524 lock(&sched.lock)
3525 globrunqputbatch(&q, int32(qsize))
3526 unlock(&sched.lock)
3527 startIdle(qsize)
3528 return
3529 }
3530
3531 npidle := int(sched.npidle.Load())
3532 var globq gQueue
3533 var n int
3534 for n = 0; n < npidle && !q.empty(); n++ {
3535 g := q.pop()
3536 globq.pushBack(g)
3537 }
3538 if n > 0 {
3539 lock(&sched.lock)
3540 globrunqputbatch(&globq, int32(n))
3541 unlock(&sched.lock)
3542 startIdle(n)
3543 qsize -= n
3544 }
3545
3546 if !q.empty() {
3547 runqputbatch(pp, &q, qsize)
3548 }
3549 }
3550
3551 // One round of scheduler: find a runnable goroutine and execute it.
3552 // Never returns.
3553 func schedule() {
3554 mp := getg().m
3555
3556 if mp.locks != 0 {
3557 throw("schedule: holding locks")
3558 }
3559
3560 if mp.lockedg != 0 {
3561 stoplockedm()
3562 execute(mp.lockedg.ptr(), false)
3563 }
3564
3565 // We should not schedule away from a g that is executing a cgo call,
3566 // since the cgo call is using the m's g0 stack.
3567 if mp.incgo {
3568 throw("schedule: in cgo")
3569 }
3570
3571 top:
3572 pp := mp.p.ptr()
3573 pp.preempt = false
3574
3575 // Safety check: if we are spinning, the run queue should be empty.
3576 // Check this before calling checkTimers, as that might call
3577 // goready to put a ready goroutine on the local run queue.
3578 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
3579 throw("schedule: spinning with local work")
3580 }
3581
3582 gp, inheritTime, tryWakeP := findRunnable()
3583
3584 if debug.dontfreezetheworld > 0 && freezing.Load() {
3585 // See comment in freezetheworld. We don't want to perturb
3586 // scheduler state, so we didn't gcstopm in findRunnable, but
3587 // also don't want to allow new goroutines to run.
3588 //
3589 // Deadlock here rather than in the findRunnable loop so if
3590 // findRunnable is stuck in a loop we don't perturb that
3591 // either.
3592 lock(&deadlock)
3593 lock(&deadlock)
3594 }
3595
3596 // This thread is going to run a goroutine and is not spinning anymore,
3597 // so if it was marked as spinning we need to reset it now and potentially
3598 // start a new spinning M.
3599 if mp.spinning {
3600 resetspinning()
3601 }
3602
3603 if sched.disable.user && !schedEnabled(gp) {
3604 // Scheduling of this goroutine is disabled. Put it on
3605 // the list of pending runnable goroutines for when we
3606 // re-enable user scheduling and look again.
3607 lock(&sched.lock)
3608 if schedEnabled(gp) {
3609 // Something re-enabled scheduling while we
3610 // were acquiring the lock.
3611 unlock(&sched.lock)
3612 } else {
3613 sched.disable.runnable.pushBack(gp)
3614 sched.disable.n++
3615 unlock(&sched.lock)
3616 goto top
3617 }
3618 }
3619
3620 // If about to schedule a not-normal goroutine (a GCworker or tracereader),
3621 // wake a P if there is one.
3622 if tryWakeP {
3623 wakep()
3624 }
3625 if gp.lockedm != 0 {
3626 // Hands off own p to the locked m,
3627 // then blocks waiting for a new p.
3628 startlockedm(gp)
3629 goto top
3630 }
3631
3632 execute(gp, inheritTime)
3633 }
3634
3635 // dropg removes the association between m and the current goroutine m->curg (gp for short).
3636 // Typically a caller sets gp's status away from Grunning and then
3637 // immediately calls dropg to finish the job. The caller is also responsible
3638 // for arranging that gp will be restarted using ready at an
3639 // appropriate time. After calling dropg and arranging for gp to be
3640 // readied later, the caller can do other work but eventually should
3641 // call schedule to restart the scheduling of goroutines on this m.
3642 func dropg() {
3643 gp := getg()
3644
3645 setMNoWB(&gp.m.curg.m, nil)
3646 setGNoWB(&gp.m.curg, nil)
3647 }
3648
3649 // checkTimers runs any timers for the P that are ready.
3650 // If now is not 0 it is the current time.
3651 // It returns the passed time or the current time if now was passed as 0,
3652 // the time when the next timer should run or 0 if there is no next timer,
3653 // and reports whether it ran any timers.
3654 // If the time when the next timer should run is not 0,
3655 // it is always larger than the returned time.
3656 // We pass now in and out to avoid extra calls of nanotime.
3657 //
3658 //go:yeswritebarrierrec
3659 func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
3660
3661
3662 next := pp.timer0When.Load()
3663 nextAdj := pp.timerModifiedEarliest.Load()
3664 if next == 0 || (nextAdj != 0 && nextAdj < next) {
3665 next = nextAdj
3666 }
3667
3668 if next == 0 {
3669
3670 return now, 0, false
3671 }
3672
3673 if now == 0 {
3674 now = nanotime()
3675 }
3676 if now < next {
3677
3678
3679
3680
3681 if pp != getg().m.p.ptr() || int(pp.deletedTimers.Load()) <= int(pp.numTimers.Load()/4) {
3682 return now, next, false
3683 }
3684 }
3685
3686 lock(&pp.timersLock)
3687
3688 if len(pp.timers) > 0 {
3689 adjusttimers(pp, now)
3690 for len(pp.timers) > 0 {
3691
3692
3693 if tw := runtimer(pp, now); tw != 0 {
3694 if tw > 0 {
3695 pollUntil = tw
3696 }
3697 break
3698 }
3699 ran = true
3700 }
3701 }
3702
3703
3704
3705
3706 if pp == getg().m.p.ptr() && int(pp.deletedTimers.Load()) > len(pp.timers)/4 {
3707 clearDeletedTimers(pp)
3708 }
3709
3710 unlock(&pp.timersLock)
3711
3712 return now, pollUntil, ran
3713 }
3714
3715 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
3716 unlock((*mutex)(lock))
3717 return true
3718 }
3719
3720 // park continuation on g0.
3721 func park_m(gp *g) {
3722 mp := getg().m
3723
3724 if traceEnabled() {
3725 traceGoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
3726 }
3727
3728
3729
3730 casgstatus(gp, _Grunning, _Gwaiting)
3731 dropg()
3732
3733 if fn := mp.waitunlockf; fn != nil {
3734 ok := fn(gp, mp.waitlock)
3735 mp.waitunlockf = nil
3736 mp.waitlock = nil
3737 if !ok {
3738 if traceEnabled() {
3739 traceGoUnpark(gp, 2)
3740 }
3741 casgstatus(gp, _Gwaiting, _Grunnable)
3742 execute(gp, true)
3743 }
3744 }
3745 schedule()
3746 }
3747
3748 func goschedImpl(gp *g) {
3749 status := readgstatus(gp)
3750 if status&^_Gscan != _Grunning {
3751 dumpgstatus(gp)
3752 throw("bad g status")
3753 }
3754 casgstatus(gp, _Grunning, _Grunnable)
3755 dropg()
3756 lock(&sched.lock)
3757 globrunqput(gp)
3758 unlock(&sched.lock)
3759
3760 schedule()
3761 }
3762
3763 // Gosched continuation on g0.
3764 func gosched_m(gp *g) {
3765 if traceEnabled() {
3766 traceGoSched()
3767 }
3768 goschedImpl(gp)
3769 }
3770
3771 // goschedguarded is a forbidden-states-avoided version of gosched_m.
3772 func goschedguarded_m(gp *g) {
3773
3774 if !canPreemptM(gp.m) {
3775 gogo(&gp.sched)
3776 }
3777
3778 if traceEnabled() {
3779 traceGoSched()
3780 }
3781 goschedImpl(gp)
3782 }
3783
3784 func gopreempt_m(gp *g) {
3785 if traceEnabled() {
3786 traceGoPreempt()
3787 }
3788 goschedImpl(gp)
3789 }
3790
3791 // preemptPark parks gp and puts it in _Gpreempted.
3792 //
3793 //go:systemstack
3794 func preemptPark(gp *g) {
3795 if traceEnabled() {
3796 traceGoPark(traceBlockPreempted, 0)
3797 }
3798 status := readgstatus(gp)
3799 if status&^_Gscan != _Grunning {
3800 dumpgstatus(gp)
3801 throw("bad g status")
3802 }
3803
3804 if gp.asyncSafePoint {
3805
3806
3807
3808 f := findfunc(gp.sched.pc)
3809 if !f.valid() {
3810 throw("preempt at unknown pc")
3811 }
3812 if f.flag&abi.FuncFlagSPWrite != 0 {
3813 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
3814 throw("preempt SPWRITE")
3815 }
3816 }
3817
3818
3819
3820
3821
3822
3823
3824 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
3825 dropg()
3826 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
3827 schedule()
3828 }
3829
3830 // goyield is like Gosched, but it:
3831 // - emits a GoPreempt trace event instead of a GoSched trace event
3832 // - puts the current G on the runq of the current P instead of the globrunq
3833 func goyield() {
3834 checkTimeouts()
3835 mcall(goyield_m)
3836 }
3837
3838 func goyield_m(gp *g) {
3839 if traceEnabled() {
3840 traceGoPreempt()
3841 }
3842 pp := gp.m.p.ptr()
3843 casgstatus(gp, _Grunning, _Grunnable)
3844 dropg()
3845 runqput(pp, gp, false)
3846 schedule()
3847 }
3848
3849 // Finishes execution of the current goroutine.
3850 func goexit1() {
3851 if raceenabled {
3852 racegoend()
3853 }
3854 if traceEnabled() {
3855 traceGoEnd()
3856 }
3857 mcall(goexit0)
3858 }
3859
3860 // goexit continuation on g0.
3861 func goexit0(gp *g) {
3862 mp := getg().m
3863 pp := mp.p.ptr()
3864
3865 casgstatus(gp, _Grunning, _Gdead)
3866 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
3867 if isSystemGoroutine(gp, false) {
3868 sched.ngsys.Add(-1)
3869 }
3870 gp.m = nil
3871 locked := gp.lockedm != 0
3872 gp.lockedm = 0
3873 mp.lockedg = 0
3874 gp.preemptStop = false
3875 gp.paniconfault = false
3876 gp._defer = nil
3877 gp._panic = nil
3878 gp.writebuf = nil
3879 gp.waitreason = waitReasonZero
3880 gp.param = nil
3881 gp.labels = nil
3882 gp.timer = nil
3883
3884 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
3885
3886
3887
3888 assistWorkPerByte := gcController.assistWorkPerByte.Load()
3889 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
3890 gcController.bgScanCredit.Add(scanCredit)
3891 gp.gcAssistBytes = 0
3892 }
3893
3894 dropg()
3895
3896 if GOARCH == "wasm" {
3897 gfput(pp, gp)
3898 schedule()
3899 }
3900
3901 if mp.lockedInt != 0 {
3902 print("invalid m->lockedInt = ", mp.lockedInt, "\n")
3903 throw("internal lockOSThread error")
3904 }
3905 gfput(pp, gp)
3906 if locked {
3907
3908
3909
3910
3911
3912
3913 if GOOS != "plan9" {
3914 gogo(&mp.g0.sched)
3915 } else {
3916
3917
3918 mp.lockedExt = 0
3919 }
3920 }
3921 schedule()
3922 }
3923
3924 // save updates getg().sched to refer to pc and sp so that a following
3925 // gogo will restore pc/sp.
3926 //
3927 // save must not have write barriers because invoking a write barrier
3928 // can clobber getg().sched.
3929 //
3930 //go:nosplit
3931 //go:nowritebarrierrec
3932 func save(pc, sp uintptr) {
3933 gp := getg()
3934
3935 if gp == gp.m.g0 || gp == gp.m.gsignal {
3936
3937
3938
3939
3940
3941 throw("save on system g not allowed")
3942 }
3943
3944 gp.sched.pc = pc
3945 gp.sched.sp = sp
3946 gp.sched.lr = 0
3947 gp.sched.ret = 0
3948
3949
3950
3951 if gp.sched.ctxt != nil {
3952 badctxt()
3953 }
3954 }
3955
3956 // The goroutine g is about to enter a system call.
3957 // Record that it's not using the cpu anymore.
3958 // This is called only from the go syscall library and cgocall,
3959 // not from the low-level system calls used by the runtime.
3960 //
3961 // Entersyscall cannot split the stack: the save must
3962 // make g->sched refer to the caller's stack segment, because
3963 // entersyscall is going to return immediately after.
3964 //
3965 // Nothing entersyscall calls can split the stack either.
3966 // We cannot safely move the stack during an active call to syscall,
3967 // because we do not know which of the uintptr arguments are
3968 // really pointers (back into the stack).
3969 // In practice, this means that we make the fast path run through
3970 // entersyscall doing no-split things, and the slow path has to use
3971 // systemstack to run bigger things on the system stack.
3972 //
3973 // reentersyscall is the entry point used by cgo callbacks, where explicitly
3974 // saved SP and PC are restored. This is needed when exitsyscall will be called
3975 // from a function further up in the call stack than the parent, as g->syscallsp
3976 // must always point to a valid stack frame. entersyscall below is the normal
3977 // entry point for syscalls, which obtains the SP and PC from the caller.
3978 //
3979 // Syscall tracing:
3980 // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
3981 // If the syscall does not block, that is it, we do not emit any other events.
3982 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
3983 // when syscall returns we emit traceGoSysExit and when the goroutine starts running
3984 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
3985 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
3986 // we remember the current value of syscalltick in m (gp.m.syscalltick = gp.m.p.ptr().syscalltick),
3987 // whoever emits traceGoSysBlock increments p.syscalltick afterwards,
3988 // and we wait for the increment before emitting traceGoSysExit.
3989 // Note that the increment is done even if tracing is not enabled,
3990 // because tracing can be enabled in the middle of a syscall. We don't want the wait to hang.
3991 //
3992 //go:nosplit
3993 func reentersyscall(pc, sp uintptr) {
3994 gp := getg()
3995
3996
3997
3998 gp.m.locks++
3999
4000
4001
4002
4003
4004 gp.stackguard0 = stackPreempt
4005 gp.throwsplit = true
4006
4007
4008 save(pc, sp)
4009 gp.syscallsp = sp
4010 gp.syscallpc = pc
4011 casgstatus(gp, _Grunning, _Gsyscall)
4012 if staticLockRanking {
4013
4014
4015 save(pc, sp)
4016 }
4017 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4018 systemstack(func() {
4019 print("entersyscall inconsistent ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4020 throw("entersyscall")
4021 })
4022 }
4023
4024 if traceEnabled() {
4025 systemstack(traceGoSysCall)
4026
4027
4028
4029 save(pc, sp)
4030 }
4031
4032 if sched.sysmonwait.Load() {
4033 systemstack(entersyscall_sysmon)
4034 save(pc, sp)
4035 }
4036
4037 if gp.m.p.ptr().runSafePointFn != 0 {
4038
4039 systemstack(runSafePointFn)
4040 save(pc, sp)
4041 }
4042
4043 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4044 pp := gp.m.p.ptr()
4045 pp.m = 0
4046 gp.m.oldp.set(pp)
4047 gp.m.p = 0
4048 atomic.Store(&pp.status, _Psyscall)
4049 if sched.gcwaiting.Load() {
4050 systemstack(entersyscall_gcwait)
4051 save(pc, sp)
4052 }
4053
4054 gp.m.locks--
4055 }
4056
4057 // Standard syscall entry used by the go syscall library and normal cgo calls.
4058 //
4059 // This is exactly the same as reentersyscall except that it gets the PC
4060 // and SP of its caller rather than taking them as arguments.
4061 //
4062 //go:nosplit
4063 func entersyscall() {
4064 reentersyscall(getcallerpc(), getcallersp())
4065 }
4066
4067 func entersyscall_sysmon() {
4068 lock(&sched.lock)
4069 if sched.sysmonwait.Load() {
4070 sched.sysmonwait.Store(false)
4071 notewakeup(&sched.sysmonnote)
4072 }
4073 unlock(&sched.lock)
4074 }
4075
4076 func entersyscall_gcwait() {
4077 gp := getg()
4078 pp := gp.m.oldp.ptr()
4079
4080 lock(&sched.lock)
4081 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4082 if traceEnabled() {
4083 traceGoSysBlock(pp)
4084 traceProcStop(pp)
4085 }
4086 pp.syscalltick++
4087 if sched.stopwait--; sched.stopwait == 0 {
4088 notewakeup(&sched.stopnote)
4089 }
4090 }
4091 unlock(&sched.lock)
4092 }
4093
4094 // The same as entersyscall(), but with a hint that the syscall is blocking.
4095 //
4096 //go:nosplit
4097 func entersyscallblock() {
4098 gp := getg()
4099
4100 gp.m.locks++
4101 gp.throwsplit = true
4102 gp.stackguard0 = stackPreempt
4103 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4104 gp.m.p.ptr().syscalltick++
4105
4106
4107 pc := getcallerpc()
4108 sp := getcallersp()
4109 save(pc, sp)
4110 gp.syscallsp = gp.sched.sp
4111 gp.syscallpc = gp.sched.pc
4112 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4113 sp1 := sp
4114 sp2 := gp.sched.sp
4115 sp3 := gp.syscallsp
4116 systemstack(func() {
4117 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4118 throw("entersyscallblock")
4119 })
4120 }
4121 casgstatus(gp, _Grunning, _Gsyscall)
4122 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4123 systemstack(func() {
4124 print("entersyscallblock inconsistent ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4125 throw("entersyscallblock")
4126 })
4127 }
4128
4129 systemstack(entersyscallblock_handoff)
4130
4131
4132 save(getcallerpc(), getcallersp())
4133
4134 gp.m.locks--
4135 }
4136
4137 func entersyscallblock_handoff() {
4138 if traceEnabled() {
4139 traceGoSysCall()
4140 traceGoSysBlock(getg().m.p.ptr())
4141 }
4142 handoffp(releasep())
4143 }
4144
4145 // The goroutine g exited its system call.
4146 // Arrange for it to run on a cpu again.
4147 // This is called only from the go syscall library, not
4148 // from the low-level system calls used by the runtime.
4149 //
4150 // Write barriers are not allowed because our P may have been stolen.
4151 //
4152 // This is exported via linkname to assembly in the syscall package.
4153 //
4154 //go:nosplit
4155 //go:nowritebarrierrec
4156 //go:linkname exitsyscall
4157 func exitsyscall() {
4158 gp := getg()
4159
4160 gp.m.locks++
4161 if getcallersp() > gp.syscallsp {
4162 throw("exitsyscall: syscall frame is no longer valid")
4163 }
4164
4165 gp.waitsince = 0
4166 oldp := gp.m.oldp.ptr()
4167 gp.m.oldp = 0
4168 if exitsyscallfast(oldp) {
4169
4170
4171 if goroutineProfile.active {
4172
4173
4174
4175 systemstack(func() {
4176 tryRecordGoroutineProfileWB(gp)
4177 })
4178 }
4179 if traceEnabled() {
4180 if oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4181 systemstack(traceGoStart)
4182 }
4183 }
4184
4185 gp.m.p.ptr().syscalltick++
4186
4187 casgstatus(gp, _Gsyscall, _Grunning)
4188
4189
4190
4191 gp.syscallsp = 0
4192 gp.m.locks--
4193 if gp.preempt {
4194
4195 gp.stackguard0 = stackPreempt
4196 } else {
4197
4198 gp.stackguard0 = gp.stack.lo + stackGuard
4199 }
4200 gp.throwsplit = false
4201
4202 if sched.disable.user && !schedEnabled(gp) {
4203
4204 Gosched()
4205 }
4206
4207 return
4208 }
4209
4210 if traceEnabled() {
4211
4212
4213 for oldp != nil && oldp.syscalltick == gp.m.syscalltick {
4214 osyield()
4215 }
4216
4217
4218
4219
4220 gp.trace.sysExitTime = traceClockNow()
4221 }
4222
4223 gp.m.locks--
4224
4225
4226 mcall(exitsyscall0)
4227
4228
4229
4230
4231
4232
4233
4234 gp.syscallsp = 0
4235 gp.m.p.ptr().syscalltick++
4236 gp.throwsplit = false
4237 }
4238
4239 //go:nosplit
4240 func exitsyscallfast(oldp *p) bool {
4241 gp := getg()
4242
4243
4244 if sched.stopwait == freezeStopWait {
4245 return false
4246 }
4247
4248
4249 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4250
4251 wirep(oldp)
4252 exitsyscallfast_reacquired()
4253 return true
4254 }
4255
4256
4257 if sched.pidle != 0 {
4258 var ok bool
4259 systemstack(func() {
4260 ok = exitsyscallfast_pidle()
4261 if ok && traceEnabled() {
4262 if oldp != nil {
4263
4264
4265 for oldp.syscalltick == gp.m.syscalltick {
4266 osyield()
4267 }
4268 }
4269 traceGoSysExit()
4270 }
4271 })
4272 if ok {
4273 return true
4274 }
4275 }
4276 return false
4277 }
4278
4279 // exitsyscallfast_reacquired is the exitsyscall path on which this G
4280 // has successfully reacquired the P it was running on before the
4281 // syscall.
4282 //
4283 //go:nosplit
4284 func exitsyscallfast_reacquired() {
4285 gp := getg()
4286 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4287 if traceEnabled() {
4288
4289
4290
4291 systemstack(func() {
4292
4293 traceGoSysBlock(gp.m.p.ptr())
4294
4295 traceGoSysExit()
4296 })
4297 }
4298 gp.m.p.ptr().syscalltick++
4299 }
4300 }
4301
4302 func exitsyscallfast_pidle() bool {
4303 lock(&sched.lock)
4304 pp, _ := pidleget(0)
4305 if pp != nil && sched.sysmonwait.Load() {
4306 sched.sysmonwait.Store(false)
4307 notewakeup(&sched.sysmonnote)
4308 }
4309 unlock(&sched.lock)
4310 if pp != nil {
4311 acquirep(pp)
4312 return true
4313 }
4314 return false
4315 }
4316
4317 // exitsyscall slow path on g0.
4318 // Failed to acquire P, enqueue gp as runnable.
4319 //
4320 // Called via mcall, so gp is the calling g from this M.
4321 //
4322 //go:nowritebarrierrec
4323 func exitsyscall0(gp *g) {
4324 casgstatus(gp, _Gsyscall, _Grunnable)
4325 dropg()
4326 lock(&sched.lock)
4327 var pp *p
4328 if schedEnabled(gp) {
4329 pp, _ = pidleget(0)
4330 }
4331 var locked bool
4332 if pp == nil {
4333 globrunqput(gp)
4334
4335
4336
4337
4338
4339
4340 locked = gp.lockedm != 0
4341 } else if sched.sysmonwait.Load() {
4342 sched.sysmonwait.Store(false)
4343 notewakeup(&sched.sysmonnote)
4344 }
4345 unlock(&sched.lock)
4346 if pp != nil {
4347 acquirep(pp)
4348 execute(gp, false)
4349 }
4350 if locked {
4351
4352
4353
4354
4355 stoplockedm()
4356 execute(gp, false)
4357 }
4358 stopm()
4359 schedule()
4360 }
4361
4362 // Called from syscall package before fork.
4363 //
4364 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
4365 //go:nosplit
4366 func syscall_runtime_BeforeFork() {
4367 gp := getg().m.curg
4368
4369
4370
4371
4372 gp.m.locks++
4373 sigsave(&gp.m.sigmask)
4374 sigblock(false)
4375
4376
4377
4378
4379
4380 gp.stackguard0 = stackFork
4381 }
4382
4383 // Called from syscall package after fork in parent.
4384 //
4385 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
4386 //go:nosplit
4387 func syscall_runtime_AfterFork() {
4388 gp := getg().m.curg
4389
4390
4391 gp.stackguard0 = gp.stack.lo + stackGuard
4392
4393 msigrestore(gp.m.sigmask)
4394
4395 gp.m.locks--
4396 }
4397
4398 // inForkedChild is true while manipulating signals in the child process.
4399 // This is used to avoid calling libc functions in case we are using vfork.
4400 var inForkedChild bool
4401
4402 // Called from syscall package after fork in child.
4403 // It resets non-sigignored signals to the default handler, and
4404 // restores the signal mask in preparation for the exec.
4405 //
4406 // Because this might be called during a vfork, and therefore may be
4407 // temporarily sharing address space with the parent process, this must
4408 // not change any global variables or call into C code that may do so.
4409 //
4410 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
4411 //go:nosplit
4412 //go:nowritebarrierrec
4413 func syscall_runtime_AfterForkInChild() {
4414
4415
4416
4417
4418 inForkedChild = true
4419
4420 clearSignalHandlers()
4421
4422
4423
4424 msigrestore(getg().m.sigmask)
4425
4426 inForkedChild = false
4427 }
4428
4429 // pendingPreemptSignals is the number of preemption signals
4430 // that have been sent but not received. This is only used on Darwin.
4431 // For #41702.
4432 var pendingPreemptSignals atomic.Int32
4433
4434 // Called from syscall package before Exec.
4435 //
4436 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
4437 func syscall_runtime_BeforeExec() {
4438
4439 execLock.lock()
4440
4441
4442
4443 if GOOS == "darwin" || GOOS == "ios" {
4444 for pendingPreemptSignals.Load() > 0 {
4445 osyield()
4446 }
4447 }
4448 }
4449
4450 // Called from syscall package after Exec.
4451 //
4452 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
4453 func syscall_runtime_AfterExec() {
4454 execLock.unlock()
4455 }
4456
4457 // Allocate a new g, with a stack big enough for stacksize bytes.
4458 func malg(stacksize int32) *g {
4459 newg := new(g)
4460 if stacksize >= 0 {
4461 stacksize = round2(stackSystem + stacksize)
4462 systemstack(func() {
4463 newg.stack = stackalloc(uint32(stacksize))
4464 })
4465 newg.stackguard0 = newg.stack.lo + stackGuard
4466 newg.stackguard1 = ^uintptr(0)
4467
4468
4469 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
4470 }
4471 return newg
4472 }
4473
4474 // Create a new g running fn.
4475 // Put it on the queue of g's waiting to run.
4476 // The compiler turns a go statement into a call to this.
4477 func newproc(fn *funcval) {
4478 gp := getg()
4479 pc := getcallerpc()
4480 systemstack(func() {
4481 newg := newproc1(fn, gp, pc)
4482
4483 pp := getg().m.p.ptr()
4484 runqput(pp, newg, true)
4485
4486 if mainStarted {
4487 wakep()
4488 }
4489 })
4490 }
4491
4492 // Create a new g in state _Grunnable, starting at fn. callerpc is the
4493 // address of the go statement that created this. The caller is responsible
4494 // for adding the new g to the scheduler.
4495 func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
4496 if fn == nil {
4497 fatal("go of nil func value")
4498 }
4499
4500 mp := acquirem()
4501 pp := mp.p.ptr()
4502 newg := gfget(pp)
4503 if newg == nil {
4504 newg = malg(stackMin)
4505 casgstatus(newg, _Gidle, _Gdead)
4506 allgadd(newg)
4507 }
4508 if newg.stack.hi == 0 {
4509 throw("newproc1: newg missing stack")
4510 }
4511
4512 if readgstatus(newg) != _Gdead {
4513 throw("newproc1: new g is not Gdead")
4514 }
4515
4516 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
4517 totalSize = alignUp(totalSize, sys.StackAlign)
4518 sp := newg.stack.hi - totalSize
4519 spArg := sp
4520 if usesLR {
4521
4522 *(*uintptr)(unsafe.Pointer(sp)) = 0
4523 prepGoExitFrame(sp)
4524 spArg += sys.MinFrameSize
4525 }
4526
4527 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
4528 newg.sched.sp = sp
4529 newg.stktopsp = sp
4530 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
4531 newg.sched.g = guintptr(unsafe.Pointer(newg))
4532 gostartcallfn(&newg.sched, fn)
4533 newg.parentGoid = callergp.goid
4534 newg.gopc = callerpc
4535 newg.ancestors = saveAncestors(callergp)
4536 newg.startpc = fn.fn
4537 if isSystemGoroutine(newg, false) {
4538 sched.ngsys.Add(1)
4539 } else {
4540
4541 if mp.curg != nil {
4542 newg.labels = mp.curg.labels
4543 }
4544 if goroutineProfile.active {
4545
4546
4547
4548
4549
4550 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
4551 }
4552 }
4553
4554 newg.trackingSeq = uint8(fastrand())
4555 if newg.trackingSeq%gTrackingPeriod == 0 {
4556 newg.tracking = true
4557 }
4558 casgstatus(newg, _Gdead, _Grunnable)
4559 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
4560
4561 if pp.goidcache == pp.goidcacheend {
4562
4563
4564
4565 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
4566 pp.goidcache -= _GoidCacheBatch - 1
4567 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
4568 }
4569 newg.goid = pp.goidcache
4570 pp.goidcache++
4571 if raceenabled {
4572 newg.racectx = racegostart(callerpc)
4573 newg.raceignore = 0
4574 if newg.labels != nil {
4575
4576
4577 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
4578 }
4579 }
4580 if traceEnabled() {
4581 traceGoCreate(newg, newg.startpc)
4582 }
4583 releasem(mp)
4584
4585 return newg
4586 }
4587
4588 // saveAncestors copies previous ancestors of the given caller g and
4589 // includes info for the current caller into a new set of tracebacks for
4590 // a g being created.
4591 func saveAncestors(callergp *g) *[]ancestorInfo {
4592
4593 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
4594 return nil
4595 }
4596 var callerAncestors []ancestorInfo
4597 if callergp.ancestors != nil {
4598 callerAncestors = *callergp.ancestors
4599 }
4600 n := int32(len(callerAncestors)) + 1
4601 if n > debug.tracebackancestors {
4602 n = debug.tracebackancestors
4603 }
4604 ancestors := make([]ancestorInfo, n)
4605 copy(ancestors[1:], callerAncestors)
4606
4607 var pcs [tracebackInnerFrames]uintptr
4608 npcs := gcallers(callergp, 0, pcs[:])
4609 ipcs := make([]uintptr, npcs)
4610 copy(ipcs, pcs[:])
4611 ancestors[0] = ancestorInfo{
4612 pcs: ipcs,
4613 goid: callergp.goid,
4614 gopc: callergp.gopc,
4615 }
4616
4617 ancestorsp := new([]ancestorInfo)
4618 *ancestorsp = ancestors
4619 return ancestorsp
4620 }
4621
4622 // Put on gfree list.
4623 // If local list is too long, transfer a batch to the global list.
4624 func gfput(pp *p, gp *g) {
4625 if readgstatus(gp) != _Gdead {
4626 throw("gfput: bad status (not Gdead)")
4627 }
4628
4629 stksize := gp.stack.hi - gp.stack.lo
4630
4631 if stksize != uintptr(startingStackSize) {
4632
4633 stackfree(gp.stack)
4634 gp.stack.lo = 0
4635 gp.stack.hi = 0
4636 gp.stackguard0 = 0
4637 }
4638
4639 pp.gFree.push(gp)
4640 pp.gFree.n++
4641 if pp.gFree.n >= 64 {
4642 var (
4643 inc int32
4644 stackQ gQueue
4645 noStackQ gQueue
4646 )
4647 for pp.gFree.n >= 32 {
4648 gp := pp.gFree.pop()
4649 pp.gFree.n--
4650 if gp.stack.lo == 0 {
4651 noStackQ.push(gp)
4652 } else {
4653 stackQ.push(gp)
4654 }
4655 inc++
4656 }
4657 lock(&sched.gFree.lock)
4658 sched.gFree.noStack.pushAll(noStackQ)
4659 sched.gFree.stack.pushAll(stackQ)
4660 sched.gFree.n += inc
4661 unlock(&sched.gFree.lock)
4662 }
4663 }
4664
4665 // Get from gfree list.
4666 // If local list is empty, grab a batch from global list.
4667 func gfget(pp *p) *g {
4668 retry:
4669 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
4670 lock(&sched.gFree.lock)
4671
4672 for pp.gFree.n < 32 {
4673
4674 gp := sched.gFree.stack.pop()
4675 if gp == nil {
4676 gp = sched.gFree.noStack.pop()
4677 if gp == nil {
4678 break
4679 }
4680 }
4681 sched.gFree.n--
4682 pp.gFree.push(gp)
4683 pp.gFree.n++
4684 }
4685 unlock(&sched.gFree.lock)
4686 goto retry
4687 }
4688 gp := pp.gFree.pop()
4689 if gp == nil {
4690 return nil
4691 }
4692 pp.gFree.n--
4693 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
4694
4695
4696
4697 systemstack(func() {
4698 stackfree(gp.stack)
4699 gp.stack.lo = 0
4700 gp.stack.hi = 0
4701 gp.stackguard0 = 0
4702 })
4703 }
4704 if gp.stack.lo == 0 {
4705
4706 systemstack(func() {
4707 gp.stack = stackalloc(startingStackSize)
4708 })
4709 gp.stackguard0 = gp.stack.lo + stackGuard
4710 } else {
4711 if raceenabled {
4712 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4713 }
4714 if msanenabled {
4715 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4716 }
4717 if asanenabled {
4718 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4719 }
4720 }
4721 return gp
4722 }
4723
4724 // Purge all cached G's from gfree list to the global list.
4725 func gfpurge(pp *p) {
4726 var (
4727 inc int32
4728 stackQ gQueue
4729 noStackQ gQueue
4730 )
4731 for !pp.gFree.empty() {
4732 gp := pp.gFree.pop()
4733 pp.gFree.n--
4734 if gp.stack.lo == 0 {
4735 noStackQ.push(gp)
4736 } else {
4737 stackQ.push(gp)
4738 }
4739 inc++
4740 }
4741 lock(&sched.gFree.lock)
4742 sched.gFree.noStack.pushAll(noStackQ)
4743 sched.gFree.stack.pushAll(stackQ)
4744 sched.gFree.n += inc
4745 unlock(&sched.gFree.lock)
4746 }
4747
4748 // Breakpoint executes a breakpoint trap.
4749 func Breakpoint() {
4750 breakpoint()
4751 }
4752
4753 // dolockOSThread is called by LockOSThread and lockOSThread below
4754 // after they modify m.locked. Do not allow preemption during this call,
4755 // or else the m might be different in this function than in the caller.
4756 //
4757 //go:nosplit
4758 func dolockOSThread() {
4759 if GOARCH == "wasm" {
4760 return
4761 }
4762 gp := getg()
4763 gp.m.lockedg.set(gp)
4764 gp.lockedm.set(gp.m)
4765 }
4766
4767 // LockOSThread wires the calling goroutine to its current operating system thread.
4768 // The calling goroutine will always execute in that thread,
4769 // and no other goroutine will execute in it,
4770 // until the calling goroutine has made as many calls to
4771 // UnlockOSThread as to LockOSThread.
4772 // If the calling goroutine exits without unlocking the thread,
4773 // the thread will be terminated.
4774 //
4775 // All init functions are run on the startup thread. Calling LockOSThread
4776 // from an init function will cause the main function to be invoked on
4777 // that thread.
4778 //
4779 // A goroutine should call LockOSThread before calling OS services or
4780 // non-Go library functions that depend on per-thread state.
4781 //
4782 //go:nosplit
4783 func LockOSThread() {
4784 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
4785
4786
4787
4788 startTemplateThread()
4789 }
4790 gp := getg()
4791 gp.m.lockedExt++
4792 if gp.m.lockedExt == 0 {
4793 gp.m.lockedExt--
4794 panic("LockOSThread nesting overflow")
4795 }
4796 dolockOSThread()
4797 }
4798
4799 //go:nosplit
4800 func lockOSThread() {
4801 getg().m.lockedInt++
4802 dolockOSThread()
4803 }
4804
4805 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
4806 // after they update m->locked. Do not allow preemption during this call,
4807 // or else the m might be different in this function than in the caller.
4808 //
4809 //go:nosplit
4810 func dounlockOSThread() {
4811 if GOARCH == "wasm" {
4812 return
4813 }
4814 gp := getg()
4815 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
4816 return
4817 }
4818 gp.m.lockedg = 0
4819 gp.lockedm = 0
4820 }
4821
4822 // UnlockOSThread undoes an earlier call to LockOSThread.
4823 // If this drops the number of active LockOSThread calls on the
4824 // calling goroutine to zero, it unwires the calling goroutine from
4825 // its fixed operating system thread.
4826 // If there are no active LockOSThread calls, this is a no-op.
4827 //
4828 // Before calling UnlockOSThread, the caller must ensure that the OS
4829 // thread is suitable for running other goroutines. If the caller made
4830 // any permanent changes to the state of the thread that would affect
4831 // other goroutines, it should not call this function and thus leave
4832 // the goroutine locked to the OS thread until the goroutine (and
4833 // hence the thread) exits.
4834 //
4835 //go:nosplit
4836 func UnlockOSThread() {
4837 gp := getg()
4838 if gp.m.lockedExt == 0 {
4839 return
4840 }
4841 gp.m.lockedExt--
4842 dounlockOSThread()
4843 }
4844
4845 //go:nosplit
4846 func unlockOSThread() {
4847 gp := getg()
4848 if gp.m.lockedInt == 0 {
4849 systemstack(badunlockosthread)
4850 }
4851 gp.m.lockedInt--
4852 dounlockOSThread()
4853 }
4854
4855 func badunlockosthread() {
4856 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
4857 }
4858
4859 func gcount() int32 {
4860 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
4861 for _, pp := range allp {
4862 n -= pp.gFree.n
4863 }
4864
4865
4866
4867 if n < 1 {
4868 n = 1
4869 }
4870 return n
4871 }
4872
4873 func mcount() int32 {
4874 return int32(sched.mnext - sched.nmfreed)
4875 }
4876
4877 var prof struct {
4878 signalLock atomic.Uint32
4879
4880
4881
4882 hz atomic.Int32
4883 }
4884
4885 func _System() { _System() }
4886 func _ExternalCode() { _ExternalCode() }
4887 func _LostExternalCode() { _LostExternalCode() }
4888 func _GC() { _GC() }
4889 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
4890 func _VDSO() { _VDSO() }
4891
4892 // Called if we receive a SIGPROF signal.
4893 // Called by the signal handler, may run during STW.
4894 //
4895 //go:nowritebarrierrec
4896 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
4897 if prof.hz.Load() == 0 {
4898 return
4899 }
4900
4901
4902
4903
4904 if mp != nil && mp.profilehz == 0 {
4905 return
4906 }
4907
4908
4909
4910
4911
4912
4913
4914 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
4915 if f := findfunc(pc); f.valid() {
4916 if hasPrefix(funcname(f), "runtime/internal/atomic") {
4917 cpuprof.lostAtomic++
4918 return
4919 }
4920 }
4921 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
4922
4923
4924
4925 cpuprof.lostAtomic++
4926 return
4927 }
4928 }
4929
4930
4931
4932
4933
4934
4935
4936 getg().m.mallocing++
4937
4938 var u unwinder
4939 var stk [maxCPUProfStack]uintptr
4940 n := 0
4941 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
4942 cgoOff := 0
4943
4944
4945
4946
4947
4948 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
4949 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
4950 cgoOff++
4951 }
4952 n += copy(stk[:], mp.cgoCallers[:cgoOff])
4953 mp.cgoCallers[0] = 0
4954 }
4955
4956
4957 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
4958 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
4959
4960
4961 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
4962 } else if mp != nil && mp.vdsoSP != 0 {
4963
4964
4965 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
4966 } else {
4967 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
4968 }
4969 n += tracebackPCs(&u, 0, stk[n:])
4970
4971 if n <= 0 {
4972
4973
4974 n = 2
4975 if inVDSOPage(pc) {
4976 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
4977 } else if pc > firstmoduledata.etext {
4978
4979 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
4980 }
4981 stk[0] = pc
4982 if mp.preemptoff != "" {
4983 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
4984 } else {
4985 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
4986 }
4987 }
4988
4989 if prof.hz.Load() != 0 {
4990
4991
4992
4993 var tagPtr *unsafe.Pointer
4994 if gp != nil && gp.m != nil && gp.m.curg != nil {
4995 tagPtr = &gp.m.curg.labels
4996 }
4997 cpuprof.add(tagPtr, stk[:n])
4998
4999 gprof := gp
5000 var pp *p
5001 if gp != nil && gp.m != nil {
5002 if gp.m.curg != nil {
5003 gprof = gp.m.curg
5004 }
5005 pp = gp.m.p.ptr()
5006 }
5007 traceCPUSample(gprof, pp, stk[:n])
5008 }
5009 getg().m.mallocing--
5010 }
5011
5012 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
5013 // If hz <= 0, setcpuprofilerate turns off CPU profiling.
5014 func setcpuprofilerate(hz int32) {
5015
5016 if hz < 0 {
5017 hz = 0
5018 }
5019
5020
5021
5022 gp := getg()
5023 gp.m.locks++
5024
5025
5026
5027
5028 setThreadCPUProfiler(0)
5029
5030 for !prof.signalLock.CompareAndSwap(0, 1) {
5031 osyield()
5032 }
5033 if prof.hz.Load() != hz {
5034 setProcessCPUProfiler(hz)
5035 prof.hz.Store(hz)
5036 }
5037 prof.signalLock.Store(0)
5038
5039 lock(&sched.lock)
5040 sched.profilehz = hz
5041 unlock(&sched.lock)
5042
5043 if hz != 0 {
5044 setThreadCPUProfiler(hz)
5045 }
5046
5047 gp.m.locks--
5048 }
5049
5050 // init initializes pp, which may be a freshly allocated p or a
5051 // previously destroyed p, and transitions it to status _Pgcstop.
5052 func (pp *p) init(id int32) {
5053 pp.id = id
5054 pp.status = _Pgcstop
5055 pp.sudogcache = pp.sudogbuf[:0]
5056 pp.deferpool = pp.deferpoolbuf[:0]
5057 pp.wbBuf.reset()
5058 if pp.mcache == nil {
5059 if id == 0 {
5060 if mcache0 == nil {
5061 throw("missing mcache?")
5062 }
5063
5064
5065 pp.mcache = mcache0
5066 } else {
5067 pp.mcache = allocmcache()
5068 }
5069 }
5070 if raceenabled && pp.raceprocctx == 0 {
5071 if id == 0 {
5072 pp.raceprocctx = raceprocctx0
5073 raceprocctx0 = 0
5074 } else {
5075 pp.raceprocctx = raceproccreate()
5076 }
5077 }
5078 lockInit(&pp.timersLock, lockRankTimers)
5079
5080
5081
5082 timerpMask.set(id)
5083
5084
5085 idlepMask.clear(id)
5086 }
5087
5088 // destroy releases all of the resources associated with pp and
5089 // transitions it to status _Pdead.
5090 //
5091 // sched.lock must be held and the world must be stopped.
5092 func (pp *p) destroy() {
5093 assertLockHeld(&sched.lock)
5094 assertWorldStopped()
5095
5096
5097 for pp.runqhead != pp.runqtail {
5098
5099 pp.runqtail--
5100 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5101
5102 globrunqputhead(gp)
5103 }
5104 if pp.runnext != 0 {
5105 globrunqputhead(pp.runnext.ptr())
5106 pp.runnext = 0
5107 }
5108 if len(pp.timers) > 0 {
5109 plocal := getg().m.p.ptr()
5110
5111
5112
5113
5114 lock(&plocal.timersLock)
5115 lock(&pp.timersLock)
5116 moveTimers(plocal, pp.timers)
5117 pp.timers = nil
5118 pp.numTimers.Store(0)
5119 pp.deletedTimers.Store(0)
5120 pp.timer0When.Store(0)
5121 unlock(&pp.timersLock)
5122 unlock(&plocal.timersLock)
5123 }
5124
5125 if gcphase != _GCoff {
5126 wbBufFlush1(pp)
5127 pp.gcw.dispose()
5128 }
5129 for i := range pp.sudogbuf {
5130 pp.sudogbuf[i] = nil
5131 }
5132 pp.sudogcache = pp.sudogbuf[:0]
5133 pp.pinnerCache = nil
5134 for j := range pp.deferpoolbuf {
5135 pp.deferpoolbuf[j] = nil
5136 }
5137 pp.deferpool = pp.deferpoolbuf[:0]
5138 systemstack(func() {
5139 for i := 0; i < pp.mspancache.len; i++ {
5140
5141 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5142 }
5143 pp.mspancache.len = 0
5144 lock(&mheap_.lock)
5145 pp.pcache.flush(&mheap_.pages)
5146 unlock(&mheap_.lock)
5147 })
5148 freemcache(pp.mcache)
5149 pp.mcache = nil
5150 gfpurge(pp)
5151 traceProcFree(pp)
5152 if raceenabled {
5153 if pp.timerRaceCtx != 0 {
5154
5155
5156
5157
5158
5159 mp := getg().m
5160 phold := mp.p.ptr()
5161 mp.p.set(pp)
5162
5163 racectxend(pp.timerRaceCtx)
5164 pp.timerRaceCtx = 0
5165
5166 mp.p.set(phold)
5167 }
5168 raceprocdestroy(pp.raceprocctx)
5169 pp.raceprocctx = 0
5170 }
5171 pp.gcAssistTime = 0
5172 pp.status = _Pdead
5173 }
5174
5175 // Change number of processors.
5176 //
5177 // sched.lock must be held, and the world must be stopped.
5178 //
5179 // gcworkbufs must not be being modified by either the GC or the write barrier
5180 // code, so the GC must not be running if the number of Ps actually changes.
5181 //
5182 // Returns list of Ps with local work, they need to be scheduled by the caller.
5183 func procresize(nprocs int32) *p {
5184 assertLockHeld(&sched.lock)
5185 assertWorldStopped()
5186
5187 old := gomaxprocs
5188 if old < 0 || nprocs <= 0 {
5189 throw("procresize: invalid arg")
5190 }
5191 if traceEnabled() {
5192 traceGomaxprocs(nprocs)
5193 }
5194
5195
5196 now := nanotime()
5197 if sched.procresizetime != 0 {
5198 sched.totaltime += int64(old) * (now - sched.procresizetime)
5199 }
5200 sched.procresizetime = now
5201
5202 maskWords := (nprocs + 31) / 32
5203
5204
5205 if nprocs > int32(len(allp)) {
5206
5207
5208 lock(&allpLock)
5209 if nprocs <= int32(cap(allp)) {
5210 allp = allp[:nprocs]
5211 } else {
5212 nallp := make([]*p, nprocs)
5213
5214
5215 copy(nallp, allp[:cap(allp)])
5216 allp = nallp
5217 }
5218
5219 if maskWords <= int32(cap(idlepMask)) {
5220 idlepMask = idlepMask[:maskWords]
5221 timerpMask = timerpMask[:maskWords]
5222 } else {
5223 nidlepMask := make([]uint32, maskWords)
5224
5225 copy(nidlepMask, idlepMask)
5226 idlepMask = nidlepMask
5227
5228 ntimerpMask := make([]uint32, maskWords)
5229 copy(ntimerpMask, timerpMask)
5230 timerpMask = ntimerpMask
5231 }
5232 unlock(&allpLock)
5233 }
5234
5235
5236 for i := old; i < nprocs; i++ {
5237 pp := allp[i]
5238 if pp == nil {
5239 pp = new(p)
5240 }
5241 pp.init(i)
5242 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5243 }
5244
5245 gp := getg()
5246 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5247
5248 gp.m.p.ptr().status = _Prunning
5249 gp.m.p.ptr().mcache.prepareForSweep()
5250 } else {
5251
5252
5253
5254
5255
5256 if gp.m.p != 0 {
5257 if traceEnabled() {
5258
5259
5260
5261 traceGoSched()
5262 traceProcStop(gp.m.p.ptr())
5263 }
5264 gp.m.p.ptr().m = 0
5265 }
5266 gp.m.p = 0
5267 pp := allp[0]
5268 pp.m = 0
5269 pp.status = _Pidle
5270 acquirep(pp)
5271 if traceEnabled() {
5272 traceGoStart()
5273 }
5274 }
5275
5276
5277 mcache0 = nil
5278
5279
5280 for i := nprocs; i < old; i++ {
5281 pp := allp[i]
5282 pp.destroy()
5283
5284 }
5285
5286
5287 if int32(len(allp)) != nprocs {
5288 lock(&allpLock)
5289 allp = allp[:nprocs]
5290 idlepMask = idlepMask[:maskWords]
5291 timerpMask = timerpMask[:maskWords]
5292 unlock(&allpLock)
5293 }
5294
5295 var runnablePs *p
5296 for i := nprocs - 1; i >= 0; i-- {
5297 pp := allp[i]
5298 if gp.m.p.ptr() == pp {
5299 continue
5300 }
5301 pp.status = _Pidle
5302 if runqempty(pp) {
5303 pidleput(pp, now)
5304 } else {
5305 pp.m.set(mget())
5306 pp.link.set(runnablePs)
5307 runnablePs = pp
5308 }
5309 }
5310 stealOrder.reset(uint32(nprocs))
5311 var int32p *int32 = &gomaxprocs
5312 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5313 if old != nprocs {
5314
5315 gcCPULimiter.resetCapacity(now, nprocs)
5316 }
5317 return runnablePs
5318 }
5319
5320 // Associate p and the current m.
5321 //
5322 // This function is allowed to have write barriers even if the caller
5323 // isn't because it immediately acquires pp.
5324 //
5325 //go:yeswritebarrierrec
5326 func acquirep(pp *p) {
5327
5328 wirep(pp)
5329
5330
5331
5332
5333
5334 pp.mcache.prepareForSweep()
5335
5336 if traceEnabled() {
5337 traceProcStart()
5338 }
5339 }
5340
5341 // wirep is the first step of acquirep, which actually associates the
5342 // current M to pp. This is broken out so we can disallow write
5343 // barriers for this part, since we don't yet have a P.
5344 //
5345 //go:nowritebarrierrec
5346 //go:nosplit
5347 func wirep(pp *p) {
5348 gp := getg()
5349
5350 if gp.m.p != 0 {
5351 throw("wirep: already in go")
5352 }
5353 if pp.m != 0 || pp.status != _Pidle {
5354 id := int64(0)
5355 if pp.m != 0 {
5356 id = pp.m.ptr().id
5357 }
5358 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
5359 throw("wirep: invalid p state")
5360 }
5361 gp.m.p.set(pp)
5362 pp.m.set(gp.m)
5363 pp.status = _Prunning
5364 }
5365
5366 // Disassociate p and the current m.
5367 func releasep() *p {
5368 gp := getg()
5369
5370 if gp.m.p == 0 {
5371 throw("releasep: invalid arg")
5372 }
5373 pp := gp.m.p.ptr()
5374 if pp.m.ptr() != gp.m || pp.status != _Prunning {
5375 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
5376 throw("releasep: invalid p state")
5377 }
5378 if traceEnabled() {
5379 traceProcStop(gp.m.p.ptr())
5380 }
5381 gp.m.p = 0
5382 pp.m = 0
5383 pp.status = _Pidle
5384 return pp
5385 }
5386
5387 func incidlelocked(v int32) {
5388 lock(&sched.lock)
5389 sched.nmidlelocked += v
5390 if v > 0 {
5391 checkdead()
5392 }
5393 unlock(&sched.lock)
5394 }
5395
5396 // Check for deadlock situation.
5397 // The check is based on number of running M's, if 0 -> deadlock.
5398 // sched.lock must be held.
5399 func checkdead() {
5400 assertLockHeld(&sched.lock)
5401
5402
5403
5404
5405 if islibrary || isarchive {
5406 return
5407 }
5408
5409
5410
5411
5412
5413 if panicking.Load() > 0 {
5414 return
5415 }
5416
5417
5418
5419
5420
5421 var run0 int32
5422 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
5423 run0 = 1
5424 }
5425
5426 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
5427 if run > run0 {
5428 return
5429 }
5430 if run < 0 {
5431 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
5432 unlock(&sched.lock)
5433 throw("checkdead: inconsistent counts")
5434 }
5435
5436 grunning := 0
5437 forEachG(func(gp *g) {
5438 if isSystemGoroutine(gp, false) {
5439 return
5440 }
5441 s := readgstatus(gp)
5442 switch s &^ _Gscan {
5443 case _Gwaiting,
5444 _Gpreempted:
5445 grunning++
5446 case _Grunnable,
5447 _Grunning,
5448 _Gsyscall:
5449 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
5450 unlock(&sched.lock)
5451 throw("checkdead: runnable g")
5452 }
5453 })
5454 if grunning == 0 {
5455 unlock(&sched.lock)
5456 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
5457 }
5458
5459
5460 if faketime != 0 {
5461 if when := timeSleepUntil(); when < maxWhen {
5462 faketime = when
5463
5464
5465 pp, _ := pidleget(faketime)
5466 if pp == nil {
5467
5468
5469 unlock(&sched.lock)
5470 throw("checkdead: no p for timer")
5471 }
5472 mp := mget()
5473 if mp == nil {
5474
5475
5476 unlock(&sched.lock)
5477 throw("checkdead: no m for timer")
5478 }
5479
5480
5481
5482 sched.nmspinning.Add(1)
5483 mp.spinning = true
5484 mp.nextp.set(pp)
5485 notewakeup(&mp.park)
5486 return
5487 }
5488 }
5489
5490
5491 for _, pp := range allp {
5492 if len(pp.timers) > 0 {
5493 return
5494 }
5495 }
5496
5497 unlock(&sched.lock)
5498 fatal("all goroutines are asleep - deadlock!")
5499 }
5500
5501 // forcegcperiod is the maximum time in nanoseconds between garbage
5502 // collections. If we go this long without a garbage collection, one
5503 // is forced to run.
5504 //
5505 // This is a variable for testing purposes. It normally doesn't change.
5506 var forcegcperiod int64 = 2 * 60 * 1e9
5507
5508 // needSysmonWorkaround is true if the workaround for
5509 // golang.org/issue/42515 is needed on NetBSD.
5510 var needSysmonWorkaround bool = false
5511
5512 // Always runs without a P, so write barriers are not allowed.
5513 //
5514 //go:nowritebarrierrec
5515 func sysmon() {
5516 lock(&sched.lock)
5517 sched.nmsys++
5518 checkdead()
5519 unlock(&sched.lock)
5520
5521 lasttrace := int64(0)
5522 idle := 0
5523 delay := uint32(0)
5524
5525 for {
5526 if idle == 0 {
5527 delay = 20
5528 } else if idle > 50 {
5529 delay *= 2
5530 }
5531 if delay > 10*1000 {
5532 delay = 10 * 1000
5533 }
5534 usleep(delay)
5535
5536
5537
5538
5539
5540
5541
5542
5543
5544
5545
5546
5547
5548
5549
5550
5551 now := nanotime()
5552 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
5553 lock(&sched.lock)
5554 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
5555 syscallWake := false
5556 next := timeSleepUntil()
5557 if next > now {
5558 sched.sysmonwait.Store(true)
5559 unlock(&sched.lock)
5560
5561
5562 sleep := forcegcperiod / 2
5563 if next-now < sleep {
5564 sleep = next - now
5565 }
5566 shouldRelax := sleep >= osRelaxMinNS
5567 if shouldRelax {
5568 osRelax(true)
5569 }
5570 syscallWake = notetsleep(&sched.sysmonnote, sleep)
5571 if shouldRelax {
5572 osRelax(false)
5573 }
5574 lock(&sched.lock)
5575 sched.sysmonwait.Store(false)
5576 noteclear(&sched.sysmonnote)
5577 }
5578 if syscallWake {
5579 idle = 0
5580 delay = 20
5581 }
5582 }
5583 unlock(&sched.lock)
5584 }
5585
5586 lock(&sched.sysmonlock)
5587
5588
5589 now = nanotime()
5590
5591
5592 if *cgo_yield != nil {
5593 asmcgocall(*cgo_yield, nil)
5594 }
5595
5596 lastpoll := sched.lastpoll.Load()
5597 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
5598 sched.lastpoll.CompareAndSwap(lastpoll, now)
5599 list := netpoll(0)
5600 if !list.empty() {
5601
5602
5603
5604
5605
5606
5607
5608 incidlelocked(-1)
5609 injectglist(&list)
5610 incidlelocked(1)
5611 }
5612 }
5613 if GOOS == "netbsd" && needSysmonWorkaround {
5614
5615
5616
5617
5618
5619
5620
5621
5622
5623
5624
5625
5626
5627
5628
5629 if next := timeSleepUntil(); next < now {
5630 startm(nil, false, false)
5631 }
5632 }
5633 if scavenger.sysmonWake.Load() != 0 {
5634
5635 scavenger.wake()
5636 }
5637
5638
5639 if retake(now) != 0 {
5640 idle = 0
5641 } else {
5642 idle++
5643 }
5644
5645 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
5646 lock(&forcegc.lock)
5647 forcegc.idle.Store(false)
5648 var list gList
5649 list.push(forcegc.g)
5650 injectglist(&list)
5651 unlock(&forcegc.lock)
5652 }
5653 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
5654 lasttrace = now
5655 schedtrace(debug.scheddetail > 0)
5656 }
5657 unlock(&sched.sysmonlock)
5658 }
5659 }
5660
5661 type sysmontick struct {
5662 schedtick uint32
5663 schedwhen int64
5664 syscalltick uint32
5665 syscallwhen int64
5666 }
5667
5668 // forcePreemptNS is the time slice given to a G before it is
5669 // preempted.
5670 const forcePreemptNS = 10 * 1000 * 1000
5671
5672 func retake(now int64) uint32 {
5673 n := 0
5674
5675
5676 lock(&allpLock)
5677
5678
5679
5680 for i := 0; i < len(allp); i++ {
5681 pp := allp[i]
5682 if pp == nil {
5683
5684
5685 continue
5686 }
5687 pd := &pp.sysmontick
5688 s := pp.status
5689 sysretake := false
5690 if s == _Prunning || s == _Psyscall {
5691
5692 t := int64(pp.schedtick)
5693 if int64(pd.schedtick) != t {
5694 pd.schedtick = uint32(t)
5695 pd.schedwhen = now
5696 } else if pd.schedwhen+forcePreemptNS <= now {
5697 preemptone(pp)
5698
5699
5700 sysretake = true
5701 }
5702 }
5703 if s == _Psyscall {
5704
5705 t := int64(pp.syscalltick)
5706 if !sysretake && int64(pd.syscalltick) != t {
5707 pd.syscalltick = uint32(t)
5708 pd.syscallwhen = now
5709 continue
5710 }
5711
5712
5713
5714 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
5715 continue
5716 }
5717
5718 unlock(&allpLock)
5719
5720
5721
5722
5723 incidlelocked(-1)
5724 if atomic.Cas(&pp.status, s, _Pidle) {
5725 if traceEnabled() {
5726 traceGoSysBlock(pp)
5727 traceProcStop(pp)
5728 }
5729 n++
5730 pp.syscalltick++
5731 handoffp(pp)
5732 }
5733 incidlelocked(1)
5734 lock(&allpLock)
5735 }
5736 }
5737 unlock(&allpLock)
5738 return uint32(n)
5739 }
5740
5741 // Tell all goroutines that they have been preempted and they should stop.
5742 // This function is purely best-effort. It can fail to inform a goroutine if a
5743 // processor just started running it.
5744 // No locks need to be held.
5745 // Returns true if preemption request was issued to at least one goroutine.
5746 func preemptall() bool {
5747 res := false
5748 for _, pp := range allp {
5749 if pp.status != _Prunning {
5750 continue
5751 }
5752 if preemptone(pp) {
5753 res = true
5754 }
5755 }
5756 return res
5757 }
5758
5759 // Tell the goroutine running on processor P to stop.
5760 // This function is purely best-effort. It can incorrectly fail to inform the
5761 // goroutine. It can inform the wrong goroutine. Even if it informs the
5762 // correct goroutine, that goroutine might ignore the request if it is
5763 // simultaneously executing newstack.
5764 // No lock needs to be held.
5765 // Returns true if preemption request was issued.
5766 // The actual preemption will happen at some point in the future
5767 // and will be indicated by the gp->status no longer being
5768 // Grunning.
5769 func preemptone(pp *p) bool {
5770 mp := pp.m.ptr()
5771 if mp == nil || mp == getg().m {
5772 return false
5773 }
5774 gp := mp.curg
5775 if gp == nil || gp == mp.g0 {
5776 return false
5777 }
5778
5779 gp.preempt = true
5780
5781
5782
5783
5784
5785 gp.stackguard0 = stackPreempt
5786
5787
5788 if preemptMSupported && debug.asyncpreemptoff == 0 {
5789 pp.preempt = true
5790 preemptM(mp)
5791 }
5792
5793 return true
5794 }
5795
5796 var starttime int64
5797
5798 func schedtrace(detailed bool) {
5799 now := nanotime()
5800 if starttime == 0 {
5801 starttime = now
5802 }
5803
5804 lock(&sched.lock)
5805 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
5806 if detailed {
5807 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
5808 }
5809
5810
5811
5812 for i, pp := range allp {
5813 mp := pp.m.ptr()
5814 h := atomic.Load(&pp.runqhead)
5815 t := atomic.Load(&pp.runqtail)
5816 if detailed {
5817 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
5818 if mp != nil {
5819 print(mp.id)
5820 } else {
5821 print("nil")
5822 }
5823 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers), "\n")
5824 } else {
5825
5826
5827 print(" ")
5828 if i == 0 {
5829 print("[")
5830 }
5831 print(t - h)
5832 if i == len(allp)-1 {
5833 print("]\n")
5834 }
5835 }
5836 }
5837
5838 if !detailed {
5839 unlock(&sched.lock)
5840 return
5841 }
5842
5843 for mp := allm; mp != nil; mp = mp.alllink {
5844 pp := mp.p.ptr()
5845 print(" M", mp.id, ": p=")
5846 if pp != nil {
5847 print(pp.id)
5848 } else {
5849 print("nil")
5850 }
5851 print(" curg=")
5852 if mp.curg != nil {
5853 print(mp.curg.goid)
5854 } else {
5855 print("nil")
5856 }
5857 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
5858 if lockedg := mp.lockedg.ptr(); lockedg != nil {
5859 print(lockedg.goid)
5860 } else {
5861 print("nil")
5862 }
5863 print("\n")
5864 }
5865
5866 forEachG(func(gp *g) {
5867 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
5868 if gp.m != nil {
5869 print(gp.m.id)
5870 } else {
5871 print("nil")
5872 }
5873 print(" lockedm=")
5874 if lockedm := gp.lockedm.ptr(); lockedm != nil {
5875 print(lockedm.id)
5876 } else {
5877 print("nil")
5878 }
5879 print("\n")
5880 })
5881 unlock(&sched.lock)
5882 }
5883
5884 // schedEnableUser enables or disables the scheduling of user
5885 // goroutines.
5886 //
5887 // This does not stop already running user goroutines, so the caller
5888 // should first stop the world when disabling user goroutines.
5889 func schedEnableUser(enable bool) {
5890 lock(&sched.lock)
5891 if sched.disable.user == !enable {
5892 unlock(&sched.lock)
5893 return
5894 }
5895 sched.disable.user = !enable
5896 if enable {
5897 n := sched.disable.n
5898 sched.disable.n = 0
5899 globrunqputbatch(&sched.disable.runnable, n)
5900 unlock(&sched.lock)
5901 for ; n != 0 && sched.npidle.Load() != 0; n-- {
5902 startm(nil, false, false)
5903 }
5904 } else {
5905 unlock(&sched.lock)
5906 }
5907 }
5908
5909 // schedEnabled reports whether gp should be scheduled. It returns
5910 // false if scheduling of gp is disabled.
5911 //
5912 // sched.lock must be held.
5913 func schedEnabled(gp *g) bool {
5914 assertLockHeld(&sched.lock)
5915
5916 if sched.disable.user {
5917 return isSystemGoroutine(gp, true)
5918 }
5919 return true
5920 }
5921
5922 // Put mp on midle list.
5923 // sched.lock must be held.
5924 // May run during STW, so write barriers are not allowed.
5925 //
5926 //go:nowritebarrierrec
5927 func mput(mp *m) {
5928 assertLockHeld(&sched.lock)
5929
5930 mp.schedlink = sched.midle
5931 sched.midle.set(mp)
5932 sched.nmidle++
5933 checkdead()
5934 }
5935
5936 // Try to get an m from midle list.
5937 // sched.lock must be held.
5938 // May run during STW, so write barriers are not allowed.
5939 //
5940 //go:nowritebarrierrec
5941 func mget() *m {
5942 assertLockHeld(&sched.lock)
5943
5944 mp := sched.midle.ptr()
5945 if mp != nil {
5946 sched.midle = mp.schedlink
5947 sched.nmidle--
5948 }
5949 return mp
5950 }
5951
5952 // Put gp on the global runnable queue.
5953 // sched.lock must be held.
5954 // May run during STW, so write barriers are not allowed.
5955 //
5956 //go:nowritebarrierrec
5957 func globrunqput(gp *g) {
5958 assertLockHeld(&sched.lock)
5959
5960 sched.runq.pushBack(gp)
5961 sched.runqsize++
5962 }
5963
5964 // Put gp at the head of the global runnable queue.
5965 // sched.lock must be held.
5966 // May run during STW, so write barriers are not allowed.
5967 //
5968 //go:nowritebarrierrec
5969 func globrunqputhead(gp *g) {
5970 assertLockHeld(&sched.lock)
5971
5972 sched.runq.push(gp)
5973 sched.runqsize++
5974 }
5975
5976 // Put a batch of runnable goroutines on the global runnable queue.
5977 // This clears *batch.
5978 // sched.lock must be held.
5979 // May run during STW, so write barriers are not allowed.
5980 //
5981 //go:nowritebarrierrec
5982 func globrunqputbatch(batch *gQueue, n int32) {
5983 assertLockHeld(&sched.lock)
5984
5985 sched.runq.pushBackAll(*batch)
5986 sched.runqsize += n
5987 *batch = gQueue{}
5988 }
5989
5990 // Try to get a batch of G's from the global runnable queue.
5991 // sched.lock must be held.
5992 func globrunqget(pp *p, max int32) *g {
5993 assertLockHeld(&sched.lock)
5994
5995 if sched.runqsize == 0 {
5996 return nil
5997 }
5998
5999 n := sched.runqsize/gomaxprocs + 1
6000 if n > sched.runqsize {
6001 n = sched.runqsize
6002 }
6003 if max > 0 && n > max {
6004 n = max
6005 }
6006 if n > int32(len(pp.runq))/2 {
6007 n = int32(len(pp.runq)) / 2
6008 }
6009
6010 sched.runqsize -= n
6011
6012 gp := sched.runq.pop()
6013 n--
6014 for ; n > 0; n-- {
6015 gp1 := sched.runq.pop()
6016 runqput(pp, gp1, false)
6017 }
6018 return gp
6019 }
6020
6021 // pMask is an atomic bitmask of Ps.
6022 type pMask []uint32
6023
6024 // read returns true if P id's bit is set.
6025 func (p pMask) read(id uint32) bool {
6026 word := id / 32
6027 mask := uint32(1) << (id % 32)
6028 return (atomic.Load(&p[word]) & mask) != 0
6029 }
6030
6031 // set sets P id's bit.
6032 func (p pMask) set(id int32) {
6033 word := id / 32
6034 mask := uint32(1) << (id % 32)
6035 atomic.Or(&p[word], mask)
6036 }
6037
6038 // clear clears P id's bit.
6039 func (p pMask) clear(id int32) {
6040 word := id / 32
6041 mask := uint32(1) << (id % 32)
6042 atomic.And(&p[word], ^mask)
6043 }
6044
6045 // updateTimerPMask clears pp's timer mask if it has no timers on its heap.
6046 //
6047 // Ideally, the timer mask would be kept immediately consistent on any timer
6048 // operations. Unfortunately, updating a shared global data structure in the
6049 // timer hot path adds too much overhead in applications frequently switching
6050 // between no timers and some timers.
6051 //
6052 // As a compromise, the timer mask is updated only on pidleget / pidleput. A
6053 // running P (returned by pidleget) may add a timer at any time, so its mask
6054 // must be set. An idle P (passed to pidleput) cannot add new timers while
6055 // idle, so if it has no timers at that time, its mask may be cleared.
6056 //
6057 // Thus, we get the following effects on timer-stealing in findRunnable:
6058 //
6059 //   - Idle Ps with no timers when they go idle are never checked in findRunnable
6060 //     (for work- or timer-stealing; this is the ideal case).
6061 //   - Running Ps must always be checked.
6062 //   - Idle Ps whose timers are stolen must continue to be checked until they run
6063 //     again, even after timer expiration.
6064 //
6065 // When the P starts running again, the mask should be set, as a timer may be
6066 // added at any time.
6067 //
6068 // TODO(prattmic): Additional targeted updates may improve the above cases.
6069 // e.g., updating the mask when stealing a timer.
6070 func updateTimerPMask(pp *p) {
6071 if pp.numTimers.Load() > 0 {
6072 return
6073 }
6074
6075 // Looks like there are no timers, however another P may transiently
6076 // decrement numTimers when handling a timerModified timer in
6077 // checkTimers. We must take timersLock to serialize with these changes.
6078 lock(&pp.timersLock)
6079 if pp.numTimers.Load() == 0 {
6080 timerpMask.clear(pp.id)
6081 }
6082 unlock(&pp.timersLock)
6083 }
6084
6085 // pidleput puts p on the _Pidle list. now must be a relatively recent call
6086 // to nanotime() or zero. Returns now or the current time if now was zero.
6087 //
6088 // This releases ownership of p. Once sched.lock is released it is no longer
6089 // safe to use p.
6090 //
6091 // sched.lock must be held.
6092 //
6093 // May run during STW, so write barriers are not allowed.
6094 //
6095 //go:nowritebarrierrec
6096 func pidleput(pp *p, now int64) int64 {
6097 assertLockHeld(&sched.lock)
6098
6099 if !runqempty(pp) {
6100 throw("pidleput: P has non-empty run queue")
6101 }
6102 if now == 0 {
6103 now = nanotime()
6104 }
6105 updateTimerPMask(pp)
6106 idlepMask.set(pp.id)
6107 pp.link = sched.pidle
6108 sched.pidle.set(pp)
6109 sched.npidle.Add(1)
6110 if !pp.limiterEvent.start(limiterEventIdle, now) {
6111 throw("must be able to track idle limiter event")
6112 }
6113 return now
6114 }
6115
6116 // pidleget tries to get a p from the _Pidle list, acquiring ownership of
6117 // the timers on it.
6118 //
6119 // sched.lock must be held.
6120 //
6121 // May run during STW, so write barriers are not allowed.
6122 //go:nowritebarrierrec
6123 func pidleget(now int64) (*p, int64) {
6124 assertLockHeld(&sched.lock)
6125
6126 pp := sched.pidle.ptr()
6127 if pp != nil {
6128
6129 if now == 0 {
6130 now = nanotime()
6131 }
6132 timerpMask.set(pp.id)
6133 idlepMask.clear(pp.id)
6134 sched.pidle = pp.link
6135 sched.npidle.Add(-1)
6136 pp.limiterEvent.stop(limiterEventIdle, now)
6137 }
6138 return pp, now
6139 }
6140
6141 // pidlegetSpinning tries to get a p from the _Pidle list, acquiring ownership
6142 // of the timers on it. This is called by spinning Ms (or callers that need a
6143 // spinning M) that have found work. If no P is available, this must synchronize
6144 // with non-spinning Ms that may be preparing to drop their P.
6145 //
6146 // sched.lock must be held.
6147 //
6148 // May run during STW, so write barriers are not allowed.
6149 //
6150 //go:nowritebarrierrec
6151 func pidlegetSpinning(now int64) (*p, int64) {
6152 assertLockHeld(&sched.lock)
6153
6154 pp, now := pidleget(now)
6155 if pp == nil {
6156
6157
6158
6159 sched.needspinning.Store(1)
6160 return nil, now
6161 }
6162
6163 return pp, now
6164 }
6165
6166 // runqempty reports whether pp has no Gs on its local run queue.
6167 // It never returns true spuriously.
6168 func runqempty(pp *p) bool {
6169 // Defend against a race where 1) pp has G1 in runnext but runqhead == runqtail,
6170 // 2) runqput on pp kicks G1 to the runq, 3) runqget on pp empties runqnext.
6171 // Simply observing that runqhead == runqtail and then observing that runnext == nil
6172 // does not mean the queue is empty.
6173 for {
6174 head := atomic.Load(&pp.runqhead)
6175 tail := atomic.Load(&pp.runqtail)
6176 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
6177 if tail == atomic.Load(&pp.runqtail) {
6178 return head == tail && runnext == 0
6179 }
6180 }
6181 }
6182
6183 // To shake out latent assumptions about scheduling order,
6184 // we introduce some randomness into scheduling decisions
6185 // when running with the race detector.
6186 // The need for this was made obvious by changing the
6187 // (deterministic) scheduling order in Go 1.5 and breaking
6188 // many poorly-written tests.
6189 // With the randomness here, as long as the tests pass
6190 // consistently with -race, they shouldn't have latent scheduling
6191 // assumptions.
6192 const randomizeScheduler = raceenabled
6193
6194 // runqput tries to put g on the local runnable queue.
6195 // If next is false, runqput adds g to the tail of the runnable queue.
6196 // If next is true, runqput puts g in the pp.runnext slot.
6197 // If the run queue is full, runnext puts g on the global queue.
6198 // Executed only by the owner P.
6199 func runqput(pp *p, gp *g, next bool) {
6200 if randomizeScheduler && next && fastrandn(2) == 0 {
6201 next = false
6202 }
6203
6204 if next {
6205 retryNext:
6206 oldnext := pp.runnext
6207 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
6208 goto retryNext
6209 }
6210 if oldnext == 0 {
6211 return
6212 }
6213
6214 gp = oldnext.ptr()
6215 }
6216
6217 retry:
6218 h := atomic.LoadAcq(&pp.runqhead)
6219 t := pp.runqtail
6220 if t-h < uint32(len(pp.runq)) {
6221 pp.runq[t%uint32(len(pp.runq))].set(gp)
6222 atomic.StoreRel(&pp.runqtail, t+1)
6223 return
6224 }
6225 if runqputslow(pp, gp, h, t) {
6226 return
6227 }
6228
6229 goto retry
6230 }
6231
6232 // Put g and a batch of work from local runnable queue on global queue.
6233 // Executed only by the owner P.
6234 func runqputslow(pp *p, gp *g, h, t uint32) bool {
6235 var batch [len(pp.runq)/2 + 1]*g
6236
6237
6238 n := t - h
6239 n = n / 2
6240 if n != uint32(len(pp.runq)/2) {
6241 throw("runqputslow: queue is not full")
6242 }
6243 for i := uint32(0); i < n; i++ {
6244 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6245 }
6246 if !atomic.CasRel(&pp.runqhead, h, h+n) {
6247 return false
6248 }
6249 batch[n] = gp
6250
6251 if randomizeScheduler {
6252 for i := uint32(1); i <= n; i++ {
6253 j := fastrandn(i + 1)
6254 batch[i], batch[j] = batch[j], batch[i]
6255 }
6256 }
6257
6258
6259 for i := uint32(0); i < n; i++ {
6260 batch[i].schedlink.set(batch[i+1])
6261 }
6262 var q gQueue
6263 q.head.set(batch[0])
6264 q.tail.set(batch[n])
6265
6266
6267 lock(&sched.lock)
6268 globrunqputbatch(&q, int32(n+1))
6269 unlock(&sched.lock)
6270 return true
6271 }
6272
6273 // runqputbatch tries to put all the G's on q on the local runnable queue.
6274 // If the queue is full, they are put on the global queue; in that case
6275 // this will temporarily acquire the scheduler lock.
6276 // Executed only by the owner P.
6277 func runqputbatch(pp *p, q *gQueue, qsize int) {
6278 h := atomic.LoadAcq(&pp.runqhead)
6279 t := pp.runqtail
6280 n := uint32(0)
6281 for !q.empty() && t-h < uint32(len(pp.runq)) {
6282 gp := q.pop()
6283 pp.runq[t%uint32(len(pp.runq))].set(gp)
6284 t++
6285 n++
6286 }
6287 qsize -= int(n)
6288
6289 if randomizeScheduler {
6290 off := func(o uint32) uint32 {
6291 return (pp.runqtail + o) % uint32(len(pp.runq))
6292 }
6293 for i := uint32(1); i < n; i++ {
6294 j := fastrandn(i + 1)
6295 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
6296 }
6297 }
6298
6299 atomic.StoreRel(&pp.runqtail, t)
6300 if !q.empty() {
6301 lock(&sched.lock)
6302 globrunqputbatch(q, int32(qsize))
6303 unlock(&sched.lock)
6304 }
6305 }
6306
6307 // Get g from local runnable queue.
6308 // If inheritTime is true, gp should inherit the remaining time in the
6309 // current time slice. Otherwise, it should start a new time slice.
6310 // Executed only by the owner P.
6311 func runqget(pp *p) (gp *g, inheritTime bool) {
6312
6313 next := pp.runnext
6314
6315
6316
6317 if next != 0 && pp.runnext.cas(next, 0) {
6318 return next.ptr(), true
6319 }
6320
6321 for {
6322 h := atomic.LoadAcq(&pp.runqhead)
6323 t := pp.runqtail
6324 if t == h {
6325 return nil, false
6326 }
6327 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
6328 if atomic.CasRel(&pp.runqhead, h, h+1) {
6329 return gp, false
6330 }
6331 }
6332 }
6333
6334 // runqdrain drains the local runnable queue of pp and returns all goroutines in it.
6335 // Executed only by the owner P.
6336 func runqdrain(pp *p) (drainQ gQueue, n uint32) {
6337 oldNext := pp.runnext
6338 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
6339 drainQ.pushBack(oldNext.ptr())
6340 n++
6341 }
6342
6343 retry:
6344 h := atomic.LoadAcq(&pp.runqhead)
6345 t := pp.runqtail
6346 qn := t - h
6347 if qn == 0 {
6348 return
6349 }
6350 if qn > uint32(len(pp.runq)) {
6351 goto retry
6352 }
6353
6354 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
6355 goto retry
6356 }
6357
6358
6359
6360
6361
6362
6363
6364
6365 for i := uint32(0); i < qn; i++ {
6366 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6367 drainQ.pushBack(gp)
6368 n++
6369 }
6370 return
6371 }
6372
6373 // Grabs a batch of goroutines from pp's runnable queue into batch.
6374 // Batch is a ring buffer starting at batchHead.
6375 // Returns number of grabbed goroutines.
6376 // Can be executed by any P.
6377 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
6378 for {
6379 h := atomic.LoadAcq(&pp.runqhead)
6380 t := atomic.LoadAcq(&pp.runqtail)
6381 n := t - h
6382 n = n - n/2
6383 if n == 0 {
6384 if stealRunNextG {
6385
6386 if next := pp.runnext; next != 0 {
6387 if pp.status == _Prunning {
6388
6389
6390
6391
6392
6393
6394
6395
6396
6397
6398 if GOOS != "windows" && GOOS != "openbsd" && GOOS != "netbsd" {
6399 usleep(3)
6400 } else {
6401
6402
6403
6404 osyield()
6405 }
6406 }
6407 if !pp.runnext.cas(next, 0) {
6408 continue
6409 }
6410 batch[batchHead%uint32(len(batch))] = next
6411 return 1
6412 }
6413 }
6414 return 0
6415 }
6416 if n > uint32(len(pp.runq)/2) {
6417 continue
6418 }
6419 for i := uint32(0); i < n; i++ {
6420 g := pp.runq[(h+i)%uint32(len(pp.runq))]
6421 batch[(batchHead+i)%uint32(len(batch))] = g
6422 }
6423 if atomic.CasRel(&pp.runqhead, h, h+n) {
6424 return n
6425 }
6426 }
6427 }
6428
6429 // Steal half of elements from local runnable queue of p2
6430 // and put onto local runnable queue of p.
6431 // Returns one of the stolen elements (or nil if failed).
6432 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
6433 t := pp.runqtail
6434 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
6435 if n == 0 {
6436 return nil
6437 }
6438 n--
6439 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
6440 if n == 0 {
6441 return gp
6442 }
6443 h := atomic.LoadAcq(&pp.runqhead)
6444 if t-h+n >= uint32(len(pp.runq)) {
6445 throw("runqsteal: runq overflow")
6446 }
6447 atomic.StoreRel(&pp.runqtail, t+n)
6448 return gp
6449 }
6450
6451 // A gQueue is a deque of Gs linked through g.schedlink. A G can only
6452 // be on one gQueue or gList at a time.
6453 type gQueue struct {
6454 head guintptr
6455 tail guintptr
6456 }
6457
6458
6459 func (q *gQueue) empty() bool {
6460 return q.head == 0
6461 }
6462
6463
6464 func (q *gQueue) push(gp *g) {
6465 gp.schedlink = q.head
6466 q.head.set(gp)
6467 if q.tail == 0 {
6468 q.tail.set(gp)
6469 }
6470 }
6471
6472
6473 func (q *gQueue) pushBack(gp *g) {
6474 gp.schedlink = 0
6475 if q.tail != 0 {
6476 q.tail.ptr().schedlink.set(gp)
6477 } else {
6478 q.head.set(gp)
6479 }
6480 q.tail.set(gp)
6481 }
6482
6483
6484
6485 func (q *gQueue) pushBackAll(q2 gQueue) {
6486 if q2.tail == 0 {
6487 return
6488 }
6489 q2.tail.ptr().schedlink = 0
6490 if q.tail != 0 {
6491 q.tail.ptr().schedlink = q2.head
6492 } else {
6493 q.head = q2.head
6494 }
6495 q.tail = q2.tail
6496 }
6497
6498
6499
6500 func (q *gQueue) pop() *g {
6501 gp := q.head.ptr()
6502 if gp != nil {
6503 q.head = gp.schedlink
6504 if q.head == 0 {
6505 q.tail = 0
6506 }
6507 }
6508 return gp
6509 }
6510
6511
6512 func (q *gQueue) popList() gList {
6513 stack := gList{q.head}
6514 *q = gQueue{}
6515 return stack
6516 }
6517
6518 // A gList is a list of Gs linked through g.schedlink. A G can only be
6519 // on one gQueue or gList at a time.
6520 type gList struct {
6521 head guintptr
6522 }
6523
6524
6525 func (l *gList) empty() bool {
6526 return l.head == 0
6527 }
6528
6529
6530 func (l *gList) push(gp *g) {
6531 gp.schedlink = l.head
6532 l.head.set(gp)
6533 }
6534
6535
6536 func (l *gList) pushAll(q gQueue) {
6537 if !q.empty() {
6538 q.tail.ptr().schedlink = l.head
6539 l.head = q.head
6540 }
6541 }
6542
6543
6544 func (l *gList) pop() *g {
6545 gp := l.head.ptr()
6546 if gp != nil {
6547 l.head = gp.schedlink
6548 }
6549 return gp
6550 }
6551
6552
6553 func setMaxThreads(in int) (out int) {
6554 lock(&sched.lock)
6555 out = int(sched.maxmcount)
6556 if in > 0x7fffffff {
6557 sched.maxmcount = 0x7fffffff
6558 } else {
6559 sched.maxmcount = int32(in)
6560 }
6561 checkmcount()
6562 unlock(&sched.lock)
6563 return
6564 }
6565
6566
6567 func procPin() int {
6568 gp := getg()
6569 mp := gp.m
6570
6571 mp.locks++
6572 return int(mp.p.ptr().id)
6573 }
6574
6575
6576 func procUnpin() {
6577 gp := getg()
6578 gp.m.locks--
6579 }
6580
6581
6582
6583 func sync_runtime_procPin() int {
6584 return procPin()
6585 }
6586
6587
6588
6589 func sync_runtime_procUnpin() {
6590 procUnpin()
6591 }
6592
6593
6594
6595 func sync_atomic_runtime_procPin() int {
6596 return procPin()
6597 }
6598
6599
6600
6601 func sync_atomic_runtime_procUnpin() {
6602 procUnpin()
6603 }
6604
6605 // Active spinning for sync.Mutex.
6606 //
6607 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
6608 //go:nosplit
6609 func sync_runtime_canSpin(i int) bool {
6610 // sync.Mutex is cooperative, so we are conservative with spinning.
6611 // Spin only a few times and only if running on a multicore machine and
6612 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
6613 // As opposed to runtime mutex we don't do passive spinning here,
6614 // because there can be work on the global runq or on other Ps.
6615 if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
6616 return false
6617 }
6618 if p := getg().m.p.ptr(); !runqempty(p) {
6619 return false
6620 }
6621 return true
6622 }
6623
6624 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
6625 //go:nosplit
6626 func sync_runtime_doSpin() {
6627 procyield(active_spin_cnt)
6628 }
6629
6630 var stealOrder randomOrder
6631
6632 // randomOrder/randomEnum are helper types for randomized work stealing.
6633 // They allow to enumerate all Ps in different pseudo-random orders without repetitions.
6634 // The algorithm is based on the fact that if we have X such that GCD(X,N)==1
6635 // (X is coprime with N), then a sequence of (i + X) % N covers all N positions exactly once.
6636 type randomOrder struct {
6637 count uint32
6638 coprimes []uint32
6639 }
6640
6641 type randomEnum struct {
6642 i uint32
6643 count uint32
6644 pos uint32
6645 inc uint32
6646 }
6647
6648 func (ord *randomOrder) reset(count uint32) {
6649 ord.count = count
6650 ord.coprimes = ord.coprimes[:0]
6651 for i := uint32(1); i <= count; i++ {
6652 if gcd(i, count) == 1 {
6653 ord.coprimes = append(ord.coprimes, i)
6654 }
6655 }
6656 }
6657
6658 func (ord *randomOrder) start(i uint32) randomEnum {
6659 return randomEnum{
6660 count: ord.count,
6661 pos: i % ord.count,
6662 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
6663 }
6664 }
6665
6666 func (enum *randomEnum) done() bool {
6667 return enum.i == enum.count
6668 }
6669
6670 func (enum *randomEnum) next() {
6671 enum.i++
6672 enum.pos = (enum.pos + enum.inc) % enum.count
6673 }
6674
6675 func (enum *randomEnum) position() uint32 {
6676 return enum.pos
6677 }
6678
6679 func gcd(a, b uint32) uint32 {
6680 for b != 0 {
6681 a, b = b, a%b
6682 }
6683 return a
6684 }
6685
6686 // An initTask represents the set of initializations that need to be done for a package.
6687 // Keep in sync with ../../test/noinit.go:initTask
6688 type initTask struct {
6689 state uint32
6690 nfns uint32
6691 // followed by nfns pcs, uintptr sized, one per init function to run
6692 }
6693
6694 // inittrace stores statistics for init functions which are
6695 // updated by malloc and newproc when active is true.
6696 var inittrace tracestat
6697
6698 type tracestat struct {
6699 active bool
6700 id uint64
6701 allocs uint64
6702 bytes uint64
6703 }
6704
6705 func doInit(ts []*initTask) {
6706 for _, t := range ts {
6707 doInit1(t)
6708 }
6709 }
6710
6711 func doInit1(t *initTask) {
6712 switch t.state {
6713 case 2:
6714 return
6715 case 1:
6716 throw("recursive call during initialization - linker skew")
6717 default:
6718 t.state = 1
6719
6720 var (
6721 start int64
6722 before tracestat
6723 )
6724
6725 if inittrace.active {
6726 start = nanotime()
6727
6728 before = inittrace
6729 }
6730
6731 if t.nfns == 0 {
6732 // We should have pruned all of these in the linker.
6733 throw("inittask with no functions")
6734 }
6735
6736 firstFunc := add(unsafe.Pointer(t), 8)
6737 for i := uint32(0); i < t.nfns; i++ {
6738 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
6739 f := *(*func())(unsafe.Pointer(&p))
6740 f()
6741 }
6742
6743 if inittrace.active {
6744 end := nanotime()
6745
6746 after := inittrace
6747
6748 f := *(*func())(unsafe.Pointer(&firstFunc))
6749 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
6750
6751 var sbuf [24]byte
6752 print("init ", pkg, " @")
6753 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
6754 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
6755 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
6756 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
6757 print("\n")
6758 }
6759
6760 t.state = 2
6761 }
6762 }
6763