// src/runtime/export_test.go
// Export guts for testing.

package runtime

import (
	"internal/goarch"
	"internal/goos"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32
var ParseByteCount = parseByteCount

var Nanotime = nanotime
var NetpollBreak = netpollBreak
var Usleep = usleep

var PhysPageSize = physPageSize
var PhysHugePageSize = physHugePageSize

var NetpollGenericInit = netpollGenericInit

var Memmove = memmove
var MemclrNoHeapPointers = memclrNoHeapPointers

var LockPartialOrder = lockPartialOrder

type LockRank lockRank

func (l LockRank) String() string {
	return lockRank(l).String()
}

const PreemptMSupported = preemptMSupported

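// LFNode mirrors lfnode so tests can exercise the runtime's lock-free
// stack (lfstack) through LFStackPush and LFStackPop below.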
type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}

func Netpoll(delta int64) {
	systemstack(func() {
		netpoll(delta)
	})
}

func GCMask(x any) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}

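// RunSchedLocalQueueTest exercises a P's local run queue: it pushes
// batches of goroutines with runqput and checks that runqget drains
// exactly the elements that were queued.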
func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	Escape(gs)
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

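// RunSchedLocalQueueStealTest checks runqsteal: p2 steals roughly half
// of p1's local run queue, and every queued goroutine must be drained
// exactly once between the two Ps.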
func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	Escape(gs)
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

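// RunSchedLocalQueueEmptyTest stress-tests runqempty against concurrent
// runqput/runqget on the same P; runqempty must never spuriously report
// an empty queue.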
func RunSchedLocalQueueEmptyTest(iters int) {
	// Spurious emptiness can lead to bad scheduling decisions, so check
	// that runqempty stays correct while another goroutine concurrently
	// pushes and pops on the same queue.
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	Escape(gs)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

const HashLoad = hashLoad

func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

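// BenchSetType is used by GC benchmarks: it repeatedly writes heap
// bitmap type information (heapBitsSetType) for an allocation shaped
// like x, which must be a pointer or a slice.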
func BenchSetType(n int, x any) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}

const PtrSize = goarch.PtrSize

var ForceGCPeriod = &forcegcperiod

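// SetTracebackEnv is like runtime/debug.SetTraceback, but it also
// raises the "environment" traceback baseline, so later calls to
// SetTraceback cannot lower the level below the one set here.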
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse.Load())

	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}

func Fastrand() uint32          { return fastrand() }
func Fastrand64() uint64        { return fastrand64() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}

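// ReadMetricsSlow stops the world, then reads memory statistics and
// runtime metrics together so the two views stay consistent with each
// other. The samples are written through samplesp.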
func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
	stopTheWorld("ReadMetricsSlow")

	// Initialize the metrics beforehand so that reading them below does
	// not allocate and skew the results.
	metricsLock()
	initMetrics()
	metricsUnlock()

	systemstack(func() {
		// Read memstats first: it flushes the mcaches, which the metrics
		// read below does not do on its own.
		readmemstats_m(memStats)
	})

	// Read the metrics while the world is still stopped so that the
	// samples are consistent with memStats.
	readMetrics(samplesp, len, cap)

	startTheWorld()
}

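// ReadMemStatsSlow returns both the runtime-computed MemStats and a
// MemStats recomputed by scanning the heap directly, so tests can
// cross-check the two.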
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change while we read them.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing below.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		slow.HeapReleased = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state.get() != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees by reading the accumulated heap stats directly.
		var m heapStatsDelta
		memstats.heapStats.unsafeRead(&m)

		// Collect per-sizeclass free counts.
		var smallFree uint64
		for i := 0; i < _NumSizeClasses; i++ {
			slow.Frees += uint64(m.smallFreeCount[i])
			bySize[i].Frees += uint64(m.smallFreeCount[i])
			bySize[i].Mallocs += uint64(m.smallFreeCount[i])
			smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
		}
		slow.Frees += uint64(m.tinyAllocCount) + uint64(m.largeFreeCount)
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		// Add in released pages tracked by the page allocator and by
		// each P's page cache.
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
			slow.HeapReleased += uint64(pg) * pageSize
		}
		for _, p := range allp {
			pg := sys.OnesCount64(p.pcache.scav)
			slow.HeapReleased += uint64(pg) * pageSize
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}

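// BlockOnSystemStack switches to the system stack, prints "x" to
// stderr, and then blocks at a well-known spot
// (blockOnSystemStackInternal) by taking the same lock twice.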
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}

const RuntimeHmapSize = unsafe.Sizeof(hmap{})

func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}

func LockOSCounts() (external, internal uint32) {
	g := getg()
	if g.m.lockedExt+g.m.lockedInt == 0 {
		if g.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if g.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return g.m.lockedExt, g.m.lockedInt
}

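// TracebackSystemstack captures a traceback after switching to the
// system stack i times, exercising the _TraceJumpStack logic that
// jumps from the system stack back to the goroutine stack.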
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := getcallerpc(), getcallersp()
		return gentraceback(pc, sp, 0, getg(), 0, &stk[0], len(stk), nil, nil, _TraceJumpStack)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}

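// MapNextArenaHint reserves a page next to the current arena growth
// hint so the heap cannot grow there, and returns the arena-sized
// address range that is no longer usable as a result.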
func MapNextArenaHint() (start, end uintptr) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	sysReserve(unsafe.Pointer(addr), physPageSize)
	return
}

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

type G = g

type Sudog = sudog

func Getg() *G {
	return getg()
}

func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

func unexportedPanicForTesting(b []byte, i int) byte {
	return b[i]
}

func G0StackOverflow() {
	systemstack(func() {
		stackOverflow(nil)
	})
}

func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}

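// MapTombstoneCheck walks every bucket chain of m and checks the
// tophash invariants around emptyRest: no emptyRest marker may appear
// before the last occupied slot, and everything after it must be
// emptyRest.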
func MapTombstoneCheck(m map[int]int) {
	// Pull out the hmap and maptype for m so the buckets can be walked
	// directly.
	h := *(**hmap)(unsafe.Pointer(&m))
	i := any(m)
	t := *(**maptype)(unsafe.Pointer(&i))

	for x := 0; x < 1<<h.B; x++ {
		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize)))
		n := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if b.tophash[i] != emptyRest {
					n++
				}
			}
		}
		k := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if k < n && b.tophash[i] == emptyRest {
					panic("early emptyRest")
				}
				if k >= n && b.tophash[i] != emptyRest {
					panic("late non-emptyRest")
				}
				if k == n-1 && b.tophash[i] == emptyOne {
					panic("last non-emptyRest entry is emptyOne")
				}
				k++
			}
		}
	}
}

func RunGetgThreadSwitchTest() {
	// Test that getg returns the right goroutine across a thread
	// switch: the channel receive below may park this goroutine and
	// resume it on a different OS thread, and getg must not return a
	// stale, cached value afterwards.
	ch := make(chan int)
	go func(ch chan int) {
		ch <- 5
		LockOSThread()
	}(ch)

	g1 := getg()

	// Block on the channel; when this goroutine is resumed it may be
	// running on a different thread.
	<-ch

	g2 := getg()
	if g1 != g2 {
		panic("g1 != g2")
	}

	// Check getg again after some additional control flow.
	g3 := getg()
	if g1 != g3 {
		panic("g1 != g3")
	}
}

const (
	PageSize         = pageSize
	PallocChunkPages = pallocChunkPages
	PageAlloc64Bit   = pageAlloc64Bit
	PallocSumBytes   = pallocSumBytes
)

type PallocSum pallocSum

func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
func (m PallocSum) Start() uint { return pallocSum(m).start() }
func (m PallocSum) Max() uint { return pallocSum(m).max() }
func (m PallocSum) End() uint { return pallocSum(m).end() }

type PallocBits pallocBits

func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
	return (*pallocBits)(b).find(npages, searchIdx)
}
func (b *PallocBits) AllocRange(i, n uint) { (*pallocBits)(b).allocRange(i, n) }
func (b *PallocBits) Free(i, n uint) { (*pallocBits)(b).free(i, n) }
func (b *PallocBits) Summarize() PallocSum { return PallocSum((*pallocBits)(b).summarize()) }
func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }

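// SummarizeSlow is a slower but more obviously correct implementation
// of (*pallocBits).summarize, used to cross-check the optimized one.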
func SummarizeSlow(b *PallocBits) PallocSum {
	var start, max, end uint

	const N = uint(len(b)) * 64
	for start < N && (*pageBits)(b).get(start) == 0 {
		start++
	}
	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
		end++
	}
	run := uint(0)
	for i := uint(0); i < N; i++ {
		if (*pageBits)(b).get(i) == 0 {
			run++
		} else {
			run = 0
		}
		if run > max {
			max = run
		}
	}
	return PackPallocSum(start, max, end)
}

// Expose findBitRange64 for testing.
func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }

// DiffPallocBits returns the set of bit ranges in which a and b differ.
func DiffPallocBits(a, b *PallocBits) []BitRange {
	ba := (*pageBits)(a)
	bb := (*pageBits)(b)

	var d []BitRange
	base, size := uint(0), uint(0)
	for i := uint(0); i < uint(len(ba))*64; i++ {
		if ba.get(i) != bb.get(i) {
			if size == 0 {
				base = i
			}
			size++
		} else {
			if size != 0 {
				d = append(d, BitRange{base, size})
			}
			size = 0
		}
	}
	if size != 0 {
		d = append(d, BitRange{base, size})
	}
	return d
}

// StringifyPallocBits returns the bits of b in the range r as a string
// of ASCII '0' and '1' characters.
func StringifyPallocBits(b *PallocBits, r BitRange) string {
	str := ""
	for j := r.I; j < r.I+r.N; j++ {
		if (*pageBits)(b).get(j) != 0 {
			str += "1"
		} else {
			str += "0"
		}
	}
	return str
}

type PallocData pallocData

func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
}
func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
func (d *PallocData) ScavengedSetRange(i, n uint) {
	(*pallocData)(d).scavenged.setRange(i, n)
}
func (d *PallocData) PallocBits() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).pallocBits)
}
func (d *PallocData) Scavenged() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).scavenged)
}

// Expose fillAligned for testing.
func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }

// Expose pageCache for testing.
type PageCache pageCache

const PageCachePages = pageCachePages

func NewPageCache(base uintptr, cache, scav uint64) PageCache {
	return PageCache(pageCache{base: base, cache: cache, scav: scav})
}
func (c *PageCache) Empty() bool { return (*pageCache)(c).empty() }
func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
func (c *PageCache) Scav() uint64 { return (*pageCache)(c).scav }
func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageCache)(c).alloc(npages)
}
func (c *PageCache) Flush(s *PageAlloc) {
	cp := (*pageCache)(c)
	sp := (*pageAlloc)(s)

	systemstack(func() {
		// The page allocator requires the heap lock to be held, so
		// take it around the flush.
		lock(sp.mheapLock)
		cp.flush(sp)
		unlock(sp.mheapLock)
	})
}

// Expose chunkIdx for testing.
type ChunkIdx chunkIdx

// Expose pageAlloc for testing.
type PageAlloc pageAlloc

func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
	pp := (*pageAlloc)(p)

	var addr, scav uintptr
	systemstack(func() {
		// The page allocator requires the heap lock to be held, so
		// take it around the allocation.
		lock(pp.mheapLock)
		addr, scav = pp.alloc(npages)
		unlock(pp.mheapLock)
	})
	return addr, scav
}
func (p *PageAlloc) AllocToCache() PageCache {
	pp := (*pageAlloc)(p)

	var c PageCache
	systemstack(func() {
		// The page allocator requires the heap lock to be held, so
		// take it around the cache fill.
		lock(pp.mheapLock)
		c = PageCache(pp.allocToCache())
		unlock(pp.mheapLock)
	})
	return c
}
func (p *PageAlloc) Free(base, npages uintptr) {
	pp := (*pageAlloc)(p)

	systemstack(func() {
		// The page allocator requires the heap lock to be held, so
		// take it around the free.
		lock(pp.mheapLock)
		pp.free(base, npages, true)
		unlock(pp.mheapLock)
	})
}
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
	pp := (*pageAlloc)(p)
	systemstack(func() {
		r = pp.scavenge(nbytes, nil)
	})
	return
}
func (p *PageAlloc) InUse() []AddrRange {
	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
	for _, r := range p.inUse.ranges {
		ranges = append(ranges, AddrRange{r})
	}
	return ranges
}

// Returns nil if the chunk's metadata is not mapped.
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
	ci := chunkIdx(i)
	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
}

// AddrRange is a wrapper around addrRange for testing.
type AddrRange struct {
	addrRange
}

// MakeAddrRange creates a new address range from two virtual addresses.
func MakeAddrRange(base, limit uintptr) AddrRange {
	return AddrRange{makeAddrRange(base, limit)}
}

// Base returns the virtual base address of the address range.
func (a AddrRange) Base() uintptr {
	return a.addrRange.base.addr()
}

// Limit returns the virtual limit address of the address range.
func (a AddrRange) Limit() uintptr {
	return a.addrRange.limit.addr()
}

// Equals returns true if the two address ranges are exactly equal.
func (a AddrRange) Equals(b AddrRange) bool {
	return a == b
}

// Size returns the size of the address range in bytes.
func (a AddrRange) Size() uintptr {
	return a.addrRange.size()
}

// testSysStat is the sysMemStat used by the allocator test helpers in
// this file.
var testSysStat = &memstats.other_sys

// AddrRanges is a wrapper around addrRanges for testing.
type AddrRanges struct {
	addrRanges
	mutable bool
}

// NewAddrRanges creates a new empty addrRanges.
//
// The backing store is initialized just like in the runtime, via
// persistentalloc, so it is never freed; call this sparingly.
//
// The result is mutable, so methods like Add can be tested on it.
func NewAddrRanges() AddrRanges {
	r := addrRanges{}
	r.init(testSysStat)
	return AddrRanges{r, true}
}

// MakeAddrRanges creates a new addrRanges populated with the ranges
// in a. The result is immutable: methods that would grow the backing
// store, like Add, will throw.
func MakeAddrRanges(a ...AddrRange) AddrRanges {
	ranges := make([]addrRange, 0, len(a))
	total := uintptr(0)
	for _, r := range a {
		ranges = append(ranges, r.addrRange)
		total += r.Size()
	}
	return AddrRanges{addrRanges{
		ranges:     ranges,
		totalBytes: total,
		sysStat:    testSysStat,
	}, false}
}

// Ranges returns a copy of the ranges described by the addrRanges.
func (a *AddrRanges) Ranges() []AddrRange {
	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
	for _, r := range a.addrRanges.ranges {
		result = append(result, AddrRange{r})
	}
	return result
}

// FindSucc returns the successor to base. See addrRanges.findSucc
// for more details.
func (a *AddrRanges) FindSucc(base uintptr) int {
	return a.findSucc(base)
}

// Add adds a new AddrRange to the AddrRanges.
//
// The AddrRanges must be mutable (i.e. created by NewAddrRanges),
// otherwise this method throws.
func (a *AddrRanges) Add(r AddrRange) {
	if !a.mutable {
		throw("attempt to mutate immutable AddrRanges")
	}
	a.add(r.addrRange)
}

// TotalBytes returns the totalBytes field of the addrRanges.
func (a *AddrRanges) TotalBytes() uintptr {
	return a.addrRanges.totalBytes
}

// BitRange represents a range of bits [I, I+N) in a bitmap.
type BitRange struct {
	I, N uint // bit index and length in bits
}

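// NewPageAlloc creates a new page allocator for testing and
// initializes it from the chunks map, which maps chunk indices to the
// ranges of pages that should be marked allocated. The optional scav
// map describes which page ranges should be marked scavenged.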
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
	p := new(pageAlloc)

	// Initialize the page allocator using the test-only sysStat.
	p.init(new(mutex), testSysStat)
	lockInit(p.mheapLock, lockRankMheap)
	p.test = true
	for i, init := range chunks {
		addr := chunkBase(chunkIdx(i))

		// Mark the chunk's existence in the pageAlloc.
		systemstack(func() {
			lock(p.mheapLock)
			p.grow(addr, pallocChunkBytes)
			unlock(p.mheapLock)
		})

		// Initialize the bitmap and update pageAlloc metadata.
		chunk := p.chunkOf(chunkIndex(addr))

		// Clear all the scavenged bits, which grow sets.
		chunk.scavenged.clearRange(0, pallocChunkPages)

		// Apply the requested scavenge state, if any.
		if scav != nil {
			if scvg, ok := scav[i]; ok {
				for _, s := range scvg {
					// Ignore the case of s.N == 0. setRange doesn't handle
					// it and it's a no-op anyway.
					if s.N != 0 {
						chunk.scavenged.setRange(s.I, s.N)
					}
				}
			}
		}

		// Apply the requested allocation state.
		for _, s := range init {
			// Ignore the case of s.N == 0. allocRange doesn't handle
			// it and it's a no-op anyway.
			if s.N != 0 {
				chunk.allocRange(s.I, s.N)
			}
		}

		// Update the scavenge index: mark the chunk if it still has a
		// scavengable run of at least one physical page.
		minPages := physPageSize / pageSize
		if minPages < 1 {
			minPages = 1
		}
		_, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, minPages)
		if npages != 0 {
			p.scav.index.mark(addr, addr+pallocChunkBytes)
		}

		// Update summaries for the allocRange calls above.
		systemstack(func() {
			lock(p.mheapLock)
			p.update(addr, pallocChunkPages, false, false)
			unlock(p.mheapLock)
		})
	}

	return (*PageAlloc)(p)
}

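// FreePageAlloc releases the OS resources owned by a PageAlloc
// returned by NewPageAlloc. The PageAlloc must not be used after this
// call.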
func FreePageAlloc(pp *PageAlloc) {
	p := (*pageAlloc)(pp)

	// Free all the mapped space for the summary levels.
	if pageAlloc64Bit != 0 {
		for l := 0; l < summaryLevels; l++ {
			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
		}
		// Free the scavenge index.
		sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks)))
	} else {
		resSize := uintptr(0)
		for _, s := range p.summary {
			resSize += uintptr(cap(s)) * pallocSumBytes
		}
		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
	}

	// Subtract back out the memory mapped for the summaries, which was
	// accounted in testSysStat and gcController.mappedReady.
	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
	testSysStat.add(-int64(p.summaryMappedReady))

	// Free the mapped space for chunks.
	for i := range p.chunks {
		if x := p.chunks[i]; x != nil {
			p.chunks[i] = nil
			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
		}
	}
}

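// BaseChunkIdx is a convenient base chunk index that works on both
// 64-bit and 32-bit platforms, so tests can share code between the two.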
var BaseChunkIdx = func() ChunkIdx {
	var prefix uintptr
	if pageAlloc64Bit != 0 {
		prefix = 0xc000
	} else {
		prefix = 0x100
	}
	baseAddr := prefix * pallocChunkBytes
	if goos.IsAix != 0 {
		baseAddr += arenaBaseOffset
	}
	return ChunkIdx(chunkIndex(baseAddr))
}()

// PageBase returns an address given a chunk index and a page index
// relative to that chunk.
func PageBase(c ChunkIdx, pageIdx uint) uintptr {
	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
}

type BitsMismatch struct {
	Base      uintptr
	Got, Want uint64
}

func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
	ok = true

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		getg().m.mallocing++

		// Lock so we can safely read the chunk bitmaps.
		lock(&mheap_.lock)
	chunkLoop:
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			for j := 0; j < pallocChunkPages/64; j++ {
				// Check each 64-bit word: an allocated page must not
				// still be marked scavenged, so the expected value is
				// the scavenged bits with the allocated bits cleared.
				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
				got := chunk.scavenged[j]
				if want != got {
					ok = false
					if n >= len(mismatches) {
						break chunkLoop
					}
					mismatches[n] = BitsMismatch{
						Base: chunkBase(i) + uintptr(j)*64*pageSize,
						Got:  got,
						Want: want,
					}
					n++
				}
			}
		}
		unlock(&mheap_.lock)

		getg().m.mallocing--
	})
	return
}

func PageCachePagesLeaked() (leaked uintptr) {
	stopTheWorld("PageCachePagesLeaked")

	// Walk over destroyed Ps and look for unflushed caches.
	deadp := allp[len(allp):cap(allp)]
	for _, p := range deadp {
		// Since these Ps are no longer in use, any pages still sitting
		// in their page caches have been leaked.
		if p != nil {
			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
		}
	}

	startTheWorld()
	return
}

var Semacquire = semacquire
var Semrelease1 = semrelease1

func SemNwait(addr *uint32) uint32 {
	root := semtable.rootFor(addr)
	return atomic.Load(&root.nwait)
}

const SemTableSize = semTabSize

// SemTable is a wrapper around semTable exported for testing.
type SemTable struct {
	semTable
}

// Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
func (t *SemTable) Enqueue(addr *uint32) {
	s := acquireSudog()
	s.releasetime = 0
	s.acquiretime = 0
	s.ticket = 0
	t.semTable.rootFor(addr).queue(addr, s, false)
}

// Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
//
// Returns true if there actually was a waiter to be dequeued.
func (t *SemTable) Dequeue(addr *uint32) bool {
	s, _ := t.semTable.rootFor(addr).dequeue(addr)
	if s != nil {
		releaseSudog(s)
		return true
	}
	return false
}

// MSpan is a safe exposure of mspan for testing.
type MSpan mspan

// AllocMSpan allocates an mspan for testing.
func AllocMSpan() *MSpan {
	var s *mspan
	systemstack(func() {
		lock(&mheap_.lock)
		s = (*mspan)(mheap_.spanalloc.alloc())
		unlock(&mheap_.lock)
	})
	return (*MSpan)(s)
}

// FreeMSpan frees an mspan previously allocated with AllocMSpan.
func FreeMSpan(s *MSpan) {
	systemstack(func() {
		lock(&mheap_.lock)
		mheap_.spanalloc.free(unsafe.Pointer(s))
		unlock(&mheap_.lock)
	})
}

// MSpanCountAlloc backs the span's gcmarkBits with the provided bitmap
// and returns the number of objects the span counts as allocated.
func MSpanCountAlloc(ms *MSpan, bits []byte) int {
	s := (*mspan)(ms)
	s.nelems = uintptr(len(bits) * 8)
	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
	result := s.countAlloc()
	s.gcmarkBits = nil
	return result
}

const (
	TimeHistSubBucketBits   = timeHistSubBucketBits
	TimeHistNumSubBuckets   = timeHistNumSubBuckets
	TimeHistNumSuperBuckets = timeHistNumSuperBuckets
)

type TimeHistogram timeHistogram

// Count returns the counts for the given bucket and subBucket indices.
// It returns true if the bucket was valid, otherwise it returns the
// underflow bucket's count and false.
func (th *TimeHistogram) Count(bucket, subBucket uint) (uint64, bool) {
	t := (*timeHistogram)(th)
	i := bucket*TimeHistNumSubBuckets + subBucket
	if i >= uint(len(t.counts)) {
		return t.underflow, false
	}
	return t.counts[i], true
}

func (th *TimeHistogram) Record(duration int64) {
	(*timeHistogram)(th).record(duration)
}

var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets

func SetIntArgRegs(a int) int {
	lock(&finlock)
	old := intArgRegs
	if a >= 0 {
		intArgRegs = a
	}
	unlock(&finlock)
	return old
}

func FinalizerGAsleep() bool {
	lock(&finlock)
	result := fingwait
	unlock(&finlock)
	return result
}

// GCTestMoveStackOnNextCall is exposed as a direct assignment rather
// than a wrapper function, so calling it does not add an extra frame
// that would have to be popped before the stack move.
var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall

// Expose gcTestIsReachable for testing.
func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
	return gcTestIsReachable(ptrs...)
}

// Expose gcTestPointerClass for testing.
func GCTestPointerClass(p unsafe.Pointer) string {
	return gcTestPointerClass(p)
}

const Raceenabled = raceenabled

const (
	GCBackgroundUtilization     = gcBackgroundUtilization
	GCGoalUtilization           = gcGoalUtilization
	DefaultHeapMinimum          = defaultHeapMinimum
	MemoryLimitHeapGoalHeadroom = memoryLimitHeapGoalHeadroom
)

type GCController struct {
	gcControllerState
}

func NewGCController(gcPercent int, memoryLimit int64) *GCController {
	// Force the controller to escape. We're going to
	// do 64-bit atomics on it, and if it gets stack-allocated
	// on a 32-bit architecture, it may get allocated unaligned
	// space.
	g := Escape(new(GCController))
	g.gcControllerState.test = true
	g.init(int32(gcPercent), memoryLimit)
	return g
}

func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
	trigger, _ := c.trigger()
	if c.heapMarked > trigger {
		trigger = c.heapMarked
	}
	c.maxStackScan = stackSize
	c.globalsScan = globalsSize
	c.heapLive = trigger
	c.heapScan += uint64(float64(trigger-c.heapMarked) * scannableFrac)
	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
}

func (c *GCController) AssistWorkPerByte() float64 {
	return c.assistWorkPerByte.Load()
}

func (c *GCController) HeapGoal() uint64 {
	return c.heapGoal()
}

func (c *GCController) HeapLive() uint64 {
	return c.heapLive
}

func (c *GCController) HeapMarked() uint64 {
	return c.heapMarked
}

func (c *GCController) Triggered() uint64 {
	return c.triggered
}

type GCControllerReviseDelta struct {
	HeapLive        int64
	HeapScan        int64
	HeapScanWork    int64
	StackScanWork   int64
	GlobalsScanWork int64
}

func (c *GCController) Revise(d GCControllerReviseDelta) {
	c.heapLive += uint64(d.HeapLive)
	c.heapScan += uint64(d.HeapScan)
	c.heapScanWork.Add(d.HeapScanWork)
	c.stackScanWork.Add(d.StackScanWork)
	c.globalsScanWork.Add(d.GlobalsScanWork)
	c.revise()
}

func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
	c.assistTime.Store(assistTime)
	c.endCycle(elapsed, gomaxprocs, false)
	c.resetLive(bytesMarked)
	c.commit(false)
}

func (c *GCController) AddIdleMarkWorker() bool {
	return c.addIdleMarkWorker()
}

func (c *GCController) NeedIdleMarkWorker() bool {
	return c.needIdleMarkWorker()
}

func (c *GCController) RemoveIdleMarkWorker() {
	c.removeIdleMarkWorker()
}

func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
	c.setMaxIdleMarkWorkers(max)
}

var alwaysFalse bool
var escapeSink any

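// Escape forces x to escape to the heap, so tests can defeat escape
// analysis and guarantee a heap allocation. The conditional assignment
// to escapeSink is never taken, but the compiler cannot prove that.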
func Escape[T any](x T) T {
	if alwaysFalse {
		escapeSink = x
	}
	return x
}

// Acquirem blocks preemption.
func Acquirem() {
	acquirem()
}

func Releasem() {
	releasem(getg().m)
}

var Timediv = timediv

type PIController struct {
	piController
}

func NewPIController(kp, ti, tt, min, max float64) *PIController {
	return &PIController{piController{
		kp:  kp,
		ti:  ti,
		tt:  tt,
		min: min,
		max: max,
	}}
}

func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
	return c.piController.next(input, setpoint, period)
}

const (
	CapacityPerProc          = capacityPerProc
	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
)

type GCCPULimiter struct {
	limiter gcCPULimiterState
}

func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
	// Force the limiter to escape. We're going to
	// do 64-bit atomics on it, and if it gets stack-allocated
	// on a 32-bit architecture, it may get allocated unaligned
	// space.
	l := Escape(new(GCCPULimiter))
	l.limiter.test = true
	l.limiter.resetCapacity(now, gomaxprocs)
	return l
}

func (l *GCCPULimiter) Fill() uint64 {
	return l.limiter.bucket.fill
}

func (l *GCCPULimiter) Capacity() uint64 {
	return l.limiter.bucket.capacity
}

func (l *GCCPULimiter) Overflow() uint64 {
	return l.limiter.overflow
}

func (l *GCCPULimiter) Limiting() bool {
	return l.limiter.limiting()
}

func (l *GCCPULimiter) NeedUpdate(now int64) bool {
	return l.limiter.needUpdate(now)
}

func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
	l.limiter.startGCTransition(enableGC, now)
}

func (l *GCCPULimiter) FinishGCTransition(now int64) {
	l.limiter.finishGCTransition(now)
}

func (l *GCCPULimiter) Update(now int64) {
	l.limiter.update(now)
}

func (l *GCCPULimiter) AddAssistTime(t int64) {
	l.limiter.addAssistTime(t)
}

func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
	l.limiter.resetCapacity(now, nprocs)
}

const ScavengePercent = scavengePercent

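// Scavenger is a test harness around scavengerState that runs the
// background scavenger loop with stubbed-out dependencies. All four
// stub fields (Sleep, Scavenge, ShouldStop, GoMaxProcs) must be
// populated before calling Start.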
type Scavenger struct {
	Sleep      func(int64) int64
	Scavenge   func(uintptr) (uintptr, int64)
	ShouldStop func() bool
	GoMaxProcs func() int32

	released  atomic.Uintptr
	scavenger scavengerState
	stop      chan<- struct{}
	done      <-chan struct{}
}

func (s *Scavenger) Start() {
	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
		panic("must populate all stubs")
	}

	// Install hooks.
	s.scavenger.sleepStub = s.Sleep
	s.scavenger.scavenge = s.Scavenge
	s.scavenger.shouldStop = s.ShouldStop
	s.scavenger.gomaxprocs = s.GoMaxProcs

	// Start the scavenger.
	stop := make(chan struct{})
	s.stop = stop
	done := make(chan struct{})
	s.done = done
	go func() {
		// This should match bgscavenge, loosely.
		s.scavenger.init()
		s.scavenger.park()
		for {
			select {
			case <-stop:
				close(done)
				return
			default:
			}
			released, workTime := s.scavenger.run()
			if released == 0 {
				s.scavenger.park()
				continue
			}
			s.released.Add(released)
			s.scavenger.sleep(workTime)
		}
	}()
	if !s.BlockUntilParked(1e9 /* 1 second */) {
		panic("timed out waiting for scavenger to get ready")
	}
}

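// BlockUntilParked blocks until the scavenger parks, or until the
// timeout (in nanoseconds) is exceeded. It reports whether the
// scavenger parked in time.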
func (s *Scavenger) BlockUntilParked(timeout int64) bool {
	// Just spin, waiting for the scavenger to park.
	//
	// The actual parking process is racy with respect to
	// wakeups, which is fine, but for testing we need something
	// a bit more robust.
	start := nanotime()
	for nanotime()-start < timeout {
		lock(&s.scavenger.lock)
		parked := s.scavenger.parked
		unlock(&s.scavenger.lock)
		if parked {
			return true
		}
		Gosched()
	}
	return false
}

// Released returns how many bytes the scavenger has released so far.
func (s *Scavenger) Released() uintptr {
	return s.released.Load()
}

// Wake wakes up a parked scavenger to keep running.
func (s *Scavenger) Wake() {
	s.scavenger.wake()
}

// Stop cleans up the scavenger's resources. The scavenger
// must be parked for this to work.
func (s *Scavenger) Stop() {
	lock(&s.scavenger.lock)
	parked := s.scavenger.parked
	unlock(&s.scavenger.lock)
	if !parked {
		panic("tried to clean up scavenger that is not parked")
	}
	close(s.stop)
	s.Wake()
	<-s.done
}

type ScavengeIndex struct {
	i scavengeIndex
}

func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
	s := new(ScavengeIndex)
	s.i.chunks = make([]atomic.Uint8, uintptr(1<<heapAddrBits/pallocChunkBytes/8))
	s.i.min.Store(int32(min / 8))
	s.i.max.Store(int32(max / 8))
	return s
}

func (s *ScavengeIndex) Find() (ChunkIdx, uint) {
	ci, off := s.i.find()
	return ChunkIdx(ci), off
}

func (s *ScavengeIndex) Mark(base, limit uintptr) {
	s.i.mark(base, limit)
}

func (s *ScavengeIndex) Clear(ci ChunkIdx) {
	s.i.clear(chunkIdx(ci))
}