Source file
src/runtime/export_test.go
1 // Copyright 2010 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 // Export guts for testing.
6
7 package runtime
8
9 import (
10 "internal/goarch"
11 "internal/goos"
12 "runtime/internal/atomic"
13 "runtime/internal/sys"
14 "unsafe"
15 )
16
17 var Fadd64 = fadd64
18 var Fsub64 = fsub64
19 var Fmul64 = fmul64
20 var Fdiv64 = fdiv64
21 var F64to32 = f64to32
22 var F32to64 = f32to64
23 var Fcmp64 = fcmp64
24 var Fintto64 = fintto64
25 var F64toint = f64toint
26
27 var Entersyscall = entersyscall
28 var Exitsyscall = exitsyscall
29 var LockedOSThread = lockedOSThread
30 var Xadduintptr = atomic.Xadduintptr
31
32 var Fastlog2 = fastlog2
33
34 var Atoi = atoi
35 var Atoi32 = atoi32
36 var ParseByteCount = parseByteCount
37
38 var Nanotime = nanotime
39 var NetpollBreak = netpollBreak
40 var Usleep = usleep
41
42 var PhysPageSize = physPageSize
43 var PhysHugePageSize = physHugePageSize
44
45 var NetpollGenericInit = netpollGenericInit
46
47 var Memmove = memmove
48 var MemclrNoHeapPointers = memclrNoHeapPointers
49
50 var CgoCheckPointer = cgoCheckPointer
51
52 const TracebackInnerFrames = tracebackInnerFrames
53 const TracebackOuterFrames = tracebackOuterFrames
54
55 var LockPartialOrder = lockPartialOrder
56
57 type LockRank lockRank
58
59 func (l LockRank) String() string {
60 return lockRank(l).String()
61 }
62
63 const PreemptMSupported = preemptMSupported
64
65 type LFNode struct {
66 Next uint64
67 Pushcnt uintptr
68 }
69
70 func LFStackPush(head *uint64, node *LFNode) {
71 (*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
72 }
73
74 func LFStackPop(head *uint64) *LFNode {
75 return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
76 }
77 func LFNodeValidate(node *LFNode) {
78 lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
79 }
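// Illustrative sketch (assumed usage, not part of this file): a lock-free
// stack test in package runtime_test might push a node through the exported
// wrappers and pop it back:
//
//	var head uint64
//	node := &runtime.LFNode{Pushcnt: 1}
//	runtime.LFNodeValidate(node)
//	runtime.LFStackPush(&head, node)
//	if got := runtime.LFStackPop(&head); got != node {
//		t.Fatalf("popped %p, want %p", got, node)
//	}
//
// LFNodeValidate checks that the node's address survives the pointer
// packing that lfstack uses before the node is ever pushed.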
80
81 func Netpoll(delta int64) {
82 systemstack(func() {
83 netpoll(delta)
84 })
85 }
86
87 func GCMask(x any) (ret []byte) {
88 systemstack(func() {
89 ret = getgcmask(x)
90 })
91 return
92 }
93
94 func RunSchedLocalQueueTest() {
95 pp := new(p)
96 gs := make([]g, len(pp.runq))
97 Escape(gs)
98 for i := 0; i < len(pp.runq); i++ {
99 if g, _ := runqget(pp); g != nil {
100 throw("runq is not empty initially")
101 }
102 for j := 0; j < i; j++ {
103 runqput(pp, &gs[i], false)
104 }
105 for j := 0; j < i; j++ {
106 if g, _ := runqget(pp); g != &gs[i] {
107 print("bad element at iter ", i, "/", j, "\n")
108 throw("bad element")
109 }
110 }
111 if g, _ := runqget(pp); g != nil {
112 throw("runq is not empty afterwards")
113 }
114 }
115 }
116
117 func RunSchedLocalQueueStealTest() {
118 p1 := new(p)
119 p2 := new(p)
120 gs := make([]g, len(p1.runq))
121 Escape(gs)
122 for i := 0; i < len(p1.runq); i++ {
123 for j := 0; j < i; j++ {
124 gs[j].sig = 0
125 runqput(p1, &gs[j], false)
126 }
127 gp := runqsteal(p2, p1, true)
128 s := 0
129 if gp != nil {
130 s++
131 gp.sig++
132 }
133 for {
134 gp, _ = runqget(p2)
135 if gp == nil {
136 break
137 }
138 s++
139 gp.sig++
140 }
141 for {
142 gp, _ = runqget(p1)
143 if gp == nil {
144 break
145 }
146 gp.sig++
147 }
148 for j := 0; j < i; j++ {
149 if gs[j].sig != 1 {
150 print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
151 throw("bad element")
152 }
153 }
154 if s != i/2 && s != i/2+1 {
155 print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
156 throw("bad steal")
157 }
158 }
159 }
160
161 func RunSchedLocalQueueEmptyTest(iters int) {
162 // Test that runq is not spuriously reported as empty.
163 // Runq emptiness affects scheduling decisions and spurious emptiness
164 // can lead to underutilization (both runnable Gs and idle Ps, but
165 // only one of each).
166 done := make(chan bool, 1)
167 p := new(p)
168 gs := make([]g, 2)
169 Escape(gs)
170 ready := new(uint32)
171 for i := 0; i < iters; i++ {
172 *ready = 0
173 next0 := (i & 1) == 0
174 next1 := (i & 2) == 0
175 runqput(p, &gs[0], next0)
176 go func() {
177 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
178 }
179 if runqempty(p) {
180 println("next:", next0, next1)
181 throw("queue is empty")
182 }
183 done <- true
184 }()
185 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
186 }
187 runqput(p, &gs[1], next1)
188 runqget(p)
189 <-done
190 runqget(p)
191 }
192 }
193
194 var (
195 StringHash = stringHash
196 BytesHash = bytesHash
197 Int32Hash = int32Hash
198 Int64Hash = int64Hash
199 MemHash = memhash
200 MemHash32 = memhash32
201 MemHash64 = memhash64
202 EfaceHash = efaceHash
203 IfaceHash = ifaceHash
204 )
205
206 var UseAeshash = &useAeshash
207
208 func MemclrBytes(b []byte) {
209 s := (*slice)(unsafe.Pointer(&b))
210 memclrNoHeapPointers(s.array, uintptr(s.len))
211 }
212
213 const HashLoad = hashLoad
214
215
216 func GostringW(w []uint16) (s string) {
217 systemstack(func() {
218 s = gostringw(&w[0])
219 })
220 return
221 }
222
223 var Open = open
224 var Close = closefd
225 var Read = read
226 var Write = write
227
228 func Envs() []string { return envs }
229 func SetEnvs(e []string) { envs = e }
230
231 // For benchmarking.
232
233 // blockWrapper is a wrapper type that ensures a T is placed within a
234 // large object span. The trailing padding pushes the allocation past
235 // _MaxSmallSize, so the value gets a span to itself.
236 //
237 // This matters for benchmarks that write the heap bitmap directly
238 // (like BenchSetType below): allocating threads assume they are the
239 // sole writers of their span's heap bits, so rewriting the bitmap of
240 // an object that shares a small-object span with live allocations
241 // could corrupt that span's bitmap. A large object span contains
242 // exactly one object, so no other P can be allocating from it
243 // concurrently.
244
245 type blockWrapper[T any] struct {
246 value T
247 _ [_MaxSmallSize]byte
248 }
249
250 func BenchSetType[T any](n int, resetTimer func()) {
251 x := new(blockWrapper[T])
252
253 // Escape x to ensure it is allocated on the heap, as we are
254 // working on the heap bits here.
255 Escape(x)
256
257 // Grab the type of T.
258 var i any = *new(T)
259 e := *efaceOf(&i)
260 t := e._type
261
262 // Benchmark setting the type bits for just the internal T of the block.
263 benchSetType(n, resetTimer, 1, unsafe.Pointer(&x.value), t)
264 }
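// Illustrative sketch (assumed usage, not part of this file): a benchmark in
// package runtime_test could drive BenchSetType with its own b.N and
// b.ResetTimer, for an element type of its choosing:
//
//	func BenchmarkSetTypeNode(b *testing.B) {
//		type node struct {
//			l, r *node
//			v    int
//		}
//		runtime.BenchSetType[node](b.N, b.ResetTimer)
//	}
//
// The type parameter is what gets its heap bits rewritten; the wrapper
// above guarantees the value lives in a large object span.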
265
266 const maxArrayBlockWrapperLen = 32
267
268 // arrayBlockWrapper is like blockWrapper, but the interior value is
269 // intended to be used as backing store for a slice.
270 type arrayBlockWrapper[T any] struct {
271 value [maxArrayBlockWrapperLen]T
272 _ [_MaxSmallSize]byte
273 }
274
275 // arrayLargeBlockWrapper is like arrayBlockWrapper, but the interior
276 // array is much larger.
277 type arrayLargeBlockWrapper[T any] struct {
278 value [1024]T
279 _ [_MaxSmallSize]byte
280 }
281
282 func BenchSetTypeSlice[T any](n int, resetTimer func(), len int) {
283 // Two separate wrapper types are used so that a big element type with a
284 // relatively small slice length does not generate an enormous allocation,
285 // which would likely force a GC and skew the benchmark results. Pick the
286 // small wrapper whenever the requested length fits.
287 var y unsafe.Pointer
288 if len <= maxArrayBlockWrapperLen {
289 x := new(arrayBlockWrapper[T])
290
291
292 Escape(x)
293 y = unsafe.Pointer(&x.value[0])
294 } else {
295 x := new(arrayLargeBlockWrapper[T])
296 Escape(x)
297 y = unsafe.Pointer(&x.value[0])
298 }
299
300
301 var i any = *new(T)
302 e := *efaceOf(&i)
303 t := e._type
304
305
306
307 benchSetType(n, resetTimer, len, y, t)
308 }
309
310 // benchSetType is the implementation of the BenchSetType* functions.
311 // It benchmarks setting the heap bits for an allocation of len values
312 // of type t by calling heapBitsSetType in a loop.
313 //
314 // It is only safe to call if an allocation of size len*t.Size() is a
315 // large object (one that gets its own span) and if x points to the
316 // beginning of such an allocation. The checks at the top of the
317 // function verify both conditions.
318 //
319 // The resetTimer callback is invoked just before the timed loop so the
320 // setup cost (span lookup, size rounding) is excluded from the
321 // benchmark measurement.
322 func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type) {
323
324 size := t.Size() * uintptr(len)
325
326
327 s := spanOfHeap(uintptr(x))
328 if s == nil {
329 panic("no heap span for input")
330 }
331 if s.spanclass.sizeclass() != 0 {
332 panic("span is not a large object span")
333 }
334
335
336
337 allocSize := roundupsize(size)
338 if s.npages*pageSize < allocSize {
339 panic("backing span not large enough for benchmark")
340 }
341
342
343
344 resetTimer()
345 systemstack(func() {
346 for i := 0; i < n; i++ {
347 heapBitsSetType(uintptr(x), allocSize, size, t)
348 }
349 })
350
351
352 KeepAlive(x)
353 }
354
355 const PtrSize = goarch.PtrSize
356
357 var ForceGCPeriod = &forcegcperiod
358
359
360 // SetTracebackEnv is like runtime/debug.SetTraceback, but it also sets
361 // the cached "environment" level, so later SetTraceback calls cannot lower it.
362 func SetTracebackEnv(level string) {
363 setTraceback(level)
364 traceback_env = traceback_cache
365 }
366
367 var ReadUnaligned32 = readUnaligned32
368 var ReadUnaligned64 = readUnaligned64
369
370 func CountPagesInUse() (pagesInUse, counted uintptr) {
371 stopTheWorld(stwForTestCountPagesInUse)
372
373 pagesInUse = uintptr(mheap_.pagesInUse.Load())
374
375 for _, s := range mheap_.allspans {
376 if s.state.get() == mSpanInUse {
377 counted += s.npages
378 }
379 }
380
381 startTheWorld()
382
383 return
384 }
385
386 func Fastrand() uint32 { return fastrand() }
387 func Fastrand64() uint64 { return fastrand64() }
388 func Fastrandn(n uint32) uint32 { return fastrandn(n) }
389
390 type ProfBuf profBuf
391
392 func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
393 return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
394 }
395
396 func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
397 (*profBuf)(p).write(tag, now, hdr, stk)
398 }
399
400 const (
401 ProfBufBlocking = profBufBlocking
402 ProfBufNonBlocking = profBufNonBlocking
403 )
404
405 func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
406 return (*profBuf)(p).read(profBufReadMode(mode))
407 }
408
409 func (p *ProfBuf) Close() {
410 (*profBuf)(p).close()
411 }
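// Illustrative sketch (assumed usage, not part of this file): a profiling
// test might create a small buffer, write one record, and read it back in
// non-blocking mode:
//
//	p := runtime.NewProfBuf(2, 1000, 10) // 2 header words per record
//	hdr := []uint64{1, 2}
//	stk := []uintptr{0x100, 0x200}
//	var tag unsafe.Pointer
//	p.Write(&tag, 123, hdr, stk) // 123 stands in for a real timestamp
//	data, tags, eof := p.Read(runtime.ProfBufNonBlocking)
//	_, _, _ = data, tags, eof
//	p.Close()
//
// Read returns the accumulated raw words, the corresponding tag pointers,
// and whether the buffer has been closed and drained.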
412
413 func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
414 stopTheWorld(stwForTestReadMetricsSlow)
415
416
417
418 metricsLock()
419 initMetrics()
420 metricsUnlock()
421
422 systemstack(func() {
423
424
425
426
427 readmemstats_m(memStats)
428 })
429
430
431
432
433
434 readMetrics(samplesp, len, cap)
435
436 startTheWorld()
437 }
438
439 // ReadMemStatsSlow returns both the runtime-computed MemStats and
440 // MemStats accumulated by scanning the heap, for consistency checking.
441 func ReadMemStatsSlow() (base, slow MemStats) {
442 stopTheWorld(stwForTestReadMemStatsSlow)
443
444
445 systemstack(func() {
446
447 getg().m.mallocing++
448
449 readmemstats_m(&base)
450
451
452
453 slow = base
454 slow.Alloc = 0
455 slow.TotalAlloc = 0
456 slow.Mallocs = 0
457 slow.Frees = 0
458 slow.HeapReleased = 0
459 var bySize [_NumSizeClasses]struct {
460 Mallocs, Frees uint64
461 }
462
463
464 for _, s := range mheap_.allspans {
465 if s.state.get() != mSpanInUse {
466 continue
467 }
468 if s.isUnusedUserArenaChunk() {
469 continue
470 }
471 if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
472 slow.Mallocs++
473 slow.Alloc += uint64(s.elemsize)
474 } else {
475 slow.Mallocs += uint64(s.allocCount)
476 slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
477 bySize[sizeclass].Mallocs += uint64(s.allocCount)
478 }
479 }
480
481
482 var m heapStatsDelta
483 memstats.heapStats.unsafeRead(&m)
484
485
486 var smallFree uint64
487 for i := 0; i < _NumSizeClasses; i++ {
488 slow.Frees += uint64(m.smallFreeCount[i])
489 bySize[i].Frees += uint64(m.smallFreeCount[i])
490 bySize[i].Mallocs += uint64(m.smallFreeCount[i])
491 smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
492 }
493 slow.Frees += uint64(m.tinyAllocCount) + uint64(m.largeFreeCount)
494 slow.Mallocs += slow.Frees
495
496 slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree
497
498 for i := range slow.BySize {
499 slow.BySize[i].Mallocs = bySize[i].Mallocs
500 slow.BySize[i].Frees = bySize[i].Frees
501 }
502
503 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
504 chunk := mheap_.pages.tryChunkOf(i)
505 if chunk == nil {
506 continue
507 }
508 pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
509 slow.HeapReleased += uint64(pg) * pageSize
510 }
511 for _, p := range allp {
512 pg := sys.OnesCount64(p.pcache.scav)
513 slow.HeapReleased += uint64(pg) * pageSize
514 }
515
516 getg().m.mallocing--
517 })
518
519 startTheWorld()
520 return
521 }
522
523 // ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the
524 // current goroutine and verifies the stack's frame pointers afterwards by
525 // walking them with FPCallers.
526 func ShrinkStackAndVerifyFramePointers() {
527 before := stackPoisonCopy
528 defer func() { stackPoisonCopy = before }()
529 stackPoisonCopy = 1
530
531 gp := getg()
532 systemstack(func() {
533 shrinkstack(gp)
534 })
535
536
537 FPCallers(make([]uintptr, 1024))
538 }
539
540 // BlockOnSystemStack switches to the system stack, prints "x" to
541 // stderr, and blocks in a stack containing
542 // "runtime.blockOnSystemStackInternal".
543 func BlockOnSystemStack() {
544 systemstack(blockOnSystemStackInternal)
545 }
546
547 func blockOnSystemStackInternal() {
548 print("x\n")
549 lock(&deadlock)
550 lock(&deadlock)
551 }
552
553 type RWMutex struct {
554 rw rwmutex
555 }
556
557 func (rw *RWMutex) RLock() {
558 rw.rw.rlock()
559 }
560
561 func (rw *RWMutex) RUnlock() {
562 rw.rw.runlock()
563 }
564
565 func (rw *RWMutex) Lock() {
566 rw.rw.lock()
567 }
568
569 func (rw *RWMutex) Unlock() {
570 rw.rw.unlock()
571 }
572
573 const RuntimeHmapSize = unsafe.Sizeof(hmap{})
574
575 func MapBucketsCount(m map[int]int) int {
576 h := *(**hmap)(unsafe.Pointer(&m))
577 return 1 << h.B
578 }
579
580 func MapBucketsPointerIsNil(m map[int]int) bool {
581 h := *(**hmap)(unsafe.Pointer(&m))
582 return h.buckets == nil
583 }
584
585 func LockOSCounts() (external, internal uint32) {
586 gp := getg()
587 if gp.m.lockedExt+gp.m.lockedInt == 0 {
588 if gp.lockedm != 0 {
589 panic("lockedm on non-locked goroutine")
590 }
591 } else {
592 if gp.lockedm == 0 {
593 panic("nil lockedm on locked goroutine")
594 }
595 }
596 return gp.m.lockedExt, gp.m.lockedInt
597 }
598
599
600 func TracebackSystemstack(stk []uintptr, i int) int {
601 if i == 0 {
602 pc, sp := getcallerpc(), getcallersp()
603 var u unwinder
604 u.initAt(pc, sp, 0, getg(), unwindJumpStack)
605 return tracebackPCs(&u, 0, stk)
606 }
607 n := 0
608 systemstack(func() {
609 n = TracebackSystemstack(stk, i-1)
610 })
611 return n
612 }
613
614 func KeepNArenaHints(n int) {
615 hint := mheap_.arenaHints
616 for i := 1; i < n; i++ {
617 hint = hint.next
618 if hint == nil {
619 return
620 }
621 }
622 hint.next = nil
623 }
624
625
626 // MapNextArenaHint reserves a physical page at the next arena growth
627 // hint, preventing the arena from growing there, and returns the range
628 // of addresses that are no longer viable.
629 //
630 // The reservation may fail, in which case ok is false.
631 func MapNextArenaHint() (start, end uintptr, ok bool) {
632 hint := mheap_.arenaHints
633 addr := hint.addr
634 if hint.down {
635 start, end = addr-heapArenaBytes, addr
636 addr -= physPageSize
637 } else {
638 start, end = addr, addr+heapArenaBytes
639 }
640 got := sysReserve(unsafe.Pointer(addr), physPageSize)
641 ok = (addr == uintptr(got))
642 if !ok {
643
644
645 sysFreeOS(got, physPageSize)
646 }
647 return
648 }
649
650 func GetNextArenaHint() uintptr {
651 return mheap_.arenaHints.addr
652 }
653
654 type G = g
655
656 type Sudog = sudog
657
658 func Getg() *G {
659 return getg()
660 }
661
662 func Goid() uint64 {
663 return getg().goid
664 }
665
666 func GIsWaitingOnMutex(gp *G) bool {
667 return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
668 }
669
670 var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
671
672 //go:noinline
673 func PanicForTesting(b []byte, i int) byte {
674 return unexportedPanicForTesting(b, i)
675 }
676
677 //go:noinline
678 func unexportedPanicForTesting(b []byte, i int) byte {
679 return b[i]
680 }
681
682 func G0StackOverflow() {
683 systemstack(func() {
684 stackOverflow(nil)
685 })
686 }
687
688 func stackOverflow(x *byte) {
689 var buf [256]byte
690 stackOverflow(&buf[0])
691 }
692
693 func MapTombstoneCheck(m map[int]int) {
694 // Make sure emptyOne and emptyRest are distributed correctly.
695 // We should have a series of filled and emptyOne cells, followed by
696 // a series of emptyRest cells.
697 h := *(**hmap)(unsafe.Pointer(&m))
698 i := any(m)
699 t := *(**maptype)(unsafe.Pointer(&i))
700
701 for x := 0; x < 1<<h.B; x++ {
702 b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
703 n := 0
704 for b := b0; b != nil; b = b.overflow(t) {
705 for i := 0; i < bucketCnt; i++ {
706 if b.tophash[i] != emptyRest {
707 n++
708 }
709 }
710 }
711 k := 0
712 for b := b0; b != nil; b = b.overflow(t) {
713 for i := 0; i < bucketCnt; i++ {
714 if k < n && b.tophash[i] == emptyRest {
715 panic("early emptyRest")
716 }
717 if k >= n && b.tophash[i] != emptyRest {
718 panic("late non-emptyRest")
719 }
720 if k == n-1 && b.tophash[i] == emptyOne {
721 panic("last non-emptyRest entry is emptyOne")
722 }
723 k++
724 }
725 }
726 }
727 }
728
729 func RunGetgThreadSwitchTest() {
730 // Test that getg works correctly with thread switch.
731 // With gccgo, if we generate getg inlined, the backend
732 // may cache the address of the TLS variable, which
733 // will become invalid after a thread switch. This test
734 // checks that the bad caching doesn't happen.
735
736 ch := make(chan int)
737 go func(ch chan int) {
738 ch <- 5
739 LockOSThread()
740 }(ch)
741
742 g1 := getg()
743
744
745
746
747
748 <-ch
749
750 g2 := getg()
751 if g1 != g2 {
752 panic("g1 != g2")
753 }
754
755
756
757 g3 := getg()
758 if g1 != g3 {
759 panic("g1 != g3")
760 }
761 }
762
763 const (
764 PageSize = pageSize
765 PallocChunkPages = pallocChunkPages
766 PageAlloc64Bit = pageAlloc64Bit
767 PallocSumBytes = pallocSumBytes
768 )
769
770
771 type PallocSum pallocSum
772
773 func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
774 func (m PallocSum) Start() uint { return pallocSum(m).start() }
775 func (m PallocSum) Max() uint { return pallocSum(m).max() }
776 func (m PallocSum) End() uint { return pallocSum(m).end() }
777
778
779 type PallocBits pallocBits
780
781 func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
782 return (*pallocBits)(b).find(npages, searchIdx)
783 }
784 func (b *PallocBits) AllocRange(i, n uint) { (*pallocBits)(b).allocRange(i, n) }
785 func (b *PallocBits) Free(i, n uint) { (*pallocBits)(b).free(i, n) }
786 func (b *PallocBits) Summarize() PallocSum { return PallocSum((*pallocBits)(b).summarize()) }
787 func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
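// Illustrative sketch (assumed usage, not part of this file): the PallocBits
// wrappers let tests exercise the page allocator's bitmap directly:
//
//	var b runtime.PallocBits
//	b.AllocRange(0, 10)  // mark pages [0, 10) allocated
//	i, _ := b.Find(5, 0) // find 5 contiguous free pages starting the search at 0
//	sum := b.Summarize() // start/max/end runs of free pages
//	_, _ = i, sum
//
// SummarizeSlow below is the reference implementation that tests compare
// Summarize against.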
788
789 // SummarizeSlow is a slow but more obviously correct implementation
790 // of (*pallocBits).summarize. Used for testing.
791 func SummarizeSlow(b *PallocBits) PallocSum {
792 var start, max, end uint
793
794 const N = uint(len(b)) * 64
795 for start < N && (*pageBits)(b).get(start) == 0 {
796 start++
797 }
798 for end < N && (*pageBits)(b).get(N-end-1) == 0 {
799 end++
800 }
801 run := uint(0)
802 for i := uint(0); i < N; i++ {
803 if (*pageBits)(b).get(i) == 0 {
804 run++
805 } else {
806 run = 0
807 }
808 if run > max {
809 max = run
810 }
811 }
812 return PackPallocSum(start, max, end)
813 }
814
815
816 func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
817
818
819
820 func DiffPallocBits(a, b *PallocBits) []BitRange {
821 ba := (*pageBits)(a)
822 bb := (*pageBits)(b)
823
824 var d []BitRange
825 base, size := uint(0), uint(0)
826 for i := uint(0); i < uint(len(ba))*64; i++ {
827 if ba.get(i) != bb.get(i) {
828 if size == 0 {
829 base = i
830 }
831 size++
832 } else {
833 if size != 0 {
834 d = append(d, BitRange{base, size})
835 }
836 size = 0
837 }
838 }
839 if size != 0 {
840 d = append(d, BitRange{base, size})
841 }
842 return d
843 }
844
845
846
847
848 func StringifyPallocBits(b *PallocBits, r BitRange) string {
849 str := ""
850 for j := r.I; j < r.I+r.N; j++ {
851 if (*pageBits)(b).get(j) != 0 {
852 str += "1"
853 } else {
854 str += "0"
855 }
856 }
857 return str
858 }
859
860
861 type PallocData pallocData
862
863 func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
864 return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
865 }
866 func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
867 func (d *PallocData) ScavengedSetRange(i, n uint) {
868 (*pallocData)(d).scavenged.setRange(i, n)
869 }
870 func (d *PallocData) PallocBits() *PallocBits {
871 return (*PallocBits)(&(*pallocData)(d).pallocBits)
872 }
873 func (d *PallocData) Scavenged() *PallocBits {
874 return (*PallocBits)(&(*pallocData)(d).scavenged)
875 }
876
877
878 func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
879
880
881 type PageCache pageCache
882
883 const PageCachePages = pageCachePages
884
885 func NewPageCache(base uintptr, cache, scav uint64) PageCache {
886 return PageCache(pageCache{base: base, cache: cache, scav: scav})
887 }
888 func (c *PageCache) Empty() bool { return (*pageCache)(c).empty() }
889 func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
890 func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
891 func (c *PageCache) Scav() uint64 { return (*pageCache)(c).scav }
892 func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
893 return (*pageCache)(c).alloc(npages)
894 }
895 func (c *PageCache) Flush(s *PageAlloc) {
896 cp := (*pageCache)(c)
897 sp := (*pageAlloc)(s)
898
899 systemstack(func() {
900
901
902 lock(sp.mheapLock)
903 cp.flush(sp)
904 unlock(sp.mheapLock)
905 })
906 }
907
908
909 type ChunkIdx chunkIdx
910
911
912
913 type PageAlloc pageAlloc
914
915 func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
916 pp := (*pageAlloc)(p)
917
918 var addr, scav uintptr
919 systemstack(func() {
920
921
922 lock(pp.mheapLock)
923 addr, scav = pp.alloc(npages)
924 unlock(pp.mheapLock)
925 })
926 return addr, scav
927 }
928 func (p *PageAlloc) AllocToCache() PageCache {
929 pp := (*pageAlloc)(p)
930
931 var c PageCache
932 systemstack(func() {
933
934
935 lock(pp.mheapLock)
936 c = PageCache(pp.allocToCache())
937 unlock(pp.mheapLock)
938 })
939 return c
940 }
941 func (p *PageAlloc) Free(base, npages uintptr) {
942 pp := (*pageAlloc)(p)
943
944 systemstack(func() {
945
946
947 lock(pp.mheapLock)
948 pp.free(base, npages)
949 unlock(pp.mheapLock)
950 })
951 }
952 func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
953 return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
954 }
955 func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
956 pp := (*pageAlloc)(p)
957 systemstack(func() {
958 r = pp.scavenge(nbytes, nil, true)
959 })
960 return
961 }
962 func (p *PageAlloc) InUse() []AddrRange {
963 ranges := make([]AddrRange, 0, len(p.inUse.ranges))
964 for _, r := range p.inUse.ranges {
965 ranges = append(ranges, AddrRange{r})
966 }
967 return ranges
968 }
969
970
971 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
972 ci := chunkIdx(i)
973 return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
974 }
975
976
977 type AddrRange struct {
978 addrRange
979 }
980
981
982 func MakeAddrRange(base, limit uintptr) AddrRange {
983 return AddrRange{makeAddrRange(base, limit)}
984 }
985
986
987 func (a AddrRange) Base() uintptr {
988 return a.addrRange.base.addr()
989 }
990
991
992 func (a AddrRange) Limit() uintptr {
993 return a.addrRange.limit.addr()
994 }
995
996
997 func (a AddrRange) Equals(b AddrRange) bool {
998 return a == b
999 }
1000
1001
1002 func (a AddrRange) Size() uintptr {
1003 return a.addrRange.size()
1004 }
1005
1006
1007
1008
1009
1010 var testSysStat = &memstats.other_sys
1011
1012
1013 type AddrRanges struct {
1014 addrRanges
1015 mutable bool
1016 }
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027 func NewAddrRanges() AddrRanges {
1028 r := addrRanges{}
1029 r.init(testSysStat)
1030 return AddrRanges{r, true}
1031 }
1032
1033
1034
1035
1036
1037
1038 func MakeAddrRanges(a ...AddrRange) AddrRanges {
1039
1040
1041
1042
1043
1044 ranges := make([]addrRange, 0, len(a))
1045 total := uintptr(0)
1046 for _, r := range a {
1047 ranges = append(ranges, r.addrRange)
1048 total += r.Size()
1049 }
1050 return AddrRanges{addrRanges{
1051 ranges: ranges,
1052 totalBytes: total,
1053 sysStat: testSysStat,
1054 }, false}
1055 }
1056
1057
1058
1059 func (a *AddrRanges) Ranges() []AddrRange {
1060 result := make([]AddrRange, 0, len(a.addrRanges.ranges))
1061 for _, r := range a.addrRanges.ranges {
1062 result = append(result, AddrRange{r})
1063 }
1064 return result
1065 }
1066
1067
1068
1069 func (a *AddrRanges) FindSucc(base uintptr) int {
1070 return a.findSucc(base)
1071 }
1072
1073
1074
1075
1076
1077 func (a *AddrRanges) Add(r AddrRange) {
1078 if !a.mutable {
1079 throw("attempt to mutate immutable AddrRanges")
1080 }
1081 a.add(r.addrRange)
1082 }
1083
1084
1085 func (a *AddrRanges) TotalBytes() uintptr {
1086 return a.addrRanges.totalBytes
1087 }
1088
1089
1090 type BitRange struct {
1091 I, N uint
1092 }
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108 func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
1109 p := new(pageAlloc)
1110
1111
1112 p.init(new(mutex), testSysStat, true)
1113 lockInit(p.mheapLock, lockRankMheap)
1114 for i, init := range chunks {
1115 addr := chunkBase(chunkIdx(i))
1116
1117
1118 systemstack(func() {
1119 lock(p.mheapLock)
1120 p.grow(addr, pallocChunkBytes)
1121 unlock(p.mheapLock)
1122 })
1123
1124
1125 ci := chunkIndex(addr)
1126 chunk := p.chunkOf(ci)
1127
1128
1129 chunk.scavenged.clearRange(0, pallocChunkPages)
1130
1131
1132
1133
1134 p.scav.index.alloc(ci, pallocChunkPages)
1135 p.scav.index.free(ci, 0, pallocChunkPages)
1136
1137
1138 if scav != nil {
1139 if scvg, ok := scav[i]; ok {
1140 for _, s := range scvg {
1141
1142
1143 if s.N != 0 {
1144 chunk.scavenged.setRange(s.I, s.N)
1145 }
1146 }
1147 }
1148 }
1149
1150
1151 for _, s := range init {
1152
1153
1154 if s.N != 0 {
1155 chunk.allocRange(s.I, s.N)
1156
1157
1158 p.scav.index.alloc(ci, s.N)
1159 }
1160 }
1161
1162
1163 systemstack(func() {
1164 lock(p.mheapLock)
1165 p.update(addr, pallocChunkPages, false, false)
1166 unlock(p.mheapLock)
1167 })
1168 }
1169
1170 return (*PageAlloc)(p)
1171 }
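// Illustrative sketch (assumed usage, not part of this file): page allocator
// tests typically build a PageAlloc from a map of chunk layouts keyed off
// BaseChunkIdx, allocate from it, and tear it down with FreePageAlloc:
//
//	chunks := map[runtime.ChunkIdx][]runtime.BitRange{
//		runtime.BaseChunkIdx: {{0, 64}}, // first 64 pages already allocated
//	}
//	p := runtime.NewPageAlloc(chunks, nil)
//	defer runtime.FreePageAlloc(p)
//	addr, scav := p.Alloc(1)
//	_, _ = addr, scav
//
// Passing nil for scav leaves no pages marked scavenged.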
1172
1173
1174
1175
1176 func FreePageAlloc(pp *PageAlloc) {
1177 p := (*pageAlloc)(pp)
1178
1179
1180 if pageAlloc64Bit != 0 {
1181 for l := 0; l < summaryLevels; l++ {
1182 sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
1183 }
1184 } else {
1185 resSize := uintptr(0)
1186 for _, s := range p.summary {
1187 resSize += uintptr(cap(s)) * pallocSumBytes
1188 }
1189 sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
1190 }
1191
1192
1193 sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
1194
1195
1196
1197
1198
1199 gcController.mappedReady.Add(-int64(p.summaryMappedReady))
1200 testSysStat.add(-int64(p.summaryMappedReady))
1201
1202
1203 for i := range p.chunks {
1204 if x := p.chunks[i]; x != nil {
1205 p.chunks[i] = nil
1206
1207 sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
1208 }
1209 }
1210 }
1211
1212
1213
1214
1215
1216
1217
1218 var BaseChunkIdx = func() ChunkIdx {
1219 var prefix uintptr
1220 if pageAlloc64Bit != 0 {
1221 prefix = 0xc000
1222 } else {
1223 prefix = 0x100
1224 }
1225 baseAddr := prefix * pallocChunkBytes
1226 if goos.IsAix != 0 {
1227 baseAddr += arenaBaseOffset
1228 }
1229 return ChunkIdx(chunkIndex(baseAddr))
1230 }()
1231
1232
1233
1234 func PageBase(c ChunkIdx, pageIdx uint) uintptr {
1235 return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
1236 }
1237
1238 type BitsMismatch struct {
1239 Base uintptr
1240 Got, Want uint64
1241 }
1242
1243 func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
1244 ok = true
1245
1246
1247 systemstack(func() {
1248 getg().m.mallocing++
1249
1250
1251 lock(&mheap_.lock)
1252 chunkLoop:
1253 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
1254 chunk := mheap_.pages.tryChunkOf(i)
1255 if chunk == nil {
1256 continue
1257 }
1258 for j := 0; j < pallocChunkPages/64; j++ {
1259
1260
1261
1262
1263
1264 want := chunk.scavenged[j] &^ chunk.pallocBits[j]
1265 got := chunk.scavenged[j]
1266 if want != got {
1267 ok = false
1268 if n >= len(mismatches) {
1269 break chunkLoop
1270 }
1271 mismatches[n] = BitsMismatch{
1272 Base: chunkBase(i) + uintptr(j)*64*pageSize,
1273 Got: got,
1274 Want: want,
1275 }
1276 n++
1277 }
1278 }
1279 }
1280 unlock(&mheap_.lock)
1281
1282 getg().m.mallocing--
1283 })
1284 return
1285 }
1286
1287 func PageCachePagesLeaked() (leaked uintptr) {
1288 stopTheWorld(stwForTestPageCachePagesLeaked)
1289
1290
1291 deadp := allp[len(allp):cap(allp)]
1292 for _, p := range deadp {
1293
1294
1295 if p != nil {
1296 leaked += uintptr(sys.OnesCount64(p.pcache.cache))
1297 }
1298 }
1299
1300 startTheWorld()
1301 return
1302 }
1303
1304 var Semacquire = semacquire
1305 var Semrelease1 = semrelease1
1306
1307 func SemNwait(addr *uint32) uint32 {
1308 root := semtable.rootFor(addr)
1309 return root.nwait.Load()
1310 }
1311
1312 const SemTableSize = semTabSize
1313
1314
1315 type SemTable struct {
1316 semTable
1317 }
1318
1319
1320 func (t *SemTable) Enqueue(addr *uint32) {
1321 s := acquireSudog()
1322 s.releasetime = 0
1323 s.acquiretime = 0
1324 s.ticket = 0
1325 t.semTable.rootFor(addr).queue(addr, s, false)
1326 }
1327
1328
1329
1330
1331 func (t *SemTable) Dequeue(addr *uint32) bool {
1332 s, _ := t.semTable.rootFor(addr).dequeue(addr)
1333 if s != nil {
1334 releaseSudog(s)
1335 return true
1336 }
1337 return false
1338 }
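// Illustrative sketch (assumed usage, not part of this file): a semaphore
// test can park and unpark a dummy waiter on an address without touching
// the global semtable:
//
//	var t runtime.SemTable
//	var addr uint32
//	t.Enqueue(&addr)
//	if !t.Dequeue(&addr) {
//		panic("expected a waiter for addr")
//	}
//
// Enqueue fabricates a sudog rather than blocking a real goroutine, which
// is what makes this usable from ordinary tests.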
1339
1340
1341 type MSpan mspan
1342
1343
1344 func AllocMSpan() *MSpan {
1345 var s *mspan
1346 systemstack(func() {
1347 lock(&mheap_.lock)
1348 s = (*mspan)(mheap_.spanalloc.alloc())
1349 unlock(&mheap_.lock)
1350 })
1351 return (*MSpan)(s)
1352 }
1353
1354
1355 func FreeMSpan(s *MSpan) {
1356 systemstack(func() {
1357 lock(&mheap_.lock)
1358 mheap_.spanalloc.free(unsafe.Pointer(s))
1359 unlock(&mheap_.lock)
1360 })
1361 }
1362
1363 func MSpanCountAlloc(ms *MSpan, bits []byte) int {
1364 s := (*mspan)(ms)
1365 s.nelems = uintptr(len(bits) * 8)
1366 s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
1367 result := s.countAlloc()
1368 s.gcmarkBits = nil
1369 return result
1370 }
1371
1372 const (
1373 TimeHistSubBucketBits = timeHistSubBucketBits
1374 TimeHistNumSubBuckets = timeHistNumSubBuckets
1375 TimeHistNumBuckets = timeHistNumBuckets
1376 TimeHistMinBucketBits = timeHistMinBucketBits
1377 TimeHistMaxBucketBits = timeHistMaxBucketBits
1378 )
1379
1380 type TimeHistogram timeHistogram
1381
1382
1383
1384
1385
1386 func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
1387 t := (*timeHistogram)(th)
1388 if bucket < 0 {
1389 return t.underflow.Load(), false
1390 }
1391 i := bucket*TimeHistNumSubBuckets + subBucket
1392 if i >= len(t.counts) {
1393 return t.overflow.Load(), false
1394 }
1395 return t.counts[i].Load(), true
1396 }
1397
1398 func (th *TimeHistogram) Record(duration int64) {
1399 (*timeHistogram)(th).record(duration)
1400 }
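// Illustrative sketch (assumed usage, not part of this file): tests can feed
// durations into a TimeHistogram and then query individual buckets:
//
//	var h runtime.TimeHistogram
//	h.Record(512)          // a 512ns event
//	n, ok := h.Count(0, 1) // count in bucket 0, sub-bucket 1
//	_, _ = n, ok
//
// Count reports ok=false when the requested bucket is out of range, in
// which case it returns the underflow or overflow counter instead.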
1401
1402 var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
1403
1404 func SetIntArgRegs(a int) int {
1405 lock(&finlock)
1406 old := intArgRegs
1407 if a >= 0 {
1408 intArgRegs = a
1409 }
1410 unlock(&finlock)
1411 return old
1412 }
1413
1414 func FinalizerGAsleep() bool {
1415 return fingStatus.Load()&fingWait != 0
1416 }
1417
1418
1419
1420
1421 var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
1422
1423
1424
1425 func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
1426 return gcTestIsReachable(ptrs...)
1427 }
1428
1429
1430
1431
1432
1433
1434
1435 func GCTestPointerClass(p unsafe.Pointer) string {
1436 return gcTestPointerClass(p)
1437 }
1438
1439 const Raceenabled = raceenabled
1440
1441 const (
1442 GCBackgroundUtilization = gcBackgroundUtilization
1443 GCGoalUtilization = gcGoalUtilization
1444 DefaultHeapMinimum = defaultHeapMinimum
1445 MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
1446 MemoryLimitMinHeapGoalHeadroom = memoryLimitMinHeapGoalHeadroom
1447 )
1448
1449 type GCController struct {
1450 gcControllerState
1451 }
1452
1453 func NewGCController(gcPercent int, memoryLimit int64) *GCController {
1454
1455
1456
1457
1458 g := Escape(new(GCController))
1459 g.gcControllerState.test = true
1460 g.init(int32(gcPercent), memoryLimit)
1461 return g
1462 }
1463
1464 func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
1465 trigger, _ := c.trigger()
1466 if c.heapMarked > trigger {
1467 trigger = c.heapMarked
1468 }
1469 c.maxStackScan.Store(stackSize)
1470 c.globalsScan.Store(globalsSize)
1471 c.heapLive.Store(trigger)
1472 c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
1473 c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
1474 }
1475
1476 func (c *GCController) AssistWorkPerByte() float64 {
1477 return c.assistWorkPerByte.Load()
1478 }
1479
1480 func (c *GCController) HeapGoal() uint64 {
1481 return c.heapGoal()
1482 }
1483
1484 func (c *GCController) HeapLive() uint64 {
1485 return c.heapLive.Load()
1486 }
1487
1488 func (c *GCController) HeapMarked() uint64 {
1489 return c.heapMarked
1490 }
1491
1492 func (c *GCController) Triggered() uint64 {
1493 return c.triggered
1494 }
1495
1496 type GCControllerReviseDelta struct {
1497 HeapLive int64
1498 HeapScan int64
1499 HeapScanWork int64
1500 StackScanWork int64
1501 GlobalsScanWork int64
1502 }
1503
1504 func (c *GCController) Revise(d GCControllerReviseDelta) {
1505 c.heapLive.Add(d.HeapLive)
1506 c.heapScan.Add(d.HeapScan)
1507 c.heapScanWork.Add(d.HeapScanWork)
1508 c.stackScanWork.Add(d.StackScanWork)
1509 c.globalsScanWork.Add(d.GlobalsScanWork)
1510 c.revise()
1511 }
1512
1513 func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
1514 c.assistTime.Store(assistTime)
1515 c.endCycle(elapsed, gomaxprocs, false)
1516 c.resetLive(bytesMarked)
1517 c.commit(false)
1518 }
1519
1520 func (c *GCController) AddIdleMarkWorker() bool {
1521 return c.addIdleMarkWorker()
1522 }
1523
1524 func (c *GCController) NeedIdleMarkWorker() bool {
1525 return c.needIdleMarkWorker()
1526 }
1527
1528 func (c *GCController) RemoveIdleMarkWorker() {
1529 c.removeIdleMarkWorker()
1530 }
1531
1532 func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
1533 c.setMaxIdleMarkWorkers(max)
1534 }
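// Illustrative sketch (assumed usage, not part of this file): pacer tests
// construct a GCController in test mode and step it through a synthetic
// GC cycle, feeding it scan work via Revise. The numbers below are arbitrary:
//
//	c := runtime.NewGCController(100, math.MaxInt64) // GOGC=100, no memory limit
//	c.StartCycle(10<<20, 1<<20, 0.5, 4)              // stacks, globals, scannable frac, GOMAXPROCS
//	c.Revise(runtime.GCControllerReviseDelta{
//		HeapLive:     1 << 20,
//		HeapScanWork: 1 << 19,
//	})
//	c.EndCycle(20<<20, 0, int64(time.Millisecond), 4)
//	goal := c.HeapGoal()
//	_ = goal
//
// The real pacer tests drive the controller with carefully constructed
// workloads and check the resulting goals and trigger points.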
1535
1536 var alwaysFalse bool
1537 var escapeSink any
1538
1539 func Escape[T any](x T) T {
1540 if alwaysFalse {
1541 escapeSink = x
1542 }
1543 return x
1544 }
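// Illustrative sketch (assumed usage, not part of this file): Escape defeats
// escape analysis so a benchmark's allocation can't be optimized onto the
// stack or away entirely:
//
//	x := new([64]byte)
//	x = runtime.Escape(x) // force x (and what it points to) onto the heap
//
// The alwaysFalse branch is never taken at run time, but the compiler must
// assume x may be stored in escapeSink, so it conservatively heap-allocates.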
1545
1546
1547 func Acquirem() {
1548 acquirem()
1549 }
1550
1551 func Releasem() {
1552 releasem(getg().m)
1553 }
1554
1555 var Timediv = timediv
1556
1557 type PIController struct {
1558 piController
1559 }
1560
1561 func NewPIController(kp, ti, tt, min, max float64) *PIController {
1562 return &PIController{piController{
1563 kp: kp,
1564 ti: ti,
1565 tt: tt,
1566 min: min,
1567 max: max,
1568 }}
1569 }
1570
1571 func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
1572 return c.piController.next(input, setpoint, period)
1573 }
1574
1575 const (
1576 CapacityPerProc = capacityPerProc
1577 GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
1578 )
1579
1580 type GCCPULimiter struct {
1581 limiter gcCPULimiterState
1582 }
1583
1584 func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
1585
1586
1587
1588
1589 l := Escape(new(GCCPULimiter))
1590 l.limiter.test = true
1591 l.limiter.resetCapacity(now, gomaxprocs)
1592 return l
1593 }
1594
1595 func (l *GCCPULimiter) Fill() uint64 {
1596 return l.limiter.bucket.fill
1597 }
1598
1599 func (l *GCCPULimiter) Capacity() uint64 {
1600 return l.limiter.bucket.capacity
1601 }
1602
1603 func (l *GCCPULimiter) Overflow() uint64 {
1604 return l.limiter.overflow
1605 }
1606
1607 func (l *GCCPULimiter) Limiting() bool {
1608 return l.limiter.limiting()
1609 }
1610
1611 func (l *GCCPULimiter) NeedUpdate(now int64) bool {
1612 return l.limiter.needUpdate(now)
1613 }
1614
1615 func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
1616 l.limiter.startGCTransition(enableGC, now)
1617 }
1618
1619 func (l *GCCPULimiter) FinishGCTransition(now int64) {
1620 l.limiter.finishGCTransition(now)
1621 }
1622
1623 func (l *GCCPULimiter) Update(now int64) {
1624 l.limiter.update(now)
1625 }
1626
1627 func (l *GCCPULimiter) AddAssistTime(t int64) {
1628 l.limiter.addAssistTime(t)
1629 }
1630
1631 func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
1632 l.limiter.resetCapacity(now, nprocs)
1633 }
1634
1635 const ScavengePercent = scavengePercent
1636
1637 type Scavenger struct {
1638 Sleep func(int64) int64
1639 Scavenge func(uintptr) (uintptr, int64)
1640 ShouldStop func() bool
1641 GoMaxProcs func() int32
1642
1643 released atomic.Uintptr
1644 scavenger scavengerState
1645 stop chan<- struct{}
1646 done <-chan struct{}
1647 }
1648
1649 func (s *Scavenger) Start() {
1650 if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
1651 panic("must populate all stubs")
1652 }
1653
1654
1655 s.scavenger.sleepStub = s.Sleep
1656 s.scavenger.scavenge = s.Scavenge
1657 s.scavenger.shouldStop = s.ShouldStop
1658 s.scavenger.gomaxprocs = s.GoMaxProcs
1659
1660
1661 stop := make(chan struct{})
1662 s.stop = stop
1663 done := make(chan struct{})
1664 s.done = done
1665 go func() {
1666
1667 s.scavenger.init()
1668 s.scavenger.park()
1669 for {
1670 select {
1671 case <-stop:
1672 close(done)
1673 return
1674 default:
1675 }
1676 released, workTime := s.scavenger.run()
1677 if released == 0 {
1678 s.scavenger.park()
1679 continue
1680 }
1681 s.released.Add(released)
1682 s.scavenger.sleep(workTime)
1683 }
1684 }()
685 if !s.BlockUntilParked(1e9 /* 1 second */) {
1686 panic("timed out waiting for scavenger to get ready")
1687 }
1688 }
1689
1690
1691
1692
1693
1694
1695
1696 func (s *Scavenger) BlockUntilParked(timeout int64) bool {
1697
1698
1699
1700
1701
1702 start := nanotime()
1703 for nanotime()-start < timeout {
1704 lock(&s.scavenger.lock)
1705 parked := s.scavenger.parked
1706 unlock(&s.scavenger.lock)
1707 if parked {
1708 return true
1709 }
1710 Gosched()
1711 }
1712 return false
1713 }
1714
1715
1716 func (s *Scavenger) Released() uintptr {
1717 return s.released.Load()
1718 }
1719
1720
1721 func (s *Scavenger) Wake() {
1722 s.scavenger.wake()
1723 }
1724
1725
1726
1727 func (s *Scavenger) Stop() {
1728 lock(&s.scavenger.lock)
1729 parked := s.scavenger.parked
1730 unlock(&s.scavenger.lock)
1731 if !parked {
1732 panic("tried to clean up scavenger that is not parked")
1733 }
1734 close(s.stop)
1735 s.Wake()
1736 <-s.done
1737 }
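// Illustrative sketch (assumed usage, not part of this file): scavenger tests
// stub out the state's dependencies, then drive it by waking it and waiting
// for it to park again:
//
//	s := &runtime.Scavenger{
//		Sleep:      func(ns int64) int64 { return ns },
//		Scavenge:   func(n uintptr) (uintptr, int64) { return 0, 0 },
//		ShouldStop: func() bool { return true },
//		GoMaxProcs: func() int32 { return 1 },
//	}
//	s.Start()
//	s.Wake()
//	if !s.BlockUntilParked(1e9) {
//		panic("scavenger never parked")
//	}
//	s.Stop()
//
// A run that releases nothing causes the goroutine started by Start to park
// the scavenger again, which is what BlockUntilParked waits for.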
1738
1739 type ScavengeIndex struct {
1740 i scavengeIndex
1741 }
1742
1743 func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
1744 s := new(ScavengeIndex)
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756 s.i.chunks = make([]atomicScavChunkData, max)
1757 s.i.min.Store(uintptr(min))
1758 s.i.max.Store(uintptr(max))
1759 s.i.minHeapIdx.Store(uintptr(min))
1760 s.i.test = true
1761 return s
1762 }
1763
1764 func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
1765 ci, off := s.i.find(force)
1766 return ChunkIdx(ci), off
1767 }
1768
1769 func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
1770 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1771 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1772
1773 if sc == ec {
1774
1775 s.i.alloc(sc, ei+1-si)
1776 } else {
1777
1778 s.i.alloc(sc, pallocChunkPages-si)
1779 for c := sc + 1; c < ec; c++ {
1780 s.i.alloc(c, pallocChunkPages)
1781 }
1782 s.i.alloc(ec, ei+1)
1783 }
1784 }
1785
1786 func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
1787 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1788 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1789
1790 if sc == ec {
1791
1792 s.i.free(sc, si, ei+1-si)
1793 } else {
1794
1795 s.i.free(sc, si, pallocChunkPages-si)
1796 for c := sc + 1; c < ec; c++ {
1797 s.i.free(c, 0, pallocChunkPages)
1798 }
1799 s.i.free(ec, 0, ei+1)
1800 }
1801 }
1802
1803 func (s *ScavengeIndex) ResetSearchAddrs() {
1804 for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
1805 addr, marked := a.Load()
1806 if marked {
1807 a.StoreUnmark(addr, addr)
1808 }
1809 a.Clear()
1810 }
1811 s.i.freeHWM = minOffAddr
1812 }
1813
1814 func (s *ScavengeIndex) NextGen() {
1815 s.i.nextGen()
1816 }
1817
1818 func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
1819 s.i.setEmpty(chunkIdx(ci))
1820 }
1821
1822 func (s *ScavengeIndex) SetNoHugePage(ci ChunkIdx) {
1823 s.i.setNoHugePage(chunkIdx(ci))
1824 }
1825
1826 func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
1827 sc0 := scavChunkData{
1828 gen: gen,
1829 inUse: inUse,
1830 lastInUse: lastInUse,
1831 scavChunkFlags: scavChunkFlags(flags),
1832 }
1833 scp := sc0.pack()
1834 sc1 := unpackScavChunkData(scp)
1835 return sc0 == sc1
1836 }
1837
1838 const GTrackingPeriod = gTrackingPeriod
1839
1840 var ZeroBase = unsafe.Pointer(&zerobase)
1841
1842 const UserArenaChunkBytes = userArenaChunkBytes
1843
1844 type UserArena struct {
1845 arena *userArena
1846 }
1847
1848 func NewUserArena() *UserArena {
1849 return &UserArena{newUserArena()}
1850 }
1851
1852 func (a *UserArena) New(out *any) {
1853 i := efaceOf(out)
1854 typ := i._type
1855 if typ.Kind_&kindMask != kindPtr {
1856 panic("new result of non-ptr type")
1857 }
1858 typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
1859 i.data = a.arena.new(typ)
1860 }
1861
1862 func (a *UserArena) Slice(sl any, cap int) {
1863 a.arena.slice(sl, cap)
1864 }
1865
1866 func (a *UserArena) Free() {
1867 a.arena.free()
1868 }
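// Illustrative sketch (assumed usage, not part of this file): arena tests
// allocate through the wrapper by passing a pointer-typed any and then
// free the whole arena at once:
//
//	a := runtime.NewUserArena()
//	var x any = (*[16]int)(nil)
//	a.New(&x) // x now holds a *[16]int allocated in the arena
//	p := x.(*[16]int)
//	p[0] = 42
//	a.Free() // after this, p must no longer be used
//
// New panics unless the value stored in *out is of pointer kind, mirroring
// the check on typ.Kind_ above.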
1869
1870 func GlobalWaitingArenaChunks() int {
1871 n := 0
1872 systemstack(func() {
1873 lock(&mheap_.lock)
1874 for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
1875 n++
1876 }
1877 unlock(&mheap_.lock)
1878 })
1879 return n
1880 }
1881
1882 func UserArenaClone[T any](s T) T {
1883 return arena_heapify(s).(T)
1884 }
1885
1886 var AlignUp = alignUp
1887
1888
1889 // BlockUntilEmptyFinalizerQueue blocks until either the finalizer queue is
1890 // emptied (and the finalizer goroutine is idle) or the timeout (in nanoseconds) is reached.
1891 func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
1892 start := nanotime()
1893 for nanotime()-start < timeout {
1894 lock(&finlock)
1895
1896
1897 empty := finq == nil
1898 empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait
1899 unlock(&finlock)
1900 if empty {
1901 return true
1902 }
1903 Gosched()
1904 }
1905 return false
1906 }
1907
1908 func FrameStartLine(f *Frame) int {
1909 return f.startLine
1910 }
1911
1912 // PersistentAlloc allocates n bytes of off-heap memory via persistentalloc.
1913 // This memory is never freed; use it sparingly in tests.
1914 func PersistentAlloc(n uintptr) unsafe.Pointer {
1915 return persistentalloc(n, 0, &memstats.other_sys)
1916 }
1917
1918 // FPCallers works like Callers, but it uses frame pointer unwinding starting
1919 // from the current frame's frame pointer to fill pcBuf with return PCs.
1920 func FPCallers(pcBuf []uintptr) int {
1921 return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
1922 }
1923
1924 const FramePointerEnabled = framepointer_enabled
1925
1926 var (
1927 IsPinned = isPinned
1928 GetPinCounter = pinnerGetPinCounter
1929 )
1930
1931 func SetPinnerLeakPanic(f func()) {
1932 pinnerLeakPanic = f
1933 }
1934 func GetPinnerLeakPanic() func() {
1935 return pinnerLeakPanic
1936 }
1937