Source file src/runtime/export_test.go

     1  // Copyright 2010 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Export guts for testing.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/goarch"
    11  	"internal/goos"
    12  	"runtime/internal/atomic"
    13  	"runtime/internal/sys"
    14  	"unsafe"
    15  )
    16  
    17  var Fadd64 = fadd64
    18  var Fsub64 = fsub64
    19  var Fmul64 = fmul64
    20  var Fdiv64 = fdiv64
    21  var F64to32 = f64to32
    22  var F32to64 = f32to64
    23  var Fcmp64 = fcmp64
    24  var Fintto64 = fintto64
    25  var F64toint = f64toint
    26  
    27  var Entersyscall = entersyscall
    28  var Exitsyscall = exitsyscall
    29  var LockedOSThread = lockedOSThread
    30  var Xadduintptr = atomic.Xadduintptr
    31  
    32  var Fastlog2 = fastlog2
    33  
    34  var Atoi = atoi
    35  var Atoi32 = atoi32
    36  var ParseByteCount = parseByteCount
    37  
    38  var Nanotime = nanotime
    39  var NetpollBreak = netpollBreak
    40  var Usleep = usleep
    41  
    42  var PhysPageSize = physPageSize
    43  var PhysHugePageSize = physHugePageSize
    44  
    45  var NetpollGenericInit = netpollGenericInit
    46  
    47  var Memmove = memmove
    48  var MemclrNoHeapPointers = memclrNoHeapPointers
    49  
    50  var CgoCheckPointer = cgoCheckPointer
    51  
    52  const TracebackInnerFrames = tracebackInnerFrames
    53  const TracebackOuterFrames = tracebackOuterFrames
    54  
    55  var LockPartialOrder = lockPartialOrder
    56  
    57  type LockRank lockRank
    58  
    59  func (l LockRank) String() string {
    60  	return lockRank(l).String()
    61  }
    62  
    63  const PreemptMSupported = preemptMSupported
    64  
    65  type LFNode struct {
    66  	Next    uint64
    67  	Pushcnt uintptr
    68  }
    69  
    70  func LFStackPush(head *uint64, node *LFNode) {
    71  	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
    72  }
    73  
    74  func LFStackPop(head *uint64) *LFNode {
    75  	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
    76  }
    77  func LFNodeValidate(node *LFNode) {
    78  	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
    79  }
    80  
    81  func Netpoll(delta int64) {
    82  	systemstack(func() {
    83  		netpoll(delta)
    84  	})
    85  }
    86  
    87  func GCMask(x any) (ret []byte) {
    88  	systemstack(func() {
    89  		ret = getgcmask(x)
    90  	})
    91  	return
    92  }
    93  
    94  func RunSchedLocalQueueTest() {
    95  	pp := new(p)
    96  	gs := make([]g, len(pp.runq))
    97  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
    98  	for i := 0; i < len(pp.runq); i++ {
    99  		if g, _ := runqget(pp); g != nil {
   100  			throw("runq is not empty initially")
   101  		}
   102  		for j := 0; j < i; j++ {
   103  			runqput(pp, &gs[i], false)
   104  		}
   105  		for j := 0; j < i; j++ {
   106  			if g, _ := runqget(pp); g != &gs[i] {
   107  				print("bad element at iter ", i, "/", j, "\n")
   108  				throw("bad element")
   109  			}
   110  		}
   111  		if g, _ := runqget(pp); g != nil {
   112  			throw("runq is not empty afterwards")
   113  		}
   114  	}
   115  }
   116  
   117  func RunSchedLocalQueueStealTest() {
   118  	p1 := new(p)
   119  	p2 := new(p)
   120  	gs := make([]g, len(p1.runq))
   121  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   122  	for i := 0; i < len(p1.runq); i++ {
   123  		for j := 0; j < i; j++ {
   124  			gs[j].sig = 0
   125  			runqput(p1, &gs[j], false)
   126  		}
   127  		gp := runqsteal(p2, p1, true)
   128  		s := 0
   129  		if gp != nil {
   130  			s++
   131  			gp.sig++
   132  		}
   133  		for {
   134  			gp, _ = runqget(p2)
   135  			if gp == nil {
   136  				break
   137  			}
   138  			s++
   139  			gp.sig++
   140  		}
   141  		for {
   142  			gp, _ = runqget(p1)
   143  			if gp == nil {
   144  				break
   145  			}
   146  			gp.sig++
   147  		}
   148  		for j := 0; j < i; j++ {
   149  			if gs[j].sig != 1 {
   150  				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
   151  				throw("bad element")
   152  			}
   153  		}
   154  		if s != i/2 && s != i/2+1 {
   155  			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
   156  			throw("bad steal")
   157  		}
   158  	}
   159  }
   160  
   161  func RunSchedLocalQueueEmptyTest(iters int) {
   162  	// Test that runq is not spuriously reported as empty.
   163  	// Runq emptiness affects scheduling decisions and spurious emptiness
   164  	// can lead to underutilization (both runnable Gs and idle Ps coexist
   165  	// for an arbitrarily long time).
   166  	done := make(chan bool, 1)
   167  	p := new(p)
   168  	gs := make([]g, 2)
   169  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   170  	ready := new(uint32)
   171  	for i := 0; i < iters; i++ {
   172  		*ready = 0
   173  		next0 := (i & 1) == 0
   174  		next1 := (i & 2) == 0
   175  		runqput(p, &gs[0], next0)
   176  		go func() {
   177  			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   178  			}
   179  			if runqempty(p) {
   180  				println("next:", next0, next1)
   181  				throw("queue is empty")
   182  			}
   183  			done <- true
   184  		}()
   185  		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   186  		}
   187  		runqput(p, &gs[1], next1)
   188  		runqget(p)
   189  		<-done
   190  		runqget(p)
   191  	}
   192  }
   193  
   194  var (
   195  	StringHash = stringHash
   196  	BytesHash  = bytesHash
   197  	Int32Hash  = int32Hash
   198  	Int64Hash  = int64Hash
   199  	MemHash    = memhash
   200  	MemHash32  = memhash32
   201  	MemHash64  = memhash64
   202  	EfaceHash  = efaceHash
   203  	IfaceHash  = ifaceHash
   204  )
   205  
   206  var UseAeshash = &useAeshash
   207  
   208  func MemclrBytes(b []byte) {
   209  	s := (*slice)(unsafe.Pointer(&b))
   210  	memclrNoHeapPointers(s.array, uintptr(s.len))
   211  }
   212  
   213  const HashLoad = hashLoad
   214  
   215  // entry point for testing
   216  func GostringW(w []uint16) (s string) {
   217  	systemstack(func() {
   218  		s = gostringw(&w[0])
   219  	})
   220  	return
   221  }
   222  
   223  var Open = open
   224  var Close = closefd
   225  var Read = read
   226  var Write = write
   227  
   228  func Envs() []string     { return envs }
   229  func SetEnvs(e []string) { envs = e }
   230  
   231  // For benchmarking.
   232  
   233  // blockWrapper is a wrapper type that ensures a T is placed within a
   234  // large object. This is necessary for safely benchmarking things
   235  // that manipulate the heap bitmap, like heapBitsSetType.
   236  //
   237  // More specifically, allocating threads assume they're the sole writers
   238  // to their span's heap bits, which allows those writes to be non-atomic.
   239  // The heap bitmap is written byte-wise, so if one tried to call heapBitsSetType
   240  // on an existing object in a small object span, we might corrupt that
   241  // span's bitmap with a concurrent byte write to the heap bitmap. Large
   242  // object spans contain exactly one object, so we can be sure no other P
   243  // is going to be allocating from it concurrently, hence this wrapper type
   244  // which ensures we have a T in a large object span.
   245  type blockWrapper[T any] struct {
   246  	value T
   247  	_     [_MaxSmallSize]byte // Ensure we're a large object.
   248  }
   249  
   250  func BenchSetType[T any](n int, resetTimer func()) {
   251  	x := new(blockWrapper[T])
   252  
   253  	// Escape x to ensure it is allocated on the heap, as we are
   254  	// working on the heap bits here.
   255  	Escape(x)
   256  
   257  	// Grab the type.
   258  	var i any = *new(T)
   259  	e := *efaceOf(&i)
   260  	t := e._type
   261  
   262  	// Benchmark setting the type bits for just the internal T of the block.
   263  	benchSetType(n, resetTimer, 1, unsafe.Pointer(&x.value), t)
   264  }
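
        // For illustration only: a benchmark in package runtime_test could drive
        // this helper roughly as below (the type and benchmark name are hypothetical):
        //
        //	func BenchmarkSetTypeNode(b *testing.B) {
        //		type node struct {
        //			left, right *node
        //			v           int
        //		}
        //		runtime.BenchSetType[node](b.N, b.ResetTimer)
        //	}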
   265  
   266  const maxArrayBlockWrapperLen = 32
   267  
   268  // arrayBlockWrapper is like blockWrapper, but the interior value is intended
   269  // to be used as a backing store for a slice.
   270  type arrayBlockWrapper[T any] struct {
   271  	value [maxArrayBlockWrapperLen]T
   272  	_     [_MaxSmallSize]byte // Ensure we're a large object.
   273  }
   274  
   275  // arrayLargeBlockWrapper is like arrayBlockWrapper, but the interior array
   276  // accommodates many more elements.
   277  type arrayLargeBlockWrapper[T any] struct {
   278  	value [1024]T
   279  	_     [_MaxSmallSize]byte // Ensure we're a large object.
   280  }
   281  
   282  func BenchSetTypeSlice[T any](n int, resetTimer func(), len int) {
   283  	// We have two separate cases here because for tests on big types
   284  	// with relatively small slices we want to avoid generating a really
   285  	// big allocation, since that would likely force a GC and skew the
   286  	// test results.
   287  	var y unsafe.Pointer
   288  	if len <= maxArrayBlockWrapperLen {
   289  		x := new(arrayBlockWrapper[T])
   290  		// Escape x to ensure it is allocated on the heap, as we are
   291  		// working on the heap bits here.
   292  		Escape(x)
   293  		y = unsafe.Pointer(&x.value[0])
   294  	} else {
   295  		x := new(arrayLargeBlockWrapper[T])
   296  		Escape(x)
   297  		y = unsafe.Pointer(&x.value[0])
   298  	}
   299  
   300  	// Grab the type.
   301  	var i any = *new(T)
   302  	e := *efaceOf(&i)
   303  	t := e._type
   304  
   305  	// Benchmark setting the type for a slice created from the array
   306  	// of T within the arrayBlock.
   307  	benchSetType(n, resetTimer, len, y, t)
   308  }
   309  
   310  // benchSetType is the implementation of the BenchSetType* functions.
   311  // x must be len consecutive Ts allocated within a large object span (to
   312  // avoid a race on the heap bitmap).
   313  //
   314  // Note: this function cannot be generic. It would get its type from one of
   315  // its callers (BenchSetType or BenchSetTypeSlice) whose type parameters are
   316  // set by a call in the runtime_test package. That means this function and its
   317  // callers will get instantiated in the package that provides the type argument,
   318  // i.e. runtime_test. However, we call a function on the system stack. In race
   319  // mode the runtime package is usually left uninstrumented because e.g. g0 has
   320  // no valid racectx, but if we're instantiated in the runtime_test package,
   321  // we might accidentally cause runtime code to be incorrectly instrumented.
   322  func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type) {
   323  	// Compute the input sizes.
   324  	size := t.Size() * uintptr(len)
   325  
   326  	// Validate this function's invariant.
   327  	s := spanOfHeap(uintptr(x))
   328  	if s == nil {
   329  		panic("no heap span for input")
   330  	}
   331  	if s.spanclass.sizeclass() != 0 {
   332  		panic("span is not a large object span")
   333  	}
   334  
   335  	// Round up the size to the size class to make the benchmark a little more
   336  	// realistic. However, validate it to make sure this is safe.
   337  	allocSize := roundupsize(size)
   338  	if s.npages*pageSize < allocSize {
   339  		panic("backing span not large enough for benchmark")
   340  	}
   341  
   342  	// Benchmark heapBitsSetType by calling it in a loop. This is safe because
   343  	// x is in a large object span.
   344  	resetTimer()
   345  	systemstack(func() {
   346  		for i := 0; i < n; i++ {
   347  			heapBitsSetType(uintptr(x), allocSize, size, t)
   348  		}
   349  	})
   350  
   351  	// Make sure x doesn't get freed, since we're taking a uintptr.
   352  	KeepAlive(x)
   353  }
   354  
   355  const PtrSize = goarch.PtrSize
   356  
   357  var ForceGCPeriod = &forcegcperiod
   358  
   359  // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
   360  // the "environment" traceback level, so later calls to
   361  // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
   362  func SetTracebackEnv(level string) {
   363  	setTraceback(level)
   364  	traceback_env = traceback_cache
   365  }
   366  
   367  var ReadUnaligned32 = readUnaligned32
   368  var ReadUnaligned64 = readUnaligned64
   369  
   370  func CountPagesInUse() (pagesInUse, counted uintptr) {
   371  	stopTheWorld(stwForTestCountPagesInUse)
   372  
   373  	pagesInUse = uintptr(mheap_.pagesInUse.Load())
   374  
   375  	for _, s := range mheap_.allspans {
   376  		if s.state.get() == mSpanInUse {
   377  			counted += s.npages
   378  		}
   379  	}
   380  
   381  	startTheWorld()
   382  
   383  	return
   384  }
   385  
   386  func Fastrand() uint32          { return fastrand() }
   387  func Fastrand64() uint64        { return fastrand64() }
   388  func Fastrandn(n uint32) uint32 { return fastrandn(n) }
   389  
   390  type ProfBuf profBuf
   391  
   392  func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
   393  	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
   394  }
   395  
   396  func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
   397  	(*profBuf)(p).write(tag, now, hdr, stk)
   398  }
   399  
   400  const (
   401  	ProfBufBlocking    = profBufBlocking
   402  	ProfBufNonBlocking = profBufNonBlocking
   403  )
   404  
   405  func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
   406  	return (*profBuf)(p).read(profBufReadMode(mode))
   407  }
   408  
   409  func (p *ProfBuf) Close() {
   410  	(*profBuf)(p).close()
   411  }
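
        // A minimal sketch of exercising the wrapper from a test, assuming the test
        // can refer to these exported names directly (the sizes and sample values
        // below are hypothetical):
        //
        //	var tag unsafe.Pointer
        //	b := NewProfBuf(2, 64, 8)
        //	b.Write(&tag, Nanotime(), []uint64{1, 2}, []uintptr{100, 200})
        //	data, tags, eof := b.Read(ProfBufNonBlocking)
        //	_, _, _ = data, tags, eof
        //	b.Close()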
   412  
   413  func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
   414  	stopTheWorld(stwForTestReadMetricsSlow)
   415  
   416  	// Initialize the metrics beforehand because this could
   417  	// allocate and skew the stats.
   418  	metricsLock()
   419  	initMetrics()
   420  	metricsUnlock()
   421  
   422  	systemstack(func() {
   423  		// Read memstats first. It's going to flush
   424  		// the mcaches, which readMetrics does not do, so
   425  		// going the other way around may result in
   426  		// inconsistent statistics.
   427  		readmemstats_m(memStats)
   428  	})
   429  
   430  	// Read metrics off the system stack.
   431  	//
   432  	// The only part of readMetrics that could allocate
   433  	// and skew the stats is initMetrics.
   434  	readMetrics(samplesp, len, cap)
   435  
   436  	startTheWorld()
   437  }
   438  
   439  // ReadMemStatsSlow returns both the runtime-computed MemStats and
   440  // MemStats accumulated by scanning the heap.
   441  func ReadMemStatsSlow() (base, slow MemStats) {
   442  	stopTheWorld(stwForTestReadMemStatsSlow)
   443  
   444  	// Run on the system stack to avoid stack growth allocation.
   445  	systemstack(func() {
   446  		// Make sure stats don't change.
   447  		getg().m.mallocing++
   448  
   449  		readmemstats_m(&base)
   450  
   451  		// Initialize slow from base and zero the fields we're
   452  		// recomputing.
   453  		slow = base
   454  		slow.Alloc = 0
   455  		slow.TotalAlloc = 0
   456  		slow.Mallocs = 0
   457  		slow.Frees = 0
   458  		slow.HeapReleased = 0
   459  		var bySize [_NumSizeClasses]struct {
   460  			Mallocs, Frees uint64
   461  		}
   462  
   463  		// Add up current allocations in spans.
   464  		for _, s := range mheap_.allspans {
   465  			if s.state.get() != mSpanInUse {
   466  				continue
   467  			}
   468  			if s.isUnusedUserArenaChunk() {
   469  				continue
   470  			}
   471  			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
   472  				slow.Mallocs++
   473  				slow.Alloc += uint64(s.elemsize)
   474  			} else {
   475  				slow.Mallocs += uint64(s.allocCount)
   476  				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
   477  				bySize[sizeclass].Mallocs += uint64(s.allocCount)
   478  			}
   479  		}
   480  
   481  		// Add in frees by just reading the stats for those directly.
   482  		var m heapStatsDelta
   483  		memstats.heapStats.unsafeRead(&m)
   484  
   485  		// Collect per-sizeclass free stats.
   486  		var smallFree uint64
   487  		for i := 0; i < _NumSizeClasses; i++ {
   488  			slow.Frees += uint64(m.smallFreeCount[i])
   489  			bySize[i].Frees += uint64(m.smallFreeCount[i])
   490  			bySize[i].Mallocs += uint64(m.smallFreeCount[i])
   491  			smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
   492  		}
   493  		slow.Frees += uint64(m.tinyAllocCount) + uint64(m.largeFreeCount)
   494  		slow.Mallocs += slow.Frees
   495  
   496  		slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree
   497  
   498  		for i := range slow.BySize {
   499  			slow.BySize[i].Mallocs = bySize[i].Mallocs
   500  			slow.BySize[i].Frees = bySize[i].Frees
   501  		}
   502  
   503  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
   504  			chunk := mheap_.pages.tryChunkOf(i)
   505  			if chunk == nil {
   506  				continue
   507  			}
   508  			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
   509  			slow.HeapReleased += uint64(pg) * pageSize
   510  		}
   511  		for _, p := range allp {
   512  			pg := sys.OnesCount64(p.pcache.scav)
   513  			slow.HeapReleased += uint64(pg) * pageSize
   514  		}
   515  
   516  		getg().m.mallocing--
   517  	})
   518  
   519  	startTheWorld()
   520  	return
   521  }
   522  
   523  // ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the current goroutine
   524  // and verifies that unwinding the new stack doesn't crash, even if the old
   525  // stack has been freed or reused (simulated via poisoning).
   526  func ShrinkStackAndVerifyFramePointers() {
   527  	before := stackPoisonCopy
   528  	defer func() { stackPoisonCopy = before }()
   529  	stackPoisonCopy = 1
   530  
   531  	gp := getg()
   532  	systemstack(func() {
   533  		shrinkstack(gp)
   534  	})
   535  	// If our new stack contains frame pointers into the old stack, this will
   536  	// crash because the old stack has been poisoned.
   537  	FPCallers(make([]uintptr, 1024))
   538  }
   539  
   540  // BlockOnSystemStack switches to the system stack, prints "x\n" to
   541  // stderr, and blocks in a stack containing
   542  // "runtime.blockOnSystemStackInternal".
   543  func BlockOnSystemStack() {
   544  	systemstack(blockOnSystemStackInternal)
   545  }
   546  
   547  func blockOnSystemStackInternal() {
   548  	print("x\n")
   549  	lock(&deadlock)
   550  	lock(&deadlock)
   551  }
   552  
   553  type RWMutex struct {
   554  	rw rwmutex
   555  }
   556  
   557  func (rw *RWMutex) RLock() {
   558  	rw.rw.rlock()
   559  }
   560  
   561  func (rw *RWMutex) RUnlock() {
   562  	rw.rw.runlock()
   563  }
   564  
   565  func (rw *RWMutex) Lock() {
   566  	rw.rw.lock()
   567  }
   568  
   569  func (rw *RWMutex) Unlock() {
   570  	rw.rw.unlock()
   571  }
   572  
   573  const RuntimeHmapSize = unsafe.Sizeof(hmap{})
   574  
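        // MapBucketsCount reports the number of buckets (1<<B) in m's bucket array.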
   575  func MapBucketsCount(m map[int]int) int {
   576  	h := *(**hmap)(unsafe.Pointer(&m))
   577  	return 1 << h.B
   578  }
   579  
   580  func MapBucketsPointerIsNil(m map[int]int) bool {
   581  	h := *(**hmap)(unsafe.Pointer(&m))
   582  	return h.buckets == nil
   583  }
   584  
   585  func LockOSCounts() (external, internal uint32) {
   586  	gp := getg()
   587  	if gp.m.lockedExt+gp.m.lockedInt == 0 {
   588  		if gp.lockedm != 0 {
   589  			panic("lockedm on non-locked goroutine")
   590  		}
   591  	} else {
   592  		if gp.lockedm == 0 {
   593  			panic("nil lockedm on locked goroutine")
   594  		}
   595  	}
   596  	return gp.m.lockedExt, gp.m.lockedInt
   597  }
   598  
   599  //go:noinline
   600  func TracebackSystemstack(stk []uintptr, i int) int {
   601  	if i == 0 {
   602  		pc, sp := getcallerpc(), getcallersp()
   603  		var u unwinder
   604  		u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
   605  		return tracebackPCs(&u, 0, stk)
   606  	}
   607  	n := 0
   608  	systemstack(func() {
   609  		n = TracebackSystemstack(stk, i-1)
   610  	})
   611  	return n
   612  }
   613  
   614  func KeepNArenaHints(n int) {
   615  	hint := mheap_.arenaHints
   616  	for i := 1; i < n; i++ {
   617  		hint = hint.next
   618  		if hint == nil {
   619  			return
   620  		}
   621  	}
   622  	hint.next = nil
   623  }
   624  
   625  // MapNextArenaHint reserves a page at the next arena growth hint,
   626  // preventing the arena from growing there, and returns the range of
   627  // addresses that are no longer viable.
   628  //
   629  // This may fail to reserve memory. If it fails, it still returns the
   630  // address range it attempted to reserve.
   631  func MapNextArenaHint() (start, end uintptr, ok bool) {
   632  	hint := mheap_.arenaHints
   633  	addr := hint.addr
   634  	if hint.down {
   635  		start, end = addr-heapArenaBytes, addr
   636  		addr -= physPageSize
   637  	} else {
   638  		start, end = addr, addr+heapArenaBytes
   639  	}
   640  	got := sysReserve(unsafe.Pointer(addr), physPageSize)
   641  	ok = (addr == uintptr(got))
   642  	if !ok {
   643  		// We were unable to get the requested reservation.
   644  		// Release what we did get and fail.
   645  		sysFreeOS(got, physPageSize)
   646  	}
   647  	return
   648  }
   649  
   650  func GetNextArenaHint() uintptr {
   651  	return mheap_.arenaHints.addr
   652  }
   653  
   654  type G = g
   655  
   656  type Sudog = sudog
   657  
   658  func Getg() *G {
   659  	return getg()
   660  }
   661  
   662  func Goid() uint64 {
   663  	return getg().goid
   664  }
   665  
   666  func GIsWaitingOnMutex(gp *G) bool {
   667  	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
   668  }
   669  
   670  var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
   671  
   672  //go:noinline
   673  func PanicForTesting(b []byte, i int) byte {
   674  	return unexportedPanicForTesting(b, i)
   675  }
   676  
   677  //go:noinline
   678  func unexportedPanicForTesting(b []byte, i int) byte {
   679  	return b[i]
   680  }
   681  
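        // G0StackOverflow deliberately overflows the system (g0) stack: stackOverflow
        // recurses with a 256-byte frame until the stack is exhausted, letting tests
        // check how the runtime reports that fatal condition.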
   682  func G0StackOverflow() {
   683  	systemstack(func() {
   684  		stackOverflow(nil)
   685  	})
   686  }
   687  
   688  func stackOverflow(x *byte) {
   689  	var buf [256]byte
   690  	stackOverflow(&buf[0])
   691  }
   692  
   693  func MapTombstoneCheck(m map[int]int) {
   694  	// Make sure emptyOne and emptyRest are distributed correctly.
   695  	// We should have a series of filled and emptyOne cells, followed by
   696  	// a series of emptyRest cells.
   697  	h := *(**hmap)(unsafe.Pointer(&m))
   698  	i := any(m)
   699  	t := *(**maptype)(unsafe.Pointer(&i))
   700  
   701  	for x := 0; x < 1<<h.B; x++ {
   702  		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
   703  		n := 0
   704  		for b := b0; b != nil; b = b.overflow(t) {
   705  			for i := 0; i < bucketCnt; i++ {
   706  				if b.tophash[i] != emptyRest {
   707  					n++
   708  				}
   709  			}
   710  		}
   711  		k := 0
   712  		for b := b0; b != nil; b = b.overflow(t) {
   713  			for i := 0; i < bucketCnt; i++ {
   714  				if k < n && b.tophash[i] == emptyRest {
   715  					panic("early emptyRest")
   716  				}
   717  				if k >= n && b.tophash[i] != emptyRest {
   718  					panic("late non-emptyRest")
   719  				}
   720  				if k == n-1 && b.tophash[i] == emptyOne {
   721  					panic("last non-emptyRest entry is emptyOne")
   722  				}
   723  				k++
   724  			}
   725  		}
   726  	}
   727  }
   728  
   729  func RunGetgThreadSwitchTest() {
   730  	// Test that getg works correctly with thread switch.
   731  	// With gccgo, if we generate getg inlined, the backend
   732  	// may cache the address of the TLS variable, which
   733  	// will become invalid after a thread switch. This test
   734  	// checks that the bad caching doesn't happen.
   735  
   736  	ch := make(chan int)
   737  	go func(ch chan int) {
   738  		ch <- 5
   739  		LockOSThread()
   740  	}(ch)
   741  
   742  	g1 := getg()
   743  
   744  	// Block on a receive. This is likely to get us a thread
   745  	// switch. If we yield to the sender goroutine, it will
   746  	// lock the thread, forcing us to resume on a different
   747  	// thread.
   748  	<-ch
   749  
   750  	g2 := getg()
   751  	if g1 != g2 {
   752  		panic("g1 != g2")
   753  	}
   754  
   755  	// Also test getg after some control flow, as the
   756  	// backend is sensitive to control flow.
   757  	g3 := getg()
   758  	if g1 != g3 {
   759  		panic("g1 != g3")
   760  	}
   761  }
   762  
   763  const (
   764  	PageSize         = pageSize
   765  	PallocChunkPages = pallocChunkPages
   766  	PageAlloc64Bit   = pageAlloc64Bit
   767  	PallocSumBytes   = pallocSumBytes
   768  )
   769  
   770  // Expose pallocSum for testing.
   771  type PallocSum pallocSum
   772  
   773  func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
   774  func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
   775  func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
   776  func (m PallocSum) End() uint                      { return pallocSum(m).end() }
   777  
   778  // Expose pallocBits for testing.
   779  type PallocBits pallocBits
   780  
   781  func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
   782  	return (*pallocBits)(b).find(npages, searchIdx)
   783  }
   784  func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
   785  func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
   786  func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
   787  func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
   788  
   789  // SummarizeSlow is a slow but more obviously correct implementation
   790  // of (*pallocBits).summarize. Used for testing.
   791  func SummarizeSlow(b *PallocBits) PallocSum {
   792  	var start, max, end uint
   793  
   794  	const N = uint(len(b)) * 64
   795  	for start < N && (*pageBits)(b).get(start) == 0 {
   796  		start++
   797  	}
   798  	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
   799  		end++
   800  	}
   801  	run := uint(0)
   802  	for i := uint(0); i < N; i++ {
   803  		if (*pageBits)(b).get(i) == 0 {
   804  			run++
   805  		} else {
   806  			run = 0
   807  		}
   808  		if run > max {
   809  			max = run
   810  		}
   811  	}
   812  	return PackPallocSum(start, max, end)
   813  }
   814  
   815  // Expose non-trivial helpers for testing.
   816  func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
   817  
   818  // Given two PallocBits, returns a set of bit ranges where
   819  // they differ.
   820  func DiffPallocBits(a, b *PallocBits) []BitRange {
   821  	ba := (*pageBits)(a)
   822  	bb := (*pageBits)(b)
   823  
   824  	var d []BitRange
   825  	base, size := uint(0), uint(0)
   826  	for i := uint(0); i < uint(len(ba))*64; i++ {
   827  		if ba.get(i) != bb.get(i) {
   828  			if size == 0 {
   829  				base = i
   830  			}
   831  			size++
   832  		} else {
   833  			if size != 0 {
   834  				d = append(d, BitRange{base, size})
   835  			}
   836  			size = 0
   837  		}
   838  	}
   839  	if size != 0 {
   840  		d = append(d, BitRange{base, size})
   841  	}
   842  	return d
   843  }
   844  
   845  // StringifyPallocBits gets the bits in the bit range r from b,
   846  // and returns a string containing the bits as ASCII 0 and 1
   847  // characters.
   848  func StringifyPallocBits(b *PallocBits, r BitRange) string {
   849  	str := ""
   850  	for j := r.I; j < r.I+r.N; j++ {
   851  		if (*pageBits)(b).get(j) != 0 {
   852  			str += "1"
   853  		} else {
   854  			str += "0"
   855  		}
   856  	}
   857  	return str
   858  }
   859  
   860  // Expose pallocData for testing.
   861  type PallocData pallocData
   862  
   863  func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
   864  	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
   865  }
   866  func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
   867  func (d *PallocData) ScavengedSetRange(i, n uint) {
   868  	(*pallocData)(d).scavenged.setRange(i, n)
   869  }
   870  func (d *PallocData) PallocBits() *PallocBits {
   871  	return (*PallocBits)(&(*pallocData)(d).pallocBits)
   872  }
   873  func (d *PallocData) Scavenged() *PallocBits {
   874  	return (*PallocBits)(&(*pallocData)(d).scavenged)
   875  }
   876  
   877  // Expose fillAligned for testing.
   878  func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
   879  
   880  // Expose pageCache for testing.
   881  type PageCache pageCache
   882  
   883  const PageCachePages = pageCachePages
   884  
   885  func NewPageCache(base uintptr, cache, scav uint64) PageCache {
   886  	return PageCache(pageCache{base: base, cache: cache, scav: scav})
   887  }
   888  func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
   889  func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
   890  func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
   891  func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
   892  func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
   893  	return (*pageCache)(c).alloc(npages)
   894  }
   895  func (c *PageCache) Flush(s *PageAlloc) {
   896  	cp := (*pageCache)(c)
   897  	sp := (*pageAlloc)(s)
   898  
   899  	systemstack(func() {
   900  		// None of the tests need any higher-level locking, so we just
   901  		// take the lock internally.
   902  		lock(sp.mheapLock)
   903  		cp.flush(sp)
   904  		unlock(sp.mheapLock)
   905  	})
   906  }
   907  
   908  // Expose chunk index type.
   909  type ChunkIdx chunkIdx
   910  
   911  // Expose pageAlloc for testing. Note that because pageAlloc is
   912  // not in the heap, neither is PageAlloc.
   913  type PageAlloc pageAlloc
   914  
   915  func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
   916  	pp := (*pageAlloc)(p)
   917  
   918  	var addr, scav uintptr
   919  	systemstack(func() {
   920  		// None of the tests need any higher-level locking, so we just
   921  		// take the lock internally.
   922  		lock(pp.mheapLock)
   923  		addr, scav = pp.alloc(npages)
   924  		unlock(pp.mheapLock)
   925  	})
   926  	return addr, scav
   927  }
   928  func (p *PageAlloc) AllocToCache() PageCache {
   929  	pp := (*pageAlloc)(p)
   930  
   931  	var c PageCache
   932  	systemstack(func() {
   933  		// None of the tests need any higher-level locking, so we just
   934  		// take the lock internally.
   935  		lock(pp.mheapLock)
   936  		c = PageCache(pp.allocToCache())
   937  		unlock(pp.mheapLock)
   938  	})
   939  	return c
   940  }
   941  func (p *PageAlloc) Free(base, npages uintptr) {
   942  	pp := (*pageAlloc)(p)
   943  
   944  	systemstack(func() {
   945  		// None of the tests need any higher-level locking, so we just
   946  		// take the lock internally.
   947  		lock(pp.mheapLock)
   948  		pp.free(base, npages)
   949  		unlock(pp.mheapLock)
   950  	})
   951  }
   952  func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
   953  	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
   954  }
   955  func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
   956  	pp := (*pageAlloc)(p)
   957  	systemstack(func() {
   958  		r = pp.scavenge(nbytes, nil, true)
   959  	})
   960  	return
   961  }
   962  func (p *PageAlloc) InUse() []AddrRange {
   963  	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
   964  	for _, r := range p.inUse.ranges {
   965  		ranges = append(ranges, AddrRange{r})
   966  	}
   967  	return ranges
   968  }
   969  
   970  // Returns nil if the PallocData's L2 is missing.
   971  func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
   972  	ci := chunkIdx(i)
   973  	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
   974  }
   975  
   976  // AddrRange is a wrapper around addrRange for testing.
   977  type AddrRange struct {
   978  	addrRange
   979  }
   980  
   981  // MakeAddrRange creates a new address range.
   982  func MakeAddrRange(base, limit uintptr) AddrRange {
   983  	return AddrRange{makeAddrRange(base, limit)}
   984  }
   985  
   986  // Base returns the virtual base address of the address range.
   987  func (a AddrRange) Base() uintptr {
   988  	return a.addrRange.base.addr()
   989  }
   990  
   991  // Limit returns the virtual address of the limit of the address range.
   992  func (a AddrRange) Limit() uintptr {
   993  	return a.addrRange.limit.addr()
   994  }
   995  
   996  // Equals returns true if the two address ranges are exactly equal.
   997  func (a AddrRange) Equals(b AddrRange) bool {
   998  	return a == b
   999  }
  1000  
  1001  // Size returns the size in bytes of the address range.
  1002  func (a AddrRange) Size() uintptr {
  1003  	return a.addrRange.size()
  1004  }
  1005  
  1006  // testSysStat is the sysStat passed to test versions of various
  1007  // runtime structures. We have to keep track of this because
  1008  // otherwise memstats.mappedReady won't line up with other
  1009  // stats in the runtime during tests.
  1010  var testSysStat = &memstats.other_sys
  1011  
  1012  // AddrRanges is a wrapper around addrRanges for testing.
  1013  type AddrRanges struct {
  1014  	addrRanges
  1015  	mutable bool
  1016  }
  1017  
  1018  // NewAddrRanges creates a new empty addrRanges.
  1019  //
  1020  // Note that this initializes addrRanges just like in the
  1021  // runtime, so its memory is persistentalloc'd. Call this
  1022  // function sparingly since the memory it allocates is
  1023  // leaked.
  1024  //
  1025  // This AddrRanges is mutable, so we can test methods like
  1026  // Add.
  1027  func NewAddrRanges() AddrRanges {
  1028  	r := addrRanges{}
  1029  	r.init(testSysStat)
  1030  	return AddrRanges{r, true}
  1031  }
  1032  
  1033  // MakeAddrRanges creates a new addrRanges populated with
  1034  // the ranges in a.
  1035  //
  1036  // The returned AddrRanges is immutable, so methods like
  1037  // Add will fail.
  1038  func MakeAddrRanges(a ...AddrRange) AddrRanges {
  1039  	// Methods that manipulate the backing store of addrRanges.ranges should
  1040  	// not be used on the result from this function (e.g. add) since they may
  1041  	// trigger reallocation. That would normally be fine, except the new
  1042  	// backing store won't come from the heap, but from persistentalloc, so
  1043  	// we'll leak some memory implicitly.
  1044  	ranges := make([]addrRange, 0, len(a))
  1045  	total := uintptr(0)
  1046  	for _, r := range a {
  1047  		ranges = append(ranges, r.addrRange)
  1048  		total += r.Size()
  1049  	}
  1050  	return AddrRanges{addrRanges{
  1051  		ranges:     ranges,
  1052  		totalBytes: total,
  1053  		sysStat:    testSysStat,
  1054  	}, false}
  1055  }
  1056  
  1057  // Ranges returns a copy of the ranges described by the
  1058  // addrRanges.
  1059  func (a *AddrRanges) Ranges() []AddrRange {
  1060  	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
  1061  	for _, r := range a.addrRanges.ranges {
  1062  		result = append(result, AddrRange{r})
  1063  	}
  1064  	return result
  1065  }
  1066  
  1067  // FindSucc returns the successor to base. See addrRanges.findSucc
  1068  // for more details.
  1069  func (a *AddrRanges) FindSucc(base uintptr) int {
  1070  	return a.findSucc(base)
  1071  }
  1072  
  1073  // Add adds a new AddrRange to the AddrRanges.
  1074  //
  1075  // The AddrRange must be mutable (i.e. created by NewAddrRanges),
  1076  // otherwise this method will throw.
  1077  func (a *AddrRanges) Add(r AddrRange) {
  1078  	if !a.mutable {
  1079  		throw("attempt to mutate immutable AddrRanges")
  1080  	}
  1081  	a.add(r.addrRange)
  1082  }
  1083  
  1084  // TotalBytes returns the totalBytes field of the addrRanges.
  1085  func (a *AddrRanges) TotalBytes() uintptr {
  1086  	return a.addrRanges.totalBytes
  1087  }
  1088  
  1089  // BitRange represents a range over a bitmap.
  1090  type BitRange struct {
  1091  	I, N uint // bit index and length in bits
  1092  }
  1093  
  1094  // NewPageAlloc creates a new page allocator for testing and
  1095  // initializes it with the scav and chunks maps. Each key in these maps
  1096  // represents a chunk index and each value is a series of bit ranges to
  1097  // set within each bitmap's chunk.
  1098  //
  1099  // The initialization of the pageAlloc preserves the invariant that if a
  1100  // scavenged bit is set the alloc bit is necessarily unset, so some
  1101  // of the bits described by scav may be cleared in the final bitmap if
  1102  // ranges in chunks overlap with them.
  1103  //
  1104  // scav is optional, and if nil, the scavenged bitmap will be cleared
  1105  // (as opposed to all 1s, which it usually is). Furthermore, every
  1106  // chunk index in scav should also appear in chunks; ones that do
  1107  // not are ignored.
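        //
        // For example, a test (with hypothetical bit ranges) might construct an
        // allocator whose base chunk has its first 4 pages allocated and its last
        // 4 pages scavenged, and release it when done:
        //
        //	p := NewPageAlloc(
        //		map[ChunkIdx][]BitRange{BaseChunkIdx: {{0, 4}}},
        //		map[ChunkIdx][]BitRange{BaseChunkIdx: {{PallocChunkPages - 4, 4}}},
        //	)
        //	defer FreePageAlloc(p)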
  1108  func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
  1109  	p := new(pageAlloc)
  1110  
  1111  	// We've got an entry, so initialize the pageAlloc.
  1112  	p.init(new(mutex), testSysStat, true)
  1113  	lockInit(p.mheapLock, lockRankMheap)
  1114  	for i, init := range chunks {
  1115  		addr := chunkBase(chunkIdx(i))
  1116  
  1117  		// Mark the chunk's existence in the pageAlloc.
  1118  		systemstack(func() {
  1119  			lock(p.mheapLock)
  1120  			p.grow(addr, pallocChunkBytes)
  1121  			unlock(p.mheapLock)
  1122  		})
  1123  
  1124  		// Initialize the bitmap and update pageAlloc metadata.
  1125  		ci := chunkIndex(addr)
  1126  		chunk := p.chunkOf(ci)
  1127  
  1128  		// Clear all the scavenged bits, which p.grow set above.
  1129  		chunk.scavenged.clearRange(0, pallocChunkPages)
  1130  
  1131  		// Simulate the allocation and subsequent free of all pages in
  1132  		// the chunk for the scavenge index. This leaves the index in a
  1133  		// state equivalent to all pages within the chunk being free.
  1134  		p.scav.index.alloc(ci, pallocChunkPages)
  1135  		p.scav.index.free(ci, 0, pallocChunkPages)
  1136  
  1137  		// Apply scavenge state if applicable.
  1138  		if scav != nil {
  1139  			if scvg, ok := scav[i]; ok {
  1140  				for _, s := range scvg {
  1141  					// Ignore the case of s.N == 0. setRange doesn't handle
  1142  					// it and it's a no-op anyway.
  1143  					if s.N != 0 {
  1144  						chunk.scavenged.setRange(s.I, s.N)
  1145  					}
  1146  				}
  1147  			}
  1148  		}
  1149  
  1150  		// Apply alloc state.
  1151  		for _, s := range init {
  1152  			// Ignore the case of s.N == 0. allocRange doesn't handle
  1153  			// it and it's a no-op anyway.
  1154  			if s.N != 0 {
  1155  				chunk.allocRange(s.I, s.N)
  1156  
  1157  				// Make sure the scavenge index is updated.
  1158  				p.scav.index.alloc(ci, s.N)
  1159  			}
  1160  		}
  1161  
  1162  		// Update heap metadata for the allocRange calls above.
  1163  		systemstack(func() {
  1164  			lock(p.mheapLock)
  1165  			p.update(addr, pallocChunkPages, false, false)
  1166  			unlock(p.mheapLock)
  1167  		})
  1168  	}
  1169  
  1170  	return (*PageAlloc)(p)
  1171  }
  1172  
  1173  // FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
  1174  // is called the pageAlloc may no longer be used. The object itself will be
  1175  // collected by the garbage collector once it is no longer live.
  1176  func FreePageAlloc(pp *PageAlloc) {
  1177  	p := (*pageAlloc)(pp)
  1178  
  1179  	// Free all the mapped space for the summary levels.
  1180  	if pageAlloc64Bit != 0 {
  1181  		for l := 0; l < summaryLevels; l++ {
  1182  			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
  1183  		}
  1184  	} else {
  1185  		resSize := uintptr(0)
  1186  		for _, s := range p.summary {
  1187  			resSize += uintptr(cap(s)) * pallocSumBytes
  1188  		}
  1189  		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
  1190  	}
  1191  
  1192  	// Free extra data structures.
  1193  	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
  1194  
  1195  	// Subtract back out whatever we mapped for the summaries.
  1196  	// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
  1197  	// (and in anger should actually be accounted for), and there's no other
  1198  	// way to figure out how much we actually mapped.
  1199  	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
  1200  	testSysStat.add(-int64(p.summaryMappedReady))
  1201  
  1202  	// Free the mapped space for chunks.
  1203  	for i := range p.chunks {
  1204  		if x := p.chunks[i]; x != nil {
  1205  			p.chunks[i] = nil
  1206  			// This memory comes from sysAlloc and will always be page-aligned.
  1207  			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
  1208  		}
  1209  	}
  1210  }
  1211  
  1212  // BaseChunkIdx is a convenient chunkIdx value which works on both
  1213  // 64-bit and 32-bit platforms, allowing the tests to share code
  1214  // between the two.
  1215  //
  1216  // This should not be higher than 0x100*pallocChunkBytes to support
  1217  // mips and mipsle, which only have 31-bit address spaces.
  1218  var BaseChunkIdx = func() ChunkIdx {
  1219  	var prefix uintptr
  1220  	if pageAlloc64Bit != 0 {
  1221  		prefix = 0xc000
  1222  	} else {
  1223  		prefix = 0x100
  1224  	}
  1225  	baseAddr := prefix * pallocChunkBytes
  1226  	if goos.IsAix != 0 {
  1227  		baseAddr += arenaBaseOffset
  1228  	}
  1229  	return ChunkIdx(chunkIndex(baseAddr))
  1230  }()
  1231  
  1232  // PageBase returns an address given a chunk index and a page index
  1233  // relative to that chunk.
  1234  func PageBase(c ChunkIdx, pageIdx uint) uintptr {
  1235  	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
  1236  }
  1237  
  1238  type BitsMismatch struct {
  1239  	Base      uintptr
  1240  	Got, Want uint64
  1241  }
  1242  
  1243  func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
  1244  	ok = true
  1245  
  1246  	// Run on the system stack to avoid stack growth allocation.
  1247  	systemstack(func() {
  1248  		getg().m.mallocing++
  1249  
  1250  		// Lock so that we can safely access the bitmap.
  1251  		lock(&mheap_.lock)
  1252  	chunkLoop:
  1253  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
  1254  			chunk := mheap_.pages.tryChunkOf(i)
  1255  			if chunk == nil {
  1256  				continue
  1257  			}
  1258  			for j := 0; j < pallocChunkPages/64; j++ {
  1259  				// Run over each 64-bit bitmap section and ensure
  1260  				// scavenged is being cleared properly on allocation.
  1261  				// If a used bit and scavenged bit are both set, that's
  1262  				// an error, and could indicate a larger problem, or
  1263  				// an accounting problem.
  1264  				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
  1265  				got := chunk.scavenged[j]
  1266  				if want != got {
  1267  					ok = false
  1268  					if n >= len(mismatches) {
  1269  						break chunkLoop
  1270  					}
  1271  					mismatches[n] = BitsMismatch{
  1272  						Base: chunkBase(i) + uintptr(j)*64*pageSize,
  1273  						Got:  got,
  1274  						Want: want,
  1275  					}
  1276  					n++
  1277  				}
  1278  			}
  1279  		}
  1280  		unlock(&mheap_.lock)
  1281  
  1282  		getg().m.mallocing--
  1283  	})
  1284  	return
  1285  }
  1286  
  1287  func PageCachePagesLeaked() (leaked uintptr) {
  1288  	stopTheWorld(stwForTestPageCachePagesLeaked)
  1289  
  1290  	// Walk over destroyed Ps and look for unflushed caches.
  1291  	deadp := allp[len(allp):cap(allp)]
  1292  	for _, p := range deadp {
  1293  		// Since we're going past len(allp) we may see nil Ps.
  1294  		// Just ignore them.
  1295  		if p != nil {
  1296  			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
  1297  		}
  1298  	}
  1299  
  1300  	startTheWorld()
  1301  	return
  1302  }
  1303  
  1304  var Semacquire = semacquire
  1305  var Semrelease1 = semrelease1
  1306  
  1307  func SemNwait(addr *uint32) uint32 {
  1308  	root := semtable.rootFor(addr)
  1309  	return root.nwait.Load()
  1310  }
  1311  
  1312  const SemTableSize = semTabSize
  1313  
  1314  // SemTable is a wrapper around semTable exported for testing.
  1315  type SemTable struct {
  1316  	semTable
  1317  }
  1318  
  1319  // Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
  1320  func (t *SemTable) Enqueue(addr *uint32) {
  1321  	s := acquireSudog()
  1322  	s.releasetime = 0
  1323  	s.acquiretime = 0
  1324  	s.ticket = 0
  1325  	t.semTable.rootFor(addr).queue(addr, s, false)
  1326  }
  1327  
  1328  // Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
  1329  //
  1330  // Returns true if there actually was a waiter to be dequeued.
  1331  func (t *SemTable) Dequeue(addr *uint32) bool {
  1332  	s, _ := t.semTable.rootFor(addr).dequeue(addr)
  1333  	if s != nil {
  1334  		releaseSudog(s)
  1335  		return true
  1336  	}
  1337  	return false
  1338  }
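
        // A hypothetical round trip through the wrapper; the zero value of SemTable
        // is ready to use:
        //
        //	var sema uint32
        //	var tab SemTable
        //	tab.Enqueue(&sema)
        //	if !tab.Dequeue(&sema) {
        //		panic("expected a queued waiter")
        //	}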
  1339  
  1340  // mspan wrapper for testing.
  1341  type MSpan mspan
  1342  
  1343  // Allocate an mspan for testing.
  1344  func AllocMSpan() *MSpan {
  1345  	var s *mspan
  1346  	systemstack(func() {
  1347  		lock(&mheap_.lock)
  1348  		s = (*mspan)(mheap_.spanalloc.alloc())
  1349  		unlock(&mheap_.lock)
  1350  	})
  1351  	return (*MSpan)(s)
  1352  }
  1353  
  1354  // Free an allocated mspan.
  1355  func FreeMSpan(s *MSpan) {
  1356  	systemstack(func() {
  1357  		lock(&mheap_.lock)
  1358  		mheap_.spanalloc.free(unsafe.Pointer(s))
  1359  		unlock(&mheap_.lock)
  1360  	})
  1361  }
  1362  
  1363  func MSpanCountAlloc(ms *MSpan, bits []byte) int {
  1364  	s := (*mspan)(ms)
  1365  	s.nelems = uintptr(len(bits) * 8)
  1366  	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
  1367  	result := s.countAlloc()
  1368  	s.gcmarkBits = nil
  1369  	return result
  1370  }
  1371  
  1372  const (
  1373  	TimeHistSubBucketBits = timeHistSubBucketBits
  1374  	TimeHistNumSubBuckets = timeHistNumSubBuckets
  1375  	TimeHistNumBuckets    = timeHistNumBuckets
  1376  	TimeHistMinBucketBits = timeHistMinBucketBits
  1377  	TimeHistMaxBucketBits = timeHistMaxBucketBits
  1378  )
  1379  
  1380  type TimeHistogram timeHistogram
  1381  
  1382  // Count returns the count for the given bucket and subBucket indices.
  1383  // Returns true if those indices were valid, otherwise returns the count
  1384  // for the overflow bucket if the index was too large or the underflow
  1385  // bucket if bucket < 0, and false.
  1386  func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
  1387  	t := (*timeHistogram)(th)
  1388  	if bucket < 0 {
  1389  		return t.underflow.Load(), false
  1390  	}
  1391  	i := bucket*TimeHistNumSubBuckets + subBucket
  1392  	if i >= len(t.counts) {
  1393  		return t.overflow.Load(), false
  1394  	}
  1395  	return t.counts[i].Load(), true
  1396  }
  1397  
  1398  func (th *TimeHistogram) Record(duration int64) {
  1399  	(*timeHistogram)(th).record(duration)
  1400  }
  1401  
  1402  var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
  1403  
  1404  func SetIntArgRegs(a int) int {
  1405  	lock(&finlock)
  1406  	old := intArgRegs
  1407  	if a >= 0 {
  1408  		intArgRegs = a
  1409  	}
  1410  	unlock(&finlock)
  1411  	return old
  1412  }
  1413  
  1414  func FinalizerGAsleep() bool {
  1415  	return fingStatus.Load()&fingWait != 0
  1416  }
  1417  
  1418  // For GCTestMoveStackOnNextCall, it's important not to introduce an
  1419  // extra layer of call, since then there's a return before the "real"
  1420  // next call.
  1421  var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
  1422  
  1423  // For GCTestIsReachable, it's important that we do this as a call so
  1424  // escape analysis can see through it.
  1425  func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
  1426  	return gcTestIsReachable(ptrs...)
  1427  }
  1428  
  1429  // For GCTestPointerClass, it's important that we do this as a call so
  1430  // escape analysis can see through it.
  1431  //
  1432  // This is nosplit because gcTestPointerClass is.
  1433  //
  1434  //go:nosplit
  1435  func GCTestPointerClass(p unsafe.Pointer) string {
  1436  	return gcTestPointerClass(p)
  1437  }
  1438  
  1439  const Raceenabled = raceenabled
  1440  
  1441  const (
  1442  	GCBackgroundUtilization            = gcBackgroundUtilization
  1443  	GCGoalUtilization                  = gcGoalUtilization
  1444  	DefaultHeapMinimum                 = defaultHeapMinimum
  1445  	MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
  1446  	MemoryLimitMinHeapGoalHeadroom     = memoryLimitMinHeapGoalHeadroom
  1447  )
  1448  
  1449  type GCController struct {
  1450  	gcControllerState
  1451  }
  1452  
  1453  func NewGCController(gcPercent int, memoryLimit int64) *GCController {
  1454  	// Force the controller to escape. We're going to
  1455  	// do 64-bit atomics on it, and if it gets stack-allocated
  1456  	// on a 32-bit architecture, it may not get the 8-byte
  1457  	// alignment those atomics require.
  1458  	g := Escape(new(GCController))
  1459  	g.gcControllerState.test = true // Mark it as a test copy.
  1460  	g.init(int32(gcPercent), memoryLimit)
  1461  	return g
  1462  }
  1463  
  1464  func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
  1465  	trigger, _ := c.trigger()
  1466  	if c.heapMarked > trigger {
  1467  		trigger = c.heapMarked
  1468  	}
  1469  	c.maxStackScan.Store(stackSize)
  1470  	c.globalsScan.Store(globalsSize)
  1471  	c.heapLive.Store(trigger)
  1472  	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
  1473  	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
  1474  }
  1475  
  1476  func (c *GCController) AssistWorkPerByte() float64 {
  1477  	return c.assistWorkPerByte.Load()
  1478  }
  1479  
  1480  func (c *GCController) HeapGoal() uint64 {
  1481  	return c.heapGoal()
  1482  }
  1483  
  1484  func (c *GCController) HeapLive() uint64 {
  1485  	return c.heapLive.Load()
  1486  }
  1487  
  1488  func (c *GCController) HeapMarked() uint64 {
  1489  	return c.heapMarked
  1490  }
  1491  
  1492  func (c *GCController) Triggered() uint64 {
  1493  	return c.triggered
  1494  }
  1495  
  1496  type GCControllerReviseDelta struct {
  1497  	HeapLive        int64
  1498  	HeapScan        int64
  1499  	HeapScanWork    int64
  1500  	StackScanWork   int64
  1501  	GlobalsScanWork int64
  1502  }
  1503  
  1504  func (c *GCController) Revise(d GCControllerReviseDelta) {
  1505  	c.heapLive.Add(d.HeapLive)
  1506  	c.heapScan.Add(d.HeapScan)
  1507  	c.heapScanWork.Add(d.HeapScanWork)
  1508  	c.stackScanWork.Add(d.StackScanWork)
  1509  	c.globalsScanWork.Add(d.GlobalsScanWork)
  1510  	c.revise()
  1511  }
  1512  
  1513  func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
  1514  	c.assistTime.Store(assistTime)
  1515  	c.endCycle(elapsed, gomaxprocs, false)
  1516  	c.resetLive(bytesMarked)
  1517  	c.commit(false)
  1518  }
  1519  
  1520  func (c *GCController) AddIdleMarkWorker() bool {
  1521  	return c.addIdleMarkWorker()
  1522  }
  1523  
  1524  func (c *GCController) NeedIdleMarkWorker() bool {
  1525  	return c.needIdleMarkWorker()
  1526  }
  1527  
  1528  func (c *GCController) RemoveIdleMarkWorker() {
  1529  	c.removeIdleMarkWorker()
  1530  }
  1531  
  1532  func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
  1533  	c.setMaxIdleMarkWorkers(max)
  1534  }
  1535  
  1536  var alwaysFalse bool
  1537  var escapeSink any
  1538  
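        // Escape forces x to escape to the heap: the compiler cannot prove that
        // alwaysFalse stays false, so it must assume x may be stored in escapeSink,
        // which prevents x from being stack-allocated.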
  1539  func Escape[T any](x T) T {
  1540  	if alwaysFalse {
  1541  		escapeSink = x
  1542  	}
  1543  	return x
  1544  }
  1545  
  1546  // Acquirem blocks preemption.
  1547  func Acquirem() {
  1548  	acquirem()
  1549  }
  1550  
  1551  func Releasem() {
  1552  	releasem(getg().m)
  1553  }
  1554  
  1555  var Timediv = timediv
  1556  
  1557  type PIController struct {
  1558  	piController
  1559  }
  1560  
  1561  func NewPIController(kp, ti, tt, min, max float64) *PIController {
  1562  	return &PIController{piController{
  1563  		kp:  kp,
  1564  		ti:  ti,
  1565  		tt:  tt,
  1566  		min: min,
  1567  		max: max,
  1568  	}}
  1569  }
  1570  
  1571  func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
  1572  	return c.piController.next(input, setpoint, period)
  1573  }
  1574  
  1575  const (
  1576  	CapacityPerProc          = capacityPerProc
  1577  	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
  1578  )
  1579  
  1580  type GCCPULimiter struct {
  1581  	limiter gcCPULimiterState
  1582  }
  1583  
  1584  func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
  1585  	// Force the limiter to escape. We're going to
  1586  	// do 64-bit atomics on it, and if it gets stack-allocated
  1587  	// on a 32-bit architecture, it may not get the 8-byte
  1588  	// alignment those atomics require.
  1589  	l := Escape(new(GCCPULimiter))
  1590  	l.limiter.test = true
  1591  	l.limiter.resetCapacity(now, gomaxprocs)
  1592  	return l
  1593  }
  1594  
  1595  func (l *GCCPULimiter) Fill() uint64 {
  1596  	return l.limiter.bucket.fill
  1597  }
  1598  
  1599  func (l *GCCPULimiter) Capacity() uint64 {
  1600  	return l.limiter.bucket.capacity
  1601  }
  1602  
  1603  func (l *GCCPULimiter) Overflow() uint64 {
  1604  	return l.limiter.overflow
  1605  }
  1606  
  1607  func (l *GCCPULimiter) Limiting() bool {
  1608  	return l.limiter.limiting()
  1609  }
  1610  
  1611  func (l *GCCPULimiter) NeedUpdate(now int64) bool {
  1612  	return l.limiter.needUpdate(now)
  1613  }
  1614  
  1615  func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
  1616  	l.limiter.startGCTransition(enableGC, now)
  1617  }
  1618  
  1619  func (l *GCCPULimiter) FinishGCTransition(now int64) {
  1620  	l.limiter.finishGCTransition(now)
  1621  }
  1622  
  1623  func (l *GCCPULimiter) Update(now int64) {
  1624  	l.limiter.update(now)
  1625  }
  1626  
  1627  func (l *GCCPULimiter) AddAssistTime(t int64) {
  1628  	l.limiter.addAssistTime(t)
  1629  }
  1630  
  1631  func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
  1632  	l.limiter.resetCapacity(now, nprocs)
  1633  }
  1634  
  1635  const ScavengePercent = scavengePercent
  1636  
  1637  type Scavenger struct {
  1638  	Sleep      func(int64) int64
  1639  	Scavenge   func(uintptr) (uintptr, int64)
  1640  	ShouldStop func() bool
  1641  	GoMaxProcs func() int32
  1642  
  1643  	released  atomic.Uintptr
  1644  	scavenger scavengerState
  1645  	stop      chan<- struct{}
  1646  	done      <-chan struct{}
  1647  }
  1648  
  1649  func (s *Scavenger) Start() {
  1650  	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
  1651  		panic("must populate all stubs")
  1652  	}
  1653  
  1654  	// Install hooks.
  1655  	s.scavenger.sleepStub = s.Sleep
  1656  	s.scavenger.scavenge = s.Scavenge
  1657  	s.scavenger.shouldStop = s.ShouldStop
  1658  	s.scavenger.gomaxprocs = s.GoMaxProcs
  1659  
  1660  	// Start up scavenger goroutine, and wait for it to be ready.
  1661  	stop := make(chan struct{})
  1662  	s.stop = stop
  1663  	done := make(chan struct{})
  1664  	s.done = done
  1665  	go func() {
  1666  		// This should match bgscavenge, loosely.
  1667  		s.scavenger.init()
  1668  		s.scavenger.park()
  1669  		for {
  1670  			select {
  1671  			case <-stop:
  1672  				close(done)
  1673  				return
  1674  			default:
  1675  			}
  1676  			released, workTime := s.scavenger.run()
  1677  			if released == 0 {
  1678  				s.scavenger.park()
  1679  				continue
  1680  			}
  1681  			s.released.Add(released)
  1682  			s.scavenger.sleep(workTime)
  1683  		}
  1684  	}()
  1685  	if !s.BlockUntilParked(1e9 /* 1 second */) {
  1686  		panic("timed out waiting for scavenger to get ready")
  1687  	}
  1688  }
  1689  
  1690  // BlockUntilParked blocks until the scavenger parks, or until
  1691  // timeout is exceeded. Returns true if the scavenger parked.
  1692  //
  1693  // Note that in testing, "parked" means something slightly different
  1694  // than in normal operation: there the scavenger also parks to sleep,
  1695  // but in testing it only parks when it actually has no work to do.
  1696  func (s *Scavenger) BlockUntilParked(timeout int64) bool {
  1697  	// Just spin, waiting for it to park.
  1698  	//
  1699  	// The actual parking process is racy with respect to
  1700  	// wakeups, which is fine, but for testing we need something
  1701  	// a bit more robust.
  1702  	start := nanotime()
  1703  	for nanotime()-start < timeout {
  1704  		lock(&s.scavenger.lock)
  1705  		parked := s.scavenger.parked
  1706  		unlock(&s.scavenger.lock)
  1707  		if parked {
  1708  			return true
  1709  		}
  1710  		Gosched()
  1711  	}
  1712  	return false
  1713  }
  1714  
  1715  // Released returns how many bytes the scavenger released.
  1716  func (s *Scavenger) Released() uintptr {
  1717  	return s.released.Load()
  1718  }
  1719  
  1720  // Wake wakes up a parked scavenger to keep running.
  1721  func (s *Scavenger) Wake() {
  1722  	s.scavenger.wake()
  1723  }
  1724  
  1725  // Stop cleans up the scavenger's resources. The scavenger
  1726  // must be parked for this to work.
  1727  func (s *Scavenger) Stop() {
  1728  	lock(&s.scavenger.lock)
  1729  	parked := s.scavenger.parked
  1730  	unlock(&s.scavenger.lock)
  1731  	if !parked {
  1732  		panic("tried to clean up scavenger that is not parked")
  1733  	}
  1734  	close(s.stop)
  1735  	s.Wake()
  1736  	<-s.done
  1737  }
  1738  
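// Illustrative sketch, not part of this file: the Scavenger above is a test
// harness around scavengerState. A test in package runtime_test installs the
// four stubs, starts the goroutine, wakes it, and waits for it to park again
// once the stubbed work runs out. The stub bodies below are placeholders for
// illustration only.
//
//	var s runtime.Scavenger
//	s.Sleep = func(ns int64) int64 { return ns } // pretend the full sleep elapsed
//	s.ShouldStop = func() bool { return false }
//	s.GoMaxProcs = func() int32 { return 1 }
//	worked := false
//	s.Scavenge = func(n uintptr) (uintptr, int64) {
//		if worked {
//			return 0, 0 // no more work; the scavenger will park
//		}
//		worked = true
//		return n, 10 // claim n bytes were released in 10ns of work
//	}
//	s.Start()
//	s.Wake() // kick the parked scavenger into a run
//	if !s.BlockUntilParked(1e9 /* 1 second */) {
//		// The scavenger never parked again; something is wrong with the stubs.
//	}
//	released := s.Released()
//	s.Stop()
//	_ = released
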
  1739  type ScavengeIndex struct {
  1740  	i scavengeIndex
  1741  }
  1742  
  1743  func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
  1744  	s := new(ScavengeIndex)
  1745  	// This is a bit lazy but we easily guarantee we'll be able
  1746  	// to reference all the relevant chunks. The worst-case
  1747  	// memory usage here is 512 MiB, but tests generally use
  1748  	// small offsets from BaseChunkIdx, which results in ~100s
  1749  	// of KiB in memory use.
  1750  	//
  1751  	// This may still be worth making better, at least by sharing
  1752  	// this fairly large array across calls with a sync.Pool or
  1753  	// something. Currently, when the tests are run serially,
  1754  	// it takes around 0.5s. Not all that much, but if we have
  1755  	// a lot of tests like this it could add up.
  1756  	s.i.chunks = make([]atomicScavChunkData, max)
  1757  	s.i.min.Store(uintptr(min))
  1758  	s.i.max.Store(uintptr(max))
  1759  	s.i.minHeapIdx.Store(uintptr(min))
  1760  	s.i.test = true
  1761  	return s
  1762  }
  1763  
  1764  func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
  1765  	ci, off := s.i.find(force)
  1766  	return ChunkIdx(ci), off
  1767  }
  1768  
  1769  func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
  1770  	sc, ec := chunkIndex(base), chunkIndex(limit-1)
  1771  	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
  1772  
  1773  	if sc == ec {
  1774  		// The range doesn't cross any chunk boundaries.
  1775  		s.i.alloc(sc, ei+1-si)
  1776  	} else {
  1777  		// The range crosses at least one chunk boundary.
  1778  		s.i.alloc(sc, pallocChunkPages-si)
  1779  		for c := sc + 1; c < ec; c++ {
  1780  			s.i.alloc(c, pallocChunkPages)
  1781  		}
  1782  		s.i.alloc(ec, ei+1)
  1783  	}
  1784  }
  1785  
  1786  func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
  1787  	sc, ec := chunkIndex(base), chunkIndex(limit-1)
  1788  	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
  1789  
  1790  	if sc == ec {
  1791  		// The range doesn't cross any chunk boundaries.
  1792  		s.i.free(sc, si, ei+1-si)
  1793  	} else {
  1794  		// The range crosses at least one chunk boundary.
  1795  		s.i.free(sc, si, pallocChunkPages-si)
  1796  		for c := sc + 1; c < ec; c++ {
  1797  			s.i.free(c, 0, pallocChunkPages)
  1798  		}
  1799  		s.i.free(ec, 0, ei+1)
  1800  	}
  1801  }
  1802  
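// Illustrative sketch, not part of this file: a test in package runtime_test
// can build a small index, mark a boundary-crossing range as allocated,
// advance a generation, free the range, and then ask the index where to
// scavenge. BaseChunkIdx and PageBase are assumed to be the chunk/page
// helpers defined earlier in this file.
//
//	b := runtime.BaseChunkIdx
//	s := runtime.NewScavengeIndex(b, b+64)
//	lo, hi := runtime.PageBase(b, 0), runtime.PageBase(b+2, 0)
//	s.AllocRange(lo, hi) // spans two chunks, exercising the boundary-crossing path
//	s.NextGen()
//	s.FreeRange(lo, hi)
//	ci, off := s.Find(false) // background (non-forced) search
//	_, _ = ci, off
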
  1803  func (s *ScavengeIndex) ResetSearchAddrs() {
  1804  	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
  1805  		addr, marked := a.Load()
  1806  		if marked {
  1807  			a.StoreUnmark(addr, addr)
  1808  		}
  1809  		a.Clear()
  1810  	}
  1811  	s.i.freeHWM = minOffAddr
  1812  }
  1813  
  1814  func (s *ScavengeIndex) NextGen() {
  1815  	s.i.nextGen()
  1816  }
  1817  
  1818  func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
  1819  	s.i.setEmpty(chunkIdx(ci))
  1820  }
  1821  
  1822  func (s *ScavengeIndex) SetNoHugePage(ci ChunkIdx) {
  1823  	s.i.setNoHugePage(chunkIdx(ci))
  1824  }
  1825  
  1826  func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
  1827  	sc0 := scavChunkData{
  1828  		gen:            gen,
  1829  		inUse:          inUse,
  1830  		lastInUse:      lastInUse,
  1831  		scavChunkFlags: scavChunkFlags(flags),
  1832  	}
  1833  	scp := sc0.pack()
  1834  	sc1 := unpackScavChunkData(scp)
  1835  	return sc0 == sc1
  1836  }
  1837  
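// Illustrative sketch, not part of this file: the round-trip check above
// lends itself to property-style tests over arbitrary field values, e.g.
//
//	if !runtime.CheckPackScavChunkData(1, 12, 34, 0b01) {
//		// Packing and unpacking scavChunkData lost information.
//	}
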
  1838  const GTrackingPeriod = gTrackingPeriod
  1839  
  1840  var ZeroBase = unsafe.Pointer(&zerobase)
  1841  
  1842  const UserArenaChunkBytes = userArenaChunkBytes
  1843  
  1844  type UserArena struct {
  1845  	arena *userArena
  1846  }
  1847  
  1848  func NewUserArena() *UserArena {
  1849  	return &UserArena{newUserArena()}
  1850  }
  1851  
  1852  func (a *UserArena) New(out *any) {
  1853  	i := efaceOf(out)
  1854  	typ := i._type
  1855  	if typ.Kind_&kindMask != kindPtr {
  1856  		panic("new result of non-ptr type")
  1857  	}
  1858  	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
  1859  	i.data = a.arena.new(typ)
  1860  }
  1861  
  1862  func (a *UserArena) Slice(sl any, cap int) {
  1863  	a.arena.slice(sl, cap)
  1864  }
  1865  
  1866  func (a *UserArena) Free() {
  1867  	a.arena.free()
  1868  }
  1869  
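// Illustrative sketch, not part of this file: the wrapper above mirrors the
// user arena API closely enough that a test in package runtime_test can
// allocate into an arena and release it in one go. The type T is a
// placeholder for illustration.
//
//	type T struct{ X, Y int }
//	a := runtime.NewUserArena()
//	var out any = (*T)(nil)
//	a.New(&out)      // out now holds a *T backed by arena memory
//	p := out.(*T)
//	p.X = 1
//	var sl []T
//	a.Slice(&sl, 10) // back a []T with capacity 10 using arena memory
//	a.Free()         // release the arena; p and sl must not be used afterwards
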
  1870  func GlobalWaitingArenaChunks() int {
  1871  	n := 0
  1872  	systemstack(func() {
  1873  		lock(&mheap_.lock)
  1874  		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
  1875  			n++
  1876  		}
  1877  		unlock(&mheap_.lock)
  1878  	})
  1879  	return n
  1880  }
  1881  
  1882  func UserArenaClone[T any](s T) T {
  1883  	return arena_heapify(s).(T)
  1884  }
  1885  
  1886  var AlignUp = alignUp
  1887  
  1888  // BlockUntilEmptyFinalizerQueue blocks until either the finalizer
  1889  // queue is emptied (and the finalizers have executed) or the timeout
  1890  // is reached. Returns true if the finalizer queue was emptied.
  1891  func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
  1892  	start := nanotime()
  1893  	for nanotime()-start < timeout {
  1894  		lock(&finlock)
  1895  		// We know the queue has been drained when both finq is nil
  1896  		// and the finalizer g has stopped executing.
  1897  		empty := finq == nil
  1898  		empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait
  1899  		unlock(&finlock)
  1900  		if empty {
  1901  			return true
  1902  		}
  1903  		Gosched()
  1904  	}
  1905  	return false
  1906  }
  1907  
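// Illustrative sketch, not part of this file: tests that register finalizers
// typically make the object unreachable, force a GC cycle, and then use the
// helper above to wait for the finalizer goroutine to drain. This is a
// minimal version assuming the time package is imported; real tests take
// more care to keep the object from staying reachable via the stack.
//
//	type obj struct{ x int }
//	ran := make(chan struct{})
//	runtime.SetFinalizer(new(obj), func(*obj) { close(ran) })
//	runtime.GC()
//	if !runtime.BlockUntilEmptyFinalizerQueue(int64(time.Second)) {
//		// Timed out waiting for the finalizer queue to drain.
//	}
//	<-ran
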
  1908  func FrameStartLine(f *Frame) int {
  1909  	return f.startLine
  1910  }
  1911  
  1912  // PersistentAlloc allocates some memory that lives outside the Go heap.
  1913  // This memory will never be freed; use sparingly.
  1914  func PersistentAlloc(n uintptr) unsafe.Pointer {
  1915  	return persistentalloc(n, 0, &memstats.other_sys)
  1916  }
  1917  
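// Illustrative sketch, not part of this file: because PersistentAlloc returns
// raw, never-freed memory, callers typically reinterpret the pointer
// themselves, e.g.
//
//	p := runtime.PersistentAlloc(64) // 64 bytes outside the Go heap, never freed
//	b := (*[64]byte)(p)
//	b[0] = 1
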
  1918  // FPCallers works like Callers and uses frame pointer unwinding to populate
  1919  // pcBuf with the return addresses of the physical frames on the stack.
  1920  func FPCallers(pcBuf []uintptr) int {
  1921  	return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
  1922  }
  1923  
  1924  const FramePointerEnabled = framepointer_enabled
  1925  
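// Illustrative sketch, not part of this file: FPCallers is only meaningful
// where frame pointers are maintained, so callers gate on the constant
// above, e.g.
//
//	if runtime.FramePointerEnabled {
//		pcs := make([]uintptr, 32)
//		pcs = pcs[:runtime.FPCallers(pcs)] // return addresses of the physical frames
//	}
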
  1926  var (
  1927  	IsPinned      = isPinned
  1928  	GetPinCounter = pinnerGetPinCounter
  1929  )
  1930  
  1931  func SetPinnerLeakPanic(f func()) {
  1932  	pinnerLeakPanic = f
  1933  }
  1934  func GetPinnerLeakPanic() func() {
  1935  	return pinnerLeakPanic
  1936  }
  1937  
