Source file src/runtime/mbitmap.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: type and heap bitmaps.
     6  //
     7  // Stack, data, and bss bitmaps
     8  //
     9  // Stack frames and global variables in the data and bss sections are
    10  // described by bitmaps with 1 bit per pointer-sized word. A "1" bit
    11  // means the word is a live pointer to be visited by the GC (referred to
    12  // as "pointer"). A "0" bit means the word should be ignored by GC
    13  // (referred to as "scalar", though it could be a dead pointer value).
    14  //
    15  // Heap bitmap
    16  //
    17  // The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
    18  // recording whether a pointer is stored in that word or not. This bitmap
    19  // is stored in the heapArena metadata backing each heap arena.
    20  // That is, if ha is the heapArena for the arena starting at "start",
    21  // then ha.bitmap[0] holds the 64 bits for the 64 words "start"
    22  // through start+63*ptrSize, ha.bitmap[1] holds the entries for
    23  // start+64*ptrSize through start+127*ptrSize, and so on.
    24  // Bits correspond to words in little-endian order. ha.bitmap[0]&1 represents
    25  // the word at "start", ha.bitmap[0]>>1&1 represents the word at start+ptrSize, etc.
    26  // (For 32-bit platforms, s/64/32/.)
    27  //
    28  // We also keep a noMorePtrs bitmap which allows us to stop scanning
    29  // the heap bitmap early in certain situations. If ha.noMorePtrs[i]>>j&1
    30  // is 1, then the object containing the last word described by ha.bitmap[8*i+j]
    31  // has no more pointers beyond those described by ha.bitmap[8*i+j].
    32  // If ha.noMorePtrs[i]>>j&1 is set, the entries in ha.bitmap[8*i+j+1] and
    33  // beyond must all be zero until the start of the next object.
    34  //
    35  // The bitmap for noscan spans is set to all zero at span allocation time.
    36  //
    37  // The bitmap for unallocated objects in scannable spans is not maintained
    38  // (can be junk).
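        //
        // For illustration (not a runtime API), the pointer bit for a heap
        // address addr can be located roughly as follows, mirroring the
        // lookup performed by heapBitsForAddr below:
        //
        //	ai := arenaIndex(addr)
        //	ha := mheap_.arenas[ai.l1()][ai.l2()]
        //	word := addr / goarch.PtrSize % heapArenaWords
        //	isPtr := ha.bitmap[word/ptrBits]>>(word%ptrBits)&1 != 0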
    39  
    40  package runtime
    41  
    42  import (
    43  	"internal/goarch"
    44  	"runtime/internal/atomic"
    45  	"runtime/internal/sys"
    46  	"unsafe"
    47  )
    48  
    49  // addb returns the byte pointer p+n.
    50  //
    51  //go:nowritebarrier
    52  //go:nosplit
    53  func addb(p *byte, n uintptr) *byte {
    54  	// Note: wrote out full expression instead of calling add(p, n)
    55  	// to reduce the number of temporaries generated by the
    56  	// compiler for this trivial expression during inlining.
    57  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
    58  }
    59  
    60  // subtractb returns the byte pointer p-n.
    61  //
    62  //go:nowritebarrier
    63  //go:nosplit
    64  func subtractb(p *byte, n uintptr) *byte {
    65  	// Note: wrote out full expression instead of calling add(p, -n)
    66  	// to reduce the number of temporaries generated by the
    67  	// compiler for this trivial expression during inlining.
    68  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
    69  }
    70  
    71  // add1 returns the byte pointer p+1.
    72  //
    73  //go:nowritebarrier
    74  //go:nosplit
    75  func add1(p *byte) *byte {
    76  	// Note: wrote out full expression instead of calling addb(p, 1)
    77  	// to reduce the number of temporaries generated by the
    78  	// compiler for this trivial expression during inlining.
    79  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
    80  }
    81  
    82  // subtract1 returns the byte pointer p-1.
    83  //
    84  // nosplit because it is used during write barriers and must not be preempted.
    85  //
    86  //go:nowritebarrier
    87  //go:nosplit
    88  func subtract1(p *byte) *byte {
    89  	// Note: wrote out full expression instead of calling subtractb(p, 1)
    90  	// to reduce the number of temporaries generated by the
    91  	// compiler for this trivial expression during inlining.
    92  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
    93  }
    94  
    95  // markBits provides access to the mark bit for an object in the heap.
    96  // bytep points to the byte holding the mark bit.
    97  // mask is a byte with a single bit set that can be &ed with *bytep
    98  // to see if the bit has been set.
    99  // *m.bytep&m.mask != 0 indicates the mark bit is set.
   100  // index can be used along with span information to generate
   101  // the address of the object in the heap.
   102  // We maintain one set of mark bits for allocation and one for
   103  // marking purposes.
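        //
        // For example (a sketch using the helpers defined below), the mark
        // bit for object index i in span s can be inspected with:
        //
        //	mbits := s.markBitsForIndex(i)
        //	if mbits.isMarked() { ... }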
   104  type markBits struct {
   105  	bytep *uint8
   106  	mask  uint8
   107  	index uintptr
   108  }
   109  
   110  //go:nosplit
   111  func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
   112  	bytep, mask := s.allocBits.bitp(allocBitIndex)
   113  	return markBits{bytep, mask, allocBitIndex}
   114  }
   115  
   116  // refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
   117  // and negates them so that ctz (count trailing zeros) instructions
   118  // can be used. It then places these 8 bytes into the cached 64 bit
   119  // s.allocCache.
   120  func (s *mspan) refillAllocCache(whichByte uintptr) {
   121  	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
   122  	aCache := uint64(0)
   123  	aCache |= uint64(bytes[0])
   124  	aCache |= uint64(bytes[1]) << (1 * 8)
   125  	aCache |= uint64(bytes[2]) << (2 * 8)
   126  	aCache |= uint64(bytes[3]) << (3 * 8)
   127  	aCache |= uint64(bytes[4]) << (4 * 8)
   128  	aCache |= uint64(bytes[5]) << (5 * 8)
   129  	aCache |= uint64(bytes[6]) << (6 * 8)
   130  	aCache |= uint64(bytes[7]) << (7 * 8)
   131  	s.allocCache = ^aCache
   132  }
   133  
   134  // nextFreeIndex returns the index of the next free object in s at
   135  // or after s.freeindex.
   136  // There are hardware instructions that can be used to make this
   137  // faster if profiling warrants it.
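        //
        // For illustration, suppose the first byte of s.allocBits is
        // 0b00001111 (objects 0 through 3 allocated, 4 through 7 free) and the
        // rest of the cached word is zero. refillAllocCache stores the negation
        // ^uint64(0x0f) in s.allocCache, so TrailingZeros64 returns 4 and the
        // first free object in that cached word is at bit offset 4.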
   138  func (s *mspan) nextFreeIndex() uintptr {
   139  	sfreeindex := s.freeindex
   140  	snelems := s.nelems
   141  	if sfreeindex == snelems {
   142  		return sfreeindex
   143  	}
   144  	if sfreeindex > snelems {
   145  		throw("s.freeindex > s.nelems")
   146  	}
   147  
   148  	aCache := s.allocCache
   149  
   150  	bitIndex := sys.TrailingZeros64(aCache)
   151  	for bitIndex == 64 {
   152  		// Move index to start of next cached bits.
   153  		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
   154  		if sfreeindex >= snelems {
   155  			s.freeindex = snelems
   156  			return snelems
   157  		}
   158  		whichByte := sfreeindex / 8
   159  		// Refill s.allocCache with the next 64 alloc bits.
   160  		s.refillAllocCache(whichByte)
   161  		aCache = s.allocCache
   162  		bitIndex = sys.TrailingZeros64(aCache)
   163  		// nothing available in cached bits
   164  		// grab the next 8 bytes and try again.
   165  	}
   166  	result := sfreeindex + uintptr(bitIndex)
   167  	if result >= snelems {
   168  		s.freeindex = snelems
   169  		return snelems
   170  	}
   171  
   172  	s.allocCache >>= uint(bitIndex + 1)
   173  	sfreeindex = result + 1
   174  
   175  	if sfreeindex%64 == 0 && sfreeindex != snelems {
   176  		// We just incremented s.freeindex so it isn't 0.
   177  		// As each 1 in s.allocCache was encountered and used for allocation
   178  		// it was shifted away. At this point s.allocCache contains all 0s.
   179  		// Refill s.allocCache so that it corresponds
   180  		// to the bits at s.allocBits starting at s.freeindex.
   181  		whichByte := sfreeindex / 8
   182  		s.refillAllocCache(whichByte)
   183  	}
   184  	s.freeindex = sfreeindex
   185  	return result
   186  }
   187  
   188  // isFree reports whether the index'th object in s is unallocated.
   189  //
   190  // The caller must ensure s.state is mSpanInUse, and there must have
   191  // been no preemption points since ensuring this (which could allow a
   192  // GC transition, which would allow the state to change).
   193  func (s *mspan) isFree(index uintptr) bool {
   194  	if index < s.freeIndexForScan {
   195  		return false
   196  	}
   197  	bytep, mask := s.allocBits.bitp(index)
   198  	return *bytep&mask == 0
   199  }
   200  
   201  // divideByElemSize returns n/s.elemsize.
   202  // n must be within [0, s.npages*_PageSize),
   203  // or may be exactly s.npages*_PageSize
   204  // if s.elemsize is from sizeclasses.go.
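        //
        // For example, with s.elemsize = 48 the multiply-and-shift must yield
        // 96/48 = 2 for n = 96 and 1 for n = 95; s.divMul is chosen by
        // mksizeclasses.go so that this holds for every n in the allowed range.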
   205  //
   206  // nosplit, because it is called by objIndex, which is nosplit
   207  //
   208  //go:nosplit
   209  func (s *mspan) divideByElemSize(n uintptr) uintptr {
   210  	const doubleCheck = false
   211  
   212  	// See explanation in mksizeclasses.go's computeDivMagic.
   213  	q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)
   214  
   215  	if doubleCheck && q != n/s.elemsize {
   216  		println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
   217  		throw("bad magic division")
   218  	}
   219  	return q
   220  }
   221  
   222  // nosplit, because it is called by other nosplit code like findObject
   223  //
   224  //go:nosplit
   225  func (s *mspan) objIndex(p uintptr) uintptr {
   226  	return s.divideByElemSize(p - s.base())
   227  }
   228  
   229  func markBitsForAddr(p uintptr) markBits {
   230  	s := spanOf(p)
   231  	objIndex := s.objIndex(p)
   232  	return s.markBitsForIndex(objIndex)
   233  }
   234  
   235  func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
   236  	bytep, mask := s.gcmarkBits.bitp(objIndex)
   237  	return markBits{bytep, mask, objIndex}
   238  }
   239  
   240  func (s *mspan) markBitsForBase() markBits {
   241  	return markBits{&s.gcmarkBits.x, uint8(1), 0}
   242  }
   243  
   244  // isMarked reports whether mark bit m is set.
   245  func (m markBits) isMarked() bool {
   246  	return *m.bytep&m.mask != 0
   247  }
   248  
   249  // setMarked sets the marked bit in the markbits, atomically.
   250  func (m markBits) setMarked() {
   251  	// Might be racing with other updates, so use atomic update always.
   252  	// We used to be clever here and use a non-atomic update in certain
   253  	// cases, but it's not worth the risk.
   254  	atomic.Or8(m.bytep, m.mask)
   255  }
   256  
   257  // setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
   258  func (m markBits) setMarkedNonAtomic() {
   259  	*m.bytep |= m.mask
   260  }
   261  
   262  // clearMarked clears the marked bit in the markbits, atomically.
   263  func (m markBits) clearMarked() {
   264  	// Might be racing with other updates, so use atomic update always.
   265  	// We used to be clever here and use a non-atomic update in certain
   266  	// cases, but it's not worth the risk.
   267  	atomic.And8(m.bytep, ^m.mask)
   268  }
   269  
   270  // markBitsForSpan returns the markBits for the span base address base.
   271  func markBitsForSpan(base uintptr) (mbits markBits) {
   272  	mbits = markBitsForAddr(base)
   273  	if mbits.mask != 1 {
   274  		throw("markBitsForSpan: unaligned start")
   275  	}
   276  	return mbits
   277  }
   278  
   279  // advance advances the markBits to the next object in the span.
   280  func (m *markBits) advance() {
   281  	if m.mask == 1<<7 {
   282  		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
   283  		m.mask = 1
   284  	} else {
   285  		m.mask = m.mask << 1
   286  	}
   287  	m.index++
   288  }
   289  
   290  // clobberdeadPtr is a special value that is used by the compiler to
   291  // clobber dead stack slots, when -clobberdead flag is set.
   292  const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
   293  
   294  // badPointer reports a bad pointer found in the Go heap and throws.
   295  func badPointer(s *mspan, p, refBase, refOff uintptr) {
   296  	// Typically this indicates an incorrect use
   297  	// of unsafe or cgo to store a bad pointer in
   298  	// the Go heap. It may also indicate a runtime
   299  	// bug.
   300  	//
   301  	// TODO(austin): We could be more aggressive
   302  	// and detect pointers to unallocated objects
   303  	// in allocated spans.
   304  	printlock()
   305  	print("runtime: pointer ", hex(p))
   306  	if s != nil {
   307  		state := s.state.get()
   308  		if state != mSpanInUse {
   309  			print(" to unallocated span")
   310  		} else {
   311  			print(" to unused region of span")
   312  		}
   313  		print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
   314  	}
   315  	print("\n")
   316  	if refBase != 0 {
   317  		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
   318  		gcDumpObject("object", refBase, refOff)
   319  	}
   320  	getg().m.traceback = 2
   321  	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
   322  }
   323  
   324  // findObject returns the base address for the heap object containing
   325  // the address p, the object's span, and the index of the object in s.
   326  // If p does not point into a heap object, it returns base == 0.
   327  //
   328  // If p is an invalid heap pointer and debug.invalidptr != 0,
   329  // findObject panics.
   330  //
   331  // refBase and refOff optionally give the base address of the object
   332  // in which the pointer p was found and the byte offset at which it
   333  // was found. These are used for error reporting.
   334  //
   335  // It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
   336  // Since p is a uintptr, it would not be adjusted if the stack were to move.
   337  //
   338  //go:nosplit
   339  func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
   340  	s = spanOf(p)
   341  	// If s is nil, the virtual address has never been part of the heap.
   342  	// This pointer may be to some mmap'd region, so we allow it.
   343  	if s == nil {
   344  		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
   345  			// Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
   346  			// as they are the only platforms where the compiler's clobberdead mode
   347  			// is implemented. On these platforms clobberdeadPtr cannot be a valid address.
   348  			badPointer(s, p, refBase, refOff)
   349  		}
   350  		return
   351  	}
   352  	// If p is a bad pointer, it may not be in s's bounds.
   353  	//
   354  	// Check s.state to synchronize with span initialization
   355  	// before checking other fields. See also spanOfHeap.
   356  	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
   357  		// Pointers into stacks are also ok; the runtime manages these explicitly.
   358  		if state == mSpanManual {
   359  			return
   360  		}
   361  		// The following ensures that we are rigorous about what data
   362  		// structures hold valid pointers.
   363  		if debug.invalidptr != 0 {
   364  			badPointer(s, p, refBase, refOff)
   365  		}
   366  		return
   367  	}
   368  
   369  	objIndex = s.objIndex(p)
   370  	base = s.base() + objIndex*s.elemsize
   371  	return
   372  }
   373  
   374  // reflect_verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
   375  //
   376  //go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
   377  func reflect_verifyNotInHeapPtr(p uintptr) bool {
   378  	// Conversion to a pointer is ok as long as findObject above does not call badPointer.
   379  	// Since we're already promised that p doesn't point into the heap, just disallow heap
   380  	// pointers and the special clobbered pointer.
   381  	return spanOf(p) == nil && p != clobberdeadPtr
   382  }
   383  
   384  const ptrBits = 8 * goarch.PtrSize
   385  
   386  // heapBits provides access to the bitmap bits for a single heap word.
   387  // The methods on heapBits take value receivers so that the compiler
   388  // can more easily inline calls to those methods and registerize the
   389  // struct fields independently.
   390  type heapBits struct {
   391  	// heapBits will report on pointers in the range [addr,addr+size).
   392  	// The low bit of mask contains the pointerness of the word at addr
   393  	// (assuming valid>0).
   394  	addr, size uintptr
   395  
   396  	// The next few pointer bits representing words starting at addr.
   397  	// Those bits already returned by next() are zeroed.
   398  	mask uintptr
   399  	// Number of bits in mask that are valid. mask is always less than 1<<valid.
   400  	valid uintptr
   401  }
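
        // A typical scan over the pointer words of an object, as used by the
        // garbage collector and by bulkBarrierPreWrite below, looks like:
        //
        //	h := heapBitsForAddr(b, n)
        //	for {
        //		var addr uintptr
        //		if h, addr = h.next(); addr == 0 {
        //			break
        //		}
        //		// the word at addr holds a pointer
        //	}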
   402  
   403  // heapBitsForAddr returns the heapBits for the address addr.
   404  // The caller must ensure [addr,addr+size) is in an allocated span.
   405  // In particular, be careful not to point past the end of an object.
   406  //
   407  // nosplit because it is used during write barriers and must not be preempted.
   408  //
   409  //go:nosplit
   410  func heapBitsForAddr(addr, size uintptr) heapBits {
   411  	// Find arena
   412  	ai := arenaIndex(addr)
   413  	ha := mheap_.arenas[ai.l1()][ai.l2()]
   414  
   415  	// Word index in arena.
   416  	word := addr / goarch.PtrSize % heapArenaWords
   417  
   418  	// Word index and bit offset in bitmap array.
   419  	idx := word / ptrBits
   420  	off := word % ptrBits
   421  
   422  	// Grab relevant bits of bitmap.
   423  	mask := ha.bitmap[idx] >> off
   424  	valid := ptrBits - off
   425  
   426  	// Process depending on where the object ends.
   427  	nptr := size / goarch.PtrSize
   428  	if nptr < valid {
   429  		// Bits for this object end before the end of this bitmap word.
   430  		// Squash bits for the following objects.
   431  		mask &= 1<<(nptr&(ptrBits-1)) - 1
   432  		valid = nptr
   433  	} else if nptr == valid {
   434  		// Bits for this object end at exactly the end of this bitmap word.
   435  		// All good.
   436  	} else {
   437  		// Bits for this object extend into the next bitmap word. See if there
   438  		// may be any pointers recorded there.
   439  		if uintptr(ha.noMorePtrs[idx/8])>>(idx%8)&1 != 0 {
   440  			// No more pointers in this object after this bitmap word.
   441  			// Update size so we know not to look there.
   442  			size = valid * goarch.PtrSize
   443  		}
   444  	}
   445  
   446  	return heapBits{addr: addr, size: size, mask: mask, valid: valid}
   447  }
   448  
   449  // next returns the (absolute) address of the next known pointer and
   450  // a heapBits iterator representing any remaining pointers.
   451  // If there are no more pointers, returns address 0.
   452  // Note that next does not modify h. The caller must record the result.
   453  //
   454  // nosplit because it is used during write barriers and must not be preempted.
   455  //
   456  //go:nosplit
   457  func (h heapBits) next() (heapBits, uintptr) {
   458  	for {
   459  		if h.mask != 0 {
   460  			var i int
   461  			if goarch.PtrSize == 8 {
   462  				i = sys.TrailingZeros64(uint64(h.mask))
   463  			} else {
   464  				i = sys.TrailingZeros32(uint32(h.mask))
   465  			}
   466  			h.mask ^= uintptr(1) << (i & (ptrBits - 1))
   467  			return h, h.addr + uintptr(i)*goarch.PtrSize
   468  		}
   469  
   470  		// Skip words that we've already processed.
   471  		h.addr += h.valid * goarch.PtrSize
   472  		h.size -= h.valid * goarch.PtrSize
   473  		if h.size == 0 {
   474  			return h, 0 // no more pointers
   475  		}
   476  
   477  		// Grab more bits and try again.
   478  		h = heapBitsForAddr(h.addr, h.size)
   479  	}
   480  }
   481  
   482  // nextFast is like next, but can return 0 even when there are more pointers
   483  // to be found. Callers should call next if nextFast returns 0 as its second
   484  // return value.
   485  //
   486  //	if addr, h = h.nextFast(); addr == 0 {
   487  //	    if addr, h = h.next(); addr == 0 {
   488  //	        ... no more pointers ...
   489  //	    }
   490  //	}
   491  //	... process pointer at addr ...
   492  //
   493  // nextFast is designed to be inlineable.
   494  //
   495  //go:nosplit
   496  func (h heapBits) nextFast() (heapBits, uintptr) {
   497  	// TESTQ/JEQ
   498  	if h.mask == 0 {
   499  		return h, 0
   500  	}
   501  	// BSFQ
   502  	var i int
   503  	if goarch.PtrSize == 8 {
   504  		i = sys.TrailingZeros64(uint64(h.mask))
   505  	} else {
   506  		i = sys.TrailingZeros32(uint32(h.mask))
   507  	}
   508  	// BTCQ
   509  	h.mask ^= uintptr(1) << (i & (ptrBits - 1))
   510  	// LEAQ (XX)(XX*8)
   511  	return h, h.addr + uintptr(i)*goarch.PtrSize
   512  }
   513  
   514  // bulkBarrierPreWrite executes a write barrier
   515  // for every pointer slot in the memory range [src, src+size),
   516  // using pointer/scalar information from [dst, dst+size).
   517  // This executes the write barriers necessary before a memmove.
   518  // src, dst, and size must be pointer-aligned.
   519  // The range [dst, dst+size) must lie within a single object.
   520  // It does not perform the actual writes.
   521  //
   522  // As a special case, src == 0 indicates that this is being used for a
   523  // memclr. bulkBarrierPreWrite will pass 0 for the src of each write
   524  // barrier.
   525  //
   526  // Callers should call bulkBarrierPreWrite immediately before
   527  // calling memmove(dst, src, size). This function is marked nosplit
   528  // to avoid being preempted; the GC must not stop the goroutine
   529  // between the memmove and the execution of the barriers.
   530  // The caller is also responsible for cgo pointer checks if this
   531  // may be writing Go pointers into non-Go memory.
   532  //
   533  // The pointer bitmap is not maintained for allocations containing
   534  // no pointers at all; any caller of bulkBarrierPreWrite must first
   535  // make sure the underlying allocation contains pointers, usually
   536  // by checking typ.PtrBytes.
   537  //
   538  // Callers must perform cgo checks if goexperiment.CgoCheck2.
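        //
        // For illustration, a typed copy is performed roughly the way
        // typedmemmove in mbarrier.go does it:
        //
        //	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
        //	memmove(dst, src, typ.Size_)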
   539  //
   540  //go:nosplit
   541  func bulkBarrierPreWrite(dst, src, size uintptr) {
   542  	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
   543  		throw("bulkBarrierPreWrite: unaligned arguments")
   544  	}
   545  	if !writeBarrier.needed {
   546  		return
   547  	}
   548  	if s := spanOf(dst); s == nil {
   549  		// If dst is a global, use the data or BSS bitmaps to
   550  		// execute write barriers.
   551  		for _, datap := range activeModules() {
   552  			if datap.data <= dst && dst < datap.edata {
   553  				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
   554  				return
   555  			}
   556  		}
   557  		for _, datap := range activeModules() {
   558  			if datap.bss <= dst && dst < datap.ebss {
   559  				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
   560  				return
   561  			}
   562  		}
   563  		return
   564  	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
   565  		// dst was heap memory at some point, but isn't now.
   566  		// It can't be a global. It must be either our stack,
   567  		// or in the case of direct channel sends, it could be
   568  		// another stack. Either way, no need for barriers.
   569  		// This will also catch the case where dst is in a freed span,
   570  		// though that should never happen.
   571  		return
   572  	}
   573  
   574  	buf := &getg().m.p.ptr().wbBuf
   575  	h := heapBitsForAddr(dst, size)
   576  	if src == 0 {
   577  		for {
   578  			var addr uintptr
   579  			if h, addr = h.next(); addr == 0 {
   580  				break
   581  			}
   582  			dstx := (*uintptr)(unsafe.Pointer(addr))
   583  			p := buf.get1()
   584  			p[0] = *dstx
   585  		}
   586  	} else {
   587  		for {
   588  			var addr uintptr
   589  			if h, addr = h.next(); addr == 0 {
   590  				break
   591  			}
   592  			dstx := (*uintptr)(unsafe.Pointer(addr))
   593  			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
   594  			p := buf.get2()
   595  			p[0] = *dstx
   596  			p[1] = *srcx
   597  		}
   598  	}
   599  }
   600  
   601  // bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
   602  // does not execute write barriers for [dst, dst+size).
   603  //
   604  // In addition to the requirements of bulkBarrierPreWrite,
   605  // callers need to ensure [dst, dst+size) is zeroed.
   606  //
   607  // This is used for special cases where e.g. dst was just
   608  // created and zeroed with malloc.
   609  //
   610  //go:nosplit
   611  func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
   612  	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
   613  		throw("bulkBarrierPreWriteSrcOnly: unaligned arguments")
   614  	}
   615  	if !writeBarrier.needed {
   616  		return
   617  	}
   618  	buf := &getg().m.p.ptr().wbBuf
   619  	h := heapBitsForAddr(dst, size)
   620  	for {
   621  		var addr uintptr
   622  		if h, addr = h.next(); addr == 0 {
   623  			break
   624  		}
   625  		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
   626  		p := buf.get1()
   627  		p[0] = *srcx
   628  	}
   629  }
   630  
   631  // bulkBarrierBitmap executes write barriers for copying from [src,
   632  // src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
   633  // assumed to start maskOffset bytes into the data covered by the
   634  // bitmap in bits (which may not be a multiple of 8).
   635  //
   636  // This is used by bulkBarrierPreWrite for writes to data and BSS.
   637  //
   638  //go:nosplit
   639  func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
   640  	word := maskOffset / goarch.PtrSize
   641  	bits = addb(bits, word/8)
   642  	mask := uint8(1) << (word % 8)
   643  
   644  	buf := &getg().m.p.ptr().wbBuf
   645  	for i := uintptr(0); i < size; i += goarch.PtrSize {
   646  		if mask == 0 {
   647  			bits = addb(bits, 1)
   648  			if *bits == 0 {
   649  				// Skip 8 words.
   650  				i += 7 * goarch.PtrSize
   651  				continue
   652  			}
   653  			mask = 1
   654  		}
   655  		if *bits&mask != 0 {
   656  			dstx := (*uintptr)(unsafe.Pointer(dst + i))
   657  			if src == 0 {
   658  				p := buf.get1()
   659  				p[0] = *dstx
   660  			} else {
   661  				srcx := (*uintptr)(unsafe.Pointer(src + i))
   662  				p := buf.get2()
   663  				p[0] = *dstx
   664  				p[1] = *srcx
   665  			}
   666  		}
   667  		mask <<= 1
   668  	}
   669  }
   670  
   671  // typeBitsBulkBarrier executes a write barrier for every
   672  // pointer that would be copied from [src, src+size) to [dst,
   673  // dst+size) by a memmove using the type bitmap to locate those
   674  // pointer slots.
   675  //
   676  // The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
   677  // dst, src, and size must be pointer-aligned.
   678  // The type typ must have a plain bitmap, not a GC program.
   679  // The only use of this function is in channel sends, and the
   680  // 64 kB channel element limit takes care of this for us.
   681  //
   682  // Must not be preempted because it typically runs right before memmove,
   683  // and the GC must observe the barriers and the memmove as a single atomic action.
   684  //
   685  // Callers must perform cgo checks if goexperiment.CgoCheck2.
   686  //
   687  //go:nosplit
   688  func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
   689  	if typ == nil {
   690  		throw("runtime: typeBitsBulkBarrier without type")
   691  	}
   692  	if typ.Size_ != size {
   693  		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
   694  		throw("runtime: invalid typeBitsBulkBarrier")
   695  	}
   696  	if typ.Kind_&kindGCProg != 0 {
   697  		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " with GC prog")
   698  		throw("runtime: invalid typeBitsBulkBarrier")
   699  	}
   700  	if !writeBarrier.needed {
   701  		return
   702  	}
   703  	ptrmask := typ.GCData
   704  	buf := &getg().m.p.ptr().wbBuf
   705  	var bits uint32
   706  	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
   707  		if i&(goarch.PtrSize*8-1) == 0 {
   708  			bits = uint32(*ptrmask)
   709  			ptrmask = addb(ptrmask, 1)
   710  		} else {
   711  			bits = bits >> 1
   712  		}
   713  		if bits&1 != 0 {
   714  			dstx := (*uintptr)(unsafe.Pointer(dst + i))
   715  			srcx := (*uintptr)(unsafe.Pointer(src + i))
   716  			p := buf.get2()
   717  			p[0] = *dstx
   718  			p[1] = *srcx
   719  		}
   720  	}
   721  }
   722  
   723  // initHeapBits initializes the heap bitmap for a span.
   724  // If this is a span of single-pointer allocations, it initializes all
   725  // words to pointer. If forceClear is true, it instead clears all bits.
   726  func (s *mspan) initHeapBits(forceClear bool) {
   727  	if forceClear || s.spanclass.noscan() {
   728  		// Set all the pointer bits to zero. We do this once
   729  		// when the span is allocated so we don't have to do it
   730  		// for each object allocation.
   731  		base := s.base()
   732  		size := s.npages * pageSize
   733  		h := writeHeapBitsForAddr(base)
   734  		h.flush(base, size)
   735  		return
   736  	}
   737  	isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
   738  	if !isPtrs {
   739  		return // nothing to do
   740  	}
   741  	h := writeHeapBitsForAddr(s.base())
   742  	size := s.npages * pageSize
   743  	nptrs := size / goarch.PtrSize
   744  	for i := uintptr(0); i < nptrs; i += ptrBits {
   745  		h = h.write(^uintptr(0), ptrBits)
   746  	}
   747  	h.flush(s.base(), size)
   748  }
   749  
   750  // countAlloc returns the number of objects allocated in span s by
   751  // scanning the mark bitmap (gcmarkBits), which at sweep time records the allocated objects.
   752  func (s *mspan) countAlloc() int {
   753  	count := 0
   754  	bytes := divRoundUp(s.nelems, 8)
   755  	// Iterate over each 8-byte chunk and count allocations
   756  	// with an intrinsic. Note that newMarkBits guarantees that
   757  	// gcmarkBits will be 8-byte aligned, so we don't have to
   758  	// worry about edge cases, irrelevant bits will simply be zero.
   759  	for i := uintptr(0); i < bytes; i += 8 {
   760  		// Extract 64 bits from the byte pointer and get a OnesCount.
   761  		// Note that the unsafe cast here doesn't preserve endianness,
   762  		// but that's OK. We only care about how many bits are 1, not
   763  		// about the order we discover them in.
   764  		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
   765  		count += sys.OnesCount64(mrkBits)
   766  	}
   767  	return count
   768  }
   769  
   770  type writeHeapBits struct {
   771  	addr  uintptr // address that the low bit of mask represents the pointer state of.
   772  	mask  uintptr // some pointer bits starting at the address addr.
   773  	valid uintptr // number of bits in mask that are valid (including low)
   774  	low   uintptr // number of low-order bits to not overwrite
   775  }
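
        // A sketch of typical use, as in initHeapBits and heapBitsSetType below:
        //
        //	h := writeHeapBitsForAddr(x)
        //	h = h.write(bits, valid) // append pointer bits, repeated as needed
        //	h.flush(x, size)         // zero-fill out to size and write back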
   776  
   777  func writeHeapBitsForAddr(addr uintptr) (h writeHeapBits) {
   778  	// We start writing bits maybe in the middle of a heap bitmap word.
   779  	// Remember how many bits into the word we started, so we can be sure
   780  	// not to overwrite the previous bits.
   781  	h.low = addr / goarch.PtrSize % ptrBits
   782  
   783  	// round down to heap word that starts the bitmap word.
   784  	h.addr = addr - h.low*goarch.PtrSize
   785  
   786  	// We don't have any bits yet.
   787  	h.mask = 0
   788  	h.valid = h.low
   789  
   790  	return
   791  }
   792  
   793  // write appends the pointerness of the next valid pointer slots
   794  // using the low valid bits of bits. 1=pointer, 0=scalar.
   795  func (h writeHeapBits) write(bits, valid uintptr) writeHeapBits {
   796  	if h.valid+valid <= ptrBits {
   797  		// Fast path - just accumulate the bits.
   798  		h.mask |= bits << h.valid
   799  		h.valid += valid
   800  		return h
   801  	}
   802  	// Too many bits to fit in this word. Write the current word
   803  	// out and move on to the next word.
   804  
   805  	data := h.mask | bits<<h.valid       // mask for this word
   806  	h.mask = bits >> (ptrBits - h.valid) // leftover for next word
   807  	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them
   808  
   809  	// Flush mask to the memory bitmap.
   810  	// TODO: figure out how to cache arena lookup.
   811  	ai := arenaIndex(h.addr)
   812  	ha := mheap_.arenas[ai.l1()][ai.l2()]
   813  	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
   814  	m := uintptr(1)<<h.low - 1
   815  	ha.bitmap[idx] = ha.bitmap[idx]&m | data
   816  	// Note: no synchronization required for this write because
   817  	// the allocator has exclusive access to the page, and the bitmap
   818  	// entries are all for a single page. Also, visibility of these
   819  	// writes is guaranteed by the publication barrier in mallocgc.
   820  
   821  	// Clear noMorePtrs bit, since we're going to be writing bits
   822  	// into the following word.
   823  	ha.noMorePtrs[idx/8] &^= uint8(1) << (idx % 8)
   824  	// Note: same as above
   825  
   826  	// Move to next word of bitmap.
   827  	h.addr += ptrBits * goarch.PtrSize
   828  	h.low = 0
   829  	return h
   830  }
   831  
   832  // pad adds padding of size bytes.
   833  func (h writeHeapBits) pad(size uintptr) writeHeapBits {
   834  	if size == 0 {
   835  		return h
   836  	}
   837  	words := size / goarch.PtrSize
   838  	for words > ptrBits {
   839  		h = h.write(0, ptrBits)
   840  		words -= ptrBits
   841  	}
   842  	return h.write(0, words)
   843  }
   844  
   845  // flush writes out the bits that have been accumulated, adding zeros as
   846  // needed to cover the full object [addr, addr+size).
   847  func (h writeHeapBits) flush(addr, size uintptr) {
   848  	// zeros counts the number of bits needed to represent the object minus the
   849  	// number of bits we've already written. This is the number of 0 bits
   850  	// that need to be added.
   851  	zeros := (addr+size-h.addr)/goarch.PtrSize - h.valid
   852  
   853  	// Add zero bits up to the bitmap word boundary
   854  	if zeros > 0 {
   855  		z := ptrBits - h.valid
   856  		if z > zeros {
   857  			z = zeros
   858  		}
   859  		h.valid += z
   860  		zeros -= z
   861  	}
   862  
   863  	// Find word in bitmap that we're going to write.
   864  	ai := arenaIndex(h.addr)
   865  	ha := mheap_.arenas[ai.l1()][ai.l2()]
   866  	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
   867  
   868  	// Write remaining bits.
   869  	if h.valid != h.low {
   870  		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
   871  		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
   872  		ha.bitmap[idx] = ha.bitmap[idx]&m | h.mask
   873  	}
   874  	if zeros == 0 {
   875  		return
   876  	}
   877  
   878  	// Record in the noMorePtrs map that there won't be any more 1 bits,
   879  	// so readers can stop early.
   880  	ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
   881  
   882  	// Advance to next bitmap word.
   883  	h.addr += ptrBits * goarch.PtrSize
   884  
   885  	// Continue on writing zeros for the rest of the object.
   886  	// For standard use of the ptr bits this is not required, as
   887  	// the bits are read from the beginning of the object. Some uses,
   888  	// like noscan spans, oblets, bulk write barriers, and cgocheck, might
   889  	// start mid-object, so these writes are still required.
   890  	for {
   891  		// Write zero bits.
   892  		ai := arenaIndex(h.addr)
   893  		ha := mheap_.arenas[ai.l1()][ai.l2()]
   894  		idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
   895  		if zeros < ptrBits {
   896  			ha.bitmap[idx] &^= uintptr(1)<<zeros - 1
   897  			break
   898  		} else if zeros == ptrBits {
   899  			ha.bitmap[idx] = 0
   900  			break
   901  		} else {
   902  			ha.bitmap[idx] = 0
   903  			zeros -= ptrBits
   904  		}
   905  		ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
   906  		h.addr += ptrBits * goarch.PtrSize
   907  	}
   908  }
   909  
   910  // readUintptr reads the bytes starting at the aligned pointer p into a uintptr.
   911  // The read is little-endian.
   912  func readUintptr(p *byte) uintptr {
   913  	x := *(*uintptr)(unsafe.Pointer(p))
   914  	if goarch.BigEndian {
   915  		if goarch.PtrSize == 8 {
   916  			return uintptr(sys.Bswap64(uint64(x)))
   917  		}
   918  		return uintptr(sys.Bswap32(uint32(x)))
   919  	}
   920  	return x
   921  }
   922  
   923  // heapBitsSetType records that the new allocation [x, x+size)
   924  // holds in [x, x+dataSize) one or more values of type typ.
   925  // (The number of values is given by dataSize / typ.Size.)
   926  // If dataSize < size, the fragment [x+dataSize, x+size) is
   927  // recorded as non-pointer data.
   928  // It is known that the type has pointers somewhere;
   929  // malloc does not call heapBitsSetType when there are no pointers,
   930  // because objects with no pointers are allocated from noscan spans,
   931  // whose bitmap is set to all zero at span allocation time.
   932  //
   933  // There can only be one allocation from a given span active at a time,
   934  // and the bitmap for a span always falls on word boundaries,
   935  // so there are no write-write races for access to the heap bitmap.
   936  // Hence, heapBitsSetType can access the bitmap without atomics.
   937  //
   938  // There can be read-write races between heapBitsSetType and things
   939  // that read the heap bitmap like scanobject. However, since
   940  // heapBitsSetType is only used for objects that have not yet been
   941  // made reachable, readers will ignore bits being modified by this
   942  // function. This does mean this function cannot transiently modify
   943  // bits that belong to neighboring objects. Also, on weakly-ordered
   944  // machines, callers must execute a store/store (publication) barrier
   945  // between calling this function and making the object reachable.
   946  func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
   947  	const doubleCheck = false // slow but helpful; enable to test modifications to this code
   948  
   949  	if doubleCheck && dataSize%typ.Size_ != 0 {
   950  		throw("heapBitsSetType: dataSize not a multiple of typ.Size")
   951  	}
   952  
   953  	if goarch.PtrSize == 8 && size == goarch.PtrSize {
   954  		// It's one word and it has pointers, so it must be a pointer.
   955  		// Since all allocated one-word objects are pointers
   956  		// (non-pointers are aggregated into tinySize allocations),
   957  		// (*mspan).initHeapBits sets the pointer bits for us.
   958  		// Nothing to do here.
   959  		if doubleCheck {
   960  			h, addr := heapBitsForAddr(x, size).next()
   961  			if addr != x {
   962  				throw("heapBitsSetType: pointer bit missing")
   963  			}
   964  			_, addr = h.next()
   965  			if addr != 0 {
   966  				throw("heapBitsSetType: second pointer bit found")
   967  			}
   968  		}
   969  		return
   970  	}
   971  
   972  	h := writeHeapBitsForAddr(x)
   973  
   974  	// Handle GC program.
   975  	if typ.Kind_&kindGCProg != 0 {
   976  		// Expand the gc program into the storage we're going to use for the actual object.
   977  		obj := (*uint8)(unsafe.Pointer(x))
   978  		n := runGCProg(addb(typ.GCData, 4), obj)
   979  		// Use the expanded program to set the heap bits.
   980  		for i := uintptr(0); true; i += typ.Size_ {
   981  			// Copy expanded program to heap bitmap.
   982  			p := obj
   983  			j := n
   984  			for j > 8 {
   985  				h = h.write(uintptr(*p), 8)
   986  				p = add1(p)
   987  				j -= 8
   988  			}
   989  			h = h.write(uintptr(*p), j)
   990  
   991  			if i+typ.Size_ == dataSize {
   992  				break // no padding after last element
   993  			}
   994  
   995  			// Pad with zeros to the start of the next element.
   996  			h = h.pad(typ.Size_ - n*goarch.PtrSize)
   997  		}
   998  
   999  		h.flush(x, size)
  1000  
  1001  		// Erase the expanded GC program.
  1002  		memclrNoHeapPointers(unsafe.Pointer(obj), (n+7)/8)
  1003  		return
  1004  	}
  1005  
  1006  	// Note about sizes:
  1007  	//
  1008  	// typ.Size_ is the size of the object in bytes,
  1009  	// and typ.PtrBytes is the length in bytes of the prefix
  1010  	// of the object that contains pointers. That is, the final
  1011  	// typ.Size_ - typ.PtrBytes bytes contain no pointers.
  1012  	// This allows optimization of a common pattern where
  1013  	// an object has a small header followed by a large scalar
  1014  	// buffer. If we know the pointers are over, we don't have
  1015  	// to scan the buffer's heap bitmap at all.
  1016  	// The 1-bit ptrmasks are sized to contain only bits for
  1017  	// the typ.PtrBytes prefix, zero padded out to a full byte
  1018  	// of bitmap. If there is more room in the allocated object,
  1019  	// that space is pointerless. The noMorePtrs bitmap will prevent
  1020  	// scanning large pointerless tails of an object.
  1021  	//
  1022  	// Replicated copies are not as nice: if there is an array of
  1023  	// objects with scalar tails, all but the last tail does have to
  1024  	// be initialized, because there is no way to say "skip forward".
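        	//
        	// For example (illustrative), a single element whose type has
        	// PtrBytes = 24 on a 64-bit system has ptrs = 3, so the low 3 bits
        	// of readUintptr(typ.GCData) are appended via h.write(m, 3) below.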
  1025  
  1026  	ptrs := typ.PtrBytes / goarch.PtrSize
  1027  	if typ.Size_ == dataSize { // Single element
  1028  		if ptrs <= ptrBits { // Single small element
  1029  			m := readUintptr(typ.GCData)
  1030  			h = h.write(m, ptrs)
  1031  		} else { // Single large element
  1032  			p := typ.GCData
  1033  			for {
  1034  				h = h.write(readUintptr(p), ptrBits)
  1035  				p = addb(p, ptrBits/8)
  1036  				ptrs -= ptrBits
  1037  				if ptrs <= ptrBits {
  1038  					break
  1039  				}
  1040  			}
  1041  			m := readUintptr(p)
  1042  			h = h.write(m, ptrs)
  1043  		}
  1044  	} else { // Repeated element
  1045  		words := typ.Size_ / goarch.PtrSize // total words, including scalar tail
  1046  		if words <= ptrBits {               // Repeated small element
  1047  			n := dataSize / typ.Size_
  1048  			m := readUintptr(typ.GCData)
  1049  			// Make larger unit to repeat
  1050  			for words <= ptrBits/2 {
  1051  				if n&1 != 0 {
  1052  					h = h.write(m, words)
  1053  				}
  1054  				n /= 2
  1055  				m |= m << words
  1056  				ptrs += words
  1057  				words *= 2
  1058  				if n == 1 {
  1059  					break
  1060  				}
  1061  			}
  1062  			for n > 1 {
  1063  				h = h.write(m, words)
  1064  				n--
  1065  			}
  1066  			h = h.write(m, ptrs)
  1067  		} else { // Repeated large element
  1068  			for i := uintptr(0); true; i += typ.Size_ {
  1069  				p := typ.GCData
  1070  				j := ptrs
  1071  				for j > ptrBits {
  1072  					h = h.write(readUintptr(p), ptrBits)
  1073  					p = addb(p, ptrBits/8)
  1074  					j -= ptrBits
  1075  				}
  1076  				m := readUintptr(p)
  1077  				h = h.write(m, j)
  1078  				if i+typ.Size_ == dataSize {
  1079  					break // don't need the trailing nonptr bits on the last element.
  1080  				}
  1081  				// Pad with zeros to the start of the next element.
  1082  				h = h.pad(typ.Size_ - typ.PtrBytes)
  1083  			}
  1084  		}
  1085  	}
  1086  	h.flush(x, size)
  1087  
  1088  	if doubleCheck {
  1089  		h := heapBitsForAddr(x, size)
  1090  		for i := uintptr(0); i < size; i += goarch.PtrSize {
  1091  			// Compute the pointer bit we want at offset i.
  1092  			want := false
  1093  			if i < dataSize {
  1094  				off := i % typ.Size_
  1095  				if off < typ.PtrBytes {
  1096  					j := off / goarch.PtrSize
  1097  					want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
  1098  				}
  1099  			}
  1100  			if want {
  1101  				var addr uintptr
  1102  				h, addr = h.next()
  1103  				if addr != x+i {
  1104  					throw("heapBitsSetType: pointer entry not correct")
  1105  				}
  1106  			}
  1107  		}
  1108  		if _, addr := h.next(); addr != 0 {
  1109  			throw("heapBitsSetType: extra pointer")
  1110  		}
  1111  	}
  1112  }
  1113  
  1114  var debugPtrmask struct {
  1115  	lock mutex
  1116  	data *byte
  1117  }
  1118  
  1119  // progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
  1120  // size is the size of the region described by prog, in bytes.
  1121  // The resulting bitvector will have no more than size/goarch.PtrSize bits.
  1122  func progToPointerMask(prog *byte, size uintptr) bitvector {
  1123  	n := (size/goarch.PtrSize + 7) / 8
  1124  	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
  1125  	x[len(x)-1] = 0xa1 // overflow check sentinel
  1126  	n = runGCProg(prog, &x[0])
  1127  	if x[len(x)-1] != 0xa1 {
  1128  		throw("progToPointerMask: overflow")
  1129  	}
  1130  	return bitvector{int32(n), &x[0]}
  1131  }
  1132  
  1133  // Packed GC pointer bitmaps, aka GC programs.
  1134  //
  1135  // For large types containing arrays, the type information has a
  1136  // natural repetition that can be encoded to save space in the
  1137  // binary and in the memory representation of the type information.
  1138  //
  1139  // The encoding is a simple Lempel-Ziv style bytecode machine
  1140  // with the following instructions:
  1141  //
  1142  //	00000000: stop
  1143  //	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
  1144  //	10000000 n c: repeat the previous n bits c times; n, c are varints
  1145  //	1nnnnnnn c: repeat the previous n bits c times; c is a varint
  1146  
  1147  // runGCProg expands the GC program prog into dst and returns the number of bits written.
  1148  func runGCProg(prog, dst *byte) uintptr {
  1149  	dstStart := dst
  1150  
  1151  	// Bits waiting to be written to memory.
  1152  	var bits uintptr
  1153  	var nbits uintptr
  1154  
  1155  	p := prog
  1156  Run:
  1157  	for {
  1158  		// Flush accumulated full bytes.
  1159  		// The rest of the loop assumes that nbits <= 7.
  1160  		for ; nbits >= 8; nbits -= 8 {
  1161  			*dst = uint8(bits)
  1162  			dst = add1(dst)
  1163  			bits >>= 8
  1164  		}
  1165  
  1166  		// Process one instruction.
  1167  		inst := uintptr(*p)
  1168  		p = add1(p)
  1169  		n := inst & 0x7F
  1170  		if inst&0x80 == 0 {
  1171  			// Literal bits; n == 0 means end of program.
  1172  			if n == 0 {
  1173  				// Program is over.
  1174  				break Run
  1175  			}
  1176  			nbyte := n / 8
  1177  			for i := uintptr(0); i < nbyte; i++ {
  1178  				bits |= uintptr(*p) << nbits
  1179  				p = add1(p)
  1180  				*dst = uint8(bits)
  1181  				dst = add1(dst)
  1182  				bits >>= 8
  1183  			}
  1184  			if n %= 8; n > 0 {
  1185  				bits |= uintptr(*p) << nbits
  1186  				p = add1(p)
  1187  				nbits += n
  1188  			}
  1189  			continue Run
  1190  		}
  1191  
  1192  		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
  1193  		if n == 0 {
  1194  			for off := uint(0); ; off += 7 {
  1195  				x := uintptr(*p)
  1196  				p = add1(p)
  1197  				n |= (x & 0x7F) << off
  1198  				if x&0x80 == 0 {
  1199  					break
  1200  				}
  1201  			}
  1202  		}
  1203  
  1204  		// Count is encoded in a varint in the next bytes.
  1205  		c := uintptr(0)
  1206  		for off := uint(0); ; off += 7 {
  1207  			x := uintptr(*p)
  1208  			p = add1(p)
  1209  			c |= (x & 0x7F) << off
  1210  			if x&0x80 == 0 {
  1211  				break
  1212  			}
  1213  		}
  1214  		c *= n // now total number of bits to copy
  1215  
  1216  		// If the number of bits being repeated is small, load them
  1217  		// into a register and use that register for the entire loop
  1218  		// instead of repeatedly reading from memory.
  1219  		// Handling fewer than 8 bits here makes the general loop simpler.
  1220  		// The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add
  1221  		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
  1222  		// it will not overflow.
  1223  		src := dst
  1224  		const maxBits = goarch.PtrSize*8 - 7
  1225  		if n <= maxBits {
  1226  			// Start with bits in output buffer.
  1227  			pattern := bits
  1228  			npattern := nbits
  1229  
  1230  			// If we need more bits, fetch them from memory.
  1231  			src = subtract1(src)
  1232  			for npattern < n {
  1233  				pattern <<= 8
  1234  				pattern |= uintptr(*src)
  1235  				src = subtract1(src)
  1236  				npattern += 8
  1237  			}
  1238  
  1239  			// We started with the whole bit output buffer,
  1240  			// and then we loaded bits from whole bytes.
  1241  			// Either way, we might now have too many instead of too few.
  1242  			// Discard the extra.
  1243  			if npattern > n {
  1244  				pattern >>= npattern - n
  1245  				npattern = n
  1246  			}
  1247  
  1248  			// Replicate pattern to at most maxBits.
  1249  			if npattern == 1 {
  1250  				// One bit being repeated.
  1251  				// If the bit is 1, make the pattern all 1s.
  1252  				// If the bit is 0, the pattern is already all 0s,
  1253  				// but we can claim that the number of bits
  1254  				// in the word is equal to the number we need (c),
  1255  				// because right shift of bits will zero fill.
  1256  				if pattern == 1 {
  1257  					pattern = 1<<maxBits - 1
  1258  					npattern = maxBits
  1259  				} else {
  1260  					npattern = c
  1261  				}
  1262  			} else {
  1263  				b := pattern
  1264  				nb := npattern
  1265  				if nb+nb <= maxBits {
  1266  					// Double pattern until the whole uintptr is filled.
  1267  					for nb <= goarch.PtrSize*8 {
  1268  						b |= b << nb
  1269  						nb += nb
  1270  					}
  1271  					// Trim away incomplete copy of original pattern in high bits.
  1272  					// TODO(rsc): Replace with table lookup or loop on systems without divide?
  1273  					nb = maxBits / npattern * npattern
  1274  					b &= 1<<nb - 1
  1275  					pattern = b
  1276  					npattern = nb
  1277  				}
  1278  			}
  1279  
  1280  			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
  1281  			// Since pattern contains >8 bits, there will be full bytes to flush
  1282  			// on each iteration.
  1283  			for ; c >= npattern; c -= npattern {
  1284  				bits |= pattern << nbits
  1285  				nbits += npattern
  1286  				for nbits >= 8 {
  1287  					*dst = uint8(bits)
  1288  					dst = add1(dst)
  1289  					bits >>= 8
  1290  					nbits -= 8
  1291  				}
  1292  			}
  1293  
  1294  			// Add final fragment to bit buffer.
  1295  			if c > 0 {
  1296  				pattern &= 1<<c - 1
  1297  				bits |= pattern << nbits
  1298  				nbits += c
  1299  			}
  1300  			continue Run
  1301  		}
  1302  
  1303  		// Repeat; n too large to fit in a register.
  1304  		// Since nbits <= 7, we know the first few bytes of repeated data
  1305  		// are already written to memory.
  1306  		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
  1307  		// Leading src fragment.
  1308  		src = subtractb(src, (off+7)/8)
  1309  		if frag := off & 7; frag != 0 {
  1310  			bits |= uintptr(*src) >> (8 - frag) << nbits
  1311  			src = add1(src)
  1312  			nbits += frag
  1313  			c -= frag
  1314  		}
  1315  		// Main loop: load one byte, write another.
  1316  		// The bits are rotating through the bit buffer.
  1317  		for i := c / 8; i > 0; i-- {
  1318  			bits |= uintptr(*src) << nbits
  1319  			src = add1(src)
  1320  			*dst = uint8(bits)
  1321  			dst = add1(dst)
  1322  			bits >>= 8
  1323  		}
  1324  		// Final src fragment.
  1325  		if c %= 8; c > 0 {
  1326  			bits |= (uintptr(*src) & (1<<c - 1)) << nbits
  1327  			nbits += c
  1328  		}
  1329  	}
  1330  
  1331  	// Write any final bits out, using full-byte writes, even for the final byte.
  1332  	totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
  1333  	nbits += -nbits & 7
  1334  	for ; nbits > 0; nbits -= 8 {
  1335  		*dst = uint8(bits)
  1336  		dst = add1(dst)
  1337  		bits >>= 8
  1338  	}
  1339  	return totalBits
  1340  }
  1341  
  1342  // materializeGCProg allocates space for the (1-bit) pointer bitmask
  1343  // for an object of size ptrdata.  Then it fills that space with the
  1344  // pointer bitmask specified by the program prog.
  1345  // The bitmask starts at s.startAddr.
  1346  // The result must be deallocated with dematerializeGCProg.
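        //
        // For example, ptrdata of 1 MiB on a 64-bit system needs
        // 1<<20/64 = 16 KiB of bitmap, which is two 8 KiB runtime pages.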
  1347  func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
  1348  	// Each word of ptrdata needs one bit in the bitmap.
  1349  	bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
  1350  	// Compute the number of pages needed for bitmapBytes.
  1351  	pages := divRoundUp(bitmapBytes, pageSize)
  1352  	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
  1353  	runGCProg(addb(prog, 4), (*byte)(unsafe.Pointer(s.startAddr)))
  1354  	return s
  1355  }
  1356  func dematerializeGCProg(s *mspan) {
  1357  	mheap_.freeManual(s, spanAllocPtrScalarBits)
  1358  }
  1359  
  1360  func dumpGCProg(p *byte) {
  1361  	nptr := 0
  1362  	for {
  1363  		x := *p
  1364  		p = add1(p)
  1365  		if x == 0 {
  1366  			print("\t", nptr, " end\n")
  1367  			break
  1368  		}
  1369  		if x&0x80 == 0 {
  1370  			print("\t", nptr, " lit ", x, ":")
  1371  			n := int(x+7) / 8
  1372  			for i := 0; i < n; i++ {
  1373  				print(" ", hex(*p))
  1374  				p = add1(p)
  1375  			}
  1376  			print("\n")
  1377  			nptr += int(x)
  1378  		} else {
  1379  			nbit := int(x &^ 0x80)
  1380  			if nbit == 0 {
  1381  				for nb := uint(0); ; nb += 7 {
  1382  					x := *p
  1383  					p = add1(p)
  1384  					nbit |= int(x&0x7f) << nb
  1385  					if x&0x80 == 0 {
  1386  						break
  1387  					}
  1388  				}
  1389  			}
  1390  			count := 0
  1391  			for nb := uint(0); ; nb += 7 {
  1392  				x := *p
  1393  				p = add1(p)
  1394  				count |= int(x&0x7f) << nb
  1395  				if x&0x80 == 0 {
  1396  					break
  1397  				}
  1398  			}
  1399  			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
  1400  			nptr += nbit * count
  1401  		}
  1402  	}
  1403  }
  1404  
  1405  // Testing.
  1406  
  1407  // reflect_gcbits returns the GC type info for x, for testing.
  1408  // The result is the bitmap entries (0 or 1), one entry per byte.
  1409  //
  1410  //go:linkname reflect_gcbits reflect.gcbits
  1411  func reflect_gcbits(x any) []byte {
  1412  	return getgcmask(x)
  1413  }
  1414  
  1415  // getgcmask returns the GC type info for the pointer stored in ep, for testing.
  1416  // If ep points to the stack, only static live information will be returned
  1417  // (i.e. not for objects which are only dynamically live stack objects).
  1418  func getgcmask(ep any) (mask []byte) {
  1419  	e := *efaceOf(&ep)
  1420  	p := e.data
  1421  	t := e._type
  1422  	// data or bss
  1423  	for _, datap := range activeModules() {
  1424  		// data
  1425  		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
  1426  			bitmap := datap.gcdatamask.bytedata
  1427  			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
  1428  			mask = make([]byte, n/goarch.PtrSize)
  1429  			for i := uintptr(0); i < n; i += goarch.PtrSize {
  1430  				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
  1431  				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  1432  			}
  1433  			return
  1434  		}
  1435  
  1436  		// bss
  1437  		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
  1438  			bitmap := datap.gcbssmask.bytedata
  1439  			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
  1440  			mask = make([]byte, n/goarch.PtrSize)
  1441  			for i := uintptr(0); i < n; i += goarch.PtrSize {
  1442  				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
  1443  				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  1444  			}
  1445  			return
  1446  		}
  1447  	}
  1448  
  1449  	// heap
  1450  	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
  1451  		if s.spanclass.noscan() {
  1452  			return nil
  1453  		}
  1454  		n := s.elemsize
  1455  		hbits := heapBitsForAddr(base, n)
  1456  		mask = make([]byte, n/goarch.PtrSize)
  1457  		for {
  1458  			var addr uintptr
  1459  			if hbits, addr = hbits.next(); addr == 0 {
  1460  				break
  1461  			}
  1462  			mask[(addr-base)/goarch.PtrSize] = 1
  1463  		}
  1464  		// Callers expect this mask to end at the last pointer.
  1465  		for len(mask) > 0 && mask[len(mask)-1] == 0 {
  1466  			mask = mask[:len(mask)-1]
  1467  		}
  1468  		return
  1469  	}
  1470  
  1471  	// stack
  1472  	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
  1473  		found := false
  1474  		var u unwinder
  1475  		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
  1476  			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
  1477  				found = true
  1478  				break
  1479  			}
  1480  		}
  1481  		if found {
  1482  			locals, _, _ := u.frame.getStackMap(nil, false)
  1483  			if locals.n == 0 {
  1484  				return
  1485  			}
  1486  			size := uintptr(locals.n) * goarch.PtrSize
  1487  			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
  1488  			mask = make([]byte, n/goarch.PtrSize)
  1489  			for i := uintptr(0); i < n; i += goarch.PtrSize {
  1490  				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
  1491  				mask[i/goarch.PtrSize] = locals.ptrbit(off)
  1492  			}
  1493  		}
  1494  		return
  1495  	}
  1496  
  1497  	// otherwise, not something the GC knows about.
  1498  	// possibly read-only data, like malloc(0).
  1499  	// must not have pointers
  1500  	return
  1501  }
  1502  
