Source file src/runtime/arena.go

     1  // Copyright 2022 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Implementation of (safe) user arenas.
     6  //
     7  // This file contains the implementation of user arenas wherein Go values can
     8  // be manually allocated and freed in bulk. The act of manually freeing memory,
     9  // potentially before a GC cycle, means that a garbage collection cycle can be
    10  // delayed, improving efficiency by reducing GC cycle frequency. There are other
    11  // potential efficiency benefits, such as improved locality and access to a more
    12  // efficient allocation strategy.
    13  //
    14  // What makes the arenas here safe is that once they are freed, accessing the
    15  // arena's memory will cause an explicit program fault, and the arena's address
    16  // space will not be reused until no more pointers into it are found. There's one
    17  // exception to this: if the arena's active chunk isn't exhausted when the arena is freed,
    18  // that chunk is placed back into a pool for reuse. This means that a crash is not always guaranteed.
    19  //
    20  // While this may seem unsafe, it still prevents memory corruption, and is in fact
    21  // necessary in order to make new(T) a valid implementation of arenas. Such a property
    22  // is desirable to allow for a trivial implementation. (It also avoids complexities
    23  // that arise from synchronization with the GC when trying to set the arena chunks to
    24  // fault while the GC is active.)
    25  //
    26  // The implementation works in layers. At the bottom, arenas are managed in chunks.
    27  // Each chunk must be a multiple of the heap arena size, or the heap arena size must
    28  // be divisible by the arena chunk size. The address space for each chunk, and each
    29  // corresponding heapArena for that address space, are eternally reserved for use as
    30  // arena chunks. That is, they can never be used for the general heap. Each chunk
    31  // is also represented by a single mspan, and is modeled as a single large heap
    32  // allocation. It must be, because each chunk contains ordinary Go values that may
    33  // point into the heap, so it must be scanned just like any other object. Any
    34  // pointer into a chunk will therefore always cause the whole chunk to be scanned
    35  // while its corresponding arena is still live.
    36  //
    37  // Chunks may be allocated either from new memory mapped by the OS on our behalf,
    38  // or by reusing old freed chunks. When chunks are freed, their underlying memory
    39  // is returned to the OS, set to fault on access, and may not be reused until the
    40  // program doesn't point into the chunk anymore (the code refers to this state as
    41  // "quarantined"), a property checked by the GC.
    42  //
    43  // The sweeper handles moving chunks out of this quarantine state to be ready for
    44  // reuse. When the chunk is placed into the quarantine state, its corresponding
    45  // span is marked as noscan so that the GC doesn't try to scan memory that would
    46  // cause a fault.
    47  //
    48  // At the next layer are the user arenas themselves. They consist of a single
    49  // active chunk which new Go values are bump-allocated into and a list of chunks
    50  // that were exhausted when allocating into the arena. Once the arena is freed,
    51  // it frees all full chunks it references, and places the active one onto a reuse
    52  // list for a future arena to use. Each arena keeps its list of referenced chunks
    53  // explicitly live until it is freed. Each user arena also maps to an object which
    54  // has a finalizer attached that ensures the arena's chunks are all freed even if
    55  // the arena itself is never explicitly freed.
    56  //
    57  // Pointer-ful memory is bump-allocated from low addresses to high addresses in each
    58  // chunk, while pointer-free memory is bump-allocated from high addresses to low
    59  // addresses. The reason for this is to take advantage of a GC optimization wherein
    60  // the GC will stop scanning an object when there are no more pointers in it, which
    61  // also allows us to elide clearing the heap bitmap for pointer-free Go values
    62  // allocated into arenas.
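        //
        // For example, within a single chunk the two bump pointers move toward each
        // other until they meet (a rough sketch, not to scale):
        //
        //	base                                                     base+elemsize
        //	| pointer-ful values → |     ...free space...     | ← pointer-free values |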
    63  //
    64  // Note that arenas are not safe to use concurrently.
    65  //
    66  // In summary, there are 2 resources: arenas, and arena chunks. They exist in the
    67  // following lifecycle (a usage sketch follows the list):
    68  //
    69  // (1) A new arena is created via newArena.
    70  // (2) Chunks are allocated to hold memory allocated into the arena with new or slice.
    71  //    (a) Chunks are first allocated from the reuse list of partially-used chunks.
    72  //    (b) If there are no such chunks, then chunks on the ready list are taken.
    73  //    (c) Failing all the above, memory for a new chunk is mapped.
    74  // (3) The arena is freed, or all references to it are dropped, triggering its finalizer.
    75  //    (a) If the GC is not active, exhausted chunks are set to fault and placed on a
    76  //        quarantine list.
    77  //    (b) If the GC is active, exhausted chunks are placed on a fault list and will
    78  //        go through step (a) at a later point in time.
    79  //    (c) Any remaining partially-used chunk is placed on a reuse list.
    80  // (4) Once no more pointers are found into quarantined arena chunks, the sweeper
    81  //     takes these chunks out of quarantine and places them on the ready list.
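        //
        // For illustration, a typical use of the public arena API that drives this
        // lifecycle looks roughly like the following. This is a sketch: the arena
        // package is the intended consumer of the arena_* functions below, and myType
        // is a placeholder type.
        //
        //	a := arena.NewArena()                      // (1)
        //	x := arena.New[myType](a)                  // (2) chunks obtained as needed
        //	s := arena.MakeSlice[byte](a, 1024, 1024)  // (2) ditto, for a backing store
        //	// ... use x and s ...
        //	a.Free()                                   // (3)
        //	// (4) happens later, inside the runtime, once the GC finds no more
        //	// pointers into the quarantined chunks.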
    82  
    83  package runtime
    84  
    85  import (
    86  	"internal/goarch"
    87  	"internal/goexperiment"
    88  	"runtime/internal/atomic"
    89  	"runtime/internal/math"
    90  	"unsafe"
    91  )
    92  
    93  // Functions starting with arena_ are meant to be exported to downstream users
    94  // of arenas. They should wrap these functions in a higher-level API.
    95  //
    96  // The underlying arena and its resources are managed through an opaque unsafe.Pointer.
    97  
    98  // arena_newArena is a wrapper around newUserArena.
    99  //
   100  //go:linkname arena_newArena arena.runtime_arena_newArena
   101  func arena_newArena() unsafe.Pointer {
   102  	return unsafe.Pointer(newUserArena())
   103  }
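
        // A downstream arena package is expected to pull this in with a matching
        // go:linkname declaration, along these lines (a sketch, not necessarily the
        // exact declaration the arena package uses; it needs an unsafe import for
        // the pragma):
        //
        //	//go:linkname runtime_arena_newArena
        //	func runtime_arena_newArena() unsafe.Pointer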
   104  
   105  // arena_arena_New is a wrapper around (*userArena).new, except that typ
   106  // is an any (must be a *_type, still) and typ must be a type descriptor
   107  // for a pointer to the type to actually be allocated, i.e. pass a *T
   108  // to allocate a T. This is necessary because this function returns a *T.
   109  //
   110  //go:linkname arena_arena_New arena.runtime_arena_arena_New
   111  func arena_arena_New(arena unsafe.Pointer, typ any) any {
   112  	t := (*_type)(efaceOf(&typ).data)
   113  	if t.Kind_&kindMask != kindPtr {
   114  		throw("arena_New: non-pointer type")
   115  	}
   116  	te := (*ptrtype)(unsafe.Pointer(t)).Elem
   117  	x := ((*userArena)(arena)).new(te)
   118  	var result any
   119  	e := efaceOf(&result)
   120  	e._type = t
   121  	e.data = x
   122  	return result
   123  }
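
        // For example, a generic wrapper on the arena package side could obtain the
        // required *T descriptor by reflecting a (*T)(nil) value. This is a sketch and
        // not necessarily the arena package's exact implementation; it assumes the
        // wrapper's Arena struct stores the opaque handle in a field named a.
        //
        //	func New[T any](a *Arena) *T {
        //		// The descriptor of (*T)(nil) is the *_type for *T, so the runtime
        //		// allocates a T and returns it as a *T inside an any.
        //		return runtime_arena_arena_New(a.a, reflect.TypeOf((*T)(nil))).(*T)
        //	}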
   124  
   125  // arena_arena_Slice is a wrapper around (*userArena).slice.
   126  //
   127  //go:linkname arena_arena_Slice arena.runtime_arena_arena_Slice
   128  func arena_arena_Slice(arena unsafe.Pointer, slice any, cap int) {
   129  	((*userArena)(arena)).slice(slice, cap)
   130  }
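
        // For example, a MakeSlice-style wrapper would pass a pointer to a zero slice
        // and let the runtime fill it in (sketch; as above, field a is assumed to hold
        // the opaque handle):
        //
        //	func MakeSlice[T any](a *Arena, len, cap int) []T {
        //		var sl []T
        //		runtime_arena_arena_Slice(a.a, &sl, cap)
        //		return sl[:len]
        //	}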
   131  
   132  // arena_arena_Free is a wrapper around (*userArena).free.
   133  //
   134  //go:linkname arena_arena_Free arena.runtime_arena_arena_Free
   135  func arena_arena_Free(arena unsafe.Pointer) {
   136  	((*userArena)(arena)).free()
   137  }
   138  
   139  // arena_heapify takes a value that lives in an arena and makes a copy
   140  // of it on the heap. Values that don't live in an arena are returned unmodified.
   141  //
   142  //go:linkname arena_heapify arena.runtime_arena_heapify
   143  func arena_heapify(s any) any {
   144  	var v unsafe.Pointer
   145  	e := efaceOf(&s)
   146  	t := e._type
   147  	switch t.Kind_ & kindMask {
   148  	case kindString:
   149  		v = stringStructOf((*string)(e.data)).str
   150  	case kindSlice:
   151  		v = (*slice)(e.data).array
   152  	case kindPtr:
   153  		v = e.data
   154  	default:
   155  		panic("arena: Clone only supports pointers, slices, and strings")
   156  	}
   157  	span := spanOf(uintptr(v))
   158  	if span == nil || !span.isUserArenaChunk {
   159  		// Not stored in a user arena chunk.
   160  		return s
   161  	}
   162  	// Heap-allocate storage for a copy.
   163  	var x any
   164  	switch t.Kind_ & kindMask {
   165  	case kindString:
   166  		s1 := s.(string)
   167  		s2, b := rawstring(len(s1))
   168  		copy(b, s1)
   169  		x = s2
   170  	case kindSlice:
   171  		len := (*slice)(e.data).len
   172  		et := (*slicetype)(unsafe.Pointer(t)).Elem
   173  		sl := new(slice)
   174  		*sl = slice{makeslicecopy(et, len, len, (*slice)(e.data).array), len, len}
   175  		xe := efaceOf(&x)
   176  		xe._type = t
   177  		xe.data = unsafe.Pointer(sl)
   178  	case kindPtr:
   179  		et := (*ptrtype)(unsafe.Pointer(t)).Elem
   180  		e2 := newobject(et)
   181  		typedmemmove(et, e2, e.data)
   182  		xe := efaceOf(&x)
   183  		xe._type = t
   184  		xe.data = e2
   185  	}
   186  	return x
   187  }
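
        // This backs arena.Clone in the public package (note the panic message above).
        // A typical use is to copy a value out of the arena before the arena is freed,
        // for example (sketch):
        //
        //	s := arena.MakeSlice[byte](a, n, n)
        //	// ... fill s ...
        //	out := arena.Clone(s) // heap copy; safe to use after a.Free()
        //	a.Free()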
   188  
   189  const (
   190  	// userArenaChunkBytes is the size of a user arena chunk.
   191  	userArenaChunkBytesMax = 8 << 20
   192  	userArenaChunkBytes    = uintptr(int64(userArenaChunkBytesMax-heapArenaBytes)&(int64(userArenaChunkBytesMax-heapArenaBytes)>>63) + heapArenaBytes) // min(userArenaChunkBytesMax, heapArenaBytes)
   193  
   194  	// userArenaChunkPages is the number of pages a user arena chunk uses.
   195  	userArenaChunkPages = userArenaChunkBytes / pageSize
   196  
   197  	// userArenaChunkMaxAllocBytes is the maximum size of an object that can
   198  	// be allocated from an arena. This number is chosen to cap worst-case
   199  	// fragmentation of user arenas to 25%. Larger allocations are redirected
   200  	// to the heap.
   201  	userArenaChunkMaxAllocBytes = userArenaChunkBytes / 4
   202  )
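
        // The userArenaChunkBytes expression above is a branchless min. For example,
        // with heapArenaBytes = 64 MiB (typical on 64-bit Linux) and
        // userArenaChunkBytesMax = 8 MiB:
        //
        //	d := int64(8<<20) - int64(64<<20) // negative
        //	m := d&(d>>63) + int64(64<<20)    // d>>63 is -1, so this is d + 64<<20 = 8<<20
        //
        // If heapArenaBytes were smaller than userArenaChunkBytesMax, d would be
        // positive, d>>63 would be 0, and the result would be heapArenaBytes itself.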
   203  
   204  func init() {
   205  	if userArenaChunkPages*pageSize != userArenaChunkBytes {
   206  		throw("user arena chunk size is not a multiple of the page size")
   207  	}
   208  	if userArenaChunkBytes%physPageSize != 0 {
   209  		throw("user arena chunk size is not a multiple of the physical page size")
   210  	}
   211  	if userArenaChunkBytes < heapArenaBytes {
   212  		if heapArenaBytes%userArenaChunkBytes != 0 {
   213  			throw("user arena chunk size is smaller than a heap arena, but doesn't divide it")
   214  		}
   215  	} else {
   216  		if userArenaChunkBytes%heapArenaBytes != 0 {
   217  			throw("user arena chunk size is larger than a heap arena, but not a multiple")
   218  		}
   219  	}
   220  	lockInit(&userArenaState.lock, lockRankUserArenaState)
   221  }
   222  
   223  // userArenaChunkReserveBytes returns the amount of additional bytes to reserve for
   224  // heap metadata.
   225  func userArenaChunkReserveBytes() uintptr {
   226  	if goexperiment.AllocHeaders {
   227  		// In the allocation headers experiment, we reserve the end of the chunk for
   228  		// a pointer/scalar bitmap. We also reserve space for a dummy _type that
   229  		// refers to the bitmap. The PtrBytes field of the dummy _type indicates how
   230  		// many of those bits are valid.
   231  		return userArenaChunkBytes/goarch.PtrSize/8 + unsafe.Sizeof(_type{})
   232  	}
   233  	return 0
   234  }
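
        // As a worked example of the formula above: with an 8 MiB chunk and 8-byte
        // pointers, the reserved bitmap is (8<<20)/8/8 = 128 KiB, plus
        // unsafe.Sizeof(_type{}) bytes for the dummy type descriptor that refers to it.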
   235  
   236  type userArena struct {
   237  	// fullList is a list of full chunks that don't have enough free memory left, and
   238  	// that we'll free once this user arena is freed.
   239  	//
   240  	// Can't use mSpanList here because it's not-in-heap.
   241  	fullList *mspan
   242  
   243  	// active is the user arena chunk we're currently allocating into.
   244  	active *mspan
   245  
   246  	// refs is a set of references to the arena chunks so that they're kept alive.
   247  	//
   248  	// The last reference in the list always refers to active, while the rest of
   249  	// them correspond to fullList. Specifically, the head of fullList is the
   250  	// second-to-last one, fullList.next is the third-to-last, and so on.
   251  	//
   252  	// In other words, every time a new chunk becomes active, it's appended to this
   253  	// list.
   254  	refs []unsafe.Pointer
   255  
   256  	// defunct is true if free has been called on this arena.
   257  	//
   258  	// This is just a best-effort way to discover a concurrent allocation
   259  	// and free. Also used to detect a double-free.
   260  	defunct atomic.Bool
   261  }
   262  
   263  // newUserArena creates a new userArena ready to be used.
   264  func newUserArena() *userArena {
   265  	a := new(userArena)
   266  	SetFinalizer(a, func(a *userArena) {
   267  		// If the arena handle is dropped without being freed, then call
   268  		// free on the arena, since the arena chunks are never reclaimed
   269  		// by the garbage collector on their own.
   270  		a.free()
   271  	})
   272  	a.refill()
   273  	return a
   274  }
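
        // For example, even if a caller drops the handle without freeing it, the
        // chunks are still returned eventually (sketch; someType stands in for
        // whatever *_type the caller allocates):
        //
        //	a := newUserArena()
        //	_ = a.new(someType)
        //	a = nil // handle dropped; a later GC runs the finalizer, which calls free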
   275  
   276  // new allocates a new object of the provided type into the arena, and returns
   277  // its pointer.
   278  //
   279  // This operation is not safe to call concurrently with other operations on the
   280  // same arena.
   281  func (a *userArena) new(typ *_type) unsafe.Pointer {
   282  	return a.alloc(typ, -1)
   283  }
   284  
   285  // slice allocates a new slice backing store. sl must be a pointer to a slice
   286  // (i.e. *[]T), because slice updates the slice header directly.
   287  //
   288  // cap determines the capacity of the slice backing store and must be non-negative.
   289  //
   290  // This operation is not safe to call concurrently with other operations on the
   291  // same arena.
   292  func (a *userArena) slice(sl any, cap int) {
   293  	if cap < 0 {
   294  		panic("userArena.slice: negative cap")
   295  	}
   296  	i := efaceOf(&sl)
   297  	typ := i._type
   298  	if typ.Kind_&kindMask != kindPtr {
   299  		panic("slice result of non-ptr type")
   300  	}
   301  	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
   302  	if typ.Kind_&kindMask != kindSlice {
   303  		panic("slice of non-ptr-to-slice type")
   304  	}
   305  	typ = (*slicetype)(unsafe.Pointer(typ)).Elem
   306  	// typ is now the element type of the slice we want to allocate.
   307  
   308  	*((*slice)(i.data)) = slice{a.alloc(typ, cap), cap, cap}
   309  }
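
        // For example, from a caller holding a *userArena (sketch):
        //
        //	var buf []byte
        //	a.slice(&buf, 4096) // buf now has len == cap == 4096, backed by the arena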
   310  
   311  // free returns the userArena's chunks back to mheap and marks it as defunct.
   312  //
   313  // Must be called at most once for any given arena.
   314  //
   315  // This operation is not safe to call concurrently with other operations on the
   316  // same arena.
   317  func (a *userArena) free() {
   318  	// Check for a double-free.
   319  	if a.defunct.Load() {
   320  		panic("arena double free")
   321  	}
   322  
   323  	// Mark ourselves as defunct.
   324  	a.defunct.Store(true)
   325  	SetFinalizer(a, nil)
   326  
   327  	// Free all the full arenas.
   328  	//
   329  	// The refs on this list are in reverse order from the second-to-last.
   330  	s := a.fullList
   331  	i := len(a.refs) - 2
   332  	for s != nil {
   333  		a.fullList = s.next
   334  		s.next = nil
   335  		freeUserArenaChunk(s, a.refs[i])
   336  		s = a.fullList
   337  		i--
   338  	}
   339  	if a.fullList != nil || i >= 0 {
   340  		// There's still something left on the full list, or we
   341  		// failed to actually iterate over the entire refs list.
   342  		throw("full list doesn't match refs list in length")
   343  	}
   344  
   345  	// Put the active chunk onto the reuse list.
   346  	//
   347  	// Note that active's reference is always the last reference in refs.
   348  	s = a.active
   349  	if s != nil {
   350  		if raceenabled || msanenabled || asanenabled {
   351  			// Don't reuse arenas with sanitizers enabled. We want to catch
   352  			// any use-after-free errors aggressively.
   353  			freeUserArenaChunk(s, a.refs[len(a.refs)-1])
   354  		} else {
   355  			lock(&userArenaState.lock)
   356  			userArenaState.reuse = append(userArenaState.reuse, liveUserArenaChunk{s, a.refs[len(a.refs)-1]})
   357  			unlock(&userArenaState.lock)
   358  		}
   359  	}
   360  	// nil out a.active so that a race with freeing will more likely cause a crash.
   361  	a.active = nil
   362  	a.refs = nil
   363  }
   364  
   365  // alloc reserves space in the current chunk or calls refill and reserves space
   366  // in a new chunk. If cap is negative, the type will be taken literally, otherwise
   367  // it will be considered as an element type for a slice backing store with capacity
   368  // cap.
   369  func (a *userArena) alloc(typ *_type, cap int) unsafe.Pointer {
   370  	s := a.active
   371  	var x unsafe.Pointer
   372  	for {
   373  		x = s.userArenaNextFree(typ, cap)
   374  		if x != nil {
   375  			break
   376  		}
   377  		s = a.refill()
   378  	}
   379  	return x
   380  }
   381  
   382  // refill inserts the current arena chunk onto the full list and obtains a new
   383  // one, either from the reuse list of partially-used chunks or by allocating one from mheap.
   384  func (a *userArena) refill() *mspan {
   385  	// If there's an active chunk, assume it's full.
   386  	s := a.active
   387  	if s != nil {
   388  		if s.userArenaChunkFree.size() > userArenaChunkMaxAllocBytes {
   389  			// It's difficult to tell when we're actually out of memory
   390  			// in a chunk because the allocation that failed may still leave
   391  			// some free space available. However, that amount of free space
   392  			// should never exceed the maximum allocation size.
   393  			throw("wasted too much memory in an arena chunk")
   394  		}
   395  		s.next = a.fullList
   396  		a.fullList = s
   397  		a.active = nil
   398  		s = nil
   399  	}
   400  	var x unsafe.Pointer
   401  
   402  	// Check the partially-used list.
   403  	lock(&userArenaState.lock)
   404  	if len(userArenaState.reuse) > 0 {
   405  		// Pick off the last arena chunk from the list.
   406  		n := len(userArenaState.reuse) - 1
   407  		x = userArenaState.reuse[n].x
   408  		s = userArenaState.reuse[n].mspan
   409  		userArenaState.reuse[n].x = nil
   410  		userArenaState.reuse[n].mspan = nil
   411  		userArenaState.reuse = userArenaState.reuse[:n]
   412  	}
   413  	unlock(&userArenaState.lock)
   414  	if s == nil {
   415  		// Allocate a new one.
   416  		x, s = newUserArenaChunk()
   417  		if s == nil {
   418  			throw("out of memory")
   419  		}
   420  	}
   421  	a.refs = append(a.refs, x)
   422  	a.active = s
   423  	return s
   424  }
   425  
   426  type liveUserArenaChunk struct {
   427  	*mspan // Must represent a user arena chunk.
   428  
   429  	// Reference to mspan.base() to keep the chunk alive.
   430  	x unsafe.Pointer
   431  }
   432  
   433  var userArenaState struct {
   434  	lock mutex
   435  
   436  	// reuse contains a list of partially-used and already-live
   437  	// user arena chunks that can be quickly reused for another
   438  	// arena.
   439  	//
   440  	// Protected by lock.
   441  	reuse []liveUserArenaChunk
   442  
   443  	// fault contains full user arena chunks that need to be faulted.
   444  	//
   445  	// Protected by lock.
   446  	fault []liveUserArenaChunk
   447  }
   448  
   449  // userArenaNextFree reserves space in the user arena for an item of the specified
   450  // type. If cap is not -1, this is for an array of cap elements of type typ.
   451  func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer {
   452  	size := typ.Size_
   453  	if cap > 0 {
   454  		if size > ^uintptr(0)/uintptr(cap) {
   455  			// Overflow.
   456  			throw("out of memory")
   457  		}
   458  		size *= uintptr(cap)
   459  	}
   460  	if size == 0 || cap == 0 {
   461  		return unsafe.Pointer(&zerobase)
   462  	}
   463  	if size > userArenaChunkMaxAllocBytes {
   464  		// Redirect allocations that don't fit well into a chunk to be
   465  		// allocated directly from the heap.
   466  		if cap >= 0 {
   467  			return newarray(typ, cap)
   468  		}
   469  		return newobject(typ)
   470  	}
   471  
   472  	// Prevent preemption as we set up the space for a new object.
   473  	//
   474  	// Act like we're allocating.
   475  	mp := acquirem()
   476  	if mp.mallocing != 0 {
   477  		throw("malloc deadlock")
   478  	}
   479  	if mp.gsignal == getg() {
   480  		throw("malloc during signal")
   481  	}
   482  	mp.mallocing = 1
   483  
   484  	var ptr unsafe.Pointer
   485  	if typ.PtrBytes == 0 {
   486  		// Allocate pointer-less objects from the tail end of the chunk.
   487  		v, ok := s.userArenaChunkFree.takeFromBack(size, typ.Align_)
   488  		if ok {
   489  			ptr = unsafe.Pointer(v)
   490  		}
   491  	} else {
   492  		v, ok := s.userArenaChunkFree.takeFromFront(size, typ.Align_)
   493  		if ok {
   494  			ptr = unsafe.Pointer(v)
   495  		}
   496  	}
   497  	if ptr == nil {
   498  		// Failed to allocate.
   499  		mp.mallocing = 0
   500  		releasem(mp)
   501  		return nil
   502  	}
   503  	if s.needzero != 0 {
   504  		throw("arena chunk needs zeroing, but should already be zeroed")
   505  	}
   506  	// Set up heap bitmap and do extra accounting.
   507  	if typ.PtrBytes != 0 {
   508  		if cap >= 0 {
   509  			userArenaHeapBitsSetSliceType(typ, cap, ptr, s)
   510  		} else {
   511  			userArenaHeapBitsSetType(typ, ptr, s)
   512  		}
   513  		c := getMCache(mp)
   514  		if c == nil {
   515  			throw("mallocgc called without a P or outside bootstrapping")
   516  		}
   517  		if cap > 0 {
   518  			c.scanAlloc += size - (typ.Size_ - typ.PtrBytes)
   519  		} else {
   520  			c.scanAlloc += typ.PtrBytes
   521  		}
   522  	}
   523  
   524  	// Ensure that the stores above that initialize ptr's memory to
   525  	// type-safe memory and set the heap bits occur before
   526  	// the caller can make ptr observable to the garbage
   527  	// collector. Otherwise, on weakly ordered machines,
   528  	// the garbage collector could follow ptr,
   529  	// but see uninitialized memory or stale heap bits.
   530  	publicationBarrier()
   531  
   532  	mp.mallocing = 0
   533  	releasem(mp)
   534  
   535  	return ptr
   536  }
   537  
   538  // userArenaHeapBitsSetSliceType is the equivalent of heapBitsSetType but for
   539  // Go slice backing store values allocated in a user arena chunk. It sets up the
   540  // heap bitmap for n consecutive values with type typ allocated at address ptr.
   541  func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, s *mspan) {
   542  	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
   543  	if overflow || n < 0 || mem > maxAlloc {
   544  		panic(plainError("runtime: allocation size out of range"))
   545  	}
   546  	for i := 0; i < n; i++ {
   547  		userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.Size_), s)
   548  	}
   549  }
   550  
   551  // newUserArenaChunk allocates a user arena chunk, which maps to a single
   552  // heap arena and single span. Returns a pointer to the base of the chunk
   553  // (this is really important: we need to keep the chunk alive) and the span.
   554  func newUserArenaChunk() (unsafe.Pointer, *mspan) {
   555  	if gcphase == _GCmarktermination {
   556  		throw("newUserArenaChunk called with gcphase == _GCmarktermination")
   557  	}
   558  
   559  	// Deduct assist credit. Because user arena chunks are modeled as one
   560  	// giant heap object which counts toward heapLive, we're obligated to
   561  	// assist the GC proportionally (and it's worth noting that the arena
   562  	// does represent additional work for the GC, but we also have no idea
   563  	// what that looks like until we actually allocate things into the
   564  	// arena).
   565  	deductAssistCredit(userArenaChunkBytes)
   566  
   567  	// Set mp.mallocing to keep from being preempted by GC.
   568  	mp := acquirem()
   569  	if mp.mallocing != 0 {
   570  		throw("malloc deadlock")
   571  	}
   572  	if mp.gsignal == getg() {
   573  		throw("malloc during signal")
   574  	}
   575  	mp.mallocing = 1
   576  
   577  	// Allocate a new user arena.
   578  	var span *mspan
   579  	systemstack(func() {
   580  		span = mheap_.allocUserArenaChunk()
   581  	})
   582  	if span == nil {
   583  		throw("out of memory")
   584  	}
   585  	x := unsafe.Pointer(span.base())
   586  
   587  	// Allocate black during GC.
   588  	// All slots hold nil so no scanning is needed.
   589  	// This may be racing with GC so do it atomically if there can be
   590  	// a race marking the bit.
   591  	if gcphase != _GCoff {
   592  		gcmarknewobject(span, span.base())
   593  	}
   594  
   595  	if raceenabled {
   596  		// TODO(mknyszek): Track individual objects.
   597  		racemalloc(unsafe.Pointer(span.base()), span.elemsize)
   598  	}
   599  
   600  	if msanenabled {
   601  		// TODO(mknyszek): Track individual objects.
   602  		msanmalloc(unsafe.Pointer(span.base()), span.elemsize)
   603  	}
   604  
   605  	if asanenabled {
   606  		// TODO(mknyszek): Track individual objects.
   607  		rzSize := computeRZlog(span.elemsize)
   608  		span.elemsize -= rzSize
   609  		if goexperiment.AllocHeaders {
   610  			span.largeType.Size_ = span.elemsize
   611  		}
   612  		rzStart := span.base() + span.elemsize
   613  		span.userArenaChunkFree = makeAddrRange(span.base(), rzStart)
   614  		asanpoison(unsafe.Pointer(rzStart), span.limit-rzStart)
   615  		asanunpoison(unsafe.Pointer(span.base()), span.elemsize)
   616  	}
   617  
   618  	if rate := MemProfileRate; rate > 0 {
   619  		c := getMCache(mp)
   620  		if c == nil {
   621  			throw("newUserArenaChunk called without a P or outside bootstrapping")
   622  		}
   623  		// Note cache c only valid while m acquired; see #47302
   624  		if rate != 1 && userArenaChunkBytes < c.nextSample {
   625  			c.nextSample -= userArenaChunkBytes
   626  		} else {
   627  			profilealloc(mp, unsafe.Pointer(span.base()), userArenaChunkBytes)
   628  		}
   629  	}
   630  	mp.mallocing = 0
   631  	releasem(mp)
   632  
   633  	// Again, because this chunk counts toward heapLive, potentially trigger a GC.
   634  	if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
   635  		gcStart(t)
   636  	}
   637  
   638  	if debug.malloc {
   639  		if debug.allocfreetrace != 0 {
   640  			tracealloc(unsafe.Pointer(span.base()), userArenaChunkBytes, nil)
   641  		}
   642  
   643  		if inittrace.active && inittrace.id == getg().goid {
   644  			// Init functions are executed sequentially in a single goroutine.
   645  			inittrace.bytes += uint64(userArenaChunkBytes)
   646  		}
   647  	}
   648  
   649  	// Double-check it's aligned to the physical page size. Based on the current
   650  	// implementation this is trivially true, but it need not be in the future.
   651  	// However, if it's not aligned to the physical page size then we can't properly
   652  	// set it to fault later.
   653  	if uintptr(x)%physPageSize != 0 {
   654  		throw("user arena chunk is not aligned to the physical page size")
   655  	}
   656  
   657  	return x, span
   658  }
   659  
   660  // isUnusedUserArenaChunk indicates that the arena chunk has been set to fault
   661  // and doesn't contain any scannable memory anymore. However, it might still be
   662  // mSpanInUse as it sits on the quarantine list, since it needs to be swept.
   663  //
   664  // This is not safe to execute unless the caller has ownership of the mspan or
   665  // the world is stopped (preemption is prevented while the relevant state changes).
   666  //
   667  // This is really only meant to be used by accounting tests in the runtime to
   668  // distinguish when a span shouldn't be counted (since mSpanInUse might not be
   669  // enough).
   670  func (s *mspan) isUnusedUserArenaChunk() bool {
   671  	return s.isUserArenaChunk && s.spanclass == makeSpanClass(0, true)
   672  }
   673  
   674  // setUserArenaChunkToFault sets the address space for the user arena chunk to fault
   675  // and releases any underlying memory resources.
   676  //
   677  // Must be in a non-preemptible state to ensure the consistency of statistics
   678  // exported to MemStats.
   679  func (s *mspan) setUserArenaChunkToFault() {
   680  	if !s.isUserArenaChunk {
   681  		throw("invalid span in heapArena for user arena")
   682  	}
   683  	if s.npages*pageSize != userArenaChunkBytes {
   684  		throw("span on userArena.faultList has invalid size")
   685  	}
   686  
   687  	// Update the span class to be noscan. What we want to happen is that
   688  	// any pointer into the span keeps it from getting recycled, so we want
   689  	// the mark bit to get set, but we're about to set the address space to fault,
   690  	// so we have to prevent the GC from scanning this memory.
   691  	//
   692  	// It's OK to set it here because (1) a GC isn't in progress, so the scanning code
   693  	// won't make a bad decision, (2) we're currently non-preemptible and in the runtime,
   694  	// so a GC is blocked from starting. We might race with sweeping, which could
   695  	// put it on the "wrong" sweep list, but really don't care because the chunk is
   696  	// treated as a large object span and there's no meaningful difference between scan
   697  	// and noscan large objects in the sweeper. The STW at the start of the GC acts as a
   698  	// barrier for this update.
   699  	s.spanclass = makeSpanClass(0, true)
   700  
   701  	// Actually set the arena chunk to fault, so we'll get dangling pointer errors.
   702  	// sysFault currently uses a method on each OS that forces it to evacuate all
   703  	// memory backing the chunk.
   704  	sysFault(unsafe.Pointer(s.base()), s.npages*pageSize)
   705  
   706  	// Everything on the list is counted as in-use, however sysFault transitions to
   707  	// Reserved, not Prepared, so we skip updating heapFree or heapReleased and just
   708  	// remove the memory from the total altogether; it's just address space now.
   709  	gcController.heapInUse.add(-int64(s.npages * pageSize))
   710  
   711  	// Count this as a free of an object right now as opposed to when
   712  	// the span gets off the quarantine list. The main reason is so that the
   713  	// amount of bytes allocated doesn't exceed how much is counted as
   714  	// "mapped ready," which could cause a deadlock in the pacer.
   715  	gcController.totalFree.Add(int64(s.elemsize))
   716  
   717  	// Update consistent stats to match.
   718  	//
   719  	// We're non-preemptible, so it's safe to update consistent stats (our P
   720  	// won't change out from under us).
   721  	stats := memstats.heapStats.acquire()
   722  	atomic.Xaddint64(&stats.committed, -int64(s.npages*pageSize))
   723  	atomic.Xaddint64(&stats.inHeap, -int64(s.npages*pageSize))
   724  	atomic.Xadd64(&stats.largeFreeCount, 1)
   725  	atomic.Xadd64(&stats.largeFree, int64(s.elemsize))
   726  	memstats.heapStats.release()
   727  
   728  	// This counts as a free, so update heapLive.
   729  	gcController.update(-int64(s.elemsize), 0)
   730  
   731  	// Mark it as free for the race detector.
   732  	if raceenabled {
   733  		racefree(unsafe.Pointer(s.base()), s.elemsize)
   734  	}
   735  
   736  	systemstack(func() {
   737  		// Add the user arena to the quarantine list.
   738  		lock(&mheap_.lock)
   739  		mheap_.userArena.quarantineList.insert(s)
   740  		unlock(&mheap_.lock)
   741  	})
   742  }
   743  
   744  // inUserArenaChunk returns true if p points to a user arena chunk.
   745  func inUserArenaChunk(p uintptr) bool {
   746  	s := spanOf(p)
   747  	if s == nil {
   748  		return false
   749  	}
   750  	return s.isUserArenaChunk
   751  }
   752  
   753  // freeUserArenaChunk releases the user arena represented by s back to the runtime.
   754  //
   755  // x must be a live pointer within s.
   756  //
   757  // The runtime will set the user arena to fault once it's safe (the GC is no longer running)
   758  // and then once the user arena is no longer referenced by the application, will allow it to
   759  // be reused.
   760  func freeUserArenaChunk(s *mspan, x unsafe.Pointer) {
   761  	if !s.isUserArenaChunk {
   762  		throw("span is not for a user arena")
   763  	}
   764  	if s.npages*pageSize != userArenaChunkBytes {
   765  		throw("invalid user arena span size")
   766  	}
   767  
   768  	// Mark the region as free to various sanitizers immediately instead
   769  	// of handling them at sweep time.
   770  	if raceenabled {
   771  		racefree(unsafe.Pointer(s.base()), s.elemsize)
   772  	}
   773  	if msanenabled {
   774  		msanfree(unsafe.Pointer(s.base()), s.elemsize)
   775  	}
   776  	if asanenabled {
   777  		asanpoison(unsafe.Pointer(s.base()), s.elemsize)
   778  	}
   779  
   780  	// Make ourselves non-preemptible as we manipulate state and statistics.
   781  	//
   782  	// Also required by setUserArenaChunkToFault.
   783  	mp := acquirem()
   784  
   785  	// We can only set user arenas to fault if we're in the _GCoff phase.
   786  	if gcphase == _GCoff {
   787  		lock(&userArenaState.lock)
   788  		faultList := userArenaState.fault
   789  		userArenaState.fault = nil
   790  		unlock(&userArenaState.lock)
   791  
   792  		s.setUserArenaChunkToFault()
   793  		for _, lc := range faultList {
   794  			lc.mspan.setUserArenaChunkToFault()
   795  		}
   796  
   797  		// Until the chunks are set to fault, keep them alive via the fault list.
   798  		KeepAlive(x)
   799  		KeepAlive(faultList)
   800  	} else {
   801  		// Put the user arena on the fault list.
   802  		lock(&userArenaState.lock)
   803  		userArenaState.fault = append(userArenaState.fault, liveUserArenaChunk{s, x})
   804  		unlock(&userArenaState.lock)
   805  	}
   806  	releasem(mp)
   807  }
   808  
   809  // allocUserArenaChunk returns a user arena chunk represented as a span, reusing
   810  // one from the ready list if possible and mapping new memory otherwise.
   811  //
   812  // Must be in a non-preemptible state to ensure the consistency of statistics
   813  // exported to MemStats.
   814  //
   815  // Acquires the heap lock. Must run on the system stack for that reason.
   816  //
   817  //go:systemstack
   818  func (h *mheap) allocUserArenaChunk() *mspan {
   819  	var s *mspan
   820  	var base uintptr
   821  
   822  	// First check the free list.
   823  	lock(&h.lock)
   824  	if !h.userArena.readyList.isEmpty() {
   825  		s = h.userArena.readyList.first
   826  		h.userArena.readyList.remove(s)
   827  		base = s.base()
   828  	} else {
   829  		// Free list was empty, so allocate a new arena.
   830  		hintList := &h.userArena.arenaHints
   831  		if raceenabled {
   832  			// In race mode just use the regular heap hints. We might fragment
   833  			// the address space, but the race detector requires that the heap
   834  			// is mapped contiguously.
   835  			hintList = &h.arenaHints
   836  		}
   837  		v, size := h.sysAlloc(userArenaChunkBytes, hintList, false)
   838  		if size%userArenaChunkBytes != 0 {
   839  			throw("sysAlloc size is not divisible by userArenaChunkBytes")
   840  		}
   841  		if size > userArenaChunkBytes {
   842  			// We got more than we asked for. This can happen if
   843  			// heapArenaBytes > userArenaChunkBytes, or if sysAlloc just returns
   844  			// some extra as a result of trying to find an aligned region.
   845  			//
   846  			// Divide it up and put it on the ready list.
   847  			for i := userArenaChunkBytes; i < size; i += userArenaChunkBytes {
   848  				s := h.allocMSpanLocked()
   849  				s.init(uintptr(v)+i, userArenaChunkPages)
   850  				h.userArena.readyList.insertBack(s)
   851  			}
   852  			size = userArenaChunkBytes
   853  		}
   854  		base = uintptr(v)
   855  		if base == 0 {
   856  			// Out of memory.
   857  			unlock(&h.lock)
   858  			return nil
   859  		}
   860  		s = h.allocMSpanLocked()
   861  	}
   862  	unlock(&h.lock)
   863  
   864  	// sysAlloc returns Reserved address space, and any span we're
   865  	// reusing is set to fault (so, also Reserved), so transition
   866  	// it to Prepared and then Ready.
   867  	//
   868  	// Unlike (*mheap).grow, just map in everything that we
   869  	// asked for. We're likely going to use it all.
   870  	sysMap(unsafe.Pointer(base), userArenaChunkBytes, &gcController.heapReleased)
   871  	sysUsed(unsafe.Pointer(base), userArenaChunkBytes, userArenaChunkBytes)
   872  
   873  	// Model the user arena as a heap span for a large object.
   874  	spc := makeSpanClass(0, false)
   875  	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
   876  	s.isUserArenaChunk = true
   877  	s.elemsize -= userArenaChunkReserveBytes()
   878  	s.limit = s.base() + s.elemsize
   879  	s.freeindex = 1
   880  	s.allocCount = 1
   881  
   882  	// Account for this new arena chunk memory.
   883  	gcController.heapInUse.add(int64(userArenaChunkBytes))
   884  	gcController.heapReleased.add(-int64(userArenaChunkBytes))
   885  
   886  	stats := memstats.heapStats.acquire()
   887  	atomic.Xaddint64(&stats.inHeap, int64(userArenaChunkBytes))
   888  	atomic.Xaddint64(&stats.committed, int64(userArenaChunkBytes))
   889  
   890  	// Model the arena as a single large malloc.
   891  	atomic.Xadd64(&stats.largeAlloc, int64(s.elemsize))
   892  	atomic.Xadd64(&stats.largeAllocCount, 1)
   893  	memstats.heapStats.release()
   894  
   895  	// Count the alloc in inconsistent, internal stats.
   896  	gcController.totalAlloc.Add(int64(s.elemsize))
   897  
   898  	// Update heapLive.
   899  	gcController.update(int64(s.elemsize), 0)
   900  
   901  	// This must clear the entire heap bitmap so that it's safe
   902  	// to allocate noscan data without writing anything out.
   903  	s.initHeapBits(true)
   904  
   905  	// Clear the span preemptively. It's an arena chunk, so let's assume
   906  	// everything is going to be used.
   907  	//
   908  	// This also seems to make a massive difference as to whether or
   909  	// not Linux decides to back this memory with transparent huge
   910  	// pages. There's latency involved in this zeroing, but the hugepage
   911  	// gains are almost always worth it. Note: it's important that we
   912  	// clear even if it's freshly mapped and we know there's no point
   913  	// to zeroing as *that* is the critical signal to use huge pages.
   914  	memclrNoHeapPointers(unsafe.Pointer(s.base()), s.elemsize)
   915  	s.needzero = 0
   916  
   917  	s.freeIndexForScan = 1
   918  
   919  	// Set up the range for allocation.
   920  	s.userArenaChunkFree = makeAddrRange(base, base+s.elemsize)
   921  
   922  	// Put the large span in the mcentral swept list so that it's
   923  	// visible to the background sweeper.
   924  	h.central[spc].mcentral.fullSwept(h.sweepgen).push(s)
   925  
   926  	if goexperiment.AllocHeaders {
   927  		// Set up an allocation header. Avoid write barriers here because this type
   928  		// is not a real type, and it exists in an invalid location.
   929  		*(*uintptr)(unsafe.Pointer(&s.largeType)) = uintptr(unsafe.Pointer(s.limit))
   930  		*(*uintptr)(unsafe.Pointer(&s.largeType.GCData)) = s.limit + unsafe.Sizeof(_type{})
   931  		s.largeType.PtrBytes = 0
   932  		s.largeType.Size_ = s.elemsize
   933  	}
   934  	return s
   935  }
   936  
