Source file src/runtime/malloc.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Memory allocator.
     6  //
     7  // This was originally based on tcmalloc, but has diverged quite a bit.
     8  // http://goog-perftools.sourceforge.net/doc/tcmalloc.html
     9  
    10  // The main allocator works in runs of pages.
    11  // Small allocation sizes (up to and including 32 kB) are
    12  // rounded to one of about 70 size classes, each of which
    13  // has its own free set of objects of exactly that size.
    14  // Any free page of memory can be split into a set of objects
    15  // of one size class, which are then managed using a free bitmap.
    16  //
    17  // The allocator's data structures are:
    18  //
    19  //	fixalloc: a free-list allocator for fixed-size off-heap objects,
    20  //		used to manage storage used by the allocator.
    21  //	mheap: the malloc heap, managed at page (8192-byte) granularity.
    22  //	mspan: a run of in-use pages managed by the mheap.
    23  //	mcentral: collects all spans of a given size class.
    24  //	mcache: a per-P cache of mspans with free space.
    25  //	mstats: allocation statistics.
    26  //
    27  // Allocating a small object proceeds up a hierarchy of caches:
    28  //
    29  //	1. Round the size up to one of the small size classes
    30  //	   and look in the corresponding mspan in this P's mcache.
    31  //	   Scan the mspan's free bitmap to find a free slot.
    32  //	   If there is a free slot, allocate it.
    33  //	   This can all be done without acquiring a lock.
    34  //
    35  //	2. If the mspan has no free slots, obtain a new mspan
    36  //	   from the mcentral's list of mspans of the required size
    37  //	   class that have free space.
    38  //	   Obtaining a whole span amortizes the cost of locking
    39  //	   the mcentral.
    40  //
    41  //	3. If the mcentral's mspan list is empty, obtain a run
    42  //	   of pages from the mheap to use for the mspan.
    43  //
    44  //	4. If the mheap is empty or has no page runs large enough,
    45  //	   allocate a new group of pages (at least 1MB) from the
    46  //	   operating system. Allocating a large run of pages
    47  //	   amortizes the cost of talking to the operating system.
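        //
        // For example, a 28-byte allocation is rounded up to the 32-byte size
        // class and is normally satisfied by step 1 from this P's mcache;
        // steps 2-4 come into play only as each cache in turn has no free slots.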
    48  //
    49  // Sweeping an mspan and freeing objects on it proceeds up a similar
    50  // hierarchy:
    51  //
    52  //	1. If the mspan is being swept in response to allocation, it
    53  //	   is returned to the mcache to satisfy the allocation.
    54  //
    55  //	2. Otherwise, if the mspan still has allocated objects in it,
    56  //	   it is placed on the mcentral free list for the mspan's size
    57  //	   class.
    58  //
    59  //	3. Otherwise, if all objects in the mspan are free, the mspan's
    60  //	   pages are returned to the mheap and the mspan is now dead.
    61  //
    62  // Allocating and freeing a large object uses the mheap
    63  // directly, bypassing the mcache and mcentral.
    64  //
    65  // If mspan.needzero is false, then free object slots in the mspan are
    66  // already zeroed. Otherwise if needzero is true, objects are zeroed as
    67  // they are allocated. There are various benefits to delaying zeroing
    68  // this way:
    69  //
    70  //	1. Stack frame allocation can avoid zeroing altogether.
    71  //
    72  //	2. It exhibits better temporal locality, since the program is
    73  //	   probably about to write to the memory.
    74  //
    75  //	3. We don't zero pages that never get reused.
    76  
    77  // Virtual memory layout
    78  //
    79  // The heap consists of a set of arenas, which are 64MB on 64-bit and
    80  // 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
    81  // aligned to the arena size.
    82  //
    83  // Each arena has an associated heapArena object that stores the
    84  // metadata for that arena: the heap bitmap for all words in the arena
    85  // and the span map for all pages in the arena. heapArena objects are
    86  // themselves allocated off-heap.
    87  //
    88  // Since arenas are aligned, the address space can be viewed as a
    89  // series of arena frames. The arena map (mheap_.arenas) maps from
    90  // arena frame number to *heapArena, or nil for parts of the address
    91  // space not backed by the Go heap. The arena map is structured as a
    92  // two-level array consisting of a "L1" arena map and many "L2" arena
    93  // maps; however, since arenas are large, on many architectures, the
    94  // arena map consists of a single, large L2 map.
    95  //
    96  // The arena map covers the entire possible address space, allowing
    97  // the Go heap to use any part of the address space. The allocator
    98  // attempts to keep arenas contiguous so that large spans (and hence
    99  // large objects) can cross arenas.
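        //
        // For example (a sketch; see arenaIndex and the arenaIdx helpers in
        // mheap.go), the metadata for a heap address p is found by:
        //
        //	ai := arenaIndex(p)                    // arena frame number for p
        //	ha := mheap_.arenas[ai.l1()][ai.l2()]  // *heapArena for p's arena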
   100  
   101  package runtime
   102  
   103  import (
   104  	"internal/goarch"
   105  	"internal/goos"
   106  	"runtime/internal/atomic"
   107  	"runtime/internal/math"
   108  	"runtime/internal/sys"
   109  	"unsafe"
   110  )
   111  
   112  const (
   113  	maxTinySize   = _TinySize
   114  	tinySizeClass = _TinySizeClass
   115  	maxSmallSize  = _MaxSmallSize
   116  
   117  	pageShift = _PageShift
   118  	pageSize  = _PageSize
   119  
   120  	concurrentSweep = _ConcurrentSweep
   121  
   122  	_PageSize = 1 << _PageShift
   123  	_PageMask = _PageSize - 1
   124  
   125  	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
   126  	_64bit = 1 << (^uintptr(0) >> 63) / 2
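        	// (On 64-bit, ^uintptr(0)>>63 is 1, so this is 1<<1/2 = 1; on 32-bit
        	// it is 1<<0/2 = 0.)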
   127  
   128  	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
   129  	_TinySize      = 16
   130  	_TinySizeClass = int8(2)
   131  
   132  	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
   133  
   134  	// Per-P, per order stack segment cache size.
   135  	_StackCacheSize = 32 * 1024
   136  
   137  	// Number of orders that get caching. Order 0 is FixedStack
   138  	// and each successive order is twice as large.
   139  	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
   140  	// will be allocated directly.
   141  	// Since FixedStack is different on different systems, we
   142  	// must vary NumStackOrders to keep the same maximum cached size.
   143  	//   OS               | FixedStack | NumStackOrders
   144  	//   -----------------+------------+---------------
   145  	//   linux/darwin/bsd | 2KB        | 4
   146  	//   windows/32       | 4KB        | 3
   147  	//   windows/64       | 8KB        | 2
   148  	//   plan9            | 4KB        | 3
   149  	_NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
   150  
   151  	// heapAddrBits is the number of bits in a heap address. On
   152  	// amd64, addresses are sign-extended beyond heapAddrBits. On
   153  	// other arches, they are zero-extended.
   154  	//
   155  	// On most 64-bit platforms, we limit this to 48 bits based on a
   156  	// combination of hardware and OS limitations.
   157  	//
   158  	// amd64 hardware limits addresses to 48 bits, sign-extended
   159  	// to 64 bits. Addresses where the top 16 bits are not either
   160  	// all 0 or all 1 are "non-canonical" and invalid. Because of
   161  	// these "negative" addresses, we offset addresses by 1<<47
   162  	// (arenaBaseOffset) on amd64 before computing indexes into
   163  	// the heap arenas index. In 2017, amd64 hardware added
   164  	// support for 57 bit addresses; however, currently only Linux
   165  	// supports this extension and the kernel will never choose an
   166  	// address above 1<<47 unless mmap is called with a hint
   167  	// address above 1<<47 (which we never do).
   168  	//
   169  	// arm64 hardware (as of ARMv8) limits user addresses to 48
   170  	// bits, in the range [0, 1<<48).
   171  	//
   172  	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
   173  	// in hardware. On Linux, Go leans on stricter OS limits. Based
   174  	// on Linux's processor.h, the user address space is limited as
   175  	// follows on 64-bit architectures:
   176  	//
   177  	// Architecture  Name              Maximum Value (exclusive)
   178  	// ---------------------------------------------------------------------
   179  	// amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
   180  	// arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
   181  	// ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
   182  	// mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
   183  	// s390x         TASK_SIZE         1<<64 (64 bit addresses)
   184  	//
   185  	// These limits may increase over time, but are currently at
   186  	// most 48 bits except on s390x. On all architectures, Linux
   187  	// starts placing mmap'd regions at addresses that are
   188  	// significantly below 48 bits, so even if it's possible to
   189  	// exceed Go's 48 bit limit, it's extremely unlikely in
   190  	// practice.
   191  	//
   192  	// On 32-bit platforms, we accept the full 32-bit address
   193  	// space because doing so is cheap.
   194  	// mips32 only has access to the low 2GB of virtual memory, so
   195  	// we further limit it to 31 bits.
   196  	//
   197  	// On ios/arm64, although 64-bit pointers are presumably
   198  	// available, pointers are truncated to 33 bits in iOS <14.
   199  	// Furthermore, only the top 4 GiB of the address space are
   200  	// actually available to the application. In iOS >=14, more
   201  	// of the address space is available, and the OS can now
   202  	// provide addresses outside of those 33 bits. Pick 40 bits
   203  	// as a reasonable balance between address space usage by the
   204  	// page allocator, and flexibility for what mmap'd regions
   205  	// we'll accept for the heap. We can't just move to the full
   206  	// 48 bits because this uses too much address space for older
   207  	// iOS versions.
   208  	// TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
   209  	// to a 48-bit address space like every other arm64 platform.
   210  	//
   211  	// WebAssembly currently has a limit of 4GB linear memory.
   212  	heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64
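        	// For example, on linux/amd64 only the first term is non-zero and
        	// heapAddrBits = 48; on js/wasm the middle term gives 32; on
        	// ios/arm64 the last term gives 40.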
   213  
   214  	// maxAlloc is the maximum size of an allocation. On 64-bit,
   215  	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
   216  	// 32-bit, however, this is one less than 1<<32 because the
   217  	// number of bytes in the address space doesn't actually fit
   218  	// in a uintptr.
   219  	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1
   220  
   221  	// The number of bits in a heap address, the size of heap
   222  	// arenas, and the L1 and L2 arena map sizes are related by
   223  	//
   224  	//   (1 << addr bits) = arena size * L1 entries * L2 entries
   225  	//
   226  	// Currently, we balance these as follows:
   227  	//
   228  	//       Platform  Addr bits  Arena size  L1 entries   L2 entries
   229  	// --------------  ---------  ----------  ----------  -----------
   230  	//       */64-bit         48        64MB           1    4M (32MB)
   231  	// windows/64-bit         48         4MB          64    1M  (8MB)
   232  	//      ios/arm64         33         4MB           1  2048  (8KB)
   233  	//       */32-bit         32         4MB           1  1024  (4KB)
   234  	//     */mips(le)         31         4MB           1   512  (2KB)
   235  
   236  	// heapArenaBytes is the size of a heap arena. The heap
   237  	// consists of mappings of size heapArenaBytes, aligned to
   238  	// heapArenaBytes. The initial heap mapping is one arena.
   239  	//
   240  	// This is currently 64MB on 64-bit non-Windows and 4MB on
   241  	// 32-bit and on Windows. We use smaller arenas on Windows
   242  	// because all committed memory is charged to the process,
   243  	// even if it's not touched. Hence, for processes with small
   244  	// heaps, the mapped arena space needs to be commensurate.
   245  	// This is particularly important with the race detector,
   246  	// since it significantly amplifies the cost of committed
   247  	// memory.
   248  	heapArenaBytes = 1 << logHeapArenaBytes
   249  
   250  	heapArenaWords = heapArenaBytes / goarch.PtrSize
   251  
   252  	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
   253  	// prefer using heapArenaBytes where possible (we need the
   254  	// constant to compute some other constants).
   255  	logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
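        	// For example, on linux/amd64 only the first term applies:
        	// logHeapArenaBytes = 6+20 = 26, i.e. 64MB arenas. On windows/amd64,
        	// wasm, ios/arm64, and 32-bit platforms the other terms give
        	// 2+20 = 22, i.e. 4MB arenas.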
   256  
   257  	// heapArenaBitmapWords is the size of each heap arena's bitmap in uintptrs.
   258  	heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)
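        	// For example, with 64MB arenas and 8-byte words this is
        	// (64MB/8)/(8*8) = 128Ki uintptrs (1MB of bitmap per arena),
        	// i.e. one bitmap bit per heap word.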
   259  
   260  	pagesPerArena = heapArenaBytes / pageSize
   261  
   262  	// arenaL1Bits is the number of bits of the arena number
   263  	// covered by the first level arena map.
   264  	//
   265  	// This number should be small, since the first level arena
   266  	// map requires PtrSize*(1<<arenaL1Bits) of space in the
   267  	// binary's BSS. It can be zero, in which case the first level
   268  	// index is effectively unused. There is a performance benefit
   269  	// to this, since the generated code can be more efficient,
   270  	// but it comes at the cost of having a large L2 mapping.
   271  	//
   272  	// We use the L1 map on 64-bit Windows because the arena size
   273  	// is small, but the address space is still 48 bits, and
   274  	// there's a high cost to having a large L2.
   275  	arenaL1Bits = 6 * (_64bit * goos.IsWindows)
   276  
   277  	// arenaL2Bits is the number of bits of the arena number
   278  	// covered by the second level arena index.
   279  	//
   280  	// The size of each arena map allocation is proportional to
   281  	// 1<<arenaL2Bits, so it's important that this not be too
   282  	// large. 48 bits leads to 32MB arena index allocations, which
   283  	// is about the practical threshold.
   284  	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits
   285  
   286  	// arenaL1Shift is the number of bits to shift an arena frame
   287  	// number by to compute an index into the first level arena map.
   288  	arenaL1Shift = arenaL2Bits
   289  
   290  	// arenaBits is the total bits in a combined arena map index.
   291  	// This is split between the index into the L1 arena map and
   292  	// the L2 arena map.
   293  	arenaBits = arenaL1Bits + arenaL2Bits
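        	// For example, on linux/amd64 arenaL1Bits is 0 and arenaL2Bits is
        	// 48-26 = 22, so the single L2 map has 1<<22 = 4M entries (32MB of
        	// pointers), matching the table above.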
   294  
   295  	// arenaBaseOffset is the pointer value that corresponds to
   296  	// index 0 in the heap arena map.
   297  	//
   298  	// On amd64, the address space is 48 bits, sign extended to 64
   299  	// bits. This offset lets us handle "negative" addresses (or
   300  	// high addresses if viewed as unsigned).
   301  	//
   302  	// On aix/ppc64, this offset allows keeping heapAddrBits at
   303  	// 48. Otherwise, it would have to be 60 in order to handle mmap
   304  	// addresses (in range 0x0a00000000000000 - 0x0afffffffffffff), but
   305  	// in that case the memory reserved in (s *pageAlloc).init for
   306  	// chunks would cause significant slowdowns.
   307  	//
   308  	// On other platforms, the user address space is contiguous
   309  	// and starts at 0, so no offset is necessary.
   310  	arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
   311  	// A typed version of this constant that will make it into DWARF (for viewcore).
   312  	arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
   313  
   314  	// Max number of threads to run garbage collection.
   315  	// 2, 3, and 4 are all plausible maximums depending
   316  	// on the hardware details of the machine. The garbage
   317  	// collector scales well to 32 cpus.
   318  	_MaxGcproc = 32
   319  
   320  	// minLegalPointer is the smallest possible legal pointer.
   321  	// This is the smallest possible architectural page size,
   322  	// since we assume that the first page is never mapped.
   323  	//
   324  	// This should agree with minZeroPage in the compiler.
   325  	minLegalPointer uintptr = 4096
   326  )
   327  
   328  // physPageSize is the size in bytes of the OS's physical pages.
   329  // Mapping and unmapping operations must be done at multiples of
   330  // physPageSize.
   331  //
   332  // This must be set by the OS init code (typically in osinit) before
   333  // mallocinit.
   334  var physPageSize uintptr
   335  
   336  // physHugePageSize is the size in bytes of the OS's default physical huge
   337  // page size whose allocation is opaque to the application. It is assumed
   338  // and verified to be a power of two.
   339  //
   340  // If set, this must be set by the OS init code (typically in osinit) before
   341  // mallocinit. However, setting it at all is optional, and leaving the default
   342  // value is always safe (though potentially less efficient).
   343  //
   344  // Since physHugePageSize is always assumed to be a power of two,
   345  // physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
   346  // The purpose of physHugePageShift is to avoid doing divisions in
   347  // performance critical functions.
   348  var (
   349  	physHugePageSize  uintptr
   350  	physHugePageShift uint
   351  )
   352  
   353  func mallocinit() {
   354  	if class_to_size[_TinySizeClass] != _TinySize {
   355  		throw("bad TinySizeClass")
   356  	}
   357  
   358  	if heapArenaBitmapWords&(heapArenaBitmapWords-1) != 0 {
   359  		// heapBits expects modular arithmetic on bitmap
   360  		// addresses to work.
   361  		throw("heapArenaBitmapWords not a power of 2")
   362  	}
   363  
   364  	// Check physPageSize.
   365  	if physPageSize == 0 {
   366  		// The OS init code failed to fetch the physical page size.
   367  		throw("failed to get system page size")
   368  	}
   369  	if physPageSize > maxPhysPageSize {
   370  		print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
   371  		throw("bad system page size")
   372  	}
   373  	if physPageSize < minPhysPageSize {
   374  		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
   375  		throw("bad system page size")
   376  	}
   377  	if physPageSize&(physPageSize-1) != 0 {
   378  		print("system page size (", physPageSize, ") must be a power of 2\n")
   379  		throw("bad system page size")
   380  	}
   381  	if physHugePageSize&(physHugePageSize-1) != 0 {
   382  		print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
   383  		throw("bad system huge page size")
   384  	}
   385  	if physHugePageSize > maxPhysHugePageSize {
   386  		// physHugePageSize is greater than the maximum supported huge page size.
   387  		// Don't throw here, as in the other cases, since a system configured
   388  		// in this way isn't wrong; we just don't have the code to support it.
   389  		// Instead, silently set the huge page size to zero.
   390  		physHugePageSize = 0
   391  	}
   392  	if physHugePageSize != 0 {
   393  		// Since physHugePageSize is a power of 2, it suffices to increase
   394  		// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
   395  		for 1<<physHugePageShift != physHugePageSize {
   396  			physHugePageShift++
   397  		}
   398  	}
   399  	if pagesPerArena%pagesPerSpanRoot != 0 {
   400  		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
   401  		throw("bad pagesPerSpanRoot")
   402  	}
   403  	if pagesPerArena%pagesPerReclaimerChunk != 0 {
   404  		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
   405  		throw("bad pagesPerReclaimerChunk")
   406  	}
   407  
   408  	// Initialize the heap.
   409  	mheap_.init()
   410  	mcache0 = allocmcache()
   411  	lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas)
   412  	lockInit(&profInsertLock, lockRankProfInsert)
   413  	lockInit(&profBlockLock, lockRankProfBlock)
   414  	lockInit(&profMemActiveLock, lockRankProfMemActive)
   415  	for i := range profMemFutureLock {
   416  		lockInit(&profMemFutureLock[i], lockRankProfMemFuture)
   417  	}
   418  	lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
   419  
   420  	// Create initial arena growth hints.
   421  	if goarch.PtrSize == 8 {
   422  		// On a 64-bit machine, we pick the following hints
   423  		// because:
   424  		//
   425  		// 1. Starting from the middle of the address space
   426  		// makes it easier to grow out a contiguous range
   427  		// without running into some other mapping.
   428  		//
   429  		// 2. This makes Go heap addresses more easily
   430  		// recognizable when debugging.
   431  		//
   432  		// 3. Stack scanning in gccgo is still conservative,
   433  		// so it's important that addresses be distinguishable
   434  		// from other data.
   435  		//
   436  		// Starting at 0x00c0 means that the valid memory addresses
   437  		// will begin with 0x00c0, 0x00c1, ...
   438  		// In little-endian, that's c0 00, c1 00, ... None of those are valid
   439  		// UTF-8 sequences, and they are otherwise as far away from
   440  		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
   441  		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
   442  		// on OS X during thread allocations.  0x00c0 causes conflicts with
   443  		// AddressSanitizer which reserves all memory up to 0x0100.
   444  		// These choices reduce the odds of a conservative garbage collector
   445  		// not collecting memory because some non-pointer block of memory
   446  		// had a bit pattern that matched a memory address.
   447  		//
   448  		// However, on arm64, we ignore all this advice above and slam the
   449  		// allocation at 0x40 << 32 because when using 4k pages with 3-level
   450  		// translation buffers, the user address space is limited to 39 bits.
   451  		// On ios/arm64, the address space is even smaller.
   452  		//
   453  		// On AIX, mmap starts at 0x0A00000000000000 for 64-bit
   454  		// processes.
   455  		//
   456  		// Space mapped for user arenas comes immediately after the range
   457  		// originally reserved for the regular heap when race mode is not
   458  		// enabled because user arena chunks can never be used for regular heap
   459  		// allocations and we want to avoid fragmenting the address space.
   460  		//
   461  		// In race mode we have no choice but to just use the same hints because
   462  		// the race detector requires that the heap be mapped contiguously.
   463  		for i := 0x7f; i >= 0; i-- {
   464  			var p uintptr
   465  			switch {
   466  			case raceenabled:
   467  				// The TSAN runtime requires the heap
   468  				// to be in the range [0x00c000000000,
   469  				// 0x00e000000000).
   470  				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
   471  				if p >= uintptrMask&0x00e000000000 {
   472  					continue
   473  				}
   474  			case GOARCH == "arm64" && GOOS == "ios":
   475  				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
   476  			case GOARCH == "arm64":
   477  				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
   478  			case GOOS == "aix":
   479  				if i == 0 {
   480  					// We don't use addresses directly after 0x0A00000000000000
   481  					// to avoid collisions with other mmaps done by non-Go programs.
   482  					continue
   483  				}
   484  				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
   485  			default:
   486  				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
   487  			}
   488  			// Switch to generating hints for user arenas if we've gone
   489  			// through about half the hints. In race mode, take only about
   490  			// a quarter; we don't have very much space to work with.
   491  			hintList := &mheap_.arenaHints
   492  			if (!raceenabled && i > 0x3f) || (raceenabled && i > 0x5f) {
   493  				hintList = &mheap_.userArena.arenaHints
   494  			}
   495  			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   496  			hint.addr = p
   497  			hint.next, *hintList = *hintList, hint
   498  		}
   499  	} else {
   500  		// On a 32-bit machine, we're much more concerned
   501  		// about keeping the usable heap contiguous.
   502  		// Hence:
   503  		//
   504  		// 1. We reserve space for all heapArenas up front so
   505  		// they don't get interleaved with the heap. They're
   506  		// ~258MB, so this isn't too bad. (We could reserve a
   507  		// smaller amount of space up front if this is a
   508  		// problem.)
   509  		//
   510  		// 2. We hint the heap to start right above the end of
   511  		// the binary so we have the best chance of keeping it
   512  		// contiguous.
   513  		//
   514  		// 3. We try to stake out a reasonably large initial
   515  		// heap reservation.
   516  
   517  		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
   518  		meta := uintptr(sysReserve(nil, arenaMetaSize))
   519  		if meta != 0 {
   520  			mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
   521  		}
   522  
   523  		// We want to start the arena low, but if we're linked
   524  		// against C code, it's possible global constructors
   525  		// have called malloc and adjusted the process' brk.
   526  		// Query the brk so we can avoid trying to map the
   527  		// region over it (which will cause the kernel to put
   528  		// the region somewhere else, likely at a high
   529  		// address).
   530  		procBrk := sbrk0()
   531  
   532  		// If we ask for the end of the data segment but the
   533  		// operating system requires a little more space
   534  		// before we can start allocating, it will give out a
   535  		// slightly higher pointer. Except QEMU, which is
   536  		// buggy, as usual: it won't adjust the pointer
   537  		// upward. So adjust it upward a little bit ourselves:
   538  		// 1/4 MB to get away from the running binary image.
   539  		p := firstmoduledata.end
   540  		if p < procBrk {
   541  			p = procBrk
   542  		}
   543  		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
   544  			p = mheap_.heapArenaAlloc.end
   545  		}
   546  		p = alignUp(p+(256<<10), heapArenaBytes)
   547  		// Because we're worried about fragmentation on
   548  		// 32-bit, we try to make a large initial reservation.
   549  		arenaSizes := []uintptr{
   550  			512 << 20,
   551  			256 << 20,
   552  			128 << 20,
   553  		}
   554  		for _, arenaSize := range arenaSizes {
   555  			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
   556  			if a != nil {
   557  				mheap_.arena.init(uintptr(a), size, false)
   558  				p = mheap_.arena.end // For hint below
   559  				break
   560  			}
   561  		}
   562  		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   563  		hint.addr = p
   564  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   565  
   566  		// Place the hint for user arenas just after the large reservation.
   567  		//
   568  		// While this potentially competes with the hint above, in practice we probably
   569  		// aren't going to be getting this far anyway on 32-bit platforms.
   570  		userArenaHint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
   571  		userArenaHint.addr = p
   572  		userArenaHint.next, mheap_.userArena.arenaHints = mheap_.userArena.arenaHints, userArenaHint
   573  	}
   574  }
   575  
   576  // sysAlloc allocates heap arena space for at least n bytes. The
   577  // returned pointer is always heapArenaBytes-aligned and backed by
   578  // h.arenas metadata. The returned size is always a multiple of
   579  // heapArenaBytes. sysAlloc returns nil on failure.
   580  // There is no corresponding free function.
   581  //
   582  // hintList is a list of hint addresses for where to allocate new
   583  // heap arenas. It must be non-nil.
   584  //
   585  // register indicates whether the heap arena should be registered
   586  // in allArenas.
   587  //
   588  // sysAlloc returns a memory region in the Reserved state. This region must
   589  // be transitioned to Prepared and then Ready before use.
   590  //
   591  // h must be locked.
   592  func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, register bool) (v unsafe.Pointer, size uintptr) {
   593  	assertLockHeld(&h.lock)
   594  
   595  	n = alignUp(n, heapArenaBytes)
   596  
   597  	if hintList == &h.arenaHints {
   598  		// First, try the arena pre-reservation.
   599  		// Newly-used mappings are considered released.
   600  		//
   601  		// Only do this if we're using the regular heap arena hints.
   602  		// This behavior is only for the heap.
   603  		v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased)
   604  		if v != nil {
   605  			size = n
   606  			goto mapped
   607  		}
   608  	}
   609  
   610  	// Try to grow the heap at a hint address.
   611  	for *hintList != nil {
   612  		hint := *hintList
   613  		p := hint.addr
   614  		if hint.down {
   615  			p -= n
   616  		}
   617  		if p+n < p {
   618  			// We can't use this, so don't ask.
   619  			v = nil
   620  		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
   621  			// Outside addressable heap. Can't use.
   622  			v = nil
   623  		} else {
   624  			v = sysReserve(unsafe.Pointer(p), n)
   625  		}
   626  		if p == uintptr(v) {
   627  			// Success. Update the hint.
   628  			if !hint.down {
   629  				p += n
   630  			}
   631  			hint.addr = p
   632  			size = n
   633  			break
   634  		}
   635  		// Failed. Discard this hint and try the next.
   636  		//
   637  		// TODO: This would be cleaner if sysReserve could be
   638  		// told to only return the requested address. In
   639  		// particular, this is already how Windows behaves, so
   640  		// it would simplify things there.
   641  		if v != nil {
   642  			sysFreeOS(v, n)
   643  		}
   644  		*hintList = hint.next
   645  		h.arenaHintAlloc.free(unsafe.Pointer(hint))
   646  	}
   647  
   648  	if size == 0 {
   649  		if raceenabled {
   650  			// The race detector assumes the heap lives in
   651  			// [0x00c000000000, 0x00e000000000), but we
   652  			// just ran out of hints in this region. Give
   653  			// a nice failure.
   654  			throw("too many address space collisions for -race mode")
   655  		}
   656  
   657  		// All of the hints failed, so we'll take any
   658  		// (sufficiently aligned) address the kernel will give
   659  		// us.
   660  		v, size = sysReserveAligned(nil, n, heapArenaBytes)
   661  		if v == nil {
   662  			return nil, 0
   663  		}
   664  
   665  		// Create new hints for extending this region.
   666  		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
   667  		hint.addr, hint.down = uintptr(v), true
   668  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   669  		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
   670  		hint.addr = uintptr(v) + size
   671  		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
   672  	}
   673  
   674  	// Check for bad pointers or pointers we can't use.
   675  	{
   676  		var bad string
   677  		p := uintptr(v)
   678  		if p+size < p {
   679  			bad = "region exceeds uintptr range"
   680  		} else if arenaIndex(p) >= 1<<arenaBits {
   681  			bad = "base outside usable address space"
   682  		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
   683  			bad = "end outside usable address space"
   684  		}
   685  		if bad != "" {
   686  			// This should be impossible on most architectures,
   687  			// but it would be really confusing to debug.
   688  			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
   689  			throw("memory reservation exceeds address space limit")
   690  		}
   691  	}
   692  
   693  	if uintptr(v)&(heapArenaBytes-1) != 0 {
   694  		throw("misrounded allocation in sysAlloc")
   695  	}
   696  
   697  mapped:
   698  	// Create arena metadata.
   699  	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
   700  		l2 := h.arenas[ri.l1()]
   701  		if l2 == nil {
   702  			// Allocate an L2 arena map.
   703  			//
   704  			// Use sysAllocOS instead of sysAlloc or persistentalloc because there's no
   705  			// statistic we can comfortably account for this space in. With this structure,
   706  			// we rely on demand paging to avoid large overheads, but tracking which memory
   707  			// is paged in is too expensive. Trying to account for the whole region means
   708  			// that it will appear like an enormous memory overhead in statistics, even though
   709  			// it is not.
   710  			l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2)))
   711  			if l2 == nil {
   712  				throw("out of memory allocating heap arena map")
   713  			}
   714  			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
   715  		}
   716  
   717  		if l2[ri.l2()] != nil {
   718  			throw("arena already initialized")
   719  		}
   720  		var r *heapArena
   721  		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
   722  		if r == nil {
   723  			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
   724  			if r == nil {
   725  				throw("out of memory allocating heap arena metadata")
   726  			}
   727  		}
   728  
   729  		// Register the arena in allArenas if requested.
   730  		if register {
   731  			if len(h.allArenas) == cap(h.allArenas) {
   732  				size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
   733  				if size == 0 {
   734  					size = physPageSize
   735  				}
   736  				newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
   737  				if newArray == nil {
   738  					throw("out of memory allocating allArenas")
   739  				}
   740  				oldSlice := h.allArenas
   741  				*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
   742  				copy(h.allArenas, oldSlice)
   743  				// Do not free the old backing array because
   744  				// there may be concurrent readers. Since we
   745  				// double the array each time, this can lead
   746  				// to at most 2x waste.
   747  			}
   748  			h.allArenas = h.allArenas[:len(h.allArenas)+1]
   749  			h.allArenas[len(h.allArenas)-1] = ri
   750  		}
   751  
   752  		// Store atomically just in case an object from the
   753  		// new heap arena becomes visible before the heap lock
   754  		// is released (which shouldn't happen, but there's
   755  		// little downside to this).
   756  		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
   757  	}
   758  
   759  	// Tell the race detector about the new heap memory.
   760  	if raceenabled {
   761  		racemapshadow(v, size)
   762  	}
   763  
   764  	return
   765  }
   766  
   767  // sysReserveAligned is like sysReserve, but the returned pointer is
   768  // aligned to align bytes. It may reserve either n or n+align bytes,
   769  // so it returns the size that was reserved.
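        //
        // For example, a reservation of heapArenaBytes aligned to heapArenaBytes
        // typically reserves twice that and trims the unaligned head and tail;
        // on Windows, where parts of a reservation can't be released, the whole
        // region is released and the aligned sub-region re-reserved instead.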
   770  func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
   771  	// Since the alignment is rather large in uses of this
   772  	// function, we're not likely to get it by chance, so we ask
   773  	// for a larger region and remove the parts we don't need.
   774  	retries := 0
   775  retry:
   776  	p := uintptr(sysReserve(v, size+align))
   777  	switch {
   778  	case p == 0:
   779  		return nil, 0
   780  	case p&(align-1) == 0:
   781  		return unsafe.Pointer(p), size + align
   782  	case GOOS == "windows":
   783  		// On Windows we can't release pieces of a
   784  		// reservation, so we release the whole thing and
   785  		// re-reserve the aligned sub-region. This may race,
   786  		// so we may have to try again.
   787  		sysFreeOS(unsafe.Pointer(p), size+align)
   788  		p = alignUp(p, align)
   789  		p2 := sysReserve(unsafe.Pointer(p), size)
   790  		if p != uintptr(p2) {
   791  			// Must have raced. Try again.
   792  			sysFreeOS(p2, size)
   793  			if retries++; retries == 100 {
   794  				throw("failed to allocate aligned heap memory; too many retries")
   795  			}
   796  			goto retry
   797  		}
   798  		// Success.
   799  		return p2, size
   800  	default:
   801  		// Trim off the unaligned parts.
   802  		pAligned := alignUp(p, align)
   803  		sysFreeOS(unsafe.Pointer(p), pAligned-p)
   804  		end := pAligned + size
   805  		endLen := (p + size + align) - end
   806  		if endLen > 0 {
   807  			sysFreeOS(unsafe.Pointer(end), endLen)
   808  		}
   809  		return unsafe.Pointer(pAligned), size
   810  	}
   811  }
   812  
   813  // base address for all 0-byte allocations
   814  var zerobase uintptr
   815  
   816  // nextFreeFast returns the next free object if one is quickly available.
   817  // Otherwise it returns 0.
   818  func nextFreeFast(s *mspan) gclinkptr {
   819  	theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache?
   820  	if theBit < 64 {
   821  		result := s.freeindex + uintptr(theBit)
   822  		if result < s.nelems {
   823  			freeidx := result + 1
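        			// A freeidx that is a multiple of 64 (and not the end of the
        			// span) means the 64-bit allocCache window is exhausted; bail
        			// out to the slow path (nextFree), which refills the cache.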
   824  			if freeidx%64 == 0 && freeidx != s.nelems {
   825  				return 0
   826  			}
   827  			s.allocCache >>= uint(theBit + 1)
   828  			s.freeindex = freeidx
   829  			s.allocCount++
   830  			return gclinkptr(result*s.elemsize + s.base())
   831  		}
   832  	}
   833  	return 0
   834  }
   835  
   836  // nextFree returns the next free object from the cached span if one is available.
   837  // Otherwise it refills the cache with a span with an available object and
   838  // returns that object along with a flag indicating that this was a heavyweight
   839  // allocation. If it is a heavyweight allocation, the caller must
   840  // determine whether a new GC cycle needs to be started or if the GC is active
   841  // whether this goroutine needs to assist the GC.
   842  //
   843  // Must run in a non-preemptible context since otherwise the owner of
   844  // c could change.
   845  func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
   846  	s = c.alloc[spc]
   847  	shouldhelpgc = false
   848  	freeIndex := s.nextFreeIndex()
   849  	if freeIndex == s.nelems {
   850  		// The span is full.
   851  		if uintptr(s.allocCount) != s.nelems {
   852  			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
   853  			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
   854  		}
   855  		c.refill(spc)
   856  		shouldhelpgc = true
   857  		s = c.alloc[spc]
   858  
   859  		freeIndex = s.nextFreeIndex()
   860  	}
   861  
   862  	if freeIndex >= s.nelems {
   863  		throw("freeIndex is not valid")
   864  	}
   865  
   866  	v = gclinkptr(freeIndex*s.elemsize + s.base())
   867  	s.allocCount++
   868  	if uintptr(s.allocCount) > s.nelems {
   869  		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
   870  		throw("s.allocCount > s.nelems")
   871  	}
   872  	return
   873  }
   874  
   875  // Allocate an object of size bytes.
   876  // Small objects are allocated from the per-P cache's free lists.
   877  // Large objects (> 32 kB) are allocated straight from the heap.
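        //
        // mallocgc is the common entry point for heap allocation: new(T), via
        // newobject (below), and the other allocation helpers in the runtime
        // all funnel into it.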
   878  func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
   879  	if gcphase == _GCmarktermination {
   880  		throw("mallocgc called with gcphase == _GCmarktermination")
   881  	}
   882  
   883  	if size == 0 {
   884  		return unsafe.Pointer(&zerobase)
   885  	}
   886  
   887  	// It's possible for any malloc to trigger sweeping, which may in
   888  	// turn queue finalizers. Record this dynamic lock edge.
   889  	lockRankMayQueueFinalizer()
   890  
   891  	userSize := size
   892  	if asanenabled {
   893  		// In the ASAN runtime library, the malloc() function allocates extra memory,
   894  		// the redzone, around the user-requested memory region, and the redzones are
   895  		// marked as unaddressable. We perform the same operation in Go to detect
   896  		// overflows and underflows.
   897  		size += computeRZlog(size)
   898  	}
   899  
   900  	if debug.malloc {
   901  		if debug.sbrk != 0 {
   902  			align := uintptr(16)
   903  			if typ != nil {
   904  				// TODO(austin): This should be just
   905  				//   align = uintptr(typ.align)
   906  				// but that's only 4 on 32-bit platforms,
   907  				// even if there's a uint64 field in typ (see #599).
   908  				// This causes 64-bit atomic accesses to panic.
   909  				// Hence, we use stricter alignment that matches
   910  				// the normal allocator better.
   911  				if size&7 == 0 {
   912  					align = 8
   913  				} else if size&3 == 0 {
   914  					align = 4
   915  				} else if size&1 == 0 {
   916  					align = 2
   917  				} else {
   918  					align = 1
   919  				}
   920  			}
   921  			return persistentalloc(size, align, &memstats.other_sys)
   922  		}
   923  
   924  		if inittrace.active && inittrace.id == getg().goid {
   925  			// Init functions are executed sequentially in a single goroutine.
   926  			inittrace.allocs += 1
   927  		}
   928  	}
   929  
   930  	// assistG is the G to charge for this allocation, or nil if
   931  	// GC is not currently active.
   932  	assistG := deductAssistCredit(size)
   933  
   934  	// Set mp.mallocing to keep from being preempted by GC.
   935  	mp := acquirem()
   936  	if mp.mallocing != 0 {
   937  		throw("malloc deadlock")
   938  	}
   939  	if mp.gsignal == getg() {
   940  		throw("malloc during signal")
   941  	}
   942  	mp.mallocing = 1
   943  
   944  	shouldhelpgc := false
   945  	dataSize := userSize
   946  	c := getMCache(mp)
   947  	if c == nil {
   948  		throw("mallocgc called without a P or outside bootstrapping")
   949  	}
   950  	var span *mspan
   951  	var x unsafe.Pointer
   952  	noscan := typ == nil || typ.ptrdata == 0
   953  	// In some cases block zeroing can profitably (for latency reduction purposes)
   954  	// be delayed till preemption is possible; delayedZeroing tracks that state.
   955  	delayedZeroing := false
   956  	if size <= maxSmallSize {
   957  		if noscan && size < maxTinySize {
   958  			// Tiny allocator.
   959  			//
   960  			// Tiny allocator combines several tiny allocation requests
   961  			// into a single memory block. The resulting memory block
   962  			// is freed when all subobjects are unreachable. The subobjects
   963  			// must be noscan (don't have pointers); this ensures that
   964  			// the amount of potentially wasted memory is bounded.
   965  			//
   966  			// Size of the memory block used for combining (maxTinySize) is tunable.
   967  			// Current setting is 16 bytes, which relates to 2x worst case memory
   968  			// wastage (when all but one of the subobjects are unreachable).
   969  			// 8 bytes would result in no wastage at all, but provides less
   970  			// opportunities for combining.
   971  			// 32 bytes provides more opportunities for combining,
   972  			// but can lead to 4x worst case wastage.
   973  			// The best case gain is 8x regardless of block size.
   974  			//
   975  			// Objects obtained from tiny allocator must not be freed explicitly.
   976  			// So when an object will be freed explicitly, we ensure that
   977  			// its size >= maxTinySize.
   978  			//
   979  			// SetFinalizer has a special case for objects potentially coming
   980  			// from the tiny allocator; in such a case it allows setting finalizers
   981  			// for an inner byte of a memory block.
   982  			//
   983  			// The main targets of tiny allocator are small strings and
   984  			// standalone escaping variables. On a json benchmark
   985  			// the allocator reduces number of allocations by ~12% and
   986  			// reduces heap size by ~20%.
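        			//
        			// For example, an 8-byte noscan allocation followed by a
        			// 4-byte one can both be served from a single 16-byte tiny
        			// block: the first at offset 0 and the second at offset 8,
        			// after the alignment adjustment below.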
   987  			off := c.tinyoffset
   988  			// Align tiny pointer for required (conservative) alignment.
   989  			if size&7 == 0 {
   990  				off = alignUp(off, 8)
   991  			} else if goarch.PtrSize == 4 && size == 12 {
   992  				// Conservatively align 12-byte objects to 8 bytes on 32-bit
   993  				// systems so that objects whose first field is a 64-bit
   994  				// value are aligned to 8 bytes and do not cause a fault on
   995  				// atomic access. See issue 37262.
   996  				// TODO(mknyszek): Remove this workaround if/when issue 36606
   997  				// is resolved.
   998  				off = alignUp(off, 8)
   999  			} else if size&3 == 0 {
  1000  				off = alignUp(off, 4)
  1001  			} else if size&1 == 0 {
  1002  				off = alignUp(off, 2)
  1003  			}
  1004  			if off+size <= maxTinySize && c.tiny != 0 {
  1005  				// The object fits into existing tiny block.
  1006  				x = unsafe.Pointer(c.tiny + off)
  1007  				c.tinyoffset = off + size
  1008  				c.tinyAllocs++
  1009  				mp.mallocing = 0
  1010  				releasem(mp)
  1011  				return x
  1012  			}
  1013  			// Allocate a new maxTinySize block.
  1014  			span = c.alloc[tinySpanClass]
  1015  			v := nextFreeFast(span)
  1016  			if v == 0 {
  1017  				v, span, shouldhelpgc = c.nextFree(tinySpanClass)
  1018  			}
  1019  			x = unsafe.Pointer(v)
  1020  			(*[2]uint64)(x)[0] = 0
  1021  			(*[2]uint64)(x)[1] = 0
  1022  			// See if we need to replace the existing tiny block with the new one
  1023  			// based on amount of remaining free space.
  1024  			if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
  1025  				// Note: disabled when race detector is on, see comment near end of this function.
  1026  				c.tiny = uintptr(x)
  1027  				c.tinyoffset = size
  1028  			}
  1029  			size = maxTinySize
  1030  		} else {
  1031  			var sizeclass uint8
  1032  			if size <= smallSizeMax-8 {
  1033  				sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
  1034  			} else {
  1035  				sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
  1036  			}
  1037  			size = uintptr(class_to_size[sizeclass])
  1038  			spc := makeSpanClass(sizeclass, noscan)
  1039  			span = c.alloc[spc]
  1040  			v := nextFreeFast(span)
  1041  			if v == 0 {
  1042  				v, span, shouldhelpgc = c.nextFree(spc)
  1043  			}
  1044  			x = unsafe.Pointer(v)
  1045  			if needzero && span.needzero != 0 {
  1046  				memclrNoHeapPointers(x, size)
  1047  			}
  1048  		}
  1049  	} else {
  1050  		shouldhelpgc = true
  1051  		// For large allocations, keep track of zeroed state so that
  1052  		// bulk zeroing can happen later in a preemptible context.
  1053  		span = c.allocLarge(size, noscan)
  1054  		span.freeindex = 1
  1055  		span.allocCount = 1
  1056  		size = span.elemsize
  1057  		x = unsafe.Pointer(span.base())
  1058  		if needzero && span.needzero != 0 {
  1059  			if noscan {
  1060  				delayedZeroing = true
  1061  			} else {
  1062  				memclrNoHeapPointers(x, size)
  1063  				// We've in theory cleared almost the whole span here,
  1064  				// and could take the extra step of actually clearing
  1065  				// the whole thing. However, don't. Any GC bits for the
  1066  				// uncleared parts will be zero, and it's just going to
  1067  				// be needzero = 1 once freed anyway.
  1068  			}
  1069  		}
  1070  	}
  1071  
  1072  	if !noscan {
  1073  		var scanSize uintptr
  1074  		heapBitsSetType(uintptr(x), size, dataSize, typ)
  1075  		if dataSize > typ.size {
  1076  			// Array allocation. If there are any
  1077  			// pointers, GC has to scan to the last
  1078  			// element.
  1079  			if typ.ptrdata != 0 {
  1080  				scanSize = dataSize - typ.size + typ.ptrdata
  1081  			}
  1082  		} else {
  1083  			scanSize = typ.ptrdata
  1084  		}
  1085  		c.scanAlloc += scanSize
  1086  	}
  1087  
  1088  	// Ensure that the stores above that initialize x to
  1089  	// type-safe memory and set the heap bits occur before
  1090  	// the caller can make x observable to the garbage
  1091  	// collector. Otherwise, on weakly ordered machines,
  1092  	// the garbage collector could follow a pointer to x,
  1093  	// but see uninitialized memory or stale heap bits.
  1094  	publicationBarrier()
  1095  	// As x and the heap bits are initialized, update
  1096  	// freeIndexForScan now so x is seen by the GC
  1097  	// (including conservative scan) as an allocated object.
  1098  	// While this pointer can't escape into user code as a
  1099  	// _live_ pointer until we return, conservative scanning
  1100  	// may find a dead pointer that happens to point into this
  1101  	// object. Delaying this update until now ensures that
  1102  	// conservative scanning considers this pointer dead until
  1103  	// this point.
  1104  	span.freeIndexForScan = span.freeindex
  1105  
  1106  	// Allocate black during GC.
  1107  	// All slots hold nil so no scanning is needed.
  1108  	// This may be racing with GC so do it atomically if there can be
  1109  	// a race marking the bit.
  1110  	if gcphase != _GCoff {
  1111  		gcmarknewobject(span, uintptr(x), size)
  1112  	}
  1113  
  1114  	if raceenabled {
  1115  		racemalloc(x, size)
  1116  	}
  1117  
  1118  	if msanenabled {
  1119  		msanmalloc(x, size)
  1120  	}
  1121  
  1122  	if asanenabled {
  1123  		// We should only read/write the memory with the size asked by the user.
  1124  		// The rest of the allocated memory should be poisoned, so that we can report
  1125  		// errors when accessing poisoned memory.
  1126  		// The allocated memory is larger than the requested userSize; it also includes
  1127  		// the redzone and some other padding bytes.
  1128  		rzBeg := unsafe.Add(x, userSize)
  1129  		asanpoison(rzBeg, size-userSize)
  1130  		asanunpoison(x, userSize)
  1131  	}
  1132  
  1133  	if rate := MemProfileRate; rate > 0 {
  1134  		// Note cache c only valid while m acquired; see #47302
  1135  		if rate != 1 && size < c.nextSample {
  1136  			c.nextSample -= size
  1137  		} else {
  1138  			profilealloc(mp, x, size)
  1139  		}
  1140  	}
  1141  	mp.mallocing = 0
  1142  	releasem(mp)
  1143  
  1144  	// Pointer-free data can be zeroed late in a context where preemption can occur.
  1145  	// x will keep the memory alive.
  1146  	if delayedZeroing {
  1147  		if !noscan {
  1148  			throw("delayed zeroing on data that may contain pointers")
  1149  		}
  1150  		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
  1151  	}
  1152  
  1153  	if debug.malloc {
  1154  		if debug.allocfreetrace != 0 {
  1155  			tracealloc(x, size, typ)
  1156  		}
  1157  
  1158  		if inittrace.active && inittrace.id == getg().goid {
  1159  			// Init functions are executed sequentially in a single goroutine.
  1160  			inittrace.bytes += uint64(size)
  1161  		}
  1162  	}
  1163  
  1164  	if assistG != nil {
  1165  		// Account for internal fragmentation in the assist
  1166  		// debt now that we know it.
  1167  		assistG.gcAssistBytes -= int64(size - dataSize)
  1168  	}
  1169  
  1170  	if shouldhelpgc {
  1171  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  1172  			gcStart(t)
  1173  		}
  1174  	}
  1175  
  1176  	if raceenabled && noscan && dataSize < maxTinySize {
  1177  		// Pad tinysize allocations so they are aligned with the end
  1178  		// of the tinyalloc region. This ensures that any arithmetic
  1179  		// that goes off the top end of the object will be detectable
  1180  		// by checkptr (issue 38872).
  1181  		// Note that we disable tinyalloc when raceenabled for this to work.
  1182  		// TODO: This padding is only performed when the race detector
  1183  		// is enabled. It would be nice to enable it if any package
  1184  		// was compiled with checkptr, but there's no easy way to
  1185  		// detect that (especially at compile time).
  1186  		// TODO: enable this padding for all allocations, not just
  1187  		// tinyalloc ones. It's tricky because of pointer maps.
  1188  		// Maybe just all noscan objects?
  1189  		x = add(x, size-dataSize)
  1190  	}
  1191  
  1192  	return x
  1193  }
  1194  
  1195  // deductAssistCredit reduces the current G's assist credit
  1196  // by size bytes, and assists the GC if necessary.
  1197  //
  1198  // Caller must be preemptible.
  1199  //
  1200  // Returns the G for which the assist credit was accounted.
  1201  func deductAssistCredit(size uintptr) *g {
  1202  	var assistG *g
  1203  	if gcBlackenEnabled != 0 {
  1204  		// Charge the current user G for this allocation.
  1205  		assistG = getg()
  1206  		if assistG.m.curg != nil {
  1207  			assistG = assistG.m.curg
  1208  		}
  1209  		// Charge the allocation against the G. We'll account
  1210  		// for internal fragmentation at the end of mallocgc.
  1211  		assistG.gcAssistBytes -= int64(size)
  1212  
  1213  		if assistG.gcAssistBytes < 0 {
  1214  			// This G is in debt. Assist the GC to correct
  1215  			// this before allocating. This must happen
  1216  			// before disabling preemption.
  1217  			gcAssistAlloc(assistG)
  1218  		}
  1219  	}
  1220  	return assistG
  1221  }
  1222  
  1223  // memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
  1224  // on chunks of the buffer to be zeroed, with opportunities for preemption
  1225  // along the way.  memclrNoHeapPointers contains no safepoints and also
  1226  // cannot be preemptively scheduled, so this provides a still-efficient
  1227  // block copy that can also be preempted on a reasonable granularity.
  1228  // block clear that can also be preempted on a reasonable granularity.
  1229  // Use this with care; if the data being cleared is tagged to contain
  1230  // pointers, this allows the GC to run before it is all cleared.
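        //
        // For example, a 1MB buffer is cleared in four 256KB chunks, with a
        // preemption check before each chunk.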
  1231  func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
  1232  	v := uintptr(x)
  1233  	// This value comes from benchmarking: 128k is too small, 512k is too large.
  1234  	const chunkBytes = 256 * 1024
  1235  	vsize := v + size
  1236  	for voff := v; voff < vsize; voff = voff + chunkBytes {
  1237  		if getg().preempt {
  1238  			// may hold locks, e.g., profiling
  1239  			goschedguarded()
  1240  		}
  1241  		// clear min(avail, chunkBytes) bytes
  1242  		n := vsize - voff
  1243  		if n > chunkBytes {
  1244  			n = chunkBytes
  1245  		}
  1246  		memclrNoHeapPointers(unsafe.Pointer(voff), n)
  1247  	}
  1248  }
  1249  
  1250  // newobject is the implementation of the new builtin.
  1251  // The compiler (both frontend and SSA backend) knows the signature
  1252  // of this function.
  1253  func newobject(typ *_type) unsafe.Pointer {
  1254  	return mallocgc(typ.size, typ, true)
  1255  }
  1256  
  1257  //go:linkname reflect_unsafe_New reflect.unsafe_New
  1258  func reflect_unsafe_New(typ *_type) unsafe.Pointer {
  1259  	return mallocgc(typ.size, typ, true)
  1260  }
  1261  
  1262  //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
  1263  func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
  1264  	return mallocgc(typ.size, typ, true)
  1265  }
  1266  
  1267  // newarray allocates an array of n elements of type typ.
  1268  func newarray(typ *_type, n int) unsafe.Pointer {
  1269  	if n == 1 {
  1270  		return mallocgc(typ.size, typ, true)
  1271  	}
  1272  	mem, overflow := math.MulUintptr(typ.size, uintptr(n))
  1273  	if overflow || mem > maxAlloc || n < 0 {
  1274  		panic(plainError("runtime: allocation size out of range"))
  1275  	}
  1276  	return mallocgc(mem, typ, true)
  1277  }
  1278  
  1279  //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
  1280  func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
  1281  	return newarray(typ, n)
  1282  }
  1283  
  1284  func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
  1285  	c := getMCache(mp)
  1286  	if c == nil {
  1287  		throw("profilealloc called without a P or outside bootstrapping")
  1288  	}
  1289  	c.nextSample = nextSample()
  1290  	mProf_Malloc(x, size)
  1291  }
  1292  
  1293  // nextSample returns the next sampling point for heap profiling. The goal is
  1294  // to sample allocations on average every MemProfileRate bytes, but with a
  1295  // completely random distribution over the allocation timeline; this
  1296  // corresponds to a Poisson process with parameter MemProfileRate. In Poisson
  1297  // processes, the distance between two samples follows an exponential
  1298  // distribution with mean MemProfileRate, so the best return value is a random
  1299  // number taken from an exponential distribution whose mean is MemProfileRate.
  1300  func nextSample() uintptr {
  1301  	if MemProfileRate == 1 {
  1302  		// Callers assign our return value to
  1303  		// mcache.next_sample, but next_sample is not used
  1304  		// when the rate is 1. So avoid the math below and
  1305  		// just return something.
  1306  		return 0
  1307  	}
  1308  	if GOOS == "plan9" {
  1309  		// Plan 9 doesn't support floating point in note handler.
  1310  		if gp := getg(); gp == gp.m.gsignal {
  1311  			return nextSampleNoFP()
  1312  		}
  1313  	}
  1314  
  1315  	return uintptr(fastexprand(MemProfileRate))
  1316  }
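
MemProfileRate here is the exported runtime.MemProfileRate variable, so the sampling period chosen by nextSample is under program control. A small sketch of the common test/debug idiom of recording every allocation (the file name and allocation sizes are arbitrary):

	package main

	import (
		"log"
		"os"
		"runtime"
		"runtime/pprof"
	)

	func main() {
		// Sample every allocation instead of roughly one sample per
		// MemProfileRate bytes (512 KiB by default). This must be set
		// before the allocations of interest happen.
		runtime.MemProfileRate = 1

		sink := make([][]byte, 0, 1024)
		for i := 0; i < 1024; i++ {
			sink = append(sink, make([]byte, 4096))
		}
		_ = sink

		f, err := os.Create("mem.pprof")
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		if err := pprof.WriteHeapProfile(f); err != nil {
			log.Fatal(err)
		}
	}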
  1317  
  1318  // fastexprand returns a random number from an exponential distribution with
  1319  // the specified mean.
  1320  func fastexprand(mean int) int32 {
  1321  	// Avoid overflow. The maximum possible step is
  1322  	// -ln(1/(1<<randomBitCount)) * mean = 26*ln(2)*mean, approximately 18 * mean.
  1323  	switch {
  1324  	case mean > 0x7000000:
  1325  		mean = 0x7000000
  1326  	case mean == 0:
  1327  		return 0
  1328  	}
  1329  
  1330  	// Take a random sample of the exponential distribution exp(-x/mean).
  1331  	// The probability density function is (1/mean)*exp(-x/mean), so the CDF is
  1332  	// p = 1 - exp(-x/mean). Solving for x given a uniform random q = 1 - p:
  1333  	// q = 1 - p == exp(-x/mean)
  1334  	// log_e(q) = -x/mean
  1335  	// x = -log_e(q) * mean
  1336  	// x = -(log_2(q) * log_e(2)) * mean    ; log_e(q) = log_2(q) * log_e(2)
  1337  	// x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
  1338  	const randomBitCount = 26
  1339  	q := fastrandn(1<<randomBitCount) + 1
  1340  	qlog := fastlog2(float64(q)) - randomBitCount
  1341  	if qlog > 0 {
  1342  		qlog = 0
  1343  	}
  1344  	const minusLog2 = -0.6931471805599453 // -ln(2)
  1345  	return int32(qlog*(minusLog2*float64(mean))) + 1
  1346  }
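
For comparison, outside the runtime the same exponential draw can be written directly with the standard library, paying for a real logarithm instead of the fastlog2 approximation used above (nextSampleSketch is a hypothetical name):

	package main

	import (
		"fmt"
		"math/rand"
	)

	// nextSampleSketch returns a distance, in bytes, to the next heap
	// profiling sample, drawn from an exponential distribution with the
	// given mean, just as fastexprand does with cheaper arithmetic.
	func nextSampleSketch(mean float64) uintptr {
		return uintptr(rand.ExpFloat64() * mean) // ExpFloat64 has mean 1
	}

	func main() {
		for i := 0; i < 3; i++ {
			fmt.Println(nextSampleSketch(512 * 1024))
		}
	}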
  1347  
  1348  // nextSampleNoFP is similar to nextSample, but uses older,
  1349  // simpler code to avoid floating point.
  1350  func nextSampleNoFP() uintptr {
  1351  	// Set first allocation sample size.
  1352  	rate := MemProfileRate
  1353  	if rate > 0x3fffffff { // make 2*rate not overflow
  1354  		rate = 0x3fffffff
  1355  	}
  1356  	if rate != 0 {
  1357  		return uintptr(fastrandn(uint32(2 * rate)))
  1358  	}
  1359  	return 0
  1360  }
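
Note that a uniform draw from [0, 2*rate) has a mean of about rate, so this fallback preserves the average sampling period of one sample per MemProfileRate bytes; only the shape of the inter-sample distribution (uniform rather than exponential) differs from nextSample.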
  1361  
  1362  type persistentAlloc struct {
  1363  	base *notInHeap
  1364  	off  uintptr
  1365  }
  1366  
  1367  var globalAlloc struct {
  1368  	mutex
  1369  	persistentAlloc
  1370  }
  1371  
  1372  // persistentChunkSize is the number of bytes we allocate when we grow
  1373  // a persistentAlloc.
  1374  const persistentChunkSize = 256 << 10
  1375  
  1376  // persistentChunks is a list of all the persistent chunks we have
  1377  // allocated. The list is maintained through the first word in the
  1378  // persistent chunk. This is updated atomically.
  1379  var persistentChunks *notInHeap
  1380  
  1381  // Wrapper around sysAlloc that can allocate small chunks.
  1382  // There is no associated free operation.
  1383  // Intended for things like function/type/debug-related persistent data.
  1384  // If align is 0, uses default align (currently 8).
  1385  // The returned memory will be zeroed.
  1386  // sysStat must be non-nil.
  1387  //
  1388  // Consider marking persistentalloc'd types not in heap by embedding
  1389  // runtime/internal/sys.NotInHeap.
  1390  func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
  1391  	var p *notInHeap
  1392  	systemstack(func() {
  1393  		p = persistentalloc1(size, align, sysStat)
  1394  	})
  1395  	return unsafe.Pointer(p)
  1396  }
  1397  
  1398  // Must run on system stack because stack growth can (re)invoke it.
  1399  // See issue 9174.
  1400  //
  1401  //go:systemstack
  1402  func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
  1403  	const (
  1404  		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
  1405  	)
  1406  
  1407  	if size == 0 {
  1408  		throw("persistentalloc: size == 0")
  1409  	}
  1410  	if align != 0 {
  1411  		if align&(align-1) != 0 {
  1412  			throw("persistentalloc: align is not a power of 2")
  1413  		}
  1414  		if align > _PageSize {
  1415  			throw("persistentalloc: align is too large")
  1416  		}
  1417  	} else {
  1418  		align = 8
  1419  	}
  1420  
  1421  	if size >= maxBlock {
  1422  		return (*notInHeap)(sysAlloc(size, sysStat))
  1423  	}
  1424  
  1425  	mp := acquirem()
  1426  	var persistent *persistentAlloc
  1427  	if mp != nil && mp.p != 0 {
  1428  		persistent = &mp.p.ptr().palloc
  1429  	} else {
  1430  		lock(&globalAlloc.mutex)
  1431  		persistent = &globalAlloc.persistentAlloc
  1432  	}
  1433  	persistent.off = alignUp(persistent.off, align)
  1434  	if persistent.off+size > persistentChunkSize || persistent.base == nil {
  1435  		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
  1436  		if persistent.base == nil {
  1437  			if persistent == &globalAlloc.persistentAlloc {
  1438  				unlock(&globalAlloc.mutex)
  1439  			}
  1440  			throw("runtime: cannot allocate memory")
  1441  		}
  1442  
  1443  		// Add the new chunk to the persistentChunks list.
  1444  		for {
  1445  			chunks := uintptr(unsafe.Pointer(persistentChunks))
  1446  			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
  1447  			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
  1448  				break
  1449  			}
  1450  		}
  1451  		persistent.off = alignUp(goarch.PtrSize, align)
  1452  	}
  1453  	p := persistent.base.add(persistent.off)
  1454  	persistent.off += size
  1455  	releasem(mp)
  1456  	if persistent == &globalAlloc.persistentAlloc {
  1457  		unlock(&globalAlloc.mutex)
  1458  	}
  1459  
  1460  	if sysStat != &memstats.other_sys {
  1461  		sysStat.add(int64(size))
  1462  		memstats.other_sys.add(-int64(size))
  1463  	}
  1464  	return p
  1465  }
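
Setting aside the runtime-specific parts (the per-P cache, sysAlloc, the persistentChunks list, and stat accounting), persistentalloc1 is a bump allocator over fixed-size chunks: align the offset, start a new chunk when the current one cannot fit the request, and hand out the next size bytes. A rough sketch of that core shape over ordinary slices (names are illustrative; requests are assumed to be much smaller than a chunk):

	package main

	import "fmt"

	const chunkSize = 256 << 10 // same as persistentChunkSize

	// alignUp rounds n up to a multiple of align (a power of two).
	func alignUp(n, align uintptr) uintptr {
		return (n + align - 1) &^ (align - 1)
	}

	// bump hands out aligned, never-freed byte ranges from large chunks,
	// starting a fresh chunk when the current one is exhausted.
	type bump struct {
		cur []byte
		off uintptr
	}

	func (b *bump) alloc(size, align uintptr) []byte {
		b.off = alignUp(b.off, align)
		if b.cur == nil || b.off+size > uintptr(len(b.cur)) {
			b.cur = make([]byte, chunkSize) // the runtime uses sysAlloc here
			b.off = 0
		}
		p := b.cur[b.off : b.off+size]
		b.off += size
		return p
	}

	func main() {
		var b bump
		fmt.Println(len(b.alloc(40, 8)), len(b.alloc(100, 16)), b.off) // 40 100 148
	}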
  1466  
  1467  // inPersistentAlloc reports whether p points to memory allocated by
  1468  // persistentalloc. This must be nosplit because it is called by the
  1469  // cgo checker code, which is called by the write barrier code.
  1470  //
  1471  //go:nosplit
  1472  func inPersistentAlloc(p uintptr) bool {
  1473  	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
  1474  	for chunk != 0 {
  1475  		if p >= chunk && p < chunk+persistentChunkSize {
  1476  			return true
  1477  		}
  1478  		chunk = *(*uintptr)(unsafe.Pointer(chunk))
  1479  	}
  1480  	return false
  1481  }
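
The pattern used for persistentChunks, publish with a compare-and-swap and read without any lock, is easier to see with ordinary heap types and atomic.Pointer; the runtime version works with raw uintptrs because the chunks live outside the Go heap and must not be treated as ordinary Go pointers. A minimal sketch (chunk, publish, and count are illustrative names):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	type chunk struct {
		next *chunk
		buf  [4096]byte
	}

	var chunks atomic.Pointer[chunk] // head of the list, like persistentChunks

	// publish prepends c to the list, retrying if another goroutine
	// published a chunk between the Load and the CompareAndSwap.
	func publish(c *chunk) {
		for {
			head := chunks.Load()
			c.next = head
			if chunks.CompareAndSwap(head, c) {
				return
			}
		}
	}

	// count walks the list with no locking, as inPersistentAlloc does.
	func count() int {
		n := 0
		for c := chunks.Load(); c != nil; c = c.next {
			n++
		}
		return n
	}

	func main() {
		publish(new(chunk))
		publish(new(chunk))
		fmt.Println(count()) // 2
	}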
  1482  
  1483  // linearAlloc is a simple linear allocator that pre-reserves a region
  1484  // of memory and then optionally maps that region into the Ready state
  1485  // as needed.
  1486  //
  1487  // The caller is responsible for locking.
  1488  type linearAlloc struct {
  1489  	next   uintptr // next free byte
  1490  	mapped uintptr // one byte past end of mapped space
  1491  	end    uintptr // end of reserved space
  1492  
  1493  	mapMemory bool // transition memory from Reserved to Ready if true
  1494  }
  1495  
  1496  func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
  1497  	if base+size < base {
  1498  		// Chop off the last byte. The runtime isn't prepared
  1499  		// to deal with situations where the bounds could overflow.
  1500  		// Leave that memory reserved, though, so we don't map it
  1501  		// later.
  1502  		size -= 1
  1503  	}
  1504  	l.next, l.mapped = base, base
  1505  	l.end = base + size
  1506  	l.mapMemory = mapMemory
  1507  }
  1508  
  1509  func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
  1510  	p := alignUp(l.next, align)
  1511  	if p+size > l.end {
  1512  		return nil
  1513  	}
  1514  	l.next = p + size
  1515  	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
  1516  		if l.mapMemory {
  1517  			// Transition from Reserved to Prepared to Ready.
  1518  			n := pEnd - l.mapped
  1519  			sysMap(unsafe.Pointer(l.mapped), n, sysStat)
  1520  			sysUsed(unsafe.Pointer(l.mapped), n, n)
  1521  		}
  1522  		l.mapped = pEnd
  1523  	}
  1524  	return unsafe.Pointer(p)
  1525  }
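
Concretely, assuming a page-aligned base and a 4 KiB physical page size: after init(base, 64<<10, true), next == mapped == base. The first alloc(100, 8) returns base, advances next to base+100, and transitions the single page [base, base+4096) to Ready, so mapped becomes base+4096. Further small allocations keep bumping next within that page without touching sysMap again; only when next first crosses base+4096 is the next page mapped.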
  1526  
  1527  // notInHeap is off-heap memory allocated by a lower-level allocator
  1528  // like sysAlloc or persistentAlloc.
  1529  //
  1530  // In general, it's better to use real types which embed
  1531  // runtime/internal/sys.NotInHeap, but this serves as a generic type
  1532  // for situations where that isn't possible (like in the allocators).
  1533  //
  1534  // TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
  1535  type notInHeap struct{ _ sys.NotInHeap }
  1536  
  1537  func (p *notInHeap) add(bytes uintptr) *notInHeap {
  1538  	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
  1539  }
  1540  
  1541  // computeRZlog computes the size of the redzone for a given allocation size.
  1542  // It follows the redzone sizing used in the compiler-rt implementation.
  1543  func computeRZlog(userSize uintptr) uintptr {
  1544  	switch {
  1545  	case userSize <= (64 - 16):
  1546  		return 16 << 0
  1547  	case userSize <= (128 - 32):
  1548  		return 16 << 1
  1549  	case userSize <= (512 - 64):
  1550  		return 16 << 2
  1551  	case userSize <= (4096 - 128):
  1552  		return 16 << 3
  1553  	case userSize <= (1<<14)-256:
  1554  		return 16 << 4
  1555  	case userSize <= (1<<15)-512:
  1556  		return 16 << 5
  1557  	case userSize <= (1<<16)-1024:
  1558  		return 16 << 6
  1559  	default:
  1560  		return 16 << 7
  1561  	}
  1562  }
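
Worked through the cases above: a 24-byte object gets a 16-byte redzone, a 400-byte object gets 64 bytes, a 16000-byte object gets 256 bytes, and anything above 64512 bytes gets the maximum of 2048 bytes.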
  1563  
