Source file src/runtime/mpagealloc_32bit.go

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build 386 || arm || mips || mipsle || wasm

// wasm is treated as a 32-bit architecture for the purposes of the page
// allocator, even though it has 64-bit pointers. This is because any wasm
// pointer always has its top 32 bits as zero, so the effective heap address
// space is only 2^32 bytes in size (see heapAddrBits).

package runtime

import "unsafe"

const (
	// The number of levels in the radix tree.
	summaryLevels = 4

	// Constants for testing.
	pageAlloc32Bit = 1
	pageAlloc64Bit = 0

	// Number of bits needed to represent all indices into the L1 of the
	// chunks map.
	//
	// See (*pageAlloc).chunks for more details. Update the documentation
	// there should this number change.
	pallocChunksL1Bits = 0
)
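// With pallocChunksL1Bits = 0, the L1 of the two-level chunks map has exactly
// one entry, so on 32-bit platforms the map effectively degenerates to a
// single flat array covering the whole address space.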

// See comment in mpagealloc_64bit.go.
var levelBits = [summaryLevels]uint{
	summaryL0Bits,
	summaryLevelBits,
	summaryLevelBits,
	summaryLevelBits,
}
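// With the values in mpagealloc.go (heapAddrBits = 32, summaryLevelBits = 3,
// and 4 MiB chunks of 512 8 KiB pages), summaryL0Bits works out to 1, so
// levelBits is {1, 3, 3, 3}: a 2-entry root with 8-way fan-out below it.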

// See comment in mpagealloc_64bit.go.
var levelShift = [summaryLevels]uint{
	heapAddrBits - summaryL0Bits,
	heapAddrBits - summaryL0Bits - 1*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 2*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 3*summaryLevelBits,
}
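// Given those values, the shifts are {31, 28, 25, 22}: the top bit of an
// address selects its L0 entry (roughly addr >> levelShift[l]), each lower
// level consumes three more bits, and the leaf level works at the 4 MiB
// (1<<22 byte) chunk granularity.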

// See comment in mpagealloc_64bit.go.
var levelLogPages = [summaryLevels]uint{
	logPallocChunkPages + 3*summaryLevelBits,
	logPallocChunkPages + 2*summaryLevelBits,
	logPallocChunkPages + 1*summaryLevelBits,
	logPallocChunkPages,
}
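// Likewise, levelLogPages is {18, 15, 12, 9}: each leaf summary covers one
// chunk of 2^9 pages, and each L0 summary covers 2^18 pages, i.e. 2 GiB of
// address space.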

// scavengeIndexArray is the backing store for p.scav.index.chunks.
// On 32-bit platforms, it's small enough to just be a global.
var scavengeIndexArray [(1 << heapAddrBits) / pallocChunkBytes]atomicScavChunkData
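// That is (1<<32)/(4<<20) = 1024 entries; at 8 bytes per atomicScavChunkData
// (a packed uint64 per chunk), the whole array is only 8 KiB.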

// See mpagealloc_64bit.go for details.
func (p *pageAlloc) sysInit(test bool) {
	// Calculate how much memory all our entries will take up.
	//
	// This should be around 12 KiB or less.
	totalSize := uintptr(0)
	for l := 0; l < summaryLevels; l++ {
		totalSize += (uintptr(1) << (heapAddrBits - levelShift[l])) * pallocSumBytes
	}
	totalSize = alignUp(totalSize, physPageSize)
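	// Concretely: (2 + 16 + 128 + 1024) summaries * 8 bytes = 9360 bytes,
	// which alignUp rounds to a physical-page multiple (12 KiB with 4 KiB
	// pages), matching the estimate above.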

	// Reserve memory for all levels in one go.
	reservation := sysReserve(nil, totalSize)
	if reservation == nil {
		throw("failed to reserve page summary memory")
	}
	// There isn't much on 32-bit. Just map it and mark it used immediately.
	sysMap(reservation, totalSize, p.sysStat)
	sysUsed(reservation, totalSize, totalSize)
	p.summaryMappedReady += totalSize

	// Iterate over the reservation and cut it up into slices.
	//
	// Each iteration advances reservation past the slice just carved
	// out, so it always points at where the next level's slice starts.
	for l, shift := range levelShift {
		entries := 1 << (heapAddrBits - shift)

		// Put this reservation into a slice.
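		// notInHeapSlice has the same layout as a slice header, so this
		// reinterprets the raw reservation as a []pallocSum with length 0
		// and capacity entries; sysGrow extends the length as the heap grows.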
		sl := notInHeapSlice{(*notInHeap)(reservation), 0, entries}
		p.summary[l] = *(*[]pallocSum)(unsafe.Pointer(&sl))

		reservation = add(reservation, uintptr(entries)*pallocSumBytes)
	}
}

// See mpagealloc_64bit.go for details.
func (p *pageAlloc) sysGrow(base, limit uintptr) {
	if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
		print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
		throw("sysGrow bounds not aligned to pallocChunkBytes")
	}

	// Walk up the tree and update the summary slices.
	for l := len(p.summary) - 1; l >= 0; l-- {
		// Figure out what part of the summary array this new address space needs.
		// Note that we need to align the ranges to the block width (1<<levelBits[l])
		// at this level because the full block is needed to compute the summary for
		// the next level.
		lo, hi := addrsToSummaryRange(l, base, limit)
		_, hi = blockAlignSummaryRange(l, lo, hi)
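		// sysInit already mapped the full reservation, so growing here is
		// just a matter of extending each slice's length to bring the new
		// address space's summaries into bounds; only the upper bound matters.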
		if hi > len(p.summary[l]) {
			p.summary[l] = p.summary[l][:hi]
		}
	}
}

// sysInit initializes the scavengeIndex's chunks array.
//
// Returns the amount of memory added to sysStat.
func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) (mappedReady uintptr) {
	if test {
		// Set up the scavenge index via sysAlloc so the test can free it later.
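		// The raw memory is cast to a pointer to the backing array type and
		// then sliced, so s.chunks has the same shape as in the non-test path.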
		scavIndexSize := uintptr(len(scavengeIndexArray)) * unsafe.Sizeof(atomicScavChunkData{})
		s.chunks = ((*[(1 << heapAddrBits) / pallocChunkBytes]atomicScavChunkData)(sysAlloc(scavIndexSize, sysStat)))[:]
		mappedReady = scavIndexSize
	} else {
		// Set up the scavenge index.
		s.chunks = scavengeIndexArray[:]
	}
	s.min.Store(1) // The 0th chunk is never going to be mapped for the heap.
	s.max.Store(uintptr(len(s.chunks)))
	return
}

// sysGrow is a no-op on 32-bit platforms.
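//
// The chunks array is statically sized for the full 32-bit address space and
// sysInit makes all of it usable up front, so there is nothing to grow into.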
func (s *scavengeIndex) sysGrow(base, limit uintptr, sysStat *sysMemStat) uintptr {
	return 0
}
