Source file src/runtime/mgcscavenge.go

     1  // Copyright 2019 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Scavenging free pages.
     6  //
     7  // This file implements scavenging (the release of physical pages backing mapped
     8  // memory) of free and unused pages in the heap as a way to deal with page-level
     9  // fragmentation and reduce the RSS of Go applications.
    10  //
    11  // Scavenging in Go happens on two fronts: there's the background
    12  // (asynchronous) scavenger and the heap-growth (synchronous) scavenger.
    13  //
    14  // The former happens on a goroutine much like the background sweeper which is
    15  // soft-capped at using scavengePercent of the mutator's time, based on
    16  // order-of-magnitude estimates of the costs of scavenging. The background
    17  // scavenger's primary goal is to bring the estimated heap RSS of the
    18  // application down to a goal.
    19  //
    20  // That goal is defined as:
    21  //   (retainExtraPercent+100) / 100 * (heapGoal / lastHeapGoal) * last_heap_inuse
    22  //
    23  // Essentially, we wish to have the application's RSS track the heap goal, but
    24  // the heap goal is defined in terms of bytes of objects, rather than pages like
    25  // RSS. As a result, we need to account for fragmentation internal to
    26  // spans. heapGoal / lastHeapGoal defines the ratio between the current heap goal
    27  // and the last heap goal, which tells us by how much the heap is growing or
    28  // shrinking. We estimate what the heap will grow to in terms of pages by taking
    29  // this ratio and multiplying it by heap_inuse at the end of the last GC, which
    30  // allows us to account for this additional fragmentation. Note that this
    31  // procedure makes the assumption that the degree of fragmentation won't change
    32  // dramatically over the next GC cycle. Overestimating the amount of
    33  // fragmentation simply results in higher memory use, which will be accounted
    34  // for by the next pacing update. Underestimating the fragmentation, however,
    35  // could lead to performance degradation. Handling this case is not within the
    36  // scope of the scavenger. Situations where the amount of fragmentation balloons
    37  // over the course of a single GC cycle should be considered pathologies,
    38  // flagged as bugs, and fixed appropriately.
    39  //
    40  // An additional factor of retainExtraPercent is added as a buffer to help ensure
    41  // that there's more unscavenged memory to allocate out of, since each allocation
    42  // out of scavenged memory incurs a potentially expensive page fault.
    43  //
    44  // The goal is updated after each GC and the scavenger's pacing parameters
    45  // (which live in mheap_) are updated to match. The pacing parameters work much
    46  // like the background sweeping parameters. The parameters define a line whose
    47  // horizontal axis is time and vertical axis is estimated heap RSS, and the
    48  // scavenger attempts to stay below that line at all times.
    49  //
    50  // The synchronous heap-growth scavenging happens whenever the heap grows in
    51  // size, for some definition of heap-growth. The intuition behind this is that
    52  // the application had to grow the heap because existing fragments were
    53  // not sufficiently large to satisfy a page-level memory allocation, so we
    54  // scavenge those fragments eagerly to offset the growth in RSS that results.
    55  
    56  package runtime
    57  
    58  import (
    59  	"runtime/internal/atomic"
    60  	"runtime/internal/sys"
    61  	"unsafe"
    62  )
    63  
    64  const (
    65  	// The background scavenger is paced according to these parameters.
    66  	//
    67  	// scavengePercent represents the portion of mutator time we're willing
    68  	// to spend on scavenging in percent.
    69  	scavengePercent = 1 // 1%
    70  
    71  	// retainExtraPercent represents the amount of memory over the heap goal
    72  	// that the scavenger should keep as a buffer space for the allocator.
    73  	//
    74  	// The purpose of maintaining this overhead is to have a greater pool of
    75  	// unscavenged memory available for allocation (since using scavenged memory
    76  	// incurs an additional cost), to account for heap fragmentation and
    77  	// the ever-changing layout of the heap.
    78  	retainExtraPercent = 10
    79  
    80  	// maxPagesPerPhysPage is the maximum number of supported runtime pages per
    81  	// physical page, based on maxPhysPageSize.
    82  	maxPagesPerPhysPage = maxPhysPageSize / pageSize
    83  
    84  	// scavengeCostRatio is the approximate ratio between the costs of using previously
    85  	// scavenged memory and scavenging memory.
    86  	//
    87  	// For most systems the cost of scavenging greatly outweighs the costs
    88  	// associated with using scavenged memory, making this constant 0. On other systems
    89  	// (especially ones where "sysUsed" is not just a no-op) this cost is non-trivial.
    90  	//
    91  	// This ratio is used as part of multiplicative factor to help the scavenger account
    92  	// for the additional costs of using scavenged memory in its pacing.
    93  	scavengeCostRatio = 0.7 * (sys.GoosDarwin + sys.GoosIos)
    94  
    95  	// scavengeReservationShards determines the amount of memory the scavenger
    96  	// should reserve for scavenging at a time. Specifically, the amount of
    97  	// memory reserved is (heap size in bytes) / scavengeReservationShards.
    98  	scavengeReservationShards = 64
    99  )
   100  
   101  // heapRetained returns an estimate of the current heap RSS.
   102  func heapRetained() uint64 {
   103  	return memstats.heap_sys.load() - atomic.Load64(&memstats.heap_released)
   104  }
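
        // For example, if heap_sys is 64 MiB and heap_released is 16 MiB, then
        // heapRetained estimates 48 MiB of heap-backed RSS.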
   105  
   106  // gcPaceScavenger updates the scavenger's pacing, particularly
   107  // its rate and RSS goal.
   108  //
   109  // The RSS goal is based on the current heap goal with a small overhead
   110  // to accommodate non-determinism in the allocator.
   111  //
   112  // The pacing is based on scavengePercent, the portion of the mutator's time
   113  // the scavenger may consume; see that constant for more information.
   114  //
   115  // mheap_.lock must be held or the world must be stopped.
   116  func gcPaceScavenger() {
   117  	// If we're called before the first GC completed, disable scavenging.
   118  	// We never scavenge before the 2nd GC cycle anyway (we don't have enough
   119  	// information about the heap yet) so this is fine, and avoids a fault
   120  	// or garbage data later.
   121  	if gcController.lastHeapGoal == 0 {
   122  		mheap_.scavengeGoal = ^uint64(0)
   123  		return
   124  	}
   125  	// Compute our scavenging goal.
   126  	goalRatio := float64(atomic.Load64(&gcController.heapGoal)) / float64(gcController.lastHeapGoal)
   127  	retainedGoal := uint64(float64(memstats.last_heap_inuse) * goalRatio)
   128  	// Add retainExtraPercent overhead to retainedGoal. This calculation
   129  	// looks strange but the purpose is to arrive at an integer division
   130  	// (e.g. if retainExtraPercent = 12.5, then we get a divisor of 8)
   131  	// that also avoids the overflow from a multiplication.
   132  	retainedGoal += retainedGoal / (1.0 / (retainExtraPercent / 100.0))
   133  	// Align it to a physical page boundary to make the following calculations
   134  	// a bit more exact.
   135  	retainedGoal = (retainedGoal + uint64(physPageSize) - 1) &^ (uint64(physPageSize) - 1)
   136  
   137  	// Represents where we are now in the heap's contribution to RSS in bytes.
   138  	//
   139  	// Guaranteed to always be a multiple of physPageSize on systems where
   140  	// physPageSize <= pageSize since we map heap_sys at a rate larger than
   141  	// any physPageSize and release memory in multiples of the physPageSize.
   142  	//
   143  	// However, certain functions recategorize heap_sys as other stats (e.g.
   144  	// stack_sys) and this happens in multiples of pageSize, so on systems
   145  	// where physPageSize > pageSize the calculations below will not be exact.
   146  	// Generally this is OK since we'll be off by at most one regular
   147  	// physical page.
   148  	retainedNow := heapRetained()
   149  
   150  	// If we're already below our goal, or within one page of our goal, then disable
   151  	// the background scavenger. We disable the background scavenger if there's
   152  	// less than one physical page of work to do because it's not worth it.
   153  	if retainedNow <= retainedGoal || retainedNow-retainedGoal < uint64(physPageSize) {
   154  		mheap_.scavengeGoal = ^uint64(0)
   155  		return
   156  	}
   157  	mheap_.scavengeGoal = retainedGoal
   158  }
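
        // The sketch below is an illustrative restatement of the goal arithmetic
        // above with concrete inputs; it is not called by the runtime, and its
        // name and parameters are hypothetical. For instance, with lastHeapInuse
        // = 100<<20 (100 MiB) and goalRatio = 1.25 (the heap goal grew by 25%),
        // the result is 100 MiB * 1.25 * 1.10 = 137.5 MiB, rounded up to a
        // physical page boundary (physPage must be a power of two).
        func gcPaceScavengerExample(lastHeapInuse uint64, goalRatio float64, physPage uint64) uint64 {
        	retainedGoal := uint64(float64(lastHeapInuse) * goalRatio)
        	// Add the retainExtraPercent buffer using the same overflow-safe
        	// integer division as gcPaceScavenger above.
        	retainedGoal += retainedGoal / (1.0 / (retainExtraPercent / 100.0))
        	// Round up to a physical page boundary.
        	return (retainedGoal + physPage - 1) &^ (physPage - 1)
        }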
   159  
   160  // Sleep/wait state of the background scavenger.
   161  var scavenge struct {
   162  	lock       mutex
   163  	g          *g
   164  	parked     bool
   165  	timer      *timer
   166  	sysmonWake uint32 // Set atomically.
   167  }
   168  
   169  // readyForScavenger signals sysmon to wake the scavenger because
   170  // there may be new work to do.
   171  //
   172  // There may be a significant delay between when this function runs
   173  // and when the scavenger is kicked awake, but it may be safely invoked
   174  // in contexts where wakeScavenger is unsafe to call directly.
   175  func readyForScavenger() {
   176  	atomic.Store(&scavenge.sysmonWake, 1)
   177  }
   178  
   179  // wakeScavenger immediately unparks the scavenger if necessary.
   180  //
   181  // May run without a P, but it may allocate, so it must not be called
   182  // on any allocation path.
   183  //
   184  // mheap_.lock, scavenge.lock, and sched.lock must not be held.
   185  func wakeScavenger() {
   186  	lock(&scavenge.lock)
   187  	if scavenge.parked {
   188  		// Notify sysmon that it shouldn't bother waking up the scavenger.
   189  		atomic.Store(&scavenge.sysmonWake, 0)
   190  
   191  		// Try to stop the timer but we don't really care if we succeed.
   192  		// It's possible that either a timer was never started, or that
   193  		// we're racing with it.
   194  	// In the case that we're racing with it, there's a small chance that
   195  		// we experience a spurious wake-up of the scavenger, but that's
   196  		// totally safe.
   197  		stopTimer(scavenge.timer)
   198  
   199  		// Unpark the goroutine and tell it that there may have been a pacing
   200  		// change. Note that we skip the scheduler's runnext slot because we
   201  		// want to avoid having the scavenger interfere with the fair
   202  		// scheduling of user goroutines. In effect, this schedules the
   203  		// scavenger at a "lower priority" but that's OK because it'll
   204  		// catch up on the work it missed when it does get scheduled.
   205  		scavenge.parked = false
   206  
   207  		// Ready the goroutine by injecting it. We use injectglist instead
   208  		// of ready or goready in order to allow us to run this function
   209  		// without a P. injectglist also avoids placing the goroutine in
   210  		// the current P's runnext slot, which is desirable to prevent
   211  		// the scavenger from interfering with user goroutine scheduling
   212  		// too much.
   213  		var list gList
   214  		list.push(scavenge.g)
   215  		injectglist(&list)
   216  	}
   217  	unlock(&scavenge.lock)
   218  }
   219  
   220  // scavengeSleep attempts to put the scavenger to sleep for ns.
   221  //
   222  // Note that this function should only be called by the scavenger.
   223  //
   224  // The scavenger may be woken up earlier by a pacing change, and it may not go
   225  // to sleep at all if there's a pending pacing change.
   226  //
   227  // Returns the amount of time actually slept.
   228  func scavengeSleep(ns int64) int64 {
   229  	lock(&scavenge.lock)
   230  
   231  	// Set the timer.
   232  	//
   233  	// This must happen here instead of inside gopark
   234  	// because we can't close over any variables without
   235  	// failing escape analysis.
   236  	start := nanotime()
   237  	resetTimer(scavenge.timer, start+ns)
   238  
   239  	// Mark ourself as asleep and go to sleep.
   240  	scavenge.parked = true
   241  	goparkunlock(&scavenge.lock, waitReasonSleep, traceEvGoSleep, 2)
   242  
   243  	// Return how long we actually slept for.
   244  	return nanotime() - start
   245  }
   246  
   247  // Background scavenger.
   248  //
   249  // The background scavenger maintains the RSS of the application below
   250  // the line described by the proportional scavenging statistics in
   251  // the mheap struct.
   252  func bgscavenge() {
   253  	scavenge.g = getg()
   254  
   255  	lockInit(&scavenge.lock, lockRankScavenge)
   256  	lock(&scavenge.lock)
   257  	scavenge.parked = true
   258  
   259  	scavenge.timer = new(timer)
   260  	scavenge.timer.f = func(_ interface{}, _ uintptr) {
   261  		wakeScavenger()
   262  	}
   263  
   264  	gcenable_setup <- 1
   265  	goparkunlock(&scavenge.lock, waitReasonGCScavengeWait, traceEvGoBlock, 1)
   266  
   267  	// Exponentially-weighted moving average of the fraction of time this
   268  	// goroutine spends scavenging (that is, percent of a single CPU).
   269  	// It represents a measure of scheduling overheads which might extend
   270  	// the sleep or the critical time beyond what's expected. Assume no
   271  	// overhead to begin with.
   272  	//
   273  	// TODO(mknyszek): Consider making this based on total CPU time of the
   274  	// application (i.e. scavengePercent * GOMAXPROCS). This isn't really
   275  	// feasible now because the scavenger acquires the heap lock over the
   276  	// scavenging operation, which means scavenging effectively blocks
   277  	// allocators and isn't scalable. However, given a scalable allocator,
   278  	// it makes sense to also make the scavenger scale with it; if you're
   279  	// allocating more frequently, then presumably you're also generating
   280  	// more work for the scavenger.
   281  	const idealFraction = scavengePercent / 100.0
   282  	scavengeEWMA := float64(idealFraction)
   283  
   284  	for {
   285  		released := uintptr(0)
   286  
   287  		// Time in scavenging critical section.
   288  		crit := float64(0)
   289  
   290  		// Run on the system stack since we grab the heap lock,
   291  		// and a stack growth with the heap lock means a deadlock.
   292  		systemstack(func() {
   293  			lock(&mheap_.lock)
   294  
   295  			// If background scavenging is disabled or if there's no work to do just park.
   296  			retained, goal := heapRetained(), mheap_.scavengeGoal
   297  			if retained <= goal {
   298  				unlock(&mheap_.lock)
   299  				return
   300  			}
   301  
   302  			// Scavenge one page, and measure the amount of time spent scavenging.
   303  			start := nanotime()
   304  			released = mheap_.pages.scavenge(physPageSize, true)
   305  			mheap_.pages.scav.released += released
   306  			crit = float64(nanotime() - start)
   307  
   308  			unlock(&mheap_.lock)
   309  		})
   310  
   311  		if released == 0 {
   312  			lock(&scavenge.lock)
   313  			scavenge.parked = true
   314  			goparkunlock(&scavenge.lock, waitReasonGCScavengeWait, traceEvGoBlock, 1)
   315  			continue
   316  		}
   317  
   318  		if released < physPageSize {
   319  			// If this happens, it means that we may have attempted to release part
   320  			// of a physical page, but the likely effect of that is that it released
   321  			// the whole physical page, some of which may have still been in-use.
   322  			// This could lead to memory corruption. Throw.
   323  			throw("released less than one physical page of memory")
   324  		}
   325  
   326  		// On some platforms we may see crit as zero if the time it takes to scavenge
   327  		// memory is less than the minimum granularity of its clock (e.g. Windows).
   328  		// In this case, just assume scavenging takes 10 ┬Ás per regular physical page
   329  		// (determined empirically), and conservatively ignore the impact of huge pages
   330  		// on timing.
   331  		//
   332  		// We shouldn't ever see a crit value less than zero unless there's a bug of
   333  		// some kind, either on our side or in the platform we're running on, but be
   334  		// defensive in that case as well.
   335  		const approxCritNSPerPhysicalPage = 10e3
   336  		if crit <= 0 {
   337  			crit = approxCritNSPerPhysicalPage * float64(released/physPageSize)
   338  		}
   339  
   340  		// Multiply the critical time by 1 + the ratio of the costs of using
   341  		// scavenged memory vs. scavenging memory. This forces us to pay down
   342  		// the cost of reusing this memory eagerly by sleeping for a longer period
   343  		// of time and scavenging less frequently. More concretely, we avoid situations
   344  		// where we end up scavenging so often that we hurt allocation performance
   345  		// because of the additional overheads of using scavenged memory.
   346  		crit *= 1 + scavengeCostRatio
   347  
   348  		// If we spent more than 10 ms (for example, if the OS scheduled us away, or someone
   349  		// put their machine to sleep) in the critical section, bound the time we use to
   350  		// calculate at 10 ms to avoid letting the sleep time get arbitrarily high.
   351  		const maxCrit = 10e6
   352  		if crit > maxCrit {
   353  			crit = maxCrit
   354  		}
   355  
   356  		// Compute the amount of time to sleep, assuming we want to use at most
   357  		// scavengePercent of CPU time. Take into account scheduling overheads
   358  		// that may extend the length of our sleep by multiplying by how far
   359  		// off we are from the ideal ratio. For example, if we're sleeping too
   360  	// much, then scavengeEWMA < idealFraction, so we'll adjust the sleep time
   361  		// down.
   362  		adjust := scavengeEWMA / idealFraction
   363  		sleepTime := int64(adjust * crit / (scavengePercent / 100.0))
   364  
   365  		// Go to sleep.
   366  		slept := scavengeSleep(sleepTime)
   367  
   368  		// Compute the new ratio.
   369  		fraction := crit / (crit + float64(slept))
   370  
   371  		// Set a lower bound on the fraction.
   372  		// Due to OS-related anomalies we may "sleep" for an inordinate amount
   373  		// of time. Let's avoid letting the ratio get out of hand by bounding
   374  		// the sleep time we use in our EWMA.
   375  		const minFraction = 1.0 / 1000.0
   376  		if fraction < minFraction {
   377  			fraction = minFraction
   378  		}
   379  
   380  		// Update scavengeEWMA by merging in the new crit/slept ratio.
   381  		const alpha = 0.5
   382  		scavengeEWMA = alpha*fraction + (1-alpha)*scavengeEWMA
   383  	}
   384  }
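
        // The sketch below collapses one iteration of the pacing loop above into
        // a single function for illustration; it is hypothetical and not called
        // by the runtime. Given the measured critical time, the time actually
        // slept, and the current EWMA, it computes the next sleep duration and
        // the updated EWMA. For example, with critNS = 100e3 (100 µs) and a
        // neutral EWMA (adjust = 1), the next sleep is 100 µs / 0.01 = 10 ms,
        // keeping scavenging at about 1% of this goroutine's wall-clock time.
        func scavengePacingStepExample(critNS, sleptNS, ewma float64) (nextSleepNS int64, newEWMA float64) {
        	const idealFraction = scavengePercent / 100.0
        	// Stretch or shrink the sleep based on how far off the ideal
        	// fraction recent iterations have been.
        	adjust := ewma / idealFraction
        	nextSleepNS = int64(adjust * critNS / idealFraction)
        	// Fold the fraction of time actually spent in the critical section
        	// into the EWMA, bounding it below as the loop above does.
        	fraction := critNS / (critNS + sleptNS)
        	const minFraction = 1.0 / 1000.0
        	if fraction < minFraction {
        		fraction = minFraction
        	}
        	const alpha = 0.5
        	newEWMA = alpha*fraction + (1-alpha)*ewma
        	return
        }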
   385  
   386  // scavenge scavenges nbytes worth of free pages, starting with the
   387  // highest address first. Successive calls continue from where it left
   388  // off until the heap is exhausted. Call scavengeStartGen to bring it
   389  // back to the top of the heap.
   390  //
   391  // Returns the amount of memory scavenged in bytes.
   392  //
   393  // p.mheapLock must be held, but may be temporarily released if
   394  // mayUnlock == true.
   395  //
   396  // Must run on the system stack because p.mheapLock must be held.
   397  //
   398  //go:systemstack
   399  func (p *pageAlloc) scavenge(nbytes uintptr, mayUnlock bool) uintptr {
   400  	assertLockHeld(p.mheapLock)
   401  
   402  	var (
   403  		addrs addrRange
   404  		gen   uint32
   405  	)
   406  	released := uintptr(0)
   407  	for released < nbytes {
   408  		if addrs.size() == 0 {
   409  			if addrs, gen = p.scavengeReserve(); addrs.size() == 0 {
   410  				break
   411  			}
   412  		}
   413  		r, a := p.scavengeOne(addrs, nbytes-released, mayUnlock)
   414  		released += r
   415  		addrs = a
   416  	}
   417  	// Only unreserve the space which hasn't been scavenged or searched
   418  	// to ensure we always make progress.
   419  	p.scavengeUnreserve(addrs, gen)
   420  	return released
   421  }
   422  
   423  // printScavTrace prints a scavenge trace line to standard error.
   424  //
   425  // released should be the amount of memory released since the last time this
   426  // was called, and forced indicates whether the scavenge was forced by the
   427  // application.
   428  func printScavTrace(gen uint32, released uintptr, forced bool) {
   429  	printlock()
   430  	print("scav ", gen, " ",
   431  		released>>10, " KiB work, ",
   432  		atomic.Load64(&memstats.heap_released)>>10, " KiB total, ",
   433  		(atomic.Load64(&memstats.heap_inuse)*100)/heapRetained(), "% util",
   434  	)
   435  	if forced {
   436  		print(" (forced)")
   437  	}
   438  	println()
   439  	printunlock()
   440  }
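
        // For instance, a call like printScavTrace(4, 64<<10, false) with 2 MiB
        // of heap_released and 90% utilization (values hypothetical) prints:
        //
        //	scav 4 64 KiB work, 2048 KiB total, 90% util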
   441  
   442  // scavengeStartGen starts a new scavenge generation, resetting
   443  // the scavenger's search space to the full in-use address space.
   444  //
   445  // p.mheapLock must be held.
   446  //
   447  // Must run on the system stack because p.mheapLock must be held.
   448  //
   449  //go:systemstack
   450  func (p *pageAlloc) scavengeStartGen() {
   451  	assertLockHeld(p.mheapLock)
   452  
   453  	if debug.scavtrace > 0 {
   454  		printScavTrace(p.scav.gen, p.scav.released, false)
   455  	}
   456  	p.inUse.cloneInto(&p.scav.inUse)
   457  
   458  	// Pick the new starting address for the scavenger cycle.
   459  	var startAddr offAddr
   460  	if p.scav.scavLWM.lessThan(p.scav.freeHWM) {
   461  		// The "free" high watermark exceeds the "scavenged" low watermark,
   462  		// so there are free scavengable pages in parts of the address space
   463  		// that the scavenger already searched, the high watermark being the
   464  		// highest one. Pick that as our new starting point to ensure we
   465  		// see those pages.
   466  		startAddr = p.scav.freeHWM
   467  	} else {
   468  		// The "free" high watermark does not exceed the "scavenged" low
   469  		// watermark. This means the allocator didn't free any memory in
   470  		// the range we scavenged last cycle, so we might as well continue
   471  		// scavenging from where we were.
   472  		startAddr = p.scav.scavLWM
   473  	}
   474  	p.scav.inUse.removeGreaterEqual(startAddr.addr())
   475  
   476  	// reservationBytes may be zero if p.inUse.totalBytes is small, or if
   477  	// scavengeReservationShards is large. This case is fine as the scavenger
   478  	// will simply be turned off, but it does mean that scavengeReservationShards,
   479  	// in concert with pallocChunkBytes, dictates the minimum heap size at which
   480  	// the scavenger triggers. In practice this minimum is generally less than an
   481  	// arena in size, so virtually every heap has the scavenger on.
   482  	p.scav.reservationBytes = alignUp(p.inUse.totalBytes, pallocChunkBytes) / scavengeReservationShards
   483  	p.scav.gen++
   484  	p.scav.released = 0
   485  	p.scav.freeHWM = minOffAddr
   486  	p.scav.scavLWM = maxOffAddr
   487  }
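
        // For example, with 1 GiB of in-use address space, reservationBytes
        // works out to 1 GiB / 64 = 16 MiB per reservation (pallocChunkBytes is
        // 4 MiB, so the chunk-aligned total is unchanged in this case).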
   488  
   489  // scavengeReserve reserves a contiguous range of the address space
   490  // for scavenging. The maximum amount of space it reserves is proportional
   491  // to the size of the heap. The ranges are reserved from the high addresses
   492  // first.
   493  //
   494  // Returns the reserved range and the scavenge generation number for it.
   495  //
   496  // p.mheapLock must be held.
   497  //
   498  // Must run on the system stack because p.mheapLock must be held.
   499  //
   500  //go:systemstack
   501  func (p *pageAlloc) scavengeReserve() (addrRange, uint32) {
   502  	assertLockHeld(p.mheapLock)
   503  
   504  	// Start by reserving the minimum.
   505  	r := p.scav.inUse.removeLast(p.scav.reservationBytes)
   506  
   507  	// Return early if the size is zero; we don't want to use
   508  	// the bogus address below.
   509  	if r.size() == 0 {
   510  		return r, p.scav.gen
   511  	}
   512  
   513  	// The scavenger requires that base be aligned to a
   514  	// palloc chunk because that's the unit of operation for
   515  	// the scavenger, so align down, potentially extending
   516  	// the range.
   517  	newBase := alignDown(r.base.addr(), pallocChunkBytes)
   518  
   519  	// Remove from inUse however much extra we just pulled out.
   520  	p.scav.inUse.removeGreaterEqual(newBase)
   521  	r.base = offAddr{newBase}
   522  	return r, p.scav.gen
   523  }
   524  
   525  // scavengeUnreserve returns an unscavenged portion of a range that was
   526  // previously reserved with scavengeReserve.
   527  //
   528  // p.mheapLock must be held.
   529  //
   530  // Must run on the system stack because p.mheapLock must be held.
   531  //
   532  //go:systemstack
   533  func (p *pageAlloc) scavengeUnreserve(r addrRange, gen uint32) {
   534  	assertLockHeld(p.mheapLock)
   535  
   536  	if r.size() == 0 || gen != p.scav.gen {
   537  		return
   538  	}
   539  	if r.base.addr()%pallocChunkBytes != 0 {
   540  		throw("unreserving unaligned region")
   541  	}
   542  	p.scav.inUse.add(r)
   543  }
   544  
   545  // scavengeOne walks over address range work until it finds
   546  // a contiguous run of pages to scavenge. It will try to scavenge
   547  // at most max bytes at once, but may scavenge more to avoid
   548  // breaking huge pages. Once it scavenges some memory it returns
   549  // how much it scavenged in bytes.
   550  //
   551  // Returns the number of bytes scavenged and the part of work
   552  // which was not yet searched.
   553  //
   554  // work's base address must be aligned to pallocChunkBytes.
   555  //
   556  // p.mheapLock must be held, but may be temporarily released if
   557  // mayUnlock == true.
   558  //
   559  // Must run on the system stack because p.mheapLock must be held.
   560  //
   561  //go:systemstack
   562  func (p *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (uintptr, addrRange) {
   563  	assertLockHeld(p.mheapLock)
   564  
   565  	// Defensively check if we've received an empty address range.
   566  	// If so, just return.
   567  	if work.size() == 0 {
   568  		// Nothing to do.
   569  		return 0, work
   570  	}
   571  	// Check the prerequisites of work.
   572  	if work.base.addr()%pallocChunkBytes != 0 {
   573  		throw("scavengeOne called with unaligned work region")
   574  	}
   575  	// Calculate the maximum number of pages to scavenge.
   576  	//
   577  	// This should be alignUp(max, pageSize) / pageSize but max can and will
   578  	// be ^uintptr(0), so we need to be very careful not to overflow here.
   579  	// Rather than use alignUp, calculate the number of pages rounded down
   580  	// first, then add back one if necessary.
   581  	maxPages := max / pageSize
   582  	if max%pageSize != 0 {
   583  		maxPages++
   584  	}
   585  
   586  	// Calculate the minimum number of pages we can scavenge.
   587  	//
   588  	// Because we can only scavenge whole physical pages, we must
   589  	// ensure that we scavenge at least minPages each time, aligned
   590  	// to minPages*pageSize.
   591  	minPages := physPageSize / pageSize
   592  	if minPages < 1 {
   593  		minPages = 1
   594  	}
   595  
   596  	// Helpers for locking and unlocking only if mayUnlock == true.
   597  	lockHeap := func() {
   598  		if mayUnlock {
   599  			lock(p.mheapLock)
   600  		}
   601  	}
   602  	unlockHeap := func() {
   603  		if mayUnlock {
   604  			unlock(p.mheapLock)
   605  		}
   606  	}
   607  
   608  	// Fast path: check the chunk containing the top-most address in work,
   609  	// starting at that address's page index in the chunk.
   610  	//
   611  	// Note that work.limit is exclusive, so get the chunk we care about
   612  	// by subtracting 1.
   613  	maxAddr := work.limit.addr() - 1
   614  	maxChunk := chunkIndex(maxAddr)
   615  	if p.summary[len(p.summary)-1][maxChunk].max() >= uint(minPages) {
   616  		// We only bother looking for a candidate if there are at least
   617  		// minPages free pages at all.
   618  		base, npages := p.chunkOf(maxChunk).findScavengeCandidate(chunkPageIndex(maxAddr), minPages, maxPages)
   619  
   620  		// If we found something, scavenge it and return!
   621  		if npages != 0 {
   622  			work.limit = offAddr{p.scavengeRangeLocked(maxChunk, base, npages)}
   623  
   624  			assertLockHeld(p.mheapLock) // Must be locked on return.
   625  			return uintptr(npages) * pageSize, work
   626  		}
   627  	}
   628  	// Update the limit to reflect the fact that we checked maxChunk already.
   629  	work.limit = offAddr{chunkBase(maxChunk)}
   630  
   631  	// findCandidate finds the next scavenge candidate in work optimistically.
   632  	//
   633  	// Returns the candidate chunk index and true on success, and false on failure.
   634  	//
   635  	// The heap need not be locked.
   636  	findCandidate := func(work addrRange) (chunkIdx, bool) {
   637  		// Iterate over this work's chunks.
   638  		for i := chunkIndex(work.limit.addr() - 1); i >= chunkIndex(work.base.addr()); i-- {
   639  			// If this chunk is totally in-use or has no unscavenged pages, don't bother
   640  			// doing a more sophisticated check.
   641  			//
   642  			// Note we're accessing the summary and the chunks without a lock, but
   643  			// that's fine. We're being optimistic anyway.
   644  
   645  			// Check quickly if there are enough free pages at all.
   646  			if p.summary[len(p.summary)-1][i].max() < uint(minPages) {
   647  				continue
   648  			}
   649  
   650  			// Run over the chunk looking harder for a candidate. Again, we could
   651  			// race with a lot of different pieces of code, but we're just being
   652  			// optimistic. Make sure we load the l2 pointer atomically though, to
   653  			// avoid races with heap growth. It may or may not be possible to also
   654  			// see a nil pointer in this case if we do race with heap growth, but
   655  			// just defensively ignore the nils. This operation is optimistic anyway.
   656  			l2 := (*[1 << pallocChunksL2Bits]pallocData)(atomic.Loadp(unsafe.Pointer(&p.chunks[i.l1()])))
   657  			if l2 != nil && l2[i.l2()].hasScavengeCandidate(minPages) {
   658  				return i, true
   659  			}
   660  		}
   661  		return 0, false
   662  	}
   663  
   664  	// Slow path: iterate optimistically over the in-use address space
   665  	// looking for any free and unscavenged page. If we think we see something,
   666  	// lock and verify it!
   667  	for work.size() != 0 {
   668  		unlockHeap()
   669  
   670  		// Search for the candidate.
   671  		candidateChunkIdx, ok := findCandidate(work)
   672  
   673  		// Lock the heap. We need to do this now whether we found a candidate or not.
   674  		// If we did, we'll verify it. If not, we need to lock before returning
   675  		// anyway.
   676  		lockHeap()
   677  
   678  		if !ok {
   679  			// We didn't find a candidate, so we're done.
   680  			work.limit = work.base
   681  			break
   682  		}
   683  
   684  		// Find, verify, and scavenge if we can.
   685  		chunk := p.chunkOf(candidateChunkIdx)
   686  		base, npages := chunk.findScavengeCandidate(pallocChunkPages-1, minPages, maxPages)
   687  		if npages > 0 {
   688  			work.limit = offAddr{p.scavengeRangeLocked(candidateChunkIdx, base, npages)}
   689  
   690  			assertLockHeld(p.mheapLock) // Must be locked on return.
   691  			return uintptr(npages) * pageSize, work
   692  		}
   693  
   694  		// We were fooled, so let's continue from where we left off.
   695  		work.limit = offAddr{chunkBase(candidateChunkIdx)}
   696  	}
   697  
   698  	assertLockHeld(p.mheapLock) // Must be locked on return.
   699  	return 0, work
   700  }
   701  
   702  // scavengeRangeLocked scavenges the given region of memory.
   703  // The region of memory is described by its chunk index (ci),
   704  // the starting page index of the region relative to that
   705  // chunk (base), and the length of the region in pages (npages).
   706  //
   707  // Returns the base address of the scavenged region.
   708  //
   709  // p.mheapLock must be held.
   710  func (p *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr {
   711  	assertLockHeld(p.mheapLock)
   712  
   713  	p.chunkOf(ci).scavenged.setRange(base, npages)
   714  
   715  	// Compute the full address for the start of the range.
   716  	addr := chunkBase(ci) + uintptr(base)*pageSize
   717  
   718  	// Update the scavenge low watermark.
   719  	if oAddr := (offAddr{addr}); oAddr.lessThan(p.scav.scavLWM) {
   720  		p.scav.scavLWM = oAddr
   721  	}
   722  
   723  	// Only perform the actual scavenging if we're not in a test.
   724  	// It's dangerous to do so otherwise.
   725  	if p.test {
   726  		return addr
   727  	}
   728  	sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
   729  
   730  	// Update global accounting only when not in test, otherwise
   731  	// the runtime's accounting will be wrong.
   732  	nbytes := int64(npages) * pageSize
   733  	atomic.Xadd64(&memstats.heap_released, nbytes)
   734  
   735  	// Update consistent accounting too.
   736  	stats := memstats.heapStats.acquire()
   737  	atomic.Xaddint64(&stats.committed, -nbytes)
   738  	atomic.Xaddint64(&stats.released, nbytes)
   739  	memstats.heapStats.release()
   740  
   741  	return addr
   742  }
   743  
   744  // fillAligned returns x but with all zeroes in m-aligned
   745  // groups of m bits set to 1 if any bit in the group is non-zero.
   746  //
   747  // For example, fillAligned(0x0100a3, 8) == 0xff00ff.
   748  //
   749  // Note that if m == 1, this is a no-op.
   750  //
   751  // m must be a power of 2 <= maxPagesPerPhysPage.
   752  func fillAligned(x uint64, m uint) uint64 {
   753  	apply := func(x uint64, c uint64) uint64 {
   754  		// The technique used here is derived from
   755  		// https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
   756  		// and extended for more than just bytes (like nibbles
   757  		// and uint16s) by using an appropriate constant.
   758  		//
   759  		// To summarize the technique, quoting from that page:
   760  		// "[It] works by first zeroing the high bits of the [8]
   761  		// bytes in the word. Subsequently, it adds a number that
   762  		// will result in an overflow to the high bit of a byte if
   763  		// any of the low bits were initially set. Next the high
   764  		// bits of the original word are ORed with these values;
   765  		// thus, the high bit of a byte is set iff any bit in the
   766  		// byte was set. Finally, we determine if any of these high
   767  		// bits are zero by ORing with ones everywhere except the
   768  		// high bits and inverting the result."
   769  		return ^((((x & c) + c) | x) | c)
   770  	}
   771  	// Transform x to contain a 1 bit at the top of each m-aligned
   772  	// group of m zero bits.
   773  	switch m {
   774  	case 1:
   775  		return x
   776  	case 2:
   777  		x = apply(x, 0x5555555555555555)
   778  	case 4:
   779  		x = apply(x, 0x7777777777777777)
   780  	case 8:
   781  		x = apply(x, 0x7f7f7f7f7f7f7f7f)
   782  	case 16:
   783  		x = apply(x, 0x7fff7fff7fff7fff)
   784  	case 32:
   785  		x = apply(x, 0x7fffffff7fffffff)
   786  	case 64: // == maxPagesPerPhysPage
   787  		x = apply(x, 0x7fffffffffffffff)
   788  	default:
   789  		throw("bad m value")
   790  	}
   791  	// Now, the top bit of each m-aligned group in x is set
   792  	// iff that group was all zero in the original x.
   793  
   794  	// From each group of m bits subtract 1.
   795  	// Because we know only the top bits of each
   796  	// m-aligned group are set, we know this will
   797  	// set each group to have all the bits set except
   798  	// the top bit, so just OR with the original
   799  	// result to set all the bits.
   800  	return ^((x - (x >> (m - 1))) | x)
   801  }
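
        // fillAlignedExamples is an illustrative (hypothetical, not part of the
        // runtime) spot check of the behavior described above.
        func fillAlignedExamples() bool {
        	return fillAligned(0x0100a3, 8) == 0xff00ff && // the documented example
        		fillAligned(1, 64) == ^uint64(0) && // any set bit fills the whole 64-bit group
        		fillAligned(0xf0, 4) == 0xf0 // a fully-set nibble stays put; zero nibbles stay zero
        }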
   802  
   803  // hasScavengeCandidate returns true if there are any min-page-aligned groups of
   804  // min pages of free-and-unscavenged memory in the region represented by this
   805  // pallocData.
   806  //
   807  // min must be a non-zero power of 2 <= maxPagesPerPhysPage.
   808  func (m *pallocData) hasScavengeCandidate(min uintptr) bool {
   809  	if min&(min-1) != 0 || min == 0 {
   810  		print("runtime: min = ", min, "\n")
   811  		throw("min must be a non-zero power of 2")
   812  	} else if min > maxPagesPerPhysPage {
   813  		print("runtime: min = ", min, "\n")
   814  		throw("min too large")
   815  	}
   816  
   817  	// The goal of this search is to see if the chunk contains any free and unscavenged memory.
   818  	for i := len(m.scavenged) - 1; i >= 0; i-- {
   819  		// 1s are scavenged OR non-free => 0s are unscavenged AND free
   820  		//
   821  		// TODO(mknyszek): Consider splitting up fillAligned into two
   822  		// functions, since here we technically could get by with just
   823  		// the first half of its computation. It'll save a few instructions
   824  		// but adds some additional code complexity.
   825  		x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
   826  
   827  		// If x isn't all 1s, some min-aligned group of free and unscavenged pages exists.
   828  		if x != ^uint64(0) {
   829  			return true
   830  		}
   831  	}
   832  	return false
   833  }
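
        // To illustrate with hypothetical bitmaps: if a group of pages has
        // pallocBits = 0b1100 and scavenged = 0b0101, then the union is 0b1101,
        // and the zero bit at index 1 marks a page that is both free and
        // unscavenged, i.e. a scavenge candidate.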
   834  
   835  // findScavengeCandidate returns a start index and a size for this pallocData
   836  // segment which represents a contiguous region of free and unscavenged memory.
   837  //
   838  // searchIdx indicates the page index within this chunk to start the search, but
   839  // note that findScavengeCandidate searches backwards through the pallocData. As a
   840  // a result, it will return the highest scavenge candidate in address order.
   841  //
   842  // min indicates a hard minimum size and alignment for runs of pages. That is,
   843  // findScavengeCandidate will not return a region smaller than min pages in size,
   844  // or that is min pages or greater in size but not aligned to min. min must be
   845  // a non-zero power of 2 <= maxPagesPerPhysPage.
   846  //
   847  // max is a hint for how big of a region is desired. If max >= pallocChunkPages, then
   848  // findScavengeCandidate effectively returns entire free and unscavenged regions.
   849  // If max < pallocChunkPages, it may truncate the returned region such that size is
   850  // max. However, findScavengeCandidate may still return a larger region if, for
   851  // example, it chooses to preserve huge pages, or if max is not aligned to min (it
   852  // will round up). That is, even if max is small, the returned size is not guaranteed
   853  // to be equal to max. max is allowed to be less than min, in which case it is as if
   854  // max == min.
   855  func (m *pallocData) findScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
   856  	if min&(min-1) != 0 || min == 0 {
   857  		print("runtime: min = ", min, "\n")
   858  		throw("min must be a non-zero power of 2")
   859  	} else if min > maxPagesPerPhysPage {
   860  		print("runtime: min = ", min, "\n")
   861  		throw("min too large")
   862  	}
   863  	// max may not be min-aligned, so we might accidentally truncate to
   864  	// a max value which causes us to return a non-min-aligned value.
   865  	// To prevent this, align max up to a multiple of min (which is always
   866  	// a power of 2). This also prevents max from ever being less than
   867  	// min, unless it's zero, so handle that explicitly.
   868  	if max == 0 {
   869  		max = min
   870  	} else {
   871  		max = alignUp(max, min)
   872  	}
   873  
   874  	i := int(searchIdx / 64)
   875  	// Start by quickly skipping over blocks of non-free or scavenged pages.
   876  	for ; i >= 0; i-- {
   877  		// 1s are scavenged OR non-free => 0s are unscavenged AND free
   878  		x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
   879  		if x != ^uint64(0) {
   880  			break
   881  		}
   882  	}
   883  	if i < 0 {
   884  		// Failed to find any free/unscavenged pages.
   885  		return 0, 0
   886  	}
   887  	// We have something in the 64-bit chunk at i, but it could
   888  	// extend further. Loop until we find the extent of it.
   889  
   890  	// 1s are scavenged OR non-free => 0s are unscavenged AND free
   891  	x := fillAligned(m.scavenged[i]|m.pallocBits[i], uint(min))
   892  	z1 := uint(sys.LeadingZeros64(^x))
   893  	run, end := uint(0), uint(i)*64+(64-z1)
   894  	if x<<z1 != 0 {
   895  		// After shifting out z1 bits, we still have 1s,
   896  		// so the run ends inside this word.
   897  		run = uint(sys.LeadingZeros64(x << z1))
   898  	} else {
   899  		// After shifting out z1 bits, we have no more 1s.
   900  		// This means the run extends to the bottom of the
   901  		// word so it may extend into further words.
   902  		run = 64 - z1
   903  		for j := i - 1; j >= 0; j-- {
   904  			x := fillAligned(m.scavenged[j]|m.pallocBits[j], uint(min))
   905  			run += uint(sys.LeadingZeros64(x))
   906  			if x != 0 {
   907  				// The run stopped in this word.
   908  				break
   909  			}
   910  		}
   911  	}
   912  
   913  	// Split the run we found if it's larger than max but hold on to
   914  	// our original length, since we may need it later.
   915  	size := run
   916  	if size > uint(max) {
   917  		size = uint(max)
   918  	}
   919  	start := end - size
   920  
   921  	// Each huge page is guaranteed to fit in a single palloc chunk.
   922  	//
   923  	// TODO(mknyszek): Support larger huge page sizes.
   924  	// TODO(mknyszek): Consider taking pages-per-huge-page as a parameter
   925  	// so we can write tests for this.
   926  	if physHugePageSize > pageSize && physHugePageSize > physPageSize {
   927  		// We have huge pages, so let's ensure we don't break one by scavenging
   928  		// over a huge page boundary. If the range [start, start+size) overlaps with
   929  		// a free-and-unscavenged huge page, we want to grow the region we scavenge
   930  		// to include that huge page.
   931  
   932  		// Compute the huge page boundary above our candidate.
   933  		pagesPerHugePage := uintptr(physHugePageSize / pageSize)
   934  		hugePageAbove := uint(alignUp(uintptr(start), pagesPerHugePage))
   935  
   936  		// If that boundary is within our current candidate, then we may be breaking
   937  		// a huge page.
   938  		if hugePageAbove <= end {
   939  			// Compute the huge page boundary below our candidate.
   940  			hugePageBelow := uint(alignDown(uintptr(start), pagesPerHugePage))
   941  
   942  			if hugePageBelow >= end-run {
   943  				// We're in danger of breaking apart a huge page since start+size crosses
   944  				// a huge page boundary and rounding down start to the nearest huge
   945  				// page boundary is included in the full run we found. Include the entire
   946  				// huge page in the bound by rounding down to the huge page size.
   947  				size = size + (start - hugePageBelow)
   948  				start = hugePageBelow
   949  			}
   950  		}
   951  	}
   952  	return start, size
   953  }
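
        // A worked example of the huge page adjustment above, with hypothetical
        // numbers: with 8 KiB runtime pages and 2 MiB huge pages,
        // pagesPerHugePage is 256. Suppose the full run found spans pages
        // [0, 300) and max truncates the candidate to [250, 300). The huge page
        // boundary at page 256 falls inside the candidate, and
        // alignDown(250, 256) = 0 >= end-run = 0, so the candidate grows back to
        // [0, 300) and the huge page covering pages [0, 256) is not split.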
   954  
