Source file src/runtime/mstats.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory statistics

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

type mstats struct {
	// Statistics about malloc heap.
	heapStats consistentHeapStats

	// Statistics about stacks.
	stacks_sys sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys

	// Statistics about allocation of low-level fixed-size structures.
	mspan_sys    sysMemStat
	mcache_sys   sysMemStat
	buckhash_sys sysMemStat // profiling bucket hash table

	// Statistics about GC overhead.
	gcMiscSys sysMemStat // updated atomically or during STW

	// Miscellaneous statistics.
	other_sys sysMemStat // updated atomically or during STW

	// Statistics about the garbage collector.

	// Protected by mheap or stopping the world during GC.
	last_gc_unix    uint64 // last gc (in unix time)
	pause_total_ns  uint64
	pause_ns        [256]uint64 // circular buffer of recent gc pause lengths
	pause_end       [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
	numgc           uint32
	numforcedgc     uint32  // number of user-forced GCs
	gc_cpu_fraction float64 // fraction of CPU time used by GC

	last_gc_nanotime uint64 // last gc (monotonic time)
	lastHeapInUse    uint64 // heapInUse at mark termination of the previous GC

	enablegc bool

	// gcPauseDist represents the distribution of all GC-related
	// application pauses in the runtime.
	//
	// Each individual pause is counted separately, unlike pause_ns.
	gcPauseDist timeHistogram
}

var memstats mstats

// A MemStats records statistics about the memory allocator.
type MemStats struct {
	// General statistics.

	// Alloc is bytes of allocated heap objects.
	//
	// This is the same as HeapAlloc (see below).
	Alloc uint64

	// TotalAlloc is cumulative bytes allocated for heap objects.
	//
	// TotalAlloc increases as heap objects are allocated, but
	// unlike Alloc and HeapAlloc, it does not decrease when
	// objects are freed.
	TotalAlloc uint64

	// Sys is the total bytes of memory obtained from the OS.
	//
	// Sys is the sum of the XSys fields below. Sys measures the
	// virtual address space reserved by the Go runtime for the
	// heap, stacks, and other internal data structures. It's
	// likely that not all of the virtual address space is backed
	// by physical memory at any given moment, though in general
	// it all was at some point.
	Sys uint64

	// Lookups is the number of pointer lookups performed by the
	// runtime.
	//
	// This is primarily useful for debugging runtime internals.
	Lookups uint64

	// Mallocs is the cumulative count of heap objects allocated.
	// The number of live objects is Mallocs - Frees.
	Mallocs uint64

	// Frees is the cumulative count of heap objects freed.
	Frees uint64

	// Heap memory statistics.
	//
	// Interpreting the heap statistics requires some knowledge of
	// how Go organizes memory. Go divides the virtual address
	// space of the heap into "spans", which are contiguous
	// regions of memory 8K or larger. A span may be in one of
	// three states:
	//
	// An "idle" span contains no objects or other data. The
	// physical memory backing an idle span can be released back
	// to the OS (but the virtual address space never is), or it
	// can be converted into an "in use" or "stack" span.
	//
	// An "in use" span contains at least one heap object and may
	// have free space available to allocate more heap objects.
	//
	// A "stack" span is used for goroutine stacks. Stack spans
	// are not considered part of the heap. A span can change
	// between heap and stack memory; it is never used for both
	// simultaneously.

	// HeapAlloc is bytes of allocated heap objects.
	//
	// "Allocated" heap objects include all reachable objects, as
	// well as unreachable objects that the garbage collector has
	// not yet freed. Specifically, HeapAlloc increases as heap
	// objects are allocated and decreases as the heap is swept
	// and unreachable objects are freed. Sweeping occurs
	// incrementally between GC cycles, so these two processes
	// occur simultaneously, and as a result HeapAlloc tends to
	// change smoothly (in contrast with the sawtooth that is
	// typical of stop-the-world garbage collectors).
	HeapAlloc uint64

	// HeapSys is bytes of heap memory obtained from the OS.
	//
	// HeapSys measures the amount of virtual address space
	// reserved for the heap. This includes virtual address space
	// that has been reserved but not yet used, which consumes no
	// physical memory, but tends to be small, as well as virtual
	// address space for which the physical memory has been
	// returned to the OS after it became unused (see HeapReleased
	// for a measure of the latter).
	//
	// HeapSys estimates the largest size the heap has had.
	HeapSys uint64

	// HeapIdle is bytes in idle (unused) spans.
	//
	// Idle spans have no objects in them. These spans could be
	// (and may already have been) returned to the OS, or they can
	// be reused for heap allocations, or they can be reused as
	// stack memory.
	//
	// HeapIdle minus HeapReleased estimates the amount of memory
	// that could be returned to the OS, but is being retained by
	// the runtime so it can grow the heap without requesting more
	// memory from the OS. If this difference is significantly
	// larger than the heap size, it indicates there was a recent
	// transient spike in live heap size.
	HeapIdle uint64

	// HeapInuse is bytes in in-use spans.
	//
	// In-use spans have at least one object in them. These spans
	// can only be used for other objects of roughly the same
	// size.
	//
	// HeapInuse minus HeapAlloc estimates the amount of memory
	// that has been dedicated to particular size classes, but is
	// not currently being used. This is an upper bound on
	// fragmentation, but in general this memory can be reused
	// efficiently.
	HeapInuse uint64

	// HeapReleased is bytes of physical memory returned to the OS.
	//
	// This counts heap memory from idle spans that was returned
	// to the OS and has not yet been reacquired for the heap.
	HeapReleased uint64

	// HeapObjects is the number of allocated heap objects.
	//
	// Like HeapAlloc, this increases as objects are allocated and
	// decreases as the heap is swept and unreachable objects are
	// freed.
	HeapObjects uint64

	// Stack memory statistics.
	//
	// Stacks are not considered part of the heap, but the runtime
	// can reuse a span of heap memory for stack memory, and
	// vice-versa.

	// StackInuse is bytes in stack spans.
	//
	// In-use stack spans have at least one stack in them. These
	// spans can only be used for other stacks of the same size.
	//
	// There is no StackIdle because unused stack spans are
	// returned to the heap (and hence counted toward HeapIdle).
	StackInuse uint64

	// StackSys is bytes of stack memory obtained from the OS.
	//
	// StackSys is StackInuse, plus any memory obtained directly
	// from the OS for OS thread stacks.
	//
	// In non-cgo programs this metric is currently equal to StackInuse
	// (but this should not be relied upon, and the value may change in
	// the future).
	//
	// In cgo programs this metric includes OS thread stacks allocated
	// directly from the OS. Currently, this only accounts for one stack in
	// c-shared and c-archive build modes and other sources of stacks from
	// the OS (notably, any allocated by C code) are not currently measured.
	// Note this too may change in the future.
	StackSys uint64

	// Off-heap memory statistics.
	//
	// The following statistics measure runtime-internal
	// structures that are not allocated from heap memory (usually
	// because they are part of implementing the heap). Unlike
	// heap or stack memory, any memory allocated to these
	// structures is dedicated to these structures.
	//
	// These are primarily useful for debugging runtime memory
	// overheads.

	// MSpanInuse is bytes of allocated mspan structures.
	MSpanInuse uint64

	// MSpanSys is bytes of memory obtained from the OS for mspan
	// structures.
	MSpanSys uint64

	// MCacheInuse is bytes of allocated mcache structures.
	MCacheInuse uint64

	// MCacheSys is bytes of memory obtained from the OS for
	// mcache structures.
	MCacheSys uint64

	// BuckHashSys is bytes of memory in profiling bucket hash tables.
	BuckHashSys uint64

	// GCSys is bytes of memory in garbage collection metadata.
	GCSys uint64

	// OtherSys is bytes of memory in miscellaneous off-heap
	// runtime allocations.
	OtherSys uint64

	// Garbage collector statistics.

	// NextGC is the target heap size of the next GC cycle.
	//
	// The garbage collector's goal is to keep HeapAlloc ≤ NextGC.
	// At the end of each GC cycle, the target for the next cycle
	// is computed based on the amount of reachable data and the
	// value of GOGC.
	NextGC uint64

	// LastGC is the time the last garbage collection finished, as
	// nanoseconds since 1970 (the UNIX epoch).
	LastGC uint64

	// PauseTotalNs is the cumulative nanoseconds in GC
	// stop-the-world pauses since the program started.
	//
	// During a stop-the-world pause, all goroutines are paused
	// and only the garbage collector can run.
	PauseTotalNs uint64

	// PauseNs is a circular buffer of recent GC stop-the-world
	// pause times in nanoseconds.
	//
	// The most recent pause is at PauseNs[(NumGC+255)%256]. In
	// general, PauseNs[(N+255)%256] records the time paused in
	// the Nth GC cycle, for the most recent 256 cycles. There
	// may be multiple pauses per GC cycle; this is the sum of
	// all pauses during a cycle.
	PauseNs [256]uint64

	// PauseEnd is a circular buffer of recent GC pause end times,
	// as nanoseconds since 1970 (the UNIX epoch).
	//
	// This buffer is filled the same way as PauseNs. There may be
	// multiple pauses per GC cycle; this records the end of the
	// last pause in a cycle.
	PauseEnd [256]uint64

	// NumGC is the number of completed GC cycles.
	NumGC uint32

	// NumForcedGC is the number of GC cycles that were forced by
	// the application calling the GC function.
	NumForcedGC uint32

	// GCCPUFraction is the fraction of this program's available
	// CPU time used by the GC since the program started.
	//
	// GCCPUFraction is expressed as a number between 0 and 1,
	// where 0 means GC has consumed none of this program's CPU. A
	// program's available CPU time is defined as the integral of
	// GOMAXPROCS since the program started. That is, if
	// GOMAXPROCS is 2 and a program has been running for 10
	// seconds, its "available CPU" is 20 seconds. GCCPUFraction
	// does not include CPU time used for write barrier activity.
	//
	// This is the same as the fraction of CPU reported by
	// GODEBUG=gctrace=1.
	GCCPUFraction float64

	// EnableGC indicates that GC is enabled. It is always true,
	// even if GOGC=off.
	EnableGC bool

	// DebugGC is currently unused.
	DebugGC bool

	// BySize reports per-size class allocation statistics.
	//
	// BySize[N] gives statistics for allocations of size S where
	// BySize[N-1].Size < S ≤ BySize[N].Size.
	//
	// This does not report allocations larger than BySize[60].Size.
	BySize [61]struct {
		// Size is the maximum byte size of an object in this
		// size class.
		Size uint32

		// Mallocs is the cumulative count of heap objects
		// allocated in this size class. The cumulative bytes
		// of allocation is Size*Mallocs. The number of live
		// objects in this size class is Mallocs - Frees.
		Mallocs uint64

		// Frees is the cumulative count of heap objects freed
		// in this size class.
		Frees uint64
	}
}
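
// The BySize documentation above implies a simple way to find the size
// class that covers a given allocation size. A minimal sketch (user-side
// code, not part of this file; sizeClass is a hypothetical helper):
//
//	// sizeClass returns the index of the size class that objects of
//	// s bytes fall into, or -1 if s is larger than BySize[60].Size
//	// (such objects are allocated as dedicated large objects).
//	func sizeClass(m *runtime.MemStats, s uint32) int {
//		for i := range m.BySize {
//			if s <= m.BySize[i].Size {
//				return i
//			}
//		}
//		return -1
//	}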

func init() {
	if offset := unsafe.Offsetof(memstats.heapStats); offset%8 != 0 {
		println(offset)
		throw("memstats.heapStats not aligned to 8 bytes")
	}
	// Ensure the size of heapStatsDelta causes adjacent fields/slots (e.g.
	// [3]heapStatsDelta) to be 8-byte aligned.
	if size := unsafe.Sizeof(heapStatsDelta{}); size%8 != 0 {
		println(size)
		throw("heapStatsDelta not a multiple of 8 bytes in size")
	}
}

// ReadMemStats populates m with memory allocator statistics.
//
// The returned memory allocator statistics are up to date as of the
// call to ReadMemStats. This is in contrast with a heap profile,
// which is a snapshot as of the most recently completed garbage
// collection cycle.
func ReadMemStats(m *MemStats) {
	_ = m.Alloc // nil check test before we switch stacks, see issue 61158
	stopTheWorld(stwReadMemStats)

	systemstack(func() {
		readmemstats_m(m)
	})

	startTheWorld()
}
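
// A minimal usage sketch (user-side code, not part of this file). Note that
// ReadMemStats stops the world, so it is comparatively expensive and is best
// called sparingly; the PauseNs indexing follows the MemStats documentation
// above.
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		var m runtime.MemStats
//		runtime.ReadMemStats(&m)
//		fmt.Printf("heap alloc: %d B, sys: %d B\n", m.HeapAlloc, m.Sys)
//		fmt.Printf("gc cycles: %d, gc cpu fraction: %f\n", m.NumGC, m.GCCPUFraction)
//		if m.NumGC > 0 {
//			// Most recent stop-the-world pause, per the PauseNs docs.
//			fmt.Printf("last pause: %d ns\n", m.PauseNs[(m.NumGC+255)%256])
//		}
//	}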

// readmemstats_m populates stats for internal runtime values.
//
// The world must be stopped.
func readmemstats_m(stats *MemStats) {
	assertWorldStopped()

	// Flush mcaches to mcentral before doing anything else.
	//
	// Flushing to the mcentral may in general cause stats to
	// change as mcentral data structures are manipulated.
	systemstack(flushallmcaches)

	// Calculate memory allocator stats.
	// During program execution we only count number of frees and amount of freed memory.
	// Current number of alive objects in the heap and amount of alive heap memory
	// are calculated by scanning all spans.
	// Total number of mallocs is calculated as number of frees plus number of alive objects.
	// Similarly, total amount of allocated memory is calculated as amount of freed memory
	// plus amount of alive heap memory.

	// Collect consistent stats, which are the source-of-truth in some cases.
	var consStats heapStatsDelta
	memstats.heapStats.unsafeRead(&consStats)

	// Collect large allocation stats.
	totalAlloc := consStats.largeAlloc
	nMalloc := consStats.largeAllocCount
	totalFree := consStats.largeFree
	nFree := consStats.largeFreeCount

	// Collect per-sizeclass stats.
	var bySize [_NumSizeClasses]struct {
		Size    uint32
		Mallocs uint64
		Frees   uint64
	}
	for i := range bySize {
		bySize[i].Size = uint32(class_to_size[i])

		// Malloc stats.
		a := consStats.smallAllocCount[i]
		totalAlloc += a * uint64(class_to_size[i])
		nMalloc += a
		bySize[i].Mallocs = a

		// Free stats.
		f := consStats.smallFreeCount[i]
		totalFree += f * uint64(class_to_size[i])
		nFree += f
		bySize[i].Frees = f
	}

	// Account for tiny allocations.
	// For historical reasons, MemStats includes tiny allocations
	// in both the total free and total alloc count. This double-counts
	// memory in some sense because their tiny allocation block is also
	// counted. Tracking the lifetime of individual tiny allocations is
	// currently not done because it would be too expensive.
	nFree += consStats.tinyAllocCount
	nMalloc += consStats.tinyAllocCount

	// Calculate derived stats.

	stackInUse := uint64(consStats.inStacks)
	gcWorkBufInUse := uint64(consStats.inWorkBufs)
	gcProgPtrScalarBitsInUse := uint64(consStats.inPtrScalarBits)

	totalMapped := gcController.heapInUse.load() + gcController.heapFree.load() + gcController.heapReleased.load() +
		memstats.stacks_sys.load() + memstats.mspan_sys.load() + memstats.mcache_sys.load() +
		memstats.buckhash_sys.load() + memstats.gcMiscSys.load() + memstats.other_sys.load() +
		stackInUse + gcWorkBufInUse + gcProgPtrScalarBitsInUse

	heapGoal := gcController.heapGoal()

	// The world is stopped, so the consistent stats (after aggregation)
	// should be identical to some combination of memstats. In particular:
	//
	// * memstats.heapInUse == inHeap
	// * memstats.heapReleased == released
	// * memstats.heapInUse + memstats.heapFree == committed - inStacks - inWorkBufs - inPtrScalarBits
	// * memstats.totalAlloc == totalAlloc
	// * memstats.totalFree == totalFree
	//
	// Check if that's actually true.
	//
	// TODO(mknyszek): Maybe don't throw here. It would be bad if a
	// bug in otherwise benign accounting caused the whole application
	// to crash.
	if gcController.heapInUse.load() != uint64(consStats.inHeap) {
		print("runtime: heapInUse=", gcController.heapInUse.load(), "\n")
		print("runtime: consistent value=", consStats.inHeap, "\n")
		throw("heapInUse and consistent stats are not equal")
	}
	if gcController.heapReleased.load() != uint64(consStats.released) {
		print("runtime: heapReleased=", gcController.heapReleased.load(), "\n")
		print("runtime: consistent value=", consStats.released, "\n")
		throw("heapReleased and consistent stats are not equal")
	}
	heapRetained := gcController.heapInUse.load() + gcController.heapFree.load()
	consRetained := uint64(consStats.committed - consStats.inStacks - consStats.inWorkBufs - consStats.inPtrScalarBits)
	if heapRetained != consRetained {
		print("runtime: global value=", heapRetained, "\n")
		print("runtime: consistent value=", consRetained, "\n")
		throw("measures of the retained heap are not equal")
	}
	if gcController.totalAlloc.Load() != totalAlloc {
		print("runtime: totalAlloc=", gcController.totalAlloc.Load(), "\n")
		print("runtime: consistent value=", totalAlloc, "\n")
		throw("totalAlloc and consistent stats are not equal")
	}
	if gcController.totalFree.Load() != totalFree {
		print("runtime: totalFree=", gcController.totalFree.Load(), "\n")
		print("runtime: consistent value=", totalFree, "\n")
		throw("totalFree and consistent stats are not equal")
	}
	// Also check that mappedReady lines up with totalMapped - released.
	// This isn't really the same type of "make sure consistent stats line up" situation,
	// but this is an opportune time to check.
	if gcController.mappedReady.Load() != totalMapped-uint64(consStats.released) {
		print("runtime: mappedReady=", gcController.mappedReady.Load(), "\n")
		print("runtime: totalMapped=", totalMapped, "\n")
		print("runtime: released=", uint64(consStats.released), "\n")
		print("runtime: totalMapped-released=", totalMapped-uint64(consStats.released), "\n")
		throw("mappedReady and other memstats are not equal")
	}

	// We've calculated all the values we need. Now, populate stats.

	stats.Alloc = totalAlloc - totalFree
	stats.TotalAlloc = totalAlloc
	stats.Sys = totalMapped
	stats.Mallocs = nMalloc
	stats.Frees = nFree
	stats.HeapAlloc = totalAlloc - totalFree
	stats.HeapSys = gcController.heapInUse.load() + gcController.heapFree.load() + gcController.heapReleased.load()
	// By definition, HeapIdle is memory that was mapped
	// for the heap but is not currently used to hold heap
	// objects. It also specifically is memory that can be
	// used for other purposes, like stacks, but this memory
	// is subtracted out of HeapSys before it makes that
	// transition. Put another way:
	//
	// HeapSys = bytes allocated from the OS for the heap - bytes ultimately used for non-heap purposes
	// HeapIdle = bytes allocated from the OS for the heap - bytes ultimately used for any purpose
	//
	// or
	//
	// HeapSys = sys - stacks_inuse - gcWorkBufInUse - gcProgPtrScalarBitsInUse
	// HeapIdle = sys - stacks_inuse - gcWorkBufInUse - gcProgPtrScalarBitsInUse - heapInUse
	//
	// => HeapIdle = HeapSys - heapInUse = heapFree + heapReleased
	stats.HeapIdle = gcController.heapFree.load() + gcController.heapReleased.load()
	stats.HeapInuse = gcController.heapInUse.load()
	stats.HeapReleased = gcController.heapReleased.load()
	stats.HeapObjects = nMalloc - nFree
	stats.StackInuse = stackInUse
	// memstats.stacks_sys is only memory mapped directly for OS stacks.
	// Add in heap-allocated stack memory for user consumption.
	stats.StackSys = stackInUse + memstats.stacks_sys.load()
	stats.MSpanInuse = uint64(mheap_.spanalloc.inuse)
	stats.MSpanSys = memstats.mspan_sys.load()
	stats.MCacheInuse = uint64(mheap_.cachealloc.inuse)
	stats.MCacheSys = memstats.mcache_sys.load()
	stats.BuckHashSys = memstats.buckhash_sys.load()
	// MemStats defines GCSys as an aggregate of all memory related
	// to the memory management system, but we track this memory
	// at a more granular level in the runtime.
	stats.GCSys = memstats.gcMiscSys.load() + gcWorkBufInUse + gcProgPtrScalarBitsInUse
	stats.OtherSys = memstats.other_sys.load()
	stats.NextGC = heapGoal
	stats.LastGC = memstats.last_gc_unix
	stats.PauseTotalNs = memstats.pause_total_ns
	stats.PauseNs = memstats.pause_ns
	stats.PauseEnd = memstats.pause_end
	stats.NumGC = memstats.numgc
	stats.NumForcedGC = memstats.numforcedgc
	stats.GCCPUFraction = memstats.gc_cpu_fraction
	stats.EnableGC = true

	// stats.BySize and bySize might not match in length.
	// That's OK, stats.BySize cannot change due to backwards
	// compatibility issues. copy will copy the minimum amount
	// of values between the two of them.
	copy(stats.BySize[:], bySize[:])
}

//go:linkname readGCStats runtime/debug.readGCStats
func readGCStats(pauses *[]uint64) {
	systemstack(func() {
		readGCStats_m(pauses)
	})
}

// readGCStats_m must be called on the system stack because it acquires the heap
// lock. See mheap for details.
//
//go:systemstack
func readGCStats_m(pauses *[]uint64) {
	p := *pauses
	// Calling code in runtime/debug should make the slice large enough.
	if cap(p) < len(memstats.pause_ns)+3 {
		throw("short slice passed to readGCStats")
	}

	// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
	lock(&mheap_.lock)

	n := memstats.numgc
	if n > uint32(len(memstats.pause_ns)) {
		n = uint32(len(memstats.pause_ns))
	}

	// The pause buffer is circular. The most recent pause is at
	// pause_ns[(numgc-1)%len(pause_ns)], and then backward
	// from there to go back farther in time. We deliver the times
	// most recent first (in p[0]).
	p = p[:cap(p)]
	for i := uint32(0); i < n; i++ {
		j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
		p[i] = memstats.pause_ns[j]
		p[n+i] = memstats.pause_end[j]
	}

	p[n+n] = memstats.last_gc_unix
	p[n+n+1] = uint64(memstats.numgc)
	p[n+n+2] = memstats.pause_total_ns
	unlock(&mheap_.lock)
	*pauses = p[:n+n+3]
}
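
// The slice layout produced above (n pause times, then n pause end times,
// then last GC time, GC count, and total pause time) is consumed by
// runtime/debug.ReadGCStats, the public accessor for these values. A minimal
// sketch of that consumer side (user code, not part of this file):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime/debug"
//	)
//
//	func main() {
//		var s debug.GCStats
//		debug.ReadGCStats(&s)
//		fmt.Printf("gcs: %d, total pause: %v, last gc: %v\n", s.NumGC, s.PauseTotal, s.LastGC)
//		if len(s.Pause) > 0 {
//			fmt.Printf("most recent pause: %v\n", s.Pause[0]) // delivered most recent first
//		}
//	}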

// flushmcache flushes the mcache of allp[i].
//
// The world must be stopped.
//
//go:nowritebarrier
func flushmcache(i int) {
	assertWorldStopped()

	p := allp[i]
	c := p.mcache
	if c == nil {
		return
	}
	c.releaseAll()
	stackcache_clear(c)
}

// flushallmcaches flushes the mcaches of all Ps.
//
// The world must be stopped.
//
//go:nowritebarrier
func flushallmcaches() {
	assertWorldStopped()

	for i := 0; i < int(gomaxprocs); i++ {
		flushmcache(i)
	}
}

// sysMemStat represents a global system statistic that is managed atomically.
//
// This type must structurally be a uint64 so that mstats aligns with MemStats.
type sysMemStat uint64

// load atomically reads the value of the stat.
//
// Must be nosplit as it is called in runtime initialization, e.g. newosproc0.
//
//go:nosplit
func (s *sysMemStat) load() uint64 {
	return atomic.Load64((*uint64)(s))
}

// add atomically adds n to the stat.
//
// Must be nosplit as it is called in runtime initialization, e.g. newosproc0.
//
//go:nosplit
func (s *sysMemStat) add(n int64) {
	val := atomic.Xadd64((*uint64)(s), n)
	if (n > 0 && int64(val) < n) || (n < 0 && int64(val)+n < n) {
		print("runtime: val=", val, " n=", n, "\n")
		throw("sysMemStat overflow")
	}
}
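
// The same pattern is available to user code via sync/atomic. A simplified
// sketch (hypothetical memStat type, not part of the runtime): the counter is
// a uint64, signed deltas are added with two's-complement wraparound, and a
// result that would be negative indicates an accounting bug.
//
//	type memStat struct{ v atomic.Uint64 }
//
//	func (s *memStat) load() uint64 { return s.v.Load() }
//
//	func (s *memStat) add(n int64) {
//		// uint64(n) wraps correctly for negative n, so this both
//		// adds and subtracts.
//		if v := int64(s.v.Add(uint64(n))); v < 0 {
//			panic("memStat underflow")
//		}
//	}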

// heapStatsDelta contains deltas of various runtime memory statistics
// that need to be updated together in order for them to be kept
// consistent with one another.
type heapStatsDelta struct {
	// Memory stats.
	committed       int64 // byte delta of memory committed
	released        int64 // byte delta of released memory generated
	inHeap          int64 // byte delta of memory placed in the heap
	inStacks        int64 // byte delta of memory reserved for stacks
	inWorkBufs      int64 // byte delta of memory reserved for work bufs
	inPtrScalarBits int64 // byte delta of memory reserved for unrolled GC prog bits

	// Allocator stats.
	//
	// These are all uint64 because they're cumulative, and could quickly wrap
	// around otherwise.
	tinyAllocCount  uint64                  // number of tiny allocations
	largeAlloc      uint64                  // bytes allocated for large objects
	largeAllocCount uint64                  // number of large object allocations
	smallAllocCount [_NumSizeClasses]uint64 // number of allocs for small objects
	largeFree       uint64                  // bytes freed for large objects (>maxSmallSize)
	largeFreeCount  uint64                  // number of frees for large objects (>maxSmallSize)
	smallFreeCount  [_NumSizeClasses]uint64 // number of frees for small objects (<=maxSmallSize)

	// NOTE: This struct must be a multiple of 8 bytes in size because it
	// is stored in an array. If it's not, atomic accesses to the above
	// fields may be unaligned and fail on 32-bit platforms.
}

// merge adds in the deltas from b into a.
func (a *heapStatsDelta) merge(b *heapStatsDelta) {
	a.committed += b.committed
	a.released += b.released
	a.inHeap += b.inHeap
	a.inStacks += b.inStacks
	a.inWorkBufs += b.inWorkBufs
	a.inPtrScalarBits += b.inPtrScalarBits

	a.tinyAllocCount += b.tinyAllocCount
	a.largeAlloc += b.largeAlloc
	a.largeAllocCount += b.largeAllocCount
	for i := range b.smallAllocCount {
		a.smallAllocCount[i] += b.smallAllocCount[i]
	}
	a.largeFree += b.largeFree
	a.largeFreeCount += b.largeFreeCount
	for i := range b.smallFreeCount {
		a.smallFreeCount[i] += b.smallFreeCount[i]
	}
}

// consistentHeapStats represents a set of various memory statistics
// whose updates must be viewed completely to get a consistent
// state of the world.
//
// To write updates to memory stats use the acquire and release
// methods. To obtain a consistent global snapshot of these statistics,
// use read.
type consistentHeapStats struct {
	// stats is a ring buffer of heapStatsDelta values.
	// Writers always atomically update the delta at index gen.
	//
	// Readers operate by rotating gen (0 -> 1 -> 2 -> 0 -> ...)
	// and synchronizing with writers by observing each P's
	// statsSeq field. If the reader observes a P not writing,
	// it can be sure that it will pick up the new gen value the
	// next time it writes.
	//
	// The reader then takes responsibility by clearing space
	// in the ring buffer for the next reader to rotate gen to
	// that space (i.e. it merges in values from index (gen-2) mod 3
	// to index (gen-1) mod 3, then clears the former).
	//
	// Note that this means only one reader can be reading at a time.
	// There is no way for readers to synchronize.
	//
	// This process is why we need a ring buffer of size 3 instead
	// of 2: one is for the writers, one contains the most recent
	// data, and the last one is clear so writers can begin writing
	// to it the moment gen is updated.
	stats [3]heapStatsDelta

	// gen represents the current index into which writers
	// are writing, and can take on the value of 0, 1, or 2.
	gen atomic.Uint32

	// noPLock is intended to provide mutual exclusion for updating
	// stats when no P is available. It does not block other writers
	// with a P, only other writers without a P and the reader. Because
	// stats are usually updated when a P is available, contention on
	// this lock should be minimal.
	noPLock mutex
}

// acquire returns a heapStatsDelta to be updated. In effect,
// it acquires the shard for writing. release must be called
// as soon as the relevant deltas are updated.
//
// The returned heapStatsDelta must be updated atomically.
//
// The caller's P must not change between acquire and
// release. This also means that the caller should not
// acquire a P or release its P in between. A P also must
// not acquire a given consistentHeapStats if it hasn't
// yet released it.
//
// nosplit because a stack growth in this function could
// lead to a stack allocation that could reenter the
// function.
//
//go:nosplit
func (m *consistentHeapStats) acquire() *heapStatsDelta {
	if pp := getg().m.p.ptr(); pp != nil {
		seq := pp.statsSeq.Add(1)
		if seq%2 == 0 {
			// Should have been incremented to odd.
			print("runtime: seq=", seq, "\n")
			throw("bad sequence number")
		}
	} else {
		lock(&m.noPLock)
	}
	gen := m.gen.Load() % 3
	return &m.stats[gen]
}

// release indicates that the writer is done modifying
// the delta. The value returned by the corresponding
// acquire must no longer be accessed or modified after
// release is called.
//
// The caller's P must not change between acquire and
// release. This also means that the caller should not
// acquire a P or release its P in between.
//
// nosplit because a stack growth in this function could
// lead to a stack allocation that causes another acquire
// before this operation has completed.
//
//go:nosplit
func (m *consistentHeapStats) release() {
	if pp := getg().m.p.ptr(); pp != nil {
		seq := pp.statsSeq.Add(1)
		if seq%2 != 0 {
			// Should have been incremented to even.
			print("runtime: seq=", seq, "\n")
			throw("bad sequence number")
		}
	} else {
		unlock(&m.noPLock)
	}
}

// unsafeRead aggregates the delta for this shard into out.
//
// Unsafe because it does so without any synchronization. The
// world must be stopped.
func (m *consistentHeapStats) unsafeRead(out *heapStatsDelta) {
	assertWorldStopped()

	for i := range m.stats {
		out.merge(&m.stats[i])
	}
}

// unsafeClear clears the shard.
//
// Unsafe because the world must be stopped and values should
// be donated elsewhere before clearing.
func (m *consistentHeapStats) unsafeClear() {
	assertWorldStopped()

	for i := range m.stats {
		m.stats[i] = heapStatsDelta{}
	}
}

// read takes a globally consistent snapshot of m
// and puts the aggregated value in out. Even though out is a
// heapStatsDelta, the resulting values should be complete and
// valid statistic values.
//
// Not safe to call concurrently. The world must be stopped
// or metricsSema must be held.
func (m *consistentHeapStats) read(out *heapStatsDelta) {
	// Getting preempted after this point is not safe because
	// we read allp. We need to make sure a STW can't happen
	// so it doesn't change out from under us.
	mp := acquirem()

	// Get the current generation. We can be confident that this
	// will not change since read is serialized and is the only
	// one that modifies currGen.
	currGen := m.gen.Load()
	prevGen := currGen - 1
	if currGen == 0 {
		prevGen = 2
	}

	// Prevent writers without a P from writing while we update gen.
	lock(&m.noPLock)

	// Rotate gen, effectively taking a snapshot of the state of
	// these statistics at the point of the exchange by moving
	// writers to the next set of deltas.
	//
	// This exchange is safe to do because we won't race
	// with anyone else trying to update this value.
	m.gen.Swap((currGen + 1) % 3)

	// Allow P-less writers to continue. They'll be writing to the
	// next generation now.
	unlock(&m.noPLock)

	for _, p := range allp {
		// Spin until there are no more writers.
		for p.statsSeq.Load()%2 != 0 {
		}
	}

	// At this point we've observed that each sequence
	// number is even, so any future writers will observe
	// the new gen value. That means it's safe to read from
	// the other deltas in the stats buffer.

	// Perform our responsibilities and free up
	// stats[prevGen] for the next time we want to take
	// a snapshot.
	m.stats[currGen].merge(&m.stats[prevGen])
	m.stats[prevGen] = heapStatsDelta{}

	// Finally, copy out the complete delta.
	*out = m.stats[currGen]

	releasem(mp)
}
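
// The generation-rotation protocol implemented by acquire, release, and read
// can be illustrated with a self-contained toy (illustrative only, not part
// of the runtime). It follows the same scheme: per-writer even/odd sequence
// numbers, a 3-slot ring buffer, and a single reader that rotates gen, waits
// for in-flight writers to drain, then folds the previous snapshot slot into
// the slot the writers just left.
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//		"sync/atomic"
//	)
//
//	const writers = 4
//
//	type counters struct {
//		stats [3]atomic.Int64        // ring buffer of deltas (one field, for brevity)
//		gen   atomic.Uint32          // slot writers currently target
//		seq   [writers]atomic.Uint32 // per-writer sequence numbers; odd = writing
//	}
//
//	func (c *counters) add(w int, n int64) {
//		if s := c.seq[w].Add(1); s%2 == 0 {
//			panic("unbalanced add") // should have been incremented to odd
//		}
//		c.stats[c.gen.Load()%3].Add(n) // gen is loaded inside the odd window
//		c.seq[w].Add(1)
//	}
//
//	// read must not be called concurrently with itself.
//	func (c *counters) read() int64 {
//		cur := c.gen.Load()
//		prev := (cur + 2) % 3
//		c.gen.Store((cur + 1) % 3) // rotate writers to the next slot
//		for i := range c.seq {
//			for c.seq[i].Load()%2 != 0 {
//				// Spin: this writer may still hold the old gen value.
//			}
//		}
//		// Fold the previous snapshot into the slot writers just left:
//		// stats[cur] now holds complete totals and stats[prev] is left
//		// clear for a later rotation.
//		c.stats[cur].Add(c.stats[prev].Swap(0))
//		return c.stats[cur].Load()
//	}
//
//	func main() {
//		var c counters
//		var wg sync.WaitGroup
//		for w := 0; w < writers; w++ {
//			wg.Add(1)
//			go func(w int) {
//				defer wg.Done()
//				for i := 0; i < 1000; i++ {
//					c.add(w, 1)
//				}
//			}(w)
//		}
//		wg.Wait()
//		fmt.Println(c.read()) // prints 4000
//	}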

type cpuStats struct {
	// All fields are CPU time in nanoseconds computed by comparing
	// calls of nanotime. This means they're all overestimates, because
	// they don't accurately compute on-CPU time (so some of the time
	// could be spent scheduled away by the OS).

	gcAssistTime    int64 // GC assists
	gcDedicatedTime int64 // GC dedicated mark workers + pauses
	gcIdleTime      int64 // GC idle mark workers
	gcPauseTime     int64 // GC pauses (all GOMAXPROCS, even if just 1 is running)
	gcTotalTime     int64

	scavengeAssistTime int64 // scavenge assists
	scavengeBgTime     int64 // background scavenger
	scavengeTotalTime  int64

	idleTime int64 // Time Ps spent in _Pidle.
	userTime int64 // Time Ps spent in _Prunning or _Psyscall that's not any of the above.

	totalTime int64 // GOMAXPROCS * (monotonic wall clock time elapsed)
}

// accumulate takes a cpuStats and adds in the current state of all GC CPU
// counters.
//
// gcMarkPhase indicates that we're in the mark phase and that certain counter
// values should be used.
func (s *cpuStats) accumulate(now int64, gcMarkPhase bool) {
	// N.B. Mark termination and sweep termination pauses are
	// accumulated in work.cpuStats at the end of their respective pauses.
	var (
		markAssistCpu     int64
		markDedicatedCpu  int64
		markFractionalCpu int64
		markIdleCpu       int64
	)
	if gcMarkPhase {
		// N.B. These stats may have stale values if the GC is not
		// currently in the mark phase.
		markAssistCpu = gcController.assistTime.Load()
		markDedicatedCpu = gcController.dedicatedMarkTime.Load()
		markFractionalCpu = gcController.fractionalMarkTime.Load()
		markIdleCpu = gcController.idleMarkTime.Load()
	}

	// The rest of the stats below are either derived from the above or
	// are reset on each mark termination.

	scavAssistCpu := scavenge.assistTime.Load()
	scavBgCpu := scavenge.backgroundTime.Load()

	// Update cumulative GC CPU stats.
	s.gcAssistTime += markAssistCpu
	s.gcDedicatedTime += markDedicatedCpu + markFractionalCpu
	s.gcIdleTime += markIdleCpu
	s.gcTotalTime += markAssistCpu + markDedicatedCpu + markFractionalCpu + markIdleCpu

	// Update cumulative scavenge CPU stats.
	s.scavengeAssistTime += scavAssistCpu
	s.scavengeBgTime += scavBgCpu
	s.scavengeTotalTime += scavAssistCpu + scavBgCpu

	// Update total CPU.
	s.totalTime = sched.totaltime + (now-sched.procresizetime)*int64(gomaxprocs)
	s.idleTime += sched.idleTime.Load()

	// Compute userTime. We compute this indirectly as everything that's not the above.
	//
	// Since time spent in _Pgcstop is covered by gcPauseTime, and time spent in _Pidle
	// is covered by idleTime, what we're left with is time spent in _Prunning and _Psyscall,
	// the latter of which is fine because the P will either go idle or get used for something
	// else via sysmon. Meanwhile if we subtract GC time from whatever's left, we get non-GC
	// _Prunning time. Note that this still leaves time spent in sweeping and in the scheduler,
	// but that's fine. The overwhelming majority of this time will be actual user time.
	s.userTime = s.totalTime - (s.gcTotalTime + s.scavengeTotalTime + s.idleTime)
}
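
// A worked example of the bookkeeping above, using hypothetical numbers: with
// GOMAXPROCS=4 and 10s of wall-clock time, totalTime is 40s of available CPU.
// If gcTotalTime is 2s, scavengeTotalTime is 0.5s, and Ps were idle for 12s,
// then userTime = 40s - (2s + 0.5s + 12s) = 25.5s, i.e. roughly 64% of the
// available CPU went to user code.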
