Source file src/runtime/trace.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Go execution tracer.
     6  // The tracer captures a wide range of execution events like goroutine
     7  // creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
     8  // changes of heap size, processor start/stop, etc., and writes them to a buffer
     9  // in a compact form. A nanosecond-precision timestamp and a stack
    10  // trace are captured for most events.
    11  // See https://golang.org/s/go15trace for more info.
    12  
    13  package runtime
    14  
    15  import (
    16  	"internal/goarch"
    17  	"runtime/internal/atomic"
    18  	"runtime/internal/sys"
    19  	"unsafe"
    20  )
    21  
    22  // Event types in the trace, args are given in square brackets.
    23  const (
    24  	traceEvNone              = 0  // unused
    25  	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
    26  	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
    27  	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
    28  	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
    29  	traceEvProcStart         = 5  // start of P [timestamp, thread id]
    30  	traceEvProcStop          = 6  // stop of P [timestamp]
    31  	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
    32  	traceEvGCDone            = 8  // GC done [timestamp]
    33  	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
    34  	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
    35  	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
    36  	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
    37  	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
    38  	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
    39  	traceEvGoEnd             = 15 // goroutine ends [timestamp]
    40  	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
    41  	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
    42  	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
    43  	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
    44  	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
    45  	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
    46  	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
    47  	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
    48  	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
    49  	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
    50  	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
    51  	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
    52  	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
    53  	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
    54  	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
    55  	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
    56  	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
    57  	traceEvHeapAlloc         = 33 // gcController.heapLive change [timestamp, heap_alloc]
    58  	traceEvHeapGoal          = 34 // gcController.heapGoal() (formerly next_gc) change [timestamp, heap goal in bytes]
    59  	traceEvTimerGoroutine    = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
    60  	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
    61  	traceEvString            = 37 // string dictionary entry [ID, length, string]
    62  	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
    63  	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
    64  	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
    65  	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
    66  	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
    67  	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
    68  	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
    69  	traceEvUserTaskCreate    = 45 // trace.NewContext [timestamp, internal task id, internal parent task id, stack, name string]
    70  	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
    71  	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
    72  	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
    73  	traceEvCPUSample         = 49 // CPU profiling sample [timestamp, stack, real timestamp, real P id (-1 when absent), goroutine id]
    74  	traceEvCount             = 50
    75  	// An event's first byte has only 6 bits for the event type; the other
    76  	// 2 bits encode the number of arguments, so the max event type value
    77  	// is 63. (A decoding sketch follows this const block.)
    78  )
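
        // For illustration (not part of the runtime): a decoder recovers the
        // event type and inline argument count from an event's first byte b
        // like this:
        //
        //	ev := b & 0x3f                  // low 6 bits: event type, so ev < 64
        //	narg := b >> traceArgCountShift // top 2 bits: 0-2 args, 3 = length-prefixed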
    79  
    80  const (
    81  	// Timestamps in trace are cputicks/traceTickDiv.
    82  	// This makes absolute values of timestamp diffs smaller,
    83  	// and so they are encoded in fewer bytes. (See the example after this block.)
    84  	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
    85  	// The suggested increment frequency for PowerPC's time base register is
    86  	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
    87  	// and ppc64le.
    88  	// Tracing won't work reliably for architectures where cputicks is emulated
    89  	// by nanotime, so the value doesn't matter for those architectures.
    90  	traceTickDiv = 16 + 48*(goarch.Is386|goarch.IsAmd64)
    91  	// Maximum number of PCs in a single stack trace.
    92  	// Since events contain only stack id rather than the whole stack trace,
    93  	// we can allow quite large values here.
    94  	traceStackSize = 128
    95  	// Identifier of a fake P that is used when we trace without a real P.
    96  	traceGlobProc = -1
    97  	// Maximum number of bytes to encode uint64 in base-128.
    98  	traceBytesPerNumber = 10
    99  	// Shift of the number of arguments in the first event byte.
   100  	traceArgCountShift = 6
   101  	// Flag passed to traceGoPark to denote that the previous wakeup of this
   102  	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
   103  	// but another goroutine got ahead and acquired the mutex before the first
   104  	// goroutine was scheduled, so the first goroutine has to block again.
   105  	// Such wakeups happen on buffered channels and sync.Mutex,
   106  	// but are generally not interesting for the end user.
   107  	traceFutileWakeup byte = 128
   108  )
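
        // For example, on x86 traceTickDiv is 16+48 = 64, so two events whose
        // raw cputicks differ by 6000 (about 2µs at 3GHz) get trace timestamps
        // 6000/64 = 93 ticks apart, and that diff fits in a single varint byte.
        // A minimal sketch of the conversion used throughout this file:
        //
        //	ticks := uint64(cputicks()) / traceTickDiv
        //	tickDiff := ticks - buf.lastTicks // varint-encoded per event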
   109  
   110  // trace is global tracing context.
   111  var trace struct {
   112  	// trace.lock must only be acquired on the system stack where
   113  	// stack splits cannot happen while it is held.
   114  	lock          mutex       // protects the following members
   115  	lockOwner     *g          // to avoid deadlocks during recursive locking
   116  	enabled       bool        // when set runtime traces events
   117  	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
   118  	headerWritten bool        // whether ReadTrace has emitted trace header
   119  	footerWritten bool        // whether ReadTrace has emitted trace footer
   120  	shutdownSema  uint32      // used to wait for ReadTrace completion
   121  	seqStart      uint64      // sequence number when tracing was started
   122  	ticksStart    int64       // cputicks when tracing was started
   123  	ticksEnd      int64       // cputicks when tracing was stopped
   124  	timeStart     int64       // nanotime when tracing was started
   125  	timeEnd       int64       // nanotime when tracing was stopped
   126  	seqGC         uint64      // GC start/done sequencer
   127  	reading       traceBufPtr // buffer currently handed off to user
   128  	empty         traceBufPtr // stack of empty buffers
   129  	fullHead      traceBufPtr // queue of full buffers
   130  	fullTail      traceBufPtr
   131  	stackTab      traceStackTable // maps stack traces to unique ids
   132  	// cpuLogRead accepts CPU profile samples from the signal handler where
   133  	// they're generated. It uses a two-word header to hold the IDs of the P and
   134  	// G (respectively) that were active at the time of the sample. Because
   135  	// profBuf uses a record with all zeros in its header to indicate overflow,
   136  	// we ensure the P field is always non-zero: the ID of a real P will
   137  	// start at bit 1, and bit 0 will be set. Samples that arrive while no P is
   138  	// running (such as near syscalls) will set the first header field to 0b10.
   139  	// This careful handling of the first header field allows us to store the ID
   140  	// of the active G directly in the second field, even though that will be 0
   141  	// when sampling g0. (A worked example of this encoding follows the struct.)
   142  	cpuLogRead *profBuf
   143  	// cpuLogBuf is a trace buffer to hold events corresponding to CPU profile
   144  	// samples, which arrive out of band and are not directly connected to a
   145  	// specific P.
   146  	cpuLogBuf traceBufPtr
   147  
   148  	reader atomic.Pointer[g] // goroutine that called ReadTrace, or nil
   149  
   150  	signalLock  atomic.Uint32 // protects use of the following member, only usable in signal handlers
   151  	cpuLogWrite *profBuf      // copy of cpuLogRead for use in signal handlers, set without signalLock
   152  
   153  	// Dictionary for traceEvString.
   154  	//
   155  	// TODO: central lock to access the map is not ideal.
   156  	//   option: pre-assign ids to all user annotation region names and tags
   157  	//   option: per-P cache
   158  	//   option: sync.Map like data structure
   159  	stringsLock mutex
   160  	strings     map[string]uint64
   161  	stringSeq   uint64
   162  
   163  	// markWorkerLabels maps gcMarkWorkerMode to string ID.
   164  	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64
   165  
   166  	bufLock mutex       // protects buf
   167  	buf     traceBufPtr // global trace buffer, used when running without a p
   168  }
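
        // A worked example of the cpuLogRead header encoding above (an
        // illustration, not runtime code): a sample taken on P 5 while
        // goroutine 42 was running is stored with
        //
        //	hdr[0] = uint64(5)<<1 | 0b1 // == 0b1011: P id in bits 1+, bit 0 set
        //	hdr[1] = 42                 // goid; legitimately 0 when sampling g0
        //
        // and a sample with no P stores hdr[0] = 0b10, so an all-zero header
        // can only be a profBuf overflow record.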
   169  
   170  // traceBufHeader is the header of a per-P tracing buffer.
   171  type traceBufHeader struct {
   172  	link      traceBufPtr             // in trace.empty/full
   173  	lastTicks uint64                  // when we wrote the last event
   174  	pos       int                     // next write offset in arr
   175  	stk       [traceStackSize]uintptr // scratch buffer for traceback
   176  }
   177  
   178  // traceBuf is a per-P tracing buffer.
   179  type traceBuf struct {
   180  	_ sys.NotInHeap
   181  	traceBufHeader
   182  	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for trace event data
   183  }
   184  
   185  // traceBufPtr is a *traceBuf that is not traced by the garbage
   186  // collector and doesn't have write barriers. traceBufs are not
   187  // allocated from the GC'd heap, so this is safe; they are often
   188  // manipulated in contexts where write barriers are not allowed, so
   189  // this is necessary.
   190  //
   191  // TODO: Since traceBuf is now embedded runtime/internal/sys.NotInHeap, this isn't necessary.
   192  type traceBufPtr uintptr
   193  
   194  func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
   195  func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
   196  func traceBufPtrOf(b *traceBuf) traceBufPtr {
   197  	return traceBufPtr(unsafe.Pointer(b))
   198  }
   199  
   200  // StartTrace enables tracing for the current process.
   201  // While tracing, the data will be buffered and available via ReadTrace.
   202  // StartTrace returns an error if tracing is already enabled.
   203  // Most clients should use the runtime/trace package or the testing package's
   204  // -test.trace flag instead of calling StartTrace directly.
   205  func StartTrace() error {
   206  	// Stop the world so that we can take a consistent snapshot
   207  	// of all goroutines at the beginning of the trace.
   208  	// stopTheWorldGC also blocks a GC from starting, so we always see
   209  	// a consistent view of GC-related events (e.g. a start is always
   210  	// paired with an end).
   211  	stopTheWorldGC("start tracing")
   212  
   213  	// Prevent sysmon from running any code that could generate events.
   214  	lock(&sched.sysmonlock)
   215  
   216  	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
   217  	// Exitsyscall could check trace.enabled long before and then suddenly wake up
   218  	// and decide to write to trace at a random point in time.
   219  	// However, such a syscall will use the global trace.buf buffer, because we've
   220  	// acquired all p's by doing stop-the-world. So this protects us from such races.
   221  	lock(&trace.bufLock)
   222  
   223  	if trace.enabled || trace.shutdown {
   224  		unlock(&trace.bufLock)
   225  		unlock(&sched.sysmonlock)
   226  		startTheWorldGC()
   227  		return errorString("tracing is already enabled")
   228  	}
   229  
   230  	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
   231  	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
   232  	// That would lead to an inconsistent trace:
   233  	// - either GoSysExit appears before EvGoInSyscall,
   234  	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
   235  	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
   236  	// trace.enabled is set afterwards once we have emitted all preliminary events.
   237  	mp := getg().m
   238  	mp.startingtrace = true
   239  
   240  	// Obtain current stack ID to use in all traceEvGoCreate events below.
   241  	stkBuf := make([]uintptr, traceStackSize)
   242  	stackID := traceStackID(mp, stkBuf, 2)
   243  
   244  	profBuf := newProfBuf(2, profBufWordCount, profBufTagCount) // after the timestamp, header is [pp.id, gp.goid]
   245  	trace.cpuLogRead = profBuf
   246  
   247  	// We must not acquire trace.signalLock outside of a signal handler: a
   248  	// profiling signal may arrive at any time and try to acquire it, leading to
   249  	// deadlock. Because we can't use that lock to protect updates to
   250  	// trace.cpuLogWrite (only use of the structure it references), reads and
   251  	// writes of the pointer must be atomic. (And although this field is never
   252  	// the sole pointer to the profBuf value, it's best to allow a write barrier
   253  	// here.)
   254  	atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), unsafe.Pointer(profBuf))
   255  
   256  	// World is stopped, no need to lock.
   257  	forEachGRace(func(gp *g) {
   258  		status := readgstatus(gp)
   259  		if status != _Gdead {
   260  			gp.traceseq = 0
   261  			gp.tracelastp = getg().m.p
   262  			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
   263  			id := trace.stackTab.put([]uintptr{startPCforTrace(gp.startpc) + sys.PCQuantum})
   264  			traceEvent(traceEvGoCreate, -1, gp.goid, uint64(id), stackID)
   265  		}
   266  		if status == _Gwaiting {
   267  			// traceEvGoWaiting is implied to have seq=1.
   268  			gp.traceseq++
   269  			traceEvent(traceEvGoWaiting, -1, gp.goid)
   270  		}
   271  		if status == _Gsyscall {
   272  			gp.traceseq++
   273  			traceEvent(traceEvGoInSyscall, -1, gp.goid)
   274  		} else if status == _Gdead && gp.m != nil && gp.m.isextra {
   275  			// Trigger two trace events for the dead g in the extra m,
   276  			// since the next event of the g will be traceEvGoSysExit in exitsyscall,
   277  			// while calling from C thread to Go.
   278  			gp.traceseq = 0
   279  			gp.tracelastp = getg().m.p
   280  			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
   281  			id := trace.stackTab.put([]uintptr{startPCforTrace(0) + sys.PCQuantum}) // no start pc
   282  			traceEvent(traceEvGoCreate, -1, gp.goid, uint64(id), stackID)
   283  			gp.traceseq++
   284  			traceEvent(traceEvGoInSyscall, -1, gp.goid)
   285  		} else {
   286  			gp.sysblocktraced = false
   287  		}
   288  	})
   289  	traceProcStart()
   290  	traceGoStart()
   291  	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
   292  	// If we do it the other way around, it is possible that exitsyscall will
   293  	// query sysexitticks after ticksStart but before the traceEvGoInSyscall timestamp.
   294  	// That would lead to a false conclusion that cputicks is broken.
   295  	trace.ticksStart = cputicks()
   296  	trace.timeStart = nanotime()
   297  	trace.headerWritten = false
   298  	trace.footerWritten = false
   299  
   300  	// string to id mapping
   301  	//  0 : reserved for an empty string
   302  	//  remaining: other strings registered by traceString
   303  	trace.stringSeq = 0
   304  	trace.strings = make(map[string]uint64)
   305  
   306  	trace.seqGC = 0
   307  	mp.startingtrace = false
   308  	trace.enabled = true
   309  
   310  	// Register runtime goroutine labels.
   311  	_, pid, bufp := traceAcquireBuffer()
   312  	for i, label := range gcMarkWorkerModeStrings[:] {
   313  		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
   314  	}
   315  	traceReleaseBuffer(pid)
   316  
   317  	unlock(&trace.bufLock)
   318  
   319  	unlock(&sched.sysmonlock)
   320  
   321  	startTheWorldGC()
   322  	return nil
   323  }
   324  
   325  // StopTrace stops tracing, if it was previously enabled.
   326  // StopTrace only returns after all the reads for the trace have completed.
   327  func StopTrace() {
   328  	// Stop the world so that we can collect the trace buffers from all p's below,
   329  	// and also to avoid races with traceEvent.
   330  	stopTheWorldGC("stop tracing")
   331  
   332  	// See the comment in StartTrace.
   333  	lock(&sched.sysmonlock)
   334  
   335  	// See the comment in StartTrace.
   336  	lock(&trace.bufLock)
   337  
   338  	if !trace.enabled {
   339  		unlock(&trace.bufLock)
   340  		unlock(&sched.sysmonlock)
   341  		startTheWorldGC()
   342  		return
   343  	}
   344  
   345  	traceGoSched()
   346  
   347  	atomicstorep(unsafe.Pointer(&trace.cpuLogWrite), nil)
   348  	trace.cpuLogRead.close()
   349  	traceReadCPU()
   350  
   351  	// Loop over all allocated Ps because dead Ps may still have
   352  	// trace buffers.
   353  	for _, p := range allp[:cap(allp)] {
   354  		buf := p.tracebuf
   355  		if buf != 0 {
   356  			traceFullQueue(buf)
   357  			p.tracebuf = 0
   358  		}
   359  	}
   360  	if trace.buf != 0 {
   361  		buf := trace.buf
   362  		trace.buf = 0
   363  		if buf.ptr().pos != 0 {
   364  			traceFullQueue(buf)
   365  		}
   366  	}
   367  	if trace.cpuLogBuf != 0 {
   368  		buf := trace.cpuLogBuf
   369  		trace.cpuLogBuf = 0
   370  		if buf.ptr().pos != 0 {
   371  			traceFullQueue(buf)
   372  		}
   373  	}
   374  
   375  	for {
   376  		trace.ticksEnd = cputicks()
   377  		trace.timeEnd = nanotime()
   378  		// On Windows, time can tick only every 15ms; wait for at least one tick.
   379  		if trace.timeEnd != trace.timeStart {
   380  			break
   381  		}
   382  		osyield()
   383  	}
   384  
   385  	trace.enabled = false
   386  	trace.shutdown = true
   387  	unlock(&trace.bufLock)
   388  
   389  	unlock(&sched.sysmonlock)
   390  
   391  	startTheWorldGC()
   392  
   393  	// The world is started but we've set trace.shutdown, so new tracing can't start.
   394  	// Wait for the trace reader to flush pending buffers and stop.
   395  	semacquire(&trace.shutdownSema)
   396  	if raceenabled {
   397  		raceacquire(unsafe.Pointer(&trace.shutdownSema))
   398  	}
   399  
   400  	systemstack(func() {
   401  		// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
   402  		lock(&trace.lock)
   403  		for _, p := range allp[:cap(allp)] {
   404  			if p.tracebuf != 0 {
   405  				throw("trace: non-empty trace buffer in proc")
   406  			}
   407  		}
   408  		if trace.buf != 0 {
   409  			throw("trace: non-empty global trace buffer")
   410  		}
   411  		if trace.fullHead != 0 || trace.fullTail != 0 {
   412  			throw("trace: non-empty full trace buffer")
   413  		}
   414  		if trace.reading != 0 || trace.reader.Load() != nil {
   415  			throw("trace: reading after shutdown")
   416  		}
   417  		for trace.empty != 0 {
   418  			buf := trace.empty
   419  			trace.empty = buf.ptr().link
   420  			sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
   421  		}
   422  		trace.strings = nil
   423  		trace.shutdown = false
   424  		trace.cpuLogRead = nil
   425  		unlock(&trace.lock)
   426  	})
   427  }
   428  
   429  // ReadTrace returns the next chunk of binary tracing data, blocking until data
   430  // is available. If tracing is turned off and all the data accumulated while it
   431  // was on has been returned, ReadTrace returns nil. The caller must copy the
   432  // returned data before calling ReadTrace again.
   433  // ReadTrace must be called from one goroutine at a time.
   434  func ReadTrace() []byte {
   435  top:
   436  	var buf []byte
   437  	var park bool
   438  	systemstack(func() {
   439  		buf, park = readTrace0()
   440  	})
   441  	if park {
   442  		gopark(func(gp *g, _ unsafe.Pointer) bool {
   443  			if !trace.reader.CompareAndSwapNoWB(nil, gp) {
   444  				// We're racing with another reader.
   445  				// Wake up and handle this case.
   446  				return false
   447  			}
   448  
   449  			if g2 := traceReader(); gp == g2 {
   450  				// New data arrived between unlocking
   451  				// and the CAS and we won the wake-up
   452  				// race, so wake up directly.
   453  				return false
   454  			} else if g2 != nil {
   455  				printlock()
   456  				println("runtime: got trace reader", g2, g2.goid)
   457  				throw("unexpected trace reader")
   458  			}
   459  
   460  			return true
   461  		}, nil, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
   462  		goto top
   463  	}
   464  
   465  	return buf
   466  }
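
        // A minimal consumer sketch (roughly what runtime/trace.Start arranges;
        // the io.Writer w is an assumption of the example, not part of this API):
        //
        //	if err := runtime.StartTrace(); err != nil {
        //		return err
        //	}
        //	go func() {
        //		for {
        //			data := runtime.ReadTrace()
        //			if data == nil {
        //				return // tracing stopped and all buffered data consumed
        //			}
        //			w.Write(data) // copy/consume data before the next call
        //		}
        //	}()
        //	// ... later ...
        //	runtime.StopTrace()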
   467  
   468  // readTrace0 is ReadTrace's continuation on g0. This must run on the
   469  // system stack because it acquires trace.lock.
   470  //
   471  //go:systemstack
   472  func readTrace0() (buf []byte, park bool) {
   473  	if raceenabled {
   474  		// g0 doesn't have a race context. Borrow the user G's.
   475  		if getg().racectx != 0 {
   476  			throw("expected racectx == 0")
   477  		}
   478  		getg().racectx = getg().m.curg.racectx
   479  		// (This defer should get open-coded, which is safe on
   480  		// the system stack.)
   481  		defer func() { getg().racectx = 0 }()
   482  	}
   483  
   484  	// This function may need to lock trace.lock recursively
   485  	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
   486  	// To allow this we use trace.lockOwner.
   487  	// Also this function must not allocate while holding trace.lock:
   488  	// an allocation can enter the heap allocator, which may try to emit a
   489  	// trace event while holding the heap lock.
   490  	lock(&trace.lock)
   491  	trace.lockOwner = getg().m.curg
   492  
   493  	if trace.reader.Load() != nil {
   494  		// More than one goroutine reads the trace. This is bad.
   495  		// But we would rather not crash the program because of tracing,
   496  		// since tracing can be enabled at runtime on production servers.
   497  		trace.lockOwner = nil
   498  		unlock(&trace.lock)
   499  		println("runtime: ReadTrace called from multiple goroutines simultaneously")
   500  		return nil, false
   501  	}
   502  	// Recycle the old buffer.
   503  	if buf := trace.reading; buf != 0 {
   504  		buf.ptr().link = trace.empty
   505  		trace.empty = buf
   506  		trace.reading = 0
   507  	}
   508  	// Write trace header.
   509  	if !trace.headerWritten {
   510  		trace.headerWritten = true
   511  		trace.lockOwner = nil
   512  		unlock(&trace.lock)
   513  		return []byte("go 1.19 trace\x00\x00\x00"), false
   514  	}
   515  	// Optimistically look for CPU profile samples. This may write new stack
   516  	// records, and may write new tracing buffers.
   517  	if !trace.footerWritten && !trace.shutdown {
   518  		traceReadCPU()
   519  	}
   520  	// Wait for new data.
   521  	if trace.fullHead == 0 && !trace.shutdown {
   522  		// We don't simply use a note because the scheduler
   523  		// executes this goroutine directly when it wakes up
   524  		// (also a note would consume an M).
   525  		trace.lockOwner = nil
   526  		unlock(&trace.lock)
   527  		return nil, true
   528  	}
   529  newFull:
   530  	assertLockHeld(&trace.lock)
   531  	// Write a buffer.
   532  	if trace.fullHead != 0 {
   533  		buf := traceFullDequeue()
   534  		trace.reading = buf
   535  		trace.lockOwner = nil
   536  		unlock(&trace.lock)
   537  		return buf.ptr().arr[:buf.ptr().pos], false
   538  	}
   539  
   540  	// Write footer with timer frequency.
   541  	if !trace.footerWritten {
   542  		trace.footerWritten = true
   543  		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
   544  		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
   545  		if freq <= 0 {
   546  			throw("trace: ReadTrace got invalid frequency")
   547  		}
   548  		trace.lockOwner = nil
   549  		unlock(&trace.lock)
   550  
   551  		// Write frequency event.
   552  		bufp := traceFlush(0, 0)
   553  		buf := bufp.ptr()
   554  		buf.byte(traceEvFrequency | 0<<traceArgCountShift)
   555  		buf.varint(uint64(freq))
   556  
   557  		// Dump stack table.
   558  		// This will emit a bunch of full buffers; we will pick them up
   559  		// on the next iteration.
   560  		bufp = trace.stackTab.dump(bufp)
   561  
   562  		// Flush final buffer.
   563  		lock(&trace.lock)
   564  		traceFullQueue(bufp)
   565  		goto newFull // trace.lock should be held at newFull
   566  	}
   567  	// Done.
   568  	if trace.shutdown {
   569  		trace.lockOwner = nil
   570  		unlock(&trace.lock)
   571  		if raceenabled {
   572  			// Model synchronization on trace.shutdownSema, which race
   573  			// detector does not see. This is required to avoid false
   574  			// race reports on writer passed to trace.Start.
   575  			racerelease(unsafe.Pointer(&trace.shutdownSema))
   576  		}
   577  		// trace.enabled is already reset, so we can call traceable functions.
   578  		semrelease(&trace.shutdownSema)
   579  		return nil, false
   580  	}
   581  	// Also bad, but see the comment above.
   582  	trace.lockOwner = nil
   583  	unlock(&trace.lock)
   584  	println("runtime: spurious wakeup of trace reader")
   585  	return nil, false
   586  }
   587  
   588  // traceReader returns the trace reader that should be woken up, if any.
   589  // Callers should first check that trace.enabled or trace.shutdown is set.
   590  //
   591  // This must run on the system stack because it acquires trace.lock.
   592  //
   593  //go:systemstack
   594  func traceReader() *g {
   595  	// Optimistic check first
   596  	if traceReaderAvailable() == nil {
   597  		return nil
   598  	}
   599  	lock(&trace.lock)
   600  	gp := traceReaderAvailable()
   601  	if gp == nil || !trace.reader.CompareAndSwapNoWB(gp, nil) {
   602  		unlock(&trace.lock)
   603  		return nil
   604  	}
   605  	unlock(&trace.lock)
   606  	return gp
   607  }
   608  
   609  // traceReaderAvailable returns the trace reader if it is not currently
   610  // scheduled and should be. Callers should first check that trace.enabled
   611  // or trace.shutdown is set.
   612  func traceReaderAvailable() *g {
   613  	if trace.fullHead != 0 || trace.shutdown {
   614  		return trace.reader.Load()
   615  	}
   616  	return nil
   617  }
   618  
   619  // traceProcFree frees trace buffer associated with pp.
   620  //
   621  // This must run on the system stack because it acquires trace.lock.
   622  //
   623  //go:systemstack
   624  func traceProcFree(pp *p) {
   625  	buf := pp.tracebuf
   626  	pp.tracebuf = 0
   627  	if buf == 0 {
   628  		return
   629  	}
   630  	lock(&trace.lock)
   631  	traceFullQueue(buf)
   632  	unlock(&trace.lock)
   633  }
   634  
   635  // traceFullQueue queues buf into queue of full buffers.
   636  func traceFullQueue(buf traceBufPtr) {
   637  	buf.ptr().link = 0
   638  	if trace.fullHead == 0 {
   639  		trace.fullHead = buf
   640  	} else {
   641  		trace.fullTail.ptr().link = buf
   642  	}
   643  	trace.fullTail = buf
   644  }
   645  
   646  // traceFullDequeue dequeues from queue of full buffers.
   647  func traceFullDequeue() traceBufPtr {
   648  	buf := trace.fullHead
   649  	if buf == 0 {
   650  		return 0
   651  	}
   652  	trace.fullHead = buf.ptr().link
   653  	if trace.fullHead == 0 {
   654  		trace.fullTail = 0
   655  	}
   656  	buf.ptr().link = 0
   657  	return buf
   658  }
   659  
   660  // traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
   661  // ev is the event type.
   662  // If skip > 0, write the current stack id as the last argument (skipping skip top frames).
   663  // If skip = 0, this event type should contain a stack, but we don't want
   664  // to collect and remember it for this particular call.
   665  func traceEvent(ev byte, skip int, args ...uint64) {
   666  	mp, pid, bufp := traceAcquireBuffer()
   667  	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
   668  	// This protects from races between traceEvent and StartTrace/StopTrace.
   669  
   670  	// The caller checked that trace.enabled == true, but trace.enabled might have been
   671  	// turned off between the check and now. Check again. traceAcquireBuffer did mp.locks++,
   672  	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
   673  	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
   674  	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
   675  	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceAcquireBuffer.
   676  	//
   677  	// Note trace_userTaskCreate runs the same check.
   678  	if !trace.enabled && !mp.startingtrace {
   679  		traceReleaseBuffer(pid)
   680  		return
   681  	}
   682  
   683  	if skip > 0 {
   684  		if getg() == mp.curg {
   685  			skip++ // +1 because stack is captured in traceEventLocked.
   686  		}
   687  	}
   688  	traceEventLocked(0, mp, pid, bufp, ev, 0, skip, args...)
   689  	traceReleaseBuffer(pid)
   690  }
   691  
   692  // traceEventLocked writes a single event of type ev to the trace buffer bufp,
   693  // flushing the buffer if necessary. pid is the id of the current P, or
   694  // traceGlobProc if we're tracing without a real P.
   695  //
   696  // Preemption is disabled, and if running without a real P the global tracing
   697  // buffer is locked.
   698  //
   699  // Event types that do not include a stack set skip to -1. Event types that
   700  // include a stack may explicitly reference a stackID from the trace.stackTab
   701  // (obtained by an earlier call to traceStackID). Without an explicit stackID,
   702  // this function will automatically capture the stack of the goroutine currently
   703  // running on mp, skipping skip top frames or, if skip is 0, writing out an
   704  // empty stack record.
   705  //
   706  // It records the event's args to the traceBuf, and also makes an effort to
   707  // reserve extraBytes bytes of additional space immediately following the event,
   708  // in the same traceBuf.
   709  func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, stackID uint32, skip int, args ...uint64) {
   710  	buf := bufp.ptr()
   711  	// TODO: test on non-zero extraBytes param.
   712  	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two additional params
   713  	if buf == nil || len(buf.arr)-buf.pos < maxSize {
   714  		systemstack(func() {
   715  			buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
   716  		})
   717  		bufp.set(buf)
   718  	}
   719  
   720  	// NOTE: ticks might be the same after tick division, even though the
   721  	// real cputicks values are strictly increasing.
   722  	ticks := uint64(cputicks()) / traceTickDiv
   723  	tickDiff := ticks - buf.lastTicks
   724  	if tickDiff == 0 {
   725  		ticks = buf.lastTicks + 1
   726  		tickDiff = 1
   727  	}
   728  
   729  	buf.lastTicks = ticks
   730  	narg := byte(len(args))
   731  	if stackID != 0 || skip >= 0 {
   732  		narg++
   733  	}
   734  	// We have only 2 bits for the number of arguments.
   735  	// If the number is >= 3, then the event type is followed by the event length in bytes.
   736  	if narg > 3 {
   737  		narg = 3
   738  	}
   739  	startPos := buf.pos
   740  	buf.byte(ev | narg<<traceArgCountShift)
   741  	var lenp *byte
   742  	if narg == 3 {
   743  		// Reserve the byte for length assuming that length < 128.
   744  		buf.varint(0)
   745  		lenp = &buf.arr[buf.pos-1]
   746  	}
   747  	buf.varint(tickDiff)
   748  	for _, a := range args {
   749  		buf.varint(a)
   750  	}
   751  	if stackID != 0 {
   752  		buf.varint(uint64(stackID))
   753  	} else if skip == 0 {
   754  		buf.varint(0)
   755  	} else if skip > 0 {
   756  		buf.varint(traceStackID(mp, buf.stk[:], skip))
   757  	}
   758  	evSize := buf.pos - startPos
   759  	if evSize > maxSize {
   760  		throw("invalid length of trace event")
   761  	}
   762  	if lenp != nil {
   763  		// Fill in actual length.
   764  		*lenp = byte(evSize - 2)
   765  	}
   766  }
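
        // As a worked example of the encoding above (illustrative only):
        // traceGoSched emits traceEvGoSched (type 17) with no explicit args and
        // a captured stack, so narg is 1 and the record is laid out as
        //
        //	[17 | 1<<traceArgCountShift] [varint tickDiff] [varint stackID]
        //
        // with no length byte, since narg < 3.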
   767  
   768  // traceCPUSample writes a CPU profile sample stack to the execution tracer's
   769  // profiling buffer. It is called from a signal handler, so is limited in what
   770  // it can do.
   771  func traceCPUSample(gp *g, pp *p, stk []uintptr) {
   772  	if !trace.enabled {
   773  		// Tracing is usually turned off; don't spend time acquiring the signal
   774  		// lock unless it's active.
   775  		return
   776  	}
   777  
   778  	// Match the clock used in traceEventLocked
   779  	now := cputicks()
   780  	// The "header" here is the ID of the P that was running the profiled code,
   781  	// followed by the ID of the goroutine. (For normal CPU profiling, it's
   782  	// usually the number of samples with the given stack.) Near syscalls, pp
   783  	// may be nil. Reporting goid of 0 is fine for either g0 or a nil gp.
   784  	var hdr [2]uint64
   785  	if pp != nil {
   786  		// Overflow records in profBuf have all header values set to zero. Make
   787  		// sure that real headers have at least one bit set.
   788  		hdr[0] = uint64(pp.id)<<1 | 0b1
   789  	} else {
   790  		hdr[0] = 0b10
   791  	}
   792  	if gp != nil {
   793  		hdr[1] = gp.goid
   794  	}
   795  
   796  	// Allow only one writer at a time
   797  	for !trace.signalLock.CompareAndSwap(0, 1) {
   798  		// TODO: Is it safe to osyield here? https://go.dev/issue/52672
   799  		osyield()
   800  	}
   801  
   802  	if log := (*profBuf)(atomic.Loadp(unsafe.Pointer(&trace.cpuLogWrite))); log != nil {
   803  		// Note: we don't pass a tag pointer here (how should profiling tags
   804  		// interact with the execution tracer?), but if we did we'd need to be
   805  		// careful about write barriers. See the long comment in profBuf.write.
   806  		log.write(nil, now, hdr[:], stk)
   807  	}
   808  
   809  	trace.signalLock.Store(0)
   810  }
   811  
   812  func traceReadCPU() {
   813  	bufp := &trace.cpuLogBuf
   814  
   815  	for {
   816  		data, tags, _ := trace.cpuLogRead.read(profBufNonBlocking)
   817  		if len(data) == 0 {
   818  			break
   819  		}
   820  		for len(data) > 0 {
   821  			if len(data) < 4 || data[0] > uint64(len(data)) {
   822  				break // truncated profile
   823  			}
   824  			if data[0] < 4 || tags != nil && len(tags) < 1 {
   825  				break // malformed profile
   826  			}
   827  			if len(tags) < 1 {
   828  				break // mismatched profile records and tags
   829  			}
   830  			timestamp := data[1]
   831  			ppid := data[2] >> 1
   832  			if hasP := (data[2] & 0b1) != 0; !hasP {
   833  				ppid = ^uint64(0)
   834  			}
   835  			goid := data[3]
   836  			stk := data[4:data[0]]
   837  			empty := len(stk) == 1 && data[2] == 0 && data[3] == 0
   838  			data = data[data[0]:]
   839  			// No support here for reporting goroutine tags at the moment; if
   840  			// that information is to be part of the execution trace, we'd
   841  			// probably want to see when the tags are applied and when they
   842  			// change, instead of only seeing them when we get a CPU sample.
   843  			tags = tags[1:]
   844  
   845  			if empty {
   846  				// Looks like an overflow record from the profBuf. Not much to
   847  				// do here, we only want to report full records.
   848  				//
   849  				// TODO: should we start a goroutine to drain the profBuf,
   850  				// rather than relying on a high-enough volume of tracing events
   851  				// to keep ReadTrace busy? https://go.dev/issue/52674
   852  				continue
   853  			}
   854  
   855  			buf := bufp.ptr()
   856  			if buf == nil {
   857  				systemstack(func() {
   858  					*bufp = traceFlush(*bufp, 0)
   859  				})
   860  				buf = bufp.ptr()
   861  			}
   862  			for i := range stk {
   863  				if i >= len(buf.stk) {
   864  					break
   865  				}
   866  				buf.stk[i] = uintptr(stk[i])
   867  			}
   868  			stackID := trace.stackTab.put(buf.stk[:len(stk)])
   869  
   870  			traceEventLocked(0, nil, 0, bufp, traceEvCPUSample, stackID, 1, timestamp/traceTickDiv, ppid, goid)
   871  		}
   872  	}
   873  }
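
        // For reference, the layout of one profBuf record parsed above, as
        // written by traceCPUSample (a restatement of the code, not a spec):
        //
        //	data[0]         record length in words, including this word
        //	data[1]         cputicks timestamp taken in the signal handler
        //	data[2]         P header: pp.id<<1 | 0b1, or 0b10 if there was no P
        //	data[3]         goroutine id (0 for g0 or when unknown)
        //	data[4:data[0]] the sampled stack PCs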
   874  
   875  func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
   876  	gp := getg()
   877  	curgp := mp.curg
   878  	var nstk int
   879  	if curgp == gp {
   880  		nstk = callers(skip+1, buf)
   881  	} else if curgp != nil {
   882  		nstk = gcallers(curgp, skip, buf)
   883  	}
   884  	if nstk > 0 {
   885  		nstk-- // skip runtime.goexit
   886  	}
   887  	if nstk > 0 && curgp.goid == 1 {
   888  		nstk-- // skip runtime.main
   889  	}
   890  	id := trace.stackTab.put(buf[:nstk])
   891  	return uint64(id)
   892  }
   893  
   894  // traceAcquireBuffer returns the trace buffer to use and, if necessary, locks it.
   895  func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
   896  	// Any time we acquire a buffer, we may end up flushing it,
   897  	// but flushes are rare. Record the lock edge even if it
   898  	// doesn't happen this time.
   899  	lockRankMayTraceFlush()
   900  
   901  	mp = acquirem()
   902  	if p := mp.p.ptr(); p != nil {
   903  		return mp, p.id, &p.tracebuf
   904  	}
   905  	lock(&trace.bufLock)
   906  	return mp, traceGlobProc, &trace.buf
   907  }
   908  
   909  // traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
   910  func traceReleaseBuffer(pid int32) {
   911  	if pid == traceGlobProc {
   912  		unlock(&trace.bufLock)
   913  	}
   914  	releasem(getg().m)
   915  }
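
        // The expected pairing, as used by traceEvent and the trace_user*
        // functions below (a sketch of the pattern, not new API):
        //
        //	mp, pid, bufp := traceAcquireBuffer()
        //	// ... re-check trace.enabled, then traceEventLocked(..., mp, pid, bufp, ...)
        //	traceReleaseBuffer(pid)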
   916  
   917  // lockRankMayTraceFlush records the lock ranking effects of a
   918  // potential call to traceFlush.
   919  func lockRankMayTraceFlush() {
   920  	owner := trace.lockOwner
   921  	dolock := owner == nil || owner != getg().m.curg
   922  	if dolock {
   923  		lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
   924  	}
   925  }
   926  
   927  // traceFlush puts buf onto stack of full buffers and returns an empty buffer.
   928  //
   929  // This must run on the system stack because it acquires trace.lock.
   930  //
   931  //go:systemstack
   932  func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
   933  	owner := trace.lockOwner
   934  	dolock := owner == nil || owner != getg().m.curg
   935  	if dolock {
   936  		lock(&trace.lock)
   937  	}
   938  	if buf != 0 {
   939  		traceFullQueue(buf)
   940  	}
   941  	if trace.empty != 0 {
   942  		buf = trace.empty
   943  		trace.empty = buf.ptr().link
   944  	} else {
   945  		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
   946  		if buf == 0 {
   947  			throw("trace: out of memory")
   948  		}
   949  	}
   950  	bufp := buf.ptr()
   951  	bufp.link.set(nil)
   952  	bufp.pos = 0
   953  
   954  	// initialize the buffer for a new batch
   955  	ticks := uint64(cputicks()) / traceTickDiv
   956  	if ticks == bufp.lastTicks {
   957  		ticks = bufp.lastTicks + 1
   958  	}
   959  	bufp.lastTicks = ticks
   960  	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
   961  	bufp.varint(uint64(pid))
   962  	bufp.varint(ticks)
   963  
   964  	if dolock {
   965  		unlock(&trace.lock)
   966  	}
   967  	return buf
   968  }
   969  
   970  // traceString adds a string to the trace.strings dictionary and returns its id.
   971  func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
   972  	if s == "" {
   973  		return 0, bufp
   974  	}
   975  
   976  	lock(&trace.stringsLock)
   977  	if raceenabled {
   978  		// raceacquire is necessary because the map access
   979  		// below is race annotated.
   980  		raceacquire(unsafe.Pointer(&trace.stringsLock))
   981  	}
   982  
   983  	if id, ok := trace.strings[s]; ok {
   984  		if raceenabled {
   985  			racerelease(unsafe.Pointer(&trace.stringsLock))
   986  		}
   987  		unlock(&trace.stringsLock)
   988  
   989  		return id, bufp
   990  	}
   991  
   992  	trace.stringSeq++
   993  	id := trace.stringSeq
   994  	trace.strings[s] = id
   995  
   996  	if raceenabled {
   997  		racerelease(unsafe.Pointer(&trace.stringsLock))
   998  	}
   999  	unlock(&trace.stringsLock)
  1000  
  1001  	// Memory allocation above may trigger tracing and
  1002  	// cause *bufp to change. The following code works with *bufp,
  1003  	// so there must be no memory allocation or other activity
  1004  	// that causes tracing after this point.
  1005  
  1006  	buf := bufp.ptr()
  1007  	size := 1 + 2*traceBytesPerNumber + len(s)
  1008  	if buf == nil || len(buf.arr)-buf.pos < size {
  1009  		systemstack(func() {
  1010  			buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
  1011  			bufp.set(buf)
  1012  		})
  1013  	}
  1014  	buf.byte(traceEvString)
  1015  	buf.varint(id)
  1016  
  1017  	// Double-check that the string and its length can fit.
  1018  	// Otherwise, truncate the string.
  1019  	slen := len(s)
  1020  	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
  1021  		slen = room
  1022  	}
  1023  
  1024  	buf.varint(uint64(slen))
  1025  	buf.pos += copy(buf.arr[buf.pos:], s[:slen])
  1026  
  1027  	bufp.set(buf)
  1028  	return id, bufp
  1029  }
  1030  
  1031  // varint appends v to buf in little-endian-base-128 encoding.
  1032  func (buf *traceBuf) varint(v uint64) {
  1033  	pos := buf.pos
  1034  	for ; v >= 0x80; v >>= 7 {
  1035  		buf.arr[pos] = 0x80 | byte(v)
  1036  		pos++
  1037  	}
  1038  	buf.arr[pos] = byte(v)
  1039  	pos++
  1040  	buf.pos = pos
  1041  }
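
        // For example, v = 300 (0b1_0010_1100) is appended as two bytes, low
        // 7 bits first, with the continuation bit on every byte but the last:
        //
        //	0xac // 0x80 | (300 & 0x7f)
        //	0x02 // 300 >> 7
        //
        // A decoder reverses this (an illustrative sketch, not runtime code):
        //
        //	var v uint64
        //	for shift := 0; ; shift += 7 {
        //		b := arr[pos]
        //		pos++
        //		v |= uint64(b&0x7f) << shift
        //		if b < 0x80 {
        //			break
        //		}
        //	}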
  1042  
  1043  // varintAt writes varint v at byte position pos in buf. This always
  1044  // consumes traceBytesPerNumber bytes. This is intended for when the
  1045  // caller needs to reserve space for a varint but can't populate it
  1046  // until later.
  1047  func (buf *traceBuf) varintAt(pos int, v uint64) {
  1048  	for i := 0; i < traceBytesPerNumber; i++ {
  1049  		if i < traceBytesPerNumber-1 {
  1050  			buf.arr[pos] = 0x80 | byte(v)
  1051  		} else {
  1052  			buf.arr[pos] = byte(v)
  1053  		}
  1054  		v >>= 7
  1055  		pos++
  1056  	}
  1057  }
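
        // For example, varintAt(pos, 5) always writes exactly ten bytes:
        //
        //	0x85 0x80 0x80 0x80 0x80 0x80 0x80 0x80 0x80 0x00
        //
        // A standard varint decoder still reads the value 5; the fixed width is
        // what lets dump below reserve space for a length it fills in later.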
  1058  
  1059  // byte appends v to buf.
  1060  func (buf *traceBuf) byte(v byte) {
  1061  	buf.arr[buf.pos] = v
  1062  	buf.pos++
  1063  }
  1064  
  1065  // traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
  1066  // It is lock-free for reading.
  1067  type traceStackTable struct {
  1068  	lock mutex // Must be acquired on the system stack
  1069  	seq  uint32
  1070  	mem  traceAlloc
  1071  	tab  [1 << 13]traceStackPtr
  1072  }
  1073  
  1074  // traceStack is a single stack in traceStackTable.
  1075  type traceStack struct {
  1076  	link traceStackPtr
  1077  	hash uintptr
  1078  	id   uint32
  1079  	n    int
  1080  	stk  [0]uintptr // real type [n]uintptr
  1081  }
  1082  
  1083  type traceStackPtr uintptr
  1084  
  1085  func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }
  1086  
  1087  // stack returns the slice of PCs.
  1088  func (ts *traceStack) stack() []uintptr {
  1089  	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
  1090  }
  1091  
  1092  // put returns a unique id for the stack trace pcs and caches it in the table
  1093  // if it sees the trace for the first time.
  1094  func (tab *traceStackTable) put(pcs []uintptr) uint32 {
  1095  	if len(pcs) == 0 {
  1096  		return 0
  1097  	}
  1098  	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
  1099  	// First, search the hashtable w/o the mutex.
  1100  	if id := tab.find(pcs, hash); id != 0 {
  1101  		return id
  1102  	}
  1103  	// Now, double-check under the mutex.
  1104  	// Switch to the system stack so we can acquire tab.lock.
  1105  	var id uint32
  1106  	systemstack(func() {
  1107  		lock(&tab.lock)
  1108  		if id = tab.find(pcs, hash); id != 0 {
  1109  			unlock(&tab.lock)
  1110  			return
  1111  		}
  1112  		// Create new record.
  1113  		tab.seq++
  1114  		stk := tab.newStack(len(pcs))
  1115  		stk.hash = hash
  1116  		stk.id = tab.seq
  1117  		id = stk.id
  1118  		stk.n = len(pcs)
  1119  		stkpc := stk.stack()
  1120  		for i, pc := range pcs {
  1121  			stkpc[i] = pc
  1122  		}
  1123  		part := int(hash % uintptr(len(tab.tab)))
  1124  		stk.link = tab.tab[part]
  1125  		atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
  1126  		unlock(&tab.lock)
  1127  	})
  1128  	return id
  1129  }
  1130  
  1131  // find checks if the stack trace pcs is already present in the table.
  1132  func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
  1133  	part := int(hash % uintptr(len(tab.tab)))
  1134  Search:
  1135  	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
  1136  		if stk.hash == hash && stk.n == len(pcs) {
  1137  			for i, stkpc := range stk.stack() {
  1138  				if stkpc != pcs[i] {
  1139  					continue Search
  1140  				}
  1141  			}
  1142  			return stk.id
  1143  		}
  1144  	}
  1145  	return 0
  1146  }
  1147  
  1148  // newStack allocates a new stack of size n.
  1149  func (tab *traceStackTable) newStack(n int) *traceStack {
  1150  	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
  1151  }
  1152  
  1153  // traceFrames returns the frames corresponding to pcs. It may
  1154  // allocate and may emit trace events.
  1155  func traceFrames(bufp traceBufPtr, pcs []uintptr) ([]traceFrame, traceBufPtr) {
  1156  	frames := make([]traceFrame, 0, len(pcs))
  1157  	ci := CallersFrames(pcs)
  1158  	for {
  1159  		var frame traceFrame
  1160  		f, more := ci.Next()
  1161  		frame, bufp = traceFrameForPC(bufp, 0, f)
  1162  		frames = append(frames, frame)
  1163  		if !more {
  1164  			return frames, bufp
  1165  		}
  1166  	}
  1167  }
  1168  
  1169  // dump writes all previously cached stacks to trace buffers,
  1170  // releases all memory and resets state.
  1171  //
  1172  // This must run on the system stack because it calls traceFlush.
  1173  //
  1174  //go:systemstack
  1175  func (tab *traceStackTable) dump(bufp traceBufPtr) traceBufPtr {
  1176  	for i := range tab.tab {
  1177  		stk := tab.tab[i].ptr()
  1178  		for ; stk != nil; stk = stk.link.ptr() {
  1179  			var frames []traceFrame
  1180  			frames, bufp = traceFrames(bufp, stk.stack())
  1181  
  1182  			// Estimate the size of this record. This
  1183  			// bound is pretty loose, but avoids counting
  1184  			// lots of varint sizes.
  1185  			maxSize := 1 + traceBytesPerNumber + (2+4*len(frames))*traceBytesPerNumber
  1186  			// Make sure we have enough buffer space.
  1187  			if buf := bufp.ptr(); len(buf.arr)-buf.pos < maxSize {
  1188  				bufp = traceFlush(bufp, 0)
  1189  			}
  1190  
  1191  			// Emit header, with space reserved for length.
  1192  			buf := bufp.ptr()
  1193  			buf.byte(traceEvStack | 3<<traceArgCountShift)
  1194  			lenPos := buf.pos
  1195  			buf.pos += traceBytesPerNumber
  1196  
  1197  			// Emit body.
  1198  			recPos := buf.pos
  1199  			buf.varint(uint64(stk.id))
  1200  			buf.varint(uint64(len(frames)))
  1201  			for _, frame := range frames {
  1202  				buf.varint(uint64(frame.PC))
  1203  				buf.varint(frame.funcID)
  1204  				buf.varint(frame.fileID)
  1205  				buf.varint(frame.line)
  1206  			}
  1207  
  1208  			// Fill in size header.
  1209  			buf.varintAt(lenPos, uint64(buf.pos-recPos))
  1210  		}
  1211  	}
  1212  
  1213  	tab.mem.drop()
  1214  	*tab = traceStackTable{}
  1215  	lockInit(&tab.lock, lockRankTraceStackTab)
  1216  
  1217  	return bufp
  1218  }
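
        // An example record emitted by dump (illustrative): a two-frame stack
        // with id 7 is encoded as
        //
        //	[traceEvStack | 3<<traceArgCountShift] [length] [varint 7] [varint 2]
        //	[varint PC0] [varint funcID0] [varint fileID0] [varint line0]
        //	[varint PC1] [varint funcID1] [varint fileID1] [varint line1]
        //
        // where length counts the bytes after the length field and is written
        // by varintAt into exactly traceBytesPerNumber reserved bytes.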
  1219  
  1220  type traceFrame struct {
  1221  	PC     uintptr
  1222  	funcID uint64
  1223  	fileID uint64
  1224  	line   uint64
  1225  }
  1226  
  1227  // traceFrameForPC records the frame information.
  1228  // It may allocate memory.
  1229  func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
  1230  	bufp := &buf
  1231  	var frame traceFrame
  1232  	frame.PC = f.PC
  1233  
  1234  	fn := f.Function
  1235  	const maxLen = 1 << 10
  1236  	if len(fn) > maxLen {
  1237  		fn = fn[len(fn)-maxLen:]
  1238  	}
  1239  	frame.funcID, bufp = traceString(bufp, pid, fn)
  1240  	frame.line = uint64(f.Line)
  1241  	file := f.File
  1242  	if len(file) > maxLen {
  1243  		file = file[len(file)-maxLen:]
  1244  	}
  1245  	frame.fileID, bufp = traceString(bufp, pid, file)
  1246  	return frame, (*bufp)
  1247  }
  1248  
  1249  // traceAlloc is a non-thread-safe region allocator.
  1250  // It holds a linked list of traceAllocBlock.
  1251  type traceAlloc struct {
  1252  	head traceAllocBlockPtr
  1253  	off  uintptr
  1254  }
  1255  
  1256  // traceAllocBlock is a block in traceAlloc.
  1257  //
  1258  // traceAllocBlock is allocated from non-GC'd memory, so it must not
  1259  // contain heap pointers. Writes to pointers to traceAllocBlocks do
  1260  // not need write barriers.
  1261  type traceAllocBlock struct {
  1262  	_    sys.NotInHeap
  1263  	next traceAllocBlockPtr
  1264  	data [64<<10 - goarch.PtrSize]byte
  1265  }
  1266  
  1267  // TODO: Since traceAllocBlock is now embedded runtime/internal/sys.NotInHeap, this isn't necessary.
  1268  type traceAllocBlockPtr uintptr
  1269  
  1270  func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
  1271  func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }
  1272  
  1273  // alloc allocates an n-byte block.
  1274  func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
  1275  	n = alignUp(n, goarch.PtrSize)
  1276  	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
  1277  		if n > uintptr(len(a.head.ptr().data)) {
  1278  			throw("trace: alloc too large")
  1279  		}
  1280  		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
  1281  		if block == nil {
  1282  			throw("trace: out of memory")
  1283  		}
  1284  		block.next.set(a.head.ptr())
  1285  		a.head.set(block)
  1286  		a.off = 0
  1287  	}
  1288  	p := &a.head.ptr().data[a.off]
  1289  	a.off += n
  1290  	return unsafe.Pointer(p)
  1291  }
  1292  
  1293  // drop frees all previously allocated memory and resets the allocator.
  1294  func (a *traceAlloc) drop() {
  1295  	for a.head != 0 {
  1296  		block := a.head.ptr()
  1297  		a.head.set(block.next.ptr())
  1298  		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
  1299  	}
  1300  }
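
        // Typical lifetime of the allocator (a sketch; its only user in this
        // file is traceStackTable.mem):
        //
        //	var a traceAlloc
        //	p := a.alloc(32) // carves 32 bytes out of the current 64 KiB block
        //	// ... p stays valid until drop ...
        //	a.drop() // frees all blocks at once; there is no per-object free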
  1301  
  1302  // The following functions write specific events to trace.
  1303  
  1304  func traceGomaxprocs(procs int32) {
  1305  	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
  1306  }
  1307  
  1308  func traceProcStart() {
  1309  	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
  1310  }
  1311  
  1312  func traceProcStop(pp *p) {
  1313  	// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
  1314  	// to handle this we temporarily employ the P.
  1315  	mp := acquirem()
  1316  	oldp := mp.p
  1317  	mp.p.set(pp)
  1318  	traceEvent(traceEvProcStop, -1)
  1319  	mp.p = oldp
  1320  	releasem(mp)
  1321  }
  1322  
  1323  func traceGCStart() {
  1324  	traceEvent(traceEvGCStart, 3, trace.seqGC)
  1325  	trace.seqGC++
  1326  }
  1327  
  1328  func traceGCDone() {
  1329  	traceEvent(traceEvGCDone, -1)
  1330  }
  1331  
  1332  func traceGCSTWStart(kind int) {
  1333  	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
  1334  }
  1335  
  1336  func traceGCSTWDone() {
  1337  	traceEvent(traceEvGCSTWDone, -1)
  1338  }
  1339  
  1340  // traceGCSweepStart prepares to trace a sweep loop. This does not
  1341  // emit any events until traceGCSweepSpan is called.
  1342  //
  1343  // traceGCSweepStart must be paired with traceGCSweepDone and there
  1344  // must be no preemption points between these two calls.
  1345  func traceGCSweepStart() {
  1346  	// Delay the actual GCSweepStart event until the first span
  1347  	// sweep. If we don't sweep anything, don't emit any events.
  1348  	pp := getg().m.p.ptr()
  1349  	if pp.traceSweep {
  1350  		throw("double traceGCSweepStart")
  1351  	}
  1352  	pp.traceSweep, pp.traceSwept, pp.traceReclaimed = true, 0, 0
  1353  }
  1354  
  1355  // traceGCSweepSpan traces the sweep of a single page.
  1356  //
  1357  // This may be called outside a traceGCSweepStart/traceGCSweepDone
  1358  // pair; however, it will not emit any trace events in this case.
  1359  func traceGCSweepSpan(bytesSwept uintptr) {
  1360  	pp := getg().m.p.ptr()
  1361  	if pp.traceSweep {
  1362  		if pp.traceSwept == 0 {
  1363  			traceEvent(traceEvGCSweepStart, 1)
  1364  		}
  1365  		pp.traceSwept += bytesSwept
  1366  	}
  1367  }
  1368  
  1369  func traceGCSweepDone() {
  1370  	pp := getg().m.p.ptr()
  1371  	if !pp.traceSweep {
  1372  		throw("missing traceGCSweepStart")
  1373  	}
  1374  	if pp.traceSwept != 0 {
  1375  		traceEvent(traceEvGCSweepDone, -1, uint64(pp.traceSwept), uint64(pp.traceReclaimed))
  1376  	}
  1377  	pp.traceSweep = false
  1378  }
  1379  
  1380  func traceGCMarkAssistStart() {
  1381  	traceEvent(traceEvGCMarkAssistStart, 1)
  1382  }
  1383  
  1384  func traceGCMarkAssistDone() {
  1385  	traceEvent(traceEvGCMarkAssistDone, -1)
  1386  }
  1387  
  1388  func traceGoCreate(newg *g, pc uintptr) {
  1389  	newg.traceseq = 0
  1390  	newg.tracelastp = getg().m.p
  1391  	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
  1392  	id := trace.stackTab.put([]uintptr{startPCforTrace(pc) + sys.PCQuantum})
  1393  	traceEvent(traceEvGoCreate, 2, newg.goid, uint64(id))
  1394  }
  1395  
  1396  func traceGoStart() {
  1397  	gp := getg().m.curg
  1398  	pp := gp.m.p
  1399  	gp.traceseq++
  1400  	if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
  1401  		traceEvent(traceEvGoStartLabel, -1, gp.goid, gp.traceseq, trace.markWorkerLabels[pp.ptr().gcMarkWorkerMode])
  1402  	} else if gp.tracelastp == pp {
  1403  		traceEvent(traceEvGoStartLocal, -1, gp.goid)
  1404  	} else {
  1405  		gp.tracelastp = pp
  1406  		traceEvent(traceEvGoStart, -1, gp.goid, gp.traceseq)
  1407  	}
  1408  }
  1409  
  1410  func traceGoEnd() {
  1411  	traceEvent(traceEvGoEnd, -1)
  1412  }
  1413  
  1414  func traceGoSched() {
  1415  	gp := getg()
  1416  	gp.tracelastp = gp.m.p
  1417  	traceEvent(traceEvGoSched, 1)
  1418  }
  1419  
  1420  func traceGoPreempt() {
  1421  	gp := getg()
  1422  	gp.tracelastp = gp.m.p
  1423  	traceEvent(traceEvGoPreempt, 1)
  1424  }
  1425  
  1426  func traceGoPark(traceEv byte, skip int) {
  1427  	if traceEv&traceFutileWakeup != 0 {
  1428  		traceEvent(traceEvFutileWakeup, -1)
  1429  	}
  1430  	traceEvent(traceEv & ^traceFutileWakeup, skip)
  1431  }
  1432  
  1433  func traceGoUnpark(gp *g, skip int) {
  1434  	pp := getg().m.p
  1435  	gp.traceseq++
  1436  	if gp.tracelastp == pp {
  1437  		traceEvent(traceEvGoUnblockLocal, skip, gp.goid)
  1438  	} else {
  1439  		gp.tracelastp = pp
  1440  		traceEvent(traceEvGoUnblock, skip, gp.goid, gp.traceseq)
  1441  	}
  1442  }
  1443  
  1444  func traceGoSysCall() {
  1445  	traceEvent(traceEvGoSysCall, 1)
  1446  }
  1447  
  1448  func traceGoSysExit(ts int64) {
  1449  	if ts != 0 && ts < trace.ticksStart {
  1450  		// There is a race between the code that initializes sysexitticks
  1451  		// (in exitsyscall, which runs without a P, and therefore is not
  1452  		// stopped with the rest of the world) and the code that initializes
  1453  		// a new trace. The recorded sysexitticks must therefore be treated
  1454  		// as "best effort". If they are valid for this trace, then great,
  1455  		// use them for greater accuracy. But if they're not valid for this
  1456  		// trace, assume that the trace was started after the actual syscall
  1457  		// exit (but before we actually managed to start the goroutine,
  1458  		// aka right now), and assign a fresh time stamp to keep the log consistent.
  1459  		ts = 0
  1460  	}
  1461  	gp := getg().m.curg
  1462  	gp.traceseq++
  1463  	gp.tracelastp = gp.m.p
  1464  	traceEvent(traceEvGoSysExit, -1, gp.goid, gp.traceseq, uint64(ts)/traceTickDiv)
  1465  }
  1466  
  1467  func traceGoSysBlock(pp *p) {
  1468  	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
  1469  	// to handle this we temporarily employ the P.
  1470  	mp := acquirem()
  1471  	oldp := mp.p
  1472  	mp.p.set(pp)
  1473  	traceEvent(traceEvGoSysBlock, -1)
  1474  	mp.p = oldp
  1475  	releasem(mp)
  1476  }
  1477  
  1478  func traceHeapAlloc(live uint64) {
  1479  	traceEvent(traceEvHeapAlloc, -1, live)
  1480  }
  1481  
  1482  func traceHeapGoal() {
  1483  	heapGoal := gcController.heapGoal()
  1484  	if heapGoal == ^uint64(0) {
  1485  		// Heap-based triggering is disabled.
  1486  		traceEvent(traceEvHeapGoal, -1, 0)
  1487  	} else {
  1488  		traceEvent(traceEvHeapGoal, -1, heapGoal)
  1489  	}
  1490  }
  1491  
  1492  // To access runtime functions from runtime/trace.
  1493  // See runtime/trace/annotation.go
  1494  
  1495  //go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
  1496  func trace_userTaskCreate(id, parentID uint64, taskType string) {
  1497  	if !trace.enabled {
  1498  		return
  1499  	}
  1500  
  1501  	// Same as in traceEvent.
  1502  	mp, pid, bufp := traceAcquireBuffer()
  1503  	if !trace.enabled && !mp.startingtrace {
  1504  		traceReleaseBuffer(pid)
  1505  		return
  1506  	}
  1507  
  1508  	typeStringID, bufp := traceString(bufp, pid, taskType)
  1509  	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 0, 3, id, parentID, typeStringID)
  1510  	traceReleaseBuffer(pid)
  1511  }
  1512  
  1513  //go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
  1514  func trace_userTaskEnd(id uint64) {
  1515  	traceEvent(traceEvUserTaskEnd, 2, id)
  1516  }
  1517  
  1518  //go:linkname trace_userRegion runtime/trace.userRegion
  1519  func trace_userRegion(id, mode uint64, name string) {
  1520  	if !trace.enabled {
  1521  		return
  1522  	}
  1523  
  1524  	mp, pid, bufp := traceAcquireBuffer()
  1525  	if !trace.enabled && !mp.startingtrace {
  1526  		traceReleaseBuffer(pid)
  1527  		return
  1528  	}
  1529  
  1530  	nameStringID, bufp := traceString(bufp, pid, name)
  1531  	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 0, 3, id, mode, nameStringID)
  1532  	traceReleaseBuffer(pid)
  1533  }
  1534  
  1535  //go:linkname trace_userLog runtime/trace.userLog
  1536  func trace_userLog(id uint64, category, message string) {
  1537  	if !trace.enabled {
  1538  		return
  1539  	}
  1540  
  1541  	mp, pid, bufp := traceAcquireBuffer()
  1542  	if !trace.enabled && !mp.startingtrace {
  1543  		traceReleaseBuffer(pid)
  1544  		return
  1545  	}
  1546  
  1547  	categoryID, bufp := traceString(bufp, pid, category)
  1548  
  1549  	extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
  1550  	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 0, 3, id, categoryID)
  1551  	// traceEventLocked reserved extra space for val and len(val)
  1552  	// in buf, so buf now has room for the following.
  1553  	buf := bufp.ptr()
  1554  
  1555  	// double-check the message and its length can fit.
  1556  	// Otherwise, truncate the message.
  1557  	slen := len(message)
  1558  	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
  1559  		slen = room
  1560  	}
  1561  	buf.varint(uint64(slen))
  1562  	buf.pos += copy(buf.arr[buf.pos:], message[:slen])
  1563  
  1564  	traceReleaseBuffer(pid)
  1565  }
  1566  
  1567  // startPCforTrace returns the start PC of a goroutine for tracing purposes.
  1568  // If pc is a wrapper, it returns the PC of the wrapped function. Otherwise it returns pc.
  1569  func startPCforTrace(pc uintptr) uintptr {
  1570  	f := findfunc(pc)
  1571  	if !f.valid() {
  1572  		return pc // may happen for locked g in extra M since its pc is 0.
  1573  	}
  1574  	w := funcdata(f, _FUNCDATA_WrapInfo)
  1575  	if w == nil {
  1576  		return pc // not a wrapper
  1577  	}
  1578  	return f.datap.textAddr(*(*uint32)(w))
  1579  }
  1580  
