Source file src/runtime/profbuf.go

// Copyright 2017 The Go Authors.  All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// A profBuf is a lock-free buffer for profiling events,
// safe for concurrent use by one reader and one writer.
// The writer may be a signal handler running without a user g.
// The reader is assumed to be a user g.
//
// Each logged event corresponds to a fixed size header, a list of
// uintptrs (typically a stack), and exactly one unsafe.Pointer tag.
// The header and uintptrs are stored in the circular buffer data and the
// tag is stored in a circular buffer tags, running in parallel.
// In the circular buffer data, each event takes 2+hdrsize+len(stk)
// words: the value 2+hdrsize+len(stk), then the time of the event, then
// hdrsize words giving the fixed-size header, and then len(stk) words
// for the stack.
//
// The current effective offsets into the tags and data circular buffers
// for reading and writing are stored in the high 30 and low 32 bits of r and w.
// The bottom bits of the high 32 are additional flag bits in w, unused in r.
// "Effective" offsets means the total number of reads or writes, mod 2^length.
// The offset in the buffer is the effective offset mod the length of the buffer.
// To make wraparound mod 2^length match wraparound mod length of the buffer,
// the length of the buffer must be a power of two.
//
// If the reader catches up to the writer, a flag passed to read controls
// whether the read blocks until more data is available. A read returns a
// pointer to the buffer data itself; the caller is assumed to be done with
// that data at the next read. The read offset rNext tracks the next offset to
// be returned by read. By definition, r ≤ rNext ≤ w (before wraparound),
// and rNext is only used by the reader, so it can be accessed without atomics.
//
// If the writer gets ahead of the reader, so that the buffer fills,
// future writes are discarded and replaced in the output stream by an
// overflow entry, which has size 2+hdrsize+1, time set to the time of
// the first discarded write, a header of all zeroed words, and a "stack"
// containing one word, the number of discarded writes.
//
// Between the time the buffer fills and the buffer becomes empty enough
// to hold more data, the overflow entry is stored as a pending overflow
// entry in the fields overflow and overflowTime. The pending overflow
// entry can be turned into a real record by either the writer or the
// reader. If the writer is called to write a new record and finds that
// the output buffer has room for both the pending overflow entry and the
// new record, the writer emits the pending overflow entry and the new
// record into the buffer. If the reader is called to read data and finds
// that the output buffer is empty but that there is a pending overflow
// entry, the reader will return a synthesized record for the pending
// overflow entry.
//
// Only the writer can create or add to a pending overflow entry, but
// either the reader or the writer can clear the pending overflow entry.
// A pending overflow entry is indicated by the low 32 bits of 'overflow'
// holding the number of discarded writes, and overflowTime holding the
// time of the first discarded write. The high 32 bits of 'overflow'
// increment each time the low 32 bits transition from zero to non-zero
// or vice versa. This sequence number avoids ABA problems in the use of
// compare-and-swap to coordinate between reader and writer.
// The overflowTime is only written when the low 32 bits of overflow are
// zero, that is, only when there is no pending overflow entry, in
// preparation for creating a new one. The reader can therefore fetch and
// clear the entry atomically using
//
//	for {
//		overflow = load(&b.overflow)
//		if uint32(overflow) == 0 {
//			// no pending entry
//			break
//		}
//		time = load(&b.overflowTime)
//		if cas(&b.overflow, overflow, ((overflow>>32)+1)<<32) {
//			// pending entry cleared
//			break
//		}
//	}
//	if uint32(overflow) > 0 {
//		emit entry for uint32(overflow), time
//	}
type profBuf struct {
	// accessed atomically
	r, w         profAtomic
	overflow     uint64
	overflowTime uint64
	eof          uint32

	// immutable (excluding slice content)
	hdrsize uintptr
	data    []uint64
	tags    []unsafe.Pointer

	// owned by reader
	rNext       profIndex
	overflowBuf []uint64 // for use by reader to return overflow record
	wait        note
}
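
// Record layout sketch (editorial illustration, not part of the original
// source): with hdrsize = 1, an event logged at time t with a 3-entry stack
// occupies 2+1+3 = 6 contiguous words in data and one parallel slot in tags:
//
//	data: ... | 6 | t | hdr[0] | stk[0] | stk[1] | stk[2] | ...
//	tags: ... | tag |                                       ...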

// A profAtomic is the atomically-accessed word holding a profIndex.
type profAtomic uint64

// A profIndex is the packed tag and data counts and flag bits, described above.
type profIndex uint64

const (
	profReaderSleeping profIndex = 1 << 32 // reader is sleeping and must be woken up
	profWriteExtra     profIndex = 1 << 33 // overflow or eof waiting
)
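
// Bit-layout sketch (editorial, derived from the comments above): a profIndex
// held in w packs its fields as
//
//	bits 63..34: tag count (30 bits, read via tagCount)
//	bit  33:     profWriteExtra
//	bit  32:     profReaderSleeping
//	bits 31..0:  data count (read via dataCount)
//
// In r the two flag bits are unused and always zero.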

func (x *profAtomic) load() profIndex {
	return profIndex(atomic.Load64((*uint64)(x)))
}

func (x *profAtomic) store(new profIndex) {
	atomic.Store64((*uint64)(x), uint64(new))
}

func (x *profAtomic) cas(old, new profIndex) bool {
	return atomic.Cas64((*uint64)(x), uint64(old), uint64(new))
}

func (x profIndex) dataCount() uint32 {
	return uint32(x)
}

func (x profIndex) tagCount() uint32 {
	return uint32(x >> 34)
}

// countSub subtracts two counts obtained from profIndex.dataCount or profIndex.tagCount,
// assuming that they are no more than 2^29 apart (guaranteed since they are never more than
// len(data) or len(tags) apart, respectively).
// tagCount wraps at 2^30, while dataCount wraps at 2^32.
// This function works for both.
func countSub(x, y uint32) int {
	// x-y is 32-bit signed or 30-bit signed; sign-extend to 32 bits and convert to int.
	return int(int32(x-y) << 2 >> 2)
}
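
// Worked example (editorial): suppose the 30-bit tag count has wrapped, with
// the reader at y = 1<<30 - 2 and the writer at x = 3, so the writer is
// logically 5 ahead. Then x-y is 0xC0000005 as a uint32; shifting left then
// right by 2 discards the wrapped high bits and sign-extends, giving 5. The
// same expression reduces to plain int32 subtraction for 32-bit data counts:
//
//	countSub(3, 1<<30-2) == 5
//	countSub(5, 0xFFFFFFFB) == 10 // data count wrapped past 2^32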

// addCountsAndClearFlags returns the packed form of "x + (data, tag) - all flags".
func (x profIndex) addCountsAndClearFlags(data, tag int) profIndex {
	return profIndex((uint64(x)>>34+uint64(uint32(tag)<<2>>2))<<34 | uint64(uint32(x)+uint32(data)))
}
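
// Note (editorial): because the result is reassembled from uint64(x)>>34 and
// uint32(x), bits 33 and 32 are dropped, so even with data = tag = 0 this
// clears both flags:
//
//	(w | profWriteExtra).addCountsAndClearFlags(0, 0) == w &^ (profWriteExtra | profReaderSleeping)
//
// The uint32(tag)<<2>>2 keeps the tag delta to 30 bits so the addition wraps
// at 2^30, matching tagCount.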

// hasOverflow reports whether b has any overflow records pending.
func (b *profBuf) hasOverflow() bool {
	return uint32(atomic.Load64(&b.overflow)) > 0
}

// takeOverflow consumes the pending overflow records, returning the overflow count
// and the time of the first overflow.
// When called by the reader, it is racing against incrementOverflow.
func (b *profBuf) takeOverflow() (count uint32, time uint64) {
	overflow := atomic.Load64(&b.overflow)
	time = atomic.Load64(&b.overflowTime)
	for {
		count = uint32(overflow)
		if count == 0 {
			time = 0
			break
		}
		// Increment generation, clear overflow count in low bits.
		if atomic.Cas64(&b.overflow, overflow, ((overflow>>32)+1)<<32) {
			break
		}
		overflow = atomic.Load64(&b.overflow)
		time = atomic.Load64(&b.overflowTime)
	}
	return uint32(overflow), time
}

// incrementOverflow records a single overflow at time now.
// It is racing against a possible takeOverflow in the reader.
func (b *profBuf) incrementOverflow(now int64) {
	for {
		overflow := atomic.Load64(&b.overflow)

		// Once we see b.overflow reach 0, it's stable: no one else is changing it underfoot.
		// We need to set overflowTime if we're incrementing b.overflow from 0.
		if uint32(overflow) == 0 {
			// Store overflowTime first so it's always available when overflow != 0.
			atomic.Store64(&b.overflowTime, uint64(now))
			atomic.Store64(&b.overflow, (((overflow>>32)+1)<<32)+1)
			break
		}
		// Otherwise we're racing to increment against reader
		// who wants to set b.overflow to 0.
		// Out of paranoia, leave 2³²-1 a sticky overflow value,
		// to avoid wrapping around. Extremely unlikely.
		if int32(overflow) == -1 {
			break
		}
		if atomic.Cas64(&b.overflow, overflow, overflow+1) {
			break
		}
	}
}

// newProfBuf returns a new profiling buffer with room for
// a header of hdrsize words and a buffer of at least bufwords words.
func newProfBuf(hdrsize, bufwords, tags int) *profBuf {
	if min := 2 + hdrsize + 1; bufwords < min {
		bufwords = min
	}

	// Buffer sizes must be power of two, so that we don't have to
	// worry about uint32 wraparound changing the effective position
	// within the buffers. We store 30 bits of count; limiting to 28
	// gives us some room for intermediate calculations.
	if bufwords >= 1<<28 || tags >= 1<<28 {
		throw("newProfBuf: buffer too large")
	}
	var i int
	for i = 1; i < bufwords; i <<= 1 {
	}
	bufwords = i
	for i = 1; i < tags; i <<= 1 {
	}
	tags = i

	b := new(profBuf)
	b.hdrsize = uintptr(hdrsize)
	b.data = make([]uint64, bufwords)
	b.tags = make([]unsafe.Pointer, tags)
	b.overflowBuf = make([]uint64, 2+b.hdrsize+1)
	return b
}
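
// Usage sketch (editorial; the sizes below are illustrative, not the ones any
// particular profiler actually uses): a CPU profiler with one header word
// might allocate
//
//	b := newProfBuf(1, 1<<17, 1<<14) // lengths are rounded up to powers of two
//
// giving a 1-word header per record, a 128K-word data ring, and a 16K-entry
// tag ring.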

// canWriteRecord reports whether the buffer has room
// for a single contiguous record with a stack of length nstk.
func (b *profBuf) canWriteRecord(nstk int) bool {
	br := b.r.load()
	bw := b.w.load()

	// room for tag?
	if countSub(br.tagCount(), bw.tagCount())+len(b.tags) < 1 {
		return false
	}

	// room for data?
	nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)
	want := 2 + int(b.hdrsize) + nstk
	i := int(bw.dataCount() % uint32(len(b.data)))
	if i+want > len(b.data) {
		// Can't fit in trailing fragment of slice.
		// Skip over that and start over at beginning of slice.
		nd -= len(b.data) - i
	}
	return nd >= want
}
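
// Worked example (editorial): with len(b.data) = 16, hdrsize = 2 and nstk = 3,
// a record wants 2+2+3 = 7 words. If the write offset i is 13, the record
// cannot fit in the 3 trailing words, so those 3 words are charged as a
// rewind and the check effectively requires nd >= 3+7 = 10 free words.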

// canWriteTwoRecords reports whether the buffer has room
// for two records with stack lengths nstk1, nstk2, in that order.
// Each record must be contiguous on its own, but the two
// records need not be contiguous (one can be at the end of the buffer
// and the other can wrap around and start at the beginning of the buffer).
func (b *profBuf) canWriteTwoRecords(nstk1, nstk2 int) bool {
	br := b.r.load()
	bw := b.w.load()

	// room for tag?
	if countSub(br.tagCount(), bw.tagCount())+len(b.tags) < 2 {
		return false
	}

	// room for data?
	nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)

	// first record
	want := 2 + int(b.hdrsize) + nstk1
	i := int(bw.dataCount() % uint32(len(b.data)))
	if i+want > len(b.data) {
		// Can't fit in trailing fragment of slice.
		// Skip over that and start over at beginning of slice.
		nd -= len(b.data) - i
		i = 0
	}
	i += want
	nd -= want

	// second record
	want = 2 + int(b.hdrsize) + nstk2
	if i+want > len(b.data) {
		// Can't fit in trailing fragment of slice.
		// Skip over that and start over at beginning of slice.
		nd -= len(b.data) - i
		i = 0
	}
	return nd >= want
}

// write writes an entry to the profiling buffer b.
// The entry begins with a fixed hdr, which must have
// length b.hdrsize, followed by a variable-sized stack
// and a single tag pointer *tagPtr (or nil if tagPtr is nil).
// No write barriers allowed because this might be called from a signal handler.
func (b *profBuf) write(tagPtr *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	if b == nil {
		return
	}
	if len(hdr) > int(b.hdrsize) {
		throw("misuse of profBuf.write")
	}

	if hasOverflow := b.hasOverflow(); hasOverflow && b.canWriteTwoRecords(1, len(stk)) {
		// Room for both an overflow record and the one being written.
		// Write the overflow record if the reader hasn't gotten to it yet.
		// Only racing against reader, not other writers.
		count, time := b.takeOverflow()
		if count > 0 {
			var stk [1]uintptr
			stk[0] = uintptr(count)
			b.write(nil, int64(time), nil, stk[:])
		}
	} else if hasOverflow || !b.canWriteRecord(len(stk)) {
		// Pending overflow without room to write overflow and new records
		// or no overflow but also no room for new record.
		b.incrementOverflow(now)
		b.wakeupExtra()
		return
	}

	// There's room: write the record.
	br := b.r.load()
	bw := b.w.load()

	// Profiling tag
	//
	// The tag is a pointer, but we can't run a write barrier here.
	// We have interrupted the OS-level execution of gp, but the
	// runtime still sees gp as executing. In effect, we are running
	// in place of the real gp. Since gp is the only goroutine that
	// can overwrite gp.labels, the value of gp.labels is stable during
	// this signal handler: it will still be reachable from gp when
	// we finish executing. If a GC is in progress right now, it must
	// keep gp.labels alive, because gp.labels is reachable from gp.
	// If gp were to overwrite gp.labels, the deletion barrier would
	// still shade that pointer, which would preserve it for the
	// in-progress GC, so all is well. Any future GC will see the
	// value we copied when scanning b.tags (heap-allocated).
	// We arrange that the store here is always overwriting a nil,
	// so there is no need for a deletion barrier on b.tags[wt].
	wt := int(bw.tagCount() % uint32(len(b.tags)))
	if tagPtr != nil {
		*(*uintptr)(unsafe.Pointer(&b.tags[wt])) = uintptr(unsafe.Pointer(*tagPtr))
	}

	// Main record.
	// It has to fit in a contiguous section of the slice, so if it doesn't fit at the end,
	// leave a rewind marker (0) and start over at the beginning of the slice.
	wd := int(bw.dataCount() % uint32(len(b.data)))
	nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)
	skip := 0
	if wd+2+int(b.hdrsize)+len(stk) > len(b.data) {
		b.data[wd] = 0
		skip = len(b.data) - wd
		nd -= skip
		wd = 0
	}
	data := b.data[wd:]
	data[0] = uint64(2 + b.hdrsize + uintptr(len(stk))) // length
	data[1] = uint64(now)                               // time stamp
	// header, zero-padded
	i := uintptr(copy(data[2:2+b.hdrsize], hdr))
	for ; i < b.hdrsize; i++ {
		data[2+i] = 0
	}
	for i, pc := range stk {
		data[2+b.hdrsize+uintptr(i)] = uint64(pc)
	}

	for {
		// Commit write.
		// Racing with reader setting flag bits in b.w, to avoid lost wakeups.
		old := b.w.load()
		new := old.addCountsAndClearFlags(skip+2+len(stk)+int(b.hdrsize), 1)
		if !b.w.cas(old, new) {
			continue
		}
		// If there was a reader, wake it up.
		if old&profReaderSleeping != 0 {
			notewakeup(&b.wait)
		}
		break
	}
}
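
// Caller sketch (editorial; period, stk and nstk are hypothetical stand-ins):
// a SIGPROF handler with hdrsize = 1 might log a sample as
//
//	var hdr [1]uint64
//	hdr[0] = period
//	b.write(&gp.labels, nanotime(), hdr[:], stk[:nstk])
//
// write takes care of overflow accounting and of waking a reader that went
// to sleep in read.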

// close signals that there will be no more writes on the buffer.
// Once all the data has been read from the buffer, reads will return eof=true.
func (b *profBuf) close() {
	if atomic.Load(&b.eof) > 0 {
		throw("runtime: profBuf already closed")
	}
	atomic.Store(&b.eof, 1)
	b.wakeupExtra()
}

// wakeupExtra must be called after setting one of the "extra"
// atomic fields b.overflow or b.eof.
// It records the change in b.w and wakes up the reader if needed.
func (b *profBuf) wakeupExtra() {
	for {
		old := b.w.load()
		new := old | profWriteExtra
		if !b.w.cas(old, new) {
			continue
		}
		if old&profReaderSleeping != 0 {
			notewakeup(&b.wait)
		}
		break
	}
}

// profBufReadMode specifies whether to block when no data is available to read.
type profBufReadMode int

const (
	profBufBlocking profBufReadMode = iota
	profBufNonBlocking
)

var overflowTag [1]unsafe.Pointer // always nil

func (b *profBuf) read(mode profBufReadMode) (data []uint64, tags []unsafe.Pointer, eof bool) {
	if b == nil {
		return nil, nil, true
	}

	br := b.rNext

	// Commit previous read, returning that part of the ring to the writer.
	// First clear tags that have now been read, both to avoid holding
	// up the memory they point at for longer than necessary
	// and so that b.write can assume it is always overwriting
	// nil tag entries (see comment in b.write).
	rPrev := b.r.load()
	if rPrev != br {
		ntag := countSub(br.tagCount(), rPrev.tagCount())
		ti := int(rPrev.tagCount() % uint32(len(b.tags)))
		for i := 0; i < ntag; i++ {
			b.tags[ti] = nil
			if ti++; ti == len(b.tags) {
				ti = 0
			}
		}
		b.r.store(br)
	}

Read:
	bw := b.w.load()
	numData := countSub(bw.dataCount(), br.dataCount())
	if numData == 0 {
		if b.hasOverflow() {
			// No data to read, but there is overflow to report.
			// Racing with writer flushing b.overflow into a real record.
			count, time := b.takeOverflow()
			if count == 0 {
				// Lost the race, go around again.
				goto Read
			}
			// Won the race, report overflow.
			dst := b.overflowBuf
			dst[0] = uint64(2 + b.hdrsize + 1)
			dst[1] = uint64(time)
			for i := uintptr(0); i < b.hdrsize; i++ {
				dst[2+i] = 0
			}
			dst[2+b.hdrsize] = uint64(count)
			return dst[:2+b.hdrsize+1], overflowTag[:1], false
		}
		if atomic.Load(&b.eof) > 0 {
			// No data, no overflow, EOF set: done.
			return nil, nil, true
		}
		if bw&profWriteExtra != 0 {
			// Writer claims to have published extra information (overflow or eof).
			// Attempt to clear notification and then check again.
			// If we fail to clear the notification it means b.w changed,
			// so we still need to check again.
			b.w.cas(bw, bw&^profWriteExtra)
			goto Read
		}

		// Nothing to read right now.
		// Return or sleep according to mode.
		if mode == profBufNonBlocking {
			return nil, nil, false
		}
		if !b.w.cas(bw, bw|profReaderSleeping) {
			goto Read
		}
		// Committed to sleeping.
		notetsleepg(&b.wait, -1)
		noteclear(&b.wait)
		goto Read
	}
	data = b.data[br.dataCount()%uint32(len(b.data)):]
	if len(data) > numData {
		data = data[:numData]
	} else {
		numData -= len(data) // available in case of wraparound
	}
	skip := 0
	if data[0] == 0 {
		// Wraparound record. Go back to the beginning of the ring.
		skip = len(data)
		data = b.data
		if len(data) > numData {
			data = data[:numData]
		}
	}

	ntag := countSub(bw.tagCount(), br.tagCount())
	if ntag == 0 {
		throw("runtime: malformed profBuf buffer - tag and data out of sync")
	}
	tags = b.tags[br.tagCount()%uint32(len(b.tags)):]
	if len(tags) > ntag {
		tags = tags[:ntag]
	}

	// Count out whole data records until either data or tags is done.
	// They are always in sync in the buffer, but due to an end-of-slice
	// wraparound we might need to stop early and return the rest
	// in the next call.
	di := 0
	ti := 0
	for di < len(data) && data[di] != 0 && ti < len(tags) {
		if uintptr(di)+uintptr(data[di]) > uintptr(len(data)) {
			throw("runtime: malformed profBuf buffer - invalid size")
		}
		di += int(data[di])
		ti++
	}

	// Remember how much we returned, to commit read on next call.
	b.rNext = br.addCountsAndClearFlags(skip+di, ti)

	if raceenabled {
		// Match racereleasemerge in runtime_setProfLabel,
		// so that the setting of the labels in runtime_setProfLabel
		// is treated as happening before any use of the labels
		// by our caller. The synchronization on labelSync itself is a fiction
		// for the race detector. The actual synchronization is handled
		// by the fact that the signal handler only reads from the current
		// goroutine and uses atomics to write the updated queue indices,
		// and then the read-out from the signal handler buffer uses
		// atomics to read those queue indices.
		raceacquire(unsafe.Pointer(&labelSync))
	}

	return data[:di], tags[:ti], false
}
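
// Reader sketch (editorial; process is a hypothetical consumer and hdrsize
// stands for b.hdrsize): records come back length-prefixed, one tag per
// record, so a blocking drain loop decodes them as
//
//	for {
//		data, tags, eof := b.read(profBufBlocking)
//		if eof {
//			break
//		}
//		for len(data) > 0 {
//			n := int(data[0]) // 2 + hdrsize + len(stk)
//			process(int64(data[1]), data[2:2+hdrsize], data[2+hdrsize:n], tags[0])
//			data, tags = data[n:], tags[1:]
//		}
//	}
//
// Each call to read implicitly commits the previous one, so the caller must
// be finished with data and tags before calling read again.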