Source file src/runtime/stack_test.go

     1  // Copyright 2012 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime_test
     6  
     7  import (
     8  	"bytes"
     9  	"fmt"
    10  	"reflect"
    11  	"regexp"
    12  	. "runtime"
    13  	"strings"
    14  	"sync"
    15  	"sync/atomic"
    16  	"testing"
    17  	"time"
    18  	_ "unsafe" // for go:linkname
    19  )
    20  
    21  // TestStackMem measures per-thread stack segment cache behavior.
    22  // The test consumed up to 500MB in the past.
    23  func TestStackMem(t *testing.T) {
    24  	const (
    25  		BatchSize      = 32
    26  		BatchCount     = 256
    27  		ArraySize      = 1024
    28  		RecursionDepth = 128
    29  	)
    30  	if testing.Short() {
    31  		return
    32  	}
    33  	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
    34  	s0 := new(MemStats)
    35  	ReadMemStats(s0)
    36  	for b := 0; b < BatchCount; b++ {
    37  		c := make(chan bool, BatchSize)
    38  		for i := 0; i < BatchSize; i++ {
    39  			go func() {
    40  				var f func(k int, a [ArraySize]byte)
    41  				f = func(k int, a [ArraySize]byte) {
    42  					if k == 0 {
    43  						time.Sleep(time.Millisecond)
    44  						return
    45  					}
    46  					f(k-1, a)
    47  				}
    48  				f(RecursionDepth, [ArraySize]byte{})
    49  				c <- true
    50  			}()
    51  		}
    52  		for i := 0; i < BatchSize; i++ {
    53  			<-c
    54  		}
    55  
    56  		// The goroutines have signaled via c that they are ready to exit.
    57  		// Give them a chance to exit by sleeping. If we don't wait, we
    58  		// might not reuse them on the next batch.
    59  		time.Sleep(10 * time.Millisecond)
    60  	}
    61  	s1 := new(MemStats)
    62  	ReadMemStats(s1)
    63  	consumed := int64(s1.StackSys - s0.StackSys)
    64  	t.Logf("Consumed %vMB for stack mem", consumed>>20)
    65  	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
    66  	if consumed > estimate {
    67  		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
    68  	}
    69  	// Due to broken stack memory accounting (https://golang.org/issue/7468),
    70  	// StackInuse can decrease during function execution, so we cast the values to int64.
    71  	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
    72  	t.Logf("Inuse %vMB for stack mem", inuse>>20)
    73  	if inuse > 4<<20 {
    74  		t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
    75  	}
    76  }
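
// The pattern above (snapshot MemStats before and after, then compare the
// stack fields) can be reused on its own. The helper below is an illustrative
// sketch rather than part of the original test; the name stackStatsDelta is
// hypothetical.
func stackStatsDelta(f func()) (sysDelta, inuseDelta int64) {
	var before, after MemStats
	ReadMemStats(&before)
	f()
	ReadMemStats(&after)
	// Convert to int64 before subtracting: StackInuse can shrink while f runs
	// (see https://golang.org/issue/7468), so an unsigned difference could wrap.
	sysDelta = int64(after.StackSys) - int64(before.StackSys)
	inuseDelta = int64(after.StackInuse) - int64(before.StackInuse)
	return sysDelta, inuseDelta
}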
    77  
    78  // Test stack growing in different contexts.
    79  func TestStackGrowth(t *testing.T) {
    80  	if *flagQuick {
    81  		t.Skip("-quick")
    82  	}
    83  
    84  	t.Parallel()
    85  
    86  	var wg sync.WaitGroup
    87  
    88  	// in a normal goroutine
    89  	var growDuration time.Duration // For debugging failures
    90  	wg.Add(1)
    91  	go func() {
    92  		defer wg.Done()
    93  		start := time.Now()
    94  		growStack(nil)
    95  		growDuration = time.Since(start)
    96  	}()
    97  	wg.Wait()
    98  	t.Log("first growStack took", growDuration)
    99  
   100  	// in locked goroutine
   101  	wg.Add(1)
   102  	go func() {
   103  		defer wg.Done()
   104  		LockOSThread()
   105  		growStack(nil)
   106  		UnlockOSThread()
   107  	}()
   108  	wg.Wait()
   109  
   110  	// in finalizer
   111  	var finalizerStart time.Time
   112  	var started, progress uint32
   113  	wg.Add(1)
   114  	s := new(string) // Must be of a type that avoids the tiny allocator, or else the finalizer might not run.
   115  	SetFinalizer(s, func(ss *string) {
   116  		defer wg.Done()
   117  		finalizerStart = time.Now()
   118  		atomic.StoreUint32(&started, 1)
   119  		growStack(&progress)
   120  	})
   121  	setFinalizerTime := time.Now()
   122  	s = nil
   123  
   124  	if d, ok := t.Deadline(); ok {
   125  		// Pad the timeout by an arbitrary 5% to give the AfterFunc time to run.
   126  		timeout := time.Until(d) * 19 / 20
   127  		timer := time.AfterFunc(timeout, func() {
   128  			// Panic — instead of calling t.Error and returning from the test — so
   129  			// that we get a useful goroutine dump if the test times out, especially
   130  			// if GOTRACEBACK=system or GOTRACEBACK=crash is set.
   131  			if atomic.LoadUint32(&started) == 0 {
   132  				panic("finalizer did not start")
   133  			} else {
   134  				panic(fmt.Sprintf("finalizer started %s ago (%s after registration) and ran %d iterations, but did not return", time.Since(finalizerStart), finalizerStart.Sub(setFinalizerTime), atomic.LoadUint32(&progress)))
   135  			}
   136  		})
   137  		defer timer.Stop()
   138  	}
   139  
   140  	GC()
   141  	wg.Wait()
   142  	t.Logf("finalizer started after %s and ran %d iterations in %v", finalizerStart.Sub(setFinalizerTime), atomic.LoadUint32(&progress), time.Since(finalizerStart))
   143  }
   144  
   145  // ... and in init
   146  //func init() {
   147  //	growStack()
   148  //}
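
// The deadline handling in TestStackGrowth (an AfterFunc armed at roughly 95%
// of t.Deadline and stopped via defer) is a reusable idiom whenever a full
// goroutine dump is more useful than a plain test timeout. A minimal sketch,
// assuming the hypothetical name dumpOnDeadline:
func dumpOnDeadline(t *testing.T, msg string) (stop func()) {
	d, ok := t.Deadline()
	if !ok {
		return func() {}
	}
	timer := time.AfterFunc(time.Until(d)*19/20, func() {
		// Panicking (rather than calling t.Error) makes the runtime print
		// every goroutine's stack, especially with GOTRACEBACK=system or crash.
		panic(msg)
	})
	return func() { timer.Stop() }
}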
   149  
   150  func growStack(progress *uint32) {
   151  	n := 1 << 10
   152  	if testing.Short() {
   153  		n = 1 << 8
   154  	}
   155  	for i := 0; i < n; i++ {
   156  		x := 0
   157  		growStackIter(&x, i)
   158  		if x != i+1 {
   159  			panic("stack is corrupted")
   160  		}
   161  		if progress != nil {
   162  			atomic.StoreUint32(progress, uint32(i))
   163  		}
   164  	}
   165  	GC()
   166  }
   167  
   168  // This function is not an anonymous func, so that the compiler can do escape
    169  // analysis and place x on the stack (so that a subsequent stack growth must update the pointer).
   170  func growStackIter(p *int, n int) {
   171  	if n == 0 {
   172  		*p = n + 1
   173  		GC()
   174  		return
   175  	}
   176  	*p = n + 1
   177  	x := 0
   178  	growStackIter(&x, n-1)
   179  	if x != n {
   180  		panic("stack is corrupted")
   181  	}
   182  }
   183  
   184  func TestStackGrowthCallback(t *testing.T) {
   185  	t.Parallel()
   186  	var wg sync.WaitGroup
   187  
   188  	// test stack growth at chan op
   189  	wg.Add(1)
   190  	go func() {
   191  		defer wg.Done()
   192  		c := make(chan int, 1)
   193  		growStackWithCallback(func() {
   194  			c <- 1
   195  			<-c
   196  		})
   197  	}()
   198  
   199  	// test stack growth at map op
   200  	wg.Add(1)
   201  	go func() {
   202  		defer wg.Done()
   203  		m := make(map[int]int)
   204  		growStackWithCallback(func() {
   205  			_, _ = m[1]
   206  			m[1] = 1
   207  		})
   208  	}()
   209  
   210  	// test stack growth at goroutine creation
   211  	wg.Add(1)
   212  	go func() {
   213  		defer wg.Done()
   214  		growStackWithCallback(func() {
   215  			done := make(chan bool)
   216  			go func() {
   217  				done <- true
   218  			}()
   219  			<-done
   220  		})
   221  	}()
   222  	wg.Wait()
   223  }
   224  
   225  func growStackWithCallback(cb func()) {
   226  	var f func(n int)
   227  	f = func(n int) {
   228  		if n == 0 {
   229  			cb()
   230  			return
   231  		}
   232  		f(n - 1)
   233  	}
   234  	for i := 0; i < 1<<10; i++ {
   235  		f(i)
   236  	}
   237  }
   238  
   239  // TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
   240  // during a stack copy.
   241  func set(p *int, x int) {
   242  	*p = x
   243  }
   244  func TestDeferPtrs(t *testing.T) {
   245  	var y int
   246  
   247  	defer func() {
   248  		if y != 42 {
   249  			t.Errorf("defer's stack references were not adjusted appropriately")
   250  		}
   251  	}()
   252  	defer set(&y, 42)
   253  	growStack(nil)
   254  }
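
// Note that the arguments of the deferred call above (&y and 42) are evaluated
// at the defer statement, before growStack runs; the runtime must then keep
// the saved &y valid across every stack copy. The sketch below makes that
// ordering explicit; deferPtrSketch is a hypothetical name, not part of the
// original file.
func deferPtrSketch() bool {
	var y int
	func() {
		defer set(&y, 42) // &y is recorded here, before any stack growth
		growStack(nil)    // may copy the stack, moving y and the saved pointer
	}()
	return y == 42
}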
   255  
   256  type bigBuf [4 * 1024]byte
   257  
   258  // TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
   259  // stack grows as part of starting the deferred function. It calls Goexit at various
   260  // stack depths, forcing the deferred function (with >4kB of args) to be run at
   261  // the bottom of the stack. The goal is to find a stack depth less than 4kB from
   262  // the end of the stack. Each trial runs in a different goroutine so that an earlier
   263  // stack growth does not invalidate a later attempt.
   264  func TestDeferPtrsGoexit(t *testing.T) {
   265  	for i := 0; i < 100; i++ {
   266  		c := make(chan int, 1)
   267  		go testDeferPtrsGoexit(c, i)
   268  		if n := <-c; n != 42 {
   269  			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
   270  		}
   271  	}
   272  }
   273  
   274  func testDeferPtrsGoexit(c chan int, i int) {
   275  	var y int
   276  	defer func() {
   277  		c <- y
   278  	}()
   279  	defer setBig(&y, 42, bigBuf{})
   280  	useStackAndCall(i, Goexit)
   281  }
   282  
   283  func setBig(p *int, x int, b bigBuf) {
   284  	*p = x
   285  }
   286  
   287  // TestDeferPtrsPanic is like TestDeferPtrsGoexit, but it's using panic instead
   288  // of Goexit to run the Defers. Those two are different execution paths
   289  // in the runtime.
   290  func TestDeferPtrsPanic(t *testing.T) {
   291  	for i := 0; i < 100; i++ {
   292  		c := make(chan int, 1)
    293  		go testDeferPtrsPanic(c, i)
   294  		if n := <-c; n != 42 {
   295  			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
   296  		}
   297  	}
   298  }
   299  
   300  func testDeferPtrsPanic(c chan int, i int) {
   301  	var y int
   302  	defer func() {
   303  		if recover() == nil {
   304  			c <- -1
   305  			return
   306  		}
   307  		c <- y
   308  	}()
   309  	defer setBig(&y, 42, bigBuf{})
   310  	useStackAndCall(i, func() { panic(1) })
   311  }
   312  
   313  //go:noinline
   314  func testDeferLeafSigpanic1() {
   315  	// Cause a sigpanic to be injected in this frame.
   316  	//
   317  	// This function has to be declared before
   318  	// TestDeferLeafSigpanic so the runtime will crash if we think
   319  	// this function's continuation PC is in
   320  	// TestDeferLeafSigpanic.
   321  	*(*int)(nil) = 0
   322  }
   323  
   324  // TestDeferLeafSigpanic tests defer matching around leaf functions
   325  // that sigpanic. This is tricky because on LR machines the outer
   326  // function and the inner function have the same SP, but it's critical
   327  // that we match up the defer correctly to get the right liveness map.
   328  // See issue #25499.
   329  func TestDeferLeafSigpanic(t *testing.T) {
   330  	// Push a defer that will walk the stack.
   331  	defer func() {
   332  		if err := recover(); err == nil {
   333  			t.Fatal("expected panic from nil pointer")
   334  		}
   335  		GC()
   336  	}()
   337  	// Call a leaf function. We must set up the exact call stack:
   338  	//
    339  	//  deferring function -> leaf function -> sigpanic
   340  	//
   341  	// On LR machines, the leaf function will have the same SP as
   342  	// the SP pushed for the defer frame.
   343  	testDeferLeafSigpanic1()
   344  }
   345  
   346  // TestPanicUseStack checks that a chain of Panic structs on the stack are
   347  // updated correctly if the stack grows during the deferred execution that
   348  // happens as a result of the panic.
   349  func TestPanicUseStack(t *testing.T) {
   350  	pc := make([]uintptr, 10000)
   351  	defer func() {
   352  		recover()
   353  		Callers(0, pc) // force stack walk
   354  		useStackAndCall(100, func() {
   355  			defer func() {
   356  				recover()
   357  				Callers(0, pc) // force stack walk
   358  				useStackAndCall(200, func() {
   359  					defer func() {
   360  						recover()
   361  						Callers(0, pc) // force stack walk
   362  					}()
   363  					panic(3)
   364  				})
   365  			}()
   366  			panic(2)
   367  		})
   368  	}()
   369  	panic(1)
   370  }
   371  
   372  func TestPanicFar(t *testing.T) {
   373  	var xtree *xtreeNode
   374  	pc := make([]uintptr, 10000)
   375  	defer func() {
   376  		// At this point we created a large stack and unwound
   377  		// it via recovery. Force a stack walk, which will
   378  		// check the stack's consistency.
   379  		Callers(0, pc)
   380  	}()
   381  	defer func() {
   382  		recover()
   383  	}()
   384  	useStackAndCall(100, func() {
   385  		// Kick off the GC and make it do something nontrivial.
   386  		// (This used to force stack barriers to stick around.)
   387  		xtree = makeTree(18)
   388  		// Give the GC time to start scanning stacks.
   389  		time.Sleep(time.Millisecond)
   390  		panic(1)
   391  	})
   392  	_ = xtree
   393  }
   394  
   395  type xtreeNode struct {
   396  	l, r *xtreeNode
   397  }
   398  
   399  func makeTree(d int) *xtreeNode {
   400  	if d == 0 {
   401  		return new(xtreeNode)
   402  	}
   403  	return &xtreeNode{makeTree(d - 1), makeTree(d - 1)}
   404  }
   405  
   406  // use about n KB of stack and call f
   407  func useStackAndCall(n int, f func()) {
   408  	if n == 0 {
   409  		f()
   410  		return
   411  	}
   412  	var b [1024]byte // makes frame about 1KB
    413  	useStackAndCall(n-1+int(b[99]), f) // b[99] is always 0; reading it keeps b (and the ~1KB frame) in use
   414  }
   415  
   416  func useStack(n int) {
   417  	useStackAndCall(n, func() {})
   418  }
   419  
   420  func growing(c chan int, done chan struct{}) {
   421  	for n := range c {
   422  		useStack(n)
   423  		done <- struct{}{}
   424  	}
   425  	done <- struct{}{}
   426  }
   427  
   428  func TestStackCache(t *testing.T) {
   429  	// Allocate a bunch of goroutines and grow their stacks.
   430  	// Repeat a few times to test the stack cache.
   431  	const (
   432  		R = 4
   433  		G = 200
   434  		S = 5
   435  	)
   436  	for i := 0; i < R; i++ {
   437  		var reqchans [G]chan int
   438  		done := make(chan struct{})
   439  		for j := 0; j < G; j++ {
   440  			reqchans[j] = make(chan int)
   441  			go growing(reqchans[j], done)
   442  		}
   443  		for s := 0; s < S; s++ {
   444  			for j := 0; j < G; j++ {
   445  				reqchans[j] <- 1 << uint(s)
   446  			}
   447  			for j := 0; j < G; j++ {
   448  				<-done
   449  			}
   450  		}
   451  		for j := 0; j < G; j++ {
   452  			close(reqchans[j])
   453  		}
   454  		for j := 0; j < G; j++ {
   455  			<-done
   456  		}
   457  	}
   458  }
   459  
   460  func TestStackOutput(t *testing.T) {
   461  	b := make([]byte, 1024)
   462  	stk := string(b[:Stack(b, false)])
   463  	if !strings.HasPrefix(stk, "goroutine ") {
   464  		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
   465  		t.Errorf("Stack output should begin with \"goroutine \"")
   466  	}
   467  }
   468  
   469  func TestStackAllOutput(t *testing.T) {
   470  	b := make([]byte, 1024)
   471  	stk := string(b[:Stack(b, true)])
   472  	if !strings.HasPrefix(stk, "goroutine ") {
   473  		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
   474  		t.Errorf("Stack output should begin with \"goroutine \"")
   475  	}
   476  }
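
// Stack's output starts with a header like "goroutine N [state]:", which is
// what the two tests above rely on. The sketch below pulls the numeric ID out
// of the current goroutine's header; goroutineIDFromStack is a hypothetical
// helper and is only as stable as the traceback format itself.
var goroutineHeaderRE = regexp.MustCompile(`^goroutine (\d+) `)

func goroutineIDFromStack() string {
	buf := make([]byte, 64)
	buf = buf[:Stack(buf, false)]
	if m := goroutineHeaderRE.FindSubmatch(buf); m != nil {
		return string(m[1])
	}
	return ""
}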
   477  
   478  func TestStackPanic(t *testing.T) {
   479  	// Test that stack copying copies panics correctly. This is difficult
   480  	// to test because it is very unlikely that the stack will be copied
   481  	// in the middle of gopanic. But it can happen.
   482  	// To make this test effective, edit panic.go:gopanic and uncomment
   483  	// the GC() call just before freedefer(d).
   484  	defer func() {
   485  		if x := recover(); x == nil {
   486  			t.Errorf("recover failed")
   487  		}
   488  	}()
   489  	useStack(32)
   490  	panic("test panic")
   491  }
   492  
   493  func BenchmarkStackCopyPtr(b *testing.B) {
   494  	c := make(chan bool)
   495  	for i := 0; i < b.N; i++ {
   496  		go func() {
   497  			i := 1000000
   498  			countp(&i)
   499  			c <- true
   500  		}()
   501  		<-c
   502  	}
   503  }
   504  
   505  func countp(n *int) {
   506  	if *n == 0 {
   507  		return
   508  	}
   509  	*n--
   510  	countp(n)
   511  }
   512  
   513  func BenchmarkStackCopy(b *testing.B) {
   514  	c := make(chan bool)
   515  	for i := 0; i < b.N; i++ {
   516  		go func() {
   517  			count(1000000)
   518  			c <- true
   519  		}()
   520  		<-c
   521  	}
   522  }
   523  
   524  func count(n int) int {
   525  	if n == 0 {
   526  		return 0
   527  	}
   528  	return 1 + count(n-1)
   529  }
   530  
   531  func BenchmarkStackCopyNoCache(b *testing.B) {
   532  	c := make(chan bool)
   533  	for i := 0; i < b.N; i++ {
   534  		go func() {
   535  			count1(1000000)
   536  			c <- true
   537  		}()
   538  		<-c
   539  	}
   540  }
   541  
   542  func count1(n int) int {
   543  	if n <= 0 {
   544  		return 0
   545  	}
   546  	return 1 + count2(n-1)
   547  }
   548  
   549  func count2(n int) int  { return 1 + count3(n-1) }
   550  func count3(n int) int  { return 1 + count4(n-1) }
   551  func count4(n int) int  { return 1 + count5(n-1) }
   552  func count5(n int) int  { return 1 + count6(n-1) }
   553  func count6(n int) int  { return 1 + count7(n-1) }
   554  func count7(n int) int  { return 1 + count8(n-1) }
   555  func count8(n int) int  { return 1 + count9(n-1) }
   556  func count9(n int) int  { return 1 + count10(n-1) }
   557  func count10(n int) int { return 1 + count11(n-1) }
   558  func count11(n int) int { return 1 + count12(n-1) }
   559  func count12(n int) int { return 1 + count13(n-1) }
   560  func count13(n int) int { return 1 + count14(n-1) }
   561  func count14(n int) int { return 1 + count15(n-1) }
   562  func count15(n int) int { return 1 + count16(n-1) }
   563  func count16(n int) int { return 1 + count17(n-1) }
   564  func count17(n int) int { return 1 + count18(n-1) }
   565  func count18(n int) int { return 1 + count19(n-1) }
   566  func count19(n int) int { return 1 + count20(n-1) }
   567  func count20(n int) int { return 1 + count21(n-1) }
   568  func count21(n int) int { return 1 + count22(n-1) }
   569  func count22(n int) int { return 1 + count23(n-1) }
   570  func count23(n int) int { return 1 + count1(n-1) }
   571  
   572  type stkobjT struct {
   573  	p *stkobjT
   574  	x int64
   575  	y [20]int // consume some stack
   576  }
   577  
   578  // Sum creates a linked list of stkobjTs.
   579  func Sum(n int64, p *stkobjT) {
   580  	if n == 0 {
   581  		return
   582  	}
   583  	s := stkobjT{p: p, x: n}
   584  	Sum(n-1, &s)
   585  	p.x += s.x
   586  }
   587  
   588  func BenchmarkStackCopyWithStkobj(b *testing.B) {
   589  	c := make(chan bool)
   590  	for i := 0; i < b.N; i++ {
   591  		go func() {
   592  			var s stkobjT
   593  			Sum(100000, &s)
   594  			c <- true
   595  		}()
   596  		<-c
   597  	}
   598  }
   599  
   600  func BenchmarkIssue18138(b *testing.B) {
   601  	// Channel with N "can run a goroutine" tokens
   602  	const N = 10
   603  	c := make(chan []byte, N)
   604  	for i := 0; i < N; i++ {
   605  		c <- make([]byte, 1)
   606  	}
   607  
   608  	for i := 0; i < b.N; i++ {
   609  		<-c // get token
   610  		go func() {
   611  			useStackPtrs(1000, false) // uses ~1MB max
   612  			m := make([]byte, 8192)   // make GC trigger occasionally
   613  			c <- m                    // return token
   614  		}()
   615  	}
   616  }
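
// The buffered channel above doubles as a semaphore: each element is a token
// permitting one live goroutine, which bounds peak stack memory. A minimal
// sketch of the same pattern with an explicit token type; runLimited is a
// hypothetical helper, not used by the benchmark.
func runLimited(limit, jobs int, work func()) {
	sem := make(chan struct{}, limit)
	var wg sync.WaitGroup
	for i := 0; i < jobs; i++ {
		sem <- struct{}{} // acquire a token; blocks while limit goroutines are live
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }() // release the token when done
			work()
		}()
	}
	wg.Wait()
}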
   617  
   618  func useStackPtrs(n int, b bool) {
   619  	if b {
   620  		// This code contributes to the stack frame size, and hence to the
   621  		// stack copying cost. But since b is always false, it costs no
   622  		// execution time (not even the zeroing of a).
   623  		var a [128]*int // 1KB of pointers
   624  		a[n] = &n
   625  		n = *a[0]
   626  	}
   627  	if n == 0 {
   628  		return
   629  	}
   630  	useStackPtrs(n-1, b)
   631  }
   632  
   633  type structWithMethod struct{}
   634  
   635  func (s structWithMethod) caller() string {
   636  	_, file, line, ok := Caller(1)
   637  	if !ok {
   638  		panic("Caller failed")
   639  	}
   640  	return fmt.Sprintf("%s:%d", file, line)
   641  }
   642  
   643  func (s structWithMethod) callers() []uintptr {
   644  	pc := make([]uintptr, 16)
   645  	return pc[:Callers(0, pc)]
   646  }
   647  
   648  func (s structWithMethod) stack() string {
   649  	buf := make([]byte, 4<<10)
   650  	return string(buf[:Stack(buf, false)])
   651  }
   652  
   653  func (s structWithMethod) nop() {}
   654  
   655  func TestStackWrapperCaller(t *testing.T) {
   656  	var d structWithMethod
   657  	// Force the compiler to construct a wrapper method.
   658  	wrapper := (*structWithMethod).caller
   659  	// Check that the wrapper doesn't affect the stack trace.
   660  	if dc, ic := d.caller(), wrapper(&d); dc != ic {
   661  		t.Fatalf("direct caller %q != indirect caller %q", dc, ic)
   662  	}
   663  }
   664  
   665  func TestStackWrapperCallers(t *testing.T) {
   666  	var d structWithMethod
   667  	wrapper := (*structWithMethod).callers
   668  	// Check that <autogenerated> doesn't appear in the stack trace.
   669  	pcs := wrapper(&d)
   670  	frames := CallersFrames(pcs)
   671  	for {
   672  		fr, more := frames.Next()
   673  		if fr.File == "<autogenerated>" {
   674  			t.Fatalf("<autogenerated> appears in stack trace: %+v", fr)
   675  		}
   676  		if !more {
   677  			break
   678  		}
   679  	}
   680  }
   681  
   682  func TestStackWrapperStack(t *testing.T) {
   683  	var d structWithMethod
   684  	wrapper := (*structWithMethod).stack
   685  	// Check that <autogenerated> doesn't appear in the stack trace.
   686  	stk := wrapper(&d)
   687  	if strings.Contains(stk, "<autogenerated>") {
   688  		t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
   689  	}
   690  }
   691  
   692  type I interface {
   693  	M()
   694  }
   695  
   696  func TestStackWrapperStackPanic(t *testing.T) {
   697  	t.Run("sigpanic", func(t *testing.T) {
   698  		// nil calls to interface methods cause a sigpanic.
   699  		testStackWrapperPanic(t, func() { I.M(nil) }, "runtime_test.I.M")
   700  	})
   701  	t.Run("panicwrap", func(t *testing.T) {
   702  		// Nil calls to value method wrappers call panicwrap.
   703  		wrapper := (*structWithMethod).nop
   704  		testStackWrapperPanic(t, func() { wrapper(nil) }, "runtime_test.(*structWithMethod).nop")
   705  	})
   706  }
   707  
   708  func testStackWrapperPanic(t *testing.T, cb func(), expect string) {
   709  	// Test that the stack trace from a panicking wrapper includes
    710  	// the wrapper, even though we elide these when they don't panic.
   711  	t.Run("CallersFrames", func(t *testing.T) {
   712  		defer func() {
   713  			err := recover()
   714  			if err == nil {
   715  				t.Fatalf("expected panic")
   716  			}
   717  			pcs := make([]uintptr, 10)
   718  			n := Callers(0, pcs)
   719  			frames := CallersFrames(pcs[:n])
   720  			for {
   721  				frame, more := frames.Next()
   722  				t.Log(frame.Function)
   723  				if frame.Function == expect {
   724  					return
   725  				}
   726  				if !more {
   727  					break
   728  				}
   729  			}
   730  			t.Fatalf("panicking wrapper %s missing from stack trace", expect)
   731  		}()
   732  		cb()
   733  	})
   734  	t.Run("Stack", func(t *testing.T) {
   735  		defer func() {
   736  			err := recover()
   737  			if err == nil {
   738  				t.Fatalf("expected panic")
   739  			}
   740  			buf := make([]byte, 4<<10)
   741  			stk := string(buf[:Stack(buf, false)])
   742  			if !strings.Contains(stk, "\n"+expect) {
   743  				t.Fatalf("panicking wrapper %s missing from stack trace:\n%s", expect, stk)
   744  			}
   745  		}()
   746  		cb()
   747  	})
   748  }
   749  
   750  func TestCallersFromWrapper(t *testing.T) {
   751  	// Test that invoking CallersFrames on a stack where the first
   752  	// PC is an autogenerated wrapper keeps the wrapper in the
   753  	// trace. Normally we elide these, assuming that the wrapper
   754  	// calls the thing you actually wanted to see, but in this
   755  	// case we need to keep it.
   756  	pc := reflect.ValueOf(I.M).Pointer()
   757  	frames := CallersFrames([]uintptr{pc})
   758  	frame, more := frames.Next()
   759  	if frame.Function != "runtime_test.I.M" {
   760  		t.Fatalf("want function %s, got %s", "runtime_test.I.M", frame.Function)
   761  	}
   762  	if more {
   763  		t.Fatalf("want 1 frame, got > 1")
   764  	}
   765  }
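
// When only a single PC needs to be resolved, FuncForPC gives a quick name
// lookup; CallersFrames remains the right tool for walking a whole stack
// because it expands inlined frames in order. An illustrative sketch;
// funcNameForPC is a hypothetical helper and fn must be a func value.
func funcNameForPC(fn any) string {
	pc := reflect.ValueOf(fn).Pointer()
	if f := FuncForPC(pc); f != nil {
		return f.Name()
	}
	return "<unknown>"
}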
   766  
   767  func TestTracebackSystemstack(t *testing.T) {
   768  	if GOARCH == "ppc64" || GOARCH == "ppc64le" {
   769  		t.Skip("systemstack tail call not implemented on ppc64x")
   770  	}
   771  
   772  	// Test that profiles correctly jump over systemstack,
   773  	// including nested systemstack calls.
   774  	pcs := make([]uintptr, 20)
   775  	pcs = pcs[:TracebackSystemstack(pcs, 5)]
   776  	// Check that runtime.TracebackSystemstack appears five times
   777  	// and that we see TestTracebackSystemstack.
   778  	countIn, countOut := 0, 0
   779  	frames := CallersFrames(pcs)
   780  	var tb bytes.Buffer
   781  	for {
   782  		frame, more := frames.Next()
   783  		fmt.Fprintf(&tb, "\n%s+0x%x %s:%d", frame.Function, frame.PC-frame.Entry, frame.File, frame.Line)
   784  		switch frame.Function {
   785  		case "runtime.TracebackSystemstack":
   786  			countIn++
   787  		case "runtime_test.TestTracebackSystemstack":
   788  			countOut++
   789  		}
   790  		if !more {
   791  			break
   792  		}
   793  	}
   794  	if countIn != 5 || countOut != 1 {
   795  		t.Fatalf("expected 5 calls to TracebackSystemstack and 1 call to TestTracebackSystemstack, got:%s", tb.String())
   796  	}
   797  }
   798  
   799  func TestTracebackAncestors(t *testing.T) {
   800  	goroutineRegex := regexp.MustCompile(`goroutine [0-9]+ \[`)
   801  	for _, tracebackDepth := range []int{0, 1, 5, 50} {
   802  		output := runTestProg(t, "testprog", "TracebackAncestors", fmt.Sprintf("GODEBUG=tracebackancestors=%d", tracebackDepth))
   803  
   804  		numGoroutines := 3
   805  		numFrames := 2
   806  		ancestorsExpected := numGoroutines
   807  		if numGoroutines > tracebackDepth {
   808  			ancestorsExpected = tracebackDepth
   809  		}
   810  
   811  		matches := goroutineRegex.FindAllStringSubmatch(output, -1)
   812  		if len(matches) != 2 {
   813  			t.Fatalf("want 2 goroutines, got:\n%s", output)
   814  		}
   815  
   816  		// Check functions in the traceback.
   817  		fns := []string{"main.recurseThenCallGo", "main.main", "main.printStack", "main.TracebackAncestors"}
   818  		for _, fn := range fns {
   819  			if !strings.Contains(output, "\n"+fn+"(") {
   820  				t.Fatalf("expected %q function in traceback:\n%s", fn, output)
   821  			}
   822  		}
   823  
   824  		if want, count := "originating from goroutine", ancestorsExpected; strings.Count(output, want) != count {
   825  			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
   826  		}
   827  
   828  		if want, count := "main.recurseThenCallGo(...)", ancestorsExpected*(numFrames+1); strings.Count(output, want) != count {
   829  			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
   830  		}
   831  
   832  		if want, count := "main.recurseThenCallGo(0x", 1; strings.Count(output, want) != count {
   833  			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
   834  		}
   835  	}
   836  }
   837  
   838  // Test that defer closure is correctly scanned when the stack is scanned.
   839  func TestDeferLiveness(t *testing.T) {
   840  	output := runTestProg(t, "testprog", "DeferLiveness", "GODEBUG=clobberfree=1")
   841  	if output != "" {
   842  		t.Errorf("output:\n%s\n\nwant no output", output)
   843  	}
   844  }
   845  
   846  func TestDeferHeapAndStack(t *testing.T) {
   847  	P := 4     // processors
    848  	N := 10000 // iterations
   849  	D := 200   // stack depth
   850  
   851  	if testing.Short() {
   852  		P /= 2
   853  		N /= 10
   854  		D /= 10
   855  	}
   856  	c := make(chan bool)
   857  	for p := 0; p < P; p++ {
   858  		go func() {
   859  			for i := 0; i < N; i++ {
   860  				if deferHeapAndStack(D) != 2*D {
   861  					panic("bad result")
   862  				}
   863  			}
   864  			c <- true
   865  		}()
   866  	}
   867  	for p := 0; p < P; p++ {
   868  		<-c
   869  	}
   870  }
   871  
   872  // deferHeapAndStack(n) computes 2*n
   873  func deferHeapAndStack(n int) (r int) {
   874  	if n == 0 {
   875  		return 0
   876  	}
   877  	if n%2 == 0 {
   878  		// heap-allocated defers
   879  		for i := 0; i < 2; i++ {
   880  			defer func() {
   881  				r++
   882  			}()
   883  		}
   884  	} else {
   885  		// stack-allocated defers
   886  		defer func() {
   887  			r++
   888  		}()
   889  		defer func() {
   890  			r++
   891  		}()
   892  	}
   893  	r = deferHeapAndStack(n - 1)
   894  	escapeMe(new([1024]byte)) // force some GCs
   895  	return
   896  }
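
// Why the split above matters: a defer statement inside a loop (the n%2 == 0
// branch) is not open-coded and its defer record is heap-allocated, because
// the compiler does not track how many times it will execute, while the two
// plain defers in the else branch can be handled on the stack. Exercising both
// shapes under repeated stack copies is the point of this helper.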
   897  
   898  // Pass a value to escapeMe to force it to escape.
   899  var escapeMe = func(x any) {}
   900  
   901  // Test that when F -> G is inlined and F is excluded from stack
   902  // traces, G still appears.
   903  func TestTracebackInlineExcluded(t *testing.T) {
   904  	defer func() {
   905  		recover()
   906  		buf := make([]byte, 4<<10)
   907  		stk := string(buf[:Stack(buf, false)])
   908  
   909  		t.Log(stk)
   910  
   911  		if not := "tracebackExcluded"; strings.Contains(stk, not) {
   912  			t.Errorf("found but did not expect %q", not)
   913  		}
   914  		if want := "tracebackNotExcluded"; !strings.Contains(stk, want) {
   915  			t.Errorf("expected %q in stack", want)
   916  		}
   917  	}()
   918  	tracebackExcluded()
   919  }
   920  
   921  // tracebackExcluded should be excluded from tracebacks. There are
   922  // various ways this could come up. Linking it to a "runtime." name is
   923  // rather synthetic, but it's easy and reliable. See issue #42754 for
   924  // one way this happened in real code.
   925  //
   926  //go:linkname tracebackExcluded runtime.tracebackExcluded
   927  //go:noinline
   928  func tracebackExcluded() {
   929  	// Call an inlined function that should not itself be excluded
   930  	// from tracebacks.
   931  	tracebackNotExcluded()
   932  }
   933  
   934  // tracebackNotExcluded should be inlined into tracebackExcluded, but
   935  // should not itself be excluded from the traceback.
   936  func tracebackNotExcluded() {
   937  	var x *int
   938  	*x = 0
   939  }
   940  
