Source file src/runtime/trace/trace_test.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package trace_test

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"internal/goexperiment"
	"internal/profile"
	"internal/race"
	"internal/trace"
	"io"
	"net"
	"os"
	"runtime"
	"runtime/pprof"
	. "runtime/trace"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"
)

var (
	saveTraces = flag.Bool("savetraces", false, "save traces collected by tests")
)

// TestEventBatch tests that Flush calls that happen during Start
// don't produce corrupted traces.
func TestEventBatch(t *testing.T) {
	if race.Enabled {
		t.Skip("skipping in race mode")
	}
	if IsEnabled() {
		t.Skip("skipping because -test.trace is set")
	}
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	if goexperiment.ExecTracer2 {
		t.Skip("skipping because this test is incompatible with the new tracer")
	}
	// During Start, a bunch of records are written to reflect the current
	// snapshot of the program, including the state of each goroutine.
	// Some string constants are also written to the trace to aid parsing.
	// This test checks that a Flush of the buffer occurring during this
	// process doesn't cause corrupted traces.
	// Exactly when a Flush happens during Start is hard to predict,
	// so we test with a range of goroutine counts hoping that one of
	// them triggers a Flush.
	// This range was chosen to fill up a ~64KB buffer with traceEvGoCreate
	// and traceEvGoWaiting events (12-13 bytes per goroutine).
	for g := 4950; g < 5050; g++ {
		n := g
		t.Run("G="+strconv.Itoa(n), func(t *testing.T) {
			var wg sync.WaitGroup
			wg.Add(n)

			in := make(chan bool, 1000)
			for i := 0; i < n; i++ {
				go func() {
					<-in
					wg.Done()
				}()
			}
			buf := new(bytes.Buffer)
			if err := Start(buf); err != nil {
				t.Fatalf("failed to start tracing: %v", err)
			}

			for i := 0; i < n; i++ {
				in <- true
			}
			wg.Wait()
			Stop()

			_, err := trace.Parse(buf, "")
			if err == trace.ErrTimeOrder {
				t.Skipf("skipping trace: %v", err)
			}

			if err != nil {
				t.Fatalf("failed to parse trace: %v", err)
			}
		})
	}
}

func TestTraceStartStop(t *testing.T) {
	if IsEnabled() {
		t.Skip("skipping because -test.trace is set")
	}
	buf := new(bytes.Buffer)
	if err := Start(buf); err != nil {
		t.Fatalf("failed to start tracing: %v", err)
	}
	Stop()
	size := buf.Len()
	if size == 0 {
		t.Fatalf("trace is empty")
	}
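	// Tracing has stopped, so after a short wait the buffer must not
	// grow any further; any additional bytes indicate writes after Stop.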
	time.Sleep(100 * time.Millisecond)
	if size != buf.Len() {
		t.Fatalf("trace writes after stop: %v -> %v", size, buf.Len())
	}
	saveTrace(t, buf, "TestTraceStartStop")
}

func TestTraceDoubleStart(t *testing.T) {
	if IsEnabled() {
		t.Skip("skipping because -test.trace is set")
	}
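	// Ensure tracing is not already active, e.g. left over from a
	// previous test.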
	Stop()
	buf := new(bytes.Buffer)
	if err := Start(buf); err != nil {
		t.Fatalf("failed to start tracing: %v", err)
	}
	if err := Start(buf); err == nil {
		t.Fatalf("succeeded in starting tracing a second time")
	}
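	// Stop must be safe to call repeatedly; the second call below runs
	// with tracing already stopped and must be a no-op.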
	Stop()
	Stop()
}

func TestTrace(t *testing.T) {
	if IsEnabled() {
		t.Skip("skipping because -test.trace is set")
	}
	if goexperiment.ExecTracer2 {
		// An equivalent test exists in internal/trace/v2.
		t.Skip("skipping because this test is incompatible with the new tracer")
	}
	buf := new(bytes.Buffer)
	if err := Start(buf); err != nil {
		t.Fatalf("failed to start tracing: %v", err)
	}
	Stop()
	saveTrace(t, buf, "TestTrace")
	_, err := trace.Parse(buf, "")
	if err == trace.ErrTimeOrder {
		t.Skipf("skipping trace: %v", err)
	}
	if err != nil {
		t.Fatalf("failed to parse trace: %v", err)
	}
}

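// parseTrace parses the trace in r, skipping the test if the timestamps
// are broken (trace.ErrTimeOrder), and returns the parsed events along
// with per-goroutine statistics.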
func parseTrace(t *testing.T, r io.Reader) ([]*trace.Event, map[uint64]*trace.GDesc) {
	res, err := trace.Parse(r, "")
	if err == trace.ErrTimeOrder {
		t.Skipf("skipping trace: %v", err)
	}
	if err != nil {
		t.Fatalf("failed to parse trace: %v", err)
	}
	gs := trace.GoroutineStats(res.Events)
	for goid := range gs {
		// We don't do any particular checks on the result at the moment.
		// But still check that RelatedGoroutines does not crash, hang, etc.
		_ = trace.RelatedGoroutines(res.Events, goid)
	}
	return res.Events, gs
}

func testBrokenTimestamps(t *testing.T, data []byte) {
	// On some processors cputicks (used to generate trace timestamps)
	// produces non-monotonic timestamps. It is important that the parser
	// distinguishes logically inconsistent traces (e.g. missing, excessive
	// or misordered events) from broken timestamps. The former is a bug
	// in the tracer; the latter is a machine issue.
	// So now that we have a consistent trace, test that (1) the parser does
	// not return a logical error in the case of broken timestamps
	// and (2) broken timestamps are eventually detected and reported.
	trace.BreakTimestampsForTesting = true
	defer func() {
		trace.BreakTimestampsForTesting = false
	}()
	for i := 0; i < 1e4; i++ {
		_, err := trace.Parse(bytes.NewReader(data), "")
		if err == trace.ErrTimeOrder {
			return
		}
		if err != nil {
			t.Fatalf("failed to parse trace: %v", err)
		}
	}
}

func TestTraceStress(t *testing.T) {
	switch runtime.GOOS {
	case "js", "wasip1":
		t.Skip("no os.Pipe on " + runtime.GOOS)
	}
	if IsEnabled() {
		t.Skip("skipping because -test.trace is set")
	}
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	if goexperiment.ExecTracer2 {
		// An equivalent test exists in internal/trace/v2.
		t.Skip("skipping because this test is incompatible with the new tracer")
	}

	var wg sync.WaitGroup
	done := make(chan bool)

	// Create a goroutine blocked before tracing.
	wg.Add(1)
	go func() {
		<-done
		wg.Done()
	}()

	// Create a goroutine blocked in syscall before tracing.
	rp, wp, err := os.Pipe()
	if err != nil {
		t.Fatalf("failed to create pipe: %v", err)
	}
	defer func() {
		rp.Close()
		wp.Close()
	}()
	wg.Add(1)
	go func() {
		var tmp [1]byte
		rp.Read(tmp[:])
		<-done
		wg.Done()
	}()
	time.Sleep(time.Millisecond) // give the goroutine above time to block

	buf := new(bytes.Buffer)
	if err := Start(buf); err != nil {
		t.Fatalf("failed to start tracing: %v", err)
	}

	procs := runtime.GOMAXPROCS(10)
	time.Sleep(50 * time.Millisecond) // test proc stop/start events

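	// Create a goroutine that spins on a locked OS thread, yielding in a
	// loop so the scheduler keeps emitting events for it until the test
	// is done.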
	go func() {
		runtime.LockOSThread()
		for {
			select {
			case <-done:
				return
			default:
				runtime.Gosched()
			}
		}
	}()

	runtime.GC()
	// Trigger GC from malloc.
	n := int(1e3)
	if isMemoryConstrained() {
		// Reduce allocation to avoid running out of
		// memory on the builder - see issue/12032.
		n = 512
	}
	for i := 0; i < n; i++ {
		_ = make([]byte, 1<<20)
	}

	// Create a bunch of busy goroutines to load all Ps.
	for p := 0; p < 10; p++ {
		wg.Add(1)
		go func() {
			// Do something useful.
			tmp := make([]byte, 1<<16)
			for i := range tmp {
				tmp[i]++
			}
			_ = tmp
			<-done
			wg.Done()
		}()
	}

	// Block in syscall.
	wg.Add(1)
	go func() {
		var tmp [1]byte
		rp.Read(tmp[:])
		<-done
		wg.Done()
	}()

	// Test timers.
	timerDone := make(chan bool)
	go func() {
		time.Sleep(time.Millisecond)
		timerDone <- true
	}()
	<-timerDone

	// A bit of network.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("listen failed: %v", err)
	}
	defer ln.Close()
	go func() {
		c, err := ln.Accept()
		if err != nil {
			return
		}
		time.Sleep(time.Millisecond)
		var buf [1]byte
		c.Write(buf[:])
		c.Close()
	}()
	c, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		t.Fatalf("dial failed: %v", err)
	}
	var tmp [1]byte
	c.Read(tmp[:])
	c.Close()

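	// Leave a goroutine blocked forever: a select with no cases never
	// proceeds, so this goroutine is still blocked when tracing stops.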
	go func() {
		runtime.Gosched()
		select {}
	}()

	// Unblock helper goroutines and wait for them to finish.
	wp.Write(tmp[:])
	wp.Write(tmp[:])
	close(done)
	wg.Wait()

	runtime.GOMAXPROCS(procs)

	Stop()
	saveTrace(t, buf, "TestTraceStress")
	trace := buf.Bytes()
	parseTrace(t, buf)
	testBrokenTimestamps(t, trace)
}

// isMemoryConstrained reports whether the current machine is likely
// to be memory constrained.
// This was originally for the openbsd/arm builder (Issue 12032).
// TODO: move this to testenv? Make this look at memory? Look at GO_BUILDER_NAME?
func isMemoryConstrained() bool {
	if runtime.GOOS == "plan9" {
		return true
	}
	switch runtime.GOARCH {
	case "arm", "mips", "mipsle":
		return true
	}
	return false
}

// TestTraceStressStartStop does a bunch of various stuff (timers, GC,
// network, etc.) in a separate goroutine and, concurrently with all
// that, starts and stops the trace 3 times.
func TestTraceStressStartStop(t *testing.T) {
	switch runtime.GOOS {
	case "js", "wasip1":
		t.Skip("no os.Pipe on " + runtime.GOOS)
	}
	if IsEnabled() {
		t.Skip("skipping because -test.trace is set")
	}
	if goexperiment.ExecTracer2 {
		// An equivalent test exists in internal/trace/v2.
		t.Skip("skipping because this test is incompatible with the new tracer")
	}
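	// The inner GOMAXPROCS call runs immediately, raising the setting to 8
	// and returning the previous value, which the deferred call restores.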
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
	outerDone := make(chan bool)

	go func() {
		defer func() {
			outerDone <- true
		}()

		var wg sync.WaitGroup
		done := make(chan bool)

		wg.Add(1)
		go func() {
			<-done
			wg.Done()
		}()

		rp, wp, err := os.Pipe()
		if err != nil {
			t.Errorf("failed to create pipe: %v", err)
			return
		}
		defer func() {
			rp.Close()
			wp.Close()
		}()
		wg.Add(1)
		go func() {
			var tmp [1]byte
			rp.Read(tmp[:])
			<-done
			wg.Done()
		}()
		time.Sleep(time.Millisecond)

		go func() {
			runtime.LockOSThread()
			for {
				select {
				case <-done:
					return
				default:
					runtime.Gosched()
				}
			}
		}()

		runtime.GC()
		// Trigger GC from malloc.
		n := int(1e3)
		if isMemoryConstrained() {
			// Reduce allocation to avoid running out of
			// memory on the builder.
			n = 512
		}
		for i := 0; i < n; i++ {
			_ = make([]byte, 1<<20)
		}

		// Create a bunch of busy goroutines to load all Ps.
		for p := 0; p < 10; p++ {
			wg.Add(1)
			go func() {
				// Do something useful.
				tmp := make([]byte, 1<<16)
				for i := range tmp {
					tmp[i]++
				}
				_ = tmp
				<-done
				wg.Done()
			}()
		}

		// Block in syscall.
		wg.Add(1)
		go func() {
			var tmp [1]byte
			rp.Read(tmp[:])
			<-done
			wg.Done()
		}()

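		// Momentarily drop GOMAXPROCS to 1 and restore the previous value
		// (returned by the inner call) to generate proc stop/start events
		// mid-trace.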
		runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

		// Test timers.
		timerDone := make(chan bool)
		go func() {
			time.Sleep(time.Millisecond)
			timerDone <- true
		}()
		<-timerDone

		// A bit of network.
		ln, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			t.Errorf("listen failed: %v", err)
			return
		}
		defer ln.Close()
		go func() {
			c, err := ln.Accept()
			if err != nil {
				return
			}
			time.Sleep(time.Millisecond)
			var buf [1]byte
			c.Write(buf[:])
			c.Close()
		}()
		c, err := net.Dial("tcp", ln.Addr().String())
		if err != nil {
			t.Errorf("dial failed: %v", err)
			return
		}
		var tmp [1]byte
		c.Read(tmp[:])
		c.Close()

		go func() {
			runtime.Gosched()
			select {}
		}()

		// Unblock helper goroutines and wait for them to finish.
		wp.Write(tmp[:])
		wp.Write(tmp[:])
		close(done)
		wg.Wait()
	}()

	for i := 0; i < 3; i++ {
		buf := new(bytes.Buffer)
		if err := Start(buf); err != nil {
			t.Fatalf("failed to start tracing: %v", err)
		}
		time.Sleep(time.Millisecond)
		Stop()
		saveTrace(t, buf, "TestTraceStressStartStop")
		trace := buf.Bytes()
		parseTrace(t, buf)
		testBrokenTimestamps(t, trace)
	}
	<-outerDone
}

func TestTraceFutileWakeup(t *testing.T) {
	if IsEnabled() {
		t.Skip("skipping because -test.trace is set")
	}
	if goexperiment.ExecTracer2 {
		t.Skip("skipping because this test is incompatible with the new tracer")
	}
	buf := new(bytes.Buffer)
	if err := Start(buf); err != nil {
		t.Fatalf("failed to start tracing: %v", err)
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
	c0 := make(chan int, 1)
	c1 := make(chan int, 1)
	c2 := make(chan int, 1)
	const procs = 2
	var done sync.WaitGroup
	done.Add(4 * procs)
	for p := 0; p < procs; p++ {
		const iters = 1e3
		go func() {
			for i := 0; i < iters; i++ {
				runtime.Gosched()
				c0 <- 0
			}
			done.Done()
		}()
		go func() {
			for i := 0; i < iters; i++ {
				runtime.Gosched()
				<-c0
			}
			done.Done()
		}()
		go func() {
			for i := 0; i < iters; i++ {
				runtime.Gosched()
				select {
				case c1 <- 0:
				case c2 <- 0:
				}
			}
			done.Done()
		}()
		go func() {
			for i := 0; i < iters; i++ {
				runtime.Gosched()
				select {
				case <-c1:
				case <-c2:
				}
			}
			done.Done()
		}()
	}
	done.Wait()

	Stop()
	saveTrace(t, buf, "TestTraceFutileWakeup")
	events, _ := parseTrace(t, buf)
	// Check that (1) trace does not contain EvFutileWakeup events and
	// (2) there are no consecutive EvGoBlock/EvGCStart/EvGoBlock events
	// (we call runtime.Gosched between all operations, so these would be futile wakeups).
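	// gs tracks a small per-goroutine state machine: 1 means the goroutine
	// just blocked, 2 means it just started running again after blocking.
	// Blocking again from state 2 (right after starting) or from state 1
	// (while already blocked) is reported as a failure.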
	gs := make(map[uint64]int)
	for _, ev := range events {
		switch ev.Type {
		case trace.EvFutileWakeup:
			t.Fatalf("found EvFutileWakeup event")
		case trace.EvGoBlockSend, trace.EvGoBlockRecv, trace.EvGoBlockSelect:
			if gs[ev.G] == 2 {
				t.Fatalf("goroutine %v blocked on %v at %v right after start",
					ev.G, trace.EventDescriptions[ev.Type].Name, ev.Ts)
			}
			if gs[ev.G] == 1 {
				t.Fatalf("goroutine %v blocked on %v at %v while blocked",
					ev.G, trace.EventDescriptions[ev.Type].Name, ev.Ts)
			}
			gs[ev.G] = 1
		case trace.EvGoStart:
			if gs[ev.G] == 1 {
				gs[ev.G] = 2
			}
		default:
			delete(gs, ev.G)
		}
	}
}

func TestTraceCPUProfile(t *testing.T) {
	if IsEnabled() {
		t.Skip("skipping because -test.trace is set")
	}
	if goexperiment.ExecTracer2 {
		// An equivalent test exists in internal/trace/v2.
		t.Skip("skipping because this test is incompatible with the new tracer")
	}

	cpuBuf := new(bytes.Buffer)
	if err := pprof.StartCPUProfile(cpuBuf); err != nil {
		t.Skipf("failed to start CPU profile: %v", err)
	}

	buf := new(bytes.Buffer)
	if err := Start(buf); err != nil {
		t.Fatalf("failed to start tracing: %v", err)
	}

	dur := 100 * time.Millisecond
	func() {
		// Create a region in the execution trace. Set and clear goroutine
		// labels fully within that region, so we know that any CPU profile
		// sample with the label must also be eligible for inclusion in the
		// execution trace.
		ctx := context.Background()
		defer StartRegion(ctx, "cpuHogger").End()
		pprof.Do(ctx, pprof.Labels("tracing", "on"), func(ctx context.Context) {
			cpuHogger(cpuHog1, &salt1, dur)
		})
		// Be sure the execution trace's view, when filtered to this goroutine
		// via the explicit goroutine ID in each event, gets many more samples
		// than the CPU profiler when filtered to this goroutine via labels.
		cpuHogger(cpuHog1, &salt1, dur)
	}()

	Stop()
	pprof.StopCPUProfile()
	saveTrace(t, buf, "TestTraceCPUProfile")

	prof, err := profile.Parse(cpuBuf)
	if err != nil {
		t.Fatalf("failed to parse CPU profile: %v", err)
	}
	// Examine the CPU profiler's view. Filter it to only include samples from
	// the single test goroutine. Use labels to execute that filter: they should
	// apply to all work done while that goroutine is getg().m.curg, and they
	// should apply to no other goroutines.
	pprofSamples := 0
	pprofStacks := make(map[string]int)
	for _, s := range prof.Sample {
		if s.Label["tracing"] != nil {
			var fns []string
			var leaf string
			for _, loc := range s.Location {
				for _, line := range loc.Line {
					fns = append(fns, fmt.Sprintf("%s:%d", line.Function.Name, line.Line))
					leaf = line.Function.Name
				}
			}
			// runtime.sigprof synthesizes call stacks when "normal traceback is
			// impossible or has failed", using particular placeholder functions
			// to represent common failure cases. Look for those functions in
			// the leaf position as a sign that the call stack and its
			// symbolization are more complex than this test can handle.
			//
			// TODO: Make the symbolization done by the execution tracer and CPU
			// profiler match up even in these harder cases. See #53378.
			switch leaf {
			case "runtime._System", "runtime._GC", "runtime._ExternalCode", "runtime._VDSO":
				continue
			}
			stack := strings.Join(fns, " ")
			samples := int(s.Value[0])
			pprofSamples += samples
			pprofStacks[stack] += samples
		}
	}
	if pprofSamples == 0 {
		t.Skipf("CPU profile did not include any samples while tracing was active\n%s", prof)
	}

	// Examine the execution tracer's view of the CPU profile samples. Filter it
	// to only include samples from the single test goroutine. Use the goroutine
	// ID that was recorded in the events: that should reflect getg().m.curg,
	// same as the profiler's labels (even when the M is using its g0 stack).
	totalTraceSamples := 0
	traceSamples := 0
	traceStacks := make(map[string]int)
	events, _ := parseTrace(t, buf)
	var hogRegion *trace.Event
	for _, ev := range events {
		if ev.Type == trace.EvUserRegion && ev.Args[1] == 0 && ev.SArgs[0] == "cpuHogger" {
			// mode "0" indicates region start
			hogRegion = ev
		}
	}
	if hogRegion == nil {
		t.Fatalf("execution trace did not identify cpuHogger goroutine")
	} else if hogRegion.Link == nil {
		t.Fatalf("execution trace did not close cpuHogger region")
	}
	for _, ev := range events {
		if ev.Type == trace.EvCPUSample {
			totalTraceSamples++
			if ev.G == hogRegion.G {
				traceSamples++
				var fns []string
				for _, frame := range ev.Stk {
					if frame.Fn != "runtime.goexit" {
						fns = append(fns, fmt.Sprintf("%s:%d", frame.Fn, frame.Line))
					}
				}
				stack := strings.Join(fns, " ")
				traceStacks[stack]++
			}
		}
	}

	// The execution trace may drop CPU profile samples if the profiling buffer
	// overflows. Based on the size of profBufWordCount, that takes a bit over
	// 1900 CPU samples or 19 thread-seconds at a 100 Hz sample rate. If we've
	// hit that case, then we definitely have at least one full buffer's worth
	// of CPU samples, so we'll call that success.
	overflowed := totalTraceSamples >= 1900
	if traceSamples < pprofSamples {
		t.Logf("execution trace did not include all CPU profile samples; %d in profile, %d in trace", pprofSamples, traceSamples)
		if !overflowed {
			t.Fail()
		}
	}

	for stack, traceSamples := range traceStacks {
		pprofSamples := pprofStacks[stack]
		delete(pprofStacks, stack)
		if traceSamples < pprofSamples {
			t.Logf("execution trace did not include all CPU profile samples for stack %q; %d in profile, %d in trace",
				stack, pprofSamples, traceSamples)
			if !overflowed {
				t.Fail()
			}
		}
	}
	for stack, pprofSamples := range pprofStacks {
		t.Logf("CPU profile included %d samples at stack %q not present in execution trace", pprofSamples, stack)
		if !overflowed {
			t.Fail()
		}
	}

	if t.Failed() {
		t.Logf("execution trace CPU samples:")
		for stack, samples := range traceStacks {
			t.Logf("%d: %q", samples, stack)
		}
		t.Logf("CPU profile:\n%v", prof)
	}
}

func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
	// We only need to get one 100 Hz clock tick, so we've got
	// a large safety buffer.
	// But do at least 500 iterations (which should take about 100ms),
	// otherwise TestCPUProfileMultithreaded can fail if only one
	// thread is scheduled during the testing period.
	t0 := time.Now()
	accum := *y
	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
		accum = f(accum)
	}
	*y = accum
}

var (
	salt1 = 0
)

// The actual CPU hogging function.
// Must not call other functions nor access heap/globals in the loop,
// otherwise under race detector the samples will be in the race runtime.
func cpuHog1(x int) int {
	return cpuHog0(x, 1e5)
}

func cpuHog0(x, n int) int {
	foo := x
	for i := 0; i < n; i++ {
		if i%1000 == 0 {
			// Spend time in mcall, stored as gp.m.curg, with g0 running
			runtime.Gosched()
		}
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 1
		}
	}
	return foo
}

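// saveTrace writes the collected trace to a <name>.trace file in the
// current directory when the -savetraces flag is set.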
func saveTrace(t *testing.T, buf *bytes.Buffer, name string) {
	if !*saveTraces {
		return
	}
	if err := os.WriteFile(name+".trace", buf.Bytes(), 0600); err != nil {
		t.Errorf("failed to write trace file: %s", err)
	}
}
