Source file src/runtime/runtime_test.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"flag"
	"fmt"
	"io"
	. "runtime"
	"runtime/debug"
	"sort"
	"strings"
	"sync"
	"testing"
	"time"
	"unsafe"
)

// flagQuick is set by the -quick option to skip some relatively slow tests.
// This is used by the cmd/dist test runtime:cpu124.
// The cmd/dist test passes both -test.short and -quick;
// there are tests that only check testing.Short, and those tests will
// not be skipped if only -quick is used.
var flagQuick = flag.Bool("quick", false, "skip slow tests, for cmd/dist test runtime:cpu124")

func init() {
	// We're testing the runtime, so make tracebacks show things
	// in the runtime. This only raises the level, so it won't
	// override GOTRACEBACK=crash from the user.
	SetTracebackEnv("system")
}

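// errf is the error returned by errfn. It is not assigned anywhere in this
// file, so errfn normally returns nil; errfn1 always returns io.EOF. The
// interface comparison benchmarks below call these helpers rather than
// comparing constants directly.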
var errf error

func errfn() error {
	return errf
}

func errfn1() error {
	return io.EOF
}

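// BenchmarkIfaceCmp100 measures comparing the error interface value
// returned by errfn against io.EOF, 100 comparisons per benchmark iteration.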
func BenchmarkIfaceCmp100(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for j := 0; j < 100; j++ {
			if errfn() == io.EOF {
				b.Fatal("bad comparison")
			}
		}
	}
}

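// BenchmarkIfaceCmpNil100 measures comparing the non-nil error returned by
// errfn1 (io.EOF) against nil, 100 comparisons per benchmark iteration.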
func BenchmarkIfaceCmpNil100(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for j := 0; j < 100; j++ {
			if errfn1() == nil {
				b.Fatal("bad comparison")
			}
		}
	}
}

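// efaceCmp1 and efaceCmp2 are the package-level operands for the
// empty-interface (eface) comparison benchmarks below.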
var efaceCmp1 any
var efaceCmp2 any

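// BenchmarkEfaceCmpDiff measures comparing two empty interfaces that hold
// pointers to different ints, 100 comparisons per benchmark iteration.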
func BenchmarkEfaceCmpDiff(b *testing.B) {
	x := 5
	efaceCmp1 = &x
	y := 6
	efaceCmp2 = &y
	for i := 0; i < b.N; i++ {
		for j := 0; j < 100; j++ {
			if efaceCmp1 == efaceCmp2 {
				b.Fatal("bad comparison")
			}
		}
	}
}

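// BenchmarkEfaceCmpDiffIndirect measures comparing two empty interfaces
// holding equal [2]int values; values of that size are stored indirectly
// in the interface, so the comparison follows the data pointers.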
func BenchmarkEfaceCmpDiffIndirect(b *testing.B) {
	efaceCmp1 = [2]int{1, 2}
	efaceCmp2 = [2]int{1, 2}
	for i := 0; i < b.N; i++ {
		for j := 0; j < 100; j++ {
			if efaceCmp1 != efaceCmp2 {
				b.Fatal("bad comparison")
			}
		}
	}
}

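// BenchmarkDefer measures the cost of a single defer per call; the deferred
// closure in defer1 checks that its arguments arrived intact and that no
// panic is in flight.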
func BenchmarkDefer(b *testing.B) {
	for i := 0; i < b.N; i++ {
		defer1()
	}
}

func defer1() {
	defer func(x, y, z int) {
		if recover() != nil || x != 1 || y != 2 || z != 3 {
			panic("bad recover")
		}
	}(1, 2, 3)
}

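// BenchmarkDefer10 measures the cost of a call that defers ten closures
// (defer2), dividing b.N by ten so roughly b.N defers run in total.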
func BenchmarkDefer10(b *testing.B) {
	for i := 0; i < b.N/10; i++ {
		defer2()
	}
}

func defer2() {
	for i := 0; i < 10; i++ {
		defer func(x, y, z int) {
			if recover() != nil || x != 1 || y != 2 || z != 3 {
				panic("bad recover")
			}
		}(1, 2, 3)
	}
}

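// BenchmarkDeferMany defers b.N closures in a single frame, so none of them
// run until the benchmark function returns and all of them are pending at
// once.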
func BenchmarkDeferMany(b *testing.B) {
	for i := 0; i < b.N; i++ {
		defer func(x, y, z int) {
			if recover() != nil || x != 1 || y != 2 || z != 3 {
				panic("bad recover")
			}
		}(1, 2, 3)
	}
}

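// BenchmarkPanicRecover measures the cost of panicking and recovering in a
// deferred closure (see defer3).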
func BenchmarkPanicRecover(b *testing.B) {
	for i := 0; i < b.N; i++ {
		defer3()
	}
}

func defer3() {
	defer func(x, y, z int) {
		if recover() == nil {
			panic("failed recover")
		}
	}(1, 2, 3)
	panic("hi")
}

// golang.org/issue/7063
func TestStopCPUProfilingWithProfilerOff(t *testing.T) {
	SetCPUProfileRate(0)
}

// Addresses to test for faulting behavior.
// This is less a test of SetPanicOnFault and more a check that
// the operating system and the runtime can process these faults
// correctly. That is, we're indirectly testing that without SetPanicOnFault
// these would manage to turn into ordinary crashes.
// Note that these are truncated on 32-bit systems, so the bottom 32 bits
// of the larger addresses must themselves be invalid addresses.
// We might get unlucky and the OS might have mapped one of these
// addresses, but probably not: they're all in the first page, very high
// addresses that normally an OS would reserve for itself, or malformed
// addresses. Even so, we might have to remove one or two on different
// systems. We will see.

var faultAddrs = []uint64{
	// low addresses
	0,
	1,
	0xfff,
	// high (kernel) addresses
	// or else malformed.
	0xffffffffffffffff,
	0xfffffffffffff001,
	0xffffffffffff0001,
	0xfffffffffff00001,
	0xffffffffff000001,
	0xfffffffff0000001,
	0xffffffff00000001,
	0xfffffff000000001,
	0xffffff0000000001,
	0xfffff00000000001,
	0xffff000000000001,
	0xfff0000000000001,
	0xff00000000000001,
	0xf000000000000001,
	0x8000000000000001,
}

func TestSetPanicOnFault(t *testing.T) {
	old := debug.SetPanicOnFault(true)
	defer debug.SetPanicOnFault(old)

	nfault := 0
	for _, addr := range faultAddrs {
		testSetPanicOnFault(t, uintptr(addr), &nfault)
	}
	if nfault == 0 {
		t.Fatalf("none of the addresses faulted")
	}
}

// testSetPanicOnFault tests one potentially faulting address.
// It deliberately constructs and uses an invalid pointer,
// so mark it as nocheckptr.
//
//go:nocheckptr
func testSetPanicOnFault(t *testing.T, addr uintptr, nfault *int) {
	if GOOS == "js" || GOOS == "wasip1" {
		t.Skip(GOOS + " does not support catching faults")
	}

	defer func() {
		if err := recover(); err != nil {
			*nfault++
		}
	}()

	// The read should fault, except that sometimes we hit
	// addresses that have had C or kernel pages mapped there
	// readable by user code. So just log the content.
	// If no addresses fault, we'll fail the test.
	v := *(*byte)(unsafe.Pointer(addr))
	t.Logf("addr %#x: %#x\n", addr, v)
}

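// eqstring_generic is a straightforward reference implementation of string
// equality, used by TestEqString to cross-check the built-in == operator.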
func eqstring_generic(s1, s2 string) bool {
	if len(s1) != len(s2) {
		return false
	}
	// optimization in assembly versions:
	// if s1.str == s2.str { return true }
	for i := 0; i < len(s1); i++ {
		if s1[i] != s2[i] {
			return false
		}
	}
	return true
}

func TestEqString(t *testing.T) {
	// This isn't really an exhaustive test of == on strings, it's
	// just a convenient way of documenting (via eqstring_generic)
	// what == does.
	s := []string{
		"",
		"a",
		"c",
		"aaa",
		"ccc",
		"cccc"[:3], // same contents, different string
		"1234567890",
	}
	for _, s1 := range s {
		for _, s2 := range s {
			x := s1 == s2
			y := eqstring_generic(s1, s2)
			if x != y {
				t.Errorf(`("%s" == "%s") = %t, want %t`, s1, s2, x, y)
			}
		}
	}
}

func TestTrailingZero(t *testing.T) {
	// make sure we add padding for structs with trailing zero-sized fields
	type T1 struct {
		n int32
		z [0]byte
	}
	if unsafe.Sizeof(T1{}) != 8 {
		t.Errorf("sizeof(%#v)==%d, want 8", T1{}, unsafe.Sizeof(T1{}))
	}
	type T2 struct {
		n int64
		z struct{}
	}
	if unsafe.Sizeof(T2{}) != 8+unsafe.Sizeof(uintptr(0)) {
		t.Errorf("sizeof(%#v)==%d, want %d", T2{}, unsafe.Sizeof(T2{}), 8+unsafe.Sizeof(uintptr(0)))
	}
	type T3 struct {
		n byte
		z [4]struct{}
	}
	if unsafe.Sizeof(T3{}) != 2 {
		t.Errorf("sizeof(%#v)==%d, want 2", T3{}, unsafe.Sizeof(T3{}))
	}
	// make sure padding can double for both zerosize and alignment
	type T4 struct {
		a int32
		b int16
		c int8
		z struct{}
	}
	if unsafe.Sizeof(T4{}) != 8 {
		t.Errorf("sizeof(%#v)==%d, want 8", T4{}, unsafe.Sizeof(T4{}))
	}
	// make sure we don't pad a zero-sized thing
	type T5 struct {
	}
	if unsafe.Sizeof(T5{}) != 0 {
		t.Errorf("sizeof(%#v)==%d, want 0", T5{}, unsafe.Sizeof(T5{}))
	}
}

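// TestAppendGrowth checks the append growth policy for a small slice of
// int64: after each single-element append, the capacity is expected to be
// the smallest power of two that holds the current length.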
func TestAppendGrowth(t *testing.T) {
	var x []int64
	check := func(want int) {
		if cap(x) != want {
			t.Errorf("len=%d, cap=%d, want cap=%d", len(x), cap(x), want)
		}
	}

	check(0)
	want := 1
	for i := 1; i <= 100; i++ {
		x = append(x, 1)
		check(want)
		if i&(i-1) == 0 {
			want = 2 * i
		}
	}
}

var One = []int64{1}

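// TestAppendSliceGrowth checks the same growth policy as TestAppendGrowth
// when appending a slice (One) rather than a single element.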
func TestAppendSliceGrowth(t *testing.T) {
	var x []int64
	check := func(want int) {
		if cap(x) != want {
			t.Errorf("len=%d, cap=%d, want cap=%d", len(x), cap(x), want)
		}
	}

	check(0)
	want := 1
	for i := 1; i <= 100; i++ {
		x = append(x, One...)
		check(want)
		if i&(i-1) == 0 {
			want = 2 * i
		}
	}
}

func TestGoroutineProfileTrivial(t *testing.T) {
	// Calling GoroutineProfile twice in a row should find the same number of goroutines,
	// but it's possible there are goroutines just about to exit, so we might end up
	// with fewer in the second call. Try a few times; it should converge once those
	// zombies are gone.
	for i := 0; ; i++ {
		n1, ok := GoroutineProfile(nil) // should fail, there's at least 1 goroutine
		if n1 < 1 || ok {
			t.Fatalf("GoroutineProfile(nil) = %d, %v, want >0, false", n1, ok)
		}
		n2, ok := GoroutineProfile(make([]StackRecord, n1))
		if n2 == n1 && ok {
			break
		}
		t.Logf("GoroutineProfile(%d) = %d, %v, want %d, true", n1, n2, ok, n1)
		if i >= 10 {
			t.Fatalf("GoroutineProfile not converging")
		}
	}
}

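// BenchmarkGoroutineProfile measures GoroutineProfile latency, reporting
// p50/p90/p99 metrics in addition to the usual average. Each variant runs
// both idle and under a GC load, with a small goroutine set, with 5000
// extra goroutines ("large"), and again after those goroutines have exited
// but while the runtime's goroutine list is still long ("sparse").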
func BenchmarkGoroutineProfile(b *testing.B) {
	run := func(fn func() bool) func(b *testing.B) {
		runOne := func(b *testing.B) {
			latencies := make([]time.Duration, 0, b.N)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				start := time.Now()
				ok := fn()
				if !ok {
					b.Fatal("goroutine profile failed")
				}
				latencies = append(latencies, time.Since(start))
			}
			b.StopTimer()

			// Sort latencies then report percentiles.
			sort.Slice(latencies, func(i, j int) bool {
				return latencies[i] < latencies[j]
			})
			b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
			b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
			b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
		}
		return func(b *testing.B) {
			b.Run("idle", runOne)

			b.Run("loaded", func(b *testing.B) {
				stop := applyGCLoad(b)
				runOne(b)
				// Make sure to stop the timer before we wait! The load created above
				// is very heavy-weight and not easy to stop, so we could end up
				// confusing the benchmarking framework for small b.N.
				b.StopTimer()
				stop()
			})
		}
	}

	// Measure the cost of counting goroutines
	b.Run("small-nil", run(func() bool {
		GoroutineProfile(nil)
		return true
	}))

	// Measure the cost with a small set of goroutines
	n := NumGoroutine()
	p := make([]StackRecord, 2*n+2*GOMAXPROCS(0))
	b.Run("small", run(func() bool {
		_, ok := GoroutineProfile(p)
		return ok
	}))

	// Measure the cost with a large set of goroutines
	ch := make(chan int)
	var ready, done sync.WaitGroup
	for i := 0; i < 5000; i++ {
		ready.Add(1)
		done.Add(1)
		go func() { ready.Done(); <-ch; done.Done() }()
	}
	ready.Wait()

	// Count goroutines with a large allgs list
	b.Run("large-nil", run(func() bool {
		GoroutineProfile(nil)
		return true
	}))

	n = NumGoroutine()
	p = make([]StackRecord, 2*n+2*GOMAXPROCS(0))
	b.Run("large", run(func() bool {
		_, ok := GoroutineProfile(p)
		return ok
	}))

	close(ch)
	done.Wait()

	// Count goroutines with a large (but unused) allgs list
	b.Run("sparse-nil", run(func() bool {
		GoroutineProfile(nil)
		return true
	}))

	// Measure the cost of a large (but unused) allgs list
	n = NumGoroutine()
	p = make([]StackRecord, 2*n+2*GOMAXPROCS(0))
	b.Run("sparse", run(func() bool {
		_, ok := GoroutineProfile(p)
		return ok
	}))
}

func TestVersion(t *testing.T) {
	// Test that version does not contain \r or \n.
	vers := Version()
	if strings.Contains(vers, "\r") || strings.Contains(vers, "\n") {
		t.Fatalf("cr/nl in version: %q", vers)
	}
}

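// TestTimediv checks the runtime's timediv helper, which divides an int64
// by an int32 and saturates the quotient at 1<<31 - 1 on overflow. Each
// case is also cross-checked against ordinary 64-bit division.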
func TestTimediv(t *testing.T) {
	for _, tc := range []struct {
		num int64
		div int32
		ret int32
		rem int32
	}{
		{
			num: 8,
			div: 2,
			ret: 4,
			rem: 0,
		},
		{
			num: 9,
			div: 2,
			ret: 4,
			rem: 1,
		},
		{
			// Used by runtime.check.
			num: 12345*1000000000 + 54321,
			div: 1000000000,
			ret: 12345,
			rem: 54321,
		},
		{
			num: 1<<32 - 1,
			div: 2,
			ret: 1<<31 - 1, // no overflow.
			rem: 1,
		},
		{
			num: 1 << 32,
			div: 2,
			ret: 1<<31 - 1, // overflow.
			rem: 0,
		},
		{
			num: 1 << 40,
			div: 2,
			ret: 1<<31 - 1, // overflow.
			rem: 0,
		},
		{
			num: 1<<40 + 1,
			div: 1 << 10,
			ret: 1 << 30,
			rem: 1,
		},
	} {
		name := fmt.Sprintf("%d div %d", tc.num, tc.div)
		t.Run(name, func(t *testing.T) {
			// Double check that the inputs make sense using
			// standard 64-bit division.
			ret64 := tc.num / int64(tc.div)
			rem64 := tc.num % int64(tc.div)
			if ret64 != int64(int32(ret64)) {
				// Simulate timediv overflow value.
				ret64 = 1<<31 - 1
				rem64 = 0
			}
			if ret64 != int64(tc.ret) {
				t.Errorf("%d / %d got ret %d rem %d want ret %d rem %d", tc.num, tc.div, ret64, rem64, tc.ret, tc.rem)
			}

			var rem int32
			ret := Timediv(tc.num, tc.div, &rem)
			if ret != tc.ret || rem != tc.rem {
				t.Errorf("timediv %d / %d got ret %d rem %d want ret %d rem %d", tc.num, tc.div, ret, rem, tc.ret, tc.rem)
			}
		})
	}
}
