Source file src/runtime/runtime1.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/bytealg"
     9  	"internal/goarch"
    10  	"runtime/internal/atomic"
    11  	"unsafe"
    12  )
    13  
    14  // Keep a cached value to make gotraceback fast,
    15  // since we call it on every call to gentraceback.
    16  // The cached value is a uint32 in which the low bits
    17  // are the "crash" and "all" settings and the remaining
    18  // bits are the traceback value (0 off, 1 on, 2 include system).
    19  const (
    20  	tracebackCrash = 1 << iota
    21  	tracebackAll
    22  	tracebackShift = iota
    23  )
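         // With the iota values above, tracebackCrash == 1, tracebackAll == 2, and
         // tracebackShift == 2, so the two flag bits sit below the traceback level.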
    24  
    25  var traceback_cache uint32 = 2 << tracebackShift
    26  var traceback_env uint32
    27  
    28  // gotraceback returns the current traceback settings.
    29  //
    30  // If level is 0, suppress all tracebacks.
    31  // If level is 1, show tracebacks, but exclude runtime frames.
    32  // If level is 2, show tracebacks including runtime frames.
    33  // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
     34  // If crash is set, crash (core dump, etc.) after printing the traceback.
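         //
         // For example, GOTRACEBACK=crash stores 2<<tracebackShift | tracebackAll | tracebackCrash
         // in traceback_cache (see setTraceback below), so gotraceback reports level 2 with both
         // all and crash set, unless m.traceback or a runtime throw overrides the level.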
    35  //
    36  //go:nosplit
    37  func gotraceback() (level int32, all, crash bool) {
    38  	gp := getg()
    39  	t := atomic.Load(&traceback_cache)
    40  	crash = t&tracebackCrash != 0
    41  	all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
    42  	if gp.m.traceback != 0 {
    43  		level = int32(gp.m.traceback)
    44  	} else if gp.m.throwing >= throwTypeRuntime {
    45  		// Always include runtime frames in runtime throws unless
    46  		// otherwise overridden by m.traceback.
    47  		level = 2
    48  	} else {
    49  		level = int32(t >> tracebackShift)
    50  	}
    51  	return
    52  }
    53  
    54  var (
    55  	argc int32
    56  	argv **byte
    57  )
    58  
    59  // nosplit for use in linux startup sysargs.
    60  //
    61  //go:nosplit
    62  func argv_index(argv **byte, i int32) *byte {
    63  	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
    64  }
    65  
    66  func args(c int32, v **byte) {
    67  	argc = c
    68  	argv = v
    69  	sysargs(c, v)
    70  }
    71  
    72  func goargs() {
    73  	if GOOS == "windows" {
    74  		return
    75  	}
    76  	argslice = make([]string, argc)
    77  	for i := int32(0); i < argc; i++ {
    78  		argslice[i] = gostringnocopy(argv_index(argv, i))
    79  	}
    80  }
    81  
    82  func goenvs_unix() {
    83  	// TODO(austin): ppc64 in dynamic linking mode doesn't
    84  	// guarantee env[] will immediately follow argv. Might cause
    85  	// problems.
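         	// On Unix the argv array is terminated by a nil pointer (argv[argc] == nil)
         	// and the environment pointers follow immediately after it, so the
         	// environment starts at index argc+1.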
    86  	n := int32(0)
    87  	for argv_index(argv, argc+1+n) != nil {
    88  		n++
    89  	}
    90  
    91  	envs = make([]string, n)
    92  	for i := int32(0); i < n; i++ {
    93  		envs[i] = gostring(argv_index(argv, argc+1+i))
    94  	}
    95  }
    96  
    97  func environ() []string {
    98  	return envs
    99  }
   100  
   101  // TODO: These should be locals in testAtomic64, but we don't 8-byte
   102  // align stack variables on 386.
   103  var test_z64, test_x64 uint64
   104  
   105  func testAtomic64() {
   106  	test_z64 = 42
   107  	test_x64 = 0
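         	// test_z64 holds 42 but the old value passed to Cas64 is 0, so the
         	// compare must fail and leave test_z64 unchanged.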
   108  	if atomic.Cas64(&test_z64, test_x64, 1) {
   109  		throw("cas64 failed")
   110  	}
   111  	if test_x64 != 0 {
   112  		throw("cas64 failed")
   113  	}
   114  	test_x64 = 42
   115  	if !atomic.Cas64(&test_z64, test_x64, 1) {
   116  		throw("cas64 failed")
   117  	}
   118  	if test_x64 != 42 || test_z64 != 1 {
   119  		throw("cas64 failed")
   120  	}
   121  	if atomic.Load64(&test_z64) != 1 {
   122  		throw("load64 failed")
   123  	}
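         	// The constants below do not fit in 32 bits, so a store, add, or exchange
         	// that silently truncated to 32 bits would fail these checks.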
   124  	atomic.Store64(&test_z64, (1<<40)+1)
   125  	if atomic.Load64(&test_z64) != (1<<40)+1 {
   126  		throw("store64 failed")
   127  	}
   128  	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
   129  		throw("xadd64 failed")
   130  	}
   131  	if atomic.Load64(&test_z64) != (2<<40)+2 {
   132  		throw("xadd64 failed")
   133  	}
   134  	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
   135  		throw("xchg64 failed")
   136  	}
   137  	if atomic.Load64(&test_z64) != (3<<40)+3 {
   138  		throw("xchg64 failed")
   139  	}
   140  }
   141  
   142  func check() {
   143  	var (
   144  		a     int8
   145  		b     uint8
   146  		c     int16
   147  		d     uint16
   148  		e     int32
   149  		f     uint32
   150  		g     int64
   151  		h     uint64
   152  		i, i1 float32
   153  		j, j1 float64
   154  		k     unsafe.Pointer
   155  		l     *uint16
   156  		m     [4]byte
   157  	)
   158  	type x1t struct {
   159  		x uint8
   160  	}
   161  	type y1t struct {
   162  		x1 x1t
   163  		y  uint8
   164  	}
   165  	var x1 x1t
   166  	var y1 y1t
   167  
   168  	if unsafe.Sizeof(a) != 1 {
   169  		throw("bad a")
   170  	}
   171  	if unsafe.Sizeof(b) != 1 {
   172  		throw("bad b")
   173  	}
   174  	if unsafe.Sizeof(c) != 2 {
   175  		throw("bad c")
   176  	}
   177  	if unsafe.Sizeof(d) != 2 {
   178  		throw("bad d")
   179  	}
   180  	if unsafe.Sizeof(e) != 4 {
   181  		throw("bad e")
   182  	}
   183  	if unsafe.Sizeof(f) != 4 {
   184  		throw("bad f")
   185  	}
   186  	if unsafe.Sizeof(g) != 8 {
   187  		throw("bad g")
   188  	}
   189  	if unsafe.Sizeof(h) != 8 {
   190  		throw("bad h")
   191  	}
   192  	if unsafe.Sizeof(i) != 4 {
   193  		throw("bad i")
   194  	}
   195  	if unsafe.Sizeof(j) != 8 {
   196  		throw("bad j")
   197  	}
   198  	if unsafe.Sizeof(k) != goarch.PtrSize {
   199  		throw("bad k")
   200  	}
   201  	if unsafe.Sizeof(l) != goarch.PtrSize {
   202  		throw("bad l")
   203  	}
   204  	if unsafe.Sizeof(x1) != 1 {
   205  		throw("bad unsafe.Sizeof x1")
   206  	}
   207  	if unsafe.Offsetof(y1.y) != 1 {
   208  		throw("bad offsetof y1.y")
   209  	}
   210  	if unsafe.Sizeof(y1) != 2 {
   211  		throw("bad unsafe.Sizeof y1")
   212  	}
   213  
   214  	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
   215  		throw("bad timediv")
   216  	}
   217  
   218  	var z uint32
   219  	z = 1
   220  	if !atomic.Cas(&z, 1, 2) {
   221  		throw("cas1")
   222  	}
   223  	if z != 2 {
   224  		throw("cas2")
   225  	}
   226  
   227  	z = 4
   228  	if atomic.Cas(&z, 5, 6) {
   229  		throw("cas3")
   230  	}
   231  	if z != 4 {
   232  		throw("cas4")
   233  	}
   234  
   235  	z = 0xffffffff
   236  	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
   237  		throw("cas5")
   238  	}
   239  	if z != 0xfffffffe {
   240  		throw("cas6")
   241  	}
   242  
   243  	m = [4]byte{1, 1, 1, 1}
   244  	atomic.Or8(&m[1], 0xf0)
   245  	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
   246  		throw("atomicor8")
   247  	}
   248  
   249  	m = [4]byte{0xff, 0xff, 0xff, 0xff}
   250  	atomic.And8(&m[1], 0x1)
   251  	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
   252  		throw("atomicand8")
   253  	}
   254  
   255  	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
   256  	if j == j {
   257  		throw("float64nan")
   258  	}
   259  	if !(j != j) {
   260  		throw("float64nan1")
   261  	}
   262  
   263  	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
   264  	if j == j1 {
   265  		throw("float64nan2")
   266  	}
   267  	if !(j != j1) {
   268  		throw("float64nan3")
   269  	}
   270  
   271  	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
   272  	if i == i {
   273  		throw("float32nan")
   274  	}
    275  	if !(i != i) {
   276  		throw("float32nan1")
   277  	}
   278  
   279  	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
   280  	if i == i1 {
   281  		throw("float32nan2")
   282  	}
    283  	if !(i != i1) {
   284  		throw("float32nan3")
   285  	}
   286  
   287  	testAtomic64()
   288  
   289  	if _FixedStack != round2(_FixedStack) {
   290  		throw("FixedStack is not power-of-2")
   291  	}
   292  
   293  	if !checkASM() {
   294  		throw("assembly checks failed")
   295  	}
   296  }
   297  
   298  type dbgVar struct {
   299  	name  string
   300  	value *int32
   301  }
   302  
   303  // Holds variables parsed from GODEBUG env var,
   304  // except for "memprofilerate" since there is an
   305  // existing int var for that value, which may
   306  // already have an initial value.
   307  var debug struct {
   308  	cgocheck           int32
   309  	clobberfree        int32
   310  	efence             int32
   311  	gccheckmark        int32
   312  	gcpacertrace       int32
   313  	gcshrinkstackoff   int32
   314  	gcstoptheworld     int32
   315  	gctrace            int32
   316  	invalidptr         int32
   317  	madvdontneed       int32 // for Linux; issue 28466
   318  	scavtrace          int32
   319  	scheddetail        int32
   320  	schedtrace         int32
   321  	tracebackancestors int32
   322  	asyncpreemptoff    int32
   323  	harddecommit       int32
   324  	adaptivestackstart int32
   325  
   326  	// debug.malloc is used as a combined debug check
   327  	// in the malloc function and should be set
    328  	// if any of the debug options below is != 0.
   329  	malloc         bool
   330  	allocfreetrace int32
   331  	inittrace      int32
   332  	sbrk           int32
   333  }
   334  
   335  var dbgvars = []dbgVar{
   336  	{"allocfreetrace", &debug.allocfreetrace},
   337  	{"clobberfree", &debug.clobberfree},
   338  	{"cgocheck", &debug.cgocheck},
   339  	{"efence", &debug.efence},
   340  	{"gccheckmark", &debug.gccheckmark},
   341  	{"gcpacertrace", &debug.gcpacertrace},
   342  	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
   343  	{"gcstoptheworld", &debug.gcstoptheworld},
   344  	{"gctrace", &debug.gctrace},
   345  	{"invalidptr", &debug.invalidptr},
   346  	{"madvdontneed", &debug.madvdontneed},
   347  	{"sbrk", &debug.sbrk},
   348  	{"scavtrace", &debug.scavtrace},
   349  	{"scheddetail", &debug.scheddetail},
   350  	{"schedtrace", &debug.schedtrace},
   351  	{"tracebackancestors", &debug.tracebackancestors},
   352  	{"asyncpreemptoff", &debug.asyncpreemptoff},
   353  	{"inittrace", &debug.inittrace},
   354  	{"harddecommit", &debug.harddecommit},
   355  	{"adaptivestackstart", &debug.adaptivestackstart},
   356  }
   357  
   358  var globalGODEBUG string
   359  
   360  func parsedebugvars() {
   361  	// defaults
   362  	debug.cgocheck = 1
   363  	debug.invalidptr = 1
   364  	debug.adaptivestackstart = 1 // go119 - set this to 0 to turn larger initial goroutine stacks off
   365  	if GOOS == "linux" {
   366  		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
   367  		// but doesn't affect many of the statistics that
   368  		// MADV_DONTNEED does until the memory is actually
   369  		// reclaimed. This generally leads to poor user
   370  		// experience, like confusing stats in top and other
   371  		// monitoring tools; and bad integration with
   372  		// management systems that respond to memory usage.
   373  		// Hence, default to MADV_DONTNEED.
   374  		debug.madvdontneed = 1
   375  	}
   376  
   377  	globalGODEBUG = gogetenv("GODEBUG")
   378  	godebugEnv.StoreNoWB(&globalGODEBUG)
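         	// Each comma-separated field is split at '=' into a key and a value; for
         	// example, GODEBUG=gctrace=1,schedtrace=1000 sets debug.gctrace to 1 and
         	// debug.schedtrace to 1000 via the dbgvars table above.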
   379  	for p := globalGODEBUG; p != ""; {
   380  		field := ""
   381  		i := bytealg.IndexByteString(p, ',')
   382  		if i < 0 {
   383  			field, p = p, ""
   384  		} else {
   385  			field, p = p[:i], p[i+1:]
   386  		}
   387  		i = bytealg.IndexByteString(field, '=')
   388  		if i < 0 {
   389  			continue
   390  		}
   391  		key, value := field[:i], field[i+1:]
   392  
   393  		// Update MemProfileRate directly here since it
   394  		// is int, not int32, and should only be updated
   395  		// if specified in GODEBUG.
   396  		if key == "memprofilerate" {
   397  			if n, ok := atoi(value); ok {
   398  				MemProfileRate = n
   399  			}
   400  		} else {
   401  			for _, v := range dbgvars {
   402  				if v.name == key {
   403  					if n, ok := atoi32(value); ok {
   404  						*v.value = n
   405  					}
   406  				}
   407  			}
   408  		}
   409  	}
   410  
   411  	debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0
   412  
   413  	setTraceback(gogetenv("GOTRACEBACK"))
   414  	traceback_env = traceback_cache
   415  }
   416  
   417  //go:linkname setTraceback runtime/debug.SetTraceback
   418  func setTraceback(level string) {
   419  	var t uint32
   420  	switch level {
   421  	case "none":
   422  		t = 0
   423  	case "single", "":
   424  		t = 1 << tracebackShift
   425  	case "all":
   426  		t = 1<<tracebackShift | tracebackAll
   427  	case "system":
   428  		t = 2<<tracebackShift | tracebackAll
   429  	case "crash":
   430  		t = 2<<tracebackShift | tracebackAll | tracebackCrash
   431  	default:
   432  		t = tracebackAll
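         		// Accept a plain numeric level, but only if it survives the
         		// round trip through uint32, i.e. fits in 32 bits.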
   433  		if n, ok := atoi(level); ok && n == int(uint32(n)) {
   434  			t |= uint32(n) << tracebackShift
   435  		}
   436  	}
    437  	// When C owns the process, simply exiting the process on fatal errors
   438  	// and panics is surprising. Be louder and abort instead.
   439  	if islibrary || isarchive {
   440  		t |= tracebackCrash
   441  	}
   442  
   443  	t |= traceback_env
   444  
   445  	atomic.Store(&traceback_cache, t)
   446  }
   447  
    448  // Poor man's 64-bit division.
    449  // This is a very special function; do not use it if you are not sure what you are doing.
    450  // int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
    451  // Handles overflow in a time-specific manner.
    452  // This keeps us within no-split stack limits on 32-bit processors.
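         // For example, check above relies on timediv(12345*1000000000+54321, 1000000000, &rem)
         // returning 12345 with rem set to 54321; if the quotient would overflow an int32,
         // timediv returns 0x7fffffff and sets rem to 0.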
   453  //
   454  //go:nosplit
   455  func timediv(v int64, div int32, rem *int32) int32 {
   456  	res := int32(0)
   457  	for bit := 30; bit >= 0; bit-- {
   458  		if v >= int64(div)<<uint(bit) {
   459  			v = v - (int64(div) << uint(bit))
    460  		// Before this loop, res was 0, so each of these
    461  		// power-of-two increments just sets a single bit.
   462  			res |= 1 << uint(bit)
   463  		}
   464  	}
   465  	if v >= int64(div) {
   466  		if rem != nil {
   467  			*rem = 0
   468  		}
   469  		return 0x7fffffff
   470  	}
   471  	if rem != nil {
   472  		*rem = int32(v)
   473  	}
   474  	return res
   475  }
   476  
   477  // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
   478  
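         // acquirem pins the calling goroutine to its current M by incrementing m.locks;
         // it must be balanced by a later call to releasem, which re-arms any pending
         // preemption request once the lock count drops back to zero.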
   479  //go:nosplit
   480  func acquirem() *m {
   481  	gp := getg()
   482  	gp.m.locks++
   483  	return gp.m
   484  }
   485  
   486  //go:nosplit
   487  func releasem(mp *m) {
   488  	gp := getg()
   489  	mp.locks--
   490  	if mp.locks == 0 && gp.preempt {
   491  		// restore the preemption request in case we've cleared it in newstack
   492  		gp.stackguard0 = stackPreempt
   493  	}
   494  }
   495  
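         // reflect_typelinks returns, for every active module, a pointer to its types
         // section and its typelink offsets, with the main module first.
         //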
   496  //go:linkname reflect_typelinks reflect.typelinks
   497  func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
   498  	modules := activeModules()
   499  	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
   500  	ret := [][]int32{modules[0].typelinks}
   501  	for _, md := range modules[1:] {
   502  		sections = append(sections, unsafe.Pointer(md.types))
   503  		ret = append(ret, md.typelinks)
   504  	}
   505  	return sections, ret
   506  }
   507  
   508  // reflect_resolveNameOff resolves a name offset from a base pointer.
   509  //
   510  //go:linkname reflect_resolveNameOff reflect.resolveNameOff
   511  func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
   512  	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
   513  }
   514  
   515  // reflect_resolveTypeOff resolves an *rtype offset from a base type.
   516  //
   517  //go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
   518  func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
   519  	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
   520  }
   521  
   522  // reflect_resolveTextOff resolves a function pointer offset from a base type.
   523  //
   524  //go:linkname reflect_resolveTextOff reflect.resolveTextOff
   525  func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
   526  	return (*_type)(rtype).textOff(textOff(off))
   528  }
   529  
   530  // reflectlite_resolveNameOff resolves a name offset from a base pointer.
   531  //
   532  //go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
   533  func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
   534  	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
   535  }
   536  
   537  // reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
   538  //
   539  //go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
   540  func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
   541  	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
   542  }
   543  
   544  // reflect_addReflectOff adds a pointer to the reflection offset lookup map.
   545  //
   546  //go:linkname reflect_addReflectOff reflect.addReflectOff
   547  func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
   548  	reflectOffsLock()
   549  	if reflectOffs.m == nil {
   550  		reflectOffs.m = make(map[int32]unsafe.Pointer)
   551  		reflectOffs.minv = make(map[unsafe.Pointer]int32)
   552  		reflectOffs.next = -1
   553  	}
   554  	id, found := reflectOffs.minv[ptr]
   555  	if !found {
   556  		id = reflectOffs.next
   557  		reflectOffs.next-- // use negative offsets as IDs to aid debugging
   558  		reflectOffs.m[id] = ptr
   559  		reflectOffs.minv[ptr] = id
   560  	}
   561  	reflectOffsUnlock()
   562  	return id
   563  }
   564  
