Source file src/runtime/panic.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/goarch"
     9  	"runtime/internal/atomic"
    10  	"runtime/internal/sys"
    11  	"unsafe"
    12  )
    13  
    14  // throwType indicates the current type of ongoing throw, which affects the
    15  // amount of detail printed to stderr. Higher values include more detail.
    16  type throwType uint32
    17  
    18  const (
    19  	// throwTypeNone means that we are not throwing.
    20  	throwTypeNone throwType = iota
    21  
    22  	// throwTypeUser is a throw due to a problem with the application.
    23  	//
    24  	// These throws do not include runtime frames, system goroutines, or
    25  	// frame metadata.
    26  	throwTypeUser
    27  
    28  	// throwTypeRuntime is a throw due to a problem with Go itself.
    29  	//
    30  	// These throws include as much information as possible to aid in
    31  	// debugging the runtime, including runtime frames, system goroutines,
    32  	// and frame metadata.
    33  	throwTypeRuntime
    34  )
    35  
    36  // We have two different ways of doing defers. The older way involves creating a
    37  // defer record at the time that a defer statement is executing and adding it to a
    38  // defer chain. This chain is inspected by the deferreturn call at all function
    39  // exits in order to run the appropriate defer calls. A cheaper way (which we call
    40  // open-coded defers) is used for functions in which no defer statements occur in
    41  // loops. In that case, we simply store the defer function/arg information into
    42  // specific stack slots at the point of each defer statement, as well as setting a
    43  // bit in a bitmask. At each function exit, we add inline code to directly make
    44  // the appropriate defer calls based on the bitmask and fn/arg information stored
    45  // on the stack. During panic/Goexit processing, the appropriate defer calls are
    46  // made using extra funcdata info that indicates the exact stack slots that
    47  // contain the bitmask and defer fn/args.
    48  
    49  // Check to make sure we can really generate a panic. If the panic
    50  // was generated from the runtime, or from inside malloc, then convert
    51  // to a throw of msg.
    52  // pc should be the program counter of the compiler-generated code that
    53  // triggered this panic.
    54  func panicCheck1(pc uintptr, msg string) {
    55  	if goarch.IsWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
    56  		// Note: wasm can't tail call, so we can't get the original caller's pc.
    57  		throw(msg)
    58  	}
    59  	// TODO: is this redundant? How could we be in malloc
    60  	// but not in the runtime? runtime/internal/*, maybe?
    61  	gp := getg()
    62  	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
    63  		throw(msg)
    64  	}
    65  }
    66  
    67  // Same as above, but calling from the runtime is allowed.
    68  //
    69  // Using this function is necessary for any panic that may be
    70  // generated by runtime.sigpanic, since those are always called by the
    71  // runtime.
    72  func panicCheck2(err string) {
    73  	// panic allocates, so to avoid recursive malloc, turn panics
    74  	// during malloc into throws.
    75  	gp := getg()
    76  	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
    77  		throw(err)
    78  	}
    79  }
    80  
    81  // Many of the following panic entry-points turn into throws when they
    82  // happen in various runtime contexts. These should never happen in
    83  // the runtime, and if they do, they indicate a serious issue and
    84  // should not be caught by user code.
    85  //
    86  // The panic{Index,Slice,divide,shift} functions are called by
    87  // code generated by the compiler for out of bounds index expressions,
    88  // out of bounds slice expressions, division by zero, and shift by negative.
    89  // The panicdivide (again), panicoverflow, panicfloat, and panicmem
    90  // functions are called by the signal handler when a signal occurs
    91  // indicating the respective problem.
    92  //
    93  // Since panic{Index,Slice,shift} are never called directly, and
    94  // since the runtime package should never have an out of bounds slice
    95  // or array reference or negative shift, if we see those functions called from the
    96  // runtime package we turn the panic into a throw. That will dump the
    97  // entire runtime stack for easier debugging.
    98  //
    99  // The entry points called by the signal handler will be called from
   100  // runtime.sigpanic, so we can't disallow calls from the runtime to
   101  // these (they always look like they're called from the runtime).
   102  // Hence, for these, we just check for clearly bad runtime conditions.
   103  //
   104  // The panic{Index,Slice} functions are implemented in assembly and tail call
   105  // to the goPanic{Index,Slice} functions below. This is done so we can use
   106  // a space-minimal register calling convention.
   107  
// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
	// x is the signed index that was out of range; y is len(s).
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}
   115  
//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
	// Unsigned variant of goPanicIndex: x is the unsigned index; y is len(s).
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}
   121  
// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
//
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
	// x is the signed high bound that exceeded y == len(s).
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}
   129  
//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
	// Unsigned variant of goPanicSliceAlen.
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}
   135  
//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
	// x is the signed high bound that exceeded y == cap(s).
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}
   141  
//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
	// Unsigned variant of goPanicSliceAcap.
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}
   147  
// failures in the comparisons for s[x:y], 0 <= x <= y
//
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
	// x is the signed low bound that exceeded the high bound y.
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}
   155  
//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
	// Unsigned variant of goPanicSliceB.
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}
   161  
// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	// x is the signed max bound that exceeded y == len(s).
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
// goPanicSlice3AlenU is the unsigned variant of goPanicSlice3Alen.
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
// goPanicSlice3Acap reports s[::x] with x exceeding y == cap(s).
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
// goPanicSlice3AcapU is the unsigned variant of goPanicSlice3Acap.
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}
   179  
// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	// x is the signed high bound that exceeded the max bound y.
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
// goPanicSlice3BU is the unsigned variant of goPanicSlice3B.
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}
   189  
// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	// x is the signed low bound that exceeded the high bound y.
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
// goPanicSlice3CU is the unsigned variant of goPanicSlice3C.
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}
   199  
// failures in the conversion (*[x]T)s, 0 <= x <= y, x == cap(s)
func goPanicSliceConvert(x int, y int) {
	// x is the required array length; y is the slice's actual length.
	panicCheck1(getcallerpc(), "slice length too short to convert to pointer to array")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
}
   205  
// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
// Each stub tail calls the corresponding goPanic* function above
// (see the comment block preceding goPanicIndex).
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
func panicSliceConvert(x int, y int)
   225  
// shiftError is the preallocated error value for negative shift counts.
var shiftError = error(errorString("negative shift amount"))

//go:yeswritebarrierrec
func panicshift() {
	// Called by compiler-generated code for a shift by a negative amount.
	panicCheck1(getcallerpc(), "negative shift amount")
	panic(shiftError)
}
   233  
// divideError is the preallocated error value for integer division by zero.
var divideError = error(errorString("integer divide by zero"))

//go:yeswritebarrierrec
func panicdivide() {
	// May be reached via runtime.sigpanic, so calls from the runtime
	// are allowed; panicCheck2 only guards against panicking in malloc.
	panicCheck2("integer divide by zero")
	panic(divideError)
}
   241  
// overflowError is the preallocated error value for integer overflow traps.
var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	// Called from the signal handler path, so panicCheck2 (not panicCheck1).
	panicCheck2("integer overflow")
	panic(overflowError)
}
   248  
// floatError is the preallocated error value for floating point traps.
var floatError = error(errorString("floating point error"))

func panicfloat() {
	// Called from the signal handler path, so panicCheck2 (not panicCheck1).
	panicCheck2("floating point error")
	panic(floatError)
}
   255  
// memoryError is the preallocated error value for memory faults.
var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	// Called from the signal handler path, so panicCheck2 (not panicCheck1).
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}
   262  
// panicmemAddr is like panicmem but also reports the faulting address.
func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}
   267  
// Create a new deferred function fn, which has no arguments and results.
// The compiler turns a defer statement into a call to this.
func deferproc(fn func()) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// Allocate a heap defer record (usually from the per-P pool) and
	// push it on the front of this goroutine's defer chain.
	d := newdefer()
	if d._panic != nil {
		throw("deferproc: d.panic != nil after newdefer")
	}
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = getcallerpc()
	// We must not be preempted between calling getcallersp and
	// storing it to d.sp because getcallersp's result is a
	// uintptr stack pointer.
	d.sp = getcallersp()

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
   300  
// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack.
//
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	// fn is already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.started = false
	d.heap = false // stack-allocated: freedefer must not return it to a pool
	d.openDefer = false
	d.sp = getcallersp()
	d.pc = getcallerpc()
	// framepc/varp are only used by open-coded defer records; zeroed here.
	d.framepc = 0
	d.varp = 0
	// The lines below implement:
	//   d.panic = nil
	//   d.fd = nil
	//   d.link = gp._defer
	//   gp._defer = d
	// But without write barriers. The first three are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The fourth write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d._panic)) = 0
	*(*uintptr)(unsafe.Pointer(&d.fd)) = 0
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))

	// Like deferproc: return 0 normally, 1 after a recover (see deferproc).
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
   343  
// Each P holds a pool for defers.

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.  The defer is not
// added to any defer chain yet.
func newdefer() *_defer {
	var d *_defer
	// Hold the M so the P can't change underneath us while we touch its pool.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == 0 && sched.deferpool != nil {
		// Local pool is empty: refill it to half capacity from the
		// global (sched) pool, which is a linked list guarded by deferlock.
		lock(&sched.deferlock)
		for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
			d := sched.deferpool
			sched.deferpool = d.link
			d.link = nil
			pp.deferpool = append(pp.deferpool, d)
		}
		unlock(&sched.deferlock)
	}
	if n := len(pp.deferpool); n > 0 {
		// Pop from the tail of the local pool; nil the slot so the
		// pool doesn't keep the record live.
		d = pp.deferpool[n-1]
		pp.deferpool[n-1] = nil
		pp.deferpool = pp.deferpool[:n-1]
	}
	releasem(mp)
	mp, pp = nil, nil

	if d == nil {
		// Allocate new defer.
		d = new(_defer)
	}
	d.heap = true
	return d
}
   378  
// Free the given defer.
// The defer cannot be used after this call.
//
// This is nosplit because the incoming defer is in a perilous state.
// It's not on any defer list, so stack copying won't adjust stack
// pointers in it (namely, d.link). Hence, if we were to copy the
// stack, d could then contain a stale pointer.
//
//go:nosplit
func freedefer(d *_defer) {
	d.link = nil
	// After this point we can copy the stack.

	// A record reaching here with a live _panic or fn is a runtime bug;
	// the helpers below throw (split out so this stays nosplit).
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	if !d.heap {
		// Stack-allocated record (deferprocStack): nothing to recycle.
		return
	}

	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of local cache to the central cache.
		var first, last *_defer
		for len(pp.deferpool) > cap(pp.deferpool)/2 {
			n := len(pp.deferpool)
			d := pp.deferpool[n-1]
			pp.deferpool[n-1] = nil
			pp.deferpool = pp.deferpool[:n-1]
			// Chain the popped records into a list headed by first.
			if first == nil {
				first = d
			} else {
				last.link = d
			}
			last = d
		}
		lock(&sched.deferlock)
		last.link = sched.deferpool
		sched.deferpool = first
		unlock(&sched.deferlock)
	}

	// Zero the record before pooling so it holds no stale pointers.
	*d = _defer{}

	pp.deferpool = append(pp.deferpool, d)

	releasem(mp)
	mp, pp = nil, nil
}
   432  
// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}
   439  
// freedeferfn is split out of freedefer for the same stack-split
// reason as freedeferpanic.
func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}
   444  
// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			return
		}
		// Records for frames further up the stack have a different sp;
		// only run defers belonging to the caller's frame.
		sp := getcallersp()
		if d.sp != sp {
			return
		}
		if d.openDefer {
			done := runOpenDeferFrame(gp, d)
			if !done {
				throw("unfinished open-coded defers in deferreturn")
			}
			gp._defer = d.link
			freedefer(d)
			// If this frame uses open defers, then this
			// must be the only defer record for the
			// frame, so we can just return.
			return
		}

		// Unlink and free the record before the call so a recovering
		// or stack-growing deferred function sees a consistent chain.
		fn := d.fn
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		fn()
	}
}
   479  
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()

	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			// This defer was started by an earlier panic that our Goexit
			// is now superseding: abort that panic and, for non-open
			// records, discard the record entirely.
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			if !d.openDefer {
				d.fn = nil
				gp._defer = d.link
				freedefer(d)
				continue
			}
		}
		d.started = true
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
		if d.openDefer {
			done := runOpenDeferFrame(gp, d)
			if !done {
				// We should always run all defers in the frame,
				// since there is no panic associated with this
				// defer that can be recovered.
				throw("unfinished open-coded defers in Goexit")
			}
			if p.aborted {
				// Since our current defer caused a panic and may
				// have been already freed, just restart scanning
				// for open-coded defers from this frame again.
				addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
			} else {
				addOneOpenDeferFrame(gp, 0, nil)
			}
		} else {
			// Save the pc/sp in deferCallSave(), so we can "recover" back to this
			// loop if necessary.
			deferCallSave(&p, d.fn)
		}
		if p.aborted {
			// We had a recursive panic in the defer d we started, and
			// then did a recover in a defer that was further down the
			// defer chain than d. In the case of an outstanding Goexit,
			// we force the recover to return back to this loop. d will
			// have already been freed if completed, so just continue
			// immediately to the next defer on the chain.
			p.aborted = false
			continue
		}
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}
   563  
   564  // Call all Error and String methods before freezing the world.
   565  // Used when crashing with panicking.
   566  func preprintpanics(p *_panic) {
   567  	defer func() {
   568  		text := "panic while printing panic value"
   569  		switch r := recover().(type) {
   570  		case nil:
   571  			// nothing to do
   572  		case string:
   573  			throw(text + ": " + r)
   574  		default:
   575  			throw(text + ": type " + efaceOf(&r)._type.string())
   576  		}
   577  	}()
   578  	for p != nil {
   579  		switch v := p.arg.(type) {
   580  		case error:
   581  			p.arg = v.Error()
   582  		case stringer:
   583  			p.arg = v.String()
   584  		}
   585  		p = p.link
   586  	}
   587  }
   588  
   589  // Print all currently active panics. Used when crashing.
   590  // Should only be called after preprintpanics.
   591  func printpanics(p *_panic) {
   592  	if p.link != nil {
   593  		printpanics(p.link)
   594  		if !p.link.goexit {
   595  			print("\t")
   596  		}
   597  	}
   598  	if p.goexit {
   599  		return
   600  	}
   601  	print("panic: ")
   602  	printany(p.arg)
   603  	if p.recovered {
   604  		print(" [recovered]")
   605  	}
   606  	print("\n")
   607  }
   608  
// addOneOpenDeferFrame scans the stack (in gentraceback order, from inner frames to
// outer frames) for the first frame (if any) with open-coded defers. If it finds
// one, it adds a single entry to the defer chain for that frame. The entry added
// represents all the defers in the associated open defer frame, and is sorted in
// order with respect to any non-open-coded defers.
//
// addOneOpenDeferFrame stops (possibly without adding a new entry) if it encounters
// an in-progress open defer entry. An in-progress open defer entry means there has
// been a new panic because of a defer in the associated frame. addOneOpenDeferFrame
// does not add an open defer entry past a started entry, because that started entry
// still needs to be finished, and addOneOpenDeferFrame will be called when that started
// entry is completed. The defer removal loop in gopanic() similarly stops at an
// in-progress defer entry. Together, addOneOpenDeferFrame and the defer removal loop
// ensure the invariant that there is no open defer entry further up the stack than
// an in-progress defer, and also that the defer removal loop is guaranteed to remove
// all not-in-progress open defer entries from the defer chain.
//
// If sp is non-nil, addOneOpenDeferFrame starts the stack scan from the frame
// specified by sp. If sp is nil, it uses the sp from the current defer record (which
// has just been finished). Hence, it continues the stack scan from the frame of the
// defer that just finished. It skips any frame that already has a (not-in-progress)
// open-coded _defer record in the defer chain.
//
// Note: All entries of the defer chain (including this new open-coded entry) have
// their pointers (including sp) adjusted properly if the stack moves while
// running deferred functions. Also, it is safe to pass in the sp arg (which is
// the direct result of calling getcallersp()), because all pointer variables
// (including arguments) are adjusted as needed during stack copies.
func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) {
	var prevDefer *_defer
	if sp == nil {
		// Resume the scan from the frame of the defer record that
		// just finished (see header comment).
		prevDefer = gp._defer
		pc = prevDefer.framepc
		sp = unsafe.Pointer(prevDefer.sp)
	}
	systemstack(func() {
		gentraceback(pc, uintptr(sp), 0, gp, 0, nil, 0x7fffffff,
			func(frame *stkframe, unused unsafe.Pointer) bool {
				if prevDefer != nil && prevDefer.sp == frame.sp {
					// Skip the frame for the previous defer that
					// we just finished (and was used to set
					// where we restarted the stack scan)
					return true
				}
				f := frame.fn
				// Frames without open-coded defer funcdata have no
				// open defers; keep scanning outward.
				fd := funcdata(f, _FUNCDATA_OpenCodedDeferInfo)
				if fd == nil {
					return true
				}
				// Insert the open defer record in the
				// chain, in order sorted by sp.
				d := gp._defer
				var prev *_defer
				for d != nil {
					dsp := d.sp
					if frame.sp < dsp {
						break
					}
					if frame.sp == dsp {
						if !d.openDefer {
							throw("duplicated defer entry")
						}
						// Don't add any record past an
						// in-progress defer entry. We don't
						// need it, and more importantly, we
						// want to keep the invariant that
						// there is no open defer entry
						// past an in-progress entry (see
						// header comment).
						if d.started {
							return false
						}
						return true
					}
					prev = d
					d = d.link
				}
				if frame.fn.deferreturn == 0 {
					throw("missing deferreturn")
				}

				d1 := newdefer()
				d1.openDefer = true
				d1._panic = nil
				// These are the pc/sp to set after we've
				// run a defer in this frame that did a
				// recover. We return to a special
				// deferreturn that runs any remaining
				// defers and then returns from the
				// function.
				d1.pc = frame.fn.entry() + uintptr(frame.fn.deferreturn)
				d1.varp = frame.varp
				d1.fd = fd
				// Save the SP/PC associated with current frame,
				// so we can continue stack trace later if needed.
				d1.framepc = frame.pc
				d1.sp = frame.sp
				// Splice d1 into the sp-sorted chain at the point found above.
				d1.link = d
				if prev == nil {
					gp._defer = d1
				} else {
					prev.link = d1
				}
				// Stop stack scanning after adding one open defer record
				return false
			},
			nil, 0)
	})
}
   718  
   719  // readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
   720  // uint32 and a pointer to the byte following the varint.
   721  //
   722  // There is a similar function runtime.readvarint, which takes a slice of bytes,
   723  // rather than an unsafe pointer. These functions are duplicated, because one of
   724  // the two use cases for the functions would get slower if the functions were
   725  // combined.
   726  func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
   727  	var r uint32
   728  	var shift int
   729  	for {
   730  		b := *(*uint8)((unsafe.Pointer(fd)))
   731  		fd = add(fd, unsafe.Sizeof(b))
   732  		if b < 128 {
   733  			return r + uint32(b)<<shift, fd
   734  		}
   735  		r += ((uint32(b) &^ 128) << shift)
   736  		shift += 7
   737  		if shift > 28 {
   738  			panic("Bad varint")
   739  		}
   740  	}
   741  }
   742  
// runOpenDeferFrame runs the active open-coded defers in the frame specified by
// d. It normally processes all active defers in the frame, but stops immediately
// if a defer does a successful recover. It returns true if there are no
// remaining defers to run in the frame.
func runOpenDeferFrame(gp *g, d *_defer) bool {
	done := true
	fd := d.fd

	// Funcdata layout: deferBits offset, number of defers, then one
	// closure offset per defer (all varints; offsets are from d.varp).
	deferBitsOffset, fd := readvarintUnsafe(fd)
	nDefers, fd := readvarintUnsafe(fd)
	deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset)))

	// Run active defers in reverse order of their defer statements.
	for i := int(nDefers) - 1; i >= 0; i-- {
		// read the funcdata info for this defer
		var closureOffset uint32
		closureOffset, fd = readvarintUnsafe(fd)
		if deferBits&(1<<i) == 0 {
			// This defer statement was not reached; skip it.
			continue
		}
		closure := *(*func())(unsafe.Pointer(d.varp - uintptr(closureOffset)))
		d.fn = closure
		// Clear this defer's bit in the frame before calling it, so it
		// can't run twice if a nested panic re-enters this frame.
		deferBits = deferBits &^ (1 << i)
		*(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits
		p := d._panic
		// Call the defer. Note that this can change d.varp if
		// the stack moves.
		deferCallSave(p, d.fn)
		if p != nil && p.aborted {
			break
		}
		d.fn = nil
		if d._panic != nil && d._panic.recovered {
			// A successful recover stops processing; done only if this
			// was the last active defer in the frame.
			done = deferBits == 0
			break
		}
	}

	return done
}
   782  
// deferCallSave calls fn() after saving the caller's pc and sp in the
// panic record. This allows the runtime to return to the Goexit defer
// processing loop, in the unusual case where the Goexit may be
// bypassed by a successful recover.
//
// This is marked as a wrapper by the compiler so it doesn't appear in
// tracebacks.
func deferCallSave(p *_panic, fn func()) {
	// p may be nil when there is no active panic (plain deferreturn path).
	if p != nil {
		p.argp = unsafe.Pointer(getargp())
		p.pc = getcallerpc()
		p.sp = unsafe.Pointer(getcallersp())
	}
	fn()
	// Clear the saved pc/sp if fn returned normally (no recover
	// unwound through us).
	if p != nil {
		p.pc = 0
		p.sp = unsafe.Pointer(nil)
	}
}
   802  
// The implementation of the predeclared function panic.
//
// gopanic links a new _panic record onto the goroutine's panic list and
// then runs the pending deferred calls one at a time. If a deferred call
// recovers, control transfers to recovery via mcall and gopanic does not
// return; if no defer recovers, fatalpanic terminates the process.
func gopanic(e any) {
	gp := getg()
	// A panic in any of the contexts below cannot be handled by the
	// normal defer machinery; print the value and throw instead.
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	// The panic record lives on this frame's stack; push it onto the
	// goroutine's panic list.
	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	atomic.Xadd(&runningPanicDefers, 1)

	// By calculating getcallerpc/getcallersp here, we avoid scanning the
	// gopanic frame (stack scanning is slow...)
	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))

	// Run deferred calls until one of them recovers or the defer list
	// is exhausted.
	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. An earlier panic will not continue running, but we will make sure below that an
		// earlier Goexit does continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			if !d.openDefer {
				// For open-coded defers, we need to process the
				// defer again, in case there are any other defers
				// to call in the frame (not including the defer
				// call that caused the panic).
				d.fn = nil
				gp._defer = d.link
				freedefer(d)
				continue
			}
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		done := true
		if d.openDefer {
			done = runOpenDeferFrame(gp, d)
			if done && !d._panic.recovered {
				addOneOpenDeferFrame(gp, 0, nil)
			}
		} else {
			// Record argp so a recover() inside d.fn can be matched
			// to this frame (see gorecover).
			p.argp = unsafe.Pointer(getargp())
			d.fn()
		}
		p.argp = nil

		// Deferred function did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		if done {
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
		}
		if p.recovered {
			gp._panic = p.link
			if gp._panic != nil && gp._panic.goexit && gp._panic.aborted {
				// A normal recover would bypass/abort the Goexit.  Instead,
				// we return to the processing loop of the Goexit.
				gp.sigcode0 = uintptr(gp._panic.sp)
				gp.sigcode1 = uintptr(gp._panic.pc)
				mcall(recovery)
				throw("bypassed recovery failed") // mcall should not return
			}
			atomic.Xadd(&runningPanicDefers, -1)

			// After a recover, remove any remaining non-started,
			// open-coded defer entries, since the corresponding defers
			// will be executed normally (inline). Any such entry will
			// become stale once we run the corresponding defers inline
			// and exit the associated stack frame. We only remove up to
			// the first started (in-progress) open defer entry, not
			// including the current frame, since any higher entries will
			// be from a higher panic in progress, and will still be
			// needed.
			d := gp._defer
			var prev *_defer
			if !done {
				// Skip our current frame, if not done. It is
				// needed to complete any remaining defers in
				// deferreturn()
				prev = d
				d = d.link
			}
			for d != nil {
				if d.started {
					// This defer is started but we
					// are in the middle of a
					// defer-panic-recover inside of
					// it, so don't remove it or any
					// further defer entries
					break
				}
				if d.openDefer {
					if prev == nil {
						gp._defer = d.link
					} else {
						prev.link = d.link
					}
					newd := d.link
					freedefer(d)
					d = newd
				} else {
					prev = d
					d = d.link
				}
			}

			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)

	fatalpanic(gp._panic) // should not return
	*(*int)(nil) = 0      // not reached
}
   990  
// getargp returns the location where the caller
// writes outgoing function call arguments.
//
// It must not be inlined: the result is defined in terms of this
// function's own caller, so the call frame must really exist.
//
//go:nosplit
//go:noinline
func getargp() uintptr {
	// The caller's outgoing argument area begins just past the
	// fixed-size frame header above its SP.
	return getcallersp() + sys.MinFrameSize
}
   999  
  1000  // The implementation of the predeclared function recover.
  1001  // Cannot split the stack because it needs to reliably
  1002  // find the stack segment of its caller.
  1003  //
  1004  // TODO(rsc): Once we commit to CopyStackAlways,
  1005  // this doesn't need to be nosplit.
  1006  //
  1007  //go:nosplit
  1008  func gorecover(argp uintptr) any {
  1009  	// Must be in a function running as part of a deferred call during the panic.
  1010  	// Must be called from the topmost function of the call
  1011  	// (the function used in the defer statement).
  1012  	// p.argp is the argument pointer of that topmost deferred function call.
  1013  	// Compare against argp reported by caller.
  1014  	// If they match, the caller is the one who can recover.
  1015  	gp := getg()
  1016  	p := gp._panic
  1017  	if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
  1018  		p.recovered = true
  1019  		return p.arg
  1020  	}
  1021  	return nil
  1022  }
  1023  
// sync_throw is exposed to package sync via linkname so that sync can
// trigger a runtime throw without importing runtime internals.
//
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}
  1028  
// sync_fatal is exposed to package sync via linkname so that sync can
// report a user-level fatal error (see fatal) without importing
// runtime internals.
//
//go:linkname sync_fatal sync.fatal
func sync_fatal(s string) {
	fatal(s)
}
  1033  
// throw triggers a fatal error that dumps a stack trace and exits.
//
// throw should be used for runtime-internal fatal errors where Go itself,
// rather than user code, may be at fault for the failure.
//
// throw does not return.
//
//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})

	// throwTypeRuntime requests maximum detail in the traceback.
	fatalthrow(throwTypeRuntime)
}
  1049  
// fatal triggers a fatal error that dumps a stack trace and exits.
//
// fatal is equivalent to throw, but is used when user code is expected to be
// at fault for the failure, such as racing map writes.
//
// fatal does not include runtime frames, system goroutines, or frame metadata
// (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
//
// fatal does not return.
//
//go:nosplit
func fatal(s string) {
	// Everything fatal does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})

	// throwTypeUser suppresses runtime frames and metadata by default.
	fatalthrow(throwTypeUser)
}
  1068  
// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically.
// See startpanic_m (increment) and dopanic_m (decrement).
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex
  1081  
// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
//
// recovery runs on the g0 stack (it is the target of an mcall from
// gopanic); gp is the goroutine being resumed. The defer's pc/sp were
// stashed in gp.sigcode1/gp.sigcode0 by the caller.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1
	gogo(&gp.sched) // resume gp at pc/sp; does not return here
}
  1105  
// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
// t selects how much detail the traceback includes (see throwType);
// the first throw on this M wins if several overlap.
//
//go:nosplit
func fatalthrow(t throwType) {
	// Capture the caller's pc/sp so the traceback starts at the
	// function that called throw/fatal, not here.
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()

	if gp.m.throwing == throwTypeNone {
		gp.m.throwing = t
	}

	// Switch to the system stack to avoid any stack growth, which may make
	// things worse if the runtime is in a bad state.
	systemstack(func() {
		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
  1137  
// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
// msgs is the head of the goroutine's panic list (may be nil).
// fatalpanic does not return.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	// Capture the caller's pc/sp so the traceback starts at the
	// panicking frame, not here.
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			atomic.Xadd(&runningPanicDefers, -1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
  1179  
// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	_g_.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if _g_.m.locks < 0 {
		_g_.m.locks = 1
	}

	// m.dying tracks how many times this M has failed while already
	// dying; each level falls back to a less ambitious way of exiting.
	switch _g_.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		_g_.m.dying = 1
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}
  1239  
// didothers reports whether tracebackothers has already been printed
// during this crash, so concurrent panicking Ms don't repeat it.
var didothers bool

// deadlock is locked twice in dopanic_m by an M that loses the race to
// print, parking it forever while the winning M finishes and exits.
var deadlock mutex
  1242  
// dopanic_m prints the crash report for gp (signal info and tracebacks,
// subject to the GOTRACEBACK level), then releases paniclk. It returns
// whether the caller should crash (dump core) rather than just exit.
// Must run on the system stack; pc/sp locate the frame where the
// traceback should begin.
func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing >= throwTypeRuntime {
			// Only show the runtime (g0) stack at higher detail levels.
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}
  1288  
// canpanic returns false if a signal should throw instead of
// panicking.
//
// gp is the goroutine that received the signal (m.curg), not the
// signal-handling goroutine this code runs on.
//
//go:nosplit
func canpanic(gp *g) bool {
	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	_g_ := getg()
	mp := _g_.m

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp == nil || gp != mp.curg {
		return false
	}
	if mp.locks != 0 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		return false
	}
	if GOOS == "windows" && mp.libcallsp != 0 {
		// In the middle of a cgo/syscall-style library call on Windows.
		return false
	}
	return true
}
  1318  
  1319  // shouldPushSigpanic reports whether pc should be used as sigpanic's
  1320  // return PC (pushing a frame for the call). Otherwise, it should be
  1321  // left alone so that LR is used as sigpanic's return PC, effectively
  1322  // replacing the top-most frame with sigpanic. This is used by
  1323  // preparePanic.
  1324  func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
  1325  	if pc == 0 {
  1326  		// Probably a call to a nil func. The old LR is more
  1327  		// useful in the stack trace. Not pushing the frame
  1328  		// will make the trace look like a call to sigpanic
  1329  		// instead. (Otherwise the trace will end at sigpanic
  1330  		// and we won't get to see who faulted.)
  1331  		return false
  1332  	}
  1333  	// If we don't recognize the PC as code, but we do recognize
  1334  	// the link register as code, then this assumes the panic was
  1335  	// caused by a call to non-code. In this case, we want to
  1336  	// ignore this call to make unwinding show the context.
  1337  	//
  1338  	// If we running C code, we're not going to recognize pc as a
  1339  	// Go function, so just assume it's good. Otherwise, traceback
  1340  	// may try to read a stale LR that looks like a Go code
  1341  	// pointer and wander into the woods.
  1342  	if gp.m.incgo || findfunc(pc).valid() {
  1343  		// This wasn't a bad call, so use PC as sigpanic's
  1344  		// return PC.
  1345  		return true
  1346  	}
  1347  	if findfunc(lr).valid() {
  1348  		// This was a bad call, but the LR is good, so use the
  1349  		// LR as sigpanic's return PC.
  1350  		return false
  1351  	}
  1352  	// Neither the PC or LR is good. Hopefully pushing a frame
  1353  	// will work.
  1354  	return true
  1355  }
  1356  
  1357  // isAbortPC reports whether pc is the program counter at which
  1358  // runtime.abort raises a signal.
  1359  //
  1360  // It is nosplit because it's part of the isgoexception
  1361  // implementation.
  1362  //
  1363  //go:nosplit
  1364  func isAbortPC(pc uintptr) bool {
  1365  	f := findfunc(pc)
  1366  	if !f.valid() {
  1367  		return false
  1368  	}
  1369  	return f.funcID == funcID_abort
  1370  }
  1371  

View as plain text