Source file src/runtime/race.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build race

package runtime

import (
	"internal/abi"
	"unsafe"
)

// Public race detection API, present iff built with -race.

func RaceRead(addr unsafe.Pointer)
func RaceWrite(addr unsafe.Pointer)
func RaceReadRange(addr unsafe.Pointer, len int)
func RaceWriteRange(addr unsafe.Pointer, len int)

func RaceErrors() int {
	var n uint64
	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
	return int(n)
}
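
// Example (illustrative only, not part of this file): a test binary
// built with -race can poll RaceErrors to check whether any races were
// reported across a code region. doRacyWork is a hypothetical function.
//
//	before := runtime.RaceErrors()
//	doRacyWork()
//	if runtime.RaceErrors() > before {
//		println("race detected during doRacyWork")
//	}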

//go:nosplit

// RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
// between goroutines. These inform the race detector about actual synchronization
// that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
// sections of code).
// RaceAcquire establishes a happens-before relation with the preceding
// RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
// In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
// RaceAcquire is equivalent to atomic_load(memory_order_acquire).
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}

//go:nosplit

// RaceRelease performs a release operation on addr that
// can synchronize with a later RaceAcquire on addr.
//
// In terms of the C memory model, RaceRelease is equivalent to
// atomic_store(memory_order_release).
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}

//go:nosplit

// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
// relation with the preceding RaceRelease or RaceReleaseMerge on addr.
//
// In terms of the C memory model, RaceReleaseMerge is equivalent to
// atomic_exchange(memory_order_release).
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}

//go:nosplit

// RaceDisable disables handling of race synchronization events in the current goroutine.
// Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
// Non-synchronization events (memory accesses, function entry/exit) still affect
// the race detector.
func RaceDisable() {
	_g_ := getg()
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
	}
	_g_.raceignore++
}

//go:nosplit

// RaceEnable re-enables handling of race events in the current goroutine.
func RaceEnable() {
	_g_ := getg()
	_g_.raceignore--
	if _g_.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
	}
}
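
// Example (illustrative only, not part of this file): code that
// synchronizes through a mechanism the detector cannot observe can
// annotate that synchronization manually. sharedData, externalPublish,
// and externalWait are hypothetical names.
//
//	// Publisher:
//	sharedData = 42
//	runtime.RaceRelease(unsafe.Pointer(&sharedData))
//	externalPublish()
//
//	// Consumer:
//	externalWait()
//	runtime.RaceAcquire(unsafe.Pointer(&sharedData))
//	_ = sharedData // no race reported: happens-before is established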

// Private interface for the runtime.

const raceenabled = true

// For all functions accepting callerpc and pc,
// callerpc is a return PC of the function that calls this function,
// pc is start PC of the function that calls this function.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to read every address
		// because a write might happen to any subobject.
		racereadrangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can read just the start
		// address, as any write must write the first byte.
		racereadpc(addr, callerpc, pc)
	}
}

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.kind & kindMask
	if kind == kindArray || kind == kindStruct {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		racewriterangepc(addr, t.size, callerpc, pc)
	} else {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		racewritepc(addr, callerpc, pc)
	}
}
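
// Example (illustrative sketch, following the callerpc/pc convention
// above): a runtime routine that copies a typed value would typically
// pass its caller's return PC and its own entry PC, e.g.
//
//	if raceenabled {
//		callerpc := getcallerpc()
//		pc := abi.FuncPCABIInternal(typedmemmove) // this function's start PC
//		raceWriteObjectPC(typ, dst, callerpc, pc)
//		raceReadObjectPC(typ, src, callerpc, pc)
//	}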

//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)

type symbolizeCodeContext struct {
	pc   uintptr
	fn   *byte
	file *byte
	line uintptr
	off  uintptr
	res  uintptr
}

var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)

// Callback from C into Go, runs on g0.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}

// raceSymbolizeCode reads ctx.pc and populates the rest of *ctx with
// information about the code at that pc.
//
// The race detector has already subtracted 1 from pcs, so they point to the last
// byte of call instructions (including calls to runtime.racewrite and friends).
//
// If the incoming pc is part of an inlined function, *ctx is populated
// with information about the inlined function, and on return ctx.pc is set
// to a pc in the logically containing function. (The race detector should call this
// function again with that pc.)
//
// If the incoming pc is not part of an inlined function, the return pc is unchanged.
func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	pc := ctx.pc
	fi := findfunc(pc)
	f := fi._Func()
	if f != nil {
		file, line := f.FileLine(pc)
		if line != 0 {
			if inldata := funcdata(fi, _FUNCDATA_InlTree); inldata != nil {
				inltree := (*[1 << 20]inlinedCall)(inldata)
				for {
					ix := pcdatavalue(fi, _PCDATA_InlTreeIndex, pc, nil)
					if ix >= 0 {
						if inltree[ix].funcID == funcID_wrapper {
							// ignore wrappers
							// Back up to an instruction in the "caller".
							pc = f.Entry() + uintptr(inltree[ix].parentPc)
							continue
						}
						ctx.pc = f.Entry() + uintptr(inltree[ix].parentPc) // "caller" pc
						ctx.fn = cfuncnameFromNameoff(fi, inltree[ix].func_)
						ctx.line = uintptr(line)
						ctx.file = &bytes(file)[0] // assume NUL-terminated
						ctx.off = pc - f.Entry()
						ctx.res = 1
						return
					}
					break
				}
			}
			ctx.fn = cfuncname(fi)
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0] // assume NUL-terminated
			ctx.off = pc - f.Entry()
			ctx.res = 1
			return
		}
	}
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}
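
// Illustrative sketch (hypothetical caller logic on the race runtime
// side, not code from this package) of the re-invocation protocol
// described above; emitFrame is a hypothetical helper:
//
//	for {
//		prev := ctx.pc
//		raceSymbolizeCode(ctx)
//		emitFrame(ctx.fn, ctx.file, ctx.line)
//		if ctx.pc == prev {
//			break // outermost (non-inlined) frame reached
//		}
//	}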

type symbolizeDataContext struct {
	addr  uintptr
	heap  uintptr
	start uintptr
	size  uintptr
	name  *byte
	file  *byte
	line  uintptr
	res   uintptr
}

func raceSymbolizeData(ctx *symbolizeDataContext) {
	if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
		ctx.heap = 1
		ctx.start = base
		ctx.size = span.elemsize
		ctx.res = 1
	}
}

// Race runtime functions called via runtime·racecall.
//
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_acquire __tsan_release_acquire
var __tsan_release_acquire byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

//go:linkname __tsan_report_count __tsan_report_count
var __tsan_report_count byte

// Mimic what cmd/cgo would do.
//
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_acquire
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end
//go:cgo_import_static __tsan_report_count

// These are called from race_amd64.s.
//
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange

// start/end of global data (data+bss).
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
var racearenastart uintptr
var racearenaend uintptr

func racefuncenter(callpc uintptr)
func racefuncenterfp(fp uintptr)
func racefuncexit()
func raceread(addr uintptr)
func racewrite(addr uintptr)
func racereadrange(addr, size uintptr)
func racewriterange(addr, size uintptr)
func racereadrangepc1(addr, size, pc uintptr)
func racewriterangepc1(addr, size, pc uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function fn from C race runtime
// with up to 4 uintptr arguments.
func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)

// checks if the address has shadow (i.e. heap or data/bss)
//
//go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool {
	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
}

//go:nosplit
func raceinit() (gctx, pctx uintptr) {
	// cgo is required to initialize libc, which is used by race runtime
	if !iscgo {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), abi.FuncPCABI0(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
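	// For example (illustrative): if end-start == 0x1234 and the page
	// size is 4 KiB, alignUp rounds the length up to 0x2000, so the
	// shadow mapping below covers whole pages as mmap requires.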
	size := alignUp(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}

var raceFiniLock mutex

//go:nosplit
func racefini() {
	// racefini() can only be called once to avoid races.
	// This eventually (via __tsan_fini) calls C.exit which has
	// undefined behavior if called more than once. If the lock is
	// already held it's assumed that the first caller exits the program
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)
	// We're entering external code that may call ExitProcess on
	// Windows.
	osPreemptExtEnter(getg().m)
	racecall(&__tsan_fini, 0, 0, 0, 0)
}

//go:nosplit
func raceproccreate() uintptr {
	var ctx uintptr
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
	return ctx
}

//go:nosplit
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}

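// racemapshadow allocates tsan shadow memory for a newly mapped heap
// region and records the growing [racearenastart, racearenaend) range
// consulted by isvalidaddr; the memory allocator calls it as the heap
// arena grows.
//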
//go:nosplit
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}

//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

//go:nosplit
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}

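// racegostart registers a newly created goroutine with tsan and
// returns the race context to store in the new g. When called on the
// system stack, the spawning goroutine is taken to be m.curg.
//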
//go:nosplit
func racegostart(pc uintptr) uintptr {
	_g_ := getg()
	var spawng *g
	if _g_.m.curg != nil {
		spawng = _g_.m.curg
	} else {
		spawng = _g_
	}

	var racectx uintptr
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
	return racectx
}

//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}

//go:nosplit
func racectxend(racectx uintptr) {
	racecall(&__tsan_go_end, racectx, 0, 0, 0)
}

//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	_g_ := getg()
	if _g_ != _g_.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

//go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func raceacquirectx(racectx uintptr, addr unsafe.Pointer) {
	if !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

//go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleaseacquire(addr unsafe.Pointer) {
	racereleaseacquireg(getg(), addr)
}

//go:nosplit
func racereleaseacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

//go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}

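// racefingo marks the current goroutine as the finalizer goroutine for
// tsan; the runtime's finalizer goroutine calls it before running
// queued finalizers.
//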
//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}

// The declarations below generate ABI wrappers for functions
// implemented in assembly in this package but declared in another
// package.

//go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32
func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)

//go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64
func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)

//go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32
func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)

//go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)

//go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)

//go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)

//go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32
func abigen_sync_atomic_StoreInt32(addr *int32, val int32)

//go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64
func abigen_sync_atomic_StoreInt64(addr *int64, val int64)

//go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32
func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)

//go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64
func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)

//go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32
func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)

//go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64
func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)

//go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32
func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)

//go:linkname abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64
func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)

//go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32
func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)

//go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32
func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)

//go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64
func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)

//go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64
func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)

//go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)

//go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64
func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32
func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64
func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
