Source file src/runtime/lock_futex.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build dragonfly || freebsd || linux
// +build dragonfly freebsd linux

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	futexsleep(addr *uint32, val uint32, ns int64)
//		Atomically,
//			if *addr == val { sleep }
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	futexwakeup(addr *uint32, cnt uint32)
//		If any procs are sleeping on addr, wake up at most cnt.

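// For illustration only (not part of this file): on Linux these two
// primitives are thin wrappers around the futex(2) system call. A
// simplified sketch of the os_linux.go versions, with error handling
// omitted:
//
//	func futexsleep(addr *uint32, val uint32, ns int64) {
//		if ns < 0 {
//			futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, nil, nil, 0)
//			return
//		}
//		var ts timespec
//		ts.setNsec(ns)
//		futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, unsafe.Pointer(&ts), nil, 0)
//	}
//
//	func futexwakeup(addr *uint32, cnt uint32) {
//		futex(unsafe.Pointer(addr), _FUTEX_WAKE_PRIVATE, cnt, nil, nil, 0)
//	}
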
const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads in all states; they do not
// affect the mutex's state.

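// Example interleaving (illustrative; M1 and M2 are two threads):
//
//	M1: Xchg(key, mutex_locked) -> mutex_unlocked   // M1 takes the lock
//	M2: Xchg(key, mutex_locked) -> mutex_locked     // contended
//	M2: Xchg(key, mutex_sleeping) -> mutex_locked   // still held; prepare to sleep
//	M2: futexsleep(key, mutex_sleeping, -1)
//	M1: Xchg(key, mutex_unlocked) -> mutex_sleeping // unlock sees a sleeper
//	M1: futexwakeup(key, 1)                         // M2 wakes, retries with wait = mutex_sleeping
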
// We use the uintptr mutex.key and note.key as a uint32.
//
//go:nosplit
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

func lock2(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either mutex_locked or mutex_sleeping
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l.key from
	// mutex_sleeping to some other value, we must be
	// careful to change it back to mutex_sleeping before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, there is no point in spinning.
	// On multiprocessors, spin for active_spin attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}
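
// For comparison (illustrative, not part of this file): the classic
// userspace futex mutex from Ulrich Drepper's "Futexes Are Tricky" can
// be sketched with the public sync/atomic API. futexWait and futexWake
// are hypothetical wrappers around FUTEX_WAIT/FUTEX_WAKE:
//
//	func lockSketch(key *uint32) {
//		if atomic.CompareAndSwapUint32(key, 0, 1) {
//			return // uncontended fast path
//		}
//		for atomic.SwapUint32(key, 2) != 0 { // announce a possible sleeper
//			futexWait(key, 2) // sleep only while *key == 2
//		}
//	}
//
//	func unlockSketch(key *uint32) {
//		if atomic.SwapUint32(key, 0) == 2 {
//			futexWake(key, 1) // there may be a sleeper; wake one
//		}
//	}
//
// The runtime version above differs mainly in its active and passive
// spinning phases and in using Xchg for the speculative grab.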

func unlock(l *mutex) {
	unlockWithRank(l)
}

func unlock2(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
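
// Note (illustrative): unlock2 wakes at most one sleeper. The woken
// thread re-enters the lock2 loop with wait == mutex_sleeping, so if
// it wins the lock it leaves the state as mutex_sleeping, and the next
// unlock2 will wake another sleeper in turn.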

// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	ns := int64(-1)
	if *cgo_yield != nil {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		ns = 10e6
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}
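
// Typical call pattern (illustrative):
//
//	var n note
//	noteclear(&n)   // arm the note
//	// ... hand &n to another M, which calls notewakeup(&n) exactly once ...
//	notesleep(&n)   // blocks (on g0) until the wakeup arrives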

// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		if *cgo_yield != nil {
			// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
			ns = 10e6
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomic.Load(key32(&n.key)) != 0
}
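
// Example (illustrative): with ns = 50e6 (50ms) and cgo_yield set, the
// loop above sleeps in chunks of at most 10ms, runs the yield hook
// between chunks, and recomputes the remaining ns from the fixed
// deadline after every wakeup, so spurious wakeups do not extend the
// total wait.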

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

// Same as runtime·notetsleep, but called on user g (not g0).
// Calls only nosplit functions between entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}

	entersyscallblock()
	ok := notetsleep_internal(n, ns)
	exitsyscall()
	return ok
}
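
// Illustrative use from a user g, waiting up to five seconds:
//
//	var n note
//	noteclear(&n)
//	// ... arrange for notewakeup(&n) elsewhere ...
//	if notetsleepg(&n, 5e9) { // ns is in nanoseconds
//		// the note fired
//	} else {
//		// timed out
//	}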

func beforeIdle(int64, int64) (*g, bool) {
	return nil, false
}

func checkTimeouts() {}
