Source file src/runtime/lockrank_on.go

// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.staticlockranking

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// worldIsStopped is accessed atomically to track world-stops. 1 == world
// stopped.
var worldIsStopped uint32

// lockRankStruct is embedded in mutex
type lockRankStruct struct {
	// static lock ranking of the lock
	rank lockRank
	// pad field to make sure lockRankStruct is a multiple of 8 bytes, even on
	// 32-bit systems.
	pad int
}

func lockInit(l *mutex, rank lockRank) {
	l.rank = rank
}

func getLockRank(l *mutex) lockRank {
	return l.rank
}

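// Illustrative sketch, not part of the original file: how a ranked lock is
// initialized and used. lockInit assigns the static rank once, before the
// lock is first acquired; lockWithRank then records the rank on each
// acquire. The function below is hypothetical; lockRankLeafRank is the real
// rank this file assigns to otherwise unranked locks.
//
//	func exampleRankedLock() {
//		var l mutex
//		lockInit(&l, lockRankLeafRank)    // assign the static rank once
//		lockWithRank(&l, getLockRank(&l)) // acquire; pushes {rank, &l} onto m.locksHeld
//		unlockWithRank(&l)                // release; pops the matching entry
//	}
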
// lockWithRank is like lock(l), but allows the caller to specify a lock rank
// when acquiring a non-static lock.
//
// Note that we need to be careful about stack splits:
//
// This function is not nosplit, thus it may split at function entry. This may
// introduce a new edge in the lock order, but it is no different from any
// other (nosplit) call before this call (including the call to lock() itself).
//
// However, we switch to the systemstack to record the lock held to ensure that
// we record an accurate lock ordering. For example, without systemstack, a
// stack split on entry to lock2() would record stack split locks as taken
// after l, even though l is not actually locked yet.
func lockWithRank(l *mutex, rank lockRank) {
	if l == &debuglock || l == &paniclk {
		// debuglock is only used for println/printlock(). Don't do lock
		// rank recording for it, since print/println are used when
		// printing out a lock ordering problem below.
		//
		// paniclk is only used for fatal throw/panic. Don't do lock
		// rank recording for it, since we throw after reporting a
		// lock ordering problem. Additionally, paniclk may be taken
		// after effectively any lock (anywhere we might panic), which
		// the partial order doesn't cover.
		lock2(l)
		return
	}
	if rank == 0 {
		rank = lockRankLeafRank
	}
	gp := getg()
	// Log the new class.
	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired
		if i > 0 {
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
		lock2(l)
	})
}

// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func printHeldLocks(gp *g) {
	if gp.m.locksHeldLen == 0 {
		println("<none>")
		return
	}

	for j, held := range gp.m.locksHeld[:gp.m.locksHeldLen] {
		println(j, ":", held.rank.String(), held.rank, unsafe.Pointer(gp.m.locksHeld[j].lockAddr))
	}
}

// acquireLockRank acquires a rank which is not associated with a mutex lock.
//
// This function may be called in nosplit context and thus must be nosplit.
//
//go:nosplit
func acquireLockRank(rank lockRank) {
	gp := getg()
	// Log the new class. See comment on lockWithRank.
	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = 0
		gp.m.locksHeldLen++

		// i is the index of the lock being acquired
		if i > 0 {
			checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		}
	})
}

// checkRanks checks if goroutine gp, which has most recently acquired a lock
// with rank 'prevRank', can now acquire a lock with rank 'rank'.
//
//go:systemstack
func checkRanks(gp *g, prevRank, rank lockRank) {
	rankOK := false
	if rank < prevRank {
		// If rank < prevRank, then we definitely have a rank error.
		rankOK = false
	} else if rank == lockRankLeafRank {
		// If the new lock is a leaf lock, then the preceding lock can
		// be anything except another leaf lock.
		rankOK = prevRank < lockRankLeafRank
	} else {
		// We've now verified the total lock ranking, but we also
		// enforce the partial ordering specified by lockPartialOrder.
		// Two locks with the same rank can only be acquired at the
		// same time if explicitly listed in the lockPartialOrder table.
		list := lockPartialOrder[rank]
		for _, entry := range list {
			if entry == prevRank {
				rankOK = true
				break
			}
		}
	}
	if !rankOK {
		printlock()
		println(gp.m.procid, " ======")
		printHeldLocks(gp)
		throw("lock ordering problem")
	}
}

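// For illustration only (not part of the original file), the decision made
// by checkRanks can be read as a pure function; lockPartialOrder is the real
// table, indexed by the rank being acquired, listing its allowed
// predecessors. rankOrderOK is a hypothetical name.
//
//	func rankOrderOK(prevRank, rank lockRank) bool {
//		if rank < prevRank {
//			return false // total order violated outright
//		}
//		if rank == lockRankLeafRank {
//			return prevRank < lockRankLeafRank // a leaf may follow anything but a leaf
//		}
//		// rank >= prevRank: still require prevRank to be an allowed
//		// predecessor in the partial order table.
//		for _, entry := range lockPartialOrder[rank] {
//			if entry == prevRank {
//				return true
//			}
//		}
//		return false
//	}
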
// See comment on lockWithRank regarding stack splitting.
func unlockWithRank(l *mutex) {
	if l == &debuglock || l == &paniclk {
		// See comment at beginning of lockWithRank.
		unlock2(l)
		return
	}
	gp := getg()
	systemstack(func() {
		found := false
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
				found = true
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", l.rank.String(), l.rank, l)
			throw("unlock without matching lock acquire")
		}
		unlock2(l)
	})
}

// releaseLockRank releases a rank which is not associated with a mutex lock.
//
// This function may be called in nosplit context and thus must be nosplit.
//
//go:nosplit
func releaseLockRank(rank lockRank) {
	gp := getg()
	systemstack(func() {
		found := false
		for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
			if gp.m.locksHeld[i].rank == rank && gp.m.locksHeld[i].lockAddr == 0 {
				found = true
				copy(gp.m.locksHeld[i:gp.m.locksHeldLen-1], gp.m.locksHeld[i+1:gp.m.locksHeldLen])
				gp.m.locksHeldLen--
				break
			}
		}
		if !found {
			println(gp.m.procid, ":", rank.String(), rank)
			throw("lockRank release without matching lockRank acquire")
		}
	})
}

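// Sketch of how the rank-only variants pair up (hypothetical function, for
// illustration; it is not part of the original file). A code path that must
// participate in rank checking without holding a real mutex records the rank
// with a zero lockAddr and later releases it:
//
//	func exampleRankOnly() {
//		acquireLockRank(lockRankLeafRank) // pushes {rank, lockAddr: 0}
//		// ... ordered region with no mutex ...
//		releaseLockRank(lockRankLeafRank) // pops the matching lockAddr == 0 entry
//	}
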
// lockWithRankMayAcquire checks that it would be valid to acquire l with the
// given rank, given the locks currently held, without actually acquiring l.
//
// See comment on lockWithRank regarding stack splitting.
func lockWithRankMayAcquire(l *mutex, rank lockRank) {
	gp := getg()
	if gp.m.locksHeldLen == 0 {
		// No possibility of a lock ordering problem if no other locks are held.
		return
	}

	systemstack(func() {
		i := gp.m.locksHeldLen
		if i >= len(gp.m.locksHeld) {
			throw("too many locks held concurrently for rank checking")
		}
		// Temporarily add this lock to the locksHeld list, so
		// checkRanks() will print out the list, including this lock, if
		// there is a lock ordering problem.
		gp.m.locksHeld[i].rank = rank
		gp.m.locksHeld[i].lockAddr = uintptr(unsafe.Pointer(l))
		gp.m.locksHeldLen++
		checkRanks(gp, gp.m.locksHeld[i-1].rank, rank)
		gp.m.locksHeldLen--
	})
}

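// Illustrative use of the may-acquire check (the function below is
// hypothetical, not part of the original file): declaring up front that l
// might be taken reports an ordering problem even on executions where l is
// never actually locked.
//
//	func examplePath(l *mutex) {
//		lockWithRankMayAcquire(l, getLockRank(l)) // check the edge without acquiring
//		// ... code that may or may not call lock(l) ...
//	}
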
// checkLockHeld reports whether gp's M holds l.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func checkLockHeld(gp *g, l *mutex) bool {
	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
		if gp.m.locksHeld[i].lockAddr == uintptr(unsafe.Pointer(l)) {
			return true
		}
	}
	return false
}

// assertLockHeld throws if l is not held by the caller.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertLockHeld(l *mutex) {
	gp := getg()

	held := checkLockHeld(gp, l)
	if held {
		return
	}

	// Crash from the system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires lock ", l, " (rank ", l.rank.String(), "), holding:\n")
		printHeldLocks(gp)
		throw("not holding required lock!")
	})
}

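// Typical shape of an assertion call site (hypothetical function, shown for
// illustration only): a function whose callers must hold a specific lock
// verifies that precondition on entry.
//
//	func exampleProtected(l *mutex) {
//		assertLockHeld(l) // throws, printing held locks, if l is not held
//		// ... read or mutate state guarded by l ...
//	}
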
// assertRankHeld throws if a mutex with rank r is not held by the caller.
//
// This is less precise than assertLockHeld, but can be used in places where a
// pointer to the exact mutex is not available.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertRankHeld(r lockRank) {
	gp := getg()

	for i := gp.m.locksHeldLen - 1; i >= 0; i-- {
		if gp.m.locksHeld[i].rank == r {
			return
		}
	}

	// Crash from the system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires lock with rank ", r.String(), ", holding:\n")
		printHeldLocks(gp)
		throw("not holding required lock!")
	})
}

// worldStopped notes that the world is stopped.
//
// Caller must hold worldsema.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func worldStopped() {
	if stopped := atomic.Xadd(&worldIsStopped, 1); stopped != 1 {
		systemstack(func() {
			print("world stop count=", stopped, "\n")
			throw("recursive world stop")
		})
	}
}

// worldStarted notes that the world is starting.
//
// Caller must hold worldsema.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func worldStarted() {
	if stopped := atomic.Xadd(&worldIsStopped, -1); stopped != 0 {
		systemstack(func() {
			print("world stop count=", stopped, "\n")
			throw("released non-stopped world stop")
		})
	}
}

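// The two bookkeeping calls bracket a world stop. A minimal sketch, assuming
// the caller acquires worldsema as the comments above require (the function
// below is hypothetical, not part of the original file):
//
//	func exampleStopTheWorld() {
//		semacquire(&worldsema)
//		worldStopped() // count 0 -> 1; a nested stop would throw
//		// ... world is stopped; assertWorldStopped() succeeds here ...
//		worldStarted() // count 1 -> 0
//		semrelease(&worldsema)
//	}
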
// checkWorldStopped reports whether the world is stopped. It throws if the
// stop count is inconsistent.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func checkWorldStopped() bool {
	stopped := atomic.Load(&worldIsStopped)
	if stopped > 1 {
		systemstack(func() {
			print("inconsistent world stop count=", stopped, "\n")
			throw("inconsistent world stop count")
		})
	}

	return stopped == 1
}

// assertWorldStopped throws if the world is not stopped. It does not check
// which M stopped the world.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertWorldStopped() {
	if checkWorldStopped() {
		return
	}

	throw("world not stopped")
}

// assertWorldStoppedOrLockHeld throws if the world is not stopped and the
// passed lock is not held.
//
// nosplit to ensure it can be called in as many contexts as possible.
//
//go:nosplit
func assertWorldStoppedOrLockHeld(l *mutex) {
	if checkWorldStopped() {
		return
	}

	gp := getg()
	held := checkLockHeld(gp, l)
	if held {
		return
	}

	// Crash from the system stack to avoid splits that may cause
	// additional issues.
	systemstack(func() {
		printlock()
		print("caller requires world stop or lock ", l, " (rank ", l.rank.String(), "), holding:\n")
		println("<no world stop>")
		printHeldLocks(gp)
		throw("no world stop or required lock!")
	})
}
