Source file src/runtime/mklockrank.go

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build ignore

// mklockrank records the static rank graph of the locks in the
// runtime and generates the rank checking structures in lockrank.go.
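//
// Usage:
//
//	go run mklockrank.go [-o file] [-dot]
//
// By default the generated Go source is written to standard output.
// The -o flag writes it to file instead, and -dot emits a Graphviz
// graph of the rank DAG rather than Go code (see the flag
// definitions in main).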
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"internal/dag"
	"io"
	"log"
	"os"
	"strings"
)

// ranks describes the lock rank graph. See "go doc internal/dag" for
// the syntax.
//
// "a < b" means a must be acquired before b if both are held
// (or, if b is held, a cannot be acquired).
//
// "NONE < a" means no locks may be held when a is acquired.
//
// If a lock is not given a rank, then it is assumed to be a leaf
// lock, which means no other lock can be acquired while it is held.
// Therefore, leaf locks do not need to be given an explicit rank.
//
// Ranks in all caps are pseudo-nodes that help define order, but do
// not actually define a rank.
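//
// For example, "NONE < a; a < b" says that a is acquired with no
// other locks held, and that a must have been acquired before b
// whenever both are held. (Here a and b are illustrative names, not
// ranks in the graph below.)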
//
// TODO: It's often hard to correlate rank names to locks. Change
// these to be more consistent with the locks they label.
const ranks = `
# Sysmon
NONE
< sysmon
< scavenge, forcegc;

# Defer
NONE < defer;

# GC
NONE <
  sweepWaiters,
  assistQueue,
  strongFromWeakQueue,
  sweep;

# Test only
NONE < testR, testW;

NONE < timerSend;

# Scheduler, timers, netpoll
NONE < allocmW, execW, cpuprof, pollCache, pollDesc, wakeableSleep;
scavenge, sweep, testR, wakeableSleep, timerSend < hchan;
assistQueue,
  cpuprof,
  forcegc,
  hchan,
  pollDesc, # pollDesc can interact with timers, which can lock sched.
  scavenge,
  strongFromWeakQueue,
  sweep,
  sweepWaiters,
  testR,
  wakeableSleep
# Above SCHED are things that can call into the scheduler.
< SCHED
# Below SCHED is the scheduler implementation.
< allocmR,
  execR;
allocmR, execR, hchan < sched;
sched < allg, allp;

# Channels
NONE < notifyList;
hchan, notifyList < sudog;

hchan, pollDesc, wakeableSleep < timers;
timers, timerSend < timer < netpollInit;

# Semaphores
NONE < root;

# Itabs
NONE
< itab
< reflectOffs;

# Synctest
hchan, root, timers, timer, notifyList, reflectOffs < synctest;

# User arena state
NONE < userArenaState;

# Tracing without a P uses a global trace buffer.
scavenge
# Above TRACEGLOBAL can emit a trace event without a P.
< TRACEGLOBAL
# Below TRACEGLOBAL manages the global tracing buffer.
# Note that traceBuf eventually chains to MALLOC, but we never get that far
# in the situation where there's no P.
< traceBuf;
# Starting/stopping tracing traces strings.
traceBuf < traceStrings;

# Malloc
allg,
  allocmR,
  allp, # procresize
  execR, # May grow stack
  execW, # May allocate after BeforeFork
  hchan,
  notifyList,
  reflectOffs,
  timer,
  traceStrings,
  userArenaState
# Above MALLOC are things that can allocate memory.
< MALLOC
# Below MALLOC is the malloc implementation.
< fin,
  spanSetSpine,
  mspanSpecial,
  traceTypeTab,
  MPROF;

# We can acquire gcBitsArenas for pinner bits, and
# it's guarded by mspanSpecial.
MALLOC, mspanSpecial < gcBitsArenas;

# Memory profiling
MPROF < profInsert, profBlock, profMemActive;
profMemActive < profMemFuture;

# Stack allocation and copying
gcBitsArenas,
  netpollInit,
  profBlock,
  profInsert,
  profMemFuture,
  spanSetSpine,
  synctest,
  fin,
  root
# Anything that can grow the stack can acquire STACKGROW.
# (Most higher layers imply STACKGROW, like MALLOC.)
< STACKGROW
# Below STACKGROW is the stack allocator/copying implementation.
< gscan;
gscan < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one case,
# where we suspend a G and then shrink its stack, syncadjustsudogs
# can acquire hchan locks while holding gscan. To allow this case,
# we use hchanLeaf instead of hchan.
gscan < hchanLeaf;

# Write barrier
defer,
  gscan,
  mspanSpecial,
  pollCache,
  sudog,
  timer
# Anything that can have write barriers can acquire WB.
# Above WB, we can have write barriers.
< WB
# Below WB is the write barrier implementation.
< wbufSpans;

# Span allocator
stackLarge,
  stackpool,
  wbufSpans
# Above mheap is anything that can call the span allocator.
< mheap;
# Below mheap is the span allocator implementation.
#
# Specials: we're allowed to allocate a special while holding
# an mspanSpecial lock, and they're part of the malloc implementation.
# Pinner bits might be freed by the span allocator.
mheap, mspanSpecial < mheapSpecial;
mheap, mheapSpecial < globalAlloc;

# Execution tracer events (with a P)
hchan,
  mheap,
  root,
  sched,
  traceStrings,
  notifyList,
  fin
# Above TRACE is anything that can create a trace event.
< TRACE
< trace
< traceStackTab;

# panic is handled specially. It is implicitly below all other locks.
NONE < panic;
# deadlock is not acquired while holding panic, but it also needs to be
# below all other locks.
panic < deadlock;
# raceFini is only held while exiting.
panic < raceFini;

# RWMutex internal read lock

allocmR,
  allocmW
< allocmRInternal;

execR,
  execW
< execRInternal;

testR,
  testW
< testRInternal;
`

// cyclicRanks lists lock ranks that allow multiple locks of the same
// rank to be acquired simultaneously. The runtime enforces ordering
// within these ranks using a separate mechanism.
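// Each rank listed here also gets a self-edge in the generated
// lockPartialOrder table (see generateGo below).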
var cyclicRanks = map[string]bool{
	// Multiple timers are locked simultaneously in destroy().
	"timers": true,
	// Multiple hchans are acquired in hchan.sortkey() order in
	// select.
	"hchan": true,
	// Multiple hchanLeafs are acquired in hchan.sortkey() order in
	// syncadjustsudogs().
	"hchanLeaf": true,
	// The point of the deadlock lock is to deadlock.
	"deadlock": true,
}

func main() {
	flagO := flag.String("o", "", "write to `file` instead of stdout")
	flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
	flag.Parse()
	if flag.NArg() != 0 {
		fmt.Fprintln(os.Stderr, "too many arguments")
		os.Exit(2)
	}

	g, err := dag.Parse(ranks)
	if err != nil {
		log.Fatal(err)
	}

	var out []byte
	if *flagDot {
		var b bytes.Buffer
		g.TransitiveReduction()
		// Add cyclic edges for visualization.
		for k := range cyclicRanks {
			g.AddEdge(k, k)
		}
		// Reverse the graph. It's much easier to read this as
		// a "<" partial order than a ">" partial order. This
		// way, locks are acquired from the top going down,
		// and time moves forward over the edges instead of
		// backward.
		g.Transpose()
		generateDot(&b, g)
		out = b.Bytes()
	} else {
		var b bytes.Buffer
		generateGo(&b, g)
		out, err = format.Source(b.Bytes())
		if err != nil {
			log.Fatal(err)
		}
	}

	if *flagO != "" {
		err = os.WriteFile(*flagO, out, 0666)
	} else {
		_, err = os.Stdout.Write(out)
	}
	if err != nil {
		log.Fatal(err)
	}
}

func generateGo(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.

package runtime

type lockRank int

`)

	// Create numeric ranks.
	topo := g.Topo()
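	// Topo returns a topological order of the graph; reverse it in
	// place so that the rank constants emitted below increase from
	// the locks acquired first to those acquired last.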
	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
		topo[i], topo[j] = topo[j], topo[i]
	}
	fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
	lockRankUnknown lockRank = iota

`)
	for _, rank := range topo {
		if isPseudo(rank) {
			fmt.Fprintf(w, "\t// %s\n", rank)
		} else {
			fmt.Fprintf(w, "\t%s\n", cname(rank))
		}
	}
	fmt.Fprintf(w, `)

// lockRankLeafRank is the rank of a lock that does not have a declared rank
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)

	// Create string table.
	fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
	for _, rank := range topo {
		if !isPseudo(rank) {
			fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
		}
	}
	fmt.Fprintf(w, `}

func (rank lockRank) String() string {
	if rank == 0 {
		return "UNKNOWN"
	}
	if rank == lockRankLeafRank {
		return "LEAF"
	}
	if rank < 0 || int(rank) >= len(lockNames) {
		return "BAD RANK"
	}
	return lockNames[rank]
}
`)

	// Create partial order structure.
	fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
	for _, rank := range topo {
		if isPseudo(rank) {
			continue
		}
		list := []string{}
		for _, before := range g.Edges(rank) {
			if !isPseudo(before) {
				list = append(list, cname(before))
			}
		}
		if cyclicRanks[rank] {
			list = append(list, cname(rank))
		}

		fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
	}
	fmt.Fprintf(w, "}\n")
}

// cname returns the Go const name for the given lock rank label.
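// For example, cname("sched") returns "lockRankSched".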
func cname(label string) string {
	return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
}

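// isPseudo reports whether the given label names a pseudo-node: a
// rank written in all caps that helps define the order but does not
// correspond to a real lock rank.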
func isPseudo(label string) bool {
	return strings.ToUpper(label) == label
}

// generateDot emits a Graphviz dot representation of g to w.
func generateDot(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, "digraph g {\n")

	// Define all nodes.
	for _, node := range g.Nodes {
		fmt.Fprintf(w, "%q;\n", node)
	}

	// Create edges.
	for _, node := range g.Nodes {
		for _, to := range g.Edges(node) {
			fmt.Fprintf(w, "%q -> %q;\n", node, to)
		}
	}

	fmt.Fprintf(w, "}\n")
}
