// mklockrank (src/runtime/mklockrank.go): generates the runtime's lock-rank
// checking structures (lockrank.go) from the rank graph declared below.
// NOTE(review): the original copyright/header comment was lost during
// extraction; restore it from the upstream Go repository.
package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"internal/dag"
	"io"
	"log"
	"os"
	"strings"
)
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
// ranks is the lock-rank graph, written in the internal/dag language and
// parsed by dag.Parse in main.
//
//   - "a < b" declares that rank a may already be held when rank b is
//     acquired; the generated lockrank.go comment phrases this as "locks
//     with lower rank must be taken before locks with higher rank".
//   - Lines starting with # are comments.
//   - ALL-CAPS names (SCHED, MALLOC, TRACE, ...) are pseudo-ranks: they
//     label layers of the graph and are emitted only as comments, not as
//     lockRank constants (see isPseudo and generateGo).
//   - NONE presumably means "no predecessors" — confirm against the
//     internal/dag parser.
//
// Ranks that legally self-cycle are listed separately in cyclicRanks.
const ranks = `
# Sysmon
NONE
< sysmon
< scavenge, forcegc;

# Defer
NONE < defer;

# GC
NONE <
  sweepWaiters,
  assistQueue,
  strongFromWeakQueue,
  sweep;

# Test only
NONE < testR, testW;

NONE < timerSend;

# Scheduler, timers, netpoll
NONE < allocmW, execW, cpuprof, pollCache, pollDesc, wakeableSleep;
scavenge, sweep, testR, wakeableSleep, timerSend < hchan;
assistQueue,
  cpuprof,
  forcegc,
  hchan,
  pollDesc, # pollDesc can interact with timers, which can lock sched.
  scavenge,
  strongFromWeakQueue,
  sweep,
  sweepWaiters,
  testR,
  wakeableSleep
# Above SCHED are things that can call into the scheduler.
< SCHED
# Below SCHED is the scheduler implementation.
< allocmR,
  execR;
allocmR, execR, hchan < sched;
sched < allg, allp;

# Channels
NONE < notifyList;
hchan, notifyList < sudog;

hchan, pollDesc, wakeableSleep < timers;
timers, timerSend < timer < netpollInit;

# Semaphores
NONE < root;

# Itabs
NONE
< itab
< reflectOffs;

# Synctest
hchan, root, timers, timer, notifyList, reflectOffs < synctest;

# User arena state
NONE < userArenaState;

# Tracing without a P uses a global trace buffer.
scavenge
# Above TRACEGLOBAL can emit a trace event without a P.
< TRACEGLOBAL
# Below TRACEGLOBAL manages the global tracing buffer.
# Note that traceBuf eventually chains to MALLOC, but we never get that far
# in the situation where there's no P.
< traceBuf;
# Starting/stopping tracing traces strings.
traceBuf < traceStrings;

# Malloc
allg,
  allocmR,
  allp, # procresize
  execR, # May grow stack
  execW, # May allocate after BeforeFork
  hchan,
  notifyList,
  reflectOffs,
  timer,
  traceStrings,
  userArenaState
# Above MALLOC are things that can allocate memory.
< MALLOC
# Below MALLOC is the malloc implementation.
< fin,
  spanSetSpine,
  mspanSpecial,
  traceTypeTab,
  MPROF;

# We can acquire gcBitsArenas for pinner bits, and
# it's guarded by mspanSpecial.
MALLOC, mspanSpecial < gcBitsArenas;

# Memory profiling
MPROF < profInsert, profBlock, profMemActive;
profMemActive < profMemFuture;

# Stack allocation and copying
gcBitsArenas,
  netpollInit,
  profBlock,
  profInsert,
  profMemFuture,
  spanSetSpine,
  synctest,
  fin,
  root
# Anything that can grow the stack can acquire STACKGROW.
# (Most higher layers imply STACKGROW, like MALLOC.)
< STACKGROW
# Below STACKGROW is the stack allocator/copying implementation.
< gscan;
gscan < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one case,
# where we suspend a G and then shrink its stack, syncadjustsudogs
# can acquire hchan locks while holding gscan. To allow this case,
# we use hchanLeaf instead of hchan.
gscan < hchanLeaf;

# Write barrier
defer,
  gscan,
  mspanSpecial,
  pollCache,
  sudog,
  timer
# Anything that can have write barriers can acquire WB.
# Above WB, we can have write barriers.
< WB
# Below WB is the write barrier implementation.
< wbufSpans;

# Span allocator
stackLarge,
  stackpool,
  wbufSpans
# Above mheap is anything that can call the span allocator.
< mheap;
# Below mheap is the span allocator implementation.
#
# Specials: we're allowed to allocate a special while holding
# an mspanSpecial lock, and they're part of the malloc implementation.
# Pinner bits might be freed by the span allocator.
mheap, mspanSpecial < mheapSpecial;
mheap, mheapSpecial < globalAlloc;

# Execution tracer events (with a P)
hchan,
  mheap,
  root,
  sched,
  traceStrings,
  notifyList,
  fin
# Above TRACE is anything that can create a trace event
< TRACE
< trace
< traceStackTab;

# panic is handled specially. It is implicitly below all other locks.
NONE < panic;
# deadlock is not acquired while holding panic, but it also needs to be
# below all other locks.
panic < deadlock;
# raceFini is only held while exiting.
panic < raceFini;

# RWMutex internal read lock

allocmR,
  allocmW
< allocmRInternal;

execR,
  execW
< execRInternal;

testR,
  testW
< testRInternal;
`
229
230
231
232
// cyclicRanks lists the ranks that allow self-cycles: a lock of one of
// these ranks may be acquired while a lock of the same rank is already
// held. generateGo adds each such rank to its own lockPartialOrder entry,
// and main adds a self-edge for each when emitting the -dot graph.
var cyclicRanks = map[string]bool{
	// Multiple timers are locked simultaneously in destroy().
	"timers": true,
	// Multiple hchans are acquired in hchan.sortkey() order in
	// select.
	"hchan": true,
	// Multiple hchanLeafs are acquired in hchan.sortkey() order in
	// syncadjustsudogs().
	"hchanLeaf": true,
	// The point of the deadlock lock is to deadlock.
	"deadlock": true,
}
245
246 func main() {
247 flagO := flag.String("o", "", "write to `file` instead of stdout")
248 flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
249 flag.Parse()
250 if flag.NArg() != 0 {
251 fmt.Fprintf(os.Stderr, "too many arguments")
252 os.Exit(2)
253 }
254
255 g, err := dag.Parse(ranks)
256 if err != nil {
257 log.Fatal(err)
258 }
259
260 var out []byte
261 if *flagDot {
262 var b bytes.Buffer
263 g.TransitiveReduction()
264
265 for k := range cyclicRanks {
266 g.AddEdge(k, k)
267 }
268
269
270
271
272
273 g.Transpose()
274 generateDot(&b, g)
275 out = b.Bytes()
276 } else {
277 var b bytes.Buffer
278 generateGo(&b, g)
279 out, err = format.Source(b.Bytes())
280 if err != nil {
281 log.Fatal(err)
282 }
283 }
284
285 if *flagO != "" {
286 err = os.WriteFile(*flagO, out, 0666)
287 } else {
288 _, err = os.Stdout.Write(out)
289 }
290 if err != nil {
291 log.Fatal(err)
292 }
293 }
294
// generateGo writes the Go source of the runtime's lockrank.go to w:
// the lockRank constant block (one constant per non-pseudo rank, in
// topological order of g), the lockNames table, lockRank.String, and the
// lockPartialOrder table. Pseudo-ranks (ALL-CAPS names) appear only as
// comments in the const block and are skipped everywhere else. The caller
// (main) runs the result through format.Source, so spacing here need not
// be perfect.
func generateGo(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.

package runtime

type lockRank int

`)

	// Create numeric ranks in topological order. The slice from g.Topo()
	// is reversed so that constants ascend from lowest to highest rank —
	// NOTE(review): this assumes Topo returns nodes last-to-first; confirm
	// against internal/dag.
	topo := g.Topo()
	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
		topo[i], topo[j] = topo[j], topo[i]
	}
	fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
	lockRankUnknown lockRank = iota

`)
	for _, rank := range topo {
		if isPseudo(rank) {
			// Pseudo-ranks are layer markers, not real locks: emit as a
			// comment so the iota sequence is not consumed.
			fmt.Fprintf(w, "\t// %s\n", rank)
		} else {
			fmt.Fprintf(w, "\t%s\n", cname(rank))
		}
	}
	fmt.Fprintf(w, `)

// lockRankLeafRank is the rank of lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)

	// Create the rank-to-name table (indexed by the constants above).
	fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
	for _, rank := range topo {
		if !isPseudo(rank) {
			fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
		}
	}
	fmt.Fprintf(w, `}

func (rank lockRank) String() string {
	if rank == 0 {
		return "UNKNOWN"
	}
	if rank == lockRankLeafRank {
		return "LEAF"
	}
	if rank < 0 || int(rank) >= len(lockNames) {
		return "BAD RANK"
	}
	return lockNames[rank]
}
`)

	// Create the partial-order table: for each real rank, the ranks that
	// may already be held when it is acquired (g's edges, minus
	// pseudo-ranks, plus the rank itself if self-cycles are allowed).
	fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
	for _, rank := range topo {
		if isPseudo(rank) {
			continue
		}
		list := []string{}
		for _, before := range g.Edges(rank) {
			if !isPseudo(before) {
				list = append(list, cname(before))
			}
		}
		if cyclicRanks[rank] {
			list = append(list, cname(rank))
		}

		fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
	}
	fmt.Fprintf(w, "}\n")
}
385
386
// cname returns the Go constant name for the rank label, e.g.
// "hchan" -> "lockRankHchan".
func cname(label string) string {
	head, tail := label[:1], label[1:]
	return "lockRank" + strings.ToUpper(head) + tail
}
390
// isPseudo reports whether the label names a pseudo-rank (a layer marker
// rather than a real lock), which by convention is written in ALL CAPS.
func isPseudo(label string) bool {
	return label == strings.ToUpper(label)
}
394
395
396 func generateDot(w io.Writer, g *dag.Graph) {
397 fmt.Fprintf(w, "digraph g {\n")
398
399
400 for _, node := range g.Nodes {
401 fmt.Fprintf(w, "%q;\n", node)
402 }
403
404
405 for _, node := range g.Nodes {
406 for _, to := range g.Edges(node) {
407 fmt.Fprintf(w, "%q -> %q;\n", node, to)
408 }
409 }
410
411 fmt.Fprintf(w, "}\n")
412 }