1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
|
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build ignore
// mklockrank records the static rank graph of the locks in the
// runtime and generates the rank checking structures in lockrank.go.
package main
import (
"bytes"
"flag"
"fmt"
"go/format"
"internal/dag"
"io"
"log"
"os"
"strings"
)
// ranks describes the lock rank graph. See "go doc internal/dag" for
// the syntax.
//
// "a < b" means a must be acquired before b if both are held
// (or, if b is held, a cannot be acquired).
//
// "NONE < a" means no locks may be held when a is acquired.
//
// If a lock is not given a rank, then it is assumed to be a leaf
// lock, which means no other lock can be acquired while it is held.
// Therefore, leaf locks do not need to be given an explicit rank.
//
// Ranks in all caps are pseudo-nodes that help define order, but do
// not actually define a rank (see isPseudo); they are skipped when
// the Go constants and tables are generated.
//
// Lines starting with # inside the graph text are comments in the
// dag syntax and are ignored by the parser.
//
// TODO: It's often hard to correlate rank names to locks. Change
// these to be more consistent with the locks they label.
const ranks = `
# Sysmon
NONE
< sysmon
< scavenge, forcegc;
# Defer
NONE < defer;
# GC
NONE <
sweepWaiters,
assistQueue,
sweep;
# Test only
NONE < testR, testW;
# Scheduler, timers, netpoll
NONE <
allocmW,
execW,
cpuprof,
pollDesc;
assistQueue,
cpuprof,
forcegc,
pollDesc, # pollDesc can interact with timers, which can lock sched.
scavenge,
sweep,
sweepWaiters,
testR
# Above SCHED are things that can call into the scheduler.
< SCHED
# Below SCHED is the scheduler implementation.
< allocmR,
execR
< sched;
sched < allg, allp;
allp < timers;
timers < netpollInit;
# Channels
scavenge, sweep, testR < hchan;
NONE < notifyList;
hchan, notifyList < sudog;
# Semaphores
NONE < root;
# Itabs
NONE
< itab
< reflectOffs;
# User arena state
NONE < userArenaState;
# Tracing without a P uses a global trace buffer.
scavenge
# Above TRACEGLOBAL can emit a trace event without a P.
< TRACEGLOBAL
# Below TRACEGLOBAL manages the global tracing buffer.
# Note that traceBuf eventually chains to MALLOC, but we never get that far
# in the situation where there's no P.
< traceBuf;
# Starting/stopping tracing traces strings.
traceBuf < traceStrings;
# Malloc
allg,
allocmR,
execR, # May grow stack
execW, # May allocate after BeforeFork
hchan,
notifyList,
reflectOffs,
timers,
traceStrings,
userArenaState
# Above MALLOC are things that can allocate memory.
< MALLOC
# Below MALLOC is the malloc implementation.
< fin,
gcBitsArenas,
mheapSpecial,
mspanSpecial,
spanSetSpine,
MPROF;
# Memory profiling
MPROF < profInsert, profBlock, profMemActive;
profMemActive < profMemFuture;
# Stack allocation and copying
gcBitsArenas,
netpollInit,
profBlock,
profInsert,
profMemFuture,
spanSetSpine,
fin,
root
# Anything that can grow the stack can acquire STACKGROW.
# (Most higher layers imply STACKGROW, like MALLOC.)
< STACKGROW
# Below STACKGROW is the stack allocator/copying implementation.
< gscan;
gscan < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one case,
# where we suspend a G and then shrink its stack, syncadjustsudogs
# can acquire hchan locks while holding gscan. To allow this case,
# we use hchanLeaf instead of hchan.
gscan < hchanLeaf;
# Write barrier
defer,
gscan,
mspanSpecial,
sudog
# Anything that can have write barriers can acquire WB.
# Above WB, we can have write barriers.
< WB
# Below WB is the write barrier implementation.
< wbufSpans;
# Span allocator
stackLarge,
stackpool,
wbufSpans
# Above mheap is anything that can call the span allocator.
< mheap;
# Below mheap is the span allocator implementation.
mheap, mheapSpecial < globalAlloc;
# Execution tracer events (with a P)
hchan,
mheap,
root,
sched,
traceStrings,
notifyList,
fin
# Above TRACE is anything that can create a trace event
< TRACE
< trace
< traceStackTab;
# panic is handled specially. It is implicitly below all other locks.
NONE < panic;
# deadlock is not acquired while holding panic, but it also needs to be
# below all other locks.
panic < deadlock;
# RWMutex internal read lock
allocmR,
allocmW
< allocmRInternal;
execR,
execW
< execRInternal;
testR,
testW
< testRInternal;
`
// cyclicRanks lists lock ranks that allow multiple locks of the same
// rank to be acquired simultaneously. The runtime enforces ordering
// within these ranks using a separate mechanism.
//
// In the generated lockPartialOrder table, each of these ranks lists
// itself as an allowed predecessor (a self-edge); in -dot mode a
// self-loop edge is added for each.
var cyclicRanks = map[string]bool{
	// Multiple timers are locked simultaneously in destroy().
	"timers": true,
	// Multiple hchans are acquired in hchan.sortkey() order in
	// select.
	"hchan": true,
	// Multiple hchanLeafs are acquired in hchan.sortkey() order in
	// syncadjustsudogs().
	"hchanLeaf": true,
	// The point of the deadlock lock is to deadlock.
	"deadlock": true,
}
// main parses the lock rank graph in ranks and writes either the
// generated Go source for lockrank.go (the default) or a Graphviz
// dot rendering of the graph (-dot), to stdout or to the file named
// by -o. Any parse, format, or write error is fatal.
func main() {
	flagO := flag.String("o", "", "write to `file` instead of stdout")
	flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
	flag.Parse()
	if flag.NArg() != 0 {
		// Fprintln rather than Fprintf: the diagnostic must end
		// with a newline so it doesn't run into the shell prompt.
		fmt.Fprintln(os.Stderr, "too many arguments")
		os.Exit(2)
	}
	g, err := dag.Parse(ranks)
	if err != nil {
		log.Fatal(err)
	}
	var out []byte
	if *flagDot {
		var b bytes.Buffer
		g.TransitiveReduction()
		// Add cyclic edges for visualization.
		for k := range cyclicRanks {
			g.AddEdge(k, k)
		}
		// Reverse the graph. It's much easier to read this as
		// a "<" partial order than a ">" partial order. This
		// way, locks are acquired from the top going down
		// and time moves forward over the edges instead of
		// backward.
		g.Transpose()
		generateDot(&b, g)
		out = b.Bytes()
	} else {
		var b bytes.Buffer
		generateGo(&b, g)
		// gofmt the output; this also acts as a sanity check
		// that the generated code is syntactically valid.
		out, err = format.Source(b.Bytes())
		if err != nil {
			log.Fatal(err)
		}
	}
	if *flagO != "" {
		err = os.WriteFile(*flagO, out, 0666)
	} else {
		_, err = os.Stdout.Write(out)
	}
	if err != nil {
		log.Fatal(err)
	}
}
// generateGo emits the Go source of lockrank.go to w: the lockRank
// constants in topological order, the lockNames string table, the
// lockRank.String method, and the lockPartialOrder table derived
// from the transitive closure of g. Pseudo-ranks (all-caps labels)
// appear only as comments and get no constant or table entry.
func generateGo(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.
package runtime
type lockRank int
`)
	// dag.Graph.Topo yields ranks in reverse acquisition order, so
	// flip the slice in place to get lowest rank first.
	topo := g.Topo()
	for lo, hi := 0, len(topo)-1; lo < hi; lo, hi = lo+1, hi-1 {
		topo[lo], topo[hi] = topo[hi], topo[lo]
	}
	fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
lockRankUnknown lockRank = iota
`)
	for _, r := range topo {
		if !isPseudo(r) {
			fmt.Fprintf(w, "\t%s\n", cname(r))
		} else {
			// Pseudo-ranks are kept visible as comments so the
			// generated constant list mirrors the rank graph.
			fmt.Fprintf(w, "\t// %s\n", r)
		}
	}
	fmt.Fprintf(w, `)
// lockRankLeafRank is the rank of lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)
	// String table: one entry per real rank, indexed by its constant.
	fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
	for _, r := range topo {
		if !isPseudo(r) {
			fmt.Fprintf(w, "\t%s: %q,\n", cname(r), r)
		}
	}
	fmt.Fprintf(w, `}
func (rank lockRank) String() string {
if rank == 0 {
return "UNKNOWN"
}
if rank == lockRankLeafRank {
return "LEAF"
}
if rank < 0 || int(rank) >= len(lockNames) {
return "BAD RANK"
}
return lockNames[rank]
}
`)
	// Partial order: for each rank, every non-pseudo rank that may
	// already be held when it is acquired, plus a self-entry for
	// ranks that permit self-cycles.
	fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
	for _, r := range topo {
		if isPseudo(r) {
			continue
		}
		var held []string
		for _, pred := range g.Edges(r) {
			if !isPseudo(pred) {
				held = append(held, cname(pred))
			}
		}
		if cyclicRanks[r] {
			held = append(held, cname(r))
		}
		fmt.Fprintf(w, "\t%s: {%s},\n", cname(r), strings.Join(held, ", "))
	}
	fmt.Fprintf(w, "}\n")
}
// cname returns the Go const name for the given lock rank label,
// e.g. "timers" -> "lockRankTimers".
func cname(label string) string {
	head := strings.ToUpper(label[:1])
	return "lockRank" + head + label[1:]
}
// isPseudo reports whether label names a pseudo-rank: pseudo-ranks
// are written entirely in upper case (e.g. "MALLOC", "SCHED").
func isPseudo(label string) bool {
	upper := strings.ToUpper(label)
	return upper == label
}
// generateDot writes a Graphviz dot rendering of g to w: every node
// is declared first (so ranks with no edges still appear), followed
// by one directed edge per relation in the graph.
func generateDot(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, "digraph g {\n")
	// Declare all nodes up front.
	for _, n := range g.Nodes {
		fmt.Fprintf(w, "%q;\n", n)
	}
	// Emit the edges.
	for _, src := range g.Nodes {
		for _, dst := range g.Edges(src) {
			fmt.Fprintf(w, "%q -> %q;\n", src, dst)
		}
	}
	fmt.Fprintf(w, "}\n")
}
|