// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: stack objects and stack tracing
// See the design doc at https://docs.google.com/document/d/1un-Jn47yByHL7I0aVIP_uVCMxjdM5mpelJhiKlIqxkE/edit?usp=sharing
// Also see issue 22350.

// Stack tracing solves the problem of determining which parts of the
// stack are live and should be scanned. It runs as part of scanning
// a single goroutine stack.
//
// Normally determining which parts of the stack are live is easy to
// do statically, as user code has explicit references (reads and
// writes) to stack variables. The compiler can do a simple dataflow
// analysis to determine liveness of stack variables at every point in
// the code. See cmd/compile/internal/gc/plive.go for that analysis.
//
// However, when we take the address of a stack variable, determining
// whether that variable is still live is less clear. We can still
// look for static accesses, but accesses through a pointer to the
// variable are difficult in general to track statically. That pointer
// can be passed among functions on the stack, conditionally retained,
// etc.
//
// Instead, we will track pointers to stack variables dynamically.
// All pointers to stack-allocated variables will themselves be on the
// stack somewhere (or in associated locations, like defer records), so
// we can find them all efficiently.
//
// Stack tracing is organized as a mini garbage collection tracing
// pass. The objects in this garbage collection are all the variables
// on the stack whose address is taken, and which themselves contain a
// pointer. We call these variables "stack objects".
//
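// For instance, in a hypothetical user function like
//
//	func f() {
//		type T struct{ p *int }
//		var t T
//		g(&t) // assume escape analysis keeps t on the stack
//	}
//
// t is a stack object: its address is taken and it contains a
// pointer, so its liveness cannot be determined statically and it
// must be traced dynamically as described below.
//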
// We begin by determining all the stack objects on the stack and all
// the statically live pointers that may point into the stack. We then
// process each pointer to see if it points to a stack object. If it
// does, we scan that stack object. It may contain pointers into the
// heap, in which case those pointers are passed to the main garbage
// collection. It may also contain pointers into the stack, in which
// case we add them to our set of stack pointers.
//
// Once we're done processing all the pointers (including the ones we
// added during processing), we've found all the stack objects that
// are live. Any dead stack objects are not scanned and their contents
// will not keep heap objects live. Unlike the main garbage
// collection, we can't sweep the dead stack objects; they live on in
// a moribund state until the stack frame that contains them is
// popped.
//
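// In outline, the machinery in this file is driven roughly as
// follows (a simplified sketch; the real driver is scanstack in
// mgcmark.go, and scanObject below is a hypothetical placeholder
// for the scanning step):
//
//	var state stackScanState
//	// For each frame: record its address-taken locals with
//	// state.addObject and enqueue its statically live stack
//	// pointers with state.putPtr.
//	state.buildIndex()
//	for {
//		p, conservative := state.getPtr()
//		if p == 0 {
//			break
//		}
//		if obj := state.findObject(p); obj != nil && obj.r != nil {
//			// Heap pointers found in obj feed the main GC work
//			// queue; stack pointers are fed back via putPtr.
//			scanObject(obj, conservative)
//		}
//	}
//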
// A stack can look like this:
//
// +----------+
// | foo()    |
// | +------+ |
// | |  A   | | <---\
// | +------+ |     |
// |          |     |
// | +------+ |     |
// | |  B   | |     |
// | +------+ |     |
// |          |     |
// +----------+     |
// | bar()    |     |
// | +------+ |     |
// | |  C   | | <-\ |
// | +----|-+ |   | |
// |      |   |   | |
// | +----v-+ |   | |
// | |  D  ---------/
// | +------+ |   |
// |          |   |
// +----------+   |
// | baz()    |   |
// | +------+ |   |
// | |  E  -------/
// | +------+ |
// |      ^   |
// | F: --/   |
// |          |
// +----------+
//
// foo() calls bar() calls baz(). Each has a frame on the stack.
// foo() has stack objects A and B.
// bar() has stack objects C and D, with C pointing to D and D pointing to A.
// baz() has a stack object E pointing to C, and a local variable F pointing to E.
//
// Starting from the pointer in local variable F, we will eventually
// scan all of E, C, D, and A (in that order). B is never scanned
// because there is no live pointer to it. If B is also statically
// dead (meaning that foo() never accesses B again after it calls
// bar()), then B's pointers into the heap are not considered live.

package runtime

import (
	"internal/goarch"
	"runtime/internal/sys"
	"unsafe"
)

const stackTraceDebug = false

// Buffer for pointers found during stack tracing.
// Must be smaller than or equal to workbuf.
type stackWorkBuf struct {
	_ sys.NotInHeap
	stackWorkBufHdr
	obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
}

// Header declaration must come after the buf declaration above, because of issue #14620.
type stackWorkBufHdr struct {
	_ sys.NotInHeap
	workbufhdr
	next *stackWorkBuf // linked list of workbufs
	// Note: we could theoretically repurpose lfnode.next as this next pointer.
	// It would save 1 word, but that probably isn't worth busting open
	// the lfnode API.
}

// Buffer for stack objects found on a goroutine stack.
// Must be smaller than or equal to workbuf.
type stackObjectBuf struct {
	_ sys.NotInHeap
	stackObjectBufHdr
	obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject
}

type stackObjectBufHdr struct {
	_ sys.NotInHeap
	workbufhdr
	next *stackObjectBuf
}

func init() {
	if unsafe.Sizeof(stackWorkBuf{}) > unsafe.Sizeof(workbuf{}) {
		panic("stackWorkBuf too big")
	}
	if unsafe.Sizeof(stackObjectBuf{}) > unsafe.Sizeof(workbuf{}) {
		panic("stackObjectBuf too big")
	}
}

// A stackObject represents a variable on the stack that has had
// its address taken.
type stackObject struct {
	_     sys.NotInHeap
	off   uint32             // offset above stack.lo
	size  uint32             // size of object
	r     *stackObjectRecord // info of the object (for ptr/nonptr bits). nil if object has been scanned.
	left  *stackObject       // objects with lower addresses
	right *stackObject       // objects with higher addresses
}
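
// A stack object's absolute address is not stored explicitly; it
// can be recovered from the owning goroutine's stack bounds, e.g.
// (sketch, given the stackScanState s that recorded the object):
//
//	addr := s.stack.lo + uintptr(obj.off)
//	end := addr + uintptr(obj.size)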

// obj.r = r, but with no write barrier.
//
//go:nowritebarrier
func (obj *stackObject) setRecord(r *stackObjectRecord) {
	// Types of stack objects are always in read-only memory, not the heap.
	// So not using a write barrier is ok.
	*(*uintptr)(unsafe.Pointer(&obj.r)) = uintptr(unsafe.Pointer(r))
}

// A stackScanState keeps track of the state used during the GC walk
// of a goroutine.
type stackScanState struct {
	cache pcvalueCache

	// stack limits
	stack stack

	// conservative indicates that the next frame must be scanned conservatively.
	// This applies only to the innermost frame at an async safe-point.
	conservative bool

	// buf contains the set of possible pointers to stack objects.
	// Organized as a LIFO linked list of buffers.
	// All buffers except possibly the head buffer are full.
	buf     *stackWorkBuf
	freeBuf *stackWorkBuf // keep around one free buffer for allocation hysteresis

	// cbuf contains conservative pointers to stack objects. If
	// all pointers to a stack object are obtained via
	// conservative scanning, then the stack object may be dead
	// and may contain dead pointers, so it must be scanned
	// defensively.
	cbuf *stackWorkBuf

	// list of stack objects
	// Objects are in increasing address order.
	head  *stackObjectBuf
	tail  *stackObjectBuf
	nobjs int

	// root of binary tree for fast object lookup by address
	// Initialized by buildIndex.
	root *stackObject
}

// Add p as a potential pointer to a stack object.
// p must be a stack address.
func (s *stackScanState) putPtr(p uintptr, conservative bool) {
	if p < s.stack.lo || p >= s.stack.hi {
		throw("address not a stack address")
	}
	head := &s.buf
	if conservative {
		head = &s.cbuf
	}
	buf := *head
	if buf == nil {
		// Initial setup.
		buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
		buf.nobj = 0
		buf.next = nil
		*head = buf
	} else if buf.nobj == len(buf.obj) {
		if s.freeBuf != nil {
			buf = s.freeBuf
			s.freeBuf = nil
		} else {
			buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
		}
		buf.nobj = 0
		buf.next = *head
		*head = buf
	}
	buf.obj[buf.nobj] = p
	buf.nobj++
}
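
// For example (sketch): while scanning a frame, a precisely known
// pointer slot p and, at an async safe-point, a word w that merely
// looks like a stack address would be enqueued as
//
//	s.putPtr(p, false) // precise
//	s.putPtr(w, true)  // conservative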

// Remove and return a potential pointer to a stack object.
// Returns 0 if there are no more pointers available.
//
// This prefers non-conservative pointers so we scan stack objects
// precisely if there are any non-conservative pointers to them.
func (s *stackScanState) getPtr() (p uintptr, conservative bool) {
	for _, head := range []**stackWorkBuf{&s.buf, &s.cbuf} {
		buf := *head
		if buf == nil {
			// Never had any data.
			continue
		}
		if buf.nobj == 0 {
			if s.freeBuf != nil {
				// Free old freeBuf.
				putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
			}
			// Move buf to the freeBuf.
			s.freeBuf = buf
			buf = buf.next
			*head = buf
			if buf == nil {
				// No more data in this list.
				continue
			}
		}
		buf.nobj--
		return buf.obj[buf.nobj], head == &s.cbuf
	}
	// No more data in either list.
	if s.freeBuf != nil {
		putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
		s.freeBuf = nil
	}
	return 0, false
}

// addObject adds a stack object at addr, whose layout is described by the record r, to the set of stack objects.
func (s *stackScanState) addObject(addr uintptr, r *stackObjectRecord) {
	x := s.tail
	if x == nil {
		// initial setup
		x = (*stackObjectBuf)(unsafe.Pointer(getempty()))
		x.next = nil
		s.head = x
		s.tail = x
	}
	if x.nobj > 0 && uint32(addr-s.stack.lo) < x.obj[x.nobj-1].off+x.obj[x.nobj-1].size {
		throw("objects added out of order or overlapping")
	}
	if x.nobj == len(x.obj) {
		// full buffer - allocate a new buffer, add to end of linked list
		y := (*stackObjectBuf)(unsafe.Pointer(getempty()))
		y.next = nil
		x.next = y
		s.tail = y
		x = y
	}
	obj := &x.obj[x.nobj]
	x.nobj++
	obj.off = uint32(addr - s.stack.lo)
	obj.size = uint32(r.size)
	obj.setRecord(r)
	// obj.left and obj.right will be initialized by buildIndex before use.
	s.nobjs++
}

// buildIndex initializes s.root to a binary search tree.
// It should be called after all addObject calls but before
// any call of findObject.
func (s *stackScanState) buildIndex() {
	s.root, _, _ = binarySearchTree(s.head, 0, s.nobjs)
}

// Build a binary search tree with the n objects in the list
// x.obj[idx], x.obj[idx+1], ..., x.next.obj[0], ...
// Returns the root of that tree, and the buf+idx of the nth object after x.obj[idx].
// (The first object that was not included in the binary search tree.)
// If n == 0, returns nil, x, idx.
func binarySearchTree(x *stackObjectBuf, idx int, n int) (root *stackObject, restBuf *stackObjectBuf, restIdx int) {
	if n == 0 {
		return nil, x, idx
	}
	var left, right *stackObject
	left, x, idx = binarySearchTree(x, idx, n/2)
	root = &x.obj[idx]
	idx++
	if idx == len(x.obj) {
		x = x.next
		idx = 0
	}
	right, x, idx = binarySearchTree(x, idx, n-n/2-1)
	root.left = left
	root.right = right
	return root, x, idx
}
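
// For example, with n = 3 objects o0 < o1 < o2 (in address order),
// the recursion builds the left subtree from the first n/2 = 1
// object, takes the next object as the root, and builds the right
// subtree from the remaining n-n/2-1 = 1 object:
//
//	  o1
//	 /  \
//	o0   o2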

// findObject returns the stack object containing address a, if any.
// Must have called buildIndex previously.
func (s *stackScanState) findObject(a uintptr) *stackObject {
	off := uint32(a - s.stack.lo)
	obj := s.root
	for {
		if obj == nil {
			return nil
		}
		if off < obj.off {
			obj = obj.left
			continue
		}
		if off >= obj.off+obj.size {
			obj = obj.right
			continue
		}
		return obj
	}
}
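
// For example, if the stack contains objects spanning offsets
// [16,24) and [64,88), then findObject(s.stack.lo+70) descends from
// the root and returns the second object (since 64 <= 70 < 88),
// while findObject(s.stack.lo+30) falls between the two objects and
// returns nil.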