src/runtime/mranges.go
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Address range data structure.
//
// This file contains an implementation of a data structure which
// manages ordered address ranges.

package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"unsafe"
)

// addrRange represents a region of address space.
//
// An addrRange must never span a gap in the address space.
type addrRange struct {
	// base and limit together represent the region of address space
	// [base, limit). That is, base is inclusive, limit is exclusive.
	// These are addresses over an offset view of the address space on
	// platforms with a segmented address space, that is, on platforms
	// where arenaBaseOffset != 0.
	base, limit offAddr
}

// makeAddrRange creates a new address range from two virtual addresses.
//
// Throws if the base and limit are not in the same memory segment.
func makeAddrRange(base, limit uintptr) addrRange {
	r := addrRange{offAddr{base}, offAddr{limit}}
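	// Subtracting arenaBaseOffset wraps around exactly when the address
	// is below arenaBaseOffset, so the two comparisons below disagree
	// only when base and limit straddle the segment boundary.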
	if (base-arenaBaseOffset >= base) != (limit-arenaBaseOffset >= limit) {
		throw("addr range base and limit are not in the same memory segment")
	}
	return r
}

// size returns the size of the range in bytes.
func (a addrRange) size() uintptr {
	if !a.base.lessThan(a.limit) {
		return 0
	}
	// Subtraction is safe because limit and base must be in the same
	// segment of the address space.
	return a.limit.diff(a.base)
}

// contains reports whether the range contains the given address.
func (a addrRange) contains(addr uintptr) bool {
	return a.base.lessEqual(offAddr{addr}) && (offAddr{addr}).lessThan(a.limit)
}

// subtract removes from a any overlap with b and returns the new
// range. subtract assumes that a and b either don't overlap at all,
// only overlap on one side, or are equal. If b is strictly contained
// in a, thus forcing a split, it will throw.
func (a addrRange) subtract(b addrRange) addrRange {
	if b.base.lessEqual(a.base) && a.limit.lessEqual(b.limit) {
		return addrRange{}
	} else if a.base.lessThan(b.base) && b.limit.lessThan(a.limit) {
		throw("bad prune")
	} else if b.limit.lessThan(a.limit) && a.base.lessThan(b.limit) {
		a.base = b.limit
	} else if a.base.lessThan(b.base) && b.base.lessThan(a.limit) {
		a.limit = b.base
	}
	return a
}
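
// For illustration only (hypothetical values, not taken from the
// runtime): subtracting an overlapping range trims a on one side,
// while subtracting a superset yields the empty range.
//
//	a := makeAddrRange(0x1000, 0x3000)
//	b := makeAddrRange(0x2000, 0x4000)
//	a = a.subtract(b)                          // a is now [0x1000, 0x2000)
//	a = a.subtract(makeAddrRange(0x0, 0x8000)) // a is now empty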

// takeFromFront takes len bytes from the front of the address range, aligning
// the base to align first. On success, returns the aligned start of the region
// taken and true.
func (a *addrRange) takeFromFront(len uintptr, align uint8) (uintptr, bool) {
	base := alignUp(a.base.addr(), uintptr(align)) + len
	if base > a.limit.addr() {
		return 0, false
	}
	a.base = offAddr{base}
	return base - len, true
}

// takeFromBack takes len bytes from the end of the address range, aligning
// the limit to align after subtracting len. On success, returns the aligned
// start of the region taken and true.
func (a *addrRange) takeFromBack(len uintptr, align uint8) (uintptr, bool) {
	limit := alignDown(a.limit.addr()-len, uintptr(align))
	if a.base.addr() > limit {
		return 0, false
	}
	a.limit = offAddr{limit}
	return limit, true
}
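
// A hedged sketch of the take helpers (hypothetical values): both carve
// an aligned, len-sized region out of the range, from opposite ends.
//
//	r := makeAddrRange(0x1001, 0x3000)
//	start, ok := r.takeFromFront(0x100, 16) // start == 0x1010, r.base is now 0x1110
//	end, ok2 := r.takeFromBack(0x100, 16)   // end == 0x2f00, r.limit is now 0x2f00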

// removeGreaterEqual removes all addresses in a greater than or equal
// to addr and returns the new range.
func (a addrRange) removeGreaterEqual(addr uintptr) addrRange {
	if (offAddr{addr}).lessEqual(a.base) {
		return addrRange{}
	}
	if a.limit.lessEqual(offAddr{addr}) {
		return a
	}
	return makeAddrRange(a.base.addr(), addr)
}

var (
	// minOffAddr is the minimum address in the offset space, and
	// it corresponds to the virtual address arenaBaseOffset.
	minOffAddr = offAddr{arenaBaseOffset}

	// maxOffAddr is the maximum address in the offset address
	// space. It corresponds to the highest virtual address representable
	// by the page alloc chunk and heap arena maps.
	maxOffAddr = offAddr{(((1 << heapAddrBits) - 1) + arenaBaseOffset) & uintptrMask}
)

// offAddr represents an address in a contiguous view
// of the address space on systems where the address space is
// segmented. On other systems, it's just a normal address.
type offAddr struct {
	// a is just the virtual address, but should never be used
	// directly. Call addr() to get this value instead.
	a uintptr
}

// add adds a uintptr offset to the offAddr.
func (l offAddr) add(bytes uintptr) offAddr {
	return offAddr{a: l.a + bytes}
}

// sub subtracts a uintptr offset from the offAddr.
func (l offAddr) sub(bytes uintptr) offAddr {
	return offAddr{a: l.a - bytes}
}

// diff returns the number of bytes between the two offAddrs.
func (l1 offAddr) diff(l2 offAddr) uintptr {
	return l1.a - l2.a
}

// lessThan returns true if l1 is less than l2 in the offset
// address space. Subtracting arenaBaseOffset from both operands
// maps them into the contiguous offset view, so the comparison
// is meaningful even across a segment boundary.
func (l1 offAddr) lessThan(l2 offAddr) bool {
	return (l1.a - arenaBaseOffset) < (l2.a - arenaBaseOffset)
}

// lessEqual returns true if l1 is less than or equal to l2 in
// the offset address space.
func (l1 offAddr) lessEqual(l2 offAddr) bool {
	return (l1.a - arenaBaseOffset) <= (l2.a - arenaBaseOffset)
}

// equal returns true if the two offAddr values are equal.
func (l1 offAddr) equal(l2 offAddr) bool {
	// No need to compare in the offset space, it
	// means the same thing.
	return l1 == l2
}

// addr returns the virtual address for this offset address.
func (l offAddr) addr() uintptr {
	return l.a
}

// atomicOffAddr is like offAddr, but operations on it are atomic.
// It also contains operations to be able to store marked addresses
// to ensure that they're not overwritten until they've been seen.
type atomicOffAddr struct {
	// a contains the offset address, unlike offAddr.
	a atomic.Int64
}

// Clear attempts to store minOffAddr in atomicOffAddr. It may fail
// if a marked value is placed in the box in the meantime.
func (b *atomicOffAddr) Clear() {
	for {
		old := b.a.Load()
		if old < 0 {
			return
		}
		if b.a.CompareAndSwap(old, int64(minOffAddr.addr()-arenaBaseOffset)) {
			return
		}
	}
}

// StoreMin stores addr if it's less than the current value in the
// offset address space and the current value is not marked.
func (b *atomicOffAddr) StoreMin(addr uintptr) {
	new := int64(addr - arenaBaseOffset)
	for {
		old := b.a.Load()
		if old < new {
			return
		}
		if b.a.CompareAndSwap(old, new) {
			return
		}
	}
}

// StoreUnmark attempts to unmark the value in atomicOffAddr and
// replace it with newAddr. markedAddr must be a marked address
// returned by Load. This function will not store newAddr if the
// box no longer contains markedAddr.
func (b *atomicOffAddr) StoreUnmark(markedAddr, newAddr uintptr) {
	b.a.CompareAndSwap(-int64(markedAddr-arenaBaseOffset), int64(newAddr-arenaBaseOffset))
}

// StoreMarked stores addr after first converting it to the offset
// address space and then negating it.
func (b *atomicOffAddr) StoreMarked(addr uintptr) {
	b.a.Store(-int64(addr - arenaBaseOffset))
}

// Load returns the address in the box as a virtual address. It also
// reports whether the value was marked.
func (b *atomicOffAddr) Load() (uintptr, bool) {
	v := b.a.Load()
	wasMarked := false
	if v < 0 {
		wasMarked = true
		v = -v
	}
	return uintptr(v) + arenaBaseOffset, wasMarked
}
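
// An illustrative (non-normative) sketch of the marking protocol, with
// hypothetical addresses:
//
//	var box atomicOffAddr
//	box.StoreMarked(0x5000)       // box holds 0x5000, marked
//	addr, marked := box.Load()    // addr == 0x5000, marked == true
//	box.StoreUnmark(addr, 0x6000) // replaces only if the mark is intact
//	box.StoreMin(0x4000)          // 0x4000 < 0x6000, so it is stored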

// addrRanges is a data structure holding a collection of ranges of
// address space.
//
// The ranges are coalesced eagerly to reduce the
// number of ranges it holds.
//
// The slice backing store for the ranges field is persistentalloc'd
// and thus there is no way to free it.
//
// addrRanges is not thread-safe.
type addrRanges struct {
	// ranges is a slice of ranges sorted by base.
	ranges []addrRange

	// totalBytes is the total amount of address space in bytes counted by
	// this addrRanges.
	totalBytes uintptr

	// sysStat is the stat to track allocations by this type.
	sysStat *sysMemStat
}

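// init initializes a. The given sysStat is the stat against which the
// memory of the ranges backing store is accounted.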
func (a *addrRanges) init(sysStat *sysMemStat) {
	ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
	ranges.len = 0
	ranges.cap = 16
	ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
	a.sysStat = sysStat
	a.totalBytes = 0
}

// findSucc returns the first index in a such that addr is
// less than the base of the addrRange at that index.
func (a *addrRanges) findSucc(addr uintptr) int {
	base := offAddr{addr}

	// Narrow down the search space via a binary search
	// for large addrRanges until we have at most iterMax
	// candidates left.
	const iterMax = 8
	bot, top := 0, len(a.ranges)
	for top-bot > iterMax {
		i := ((top - bot) / 2) + bot
		if a.ranges[i].contains(base.addr()) {
			// a.ranges[i] contains base, so
			// its successor is the next index.
			return i + 1
		}
		if base.lessThan(a.ranges[i].base) {
			// In this case i might actually be
			// the successor, but we can't be sure
			// until we check the ones before it.
			top = i
		} else {
			// In this case we know base is
			// greater than or equal to a.ranges[i].limit,
			// so i is definitely not the successor.
			// We already checked i, so pick the next
			// one.
			bot = i + 1
		}
	}
	// There are top-bot candidates left, so
	// iterate over them and return the first one
	// whose base is strictly greater than addr.
	for i := bot; i < top; i++ {
		if base.lessThan(a.ranges[i].base) {
			return i
		}
	}
	return top
}
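
// For example (illustrative, hypothetical contents): if a holds the
// ranges [0x1000, 0x2000) and [0x3000, 0x4000), then findSucc(0x500)
// returns 0, findSucc(0x1800) and findSucc(0x2500) both return 1, and
// findSucc(0x3800) returns 2.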

// findAddrGreaterEqual returns the smallest address represented by a
// that is >= addr. In particular, if addr is itself represented by a,
// it returns addr. The second return value indicates whether such an
// address exists for addr in a. That is, if addr is larger than any
// address known to a, the second return value will be false.
func (a *addrRanges) findAddrGreaterEqual(addr uintptr) (uintptr, bool) {
	i := a.findSucc(addr)
	if i == 0 {
		return a.ranges[0].base.addr(), true
	}
	if a.ranges[i-1].contains(addr) {
		return addr, true
	}
	if i < len(a.ranges) {
		return a.ranges[i].base.addr(), true
	}
	return 0, false
}
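
// For instance (illustrative): with ranges [0x1000, 0x2000) and
// [0x3000, 0x4000), findAddrGreaterEqual(0x2500) returns (0x3000, true),
// while findAddrGreaterEqual(0x4800) returns (0, false).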

// contains returns true if a covers the address addr.
func (a *addrRanges) contains(addr uintptr) bool {
	i := a.findSucc(addr)
	if i == 0 {
		return false
	}
	return a.ranges[i-1].contains(addr)
}

// add inserts a new address range to a.
//
// r must not overlap with any address range in a and r.size() must be > 0.
func (a *addrRanges) add(r addrRange) {
	// The copies in this function are potentially expensive, but this data
	// structure is meant to represent the Go heap. At worst, copying this
	// would take ~160µs assuming a conservative copying rate of 25 GiB/s (the
	// copy will almost never trigger a page fault) for a 1 TiB heap with 4 MiB
	// arenas which is completely discontiguous. ~160µs is still a lot, but in
	// practice most platforms have 64 MiB arenas (which cuts this by a factor
	// of 16) and Go heaps are usually mostly contiguous, so the chance that
	// an addrRanges even grows to that size is extremely low.

	// An empty range has no effect on the set of addresses represented
	// by a, but passing a zero-sized range is almost always a bug.
	if r.size() == 0 {
		print("runtime: range = {", hex(r.base.addr()), ", ", hex(r.limit.addr()), "}\n")
		throw("attempted to add zero-sized address range")
	}
	// Because we assume r is not currently represented in a,
	// findSucc gives us our insertion index.
	i := a.findSucc(r.base.addr())
	coalescesDown := i > 0 && a.ranges[i-1].limit.equal(r.base)
	coalescesUp := i < len(a.ranges) && r.limit.equal(a.ranges[i].base)
	if coalescesUp && coalescesDown {
		// We have neighbors and they both border us.
		// Merge a.ranges[i-1], r, and a.ranges[i] together into a.ranges[i-1].
		a.ranges[i-1].limit = a.ranges[i].limit

		// Delete a.ranges[i].
		copy(a.ranges[i:], a.ranges[i+1:])
		a.ranges = a.ranges[:len(a.ranges)-1]
	} else if coalescesDown {
		// We have a neighbor at a lower address only and it borders us.
		// Merge the new space into a.ranges[i-1].
		a.ranges[i-1].limit = r.limit
	} else if coalescesUp {
		// We have a neighbor at a higher address only and it borders us.
		// Merge the new space into a.ranges[i].
		a.ranges[i].base = r.base
	} else {
		// We may or may not have neighbors which don't border us.
		// Add the new range.
		if len(a.ranges)+1 > cap(a.ranges) {
			// Grow the array. Note that this leaks the old array, but since
			// we're doubling we have at most 2x waste. For a 1 TiB heap and
			// 4 MiB arenas which are all discontiguous (both very conservative
			// assumptions), this would waste at most 4 MiB of memory.
			oldRanges := a.ranges
			ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
			ranges.len = len(oldRanges) + 1
			ranges.cap = cap(oldRanges) * 2
			ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))

			// Copy in the old array, but make space for the new range.
			copy(a.ranges[:i], oldRanges[:i])
			copy(a.ranges[i+1:], oldRanges[i:])
		} else {
			a.ranges = a.ranges[:len(a.ranges)+1]
			copy(a.ranges[i+1:], a.ranges[i:])
		}
		a.ranges[i] = r
	}
	a.totalBytes += r.size()
}
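
// Coalescing example (illustrative, hypothetical contents): if a holds
// [0x1000, 0x2000) and [0x3000, 0x4000), then add of [0x2000, 0x3000)
// borders both neighbors and merges all three into the single range
// [0x1000, 0x4000).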

// removeLast removes and returns the highest-addressed contiguous range
// of a, or the last nBytes of that range, whichever is smaller. If a is
// empty, it returns an empty range.
func (a *addrRanges) removeLast(nBytes uintptr) addrRange {
	if len(a.ranges) == 0 {
		return addrRange{}
	}
	r := a.ranges[len(a.ranges)-1]
	size := r.size()
	if size > nBytes {
		newEnd := r.limit.sub(nBytes)
		a.ranges[len(a.ranges)-1].limit = newEnd
		a.totalBytes -= nBytes
		return addrRange{newEnd, r.limit}
	}
	a.ranges = a.ranges[:len(a.ranges)-1]
	a.totalBytes -= size
	return r
}
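
// E.g. (illustrative): with a single range [0x1000, 0x3000),
// removeLast(0x1000) returns [0x2000, 0x3000) and leaves [0x1000,
// 0x2000) behind, while removeLast(0x4000) returns the whole range
// and leaves a empty.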

// removeGreaterEqual removes the ranges of a which are above addr, and additionally
// splits any range containing addr.
func (a *addrRanges) removeGreaterEqual(addr uintptr) {
	pivot := a.findSucc(addr)
	if pivot == 0 {
		// addr is before all ranges in a.
		a.totalBytes = 0
		a.ranges = a.ranges[:0]
		return
	}
	removed := uintptr(0)
	for _, r := range a.ranges[pivot:] {
		removed += r.size()
	}
	if r := a.ranges[pivot-1]; r.contains(addr) {
		removed += r.size()
		r = r.removeGreaterEqual(addr)
		if r.size() == 0 {
			pivot--
		} else {
			removed -= r.size()
			a.ranges[pivot-1] = r
		}
	}
	a.ranges = a.ranges[:pivot]
	a.totalBytes -= removed
}
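
// E.g. (illustrative): with ranges [0x1000, 0x2000) and [0x3000,
// 0x4000), removeGreaterEqual(0x1800) trims the first range to
// [0x1000, 0x1800) and drops the second entirely.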

// cloneInto makes a deep clone of a's state into b, reusing
// b's ranges backing store if it is large enough.
func (a *addrRanges) cloneInto(b *addrRanges) {
	if len(a.ranges) > cap(b.ranges) {
		// Grow the array.
		ranges := (*notInHeapSlice)(unsafe.Pointer(&b.ranges))
		ranges.len = 0
		ranges.cap = cap(a.ranges)
		ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
	}
	b.ranges = b.ranges[:len(a.ranges)]
	b.totalBytes = a.totalBytes
	copy(b.ranges, a.ranges)
}