Diffstat (limited to 'src/runtime/slice.go')
-rw-r--r--  src/runtime/slice.go  347
1 file changed, 347 insertions, 0 deletions
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
new file mode 100644
index 0000000..459dc88
--- /dev/null
+++ b/src/runtime/slice.go
@@ -0,0 +1,347 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}

// A notInHeapSlice is a slice backed by runtime/internal/sys.NotInHeap memory.
type notInHeapSlice struct {
	array *notInHeap
	len   int
	cap   int
}

func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}

func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}

// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		var overflow bool
		tomem, overflow = math.MulUintptr(et.size, uintptr(tolen))
		if overflow || tomem > maxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		copymem = et.size * uintptr(fromlen)
	} else {
		// fromlen is a known good length that is equal to or greater than tolen,
		// which makes tolen a good slice length too, since the from and to slices
		// have the same element width.
		tomem = et.size * uintptr(tolen)
		copymem = tomem
	}

	var to unsafe.Pointer
	if et.ptrdata == 0 {
		to = mallocgc(tomem, nil, false)
		if copymem < tomem {
			memclrNoHeapPointers(add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = mallocgc(tomem, et, true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in old.array since we know the destination slice
			// only contains nil pointers because it has been cleared during alloc.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem)
		}
	}

	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(makeslicecopy)
		racereadrangepc(from, copymem, callerpc, pc)
	}
	if msanenabled {
		msanread(from, copymem)
	}
	if asanenabled {
		asanread(from, copymem)
	}

	memmove(to, from, copymem)

	return to
}

func makeslice(et *_type, len, cap int) unsafe.Pointer {
	mem, overflow := math.MulUintptr(et.size, uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
		mem, overflow := math.MulUintptr(et.size, uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	return mallocgc(mem, et, true)
}

func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
	len := int(len64)
	if int64(len) != len64 {
		panicmakeslicelen()
	}

	cap := int(cap64)
	if int64(cap) != cap64 {
		panicmakeslicecap()
	}

	return makeslice(et, len, cap)
}
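Annotation (not part of the diff): a minimal user-level sketch of the constructs these entry points back. make([]T, len, cap) is serviced by makeslice, and a make-followed-by-copy pattern can be lowered by the compiler to a single makeslicecopy call, which allocates and copies in one step instead of zeroing memory that is about to be overwritten. Whether the lowering fires is a compiler implementation detail and may differ between Go versions.

package main

import "fmt"

func main() {
	src := []int{1, 2, 3, 4}

	// Serviced by makeslice: zeroed storage for 8 ints, len 4, cap 8.
	// Panics at runtime if len or cap is out of range.
	s := make([]int, 4, 8)

	// make+copy pattern: the compiler may combine these two statements
	// into one makeslicecopy call.
	dst := make([]int, len(src))
	copy(dst, src)

	fmt.Println(s, dst) // [0 0 0 0] [1 2 3 4]
}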
// This is a wrapper over runtime/internal/math.MulUintptr,
// so the compiler can recognize and treat it as an intrinsic.
func mulUintptr(a, b uintptr) (uintptr, bool) {
	return math.MulUintptr(a, b)
}

// growslice allocates new backing store for a slice.
//
// arguments:
//
//	oldPtr = pointer to the slice's backing array
//	newLen = new length (= oldLen + num)
//	oldCap = original slice's capacity
//	num = number of elements being added
//	et = element type
//
// return values:
//
//	newPtr = pointer to the new backing store
//	newLen = same value as the argument
//	newCap = capacity of the new backing store
//
// Requires that uint(newLen) > uint(oldCap).
// Assumes the original slice length is newLen - num.
//
// A new backing store is allocated with space for at least newLen elements.
// Existing entries [0, oldLen) are copied over to the new backing store.
// Added entries [oldLen, newLen) are not initialized by growslice
// (although for pointer-containing element types, they are zeroed). They
// must be initialized by the caller.
// Trailing entries [newLen, newCap) are zeroed.
//
// growslice's odd calling convention makes the generated code that calls
// this function simpler. In particular, it accepts and returns the
// new length so that the old length is not live (does not need to be
// spilled/restored) and the new length is returned (also does not need
// to be spilled/restored).
func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
	oldLen := newLen - num
	if raceenabled {
		callerpc := getcallerpc()
		racereadrangepc(oldPtr, uintptr(oldLen*int(et.size)), callerpc, abi.FuncPCABIInternal(growslice))
	}
	if msanenabled {
		msanread(oldPtr, uintptr(oldLen*int(et.size)))
	}
	if asanenabled {
		asanread(oldPtr, uintptr(oldLen*int(et.size)))
	}

	if newLen < 0 {
		panic(errorString("growslice: len out of range"))
	}

	if et.size == 0 {
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve oldPtr in this case.
		return slice{unsafe.Pointer(&zerobase), newLen, newLen}
	}

	newcap := oldCap
	doublecap := newcap + newcap
	if newLen > doublecap {
		newcap = newLen
	} else {
		const threshold = 256
		if oldCap < threshold {
			newcap = doublecap
		} else {
			// Check 0 < newcap to detect overflow
			// and prevent an infinite loop.
			for 0 < newcap && newcap < newLen {
				// Transition from growing 2x for small slices
				// to growing 1.25x for large slices. This formula
				// gives a smooth-ish transition between the two.
				newcap += (newcap + 3*threshold) / 4
			}
			// Set newcap to the requested cap when
			// the newcap calculation overflowed.
			if newcap <= 0 {
				newcap = newLen
			}
		}
	}
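	// Annotation (not part of the original file): worked examples of the
	// growth step above. Each iteration multiplies newcap by roughly
	// 1.25 + 3*threshold/(4*newcap), so the factor starts near 2x at the
	// threshold and approaches 1.25x as capacities get large:
	//
	//	newcap = 256:  256 + (256 + 768)/4   = 512  (2.00x)
	//	newcap = 512:  512 + (512 + 768)/4   = 832  (1.62x)
	//	newcap = 1024: 1024 + (1024 + 768)/4 = 1472 (1.44x)
	//	newcap = 4096: 4096 + (4096 + 768)/4 = 5312 (1.30x)
	//
	// The capacities observed by user code can be larger still, because
	// the switch below rounds the byte size up to an allocator size class.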
	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.size.
	// For 1 we don't need any division/multiplication.
	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	switch {
	case et.size == 1:
		lenmem = uintptr(oldLen)
		newlenmem = uintptr(newLen)
		capmem = roundupsize(uintptr(newcap))
		overflow = uintptr(newcap) > maxAlloc
		newcap = int(capmem)
	case et.size == goarch.PtrSize:
		lenmem = uintptr(oldLen) * goarch.PtrSize
		newlenmem = uintptr(newLen) * goarch.PtrSize
		capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
		newcap = int(capmem / goarch.PtrSize)
	case isPowerOfTwo(et.size):
		var shift uintptr
		if goarch.PtrSize == 8 {
			// Mask shift for better code generation.
			shift = uintptr(sys.TrailingZeros64(uint64(et.size))) & 63
		} else {
			shift = uintptr(sys.TrailingZeros32(uint32(et.size))) & 31
		}
		lenmem = uintptr(oldLen) << shift
		newlenmem = uintptr(newLen) << shift
		capmem = roundupsize(uintptr(newcap) << shift)
		overflow = uintptr(newcap) > (maxAlloc >> shift)
		newcap = int(capmem >> shift)
		capmem = uintptr(newcap) << shift
	default:
		lenmem = uintptr(oldLen) * et.size
		newlenmem = uintptr(newLen) * et.size
		capmem, overflow = math.MulUintptr(et.size, uintptr(newcap))
		capmem = roundupsize(capmem)
		newcap = int(capmem / et.size)
		capmem = uintptr(newcap) * et.size
	}

	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	//	type T [1<<27 + 1]int64
	//
	//	var d T
	//	var s []T
	//
	//	func main() {
	//		s = append(s, d, d, d, d)
	//		print(len(s), "\n")
	//	}
	if overflow || capmem > maxAlloc {
		panic(errorString("growslice: len out of range"))
	}

	var p unsafe.Pointer
	if et.ptrdata == 0 {
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from oldLen to newLen.
		// Only clear the part that will not be overwritten.
		// The reflect_growslice() that calls growslice will manually clear
		// the region not cleared here.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in oldPtr since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.size+et.ptrdata)
		}
	}
	memmove(p, oldPtr, lenmem)

	return slice{p, newLen, newcap}
}
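Annotation (not part of the diff): a small standalone program that observes this policy from user code by appending one element at a time and printing each capacity change. The exact numbers are implementation details (the growth factors, the 256-element threshold, and size-class rounding all vary between Go versions), so treat the output as illustrative only.

package main

import "fmt"

func main() {
	var s []int
	oldCap := cap(s)
	for i := 0; i < 2000; i++ {
		s = append(s, i) // calls growslice whenever len would exceed cap
		if cap(s) != oldCap {
			fmt.Printf("len=%d: cap %d -> %d\n", len(s), oldCap, cap(s))
			oldCap = cap(s)
		}
	}
	// Typical output on a 64-bit toolchain from the era of this file:
	// doubling up to 256 elements (1, 2, 4, ..., 256, 512), then milder
	// steps such as 512 -> 848, reflecting the ~1.25x-plus-rounding
	// policy above.
}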
//go:linkname reflect_growslice reflect.growslice
func reflect_growslice(et *_type, old slice, num int) slice {
	// Semantically equivalent to slices.Grow, except that the caller
	// is responsible for ensuring that old.len+num > old.cap.
	num -= old.cap - old.len // preserve memory of old[old.len:old.cap]
	new := growslice(old.array, old.cap+num, old.cap, num, et)
	// growslice does not zero out new[old.cap:new.len] since it assumes that
	// the memory will be overwritten by an append() that called growslice.
	// Since the caller of reflect_growslice is not append(),
	// zero out this region before returning the slice to the reflect package.
	if et.ptrdata == 0 {
		oldcapmem := uintptr(old.cap) * et.size
		newlenmem := uintptr(new.len) * et.size
		memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem)
	}
	new.len = old.len // preserve the old length
	return new
}

func isPowerOfTwo(x uintptr) bool {
	return x&(x-1) == 0
}

// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
	if fromLen == 0 || toLen == 0 {
		return 0
	}

	n := fromLen
	if toLen < n {
		n = toLen
	}

	if width == 0 {
		return n
	}

	size := uintptr(n) * width
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(slicecopy)
		racereadrangepc(fromPtr, size, callerpc, pc)
		racewriterangepc(toPtr, size, callerpc, pc)
	}
	if msanenabled {
		msanread(fromPtr, size)
		msanwrite(toPtr, size)
	}
	if asanenabled {
		asanread(fromPtr, size)
		asanwrite(toPtr, size)
	}

	if size == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
	} else {
		memmove(toPtr, fromPtr, size)
	}
	return n
}
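Annotation (not part of the diff): slicecopy backs the built-in copy for pointerless element types, including the string-to-[]byte form named in its doc comment. The return value is min(len(dst), len(src)), and per the width == 0 branch above a zero-width element type returns that count without touching memory. A minimal sketch of the user-visible behavior:

package main

import "fmt"

func main() {
	dst := make([]byte, 3)

	// copy returns the number of elements copied: min(len(dst), len(src)).
	n := copy(dst, []byte("hello"))
	fmt.Println(n, string(dst)) // 3 "hel"

	// Copying from a string is also handled by slicecopy.
	n = copy(dst, "Go")
	fmt.Println(n, string(dst)) // 2 "Gol"

	// Zero-width elements: nothing to move, but the count is still returned.
	a := make([]struct{}, 4)
	fmt.Println(copy(a, make([]struct{}, 9))) // 4
}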