Diffstat (limited to 'src/runtime/map_fast32.go')
-rw-r--r-- | src/runtime/map_fast32.go | 462
1 file changed, 462 insertions, 0 deletions
diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go
new file mode 100644
index 0000000..01ea330
--- /dev/null
+++ b/src/runtime/map_fast32.go
@@ -0,0 +1,462 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+	"internal/abi"
+	"internal/goarch"
+	"unsafe"
+)
+
+func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+	if raceenabled && h != nil {
+		callerpc := getcallerpc()
+		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
+	}
+	if h == nil || h.count == 0 {
+		return unsafe.Pointer(&zeroVal[0])
+	}
+	if h.flags&hashWriting != 0 {
+		fatal("concurrent map read and map write")
+	}
+	var b *bmap
+	if h.B == 0 {
+		// One-bucket table. No need to hash.
+		b = (*bmap)(h.buckets)
+	} else {
+		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+		m := bucketMask(h.B)
+		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+		if c := h.oldbuckets; c != nil {
+			if !h.sameSizeGrow() {
+				// There used to be half as many buckets; mask down one more power of two.
+				m >>= 1
+			}
+			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+			if !evacuated(oldb) {
+				b = oldb
+			}
+		}
+	}
+	for ; b != nil; b = b.overflow(t) {
+		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
+			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
+				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
+			}
+		}
+	}
+	return unsafe.Pointer(&zeroVal[0])
+}
+
+func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
+	if raceenabled && h != nil {
+		callerpc := getcallerpc()
+		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
+	}
+	if h == nil || h.count == 0 {
+		return unsafe.Pointer(&zeroVal[0]), false
+	}
+	if h.flags&hashWriting != 0 {
+		fatal("concurrent map read and map write")
+	}
+	var b *bmap
+	if h.B == 0 {
+		// One-bucket table. No need to hash.
+		b = (*bmap)(h.buckets)
+	} else {
+		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+		m := bucketMask(h.B)
+		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+		if c := h.oldbuckets; c != nil {
+			if !h.sameSizeGrow() {
+				// There used to be half as many buckets; mask down one more power of two.
+				m >>= 1
+			}
+			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
+			if !evacuated(oldb) {
+				b = oldb
+			}
+		}
+	}
+	for ; b != nil; b = b.overflow(t) {
+		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
+			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
+				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
+			}
+		}
+	}
+	return unsafe.Pointer(&zeroVal[0]), false
+}
+
+func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+	if h == nil {
+		panic(plainError("assignment to entry in nil map"))
+	}
+	if raceenabled {
+		callerpc := getcallerpc()
+		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
+	}
+	if h.flags&hashWriting != 0 {
+		fatal("concurrent map writes")
+	}
+	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+	// Set hashWriting after calling t.hasher for consistency with mapassign.
+	h.flags ^= hashWriting
+
+	if h.buckets == nil {
+		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+	}
+
+again:
+	bucket := hash & bucketMask(h.B)
+	if h.growing() {
+		growWork_fast32(t, h, bucket)
+	}
+	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+
+	var insertb *bmap
+	var inserti uintptr
+	var insertk unsafe.Pointer
+
+bucketloop:
+	for {
+		for i := uintptr(0); i < bucketCnt; i++ {
+			if isEmpty(b.tophash[i]) {
+				if insertb == nil {
+					inserti = i
+					insertb = b
+				}
+				if b.tophash[i] == emptyRest {
+					break bucketloop
+				}
+				continue
+			}
+			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
+			if k != key {
+				continue
+			}
+			inserti = i
+			insertb = b
+			goto done
+		}
+		ovf := b.overflow(t)
+		if ovf == nil {
+			break
+		}
+		b = ovf
+	}
+
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+		hashGrow(t, h)
+		goto again // Growing the table invalidates everything, so try again
+	}
+
+	if insertb == nil {
+		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
+		insertb = h.newoverflow(t, b)
+		inserti = 0 // not necessary, but avoids needlessly spilling inserti
+	}
+	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
+	// store new key at insert position
+	*(*uint32)(insertk) = key
+
+	h.count++
+
+done:
+	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
+	if h.flags&hashWriting == 0 {
+		fatal("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+	return elem
+}
+
+func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+	if h == nil {
+		panic(plainError("assignment to entry in nil map"))
+	}
+	if raceenabled {
+		callerpc := getcallerpc()
+		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
+	}
+	if h.flags&hashWriting != 0 {
+		fatal("concurrent map writes")
+	}
+	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+	// Set hashWriting after calling t.hasher for consistency with mapassign.
+	h.flags ^= hashWriting
+
+	if h.buckets == nil {
+		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+	}
+
+again:
+	bucket := hash & bucketMask(h.B)
+	if h.growing() {
+		growWork_fast32(t, h, bucket)
+	}
+	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+
+	var insertb *bmap
+	var inserti uintptr
+	var insertk unsafe.Pointer
+
+bucketloop:
+	for {
+		for i := uintptr(0); i < bucketCnt; i++ {
+			if isEmpty(b.tophash[i]) {
+				if insertb == nil {
+					inserti = i
+					insertb = b
+				}
+				if b.tophash[i] == emptyRest {
+					break bucketloop
+				}
+				continue
+			}
+			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
+			if k != key {
+				continue
+			}
+			inserti = i
+			insertb = b
+			goto done
+		}
+		ovf := b.overflow(t)
+		if ovf == nil {
+			break
+		}
+		b = ovf
+	}
+
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+		hashGrow(t, h)
+		goto again // Growing the table invalidates everything, so try again
+	}
+
+	if insertb == nil {
+		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
+		insertb = h.newoverflow(t, b)
+		inserti = 0 // not necessary, but avoids needlessly spilling inserti
+	}
+	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
+	// store new key at insert position
+	*(*unsafe.Pointer)(insertk) = key
+
+	h.count++
+
+done:
+	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
+	if h.flags&hashWriting == 0 {
+		fatal("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+	return elem
+}
+
+func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
+	if raceenabled && h != nil {
+		callerpc := getcallerpc()
+		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
+	}
+	if h == nil || h.count == 0 {
+		return
+	}
+	if h.flags&hashWriting != 0 {
+		fatal("concurrent map writes")
+	}
+
+	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+	// Set hashWriting after calling t.hasher for consistency with mapdelete
+	h.flags ^= hashWriting
+
+	bucket := hash & bucketMask(h.B)
+	if h.growing() {
+		growWork_fast32(t, h, bucket)
+	}
+	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+	bOrig := b
+search:
+	for ; b != nil; b = b.overflow(t) {
+		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
+			if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
+				continue
+			}
+			// Only clear key if there are pointers in it.
+			// This can only happen if pointers are 32 bit
+			// wide as 64 bit pointers do not fit into a 32 bit key.
+			if goarch.PtrSize == 4 && t.key.ptrdata != 0 {
+				// The key must be a pointer as we checked pointers are
+				// 32 bits wide and the key is 32 bits wide also.
+				*(*unsafe.Pointer)(k) = nil
+			}
+			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
+			if t.elem.ptrdata != 0 {
+				memclrHasPointers(e, t.elem.size)
+			} else {
+				memclrNoHeapPointers(e, t.elem.size)
+			}
+			b.tophash[i] = emptyOne
+			// If the bucket now ends in a bunch of emptyOne states,
+			// change those to emptyRest states.
+			if i == bucketCnt-1 {
+				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
+					goto notLast
+				}
+			} else {
+				if b.tophash[i+1] != emptyRest {
+					goto notLast
+				}
+			}
+			for {
+				b.tophash[i] = emptyRest
+				if i == 0 {
+					if b == bOrig {
+						break // beginning of initial bucket, we're done.
+					}
+					// Find previous bucket, continue at its last entry.
+					c := b
+					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
+					}
+					i = bucketCnt - 1
+				} else {
+					i--
+				}
+				if b.tophash[i] != emptyOne {
+					break
+				}
+			}
+		notLast:
+			h.count--
+			// Reset the hash seed to make it more difficult for attackers to
+			// repeatedly trigger hash collisions. See issue 25237.
+			if h.count == 0 {
+				h.hash0 = fastrand()
+			}
+			break search
+		}
+	}
+
+	if h.flags&hashWriting == 0 {
+		fatal("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+}
+
+func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
+	// make sure we evacuate the oldbucket corresponding
+	// to the bucket we're about to use
+	evacuate_fast32(t, h, bucket&h.oldbucketmask())
+
+	// evacuate one more oldbucket to make progress on growing
+	if h.growing() {
+		evacuate_fast32(t, h, h.nevacuate)
+	}
+}
+
+func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
+	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+	newbit := h.noldbuckets()
+	if !evacuated(b) {
+		// TODO: reuse overflow buckets instead of using new ones, if there
+		// is no iterator using the old buckets. (If !oldIterator.)
+
+		// xy contains the x and y (low and high) evacuation destinations.
+		var xy [2]evacDst
+		x := &xy[0]
+		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
+		x.k = add(unsafe.Pointer(x.b), dataOffset)
+		x.e = add(x.k, bucketCnt*4)
+
+		if !h.sameSizeGrow() {
+			// Only calculate y pointers if we're growing bigger.
+			// Otherwise GC can see bad pointers.
+			y := &xy[1]
+			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
+			y.k = add(unsafe.Pointer(y.b), dataOffset)
+			y.e = add(y.k, bucketCnt*4)
+		}
+
+		for ; b != nil; b = b.overflow(t) {
+			k := add(unsafe.Pointer(b), dataOffset)
+			e := add(k, bucketCnt*4)
+			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.elemsize)) {
+				top := b.tophash[i]
+				if isEmpty(top) {
+					b.tophash[i] = evacuatedEmpty
+					continue
+				}
+				if top < minTopHash {
+					throw("bad map state")
+				}
+				var useY uint8
+				if !h.sameSizeGrow() {
+					// Compute hash to make our evacuation decision (whether we need
+					// to send this key/elem to bucket x or bucket y).
+					hash := t.hasher(k, uintptr(h.hash0))
+					if hash&newbit != 0 {
+						useY = 1
+					}
+				}
+
+				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
+				dst := &xy[useY]                 // evacuation destination
+
+				if dst.i == bucketCnt {
+					dst.b = h.newoverflow(t, dst.b)
+					dst.i = 0
+					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
+					dst.e = add(dst.k, bucketCnt*4)
+				}
+				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+
+				// Copy key.
+				if goarch.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
+					// Write with a write barrier.
+					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
+				} else {
+					*(*uint32)(dst.k) = *(*uint32)(k)
+				}
+
+				typedmemmove(t.elem, dst.e, e)
+				dst.i++
+				// These updates might push these pointers past the end of the
+				// key or elem arrays. That's ok, as we have the overflow pointer
+				// at the end of the bucket to protect against pointing past the
+				// end of the bucket.
+				dst.k = add(dst.k, 4)
+				dst.e = add(dst.e, uintptr(t.elemsize))
+			}
+		}
+		// Unlink the overflow buckets & clear key/elem to help GC.
+		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
+			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
+			// Preserve b.tophash because the evacuation
+			// state is maintained there.
+			ptr := add(b, dataOffset)
+			n := uintptr(t.bucketsize) - dataOffset
+			memclrHasPointers(ptr, n)
+		}
+	}
+
+	if oldbucket == h.nevacuate {
+		advanceEvacuationMark(h, t, newbit)
+	}
+}
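For context (not part of the diff above): these runtime entry points are not called directly by user code. The compiler is expected to select the _fast32 variants when a map's key type is a 4-byte scalar such as uint32 or int32. The sketch below is a minimal, ordinary Go program whose map operations would, under that assumption, be lowered to mapassign_fast32, mapaccess2_fast32, and mapdelete_fast32, with growth handled incrementally by growWork_fast32/evacuate_fast32.

```go
package main

import "fmt"

func main() {
	// A map with a 4-byte key type; operations on it are expected to use
	// the runtime's *_fast32 helpers rather than the generic map paths.
	m := make(map[uint32]string)

	// Inserting enough entries to exceed the load factor triggers hashGrow,
	// after which each assign/delete evacuates a couple of old buckets.
	for i := uint32(0); i < 100; i++ {
		m[i] = fmt.Sprintf("value-%d", i) // mapassign_fast32
	}

	if v, ok := m[42]; ok { // mapaccess2_fast32
		fmt.Println("found:", v)
	}

	delete(m, 42) // mapdelete_fast32
	fmt.Println("len after delete:", len(m))
}
```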