From 43a123c1ae6613b3efeed291fa552ecd909d3acf Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Tue, 16 Apr 2024 21:23:18 +0200 Subject: Adding upstream version 1.20.14. Signed-off-by: Daniel Baumann --- src/runtime/map_faststr.go | 485 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 485 insertions(+) create mode 100644 src/runtime/map_faststr.go (limited to 'src/runtime/map_faststr.go') diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go new file mode 100644 index 0000000..006c24c --- /dev/null +++ b/src/runtime/map_faststr.go @@ -0,0 +1,485 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/abi" + "internal/goarch" + "unsafe" +) + +func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { + if raceenabled && h != nil { + callerpc := getcallerpc() + racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(&zeroVal[0]) + } + if h.flags&hashWriting != 0 { + fatal("concurrent map read and map write") + } + key := stringStructOf(&ky) + if h.B == 0 { + // One-bucket table. + b := (*bmap)(h.buckets) + if key.len < 32 { + // short key, doing lots of comparisons is ok + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || isEmpty(b.tophash[i]) { + if b.tophash[i] == emptyRest { + break + } + continue + } + if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)) + } + } + return unsafe.Pointer(&zeroVal[0]) + } + // long key, try not to do more comparisons than necessary + keymaybe := uintptr(bucketCnt) + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || isEmpty(b.tophash[i]) { + if b.tophash[i] == emptyRest { + break + } + continue + } + if k.str == key.str { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)) + } + // check first 4 bytes + if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) { + continue + } + // check last 4 bytes + if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) { + continue + } + if keymaybe != bucketCnt { + // Two keys are potential matches. Use hash to distinguish them. + goto dohash + } + keymaybe = i + } + if keymaybe != bucketCnt { + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize)) + if memequal(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize)) + } + } + return unsafe.Pointer(&zeroVal[0]) + } +dohash: + hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) + m := bucketMask(h.B) + b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + if !h.sameSizeGrow() { + // There used to be half as many buckets; mask down one more power of two. 
+ m >>= 1 + } + oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + top := tophash(hash) + for ; b != nil; b = b.overflow(t) { + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || b.tophash[i] != top { + continue + } + if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)) + } + } + } + return unsafe.Pointer(&zeroVal[0]) +} + +func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { + if raceenabled && h != nil { + callerpc := getcallerpc() + racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(&zeroVal[0]), false + } + if h.flags&hashWriting != 0 { + fatal("concurrent map read and map write") + } + key := stringStructOf(&ky) + if h.B == 0 { + // One-bucket table. + b := (*bmap)(h.buckets) + if key.len < 32 { + // short key, doing lots of comparisons is ok + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || isEmpty(b.tophash[i]) { + if b.tophash[i] == emptyRest { + break + } + continue + } + if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true + } + } + return unsafe.Pointer(&zeroVal[0]), false + } + // long key, try not to do more comparisons than necessary + keymaybe := uintptr(bucketCnt) + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || isEmpty(b.tophash[i]) { + if b.tophash[i] == emptyRest { + break + } + continue + } + if k.str == key.str { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true + } + // check first 4 bytes + if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) { + continue + } + // check last 4 bytes + if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) { + continue + } + if keymaybe != bucketCnt { + // Two keys are potential matches. Use hash to distinguish them. + goto dohash + } + keymaybe = i + } + if keymaybe != bucketCnt { + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize)) + if memequal(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize)), true + } + } + return unsafe.Pointer(&zeroVal[0]), false + } +dohash: + hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) + m := bucketMask(h.B) + b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + if !h.sameSizeGrow() { + // There used to be half as many buckets; mask down one more power of two. 
+ m >>= 1 + } + oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + top := tophash(hash) + for ; b != nil; b = b.overflow(t) { + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || b.tophash[i] != top { + continue + } + if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true + } + } + } + return unsafe.Pointer(&zeroVal[0]), false +} + +func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer { + if h == nil { + panic(plainError("assignment to entry in nil map")) + } + if raceenabled { + callerpc := getcallerpc() + racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr)) + } + if h.flags&hashWriting != 0 { + fatal("concurrent map writes") + } + key := stringStructOf(&s) + hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0)) + + // Set hashWriting after calling t.hasher for consistency with mapassign. + h.flags ^= hashWriting + + if h.buckets == nil { + h.buckets = newobject(t.bucket) // newarray(t.bucket, 1) + } + +again: + bucket := hash & bucketMask(h.B) + if h.growing() { + growWork_faststr(t, h, bucket) + } + b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize))) + top := tophash(hash) + + var insertb *bmap + var inserti uintptr + var insertk unsafe.Pointer + +bucketloop: + for { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + if isEmpty(b.tophash[i]) && insertb == nil { + insertb = b + inserti = i + } + if b.tophash[i] == emptyRest { + break bucketloop + } + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize)) + if k.len != key.len { + continue + } + if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) { + continue + } + // already have a mapping for key. Update it. + inserti = i + insertb = b + // Overwrite existing key, so it can be garbage collected. + // The size is already guaranteed to be set correctly. + k.str = key.str + goto done + } + ovf := b.overflow(t) + if ovf == nil { + break + } + b = ovf + } + + // Did not find mapping for key. Allocate new cell & add entry. + + // If we hit the max load factor or we have too many overflow buckets, + // and we're not already in the middle of growing, start growing. + if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { + hashGrow(t, h) + goto again // Growing the table invalidates everything, so try again + } + + if insertb == nil { + // The current bucket and all the overflow buckets connected to it are full, allocate a new one. 
+ insertb = h.newoverflow(t, b) + inserti = 0 // not necessary, but avoids needlessly spilling inserti + } + insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks + + insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize) + // store new key at insert position + *((*stringStruct)(insertk)) = *key + h.count++ + +done: + elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.elemsize)) + if h.flags&hashWriting == 0 { + fatal("concurrent map writes") + } + h.flags &^= hashWriting + return elem +} + +func mapdelete_faststr(t *maptype, h *hmap, ky string) { + if raceenabled && h != nil { + callerpc := getcallerpc() + racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr)) + } + if h == nil || h.count == 0 { + return + } + if h.flags&hashWriting != 0 { + fatal("concurrent map writes") + } + + key := stringStructOf(&ky) + hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) + + // Set hashWriting after calling t.hasher for consistency with mapdelete + h.flags ^= hashWriting + + bucket := hash & bucketMask(h.B) + if h.growing() { + growWork_faststr(t, h, bucket) + } + b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize))) + bOrig := b + top := tophash(hash) +search: + for ; b != nil; b = b.overflow(t) { + for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + k := (*stringStruct)(kptr) + if k.len != key.len || b.tophash[i] != top { + continue + } + if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) { + continue + } + // Clear key's pointer. + k.str = nil + e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)) + if t.elem.ptrdata != 0 { + memclrHasPointers(e, t.elem.size) + } else { + memclrNoHeapPointers(e, t.elem.size) + } + b.tophash[i] = emptyOne + // If the bucket now ends in a bunch of emptyOne states, + // change those to emptyRest states. + if i == bucketCnt-1 { + if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { + goto notLast + } + } else { + if b.tophash[i+1] != emptyRest { + goto notLast + } + } + for { + b.tophash[i] = emptyRest + if i == 0 { + if b == bOrig { + break // beginning of initial bucket, we're done. + } + // Find previous bucket, continue at its last entry. + c := b + for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { + } + i = bucketCnt - 1 + } else { + i-- + } + if b.tophash[i] != emptyOne { + break + } + } + notLast: + h.count-- + // Reset the hash seed to make it more difficult for attackers to + // repeatedly trigger hash collisions. See issue 25237. + if h.count == 0 { + h.hash0 = fastrand() + } + break search + } + } + + if h.flags&hashWriting == 0 { + fatal("concurrent map writes") + } + h.flags &^= hashWriting +} + +func growWork_faststr(t *maptype, h *hmap, bucket uintptr) { + // make sure we evacuate the oldbucket corresponding + // to the bucket we're about to use + evacuate_faststr(t, h, bucket&h.oldbucketmask()) + + // evacuate one more oldbucket to make progress on growing + if h.growing() { + evacuate_faststr(t, h, h.nevacuate) + } +} + +func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) { + b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))) + newbit := h.noldbuckets() + if !evacuated(b) { + // TODO: reuse overflow buckets instead of using new ones, if there + // is no iterator using the old buckets. (If !oldIterator.) + + // xy contains the x and y (low and high) evacuation destinations. 
+ var xy [2]evacDst + x := &xy[0] + x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize))) + x.k = add(unsafe.Pointer(x.b), dataOffset) + x.e = add(x.k, bucketCnt*2*goarch.PtrSize) + + if !h.sameSizeGrow() { + // Only calculate y pointers if we're growing bigger. + // Otherwise GC can see bad pointers. + y := &xy[1] + y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize))) + y.k = add(unsafe.Pointer(y.b), dataOffset) + y.e = add(y.k, bucketCnt*2*goarch.PtrSize) + } + + for ; b != nil; b = b.overflow(t) { + k := add(unsafe.Pointer(b), dataOffset) + e := add(k, bucketCnt*2*goarch.PtrSize) + for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.elemsize)) { + top := b.tophash[i] + if isEmpty(top) { + b.tophash[i] = evacuatedEmpty + continue + } + if top < minTopHash { + throw("bad map state") + } + var useY uint8 + if !h.sameSizeGrow() { + // Compute hash to make our evacuation decision (whether we need + // to send this key/elem to bucket x or bucket y). + hash := t.hasher(k, uintptr(h.hash0)) + if hash&newbit != 0 { + useY = 1 + } + } + + b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap + dst := &xy[useY] // evacuation destination + + if dst.i == bucketCnt { + dst.b = h.newoverflow(t, dst.b) + dst.i = 0 + dst.k = add(unsafe.Pointer(dst.b), dataOffset) + dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize) + } + dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check + + // Copy key. + *(*string)(dst.k) = *(*string)(k) + + typedmemmove(t.elem, dst.e, e) + dst.i++ + // These updates might push these pointers past the end of the + // key or elem arrays. That's ok, as we have the overflow pointer + // at the end of the bucket to protect against pointing past the + // end of the bucket. + dst.k = add(dst.k, 2*goarch.PtrSize) + dst.e = add(dst.e, uintptr(t.elemsize)) + } + } + // Unlink the overflow buckets & clear key/elem to help GC. + if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 { + b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)) + // Preserve b.tophash because the evacuation + // state is maintained there. + ptr := add(b, dataOffset) + n := uintptr(t.bucketsize) - dataOffset + memclrHasPointers(ptr, n) + } + } + + if oldbucket == h.nevacuate { + advanceEvacuationMark(h, t, newbit) + } +} -- cgit v1.2.3
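
The routines added by this patch are the string-key fast paths of the runtime map implementation; with the gc toolchain, ordinary operations on a map[string]T are lowered to calls into them. The sketch below is an editorial illustration of that mapping, not part of the patch, and the exact lowering can vary by compiler version.

package main

import "fmt"

func main() {
	m := make(map[string]int)

	m["answer"] = 42       // lowered to a call to runtime.mapassign_faststr
	v := m["answer"]       // runtime.mapaccess1_faststr
	v2, ok := m["missing"] // runtime.mapaccess2_faststr
	delete(m, "answer")    // runtime.mapdelete_faststr

	fmt.Println(v, v2, ok) // 42 0 false
}

Building with go build -gcflags=-S and searching the assembly for CALL runtime.mapaccess1_faststr (and friends) is one way to confirm which entry point a given map expression uses.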
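For keys of 32 bytes or more, mapaccess1_faststr and mapaccess2_faststr avoid calling memequal on every slot: a candidate must first match in length, then in its first four and its last four bytes, and if more than one slot survives those probes the lookup jumps to dohash and lets the hash decide (the keymaybe path). The sketch below restates that heuristic on plain strings; quickReject is an invented name and the string slicing stands in for the *[4]byte loads in the runtime code.

package main

import "fmt"

// quickReject applies the cheap probes the long-key path uses before a full
// comparison: equal length, equal first four bytes, equal last four bytes.
// A false result only means "still a candidate", not "equal".
func quickReject(a, b string) bool {
	if len(a) != len(b) {
		return true
	}
	if len(a) < 4 {
		return false // too short for the 4-byte probes; compare fully instead
	}
	return a[:4] != b[:4] || a[len(a)-4:] != b[len(b)-4:]
}

func main() {
	a := "averylongkey-aaaa-thatwouldtakeawhiletocompare"
	b := "averylongkey-bbbb-thatwouldtakeawhiletocompare"
	fmt.Println(quickReject(a, "short")) // true: lengths differ, cheap reject
	fmt.Println(quickReject(a, b))       // false: same length and same ends, so it survives the probes
}

The second call mirrors the case where a slot survives the probes and is remembered in keymaybe; only when a second slot also survives does the runtime fall back to hashing instead of comparing every candidate in full.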
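evacuate_faststr sends each old entry to one of two destinations when the table doubles: bucket x (the same index) or bucket y (the index plus newbit, the old bucket count), chosen by the single hash bit that the larger bucket mask newly exposes (hash&newbit). A minimal arithmetic sketch of just that decision, with invented names:

package main

import "fmt"

// destBucket mimics the x/y choice in evacuate_faststr: one extra hash bit
// decides whether an entry keeps its old bucket index or moves up by newbit.
func destBucket(hash, oldBucket, newbit uintptr) uintptr {
	if hash&newbit != 0 {
		return oldBucket + newbit // the "y" destination in the doubled table
	}
	return oldBucket // the "x" destination keeps the old index
}

func main() {
	const newbit = 8 // the old table had 8 buckets, the new one has 16
	// Both hashes land in old bucket 5 (13&7 == 5&7 == 5); the newly
	// exposed bit 3 splits them between new buckets 5 and 13.
	fmt.Println(destBucket(13, 5, newbit)) // 13: bit 3 set, moves to the "y" half
	fmt.Println(destBucket(5, 5, newbit))  // 5: bit 3 clear, stays in the "x" half
}

Because a single bit decides the destination, the contents of one old bucket spread across exactly two new buckets, which is what lets growth be paid for incrementally via growWork_faststr.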