| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-16 19:23:18 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-16 19:23:18 +0000 |
| commit | 43a123c1ae6613b3efeed291fa552ecd909d3acf (patch) | |
| tree | fd92518b7024bc74031f78a1cf9e454b65e73665 /src/crypto/internal/boring/bcache | |
| parent | Initial commit. (diff) | |
Adding upstream version 1.20.14.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/crypto/internal/boring/bcache')
| mode | file | insertions |
|---|---|---|
| -rw-r--r-- | src/crypto/internal/boring/bcache/cache.go | 140 |
| -rw-r--r-- | src/crypto/internal/boring/bcache/cache_test.go | 122 |
| -rw-r--r-- | src/crypto/internal/boring/bcache/stub.s | 6 |

3 files changed, 268 insertions(+), 0 deletions(-)
diff --git a/src/crypto/internal/boring/bcache/cache.go b/src/crypto/internal/boring/bcache/cache.go
new file mode 100644
index 0000000..7934d03
--- /dev/null
+++ b/src/crypto/internal/boring/bcache/cache.go
@@ -0,0 +1,140 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bcache implements a GC-friendly cache (see [Cache]) for BoringCrypto.
+package bcache
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+// A Cache is a GC-friendly concurrent map from unsafe.Pointer to
+// unsafe.Pointer. It is meant to be used for maintaining shadow
+// BoringCrypto state associated with certain allocated structs, in
+// particular public and private RSA and ECDSA keys.
+//
+// The cache is GC-friendly in the sense that the keys do not
+// indefinitely prevent the garbage collector from collecting them.
+// Instead, at the start of each GC, the cache is cleared entirely. That
+// is, the cache is lossy, and the loss happens at the start of each GC.
+// This means that clients need to be able to cope with cache entries
+// disappearing, but it also means that clients don't need to worry about
+// cache entries keeping the keys from being collected.
+type Cache[K, V any] struct {
+	// The runtime atomically stores nil to ptable at the start of each GC.
+	ptable atomic.Pointer[cacheTable[K, V]]
+}
+
+type cacheTable[K, V any] [cacheSize]atomic.Pointer[cacheEntry[K, V]]
+
+// A cacheEntry is a single entry in the linked list for a given hash table entry.
+type cacheEntry[K, V any] struct {
+	k    *K                // immutable once created
+	v    atomic.Pointer[V] // read and written atomically to allow updates
+	next *cacheEntry[K, V] // immutable once linked into table
+}
+
+func registerCache(unsafe.Pointer) // provided by runtime
+
+// Register registers the cache with the runtime,
+// so that c.ptable can be cleared at the start of each GC.
+// Register must be called during package initialization.
+func (c *Cache[K, V]) Register() {
+	registerCache(unsafe.Pointer(&c.ptable))
+}
+
+// cacheSize is the number of entries in the hash table.
+// The hash is the pointer value mod cacheSize, a prime.
+// Collisions are resolved by maintaining a linked list in each hash slot.
+const cacheSize = 1021
+
+// table returns a pointer to the current cache hash table,
+// coping with the possibility of the GC clearing it out from under us.
+func (c *Cache[K, V]) table() *cacheTable[K, V] {
+	for {
+		p := c.ptable.Load()
+		if p == nil {
+			p = new(cacheTable[K, V])
+			if !c.ptable.CompareAndSwap(nil, p) {
+				continue
+			}
+		}
+		return p
+	}
+}
+
+// Clear clears the cache.
+// The runtime does this automatically at each garbage collection;
+// this method is exposed only for testing.
+func (c *Cache[K, V]) Clear() {
+	// The runtime does this at the start of every garbage collection
+	// (itself, not by calling this function).
+	c.ptable.Store(nil)
+}
+
+// Get returns the cached value associated with k,
+// which is either the value v corresponding to the most recent call to Put(k, v)
+// or nil if that cache entry has been dropped.
+func (c *Cache[K, V]) Get(k *K) *V {
+	head := &c.table()[uintptr(unsafe.Pointer(k))%cacheSize]
+	e := head.Load()
+	for ; e != nil; e = e.next {
+		if e.k == k {
+			return e.v.Load()
+		}
+	}
+	return nil
+}
+
+// Put sets the cached value associated with k to v.
+func (c *Cache[K, V]) Put(k *K, v *V) {
+	head := &c.table()[uintptr(unsafe.Pointer(k))%cacheSize]
+
+	// Strategy is to walk the linked list at head,
+	// same as in Get, to look for existing entry.
+	// If we find one, we update v atomically in place.
+	// If not, then we race to replace the start = *head
+	// we observed with a new k, v entry.
+	// If we win that race, we're done.
+	// Otherwise, we try the whole thing again,
+	// with two optimizations:
+	//
+	//  1. We track in noK the start of the section of
+	//     the list that we've confirmed has no entry for k.
+	//     The next time down the list, we can stop at noK,
+	//     because new entries are inserted at the front of the list.
+	//     This guarantees we never traverse an entry
+	//     multiple times.
+	//
+	//  2. We only allocate the entry to be added once,
+	//     saving it in add for the next attempt.
+	var add, noK *cacheEntry[K, V]
+	n := 0
+	for {
+		e := head.Load()
+		start := e
+		for ; e != nil && e != noK; e = e.next {
+			if e.k == k {
+				e.v.Store(v)
+				return
+			}
+			n++
+		}
+		if add == nil {
+			add = &cacheEntry[K, V]{k: k}
+			add.v.Store(v)
+		}
+		add.next = start
+		if n >= 1000 {
+			// If an individual list gets too long, which shouldn't happen,
+			// throw it away to avoid quadratic lookup behavior.
+			add.next = nil
+		}
+		if head.CompareAndSwap(start, add) {
+			return
+		}
+		noK = start
+	}
+}
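For orientation, here is a minimal sketch of how an in-tree caller is expected to use this API. Since bcache is internal to the crypto tree it cannot be imported from outside it, and the package name, key/value types, and expensiveConvert helper below are hypothetical; only the Register/Get/Put calls are real API:

```go
// Hypothetical in-tree consumer of bcache; everything except the
// bcache calls themselves is illustrative.
package shadowkeys

import "crypto/internal/boring/bcache"

type PublicKey struct{ n, e int }   // stand-in for a Go-side key type
type boringKey struct{ handle int } // stand-in for shadow BoringCrypto state

var keyCache bcache.Cache[PublicKey, boringKey]

func init() {
	// Register must run during package initialization so the runtime
	// clears the cache table at the start of every GC.
	keyCache.Register()
}

// shadow returns the BoringCrypto form of k. Because the cache is lossy,
// any entry can vanish at a GC, so the slow path must always be able to
// rebuild the value.
func shadow(k *PublicKey) *boringKey {
	if b := keyCache.Get(k); b != nil {
		return b // hit: this exact *PublicKey was Put before and survived GC
	}
	b := expensiveConvert(k)
	keyCache.Put(k, b)
	return b
}

func expensiveConvert(k *PublicKey) *boringKey { return &boringKey{handle: k.n} }
```

This mirrors the way crypto/rsa and crypto/ecdsa keep BoringCrypto key handles keyed by the address of the Go key.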
diff --git a/src/crypto/internal/boring/bcache/cache_test.go b/src/crypto/internal/boring/bcache/cache_test.go
new file mode 100644
index 0000000..19458a1
--- /dev/null
+++ b/src/crypto/internal/boring/bcache/cache_test.go
@@ -0,0 +1,122 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bcache
+
+import (
+	"fmt"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"testing"
+)
+
+var registeredCache Cache[int, int32]
+
+func init() {
+	registeredCache.Register()
+}
+
+var seq atomic.Uint32
+
+func next[T int | int32]() *T {
+	x := new(T)
+	*x = T(seq.Add(1))
+	return x
+}
+
+func str[T int | int32](x *T) string {
+	if x == nil {
+		return "nil"
+	}
+	return fmt.Sprint(*x)
+}
+
+func TestCache(t *testing.T) {
+	// Use unregistered cache for functionality tests,
+	// to keep the runtime from clearing behind our backs.
+	c := new(Cache[int, int32])
+
+	// Create many entries.
+	m := make(map[*int]*int32)
+	for i := 0; i < 10000; i++ {
+		k := next[int]()
+		v := next[int32]()
+		m[k] = v
+		c.Put(k, v)
+	}
+
+	// Overwrite a random 20% of those.
+	n := 0
+	for k := range m {
+		v := next[int32]()
+		m[k] = v
+		c.Put(k, v)
+		if n++; n >= 2000 {
+			break
+		}
+	}
+
+	// Check results.
+	for k, v := range m {
+		if cv := c.Get(k); cv != v {
+			t.Fatalf("c.Get(%v) = %v, want %v", str(k), str(cv), str(v))
+		}
+	}
+
+	c.Clear()
+	for k := range m {
+		if cv := c.Get(k); cv != nil {
+			t.Fatalf("after Clear, c.Get(%v) = %v, want nil", str(k), str(cv))
+		}
+	}
+
+	// Check that registered cache is cleared at GC.
+	c = &registeredCache
+	for k, v := range m {
+		c.Put(k, v)
+	}
+	runtime.GC()
+	for k := range m {
+		if cv := c.Get(k); cv != nil {
+			t.Fatalf("after GC, c.Get(%v) = %v, want nil", str(k), str(cv))
+		}
+	}
+
+	// Check that cache works for concurrent access.
+	// Lists are discarded if they reach 1000 entries,
+	// and there are cacheSize list heads, so we should be
+	// able to do 100 * cacheSize entries with no problem at all.
+	c = new(Cache[int, int32])
+	var barrier, wg sync.WaitGroup
+	const N = 100
+	barrier.Add(N)
+	wg.Add(N)
+	var lost int32
+	for i := 0; i < N; i++ {
+		go func() {
+			defer wg.Done()
+
+			m := make(map[*int]*int32)
+			for j := 0; j < cacheSize; j++ {
+				k, v := next[int](), next[int32]()
+				m[k] = v
+				c.Put(k, v)
+			}
+			barrier.Done()
+			barrier.Wait()
+
+			for k, v := range m {
+				if cv := c.Get(k); cv != v {
+					t.Errorf("c.Get(%v) = %v, want %v", str(k), str(cv), str(v))
+					atomic.AddInt32(&lost, +1)
+				}
+			}
+		}()
+	}
+	wg.Wait()
+	if lost != 0 {
+		t.Errorf("lost %d entries", lost)
+	}
+}
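The subtlest behavior the test exercises is table()'s lazy re-initialization after the runtime nils out ptable. That idiom stands on its own; here is a runnable sketch of it (the names table, ptable, and getTable are illustrative, not part of the package):

```go
// Demonstrates the Load/CompareAndSwap idiom used by Cache.table:
// racing goroutines may each allocate a candidate table, but exactly
// one CompareAndSwap wins, so every caller converges on one pointer.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type table [1021]int // same idea as cacheTable, minus the entries

var ptable atomic.Pointer[table]

func getTable() *table {
	for {
		p := ptable.Load()
		if p == nil {
			p = new(table)
			if !ptable.CompareAndSwap(nil, p) {
				continue // lost the race; reload the winner's table
			}
		}
		return p
	}
}

func main() {
	var wg sync.WaitGroup
	got := make([]*table, 16)
	for i := range got {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			got[i] = getTable()
		}(i)
	}
	wg.Wait()
	same := true
	for _, p := range got[1:] {
		same = same && p == got[0]
	}
	fmt.Println("all goroutines saw one table:", same) // prints true
}
```

The loop matters: a plain Store instead of CompareAndSwap could hand two racing Put callers two different tables, silently splitting the cache.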
diff --git a/src/crypto/internal/boring/bcache/stub.s b/src/crypto/internal/boring/bcache/stub.s
new file mode 100644
index 0000000..59f2dee
--- /dev/null
+++ b/src/crypto/internal/boring/bcache/stub.s
@@ -0,0 +1,6 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is here to silence an error about registerCache not having a body.
+// (The body is provided by package runtime.)
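The empty stub.s is the standard trick for compiling a Go function declaration that has no body: the presence of any assembly file in a package makes the compiler assume missing bodies are defined elsewhere, and package runtime then supplies registerCache's implementation at link time via a go:linkname directive. A sketch of the same pattern with made-up names (the module path example.com/demo is hypothetical):

```go
// consumer/consumer.go
// An empty consumer/stub.s sits beside this file, exactly like the
// stub.s above, so the bodyless declaration of hello compiles.
package consumer

func hello() string // no body; the implementation is linked in

func Greet() string { return hello() }
```

```go
// provider/provider.go
package provider

import _ "unsafe" // importing unsafe is required to use go:linkname

// Push our implementation onto the consumer's symbol, analogous to
// how package runtime provides bcache.registerCache.
//
//go:linkname hello example.com/demo/consumer.hello
func hello() string { return "hi from provider" }
```

For the link to succeed, the provider package must be part of the build (for package runtime this is always true), for example via a main package that imports both and calls consumer.Greet().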