author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-16 19:19:13 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-16 19:19:13 +0000
commit     ccd992355df7192993c666236047820244914598 (patch)
tree       f00fea65147227b7743083c6148396f74cd66935 /src/sync
parent     Initial commit. (diff)
Adding upstream version 1.21.8. (upstream/1.21.8)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/sync')
-rw-r--r--  src/sync/atomic/asm.s            |   85
-rw-r--r--  src/sync/atomic/atomic_test.go   | 2534
-rw-r--r--  src/sync/atomic/doc.go           |  192
-rw-r--r--  src/sync/atomic/example_test.go  |   76
-rw-r--r--  src/sync/atomic/race.s           |    8
-rw-r--r--  src/sync/atomic/type.go          |  200
-rw-r--r--  src/sync/atomic/value.go         |  194
-rw-r--r--  src/sync/atomic/value_test.go    |  274
-rw-r--r--  src/sync/cond.go                 |  117
-rw-r--r--  src/sync/cond_test.go            |  311
-rw-r--r--  src/sync/example_pool_test.go    |   45
-rw-r--r--  src/sync/example_test.go         |   59
-rw-r--r--  src/sync/export_test.go          |   57
-rw-r--r--  src/sync/map.go                  |  515
-rw-r--r--  src/sync/map_bench_test.go       |  535
-rw-r--r--  src/sync/map_reference_test.go   |  271
-rw-r--r--  src/sync/map_test.go             |  282
-rw-r--r--  src/sync/mutex.go                |  259
-rw-r--r--  src/sync/mutex_test.go           |  335
-rw-r--r--  src/sync/once.go                 |   76
-rw-r--r--  src/sync/once_test.go            |   68
-rw-r--r--  src/sync/oncefunc.go             |   97
-rw-r--r--  src/sync/oncefunc_test.go        |  265
-rw-r--r--  src/sync/pool.go                 |  297
-rw-r--r--  src/sync/pool_test.go            |  373
-rw-r--r--  src/sync/poolqueue.go            |  309
-rw-r--r--  src/sync/runtime.go              |   63
-rw-r--r--  src/sync/runtime2.go             |   19
-rw-r--r--  src/sync/runtime2_lockrank.go    |   22
-rw-r--r--  src/sync/runtime_sema_test.go    |   75
-rw-r--r--  src/sync/rwmutex.go              |  244
-rw-r--r--  src/sync/rwmutex_test.go         |  245
-rw-r--r--  src/sync/waitgroup.go            |  127
-rw-r--r--  src/sync/waitgroup_test.go       |  175
34 files changed, 8804 insertions(+), 0 deletions(-)
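
For orientation only (this sketch is not part of the commit): the files listed above add both the low-level sync/atomic primitives and the higher-level sync types (Mutex, RWMutex, WaitGroup, Once, Pool, Map, Cond). A minimal program combining two of them, a sync.WaitGroup and the typed atomic.Int64 counter declared in src/sync/atomic/type.go, could look like this:

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    )

    func main() {
    	var wg sync.WaitGroup
    	var counter atomic.Int64 // typed atomic counter from src/sync/atomic/type.go

    	for g := 0; g < 4; g++ {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			for i := 0; i < 1000; i++ {
    				counter.Add(1) // atomic increment, safe under contention
    			}
    		}()
    	}
    	wg.Wait()
    	fmt.Println(counter.Load()) // always prints 4000
    }
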
diff --git a/src/sync/atomic/asm.s b/src/sync/atomic/asm.s
new file mode 100644
index 0000000..2022304
--- /dev/null
+++ b/src/sync/atomic/asm.s
@@ -0,0 +1,85 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !race
+
+#include "textflag.h"
+
+TEXT ·SwapInt32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xchg(SB)
+
+TEXT ·SwapUint32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xchg(SB)
+
+TEXT ·SwapInt64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xchg64(SB)
+
+TEXT ·SwapUint64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xchg64(SB)
+
+TEXT ·SwapUintptr(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xchguintptr(SB)
+
+TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Cas(SB)
+
+TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Cas(SB)
+
+TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Casuintptr(SB)
+
+TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Cas64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Cas64(SB)
+
+TEXT ·AddInt32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xadd(SB)
+
+TEXT ·AddUint32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xadd(SB)
+
+TEXT ·AddUintptr(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xadduintptr(SB)
+
+TEXT ·AddInt64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xadd64(SB)
+
+TEXT ·AddUint64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xadd64(SB)
+
+TEXT ·LoadInt32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Load(SB)
+
+TEXT ·LoadUint32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Load(SB)
+
+TEXT ·LoadInt64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Load64(SB)
+
+TEXT ·LoadUint64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Load64(SB)
+
+TEXT ·LoadUintptr(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Loaduintptr(SB)
+
+TEXT ·LoadPointer(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Loadp(SB)
+
+TEXT ·StoreInt32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Store(SB)
+
+TEXT ·StoreUint32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Store(SB)
+
+TEXT ·StoreInt64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Store64(SB)
+
+TEXT ·StoreUint64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Store64(SB)
+
+TEXT ·StoreUintptr(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Storeuintptr(SB)
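
A note on the file above (again, not part of the diff): asm.s contains no logic of its own. Every exported symbol is a NOSPLIT trampoline that JMPs to the matching runtime∕internal∕atomic routine, and the whole file is excluded under the race detector (//go:build !race). Called from ordinary Go, three of the forwarded entry points behave as in this illustrative sketch, which uses only the standard sync/atomic API:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    func main() {
    	var n uint32
    	atomic.AddUint32(&n, 1)                          // forwarded to runtime∕internal∕atomic·Xadd
    	old := atomic.SwapUint32(&n, 7)                  // forwarded to runtime∕internal∕atomic·Xchg
    	swapped := atomic.CompareAndSwapUint32(&n, 7, 8) // forwarded to runtime∕internal∕atomic·Cas
    	fmt.Println(n, old, swapped)                     // prints: 8 1 true
    }
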
diff --git a/src/sync/atomic/atomic_test.go b/src/sync/atomic/atomic_test.go
new file mode 100644
index 0000000..c3604ef
--- /dev/null
+++ b/src/sync/atomic/atomic_test.go
@@ -0,0 +1,2534 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic_test
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ . "sync/atomic"
+ "testing"
+ "unsafe"
+)
+
+// Tests of correct behavior, without contention.
+// (Does the function work as advertised?)
+//
+// Test that the Add functions add correctly.
+// Test that the CompareAndSwap functions actually
+// do the comparison and the swap correctly.
+//
+// The loop over power-of-two values is meant to
+// ensure that the operations apply to the full word size.
+// The struct fields x.before and x.after check that the
+// operations do not extend past the full word size.
+
+const (
+ magic32 = 0xdedbeef
+ magic64 = 0xdeddeadbeefbeef
+)
+
+func TestSwapInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j int32
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ k := SwapInt32(&x.i, delta)
+ if x.i != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ j = delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestSwapInt32Method(t *testing.T) {
+ var x struct {
+ before int32
+ i Int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j int32
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ k := x.i.Swap(delta)
+ if x.i.Load() != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ j = delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestSwapUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j uint32
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ k := SwapUint32(&x.i, delta)
+ if x.i != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ j = delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestSwapUint32Method(t *testing.T) {
+ var x struct {
+ before uint32
+ i Uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j uint32
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ k := x.i.Swap(delta)
+ if x.i.Load() != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ j = delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestSwapInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j int64
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := SwapInt64(&x.i, delta)
+ if x.i != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ j = delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestSwapInt64Method(t *testing.T) {
+ var x struct {
+ before int64
+ i Int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j int64
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := x.i.Swap(delta)
+ if x.i.Load() != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ j = delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestSwapUint64(t *testing.T) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j uint64
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := SwapUint64(&x.i, delta)
+ if x.i != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ j = delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestSwapUint64Method(t *testing.T) {
+ var x struct {
+ before uint64
+ i Uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j uint64
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := x.i.Swap(delta)
+ if x.i.Load() != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ j = delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestSwapUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j uintptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := SwapUintptr(&x.i, delta)
+ if x.i != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ j = delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestSwapUintptrMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j uintptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := x.i.Swap(delta)
+ if x.i.Load() != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ j = delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+var global [1024]byte
+
+func testPointers() []unsafe.Pointer {
+ var pointers []unsafe.Pointer
+ // globals
+ for i := 0; i < 10; i++ {
+ pointers = append(pointers, unsafe.Pointer(&global[1<<i-1]))
+ }
+ // heap
+ pointers = append(pointers, unsafe.Pointer(new(byte)))
+ // nil
+ pointers = append(pointers, nil)
+ return pointers
+}
+
+func TestSwapPointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j unsafe.Pointer
+
+ for _, p := range testPointers() {
+ k := SwapPointer(&x.i, p)
+ if x.i != p || k != j {
+ t.Fatalf("p=%p i=%p j=%p k=%p", p, x.i, j, k)
+ }
+ j = p
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestSwapPointerMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Pointer[byte]
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j *byte
+ for _, p := range testPointers() {
+ p := (*byte)(p)
+ k := x.i.Swap(p)
+ if x.i.Load() != p || k != j {
+ t.Fatalf("p=%p i=%p j=%p k=%p", p, x.i.Load(), j, k)
+ }
+ j = p
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestAddInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j int32
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ k := AddInt32(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAddInt32Method(t *testing.T) {
+ var x struct {
+ before int32
+ i Int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j int32
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ k := x.i.Add(delta)
+ j += delta
+ if x.i.Load() != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAddUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j uint32
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ k := AddUint32(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAddUint32Method(t *testing.T) {
+ var x struct {
+ before uint32
+ i Uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j uint32
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ k := x.i.Add(delta)
+ j += delta
+ if x.i.Load() != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAddInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j int64
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := AddInt64(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestAddInt64Method(t *testing.T) {
+ var x struct {
+ before int64
+ i Int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j int64
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := x.i.Add(delta)
+ j += delta
+ if x.i.Load() != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestAddUint64(t *testing.T) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j uint64
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := AddUint64(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestAddUint64Method(t *testing.T) {
+ var x struct {
+ before uint64
+ i Uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j uint64
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := x.i.Add(delta)
+ j += delta
+ if x.i.Load() != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestAddUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j uintptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := AddUintptr(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestAddUintptrMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j uintptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := x.i.Add(delta)
+ j += delta
+ if x.i.Load() != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestCompareAndSwapInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ for val := int32(1); val+val > val; val += val {
+ x.i = val
+ if !CompareAndSwapInt32(&x.i, val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if CompareAndSwapInt32(&x.i, val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestCompareAndSwapInt32Method(t *testing.T) {
+ var x struct {
+ before int32
+ i Int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ for val := int32(1); val+val > val; val += val {
+ x.i.Store(val)
+ if !x.i.CompareAndSwap(val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ x.i.Store(val + 1)
+ if x.i.CompareAndSwap(val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestCompareAndSwapUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ for val := uint32(1); val+val > val; val += val {
+ x.i = val
+ if !CompareAndSwapUint32(&x.i, val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if CompareAndSwapUint32(&x.i, val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestCompareAndSwapUint32Method(t *testing.T) {
+ var x struct {
+ before uint32
+ i Uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ for val := uint32(1); val+val > val; val += val {
+ x.i.Store(val)
+ if !x.i.CompareAndSwap(val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ x.i.Store(val + 1)
+ if x.i.CompareAndSwap(val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestCompareAndSwapInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ for val := int64(1); val+val > val; val += val {
+ x.i = val
+ if !CompareAndSwapInt64(&x.i, val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if CompareAndSwapInt64(&x.i, val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestCompareAndSwapInt64Method(t *testing.T) {
+ var x struct {
+ before int64
+ i Int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ for val := int64(1); val+val > val; val += val {
+ x.i.Store(val)
+ if !x.i.CompareAndSwap(val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ x.i.Store(val + 1)
+ if x.i.CompareAndSwap(val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func testCompareAndSwapUint64(t *testing.T, cas func(*uint64, uint64, uint64) bool) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ for val := uint64(1); val+val > val; val += val {
+ x.i = val
+ if !cas(&x.i, val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if cas(&x.i, val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestCompareAndSwapUint64(t *testing.T) {
+ testCompareAndSwapUint64(t, CompareAndSwapUint64)
+}
+
+func TestCompareAndSwapUint64Method(t *testing.T) {
+ var x struct {
+ before uint64
+ i Uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ for val := uint64(1); val+val > val; val += val {
+ x.i.Store(val)
+ if !x.i.CompareAndSwap(val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ x.i.Store(val + 1)
+ if x.i.CompareAndSwap(val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestCompareAndSwapUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for val := uintptr(1); val+val > val; val += val {
+ x.i = val
+ if !CompareAndSwapUintptr(&x.i, val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if CompareAndSwapUintptr(&x.i, val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestCompareAndSwapUintptrMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for val := uintptr(1); val+val > val; val += val {
+ x.i.Store(val)
+ if !x.i.CompareAndSwap(val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ x.i.Store(val + 1)
+ if x.i.CompareAndSwap(val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uintptr(magicptr), uintptr(magicptr))
+ }
+}
+
+func TestCompareAndSwapPointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ q := unsafe.Pointer(new(byte))
+ for _, p := range testPointers() {
+ x.i = p
+ if !CompareAndSwapPointer(&x.i, p, q) {
+ t.Fatalf("should have swapped %p %p", p, q)
+ }
+ if x.i != q {
+ t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i, q)
+ }
+ if CompareAndSwapPointer(&x.i, p, nil) {
+ t.Fatalf("should not have swapped %p nil", p)
+ }
+ if x.i != q {
+ t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i, q)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestCompareAndSwapPointerMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Pointer[byte]
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ q := new(byte)
+ for _, p := range testPointers() {
+ p := (*byte)(p)
+ x.i.Store(p)
+ if !x.i.CompareAndSwap(p, q) {
+ t.Fatalf("should have swapped %p %p", p, q)
+ }
+ if x.i.Load() != q {
+ t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i.Load(), q)
+ }
+ if x.i.CompareAndSwap(p, nil) {
+ t.Fatalf("should not have swapped %p nil", p)
+ }
+ if x.i.Load() != q {
+ t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i.Load(), q)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestLoadInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ k := LoadInt32(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestLoadInt32Method(t *testing.T) {
+ var x struct {
+ before int32
+ i Int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ want := int32(0)
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ k := x.i.Load()
+ if k != want {
+ t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
+ }
+ x.i.Store(k + delta)
+ want = k + delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestLoadUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ k := LoadUint32(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestLoadUint32Method(t *testing.T) {
+ var x struct {
+ before uint32
+ i Uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ want := uint32(0)
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ k := x.i.Load()
+ if k != want {
+ t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
+ }
+ x.i.Store(k + delta)
+ want = k + delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestLoadInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := LoadInt64(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestLoadInt64Method(t *testing.T) {
+ var x struct {
+ before int64
+ i Int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ want := int64(0)
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := x.i.Load()
+ if k != want {
+ t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
+ }
+ x.i.Store(k + delta)
+ want = k + delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestLoadUint64(t *testing.T) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := LoadUint64(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestLoadUint64Method(t *testing.T) {
+ var x struct {
+ before uint64
+ i Uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ want := uint64(0)
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := x.i.Load()
+ if k != want {
+ t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
+ }
+ x.i.Store(k + delta)
+ want = k + delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestLoadUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := LoadUintptr(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestLoadUintptrMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ want := uintptr(0)
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := x.i.Load()
+ if k != want {
+ t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
+ }
+ x.i.Store(k + delta)
+ want = k + delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestLoadPointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for _, p := range testPointers() {
+ x.i = p
+ k := LoadPointer(&x.i)
+ if k != p {
+ t.Fatalf("p=%x k=%x", p, k)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestLoadPointerMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Pointer[byte]
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for _, p := range testPointers() {
+ p := (*byte)(p)
+ x.i.Store(p)
+ k := x.i.Load()
+ if k != p {
+ t.Fatalf("p=%x k=%x", p, k)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestStoreInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ v := int32(0)
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ StoreInt32(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestStoreInt32Method(t *testing.T) {
+ var x struct {
+ before int32
+ i Int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ v := int32(0)
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ x.i.Store(v)
+ if x.i.Load() != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
+ }
+ v += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestStoreUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ v := uint32(0)
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ StoreUint32(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestStoreUint32Method(t *testing.T) {
+ var x struct {
+ before uint32
+ i Uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ v := uint32(0)
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ x.i.Store(v)
+ if x.i.Load() != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
+ }
+ v += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestStoreInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ v := int64(0)
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ StoreInt64(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestStoreInt64Method(t *testing.T) {
+ var x struct {
+ before int64
+ i Int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ v := int64(0)
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ x.i.Store(v)
+ if x.i.Load() != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
+ }
+ v += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestStoreUint64(t *testing.T) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ v := uint64(0)
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ StoreUint64(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestStoreUint64Method(t *testing.T) {
+ var x struct {
+ before uint64
+ i Uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ v := uint64(0)
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ x.i.Store(v)
+ if x.i.Load() != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
+ }
+ v += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestStoreUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ v := uintptr(0)
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ StoreUintptr(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestStoreUintptrMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ v := uintptr(0)
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ x.i.Store(v)
+ if x.i.Load() != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
+ }
+ v += delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestStorePointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for _, p := range testPointers() {
+ StorePointer(&x.i, p)
+ if x.i != p {
+ t.Fatalf("x.i=%p p=%p", x.i, p)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestStorePointerMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Pointer[byte]
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for _, p := range testPointers() {
+ p := (*byte)(p)
+ x.i.Store(p)
+ if x.i.Load() != p {
+ t.Fatalf("x.i=%p p=%p", x.i.Load(), p)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+// Tests of correct behavior, with contention.
+// (Is the function atomic?)
+//
+// For each function, we write a "hammer" function that repeatedly
+// uses the atomic operation to add 1 to a value. After running
+// multiple hammers in parallel, check that we end with the correct
+// total.
+// Swap can't add 1, so it uses a different scheme.
+// The functions repeatedly generate a pseudo-random number such that
+// low bits are equal to high bits, swap, check that the old value
+// has low and high bits equal.
+
+var hammer32 = map[string]func(*uint32, int){
+ "SwapInt32": hammerSwapInt32,
+ "SwapUint32": hammerSwapUint32,
+ "SwapUintptr": hammerSwapUintptr32,
+ "AddInt32": hammerAddInt32,
+ "AddUint32": hammerAddUint32,
+ "AddUintptr": hammerAddUintptr32,
+ "CompareAndSwapInt32": hammerCompareAndSwapInt32,
+ "CompareAndSwapUint32": hammerCompareAndSwapUint32,
+ "CompareAndSwapUintptr": hammerCompareAndSwapUintptr32,
+
+ "SwapInt32Method": hammerSwapInt32Method,
+ "SwapUint32Method": hammerSwapUint32Method,
+ "SwapUintptrMethod": hammerSwapUintptr32Method,
+ "AddInt32Method": hammerAddInt32Method,
+ "AddUint32Method": hammerAddUint32Method,
+ "AddUintptrMethod": hammerAddUintptr32Method,
+ "CompareAndSwapInt32Method": hammerCompareAndSwapInt32Method,
+ "CompareAndSwapUint32Method": hammerCompareAndSwapUint32Method,
+ "CompareAndSwapUintptrMethod": hammerCompareAndSwapUintptr32Method,
+}
+
+func init() {
+ var v uint64 = 1 << 50
+ if uintptr(v) != 0 {
+ // 64-bit system; clear uintptr tests
+ delete(hammer32, "SwapUintptr")
+ delete(hammer32, "AddUintptr")
+ delete(hammer32, "CompareAndSwapUintptr")
+ delete(hammer32, "SwapUintptrMethod")
+ delete(hammer32, "AddUintptrMethod")
+ delete(hammer32, "CompareAndSwapUintptrMethod")
+ }
+}
+
+func hammerSwapInt32(uaddr *uint32, count int) {
+ addr := (*int32)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
+ old := uint32(SwapInt32(addr, int32(new)))
+ if old>>16 != old<<16>>16 {
+ panic(fmt.Sprintf("SwapInt32 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapInt32Method(uaddr *uint32, count int) {
+ addr := (*Int32)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
+ old := uint32(addr.Swap(int32(new)))
+ if old>>16 != old<<16>>16 {
+ panic(fmt.Sprintf("SwapInt32 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapUint32(addr *uint32, count int) {
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
+ old := SwapUint32(addr, new)
+ if old>>16 != old<<16>>16 {
+ panic(fmt.Sprintf("SwapUint32 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapUint32Method(uaddr *uint32, count int) {
+ addr := (*Uint32)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
+ old := addr.Swap(new)
+ if old>>16 != old<<16>>16 {
+ panic(fmt.Sprintf("SwapUint32 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapUintptr32(uaddr *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16
+ old := SwapUintptr(addr, new)
+ if old>>16 != old<<16>>16 {
+ panic(fmt.Sprintf("SwapUintptr is not atomic: %#08x", old))
+ }
+ }
+}
+
+func hammerSwapUintptr32Method(uaddr *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ addr := (*Uintptr)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16
+ old := addr.Swap(new)
+ if old>>16 != old<<16>>16 {
+ panic(fmt.Sprintf("Uintptr.Swap is not atomic: %#08x", old))
+ }
+ }
+}
+
+func hammerAddInt32(uaddr *uint32, count int) {
+ addr := (*int32)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ AddInt32(addr, 1)
+ }
+}
+
+func hammerAddInt32Method(uaddr *uint32, count int) {
+ addr := (*Int32)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ addr.Add(1)
+ }
+}
+
+func hammerAddUint32(addr *uint32, count int) {
+ for i := 0; i < count; i++ {
+ AddUint32(addr, 1)
+ }
+}
+
+func hammerAddUint32Method(uaddr *uint32, count int) {
+ addr := (*Uint32)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ addr.Add(1)
+ }
+}
+
+func hammerAddUintptr32(uaddr *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ AddUintptr(addr, 1)
+ }
+}
+
+func hammerAddUintptr32Method(uaddr *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ addr := (*Uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ addr.Add(1)
+ }
+}
+
+func hammerCompareAndSwapInt32(uaddr *uint32, count int) {
+ addr := (*int32)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := LoadInt32(addr)
+ if CompareAndSwapInt32(addr, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapInt32Method(uaddr *uint32, count int) {
+ addr := (*Int32)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := addr.Load()
+ if addr.CompareAndSwap(v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUint32(addr *uint32, count int) {
+ for i := 0; i < count; i++ {
+ for {
+ v := LoadUint32(addr)
+ if CompareAndSwapUint32(addr, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUint32Method(uaddr *uint32, count int) {
+ addr := (*Uint32)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := addr.Load()
+ if addr.CompareAndSwap(v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUintptr32(uaddr *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := LoadUintptr(addr)
+ if CompareAndSwapUintptr(addr, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUintptr32Method(uaddr *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ addr := (*Uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := addr.Load()
+ if addr.CompareAndSwap(v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func TestHammer32(t *testing.T) {
+ const p = 4
+ n := 100000
+ if testing.Short() {
+ n = 1000
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
+
+ for name, testf := range hammer32 {
+ c := make(chan int)
+ var val uint32
+ for i := 0; i < p; i++ {
+ go func() {
+ defer func() {
+ if err := recover(); err != nil {
+ t.Error(err.(string))
+ }
+ c <- 1
+ }()
+ testf(&val, n)
+ }()
+ }
+ for i := 0; i < p; i++ {
+ <-c
+ }
+ if !strings.HasPrefix(name, "Swap") && val != uint32(n)*p {
+ t.Fatalf("%s: val=%d want %d", name, val, n*p)
+ }
+ }
+}
+
+var hammer64 = map[string]func(*uint64, int){
+ "SwapInt64": hammerSwapInt64,
+ "SwapUint64": hammerSwapUint64,
+ "SwapUintptr": hammerSwapUintptr64,
+ "AddInt64": hammerAddInt64,
+ "AddUint64": hammerAddUint64,
+ "AddUintptr": hammerAddUintptr64,
+ "CompareAndSwapInt64": hammerCompareAndSwapInt64,
+ "CompareAndSwapUint64": hammerCompareAndSwapUint64,
+ "CompareAndSwapUintptr": hammerCompareAndSwapUintptr64,
+
+ "SwapInt64Method": hammerSwapInt64Method,
+ "SwapUint64Method": hammerSwapUint64Method,
+ "SwapUintptrMethod": hammerSwapUintptr64Method,
+ "AddInt64Method": hammerAddInt64Method,
+ "AddUint64Method": hammerAddUint64Method,
+ "AddUintptrMethod": hammerAddUintptr64Method,
+ "CompareAndSwapInt64Method": hammerCompareAndSwapInt64Method,
+ "CompareAndSwapUint64Method": hammerCompareAndSwapUint64Method,
+ "CompareAndSwapUintptrMethod": hammerCompareAndSwapUintptr64Method,
+}
+
+func init() {
+ var v uint64 = 1 << 50
+ if uintptr(v) == 0 {
+ // 32-bit system; clear uintptr tests
+ delete(hammer64, "SwapUintptr")
+ delete(hammer64, "SwapUintptrMethod")
+ delete(hammer64, "AddUintptr")
+ delete(hammer64, "AddUintptrMethod")
+ delete(hammer64, "CompareAndSwapUintptr")
+ delete(hammer64, "CompareAndSwapUintptrMethod")
+ }
+}
+
+func hammerSwapInt64(uaddr *uint64, count int) {
+ addr := (*int64)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
+ old := uint64(SwapInt64(addr, int64(new)))
+ if old>>32 != old<<32>>32 {
+ panic(fmt.Sprintf("SwapInt64 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapInt64Method(uaddr *uint64, count int) {
+ addr := (*Int64)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
+ old := uint64(addr.Swap(int64(new)))
+ if old>>32 != old<<32>>32 {
+ panic(fmt.Sprintf("SwapInt64 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapUint64(addr *uint64, count int) {
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
+ old := SwapUint64(addr, new)
+ if old>>32 != old<<32>>32 {
+ panic(fmt.Sprintf("SwapUint64 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapUint64Method(uaddr *uint64, count int) {
+ addr := (*Uint64)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
+ old := addr.Swap(new)
+ if old>>32 != old<<32>>32 {
+ panic(fmt.Sprintf("SwapUint64 is not atomic: %v", old))
+ }
+ }
+}
+
+const arch32 = unsafe.Sizeof(uintptr(0)) == 4
+
+func hammerSwapUintptr64(uaddr *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ if !arch32 {
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32
+ old := SwapUintptr(addr, new)
+ if old>>32 != old<<32>>32 {
+ panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old))
+ }
+ }
+ }
+}
+
+func hammerSwapUintptr64Method(uaddr *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ if !arch32 {
+ addr := (*Uintptr)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32
+ old := addr.Swap(new)
+ if old>>32 != old<<32>>32 {
+ panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old))
+ }
+ }
+ }
+}
+
+func hammerAddInt64(uaddr *uint64, count int) {
+ addr := (*int64)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ AddInt64(addr, 1)
+ }
+}
+
+func hammerAddInt64Method(uaddr *uint64, count int) {
+ addr := (*Int64)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ addr.Add(1)
+ }
+}
+
+func hammerAddUint64(addr *uint64, count int) {
+ for i := 0; i < count; i++ {
+ AddUint64(addr, 1)
+ }
+}
+
+func hammerAddUint64Method(uaddr *uint64, count int) {
+ addr := (*Uint64)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ addr.Add(1)
+ }
+}
+
+func hammerAddUintptr64(uaddr *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ AddUintptr(addr, 1)
+ }
+}
+
+func hammerAddUintptr64Method(uaddr *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ addr := (*Uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ addr.Add(1)
+ }
+}
+
+func hammerCompareAndSwapInt64(uaddr *uint64, count int) {
+ addr := (*int64)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := LoadInt64(addr)
+ if CompareAndSwapInt64(addr, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapInt64Method(uaddr *uint64, count int) {
+ addr := (*Int64)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := addr.Load()
+ if addr.CompareAndSwap(v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUint64(addr *uint64, count int) {
+ for i := 0; i < count; i++ {
+ for {
+ v := LoadUint64(addr)
+ if CompareAndSwapUint64(addr, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUint64Method(uaddr *uint64, count int) {
+ addr := (*Uint64)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := addr.Load()
+ if addr.CompareAndSwap(v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUintptr64(uaddr *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := LoadUintptr(addr)
+ if CompareAndSwapUintptr(addr, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUintptr64Method(uaddr *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ addr := (*Uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := addr.Load()
+ if addr.CompareAndSwap(v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func TestHammer64(t *testing.T) {
+ const p = 4
+ n := 100000
+ if testing.Short() {
+ n = 1000
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
+
+ for name, testf := range hammer64 {
+ c := make(chan int)
+ var val uint64
+ for i := 0; i < p; i++ {
+ go func() {
+ defer func() {
+ if err := recover(); err != nil {
+ t.Error(err.(string))
+ }
+ c <- 1
+ }()
+ testf(&val, n)
+ }()
+ }
+ for i := 0; i < p; i++ {
+ <-c
+ }
+ if !strings.HasPrefix(name, "Swap") && val != uint64(n)*p {
+ t.Fatalf("%s: val=%d want %d", name, val, n*p)
+ }
+ }
+}
+
+func hammerStoreLoadInt32(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*int32)(paddr)
+ v := LoadInt32(addr)
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Int32: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ StoreInt32(addr, new)
+}
+
+func hammerStoreLoadInt32Method(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*int32)(paddr)
+ v := LoadInt32(addr)
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Int32: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ StoreInt32(addr, new)
+}
+
+func hammerStoreLoadUint32(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*uint32)(paddr)
+ v := LoadUint32(addr)
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Uint32: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ StoreUint32(addr, new)
+}
+
+func hammerStoreLoadUint32Method(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*Uint32)(paddr)
+ v := addr.Load()
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Uint32: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ addr.Store(new)
+}
+
+func hammerStoreLoadInt64(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*int64)(paddr)
+ v := LoadInt64(addr)
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Int64: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<32
+ StoreInt64(addr, new)
+}
+
+func hammerStoreLoadInt64Method(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*Int64)(paddr)
+ v := addr.Load()
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Int64: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<32
+ addr.Store(new)
+}
+
+func hammerStoreLoadUint64(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*uint64)(paddr)
+ v := LoadUint64(addr)
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Uint64: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<32
+ StoreUint64(addr, new)
+}
+
+func hammerStoreLoadUint64Method(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*Uint64)(paddr)
+ v := addr.Load()
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Uint64: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<32
+ addr.Store(new)
+}
+
+func hammerStoreLoadUintptr(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*uintptr)(paddr)
+ v := LoadUintptr(addr)
+ new := v
+ if arch32 {
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
+ }
+ new = v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ } else {
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
+ }
+ inc := uint64(1 + 1<<32)
+ new = v + uintptr(inc)
+ }
+ StoreUintptr(addr, new)
+}
+
+//go:nocheckptr
+func hammerStoreLoadUintptrMethod(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*Uintptr)(paddr)
+ v := addr.Load()
+ new := v
+ if arch32 {
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
+ }
+ new = v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ } else {
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
+ }
+ inc := uint64(1 + 1<<32)
+ new = v + uintptr(inc)
+ }
+ addr.Store(new)
+}
+
+// This code is just testing that LoadPointer/StorePointer operate
+// atomically; it's not actually calculating pointers.
+//
+//go:nocheckptr
+func hammerStoreLoadPointer(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*unsafe.Pointer)(paddr)
+ v := uintptr(LoadPointer(addr))
+ new := v
+ if arch32 {
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
+ }
+ new = v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ } else {
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
+ }
+ inc := uint64(1 + 1<<32)
+ new = v + uintptr(inc)
+ }
+ StorePointer(addr, unsafe.Pointer(new))
+}
+
+// This code is just testing that LoadPointer/StorePointer operate
+// atomically; it's not actually calculating pointers.
+//
+//go:nocheckptr
+func hammerStoreLoadPointerMethod(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*Pointer[byte])(paddr)
+ v := uintptr(unsafe.Pointer(addr.Load()))
+ new := v
+ if arch32 {
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
+ }
+ new = v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ } else {
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
+ }
+ inc := uint64(1 + 1<<32)
+ new = v + uintptr(inc)
+ }
+ addr.Store((*byte)(unsafe.Pointer(new)))
+}
+
+func TestHammerStoreLoad(t *testing.T) {
+ tests := []func(*testing.T, unsafe.Pointer){
+ hammerStoreLoadInt32, hammerStoreLoadUint32,
+ hammerStoreLoadUintptr, hammerStoreLoadPointer,
+ hammerStoreLoadInt32Method, hammerStoreLoadUint32Method,
+ hammerStoreLoadUintptrMethod, hammerStoreLoadPointerMethod,
+ hammerStoreLoadInt64, hammerStoreLoadUint64,
+ hammerStoreLoadInt64Method, hammerStoreLoadUint64Method,
+ }
+ n := int(1e6)
+ if testing.Short() {
+ n = int(1e4)
+ }
+ const procs = 8
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(procs))
+ // Disable the GC because hammerStoreLoadPointer invokes
+ // write barriers on values that aren't real pointers.
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
+ // Ensure any in-progress GC is finished.
+ runtime.GC()
+ for _, tt := range tests {
+ c := make(chan int)
+ var val uint64
+ for p := 0; p < procs; p++ {
+ go func() {
+ for i := 0; i < n; i++ {
+ tt(t, unsafe.Pointer(&val))
+ }
+ c <- 1
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+ }
+}
+
+func TestStoreLoadSeqCst32(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int32(1e3)
+ if testing.Short() {
+ N = int32(1e2)
+ }
+ c := make(chan bool, 2)
+ X := [2]int32{}
+ ack := [2][3]int32{{-1, -1, -1}, {-1, -1, -1}}
+ for p := 0; p < 2; p++ {
+ go func(me int) {
+ he := 1 - me
+ for i := int32(1); i < N; i++ {
+ StoreInt32(&X[me], i)
+ my := LoadInt32(&X[he])
+ StoreInt32(&ack[me][i%3], my)
+ for w := 1; LoadInt32(&ack[he][i%3]) == -1; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ his := LoadInt32(&ack[he][i%3])
+ if (my != i && my != i-1) || (his != i && his != i-1) {
+ t.Errorf("invalid values: %d/%d (%d)", my, his, i)
+ break
+ }
+ if my != i && his != i {
+ t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+ break
+ }
+ StoreInt32(&ack[me][(i-1)%3], -1)
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
+func TestStoreLoadSeqCst64(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int64(1e3)
+ if testing.Short() {
+ N = int64(1e2)
+ }
+ c := make(chan bool, 2)
+ X := [2]int64{}
+ ack := [2][3]int64{{-1, -1, -1}, {-1, -1, -1}}
+ for p := 0; p < 2; p++ {
+ go func(me int) {
+ he := 1 - me
+ for i := int64(1); i < N; i++ {
+ StoreInt64(&X[me], i)
+ my := LoadInt64(&X[he])
+ StoreInt64(&ack[me][i%3], my)
+ for w := 1; LoadInt64(&ack[he][i%3]) == -1; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ his := LoadInt64(&ack[he][i%3])
+ if (my != i && my != i-1) || (his != i && his != i-1) {
+ t.Errorf("invalid values: %d/%d (%d)", my, his, i)
+ break
+ }
+ if my != i && his != i {
+ t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+ break
+ }
+ StoreInt64(&ack[me][(i-1)%3], -1)
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
+func TestStoreLoadRelAcq32(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int32(1e3)
+ if testing.Short() {
+ N = int32(1e2)
+ }
+ c := make(chan bool, 2)
+ type Data struct {
+ signal int32
+ pad1 [128]int8
+ data1 int32
+ pad2 [128]int8
+ data2 float32
+ }
+ var X Data
+ for p := int32(0); p < 2; p++ {
+ go func(p int32) {
+ for i := int32(1); i < N; i++ {
+ if (i+p)%2 == 0 {
+ X.data1 = i
+ X.data2 = float32(i)
+ StoreInt32(&X.signal, i)
+ } else {
+ for w := 1; LoadInt32(&X.signal) != i; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ d1 := X.data1
+ d2 := X.data2
+ if d1 != i || d2 != float32(i) {
+ t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i)
+ break
+ }
+ }
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
+func TestStoreLoadRelAcq64(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int64(1e3)
+ if testing.Short() {
+ N = int64(1e2)
+ }
+ c := make(chan bool, 2)
+ type Data struct {
+ signal int64
+ pad1 [128]int8
+ data1 int64
+ pad2 [128]int8
+ data2 float64
+ }
+ var X Data
+ for p := int64(0); p < 2; p++ {
+ go func(p int64) {
+ for i := int64(1); i < N; i++ {
+ if (i+p)%2 == 0 {
+ X.data1 = i
+ X.data2 = float64(i)
+ StoreInt64(&X.signal, i)
+ } else {
+ for w := 1; LoadInt64(&X.signal) != i; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ d1 := X.data1
+ d2 := X.data2
+ if d1 != i || d2 != float64(i) {
+ t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i)
+ break
+ }
+ }
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
+func shouldPanic(t *testing.T, name string, f func()) {
+ defer func() {
+ // Check that all GC maps are sane.
+ runtime.GC()
+
+ err := recover()
+ want := "unaligned 64-bit atomic operation"
+ if err == nil {
+ t.Errorf("%s did not panic", name)
+ } else if s, _ := err.(string); s != want {
+ t.Errorf("%s: wanted panic %q, got %q", name, want, err)
+ }
+ }()
+ f()
+}
+
+func TestUnaligned64(t *testing.T) {
+ // Unaligned 64-bit atomics on 32-bit systems are
+ // a continual source of pain. Test that on 32-bit systems they crash
+ // instead of failing silently.
+ if !arch32 {
+ t.Skip("test only runs on 32-bit systems")
+ }
+
+ x := make([]uint32, 4)
+ p := (*uint64)(unsafe.Pointer(&x[1])) // misaligned
+
+ shouldPanic(t, "LoadUint64", func() { LoadUint64(p) })
+ shouldPanic(t, "LoadUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Load() })
+ shouldPanic(t, "StoreUint64", func() { StoreUint64(p, 1) })
+ shouldPanic(t, "StoreUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Store(1) })
+ shouldPanic(t, "CompareAndSwapUint64", func() { CompareAndSwapUint64(p, 1, 2) })
+ shouldPanic(t, "CompareAndSwapUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).CompareAndSwap(1, 2) })
+ shouldPanic(t, "AddUint64", func() { AddUint64(p, 3) })
+ shouldPanic(t, "AddUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Add(3) })
+}
+
+func TestAutoAligned64(t *testing.T) {
+ var signed struct {
+ _ uint32
+ i Int64
+ }
+ if o := reflect.TypeOf(&signed).Elem().Field(1).Offset; o != 8 {
+ t.Fatalf("Int64 offset = %d, want 8", o)
+ }
+ if p := reflect.ValueOf(&signed).Elem().Field(1).Addr().Pointer(); p&7 != 0 {
+ t.Fatalf("Int64 pointer = %#x, want 8-aligned", p)
+ }
+
+ var unsigned struct {
+ _ uint32
+ i Uint64
+ }
+ if o := reflect.TypeOf(&unsigned).Elem().Field(1).Offset; o != 8 {
+ t.Fatalf("Uint64 offset = %d, want 8", o)
+ }
+ if p := reflect.ValueOf(&unsigned).Elem().Field(1).Addr().Pointer(); p&7 != 0 {
+ t.Fatalf("Int64 pointer = %#x, want 8-aligned", p)
+ }
+}
+
+func TestNilDeref(t *testing.T) {
+ funcs := [...]func(){
+ func() { CompareAndSwapInt32(nil, 0, 0) },
+ func() { (*Int32)(nil).CompareAndSwap(0, 0) },
+ func() { CompareAndSwapInt64(nil, 0, 0) },
+ func() { (*Int64)(nil).CompareAndSwap(0, 0) },
+ func() { CompareAndSwapUint32(nil, 0, 0) },
+ func() { (*Uint32)(nil).CompareAndSwap(0, 0) },
+ func() { CompareAndSwapUint64(nil, 0, 0) },
+ func() { (*Uint64)(nil).CompareAndSwap(0, 0) },
+ func() { CompareAndSwapUintptr(nil, 0, 0) },
+ func() { (*Uintptr)(nil).CompareAndSwap(0, 0) },
+ func() { CompareAndSwapPointer(nil, nil, nil) },
+ func() { (*Pointer[byte])(nil).CompareAndSwap(nil, nil) },
+ func() { SwapInt32(nil, 0) },
+ func() { (*Int32)(nil).Swap(0) },
+ func() { SwapUint32(nil, 0) },
+ func() { (*Uint32)(nil).Swap(0) },
+ func() { SwapInt64(nil, 0) },
+ func() { (*Int64)(nil).Swap(0) },
+ func() { SwapUint64(nil, 0) },
+ func() { (*Uint64)(nil).Swap(0) },
+ func() { SwapUintptr(nil, 0) },
+ func() { (*Uintptr)(nil).Swap(0) },
+ func() { SwapPointer(nil, nil) },
+ func() { (*Pointer[byte])(nil).Swap(nil) },
+ func() { AddInt32(nil, 0) },
+ func() { (*Int32)(nil).Add(0) },
+ func() { AddUint32(nil, 0) },
+ func() { (*Uint32)(nil).Add(0) },
+ func() { AddInt64(nil, 0) },
+ func() { (*Int64)(nil).Add(0) },
+ func() { AddUint64(nil, 0) },
+ func() { (*Uint64)(nil).Add(0) },
+ func() { AddUintptr(nil, 0) },
+ func() { (*Uintptr)(nil).Add(0) },
+ func() { LoadInt32(nil) },
+ func() { (*Int32)(nil).Load() },
+ func() { LoadInt64(nil) },
+ func() { (*Int64)(nil).Load() },
+ func() { LoadUint32(nil) },
+ func() { (*Uint32)(nil).Load() },
+ func() { LoadUint64(nil) },
+ func() { (*Uint64)(nil).Load() },
+ func() { LoadUintptr(nil) },
+ func() { (*Uintptr)(nil).Load() },
+ func() { LoadPointer(nil) },
+ func() { (*Pointer[byte])(nil).Load() },
+ func() { StoreInt32(nil, 0) },
+ func() { (*Int32)(nil).Store(0) },
+ func() { StoreInt64(nil, 0) },
+ func() { (*Int64)(nil).Store(0) },
+ func() { StoreUint32(nil, 0) },
+ func() { (*Uint32)(nil).Store(0) },
+ func() { StoreUint64(nil, 0) },
+ func() { (*Uint64)(nil).Store(0) },
+ func() { StoreUintptr(nil, 0) },
+ func() { (*Uintptr)(nil).Store(0) },
+ func() { StorePointer(nil, nil) },
+ func() { (*Pointer[byte])(nil).Store(nil) },
+ }
+ for _, f := range funcs {
+ func() {
+ defer func() {
+ runtime.GC()
+ recover()
+ }()
+ f()
+ }()
+ }
+}
+
+// Test that this compiles.
+// When atomic.Pointer used _ [0]T, it did not.
+type List struct {
+ Next Pointer[List]
+}
diff --git a/src/sync/atomic/doc.go b/src/sync/atomic/doc.go
new file mode 100644
index 0000000..c22d115
--- /dev/null
+++ b/src/sync/atomic/doc.go
@@ -0,0 +1,192 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package atomic provides low-level atomic memory primitives
+// useful for implementing synchronization algorithms.
+//
+// These functions require great care to be used correctly.
+// Except for special, low-level applications, synchronization is better
+// done with channels or the facilities of the [sync] package.
+// Share memory by communicating;
+// don't communicate by sharing memory.
+//
+// The swap operation, implemented by the SwapT functions, is the atomic
+// equivalent of:
+//
+// old = *addr
+// *addr = new
+// return old
+//
+// The compare-and-swap operation, implemented by the CompareAndSwapT
+// functions, is the atomic equivalent of:
+//
+// if *addr == old {
+// *addr = new
+// return true
+// }
+// return false
+//
+// The add operation, implemented by the AddT functions, is the atomic
+// equivalent of:
+//
+// *addr += delta
+// return *addr
+//
+// The load and store operations, implemented by the LoadT and StoreT
+// functions, are the atomic equivalents of "return *addr" and
+// "*addr = val".
+//
+// In the terminology of the Go memory model, if the effect of
+// an atomic operation A is observed by atomic operation B,
+// then A “synchronizes before” B.
+// Additionally, all the atomic operations executed in a program
+// behave as though executed in some sequentially consistent order.
+// This definition provides the same semantics as
+// C++'s sequentially consistent atomics and Java's volatile variables.
+package atomic
+
+import (
+ "unsafe"
+)
+
+// BUG(rsc): On 386, the 64-bit functions use instructions unavailable before the Pentium MMX.
+//
+// On non-Linux ARM, the 64-bit functions use instructions unavailable before the ARMv6k core.
+//
+// On ARM, 386, and 32-bit MIPS, it is the caller's responsibility to arrange
+// for 64-bit alignment of 64-bit words accessed atomically via the primitive
+// atomic functions (types [Int64] and [Uint64] are automatically aligned).
+// The first word in an allocated struct, array, or slice; in a global
+// variable; or in a local variable (because the subject of all atomic operations
+// will escape to the heap) can be relied upon to be 64-bit aligned.
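For instance, on the 32-bit platforms listed above the second field of the first struct below may land at a non-8-aligned offset and trip this requirement, while the typed atomic avoids it; both struct names are illustrative, and the sketch assumes "sync/atomic" is imported:

	// n may not be 8-aligned here, so AddInt64(&c.n, 1) can panic with
	// "unaligned 64-bit atomic operation" on 32-bit targets.
	type counters struct {
		flag uint32
		n    int64
	}

	// atomic.Int64 is always 64-bit aligned, so this layout is safe.
	type alignedCounters struct {
		flag uint32
		n    atomic.Int64
	}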
+
+// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
+// Consider using the more ergonomic and less error-prone [Int32.Swap] instead.
+func SwapInt32(addr *int32, new int32) (old int32)
+
+// SwapInt64 atomically stores new into *addr and returns the previous *addr value.
+// Consider using the more ergonomic and less error-prone [Int64.Swap] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func SwapInt64(addr *int64, new int64) (old int64)
+
+// SwapUint32 atomically stores new into *addr and returns the previous *addr value.
+// Consider using the more ergonomic and less error-prone [Uint32.Swap] instead.
+func SwapUint32(addr *uint32, new uint32) (old uint32)
+
+// SwapUint64 atomically stores new into *addr and returns the previous *addr value.
+// Consider using the more ergonomic and less error-prone [Uint64.Swap] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func SwapUint64(addr *uint64, new uint64) (old uint64)
+
+// SwapUintptr atomically stores new into *addr and returns the previous *addr value.
+// Consider using the more ergonomic and less error-prone [Uintptr.Swap] instead.
+func SwapUintptr(addr *uintptr, new uintptr) (old uintptr)
+
+// SwapPointer atomically stores new into *addr and returns the previous *addr value.
+// Consider using the more ergonomic and less error-prone [Pointer.Swap] instead.
+func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)
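One common use of the swap operation is to read and reset a word in a single atomic step; a minimal sketch, assuming "sync/atomic" is imported and pending is a package-level variable:

	var pending uint32

	// drain atomically fetches the accumulated flags and clears them,
	// so no update between the read and the reset can be lost.
	func drain() uint32 {
		return atomic.SwapUint32(&pending, 0)
	}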
+
+// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
+// Consider using the more ergonomic and less error-prone [Int32.CompareAndSwap] instead.
+func CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
+
+// CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.
+// Consider using the more ergonomic and less error-prone [Int64.CompareAndSwap] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)
+
+// CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.
+// Consider using the more ergonomic and less error-prone [Uint32.CompareAndSwap] instead.
+func CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)
+
+// CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.
+// Consider using the more ergonomic and less error-prone [Uint64.CompareAndSwap] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
+
+// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
+// Consider using the more ergonomic and less error-prone [Uintptr.CompareAndSwap] instead.
+func CompareAndSwapUintptr(addr *uintptr, old, new uintptr) (swapped bool)
+
+// CompareAndSwapPointer executes the compare-and-swap operation for an unsafe.Pointer value.
+// Consider using the more ergonomic and less error-prone [Pointer.CompareAndSwap] instead.
+func CompareAndSwapPointer(addr *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool)
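Compare-and-swap is usually wrapped in a retry loop so an update can depend on the current value; a minimal sketch, assuming "sync/atomic" is imported (addCapped and its parameters are illustrative):

	// addCapped adds delta to *total unless that would exceed limit,
	// which a plain AddInt64 cannot express.
	func addCapped(total *int64, delta, limit int64) bool {
		for {
			old := atomic.LoadInt64(total)
			if old+delta > limit {
				return false
			}
			if atomic.CompareAndSwapInt64(total, old, old+delta) {
				return true
			}
			// Another goroutine changed *total in between; retry.
		}
	}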
+
+// AddInt32 atomically adds delta to *addr and returns the new value.
+// Consider using the more ergonomic and less error-prone [Int32.Add] instead.
+func AddInt32(addr *int32, delta int32) (new int32)
+
+// AddUint32 atomically adds delta to *addr and returns the new value.
+// To subtract a signed positive constant value c from x, do AddUint32(&x, ^uint32(c-1)).
+// In particular, to decrement x, do AddUint32(&x, ^uint32(0)).
+// Consider using the more ergonomic and less error-prone [Uint32.Add] instead.
+func AddUint32(addr *uint32, delta uint32) (new uint32)
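A worked instance of the subtraction idiom above, with an illustrative starting value (assumes "sync/atomic" is imported):

	func example() uint32 {
		var x uint32 = 10
		atomic.AddUint32(&x, ^uint32(3-1)) // subtract 3: x is now 7
		atomic.AddUint32(&x, ^uint32(0))   // decrement:  x is now 6
		return x
	}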
+
+// AddInt64 atomically adds delta to *addr and returns the new value.
+// Consider using the more ergonomic and less error-prone [Int64.Add] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func AddInt64(addr *int64, delta int64) (new int64)
+
+// AddUint64 atomically adds delta to *addr and returns the new value.
+// To subtract a signed positive constant value c from x, do AddUint64(&x, ^uint64(c-1)).
+// In particular, to decrement x, do AddUint64(&x, ^uint64(0)).
+// Consider using the more ergonomic and less error-prone [Uint64.Add] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func AddUint64(addr *uint64, delta uint64) (new uint64)
+
+// AddUintptr atomically adds delta to *addr and returns the new value.
+// Consider using the more ergonomic and less error-prone [Uintptr.Add] instead.
+func AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
+
+// LoadInt32 atomically loads *addr.
+// Consider using the more ergonomic and less error-prone [Int32.Load] instead.
+func LoadInt32(addr *int32) (val int32)
+
+// LoadInt64 atomically loads *addr.
+// Consider using the more ergonomic and less error-prone [Int64.Load] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func LoadInt64(addr *int64) (val int64)
+
+// LoadUint32 atomically loads *addr.
+// Consider using the more ergonomic and less error-prone [Uint32.Load] instead.
+func LoadUint32(addr *uint32) (val uint32)
+
+// LoadUint64 atomically loads *addr.
+// Consider using the more ergonomic and less error-prone [Uint64.Load] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func LoadUint64(addr *uint64) (val uint64)
+
+// LoadUintptr atomically loads *addr.
+// Consider using the more ergonomic and less error-prone [Uintptr.Load] instead.
+func LoadUintptr(addr *uintptr) (val uintptr)
+
+// LoadPointer atomically loads *addr.
+// Consider using the more ergonomic and less error-prone [Pointer.Load] instead.
+func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
+
+// StoreInt32 atomically stores val into *addr.
+// Consider using the more ergonomic and less error-prone [Int32.Store] instead.
+func StoreInt32(addr *int32, val int32)
+
+// StoreInt64 atomically stores val into *addr.
+// Consider using the more ergonomic and less error-prone [Int64.Store] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func StoreInt64(addr *int64, val int64)
+
+// StoreUint32 atomically stores val into *addr.
+// Consider using the more ergonomic and less error-prone [Uint32.Store] instead.
+func StoreUint32(addr *uint32, val uint32)
+
+// StoreUint64 atomically stores val into *addr.
+// Consider using the more ergonomic and less error-prone [Uint64.Store] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func StoreUint64(addr *uint64, val uint64)
+
+// StoreUintptr atomically stores val into *addr.
+// Consider using the more ergonomic and less error-prone [Uintptr.Store] instead.
+func StoreUintptr(addr *uintptr, val uintptr)
+
+// StorePointer atomically stores val into *addr.
+// Consider using the more ergonomic and less error-prone [Pointer.Store] instead.
+func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer)
diff --git a/src/sync/atomic/example_test.go b/src/sync/atomic/example_test.go
new file mode 100644
index 0000000..09ae0aa
--- /dev/null
+++ b/src/sync/atomic/example_test.go
@@ -0,0 +1,76 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic_test
+
+import (
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+func loadConfig() map[string]string {
+ return make(map[string]string)
+}
+
+func requests() chan int {
+ return make(chan int)
+}
+
+// The following example shows how to use Value for periodic program config updates
+// and propagation of the changes to worker goroutines.
+func ExampleValue_config() {
+ var config atomic.Value // holds current server configuration
+ // Create initial config value and store into config.
+ config.Store(loadConfig())
+ go func() {
+ // Reload config every 10 seconds
+ // and update config value with the new version.
+ for {
+ time.Sleep(10 * time.Second)
+ config.Store(loadConfig())
+ }
+ }()
+ // Create worker goroutines that handle incoming requests
+ // using the latest config value.
+ for i := 0; i < 10; i++ {
+ go func() {
+ for r := range requests() {
+ c := config.Load()
+ // Handle request r using config c.
+ _, _ = r, c
+ }
+ }()
+ }
+}
+
+// The following example shows how to maintain a scalable, frequently read
+// but infrequently updated data structure using the copy-on-write idiom.
+func ExampleValue_readMostly() {
+ type Map map[string]string
+ var m atomic.Value
+ m.Store(make(Map))
+ var mu sync.Mutex // used only by writers
+ // read function can be used to read the data without further synchronization
+ read := func(key string) (val string) {
+ m1 := m.Load().(Map)
+ return m1[key]
+ }
+ // insert function can be used to update the data without further synchronization
+ insert := func(key, val string) {
+ mu.Lock() // synchronize with other potential writers
+ defer mu.Unlock()
+ m1 := m.Load().(Map) // load current value of the data structure
+ m2 := make(Map) // create a new value
+ for k, v := range m1 {
+ m2[k] = v // copy all data from the current object to the new one
+ }
+ m2[key] = val // do the update that we need
+ m.Store(m2) // atomically replace the current object with the new one
+ // At this point all new readers start working with the new version.
+ // The old version will be garbage collected once the existing readers
+ // (if any) are done with it.
+ }
+ _, _ = read, insert
+}
diff --git a/src/sync/atomic/race.s b/src/sync/atomic/race.s
new file mode 100644
index 0000000..90bd69f
--- /dev/null
+++ b/src/sync/atomic/race.s
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+// This file is here only to allow external functions.
+// The operations are implemented in src/runtime/race_amd64.s
diff --git a/src/sync/atomic/type.go b/src/sync/atomic/type.go
new file mode 100644
index 0000000..179fa93
--- /dev/null
+++ b/src/sync/atomic/type.go
@@ -0,0 +1,200 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import "unsafe"
+
+// A Bool is an atomic boolean value.
+// The zero value is false.
+type Bool struct {
+ _ noCopy
+ v uint32
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Bool) Load() bool { return LoadUint32(&x.v) != 0 }
+
+// Store atomically stores val into x.
+func (x *Bool) Store(val bool) { StoreUint32(&x.v, b32(val)) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Bool) Swap(new bool) (old bool) { return SwapUint32(&x.v, b32(new)) != 0 }
+
+// CompareAndSwap executes the compare-and-swap operation for the boolean value x.
+func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) {
+ return CompareAndSwapUint32(&x.v, b32(old), b32(new))
+}
+
+// b32 returns a uint32 0 or 1 representing b.
+func b32(b bool) uint32 {
+ if b {
+ return 1
+ }
+ return 0
+}
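A minimal sketch of Bool as a one-shot flag set by one goroutine and observed by another; the surrounding program is illustrative:

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	func main() {
		var ready atomic.Bool
		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			ready.Store(true) // other goroutines observe this via Load
		}()
		wg.Wait()
		fmt.Println(ready.Load()) // true
	}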
+
+// For testing that *Pointer[T]'s methods can be inlined.
+// Keep in sync with cmd/compile/internal/test/inl_test.go:TestIntendedInlining.
+var _ = &Pointer[int]{}
+
+// A Pointer is an atomic pointer of type *T. The zero value is a nil *T.
+type Pointer[T any] struct {
+ // Mention *T in a field to disallow conversion between Pointer types.
+ // See go.dev/issue/56603 for more details.
+ // Use *T, not T, to avoid spurious recursive type definition errors.
+ _ [0]*T
+
+ _ noCopy
+ v unsafe.Pointer
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Pointer[T]) Load() *T { return (*T)(LoadPointer(&x.v)) }
+
+// Store atomically stores val into x.
+func (x *Pointer[T]) Store(val *T) { StorePointer(&x.v, unsafe.Pointer(val)) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Pointer[T]) Swap(new *T) (old *T) { return (*T)(SwapPointer(&x.v, unsafe.Pointer(new))) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
+ return CompareAndSwapPointer(&x.v, unsafe.Pointer(old), unsafe.Pointer(new))
+}
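A minimal sketch of Pointer[T] holding a hot-swappable value; the Config type is an assumed example, not part of this package:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	type Config struct{ TimeoutSec int }

	var current atomic.Pointer[Config]

	func main() {
		current.Store(&Config{TimeoutSec: 30})
		// Readers get a consistent *Config without locking.
		fmt.Println(current.Load().TimeoutSec) // 30
		// CompareAndSwap succeeds only if the pointer is unchanged.
		old := current.Load()
		fmt.Println(current.CompareAndSwap(old, &Config{TimeoutSec: 60})) // true
	}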
+
+// An Int32 is an atomic int32. The zero value is zero.
+type Int32 struct {
+ _ noCopy
+ v int32
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Int32) Load() int32 { return LoadInt32(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Int32) Store(val int32) { StoreInt32(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Int32) Swap(new int32) (old int32) { return SwapInt32(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Int32) CompareAndSwap(old, new int32) (swapped bool) {
+ return CompareAndSwapInt32(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Int32) Add(delta int32) (new int32) { return AddInt32(&x.v, delta) }
+
+// An Int64 is an atomic int64. The zero value is zero.
+type Int64 struct {
+ _ noCopy
+ _ align64
+ v int64
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Int64) Load() int64 { return LoadInt64(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Int64) Store(val int64) { StoreInt64(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Int64) Swap(new int64) (old int64) { return SwapInt64(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Int64) CompareAndSwap(old, new int64) (swapped bool) {
+ return CompareAndSwapInt64(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Int64) Add(delta int64) (new int64) { return AddInt64(&x.v, delta) }
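A minimal sketch of Int64 as a shared counter; unlike a bare int64 handed to AddInt64, it needs no manual alignment care on 32-bit targets (names are illustrative, "sync/atomic" assumed imported):

	var ops atomic.Int64

	func record() int64   { return ops.Add(1) } // returns the updated count
	func snapshot() int64 { return ops.Load() }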
+
+// A Uint32 is an atomic uint32. The zero value is zero.
+type Uint32 struct {
+ _ noCopy
+ v uint32
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Uint32) Load() uint32 { return LoadUint32(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Uint32) Store(val uint32) { StoreUint32(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Uint32) Swap(new uint32) (old uint32) { return SwapUint32(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Uint32) CompareAndSwap(old, new uint32) (swapped bool) {
+ return CompareAndSwapUint32(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Uint32) Add(delta uint32) (new uint32) { return AddUint32(&x.v, delta) }
+
+// A Uint64 is an atomic uint64. The zero value is zero.
+type Uint64 struct {
+ _ noCopy
+ _ align64
+ v uint64
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Uint64) Load() uint64 { return LoadUint64(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Uint64) Store(val uint64) { StoreUint64(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Uint64) Swap(new uint64) (old uint64) { return SwapUint64(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Uint64) CompareAndSwap(old, new uint64) (swapped bool) {
+ return CompareAndSwapUint64(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Uint64) Add(delta uint64) (new uint64) { return AddUint64(&x.v, delta) }
+
+// A Uintptr is an atomic uintptr. The zero value is zero.
+type Uintptr struct {
+ _ noCopy
+ v uintptr
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Uintptr) Load() uintptr { return LoadUintptr(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Uintptr) Store(val uintptr) { StoreUintptr(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Uintptr) Swap(new uintptr) (old uintptr) { return SwapUintptr(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) {
+ return CompareAndSwapUintptr(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Uintptr) Add(delta uintptr) (new uintptr) { return AddUintptr(&x.v, delta) }
+
+// noCopy may be added to structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+//
+// Note that it must not be embedded, due to the Lock and Unlock methods.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+func (*noCopy) Unlock() {}
+
+// align64 may be added to structs that must be 64-bit aligned.
+// This struct is recognized by a special case in the compiler
+// and will not work if copied to any other package.
+type align64 struct{}
diff --git a/src/sync/atomic/value.go b/src/sync/atomic/value.go
new file mode 100644
index 0000000..a57b08a
--- /dev/null
+++ b/src/sync/atomic/value.go
@@ -0,0 +1,194 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import (
+ "unsafe"
+)
+
+// A Value provides an atomic load and store of a consistently typed value.
+// The zero value for a Value returns nil from Load.
+// Once Store has been called, a Value must not be copied.
+//
+// A Value must not be copied after first use.
+type Value struct {
+ v any
+}
+
+// efaceWords is the internal representation of interface{}.
+type efaceWords struct {
+ typ unsafe.Pointer
+ data unsafe.Pointer
+}
+
+// Load returns the value set by the most recent Store.
+// It returns nil if there has been no call to Store for this Value.
+func (v *Value) Load() (val any) {
+ vp := (*efaceWords)(unsafe.Pointer(v))
+ typ := LoadPointer(&vp.typ)
+ if typ == nil || typ == unsafe.Pointer(&firstStoreInProgress) {
+ // First store not yet completed.
+ return nil
+ }
+ data := LoadPointer(&vp.data)
+ vlp := (*efaceWords)(unsafe.Pointer(&val))
+ vlp.typ = typ
+ vlp.data = data
+ return
+}
+
+var firstStoreInProgress byte
+
+// Store sets the value of the Value v to val.
+// All calls to Store for a given Value must use values of the same concrete type.
+// Store of an inconsistent type panics, as does Store(nil).
+func (v *Value) Store(val any) {
+ if val == nil {
+ panic("sync/atomic: store of nil value into Value")
+ }
+ vp := (*efaceWords)(unsafe.Pointer(v))
+ vlp := (*efaceWords)(unsafe.Pointer(&val))
+ for {
+ typ := LoadPointer(&vp.typ)
+ if typ == nil {
+ // Attempt to start first store.
+ // Disable preemption so that other goroutines can use
+ // active spin wait to wait for completion.
+ runtime_procPin()
+ if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
+ runtime_procUnpin()
+ continue
+ }
+ // Complete first store.
+ StorePointer(&vp.data, vlp.data)
+ StorePointer(&vp.typ, vlp.typ)
+ runtime_procUnpin()
+ return
+ }
+ if typ == unsafe.Pointer(&firstStoreInProgress) {
+ // First store in progress. Wait.
+ // Since we disable preemption around the first store,
+ // we can wait with active spinning.
+ continue
+ }
+ // First store completed. Check type and overwrite data.
+ if typ != vlp.typ {
+ panic("sync/atomic: store of inconsistently typed value into Value")
+ }
+ StorePointer(&vp.data, vlp.data)
+ return
+ }
+}
+
+// Swap stores new into Value and returns the previous value. It returns nil if
+// the Value is empty.
+//
+// All calls to Swap for a given Value must use values of the same concrete
+// type. Swap of an inconsistent type panics, as does Swap(nil).
+func (v *Value) Swap(new any) (old any) {
+ if new == nil {
+ panic("sync/atomic: swap of nil value into Value")
+ }
+ vp := (*efaceWords)(unsafe.Pointer(v))
+ np := (*efaceWords)(unsafe.Pointer(&new))
+ for {
+ typ := LoadPointer(&vp.typ)
+ if typ == nil {
+ // Attempt to start first store.
+ // Disable preemption so that other goroutines can use
+ // active spin wait to wait for completion; and so that
+ // GC does not see the fake type accidentally.
+ runtime_procPin()
+ if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
+ runtime_procUnpin()
+ continue
+ }
+ // Complete first store.
+ StorePointer(&vp.data, np.data)
+ StorePointer(&vp.typ, np.typ)
+ runtime_procUnpin()
+ return nil
+ }
+ if typ == unsafe.Pointer(&firstStoreInProgress) {
+ // First store in progress. Wait.
+ // Since we disable preemption around the first store,
+ // we can wait with active spinning.
+ continue
+ }
+ // First store completed. Check type and overwrite data.
+ if typ != np.typ {
+ panic("sync/atomic: swap of inconsistently typed value into Value")
+ }
+ op := (*efaceWords)(unsafe.Pointer(&old))
+ op.typ, op.data = np.typ, SwapPointer(&vp.data, np.data)
+ return old
+ }
+}
+
+// CompareAndSwap executes the compare-and-swap operation for the Value.
+//
+// All calls to CompareAndSwap for a given Value must use values of the same
+// concrete type. CompareAndSwap of an inconsistent type panics, as does
+// CompareAndSwap(old, nil).
+func (v *Value) CompareAndSwap(old, new any) (swapped bool) {
+ if new == nil {
+ panic("sync/atomic: compare and swap of nil value into Value")
+ }
+ vp := (*efaceWords)(unsafe.Pointer(v))
+ np := (*efaceWords)(unsafe.Pointer(&new))
+ op := (*efaceWords)(unsafe.Pointer(&old))
+ if op.typ != nil && np.typ != op.typ {
+ panic("sync/atomic: compare and swap of inconsistently typed values")
+ }
+ for {
+ typ := LoadPointer(&vp.typ)
+ if typ == nil {
+ if old != nil {
+ return false
+ }
+ // Attempt to start first store.
+ // Disable preemption so that other goroutines can use
+ // active spin wait to wait for completion; and so that
+ // GC does not see the fake type accidentally.
+ runtime_procPin()
+ if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
+ runtime_procUnpin()
+ continue
+ }
+ // Complete first store.
+ StorePointer(&vp.data, np.data)
+ StorePointer(&vp.typ, np.typ)
+ runtime_procUnpin()
+ return true
+ }
+ if typ == unsafe.Pointer(&firstStoreInProgress) {
+ // First store in progress. Wait.
+ // Since we disable preemption around the first store,
+ // we can wait with active spinning.
+ continue
+ }
+ // First store completed. Check type and overwrite data.
+ if typ != np.typ {
+ panic("sync/atomic: compare and swap of inconsistently typed value into Value")
+ }
+ // Compare old and current via runtime equality check.
+ // This allows value types to be compared, something
+ // not offered by the package functions.
+ // CompareAndSwapPointer below only ensures vp.data
+ // has not changed since LoadPointer.
+ data := LoadPointer(&vp.data)
+ var i any
+ (*efaceWords)(unsafe.Pointer(&i)).typ = typ
+ (*efaceWords)(unsafe.Pointer(&i)).data = data
+ if i != old {
+ return false
+ }
+ return CompareAndSwapPointer(&vp.data, data, np.data)
+ }
+}
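A minimal sketch of CompareAndSwap on a Value holding ints; the runtime equality check above is what lets these comparable values match:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		var v atomic.Value
		v.Store(1)
		fmt.Println(v.CompareAndSwap(1, 2)) // true: current value equals old
		fmt.Println(v.CompareAndSwap(1, 3)) // false: current value is now 2
		fmt.Println(v.Load())               // 2
	}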
+
+// Disable/enable preemption, implemented in runtime.
+func runtime_procPin() int
+func runtime_procUnpin()
diff --git a/src/sync/atomic/value_test.go b/src/sync/atomic/value_test.go
new file mode 100644
index 0000000..721da96
--- /dev/null
+++ b/src/sync/atomic/value_test.go
@@ -0,0 +1,274 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic_test
+
+import (
+ "math/rand"
+ "runtime"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ . "sync/atomic"
+ "testing"
+)
+
+func TestValue(t *testing.T) {
+ var v Value
+ if v.Load() != nil {
+ t.Fatal("initial Value is not nil")
+ }
+ v.Store(42)
+ x := v.Load()
+ if xx, ok := x.(int); !ok || xx != 42 {
+ t.Fatalf("wrong value: got %+v, want 42", x)
+ }
+ v.Store(84)
+ x = v.Load()
+ if xx, ok := x.(int); !ok || xx != 84 {
+ t.Fatalf("wrong value: got %+v, want 84", x)
+ }
+}
+
+func TestValueLarge(t *testing.T) {
+ var v Value
+ v.Store("foo")
+ x := v.Load()
+ if xx, ok := x.(string); !ok || xx != "foo" {
+ t.Fatalf("wrong value: got %+v, want foo", x)
+ }
+ v.Store("barbaz")
+ x = v.Load()
+ if xx, ok := x.(string); !ok || xx != "barbaz" {
+ t.Fatalf("wrong value: got %+v, want barbaz", x)
+ }
+}
+
+func TestValuePanic(t *testing.T) {
+ const nilErr = "sync/atomic: store of nil value into Value"
+ const badErr = "sync/atomic: store of inconsistently typed value into Value"
+ var v Value
+ func() {
+ defer func() {
+ err := recover()
+ if err != nilErr {
+ t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr)
+ }
+ }()
+ v.Store(nil)
+ }()
+ v.Store(42)
+ func() {
+ defer func() {
+ err := recover()
+ if err != badErr {
+ t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, badErr)
+ }
+ }()
+ v.Store("foo")
+ }()
+ func() {
+ defer func() {
+ err := recover()
+ if err != nilErr {
+ t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr)
+ }
+ }()
+ v.Store(nil)
+ }()
+}
+
+func TestValueConcurrent(t *testing.T) {
+ tests := [][]any{
+ {uint16(0), ^uint16(0), uint16(1 + 2<<8), uint16(3 + 4<<8)},
+ {uint32(0), ^uint32(0), uint32(1 + 2<<16), uint32(3 + 4<<16)},
+ {uint64(0), ^uint64(0), uint64(1 + 2<<32), uint64(3 + 4<<32)},
+ {complex(0, 0), complex(1, 2), complex(3, 4), complex(5, 6)},
+ }
+ p := 4 * runtime.GOMAXPROCS(0)
+ N := int(1e5)
+ if testing.Short() {
+ p /= 2
+ N = 1e3
+ }
+ for _, test := range tests {
+ var v Value
+ done := make(chan bool, p)
+ for i := 0; i < p; i++ {
+ go func() {
+ r := rand.New(rand.NewSource(rand.Int63()))
+ expected := true
+ loop:
+ for j := 0; j < N; j++ {
+ x := test[r.Intn(len(test))]
+ v.Store(x)
+ x = v.Load()
+ for _, x1 := range test {
+ if x == x1 {
+ continue loop
+ }
+ }
+ t.Logf("loaded unexpected value %+v, want %+v", x, test)
+ expected = false
+ break
+ }
+ done <- expected
+ }()
+ }
+ for i := 0; i < p; i++ {
+ if !<-done {
+ t.FailNow()
+ }
+ }
+ }
+}
+
+func BenchmarkValueRead(b *testing.B) {
+ var v Value
+ v.Store(new(int))
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ x := v.Load().(*int)
+ if *x != 0 {
+ b.Fatalf("wrong value: got %v, want 0", *x)
+ }
+ }
+ })
+}
+
+var Value_SwapTests = []struct {
+ init any
+ new any
+ want any
+ err any
+}{
+ {init: nil, new: nil, err: "sync/atomic: swap of nil value into Value"},
+ {init: nil, new: true, want: nil, err: nil},
+ {init: true, new: "", err: "sync/atomic: swap of inconsistently typed value into Value"},
+ {init: true, new: false, want: true, err: nil},
+}
+
+func TestValue_Swap(t *testing.T) {
+ for i, tt := range Value_SwapTests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ var v Value
+ if tt.init != nil {
+ v.Store(tt.init)
+ }
+ defer func() {
+ err := recover()
+ switch {
+ case tt.err == nil && err != nil:
+ t.Errorf("should not panic, got %v", err)
+ case tt.err != nil && err == nil:
+ t.Errorf("should panic %v, got <nil>", tt.err)
+ }
+ }()
+ if got := v.Swap(tt.new); got != tt.want {
+ t.Errorf("got %v, want %v", got, tt.want)
+ }
+ if got := v.Load(); got != tt.new {
+ t.Errorf("got %v, want %v", got, tt.new)
+ }
+ })
+ }
+}
+
+func TestValueSwapConcurrent(t *testing.T) {
+ var v Value
+ var count uint64
+ var g sync.WaitGroup
+ var m, n uint64 = 10000, 10000
+ if testing.Short() {
+ m = 1000
+ n = 1000
+ }
+ for i := uint64(0); i < m*n; i += n {
+ i := i
+ g.Add(1)
+ go func() {
+ var c uint64
+ for new := i; new < i+n; new++ {
+ if old := v.Swap(new); old != nil {
+ c += old.(uint64)
+ }
+ }
+ atomic.AddUint64(&count, c)
+ g.Done()
+ }()
+ }
+ g.Wait()
+ if want, got := (m*n-1)*(m*n)/2, count+v.Load().(uint64); got != want {
+ t.Errorf("sum from 0 to %d was %d, want %v", m*n-1, got, want)
+ }
+}
+
+var heapA, heapB = struct{ uint }{0}, struct{ uint }{0}
+
+var Value_CompareAndSwapTests = []struct {
+ init any
+ new any
+ old any
+ want bool
+ err any
+}{
+ {init: nil, new: nil, old: nil, err: "sync/atomic: compare and swap of nil value into Value"},
+ {init: nil, new: true, old: "", err: "sync/atomic: compare and swap of inconsistently typed values into Value"},
+ {init: nil, new: true, old: true, want: false, err: nil},
+ {init: nil, new: true, old: nil, want: true, err: nil},
+ {init: true, new: "", err: "sync/atomic: compare and swap of inconsistently typed value into Value"},
+ {init: true, new: true, old: false, want: false, err: nil},
+ {init: true, new: true, old: true, want: true, err: nil},
+ {init: heapA, new: struct{ uint }{1}, old: heapB, want: true, err: nil},
+}
+
+func TestValue_CompareAndSwap(t *testing.T) {
+ for i, tt := range Value_CompareAndSwapTests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ var v Value
+ if tt.init != nil {
+ v.Store(tt.init)
+ }
+ defer func() {
+ err := recover()
+ switch {
+ case tt.err == nil && err != nil:
+ t.Errorf("got %v, wanted no panic", err)
+ case tt.err != nil && err == nil:
+ t.Errorf("did not panic, want %v", tt.err)
+ }
+ }()
+ if got := v.CompareAndSwap(tt.old, tt.new); got != tt.want {
+ t.Errorf("got %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestValueCompareAndSwapConcurrent(t *testing.T) {
+ var v Value
+ var w sync.WaitGroup
+ v.Store(0)
+ m, n := 1000, 100
+ if testing.Short() {
+ m = 100
+ n = 100
+ }
+ for i := 0; i < m; i++ {
+ i := i
+ w.Add(1)
+ go func() {
+ for j := i; j < m*n; runtime.Gosched() {
+ if v.CompareAndSwap(j, j+1) {
+ j += m
+ }
+ }
+ w.Done()
+ }()
+ }
+ w.Wait()
+ if stop := v.Load().(int); stop != m*n {
+ t.Errorf("did not get to %v, stopped at %v", m*n, stop)
+ }
+}
diff --git a/src/sync/cond.go b/src/sync/cond.go
new file mode 100644
index 0000000..cc927ad
--- /dev/null
+++ b/src/sync/cond.go
@@ -0,0 +1,117 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// Cond implements a condition variable, a rendezvous point
+// for goroutines waiting for or announcing the occurrence
+// of an event.
+//
+// Each Cond has an associated Locker L (often a *Mutex or *RWMutex),
+// which must be held when changing the condition and
+// when calling the Wait method.
+//
+// A Cond must not be copied after first use.
+//
+// In the terminology of the Go memory model, Cond arranges that
+// a call to Broadcast or Signal “synchronizes before” any Wait call
+// that it unblocks.
+//
+// For many simple use cases, users will be better off using channels than a
+// Cond (Broadcast corresponds to closing a channel, and Signal corresponds to
+// sending on a channel).
+//
+// For more on replacements for sync.Cond, see [Roberto Clapis's series on
+// advanced concurrency patterns], as well as [Bryan Mills's talk on concurrency
+// patterns].
+//
+// [Roberto Clapis's series on advanced concurrency patterns]: https://blogtitle.github.io/categories/concurrency/
+// [Bryan Mills's talk on concurrency patterns]: https://drive.google.com/file/d/1nPdvhB0PutEJzdCq5ms6UI58dp50fcAN/view
+type Cond struct {
+ noCopy noCopy
+
+ // L is held while observing or changing the condition
+ L Locker
+
+ notify notifyList
+ checker copyChecker
+}
+
+// NewCond returns a new Cond with Locker l.
+func NewCond(l Locker) *Cond {
+ return &Cond{L: l}
+}
+
+// Wait atomically unlocks c.L and suspends execution
+// of the calling goroutine. After later resuming execution,
+// Wait locks c.L before returning. Unlike in other systems,
+// Wait cannot return unless awoken by Broadcast or Signal.
+//
+// Because c.L is not locked while Wait is waiting, the caller
+// typically cannot assume that the condition is true when
+// Wait returns. Instead, the caller should Wait in a loop:
+//
+// c.L.Lock()
+// for !condition() {
+// c.Wait()
+// }
+// ... make use of condition ...
+// c.L.Unlock()
+func (c *Cond) Wait() {
+ c.checker.check()
+ t := runtime_notifyListAdd(&c.notify)
+ c.L.Unlock()
+ runtime_notifyListWait(&c.notify, t)
+ c.L.Lock()
+}
+
+// Signal wakes one goroutine waiting on c, if there is any.
+//
+// It is allowed but not required for the caller to hold c.L
+// during the call.
+//
+// Signal() does not affect goroutine scheduling priority; if other goroutines
+// are attempting to lock c.L, they may be awoken before a "waiting" goroutine.
+func (c *Cond) Signal() {
+ c.checker.check()
+ runtime_notifyListNotifyOne(&c.notify)
+}
+
+// Broadcast wakes all goroutines waiting on c.
+//
+// It is allowed but not required for the caller to hold c.L
+// during the call.
+func (c *Cond) Broadcast() {
+ c.checker.check()
+ runtime_notifyListNotifyAll(&c.notify)
+}
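A minimal sketch of the Wait-in-a-loop pattern documented above, with Signal waking one consumer; the FIFO queue type is assumed for illustration, and the sketch assumes "sync" is imported:

	type queue struct {
		mu    sync.Mutex
		cond  *sync.Cond
		items []int
	}

	func newQueue() *queue {
		q := new(queue)
		q.cond = sync.NewCond(&q.mu)
		return q
	}

	func (q *queue) put(v int) {
		q.mu.Lock()
		q.items = append(q.items, v)
		q.mu.Unlock()
		q.cond.Signal() // wake one waiter, if any
	}

	func (q *queue) get() int {
		q.mu.Lock()
		defer q.mu.Unlock()
		for len(q.items) == 0 { // re-check the condition after every wakeup
			q.cond.Wait()
		}
		v := q.items[0]
		q.items = q.items[1:]
		return v
	}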
+
+// copyChecker holds a back pointer to itself to detect object copying.
+type copyChecker uintptr
+
+func (c *copyChecker) check() {
+ if uintptr(*c) != uintptr(unsafe.Pointer(c)) &&
+ !atomic.CompareAndSwapUintptr((*uintptr)(c), 0, uintptr(unsafe.Pointer(c))) &&
+ uintptr(*c) != uintptr(unsafe.Pointer(c)) {
+ panic("sync.Cond is copied")
+ }
+}
+
+// noCopy may be added to structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+//
+// Note that it must not be embedded, due to the Lock and Unlock methods.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+func (*noCopy) Unlock() {}
diff --git a/src/sync/cond_test.go b/src/sync/cond_test.go
new file mode 100644
index 0000000..aa134e3
--- /dev/null
+++ b/src/sync/cond_test.go
@@ -0,0 +1,311 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "reflect"
+ "runtime"
+ . "sync"
+ "testing"
+)
+
+func TestCondSignal(t *testing.T) {
+ var m Mutex
+ c := NewCond(&m)
+ n := 2
+ running := make(chan bool, n)
+ awake := make(chan bool, n)
+ for i := 0; i < n; i++ {
+ go func() {
+ m.Lock()
+ running <- true
+ c.Wait()
+ awake <- true
+ m.Unlock()
+ }()
+ }
+ for i := 0; i < n; i++ {
+ <-running // Wait for everyone to run.
+ }
+ for n > 0 {
+ select {
+ case <-awake:
+ t.Fatal("goroutine not asleep")
+ default:
+ }
+ m.Lock()
+ c.Signal()
+ m.Unlock()
+ <-awake // Will deadlock if no goroutine wakes up
+ select {
+ case <-awake:
+ t.Fatal("too many goroutines awake")
+ default:
+ }
+ n--
+ }
+ c.Signal()
+}
+
+func TestCondSignalGenerations(t *testing.T) {
+ var m Mutex
+ c := NewCond(&m)
+ n := 100
+ running := make(chan bool, n)
+ awake := make(chan int, n)
+ for i := 0; i < n; i++ {
+ go func(i int) {
+ m.Lock()
+ running <- true
+ c.Wait()
+ awake <- i
+ m.Unlock()
+ }(i)
+ if i > 0 {
+ a := <-awake
+ if a != i-1 {
+ t.Fatalf("wrong goroutine woke up: want %d, got %d", i-1, a)
+ }
+ }
+ <-running
+ m.Lock()
+ c.Signal()
+ m.Unlock()
+ }
+}
+
+func TestCondBroadcast(t *testing.T) {
+ var m Mutex
+ c := NewCond(&m)
+ n := 200
+ running := make(chan int, n)
+ awake := make(chan int, n)
+ exit := false
+ for i := 0; i < n; i++ {
+ go func(g int) {
+ m.Lock()
+ for !exit {
+ running <- g
+ c.Wait()
+ awake <- g
+ }
+ m.Unlock()
+ }(i)
+ }
+ for i := 0; i < n; i++ {
+ for i := 0; i < n; i++ {
+ <-running // Will deadlock unless n are running.
+ }
+ if i == n-1 {
+ m.Lock()
+ exit = true
+ m.Unlock()
+ }
+ select {
+ case <-awake:
+ t.Fatal("goroutine not asleep")
+ default:
+ }
+ m.Lock()
+ c.Broadcast()
+ m.Unlock()
+ seen := make([]bool, n)
+ for i := 0; i < n; i++ {
+ g := <-awake
+ if seen[g] {
+ t.Fatal("goroutine woke up twice")
+ }
+ seen[g] = true
+ }
+ }
+ select {
+ case <-running:
+ t.Fatal("goroutine did not exit")
+ default:
+ }
+ c.Broadcast()
+}
+
+func TestRace(t *testing.T) {
+ x := 0
+ c := NewCond(&Mutex{})
+ done := make(chan bool)
+ go func() {
+ c.L.Lock()
+ x = 1
+ c.Wait()
+ if x != 2 {
+ t.Error("want 2")
+ }
+ x = 3
+ c.Signal()
+ c.L.Unlock()
+ done <- true
+ }()
+ go func() {
+ c.L.Lock()
+ for {
+ if x == 1 {
+ x = 2
+ c.Signal()
+ break
+ }
+ c.L.Unlock()
+ runtime.Gosched()
+ c.L.Lock()
+ }
+ c.L.Unlock()
+ done <- true
+ }()
+ go func() {
+ c.L.Lock()
+ for {
+ if x == 2 {
+ c.Wait()
+ if x != 3 {
+ t.Error("want 3")
+ }
+ break
+ }
+ if x == 3 {
+ break
+ }
+ c.L.Unlock()
+ runtime.Gosched()
+ c.L.Lock()
+ }
+ c.L.Unlock()
+ done <- true
+ }()
+ <-done
+ <-done
+ <-done
+}
+
+func TestCondSignalStealing(t *testing.T) {
+ for iters := 0; iters < 1000; iters++ {
+ var m Mutex
+ cond := NewCond(&m)
+
+ // Start a waiter.
+ ch := make(chan struct{})
+ go func() {
+ m.Lock()
+ ch <- struct{}{}
+ cond.Wait()
+ m.Unlock()
+
+ ch <- struct{}{}
+ }()
+
+ <-ch
+ m.Lock()
+ m.Unlock()
+
+ // We know that the waiter is in the cond.Wait() call because we
+ // synchronized with it, then acquired/released the mutex it was
+ // holding when we synchronized.
+ //
+ // Start two goroutines that will race: one will broadcast on
+ // the cond var, the other will wait on it.
+ //
+ // The new waiter may or may not get notified, but the first one
+ // has to be notified.
+ done := false
+ go func() {
+ cond.Broadcast()
+ }()
+
+ go func() {
+ m.Lock()
+ for !done {
+ cond.Wait()
+ }
+ m.Unlock()
+ }()
+
+ // Check that the first waiter does get signaled.
+ <-ch
+
+ // Release the second waiter in case it didn't get the
+ // broadcast.
+ m.Lock()
+ done = true
+ m.Unlock()
+ cond.Broadcast()
+ }
+}
+
+func TestCondCopy(t *testing.T) {
+ defer func() {
+ err := recover()
+ if err == nil || err.(string) != "sync.Cond is copied" {
+ t.Fatalf("got %v, expect sync.Cond is copied", err)
+ }
+ }()
+ c := Cond{L: &Mutex{}}
+ c.Signal()
+ var c2 Cond
+ reflect.ValueOf(&c2).Elem().Set(reflect.ValueOf(&c).Elem()) // c2 := c, hidden from vet
+ c2.Signal()
+}
+
+func BenchmarkCond1(b *testing.B) {
+ benchmarkCond(b, 1)
+}
+
+func BenchmarkCond2(b *testing.B) {
+ benchmarkCond(b, 2)
+}
+
+func BenchmarkCond4(b *testing.B) {
+ benchmarkCond(b, 4)
+}
+
+func BenchmarkCond8(b *testing.B) {
+ benchmarkCond(b, 8)
+}
+
+func BenchmarkCond16(b *testing.B) {
+ benchmarkCond(b, 16)
+}
+
+func BenchmarkCond32(b *testing.B) {
+ benchmarkCond(b, 32)
+}
+
+func benchmarkCond(b *testing.B, waiters int) {
+ c := NewCond(&Mutex{})
+ done := make(chan bool)
+ id := 0
+
+ for routine := 0; routine < waiters+1; routine++ {
+ go func() {
+ for i := 0; i < b.N; i++ {
+ c.L.Lock()
+ if id == -1 {
+ c.L.Unlock()
+ break
+ }
+ id++
+ if id == waiters+1 {
+ id = 0
+ c.Broadcast()
+ } else {
+ c.Wait()
+ }
+ c.L.Unlock()
+ }
+ c.L.Lock()
+ id = -1
+ c.Broadcast()
+ c.L.Unlock()
+ done <- true
+ }()
+ }
+ for routine := 0; routine < waiters+1; routine++ {
+ <-done
+ }
+}
diff --git a/src/sync/example_pool_test.go b/src/sync/example_pool_test.go
new file mode 100644
index 0000000..2fb4c1e
--- /dev/null
+++ b/src/sync/example_pool_test.go
@@ -0,0 +1,45 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "sync"
+ "time"
+)
+
+var bufPool = sync.Pool{
+ New: func() any {
+ // The Pool's New function should generally only return pointer
+ // types, since a pointer can be put into the return interface
+ // value without an allocation:
+ return new(bytes.Buffer)
+ },
+}
+
+// timeNow is a fake version of time.Now for tests.
+func timeNow() time.Time {
+ return time.Unix(1136214245, 0)
+}
+
+func Log(w io.Writer, key, val string) {
+ b := bufPool.Get().(*bytes.Buffer)
+ b.Reset()
+ // Replace this with time.Now() in a real logger.
+ b.WriteString(timeNow().UTC().Format(time.RFC3339))
+ b.WriteByte(' ')
+ b.WriteString(key)
+ b.WriteByte('=')
+ b.WriteString(val)
+ w.Write(b.Bytes())
+ bufPool.Put(b)
+}
+
+func ExamplePool() {
+ Log(os.Stdout, "path", "/search?q=flowers")
+ // Output: 2006-01-02T15:04:05Z path=/search?q=flowers
+}
diff --git a/src/sync/example_test.go b/src/sync/example_test.go
new file mode 100644
index 0000000..f009a68
--- /dev/null
+++ b/src/sync/example_test.go
@@ -0,0 +1,59 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "fmt"
+ "sync"
+)
+
+type httpPkg struct{}
+
+func (httpPkg) Get(url string) {}
+
+var http httpPkg
+
+// This example fetches several URLs concurrently,
+// using a WaitGroup to block until all the fetches are complete.
+func ExampleWaitGroup() {
+ var wg sync.WaitGroup
+ var urls = []string{
+ "http://www.golang.org/",
+ "http://www.google.com/",
+ "http://www.example.com/",
+ }
+ for _, url := range urls {
+ // Increment the WaitGroup counter.
+ wg.Add(1)
+ // Launch a goroutine to fetch the URL.
+ go func(url string) {
+ // Decrement the counter when the goroutine completes.
+ defer wg.Done()
+ // Fetch the URL.
+ http.Get(url)
+ }(url)
+ }
+ // Wait for all HTTP fetches to complete.
+ wg.Wait()
+}
+
+func ExampleOnce() {
+ var once sync.Once
+ onceBody := func() {
+ fmt.Println("Only once")
+ }
+ done := make(chan bool)
+ for i := 0; i < 10; i++ {
+ go func() {
+ once.Do(onceBody)
+ done <- true
+ }()
+ }
+ for i := 0; i < 10; i++ {
+ <-done
+ }
+ // Output:
+ // Only once
+}
diff --git a/src/sync/export_test.go b/src/sync/export_test.go
new file mode 100644
index 0000000..c020ef7
--- /dev/null
+++ b/src/sync/export_test.go
@@ -0,0 +1,57 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+// Export for testing.
+var Runtime_Semacquire = runtime_Semacquire
+var Runtime_Semrelease = runtime_Semrelease
+var Runtime_procPin = runtime_procPin
+var Runtime_procUnpin = runtime_procUnpin
+
+// poolDequeue testing.
+type PoolDequeue interface {
+ PushHead(val any) bool
+ PopHead() (any, bool)
+ PopTail() (any, bool)
+}
+
+func NewPoolDequeue(n int) PoolDequeue {
+ d := &poolDequeue{
+ vals: make([]eface, n),
+ }
+ // For testing purposes, set the head and tail indexes close
+ // to wrapping around.
+ d.headTail = d.pack(1<<dequeueBits-500, 1<<dequeueBits-500)
+ return d
+}
+
+func (d *poolDequeue) PushHead(val any) bool {
+ return d.pushHead(val)
+}
+
+func (d *poolDequeue) PopHead() (any, bool) {
+ return d.popHead()
+}
+
+func (d *poolDequeue) PopTail() (any, bool) {
+ return d.popTail()
+}
+
+func NewPoolChain() PoolDequeue {
+ return new(poolChain)
+}
+
+func (c *poolChain) PushHead(val any) bool {
+ c.pushHead(val)
+ return true
+}
+
+func (c *poolChain) PopHead() (any, bool) {
+ return c.popHead()
+}
+
+func (c *poolChain) PopTail() (any, bool) {
+ return c.popTail()
+}
diff --git a/src/sync/map.go b/src/sync/map.go
new file mode 100644
index 0000000..e8ccf58
--- /dev/null
+++ b/src/sync/map.go
@@ -0,0 +1,515 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "sync/atomic"
+)
+
+// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
+// by multiple goroutines without additional locking or coordination.
+// Loads, stores, and deletes run in amortized constant time.
+//
+// The Map type is specialized. Most code should use a plain Go map instead,
+// with separate locking or coordination, for better type safety and to make it
+// easier to maintain other invariants along with the map content.
+//
+// The Map type is optimized for two common use cases: (1) when the entry for a given
+// key is only ever written once but read many times, as in caches that only grow,
+// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
+// sets of keys. In these two cases, use of a Map may significantly reduce lock
+// contention compared to a Go map paired with a separate Mutex or RWMutex.
+//
+// The zero Map is empty and ready for use. A Map must not be copied after first use.
+//
+// In the terminology of the Go memory model, Map arranges that a write operation
+// “synchronizes before” any read operation that observes the effect of the write, where
+// read and write operations are defined as follows.
+// Load, LoadAndDelete, LoadOrStore, Swap, CompareAndSwap, and CompareAndDelete
+// are read operations; Delete, LoadAndDelete, Store, and Swap are write operations;
+// LoadOrStore is a write operation when it returns loaded set to false;
+// CompareAndSwap is a write operation when it returns swapped set to true;
+// and CompareAndDelete is a write operation when it returns deleted set to true.
+type Map struct {
+ mu Mutex
+
+ // read contains the portion of the map's contents that are safe for
+ // concurrent access (with or without mu held).
+ //
+ // The read field itself is always safe to load, but must only be stored with
+ // mu held.
+ //
+ // Entries stored in read may be updated concurrently without mu, but updating
+ // a previously-expunged entry requires that the entry be copied to the dirty
+ // map and unexpunged with mu held.
+ read atomic.Pointer[readOnly]
+
+ // dirty contains the portion of the map's contents that require mu to be
+ // held. To ensure that the dirty map can be promoted to the read map quickly,
+ // it also includes all of the non-expunged entries in the read map.
+ //
+ // Expunged entries are not stored in the dirty map. An expunged entry in the
+ // clean map must be unexpunged and added to the dirty map before a new value
+ // can be stored to it.
+ //
+ // If the dirty map is nil, the next write to the map will initialize it by
+ // making a shallow copy of the clean map, omitting stale entries.
+ dirty map[any]*entry
+
+ // misses counts the number of loads since the read map was last updated that
+ // needed to lock mu to determine whether the key was present.
+ //
+ // Once enough misses have occurred to cover the cost of copying the dirty
+ // map, the dirty map will be promoted to the read map (in the unamended
+ // state) and the next store to the map will make a new dirty copy.
+ misses int
+}
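+
+// A minimal usage sketch of the operations classified above (caller code,
+// shown for illustration only; assumes m is used from another package):
+//
+//	var m sync.Map
+//	m.Store("alpha", 1)                          // write
+//	if v, ok := m.Load("alpha"); ok {            // read
+//		_ = v.(int)
+//	}
+//	if actual, loaded := m.LoadOrStore("beta", 2); !loaded {
+//		_ = actual // "beta" was absent, so this call acted as a write
+//	}
+//	m.Range(func(k, v any) bool { return true }) // read-only iteration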
+
+// readOnly is an immutable struct stored atomically in the Map.read field.
+type readOnly struct {
+ m map[any]*entry
+ amended bool // true if the dirty map contains some key not in m.
+}
+
+// expunged is an arbitrary pointer that marks entries which have been deleted
+// from the dirty map.
+var expunged = new(any)
+
+// An entry is a slot in the map corresponding to a particular key.
+type entry struct {
+ // p points to the interface{} value stored for the entry.
+ //
+ // If p == nil, the entry has been deleted, and either m.dirty == nil or
+ // m.dirty[key] is e.
+ //
+ // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
+ // is missing from m.dirty.
+ //
+ // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
+ // != nil, in m.dirty[key].
+ //
+ // An entry can be deleted by atomic replacement with nil: when m.dirty is
+ // next created, it will atomically replace nil with expunged and leave
+ // m.dirty[key] unset.
+ //
+ // An entry's associated value can be updated by atomic replacement, provided
+ // p != expunged. If p == expunged, an entry's associated value can be updated
+ // only after first setting m.dirty[key] = e so that lookups using the dirty
+ // map find the entry.
+ p atomic.Pointer[any]
+}
+
+func newEntry(i any) *entry {
+ e := &entry{}
+ e.p.Store(&i)
+ return e
+}
+
+func (m *Map) loadReadOnly() readOnly {
+ if p := m.read.Load(); p != nil {
+ return *p
+ }
+ return readOnly{}
+}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (m *Map) Load(key any) (value any, ok bool) {
+ read := m.loadReadOnly()
+ e, ok := read.m[key]
+ if !ok && read.amended {
+ m.mu.Lock()
+ // Avoid reporting a spurious miss if m.dirty got promoted while we were
+ // blocked on m.mu. (If further loads of the same key will not miss, it's
+ // not worth copying the dirty map for this key.)
+ read = m.loadReadOnly()
+ e, ok = read.m[key]
+ if !ok && read.amended {
+ e, ok = m.dirty[key]
+ // Regardless of whether the entry was present, record a miss: this key
+ // will take the slow path until the dirty map is promoted to the read
+ // map.
+ m.missLocked()
+ }
+ m.mu.Unlock()
+ }
+ if !ok {
+ return nil, false
+ }
+ return e.load()
+}
+
+func (e *entry) load() (value any, ok bool) {
+ p := e.p.Load()
+ if p == nil || p == expunged {
+ return nil, false
+ }
+ return *p, true
+}
+
+// Store sets the value for a key.
+func (m *Map) Store(key, value any) {
+ _, _ = m.Swap(key, value)
+}
+
+// tryCompareAndSwap compares the entry with the given old value and swaps
+// it with the new value if the entry is equal to the old value and has not
+// been expunged.
+//
+// If the entry is expunged, tryCompareAndSwap returns false and leaves
+// the entry unchanged.
+func (e *entry) tryCompareAndSwap(old, new any) bool {
+ p := e.p.Load()
+ if p == nil || p == expunged || *p != old {
+ return false
+ }
+
+ // Copy the interface after the first load to make this method more amenable
+ // to escape analysis: if the comparison fails from the start, we shouldn't
+ // bother heap-allocating an interface value to store.
+ nc := new
+ for {
+ if e.p.CompareAndSwap(p, &nc) {
+ return true
+ }
+ p = e.p.Load()
+ if p == nil || p == expunged || *p != old {
+ return false
+ }
+ }
+}
+
+// unexpungeLocked ensures that the entry is not marked as expunged.
+//
+// If the entry was previously expunged, it must be added to the dirty map
+// before m.mu is unlocked.
+func (e *entry) unexpungeLocked() (wasExpunged bool) {
+ return e.p.CompareAndSwap(expunged, nil)
+}
+
+// swapLocked unconditionally swaps a value into the entry.
+//
+// The entry must be known not to be expunged.
+func (e *entry) swapLocked(i *any) *any {
+ return e.p.Swap(i)
+}
+
+// LoadOrStore returns the existing value for the key if present.
+// Otherwise, it stores and returns the given value.
+// The loaded result is true if the value was loaded, false if stored.
+func (m *Map) LoadOrStore(key, value any) (actual any, loaded bool) {
+ // Avoid locking if it's a clean hit.
+ read := m.loadReadOnly()
+ if e, ok := read.m[key]; ok {
+ actual, loaded, ok := e.tryLoadOrStore(value)
+ if ok {
+ return actual, loaded
+ }
+ }
+
+ m.mu.Lock()
+ read = m.loadReadOnly()
+ if e, ok := read.m[key]; ok {
+ if e.unexpungeLocked() {
+ m.dirty[key] = e
+ }
+ actual, loaded, _ = e.tryLoadOrStore(value)
+ } else if e, ok := m.dirty[key]; ok {
+ actual, loaded, _ = e.tryLoadOrStore(value)
+ m.missLocked()
+ } else {
+ if !read.amended {
+ // We're adding the first new key to the dirty map.
+ // Make sure it is allocated and mark the read-only map as incomplete.
+ m.dirtyLocked()
+ m.read.Store(&readOnly{m: read.m, amended: true})
+ }
+ m.dirty[key] = newEntry(value)
+ actual, loaded = value, false
+ }
+ m.mu.Unlock()
+
+ return actual, loaded
+}
+
+// tryLoadOrStore atomically loads or stores a value if the entry is not
+// expunged.
+//
+// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
+// returns with ok==false.
+func (e *entry) tryLoadOrStore(i any) (actual any, loaded, ok bool) {
+ p := e.p.Load()
+ if p == expunged {
+ return nil, false, false
+ }
+ if p != nil {
+ return *p, true, true
+ }
+
+ // Copy the interface after the first load to make this method more amenable
+ // to escape analysis: if we hit the "load" path or the entry is expunged, we
+ // shouldn't bother heap-allocating.
+ ic := i
+ for {
+ if e.p.CompareAndSwap(nil, &ic) {
+ return i, false, true
+ }
+ p = e.p.Load()
+ if p == expunged {
+ return nil, false, false
+ }
+ if p != nil {
+ return *p, true, true
+ }
+ }
+}
+
+// LoadAndDelete deletes the value for a key, returning the previous value if any.
+// The loaded result reports whether the key was present.
+func (m *Map) LoadAndDelete(key any) (value any, loaded bool) {
+ read := m.loadReadOnly()
+ e, ok := read.m[key]
+ if !ok && read.amended {
+ m.mu.Lock()
+ read = m.loadReadOnly()
+ e, ok = read.m[key]
+ if !ok && read.amended {
+ e, ok = m.dirty[key]
+ delete(m.dirty, key)
+ // Regardless of whether the entry was present, record a miss: this key
+ // will take the slow path until the dirty map is promoted to the read
+ // map.
+ m.missLocked()
+ }
+ m.mu.Unlock()
+ }
+ if ok {
+ return e.delete()
+ }
+ return nil, false
+}
+
+// Delete deletes the value for a key.
+func (m *Map) Delete(key any) {
+ m.LoadAndDelete(key)
+}
+
+func (e *entry) delete() (value any, ok bool) {
+ for {
+ p := e.p.Load()
+ if p == nil || p == expunged {
+ return nil, false
+ }
+ if e.p.CompareAndSwap(p, nil) {
+ return *p, true
+ }
+ }
+}
+
+// trySwap swaps a value if the entry has not been expunged.
+//
+// If the entry is expunged, trySwap returns false and leaves the entry
+// unchanged.
+func (e *entry) trySwap(i *any) (*any, bool) {
+ for {
+ p := e.p.Load()
+ if p == expunged {
+ return nil, false
+ }
+ if e.p.CompareAndSwap(p, i) {
+ return p, true
+ }
+ }
+}
+
+// Swap swaps the value for a key and returns the previous value if any.
+// The loaded result reports whether the key was present.
+func (m *Map) Swap(key, value any) (previous any, loaded bool) {
+ read := m.loadReadOnly()
+ if e, ok := read.m[key]; ok {
+ if v, ok := e.trySwap(&value); ok {
+ if v == nil {
+ return nil, false
+ }
+ return *v, true
+ }
+ }
+
+ m.mu.Lock()
+ read = m.loadReadOnly()
+ if e, ok := read.m[key]; ok {
+ if e.unexpungeLocked() {
+ // The entry was previously expunged, which implies that there is a
+ // non-nil dirty map and this entry is not in it.
+ m.dirty[key] = e
+ }
+ if v := e.swapLocked(&value); v != nil {
+ loaded = true
+ previous = *v
+ }
+ } else if e, ok := m.dirty[key]; ok {
+ if v := e.swapLocked(&value); v != nil {
+ loaded = true
+ previous = *v
+ }
+ } else {
+ if !read.amended {
+ // We're adding the first new key to the dirty map.
+ // Make sure it is allocated and mark the read-only map as incomplete.
+ m.dirtyLocked()
+ m.read.Store(&readOnly{m: read.m, amended: true})
+ }
+ m.dirty[key] = newEntry(value)
+ }
+ m.mu.Unlock()
+ return previous, loaded
+}
+
+// CompareAndSwap swaps the old and new values for key
+// if the value stored in the map is equal to old.
+// The old value must be of a comparable type.
+func (m *Map) CompareAndSwap(key, old, new any) bool {
+ read := m.loadReadOnly()
+ if e, ok := read.m[key]; ok {
+ return e.tryCompareAndSwap(old, new)
+ } else if !read.amended {
+ return false // No existing value for key.
+ }
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ read = m.loadReadOnly()
+ swapped := false
+ if e, ok := read.m[key]; ok {
+ swapped = e.tryCompareAndSwap(old, new)
+ } else if e, ok := m.dirty[key]; ok {
+ swapped = e.tryCompareAndSwap(old, new)
+ // We needed to lock mu in order to load the entry for key,
+ // and the operation didn't change the set of keys in the map
+ // (so it would be made more efficient by promoting the dirty
+ // map to read-only).
+ // Count it as a miss so that we will eventually switch to the
+ // more efficient steady state.
+ m.missLocked()
+ }
+ return swapped
+}
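+
+// A sketch of a compare-and-swap retry loop built on the method above
+// (caller code for illustration; assumes m is a sync.Map whose "hits" entry,
+// when present, always holds an int):
+//
+//	for {
+//		old, ok := m.Load("hits")
+//		if !ok {
+//			if _, loaded := m.LoadOrStore("hits", 1); !loaded {
+//				break // we initialized the counter
+//			}
+//			continue // another goroutine initialized it; retry
+//		}
+//		if m.CompareAndSwap("hits", old, old.(int)+1) {
+//			break // our increment won the race
+//		}
+//	}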
+
+// CompareAndDelete deletes the entry for key if its value is equal to old.
+// The old value must be of a comparable type.
+//
+// If there is no current value for key in the map, CompareAndDelete
+// returns false (even if the old value is the nil interface value).
+func (m *Map) CompareAndDelete(key, old any) (deleted bool) {
+ read := m.loadReadOnly()
+ e, ok := read.m[key]
+ if !ok && read.amended {
+ m.mu.Lock()
+ read = m.loadReadOnly()
+ e, ok = read.m[key]
+ if !ok && read.amended {
+ e, ok = m.dirty[key]
+ // Don't delete key from m.dirty: we still need to do the “compare” part
+ // of the operation. The entry will eventually be expunged when the
+ // dirty map is promoted to the read map.
+ //
+ // Regardless of whether the entry was present, record a miss: this key
+ // will take the slow path until the dirty map is promoted to the read
+ // map.
+ m.missLocked()
+ }
+ m.mu.Unlock()
+ }
+ for ok {
+ p := e.p.Load()
+ if p == nil || p == expunged || *p != old {
+ return false
+ }
+ if e.p.CompareAndSwap(p, nil) {
+ return true
+ }
+ }
+ return false
+}
+
+// Range calls f sequentially for each key and value present in the map.
+// If f returns false, range stops the iteration.
+//
+// Range does not necessarily correspond to any consistent snapshot of the Map's
+// contents: no key will be visited more than once, but if the value for any key
+// is stored or deleted concurrently (including by f), Range may reflect any
+// mapping for that key from any point during the Range call. Range does not
+// block other methods on the receiver; even f itself may call any method on m.
+//
+// Range may be O(N) with the number of elements in the map even if f returns
+// false after a constant number of calls.
+func (m *Map) Range(f func(key, value any) bool) {
+ // We need to be able to iterate over all of the keys that were already
+ // present at the start of the call to Range.
+ // If read.amended is false, then read.m satisfies that property without
+ // requiring us to hold m.mu for a long time.
+ read := m.loadReadOnly()
+ if read.amended {
+ // m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
+ // (assuming the caller does not break out early), so a call to Range
+ // amortizes an entire copy of the map: we can promote the dirty copy
+ // immediately!
+ m.mu.Lock()
+ read = m.loadReadOnly()
+ if read.amended {
+ read = readOnly{m: m.dirty}
+ m.read.Store(&read)
+ m.dirty = nil
+ m.misses = 0
+ }
+ m.mu.Unlock()
+ }
+
+ for k, e := range read.m {
+ v, ok := e.load()
+ if !ok {
+ continue
+ }
+ if !f(k, v) {
+ break
+ }
+ }
+}
+
+func (m *Map) missLocked() {
+ m.misses++
+ if m.misses < len(m.dirty) {
+ return
+ }
+ m.read.Store(&readOnly{m: m.dirty})
+ m.dirty = nil
+ m.misses = 0
+}
+
+func (m *Map) dirtyLocked() {
+ if m.dirty != nil {
+ return
+ }
+
+ read := m.loadReadOnly()
+ m.dirty = make(map[any]*entry, len(read.m))
+ for k, e := range read.m {
+ if !e.tryExpungeLocked() {
+ m.dirty[k] = e
+ }
+ }
+}
+
+func (e *entry) tryExpungeLocked() (isExpunged bool) {
+ p := e.p.Load()
+ for p == nil {
+ if e.p.CompareAndSwap(nil, expunged) {
+ return true
+ }
+ p = e.p.Load()
+ }
+ return p == expunged
+}
diff --git a/src/sync/map_bench_test.go b/src/sync/map_bench_test.go
new file mode 100644
index 0000000..eebec3b
--- /dev/null
+++ b/src/sync/map_bench_test.go
@@ -0,0 +1,535 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+ "sync/atomic"
+ "testing"
+)
+
+type bench struct {
+ setup func(*testing.B, mapInterface)
+ perG func(b *testing.B, pb *testing.PB, i int, m mapInterface)
+}
+
+func benchMap(b *testing.B, bench bench) {
+ for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &sync.Map{}} {
+ b.Run(fmt.Sprintf("%T", m), func(b *testing.B) {
+ m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface)
+ if bench.setup != nil {
+ bench.setup(b, m)
+ }
+
+ b.ResetTimer()
+
+ var i int64
+ b.RunParallel(func(pb *testing.PB) {
+ id := int(atomic.AddInt64(&i, 1) - 1)
+ bench.perG(b, pb, id*b.N, m)
+ })
+ })
+ }
+}
+
+func BenchmarkLoadMostlyHits(b *testing.B) {
+ const hits, misses = 1023, 1
+
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.Load(i % (hits + misses))
+ }
+ },
+ })
+}
+
+func BenchmarkLoadMostlyMisses(b *testing.B) {
+ const hits, misses = 1, 1023
+
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.Load(i % (hits + misses))
+ }
+ },
+ })
+}
+
+func BenchmarkLoadOrStoreBalanced(b *testing.B) {
+ const hits, misses = 128, 128
+
+ benchMap(b, bench{
+ setup: func(b *testing.B, m mapInterface) {
+ if _, ok := m.(*DeepCopyMap); ok {
+ b.Skip("DeepCopyMap has quadratic running time.")
+ }
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ j := i % (hits + misses)
+ if j < hits {
+ if _, ok := m.LoadOrStore(j, i); !ok {
+ b.Fatalf("unexpected miss for %v", j)
+ }
+ } else {
+ if v, loaded := m.LoadOrStore(i, i); loaded {
+ b.Fatalf("failed to store %v: existing value %v", i, v)
+ }
+ }
+ }
+ },
+ })
+}
+
+func BenchmarkLoadOrStoreUnique(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(b *testing.B, m mapInterface) {
+ if _, ok := m.(*DeepCopyMap); ok {
+ b.Skip("DeepCopyMap has quadratic running time.")
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.LoadOrStore(i, i)
+ }
+ },
+ })
+}
+
+func BenchmarkLoadOrStoreCollision(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ m.LoadOrStore(0, 0)
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.LoadOrStore(0, 0)
+ }
+ },
+ })
+}
+
+func BenchmarkLoadAndDeleteBalanced(b *testing.B) {
+ const hits, misses = 128, 128
+
+ benchMap(b, bench{
+ setup: func(b *testing.B, m mapInterface) {
+ if _, ok := m.(*DeepCopyMap); ok {
+ b.Skip("DeepCopyMap has quadratic running time.")
+ }
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ j := i % (hits + misses)
+ if j < hits {
+ m.LoadAndDelete(j)
+ } else {
+ m.LoadAndDelete(i)
+ }
+ }
+ },
+ })
+}
+
+func BenchmarkLoadAndDeleteUnique(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(b *testing.B, m mapInterface) {
+ if _, ok := m.(*DeepCopyMap); ok {
+ b.Skip("DeepCopyMap has quadratic running time.")
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.LoadAndDelete(i)
+ }
+ },
+ })
+}
+
+func BenchmarkLoadAndDeleteCollision(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ m.LoadOrStore(0, 0)
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ if _, loaded := m.LoadAndDelete(0); loaded {
+ m.Store(0, 0)
+ }
+ }
+ },
+ })
+}
+
+func BenchmarkRange(b *testing.B) {
+ const mapSize = 1 << 10
+
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ for i := 0; i < mapSize; i++ {
+ m.Store(i, i)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.Range(func(_, _ any) bool { return true })
+ }
+ },
+ })
+}
+
+// BenchmarkAdversarialAlloc tests performance when we store a new value
+// immediately whenever the map is promoted to clean and otherwise load a
+// unique, missing key.
+//
+// This forces the Load calls to always acquire the map's mutex.
+func BenchmarkAdversarialAlloc(b *testing.B) {
+ benchMap(b, bench{
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ var stores, loadsSinceStore int64
+ for ; pb.Next(); i++ {
+ m.Load(i)
+ if loadsSinceStore++; loadsSinceStore > stores {
+ m.LoadOrStore(i, stores)
+ loadsSinceStore = 0
+ stores++
+ }
+ }
+ },
+ })
+}
+
+// BenchmarkAdversarialDelete tests performance when we periodically delete
+// one key and add a different one in a large map.
+//
+// This forces the Load calls to always acquire the map's mutex and periodically
+// makes a full copy of the map despite changing only one entry.
+func BenchmarkAdversarialDelete(b *testing.B) {
+ const mapSize = 1 << 10
+
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ for i := 0; i < mapSize; i++ {
+ m.Store(i, i)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.Load(i)
+
+ if i%mapSize == 0 {
+ m.Range(func(k, _ any) bool {
+ m.Delete(k)
+ return false
+ })
+ m.Store(i, i)
+ }
+ }
+ },
+ })
+}
+
+func BenchmarkDeleteCollision(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ m.LoadOrStore(0, 0)
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.Delete(0)
+ }
+ },
+ })
+}
+
+func BenchmarkSwapCollision(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ m.LoadOrStore(0, 0)
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.Swap(0, 0)
+ }
+ },
+ })
+}
+
+func BenchmarkSwapMostlyHits(b *testing.B) {
+ const hits, misses = 1023, 1
+
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ if i%(hits+misses) < hits {
+ v := i % (hits + misses)
+ m.Swap(v, v)
+ } else {
+ m.Swap(i, i)
+ m.Delete(i)
+ }
+ }
+ },
+ })
+}
+
+func BenchmarkSwapMostlyMisses(b *testing.B) {
+ const hits, misses = 1, 1023
+
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ if i%(hits+misses) < hits {
+ v := i % (hits + misses)
+ m.Swap(v, v)
+ } else {
+ m.Swap(i, i)
+ m.Delete(i)
+ }
+ }
+ },
+ })
+}
+
+func BenchmarkCompareAndSwapCollision(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ m.LoadOrStore(0, 0)
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for pb.Next() {
+ if m.CompareAndSwap(0, 0, 42) {
+ m.CompareAndSwap(0, 42, 0)
+ }
+ }
+ },
+ })
+}
+
+func BenchmarkCompareAndSwapNoExistingKey(b *testing.B) {
+ benchMap(b, bench{
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ if m.CompareAndSwap(i, 0, 0) {
+ m.Delete(i)
+ }
+ }
+ },
+ })
+}
+
+func BenchmarkCompareAndSwapValueNotEqual(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ m.Store(0, 0)
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.CompareAndSwap(0, 1, 2)
+ }
+ },
+ })
+}
+
+func BenchmarkCompareAndSwapMostlyHits(b *testing.B) {
+ const hits, misses = 1023, 1
+
+ benchMap(b, bench{
+ setup: func(b *testing.B, m mapInterface) {
+ if _, ok := m.(*DeepCopyMap); ok {
+ b.Skip("DeepCopyMap has quadratic running time.")
+ }
+
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ v := i
+ if i%(hits+misses) < hits {
+ v = i % (hits + misses)
+ }
+ m.CompareAndSwap(v, v, v)
+ }
+ },
+ })
+}
+
+func BenchmarkCompareAndSwapMostlyMisses(b *testing.B) {
+ const hits, misses = 1, 1023
+
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ v := i
+ if i%(hits+misses) < hits {
+ v = i % (hits + misses)
+ }
+ m.CompareAndSwap(v, v, v)
+ }
+ },
+ })
+}
+
+func BenchmarkCompareAndDeleteCollision(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ m.LoadOrStore(0, 0)
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ if m.CompareAndDelete(0, 0) {
+ m.Store(0, 0)
+ }
+ }
+ },
+ })
+}
+
+func BenchmarkCompareAndDeleteMostlyHits(b *testing.B) {
+ const hits, misses = 1023, 1
+
+ benchMap(b, bench{
+ setup: func(b *testing.B, m mapInterface) {
+ if _, ok := m.(*DeepCopyMap); ok {
+ b.Skip("DeepCopyMap has quadratic running time.")
+ }
+
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ v := i
+ if i%(hits+misses) < hits {
+ v = i % (hits + misses)
+ }
+ if m.CompareAndDelete(v, v) {
+ m.Store(v, v)
+ }
+ }
+ },
+ })
+}
+
+func BenchmarkCompareAndDeleteMostlyMisses(b *testing.B) {
+ const hits, misses = 1, 1023
+
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ v := i
+ if i%(hits+misses) < hits {
+ v = i % (hits + misses)
+ }
+ if m.CompareAndDelete(v, v) {
+ m.Store(v, v)
+ }
+ }
+ },
+ })
+}
diff --git a/src/sync/map_reference_test.go b/src/sync/map_reference_test.go
new file mode 100644
index 0000000..aa5ebf3
--- /dev/null
+++ b/src/sync/map_reference_test.go
@@ -0,0 +1,271 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// This file contains reference map implementations for unit-tests.
+
+// mapInterface is the interface Map implements.
+type mapInterface interface {
+ Load(any) (any, bool)
+ Store(key, value any)
+ LoadOrStore(key, value any) (actual any, loaded bool)
+ LoadAndDelete(key any) (value any, loaded bool)
+ Delete(any)
+ Swap(key, value any) (previous any, loaded bool)
+ CompareAndSwap(key, old, new any) (swapped bool)
+ CompareAndDelete(key, old any) (deleted bool)
+ Range(func(key, value any) (shouldContinue bool))
+}
+
+var (
+ _ mapInterface = &RWMutexMap{}
+ _ mapInterface = &DeepCopyMap{}
+)
+
+// RWMutexMap is an implementation of mapInterface using a sync.RWMutex.
+type RWMutexMap struct {
+ mu sync.RWMutex
+ dirty map[any]any
+}
+
+func (m *RWMutexMap) Load(key any) (value any, ok bool) {
+ m.mu.RLock()
+ value, ok = m.dirty[key]
+ m.mu.RUnlock()
+ return
+}
+
+func (m *RWMutexMap) Store(key, value any) {
+ m.mu.Lock()
+ if m.dirty == nil {
+ m.dirty = make(map[any]any)
+ }
+ m.dirty[key] = value
+ m.mu.Unlock()
+}
+
+func (m *RWMutexMap) LoadOrStore(key, value any) (actual any, loaded bool) {
+ m.mu.Lock()
+ actual, loaded = m.dirty[key]
+ if !loaded {
+ actual = value
+ if m.dirty == nil {
+ m.dirty = make(map[any]any)
+ }
+ m.dirty[key] = value
+ }
+ m.mu.Unlock()
+ return actual, loaded
+}
+
+func (m *RWMutexMap) Swap(key, value any) (previous any, loaded bool) {
+ m.mu.Lock()
+ if m.dirty == nil {
+ m.dirty = make(map[any]any)
+ }
+
+ previous, loaded = m.dirty[key]
+ m.dirty[key] = value
+ m.mu.Unlock()
+ return
+}
+
+func (m *RWMutexMap) LoadAndDelete(key any) (value any, loaded bool) {
+ m.mu.Lock()
+ value, loaded = m.dirty[key]
+ if !loaded {
+ m.mu.Unlock()
+ return nil, false
+ }
+ delete(m.dirty, key)
+ m.mu.Unlock()
+ return value, loaded
+}
+
+func (m *RWMutexMap) Delete(key any) {
+ m.mu.Lock()
+ delete(m.dirty, key)
+ m.mu.Unlock()
+}
+
+func (m *RWMutexMap) CompareAndSwap(key, old, new any) (swapped bool) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if m.dirty == nil {
+ return false
+ }
+
+ value, loaded := m.dirty[key]
+ if loaded && value == old {
+ m.dirty[key] = new
+ return true
+ }
+ return false
+}
+
+func (m *RWMutexMap) CompareAndDelete(key, old any) (deleted bool) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if m.dirty == nil {
+ return false
+ }
+
+ value, loaded := m.dirty[key]
+ if loaded && value == old {
+ delete(m.dirty, key)
+ return true
+ }
+ return false
+}
+
+func (m *RWMutexMap) Range(f func(key, value any) (shouldContinue bool)) {
+ m.mu.RLock()
+ keys := make([]any, 0, len(m.dirty))
+ for k := range m.dirty {
+ keys = append(keys, k)
+ }
+ m.mu.RUnlock()
+
+ for _, k := range keys {
+ v, ok := m.Load(k)
+ if !ok {
+ continue
+ }
+ if !f(k, v) {
+ break
+ }
+ }
+}
+
+// DeepCopyMap is an implementation of mapInterface using a Mutex and
+// atomic.Value. It makes deep copies of the map on every write to avoid
+// acquiring the Mutex in Load.
+type DeepCopyMap struct {
+ mu sync.Mutex
+ clean atomic.Value
+}
+
+func (m *DeepCopyMap) Load(key any) (value any, ok bool) {
+ clean, _ := m.clean.Load().(map[any]any)
+ value, ok = clean[key]
+ return value, ok
+}
+
+func (m *DeepCopyMap) Store(key, value any) {
+ m.mu.Lock()
+ dirty := m.dirty()
+ dirty[key] = value
+ m.clean.Store(dirty)
+ m.mu.Unlock()
+}
+
+func (m *DeepCopyMap) LoadOrStore(key, value any) (actual any, loaded bool) {
+ clean, _ := m.clean.Load().(map[any]any)
+ actual, loaded = clean[key]
+ if loaded {
+ return actual, loaded
+ }
+
+ m.mu.Lock()
+ // Reload clean in case it changed while we were waiting on m.mu.
+ clean, _ = m.clean.Load().(map[any]any)
+ actual, loaded = clean[key]
+ if !loaded {
+ dirty := m.dirty()
+ dirty[key] = value
+ actual = value
+ m.clean.Store(dirty)
+ }
+ m.mu.Unlock()
+ return actual, loaded
+}
+
+func (m *DeepCopyMap) Swap(key, value any) (previous any, loaded bool) {
+ m.mu.Lock()
+ dirty := m.dirty()
+ previous, loaded = dirty[key]
+ dirty[key] = value
+ m.clean.Store(dirty)
+ m.mu.Unlock()
+ return
+}
+
+func (m *DeepCopyMap) LoadAndDelete(key any) (value any, loaded bool) {
+ m.mu.Lock()
+ dirty := m.dirty()
+ value, loaded = dirty[key]
+ delete(dirty, key)
+ m.clean.Store(dirty)
+ m.mu.Unlock()
+ return
+}
+
+func (m *DeepCopyMap) Delete(key any) {
+ m.mu.Lock()
+ dirty := m.dirty()
+ delete(dirty, key)
+ m.clean.Store(dirty)
+ m.mu.Unlock()
+}
+
+func (m *DeepCopyMap) CompareAndSwap(key, old, new any) (swapped bool) {
+ clean, _ := m.clean.Load().(map[any]any)
+ if previous, ok := clean[key]; !ok || previous != old {
+ return false
+ }
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ dirty := m.dirty()
+ value, loaded := dirty[key]
+ if loaded && value == old {
+ dirty[key] = new
+ m.clean.Store(dirty)
+ return true
+ }
+ return false
+}
+
+func (m *DeepCopyMap) CompareAndDelete(key, old any) (deleted bool) {
+ clean, _ := m.clean.Load().(map[any]any)
+ if previous, ok := clean[key]; !ok || previous != old {
+ return false
+ }
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ dirty := m.dirty()
+ value, loaded := dirty[key]
+ if loaded && value == old {
+ delete(dirty, key)
+ m.clean.Store(dirty)
+ return true
+ }
+ return false
+}
+
+func (m *DeepCopyMap) Range(f func(key, value any) (shouldContinue bool)) {
+ clean, _ := m.clean.Load().(map[any]any)
+ for k, v := range clean {
+ if !f(k, v) {
+ break
+ }
+ }
+}
+
+func (m *DeepCopyMap) dirty() map[any]any {
+ clean, _ := m.clean.Load().(map[any]any)
+ dirty := make(map[any]any, len(clean)+1)
+ for k, v := range clean {
+ dirty[k] = v
+ }
+ return dirty
+}
diff --git a/src/sync/map_test.go b/src/sync/map_test.go
new file mode 100644
index 0000000..1eb3fc6
--- /dev/null
+++ b/src/sync/map_test.go
@@ -0,0 +1,282 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "math/rand"
+ "reflect"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "testing/quick"
+)
+
+type mapOp string
+
+const (
+ opLoad = mapOp("Load")
+ opStore = mapOp("Store")
+ opLoadOrStore = mapOp("LoadOrStore")
+ opLoadAndDelete = mapOp("LoadAndDelete")
+ opDelete = mapOp("Delete")
+ opSwap = mapOp("Swap")
+ opCompareAndSwap = mapOp("CompareAndSwap")
+ opCompareAndDelete = mapOp("CompareAndDelete")
+)
+
+var mapOps = [...]mapOp{
+ opLoad,
+ opStore,
+ opLoadOrStore,
+ opLoadAndDelete,
+ opDelete,
+ opSwap,
+ opCompareAndSwap,
+ opCompareAndDelete,
+}
+
+// mapCall is a quick.Generator for calls on mapInterface.
+type mapCall struct {
+ op mapOp
+ k, v any
+}
+
+func (c mapCall) apply(m mapInterface) (any, bool) {
+ switch c.op {
+ case opLoad:
+ return m.Load(c.k)
+ case opStore:
+ m.Store(c.k, c.v)
+ return nil, false
+ case opLoadOrStore:
+ return m.LoadOrStore(c.k, c.v)
+ case opLoadAndDelete:
+ return m.LoadAndDelete(c.k)
+ case opDelete:
+ m.Delete(c.k)
+ return nil, false
+ case opSwap:
+ return m.Swap(c.k, c.v)
+ case opCompareAndSwap:
+ if m.CompareAndSwap(c.k, c.v, rand.Int()) {
+ m.Delete(c.k)
+ return c.v, true
+ }
+ return nil, false
+ case opCompareAndDelete:
+ if m.CompareAndDelete(c.k, c.v) {
+ if _, ok := m.Load(c.k); !ok {
+ return nil, true
+ }
+ }
+ return nil, false
+ default:
+ panic("invalid mapOp")
+ }
+}
+
+type mapResult struct {
+ value any
+ ok bool
+}
+
+func randValue(r *rand.Rand) any {
+ b := make([]byte, r.Intn(4))
+ for i := range b {
+ b[i] = 'a' + byte(rand.Intn(26))
+ }
+ return string(b)
+}
+
+func (mapCall) Generate(r *rand.Rand, size int) reflect.Value {
+ c := mapCall{op: mapOps[rand.Intn(len(mapOps))], k: randValue(r)}
+ switch c.op {
+ case opStore, opLoadOrStore:
+ c.v = randValue(r)
+ }
+ return reflect.ValueOf(c)
+}
+
+func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[any]any) {
+ for _, c := range calls {
+ v, ok := c.apply(m)
+ results = append(results, mapResult{v, ok})
+ }
+
+ final = make(map[any]any)
+ m.Range(func(k, v any) bool {
+ final[k] = v
+ return true
+ })
+
+ return results, final
+}
+
+func applyMap(calls []mapCall) ([]mapResult, map[any]any) {
+ return applyCalls(new(sync.Map), calls)
+}
+
+func applyRWMutexMap(calls []mapCall) ([]mapResult, map[any]any) {
+ return applyCalls(new(RWMutexMap), calls)
+}
+
+func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[any]any) {
+ return applyCalls(new(DeepCopyMap), calls)
+}
+
+func TestMapMatchesRWMutex(t *testing.T) {
+ if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestMapMatchesDeepCopy(t *testing.T) {
+ if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestConcurrentRange(t *testing.T) {
+ const mapSize = 1 << 10
+
+ m := new(sync.Map)
+ for n := int64(1); n <= mapSize; n++ {
+ m.Store(n, int64(n))
+ }
+
+ done := make(chan struct{})
+ var wg sync.WaitGroup
+ defer func() {
+ close(done)
+ wg.Wait()
+ }()
+ for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- {
+ r := rand.New(rand.NewSource(g))
+ wg.Add(1)
+ go func(g int64) {
+ defer wg.Done()
+ for i := int64(0); ; i++ {
+ select {
+ case <-done:
+ return
+ default:
+ }
+ for n := int64(1); n < mapSize; n++ {
+ if r.Int63n(mapSize) == 0 {
+ m.Store(n, n*i*g)
+ } else {
+ m.Load(n)
+ }
+ }
+ }
+ }(g)
+ }
+
+ iters := 1 << 10
+ if testing.Short() {
+ iters = 16
+ }
+ for n := iters; n > 0; n-- {
+ seen := make(map[int64]bool, mapSize)
+
+ m.Range(func(ki, vi any) bool {
+ k, v := ki.(int64), vi.(int64)
+ if v%k != 0 {
+ t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v)
+ }
+ if seen[k] {
+ t.Fatalf("Range visited key %v twice", k)
+ }
+ seen[k] = true
+ return true
+ })
+
+ if len(seen) != mapSize {
+ t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize)
+ }
+ }
+}
+
+func TestIssue40999(t *testing.T) {
+ var m sync.Map
+
+ // Since the miss-counting in missLocked (via Delete)
+ // compares the miss count with len(m.dirty),
+ // add an initial entry to bias len(m.dirty) above the miss count.
+ m.Store(nil, struct{}{})
+
+ var finalized uint32
+
+ // Set finalizers that count for collected keys. A non-zero count
+ // indicates that keys have not been leaked.
+ for atomic.LoadUint32(&finalized) == 0 {
+ p := new(int)
+ runtime.SetFinalizer(p, func(*int) {
+ atomic.AddUint32(&finalized, 1)
+ })
+ m.Store(p, struct{}{})
+ m.Delete(p)
+ runtime.GC()
+ }
+}
+
+func TestMapRangeNestedCall(t *testing.T) { // Issue 46399
+ var m sync.Map
+ for i, v := range [3]string{"hello", "world", "Go"} {
+ m.Store(i, v)
+ }
+ m.Range(func(key, value any) bool {
+ m.Range(func(key, value any) bool {
+ // We should be able to load the key offered in the Range callback,
+ // because no concurrent Delete is involved in this tested map.
+ if v, ok := m.Load(key); !ok || !reflect.DeepEqual(v, value) {
+ t.Fatalf("Nested Range loads unexpected value, got %+v want %+v", v, value)
+ }
+
+ // We never stored key 42 in the map before this point, so if we somehow
+ // load a value for it, there must be an internal bug in the nested Range
+ // handling of the Map.
+ if _, loaded := m.LoadOrStore(42, "dummy"); loaded {
+ t.Fatalf("Nested Range unexpectedly loaded a value; want to store a new value")
+ }
+
+ // Store and then LoadAndDelete a value under key 42. The key and its
+ // associated value should be removed from the Map, so no future Range
+ // will observe key 42, as checked above.
+ val := "sync.Map"
+ m.Store(42, val)
+ if v, loaded := m.LoadAndDelete(42); !loaded || !reflect.DeepEqual(v, val) {
+ t.Fatalf("Nested Range loads unexpected value, got %v, want %v", v, val)
+ }
+ return true
+ })
+
+ // Remove key from Map on-the-fly.
+ m.Delete(key)
+ return true
+ })
+
+ // After the Range above deletes every key, the Map should be empty, so a
+ // further Range never invokes the callback and length remains 0.
+ length := 0
+ m.Range(func(key, value any) bool {
+ length++
+ return true
+ })
+
+ if length != 0 {
+ t.Fatalf("Unexpected sync.Map size, got %v want %v", length, 0)
+ }
+}
+
+func TestCompareAndSwap_NonExistingKey(t *testing.T) {
+ m := &sync.Map{}
+ if m.CompareAndSwap(m, nil, 42) {
+ // See https://go.dev/issue/51972#issuecomment-1126408637.
+ t.Fatalf("CompareAndSwap on a non-existing key succeeded")
+ }
+}
diff --git a/src/sync/mutex.go b/src/sync/mutex.go
new file mode 100644
index 0000000..2ea024e
--- /dev/null
+++ b/src/sync/mutex.go
@@ -0,0 +1,259 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sync provides basic synchronization primitives such as mutual
+// exclusion locks. Other than the Once and WaitGroup types, most are intended
+// for use by low-level library routines. Higher-level synchronization is
+// better done via channels and communication.
+//
+// Values containing the types defined in this package should not be copied.
+package sync
+
+import (
+ "internal/race"
+ "sync/atomic"
+ "unsafe"
+)
+
+// Provided by runtime via linkname.
+func throw(string)
+func fatal(string)
+
+// A Mutex is a mutual exclusion lock.
+// The zero value for a Mutex is an unlocked mutex.
+//
+// A Mutex must not be copied after first use.
+//
+// In the terminology of the Go memory model,
+// the n'th call to Unlock “synchronizes before” the m'th call to Lock
+// for any n < m.
+// A successful call to TryLock is equivalent to a call to Lock.
+// A failed call to TryLock does not establish any “synchronizes before”
+// relation at all.
+type Mutex struct {
+ state int32
+ sema uint32
+}
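+
+// A minimal usage sketch (caller code for illustration; counter stands in
+// for any shared state guarded by the mutex):
+//
+//	var (
+//		mu      sync.Mutex
+//		counter int
+//	)
+//
+//	func increment() {
+//		mu.Lock()
+//		defer mu.Unlock() // pairs with the Lock above on every return path
+//		counter++
+//	}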
+
+// A Locker represents an object that can be locked and unlocked.
+type Locker interface {
+ Lock()
+ Unlock()
+}
+
+const (
+ mutexLocked = 1 << iota // mutex is locked
+ mutexWoken
+ mutexStarving
+ mutexWaiterShift = iota
+
+ // Mutex fairness.
+ //
+ // A Mutex can be in one of two modes of operation: normal and starvation.
+ // In normal mode waiters are queued in FIFO order, but a woken-up waiter
+ // does not own the mutex and competes with newly arriving goroutines for
+ // ownership. Newly arriving goroutines have an advantage -- they are
+ // already running on a CPU and there can be many of them, so a woken-up
+ // waiter has a good chance of losing. In that case it is queued at the front
+ // of the wait queue. If a waiter fails to acquire the mutex for more than 1ms,
+ // it switches the mutex to starvation mode.
+ //
+ // In starvation mode ownership of the mutex is directly handed off from
+ // the unlocking goroutine to the waiter at the front of the queue.
+ // Newly arriving goroutines don't try to acquire the mutex even if it appears
+ // to be unlocked, and don't try to spin. Instead they queue themselves at
+ // the tail of the wait queue.
+ //
+ // If a waiter receives ownership of the mutex and sees that either
+ // (1) it is the last waiter in the queue, or (2) it waited for less than 1 ms,
+ // it switches mutex back to normal operation mode.
+ //
+ // Normal mode has considerably better performance as a goroutine can acquire
+ // a mutex several times in a row even if there are blocked waiters.
+ // Starvation mode is important to prevent pathological cases of tail latency.
+ starvationThresholdNs = 1e6
+)
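+
+// How a snapshot of m.state decomposes under the constants above
+// (illustrative only; state is assumed to be a copy of m.state):
+//
+//	locked := state&mutexLocked != 0     // bit 0
+//	woken := state&mutexWoken != 0       // bit 1
+//	starving := state&mutexStarving != 0 // bit 2
+//	waiters := state >> mutexWaiterShift // remaining bits count waiters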
+
+// Lock locks m.
+// If the lock is already in use, the calling goroutine
+// blocks until the mutex is available.
+func (m *Mutex) Lock() {
+ // Fast path: grab unlocked mutex.
+ if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
+ if race.Enabled {
+ race.Acquire(unsafe.Pointer(m))
+ }
+ return
+ }
+ // Slow path (outlined so that the fast path can be inlined)
+ m.lockSlow()
+}
+
+// TryLock tries to lock m and reports whether it succeeded.
+//
+// Note that while correct uses of TryLock do exist, they are rare,
+// and use of TryLock is often a sign of a deeper problem
+// in a particular use of mutexes.
+func (m *Mutex) TryLock() bool {
+ old := m.state
+ if old&(mutexLocked|mutexStarving) != 0 {
+ return false
+ }
+
+ // There may be a goroutine waiting for the mutex, but we are
+ // running now and can try to grab the mutex before that
+ // goroutine wakes up.
+ if !atomic.CompareAndSwapInt32(&m.state, old, old|mutexLocked) {
+ return false
+ }
+
+ if race.Enabled {
+ race.Acquire(unsafe.Pointer(m))
+ }
+ return true
+}
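+
+// One of the rare patterns where TryLock can be reasonable: skipping
+// best-effort work when the lock is busy (caller sketch; flushStats is a
+// hypothetical helper, and mu guards whatever state it touches):
+//
+//	if mu.TryLock() {
+//		flushStats() // opportunistic; fine to skip under contention
+//		mu.Unlock()
+//	}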
+
+func (m *Mutex) lockSlow() {
+ var waitStartTime int64
+ starving := false
+ awoke := false
+ iter := 0
+ old := m.state
+ for {
+ // Don't spin in starvation mode, ownership is handed off to waiters
+ // so we won't be able to acquire the mutex anyway.
+ if old&(mutexLocked|mutexStarving) == mutexLocked && runtime_canSpin(iter) {
+ // Active spinning makes sense.
+ // Try to set mutexWoken flag to inform Unlock
+ // to not wake other blocked goroutines.
+ if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
+ atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
+ awoke = true
+ }
+ runtime_doSpin()
+ iter++
+ old = m.state
+ continue
+ }
+ new := old
+ // Don't try to acquire starving mutex, new arriving goroutines must queue.
+ if old&mutexStarving == 0 {
+ new |= mutexLocked
+ }
+ if old&(mutexLocked|mutexStarving) != 0 {
+ new += 1 << mutexWaiterShift
+ }
+ // The current goroutine switches mutex to starvation mode.
+ // But if the mutex is currently unlocked, don't do the switch.
+ // Unlock expects that starving mutex has waiters, which will not
+ // be true in this case.
+ if starving && old&mutexLocked != 0 {
+ new |= mutexStarving
+ }
+ if awoke {
+ // The goroutine has been woken from sleep,
+ // so we need to reset the flag in either case.
+ if new&mutexWoken == 0 {
+ throw("sync: inconsistent mutex state")
+ }
+ new &^= mutexWoken
+ }
+ if atomic.CompareAndSwapInt32(&m.state, old, new) {
+ if old&(mutexLocked|mutexStarving) == 0 {
+ break // locked the mutex with CAS
+ }
+ // If we were already waiting before, queue at the front of the queue.
+ queueLifo := waitStartTime != 0
+ if waitStartTime == 0 {
+ waitStartTime = runtime_nanotime()
+ }
+ runtime_SemacquireMutex(&m.sema, queueLifo, 1)
+ starving = starving || runtime_nanotime()-waitStartTime > starvationThresholdNs
+ old = m.state
+ if old&mutexStarving != 0 {
+ // If this goroutine was woken and mutex is in starvation mode,
+ // ownership was handed off to us but mutex is in somewhat
+ // inconsistent state: mutexLocked is not set and we are still
+ // accounted as waiter. Fix that.
+ if old&(mutexLocked|mutexWoken) != 0 || old>>mutexWaiterShift == 0 {
+ throw("sync: inconsistent mutex state")
+ }
+ delta := int32(mutexLocked - 1<<mutexWaiterShift)
+ if !starving || old>>mutexWaiterShift == 1 {
+ // Exit starvation mode.
+ // It is critical to do this here, taking the wait time into account.
+ // Starvation mode is so inefficient that two goroutines
+ // can go lock-step indefinitely once they switch the mutex
+ // to starvation mode.
+ delta -= mutexStarving
+ }
+ atomic.AddInt32(&m.state, delta)
+ break
+ }
+ awoke = true
+ iter = 0
+ } else {
+ old = m.state
+ }
+ }
+
+ if race.Enabled {
+ race.Acquire(unsafe.Pointer(m))
+ }
+}
+
+// Unlock unlocks m.
+// It is a run-time error if m is not locked on entry to Unlock.
+//
+// A locked Mutex is not associated with a particular goroutine.
+// It is allowed for one goroutine to lock a Mutex and then
+// arrange for another goroutine to unlock it.
+func (m *Mutex) Unlock() {
+ if race.Enabled {
+ _ = m.state
+ race.Release(unsafe.Pointer(m))
+ }
+
+ // Fast path: drop lock bit.
+ new := atomic.AddInt32(&m.state, -mutexLocked)
+ if new != 0 {
+ // Outlined slow path to allow inlining the fast path.
+ // To hide unlockSlow during tracing we skip one extra frame when tracing GoUnblock.
+ m.unlockSlow(new)
+ }
+}
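+
+// A sketch of the cross-goroutine handoff permitted above (caller code for
+// illustration; the Mutex effectively acts as a binary semaphore):
+//
+//	var mu sync.Mutex
+//	mu.Lock()
+//	go func() {
+//		// ... work while the lock is held ...
+//		mu.Unlock() // a different goroutine releases it
+//	}()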
+
+func (m *Mutex) unlockSlow(new int32) {
+ if (new+mutexLocked)&mutexLocked == 0 {
+ fatal("sync: unlock of unlocked mutex")
+ }
+ if new&mutexStarving == 0 {
+ old := new
+ for {
+ // If there are no waiters or a goroutine has already
+ // been woken or grabbed the lock, no need to wake anyone.
+ // In starvation mode ownership is directly handed off from unlocking
+ // goroutine to the next waiter. We are not part of this chain,
+ // since we did not observe mutexStarving when we unlocked the mutex above.
+ // So just get out of the way.
+ if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken|mutexStarving) != 0 {
+ return
+ }
+ // Grab the right to wake someone.
+ new = (old - 1<<mutexWaiterShift) | mutexWoken
+ if atomic.CompareAndSwapInt32(&m.state, old, new) {
+ runtime_Semrelease(&m.sema, false, 1)
+ return
+ }
+ old = m.state
+ }
+ } else {
+ // Starving mode: hand off mutex ownership to the next waiter, and yield
+ // our time slice so that the next waiter can start running immediately.
+ // Note: mutexLocked is not set; the waiter will set it after wakeup.
+ // But the mutex is still considered locked while mutexStarving is set,
+ // so newly arriving goroutines won't acquire it.
+ runtime_Semrelease(&m.sema, true, 1)
+ }
+}
diff --git a/src/sync/mutex_test.go b/src/sync/mutex_test.go
new file mode 100644
index 0000000..cca0986
--- /dev/null
+++ b/src/sync/mutex_test.go
@@ -0,0 +1,335 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// GOMAXPROCS=10 go test
+
+package sync_test
+
+import (
+ "fmt"
+ "internal/testenv"
+ "os"
+ "os/exec"
+ "runtime"
+ "strings"
+ . "sync"
+ "testing"
+ "time"
+)
+
+func HammerSemaphore(s *uint32, loops int, cdone chan bool) {
+ for i := 0; i < loops; i++ {
+ Runtime_Semacquire(s)
+ Runtime_Semrelease(s, false, 0)
+ }
+ cdone <- true
+}
+
+func TestSemaphore(t *testing.T) {
+ s := new(uint32)
+ *s = 1
+ c := make(chan bool)
+ for i := 0; i < 10; i++ {
+ go HammerSemaphore(s, 1000, c)
+ }
+ for i := 0; i < 10; i++ {
+ <-c
+ }
+}
+
+func BenchmarkUncontendedSemaphore(b *testing.B) {
+ s := new(uint32)
+ *s = 1
+ HammerSemaphore(s, b.N, make(chan bool, 2))
+}
+
+func BenchmarkContendedSemaphore(b *testing.B) {
+ b.StopTimer()
+ s := new(uint32)
+ *s = 1
+ c := make(chan bool)
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
+ b.StartTimer()
+
+ go HammerSemaphore(s, b.N/2, c)
+ go HammerSemaphore(s, b.N/2, c)
+ <-c
+ <-c
+}
+
+func HammerMutex(m *Mutex, loops int, cdone chan bool) {
+ for i := 0; i < loops; i++ {
+ if i%3 == 0 {
+ if m.TryLock() {
+ m.Unlock()
+ }
+ continue
+ }
+ m.Lock()
+ m.Unlock()
+ }
+ cdone <- true
+}
+
+func TestMutex(t *testing.T) {
+ if n := runtime.SetMutexProfileFraction(1); n != 0 {
+ t.Logf("got mutexrate %d expected 0", n)
+ }
+ defer runtime.SetMutexProfileFraction(0)
+
+ m := new(Mutex)
+
+ m.Lock()
+ if m.TryLock() {
+ t.Fatalf("TryLock succeeded with mutex locked")
+ }
+ m.Unlock()
+ if !m.TryLock() {
+ t.Fatalf("TryLock failed with mutex unlocked")
+ }
+ m.Unlock()
+
+ c := make(chan bool)
+ for i := 0; i < 10; i++ {
+ go HammerMutex(m, 1000, c)
+ }
+ for i := 0; i < 10; i++ {
+ <-c
+ }
+}
+
+var misuseTests = []struct {
+ name string
+ f func()
+}{
+ {
+ "Mutex.Unlock",
+ func() {
+ var mu Mutex
+ mu.Unlock()
+ },
+ },
+ {
+ "Mutex.Unlock2",
+ func() {
+ var mu Mutex
+ mu.Lock()
+ mu.Unlock()
+ mu.Unlock()
+ },
+ },
+ {
+ "RWMutex.Unlock",
+ func() {
+ var mu RWMutex
+ mu.Unlock()
+ },
+ },
+ {
+ "RWMutex.Unlock2",
+ func() {
+ var mu RWMutex
+ mu.RLock()
+ mu.Unlock()
+ },
+ },
+ {
+ "RWMutex.Unlock3",
+ func() {
+ var mu RWMutex
+ mu.Lock()
+ mu.Unlock()
+ mu.Unlock()
+ },
+ },
+ {
+ "RWMutex.RUnlock",
+ func() {
+ var mu RWMutex
+ mu.RUnlock()
+ },
+ },
+ {
+ "RWMutex.RUnlock2",
+ func() {
+ var mu RWMutex
+ mu.Lock()
+ mu.RUnlock()
+ },
+ },
+ {
+ "RWMutex.RUnlock3",
+ func() {
+ var mu RWMutex
+ mu.RLock()
+ mu.RUnlock()
+ mu.RUnlock()
+ },
+ },
+}
+
+func init() {
+ if len(os.Args) == 3 && os.Args[1] == "TESTMISUSE" {
+ for _, test := range misuseTests {
+ if test.name == os.Args[2] {
+ func() {
+ defer func() { recover() }()
+ test.f()
+ }()
+ fmt.Printf("test completed\n")
+ os.Exit(0)
+ }
+ }
+ fmt.Printf("unknown test\n")
+ os.Exit(0)
+ }
+}
+
+func TestMutexMisuse(t *testing.T) {
+ testenv.MustHaveExec(t)
+ for _, test := range misuseTests {
+ out, err := exec.Command(os.Args[0], "TESTMISUSE", test.name).CombinedOutput()
+ if err == nil || !strings.Contains(string(out), "unlocked") {
+ t.Errorf("%s: did not find failure with message about unlocked lock: %s\n%s\n", test.name, err, out)
+ }
+ }
+}
+
+func TestMutexFairness(t *testing.T) {
+ var mu Mutex
+ stop := make(chan bool)
+ defer close(stop)
+ go func() {
+ for {
+ mu.Lock()
+ time.Sleep(100 * time.Microsecond)
+ mu.Unlock()
+ select {
+ case <-stop:
+ return
+ default:
+ }
+ }
+ }()
+ done := make(chan bool, 1)
+ go func() {
+ for i := 0; i < 10; i++ {
+ time.Sleep(100 * time.Microsecond)
+ mu.Lock()
+ mu.Unlock()
+ }
+ done <- true
+ }()
+ select {
+ case <-done:
+ case <-time.After(10 * time.Second):
+ t.Fatalf("can't acquire Mutex in 10 seconds")
+ }
+}
+
+func BenchmarkMutexUncontended(b *testing.B) {
+ type PaddedMutex struct {
+ Mutex
+ pad [128]uint8
+ }
+ b.RunParallel(func(pb *testing.PB) {
+ var mu PaddedMutex
+ for pb.Next() {
+ mu.Lock()
+ mu.Unlock()
+ }
+ })
+}
+
+func benchmarkMutex(b *testing.B, slack, work bool) {
+ var mu Mutex
+ if slack {
+ b.SetParallelism(10)
+ }
+ b.RunParallel(func(pb *testing.PB) {
+ foo := 0
+ for pb.Next() {
+ mu.Lock()
+ mu.Unlock()
+ if work {
+ for i := 0; i < 100; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ }
+ }
+ _ = foo
+ })
+}
+
+func BenchmarkMutex(b *testing.B) {
+ benchmarkMutex(b, false, false)
+}
+
+func BenchmarkMutexSlack(b *testing.B) {
+ benchmarkMutex(b, true, false)
+}
+
+func BenchmarkMutexWork(b *testing.B) {
+ benchmarkMutex(b, false, true)
+}
+
+func BenchmarkMutexWorkSlack(b *testing.B) {
+ benchmarkMutex(b, true, true)
+}
+
+func BenchmarkMutexNoSpin(b *testing.B) {
+ // This benchmark models a situation where spinning in the mutex should be
+ // unprofitable, and lets us confirm that spinning does no harm.
+ // To achieve this we create an excess of goroutines, most of which do local work.
+ // These goroutines yield during local work, so that switching from
+ // a blocked goroutine to other goroutines is profitable.
+ // As a matter of fact, this benchmark still triggers some spinning in the mutex.
+ var m Mutex
+ var acc0, acc1 uint64
+ b.SetParallelism(4)
+ b.RunParallel(func(pb *testing.PB) {
+ c := make(chan bool)
+ var data [4 << 10]uint64
+ for i := 0; pb.Next(); i++ {
+ if i%4 == 0 {
+ m.Lock()
+ acc0 -= 100
+ acc1 += 100
+ m.Unlock()
+ } else {
+ for i := 0; i < len(data); i += 4 {
+ data[i]++
+ }
+ // An elaborate way to say runtime.Gosched()
+ // that does not put the goroutine onto the global run queue.
+ go func() {
+ c <- true
+ }()
+ <-c
+ }
+ }
+ })
+}
+
+func BenchmarkMutexSpin(b *testing.B) {
+ // This benchmark models a situation where spinning in the mutex should be
+ // profitable. To achieve this we create a goroutine per-proc.
+ // These goroutines access a considerable amount of local data so that
+ // unnecessary rescheduling is penalized by cache misses.
+ var m Mutex
+ var acc0, acc1 uint64
+ b.RunParallel(func(pb *testing.PB) {
+ var data [16 << 10]uint64
+ for i := 0; pb.Next(); i++ {
+ m.Lock()
+ acc0 -= 100
+ acc1 += 100
+ m.Unlock()
+ for i := 0; i < len(data); i += 4 {
+ data[i]++
+ }
+ }
+ })
+}
diff --git a/src/sync/once.go b/src/sync/once.go
new file mode 100644
index 0000000..b6399cf
--- /dev/null
+++ b/src/sync/once.go
@@ -0,0 +1,76 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "sync/atomic"
+)
+
+// Once is an object that will perform exactly one action.
+//
+// A Once must not be copied after first use.
+//
+// In the terminology of the Go memory model,
+// the return from f “synchronizes before”
+// the return from any call of once.Do(f).
+type Once struct {
+ // done indicates whether the action has been performed.
+ // It is first in the struct because it is used in the hot path.
+ // The hot path is inlined at every call site.
+ // Placing done first allows more compact instructions on some architectures (amd64/386),
+ // and fewer instructions (to calculate offset) on other architectures.
+ done uint32
+ m Mutex
+}
+
+// Do calls the function f if and only if Do is being called for the
+// first time for this instance of Once. In other words, given
+//
+// var once Once
+//
+// if once.Do(f) is called multiple times, only the first call will invoke f,
+// even if f has a different value in each invocation. A new instance of
+// Once is required for each function to execute.
+//
+// Do is intended for initialization that must be run exactly once. Since f
+// is niladic, it may be necessary to use a function literal to capture the
+// arguments to a function to be invoked by Do:
+//
+// config.once.Do(func() { config.init(filename) })
+//
+// Because no call to Do returns until the one call to f returns, if f causes
+// Do to be called, it will deadlock.
+//
+// If f panics, Do considers it to have returned; future calls of Do return
+// without calling f.
+func (o *Once) Do(f func()) {
+ // Note: Here is an incorrect implementation of Do:
+ //
+ // if atomic.CompareAndSwapUint32(&o.done, 0, 1) {
+ // f()
+ // }
+ //
+ // Do guarantees that when it returns, f has finished.
+ // This implementation would not implement that guarantee:
+ // given two simultaneous calls, the winner of the cas would
+ // call f, and the second would return immediately, without
+ // waiting for the first's call to f to complete.
+ // This is why the slow path falls back to a mutex, and why
+ // the atomic.StoreUint32 must be delayed until after f returns.
+
+ if atomic.LoadUint32(&o.done) == 0 {
+ // Outlined slow-path to allow inlining of the fast-path.
+ o.doSlow(f)
+ }
+}
+
+func (o *Once) doSlow(f func()) {
+ o.m.Lock()
+ defer o.m.Unlock()
+ if o.done == 0 {
+ defer atomic.StoreUint32(&o.done, 1)
+ f()
+ }
+}
diff --git a/src/sync/once_test.go b/src/sync/once_test.go
new file mode 100644
index 0000000..1eec8d1
--- /dev/null
+++ b/src/sync/once_test.go
@@ -0,0 +1,68 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ . "sync"
+ "testing"
+)
+
+type one int
+
+func (o *one) Increment() {
+ *o++
+}
+
+func run(t *testing.T, once *Once, o *one, c chan bool) {
+ once.Do(func() { o.Increment() })
+ if v := *o; v != 1 {
+ t.Errorf("once failed inside run: %d is not 1", v)
+ }
+ c <- true
+}
+
+func TestOnce(t *testing.T) {
+ o := new(one)
+ once := new(Once)
+ c := make(chan bool)
+ const N = 10
+ for i := 0; i < N; i++ {
+ go run(t, once, o, c)
+ }
+ for i := 0; i < N; i++ {
+ <-c
+ }
+ if *o != 1 {
+ t.Errorf("once failed outside run: %d is not 1", *o)
+ }
+}
+
+func TestOncePanic(t *testing.T) {
+ var once Once
+ func() {
+ defer func() {
+ if r := recover(); r == nil {
+ t.Fatalf("Once.Do did not panic")
+ }
+ }()
+ once.Do(func() {
+ panic("failed")
+ })
+ }()
+
+ once.Do(func() {
+ t.Fatalf("Once.Do called twice")
+ })
+}
+
+func BenchmarkOnce(b *testing.B) {
+ var once Once
+ f := func() {}
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ once.Do(f)
+ }
+ })
+}
diff --git a/src/sync/oncefunc.go b/src/sync/oncefunc.go
new file mode 100644
index 0000000..9ef8344
--- /dev/null
+++ b/src/sync/oncefunc.go
@@ -0,0 +1,97 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+// OnceFunc returns a function that invokes f only once. The returned function
+// may be called concurrently.
+//
+// If f panics, the returned function will panic with the same value on every call.
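+//
+// A minimal usage sketch (illustrative only; initLogger is a hypothetical
+// function, not part of this package):
+//
+//	var startLogging = OnceFunc(initLogger)
+//
+//	// Any number of goroutines may call startLogging;
+//	// initLogger runs at most once.
+//	startLogging()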
+func OnceFunc(f func()) func() {
+ var (
+ once Once
+ valid bool
+ p any
+ )
+ // Construct the inner closure just once to reduce costs on the fast path.
+ g := func() {
+ defer func() {
+ p = recover()
+ if !valid {
+ // Re-panic immediately so on the first call the user gets a
+ // complete stack trace into f.
+ panic(p)
+ }
+ }()
+ f()
+ valid = true // Set only if f does not panic
+ }
+ return func() {
+ once.Do(g)
+ if !valid {
+ panic(p)
+ }
+ }
+}
+
+// OnceValue returns a function that invokes f only once and returns the value
+// returned by f. The returned function may be called concurrently.
+//
+// If f panics, the returned function will panic with the same value on every call.
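+//
+// A minimal usage sketch (illustrative only; computeLimit is a hypothetical
+// function returning an int):
+//
+//	var limit = OnceValue(computeLimit)
+//
+//	// Every call to limit() returns the value computed on the first call.
+//	n := limit()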
+func OnceValue[T any](f func() T) func() T {
+ var (
+ once Once
+ valid bool
+ p any
+ result T
+ )
+ g := func() {
+ defer func() {
+ p = recover()
+ if !valid {
+ panic(p)
+ }
+ }()
+ result = f()
+ valid = true
+ }
+ return func() T {
+ once.Do(g)
+ if !valid {
+ panic(p)
+ }
+ return result
+ }
+}
+
+// OnceValues returns a function that invokes f only once and returns the values
+// returned by f. The returned function may be called concurrently.
+//
+// If f panics, the returned function will panic with the same value on every call.
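+//
+// A minimal usage sketch (illustrative only; the file name is arbitrary):
+//
+//	var readConfig = OnceValues(func() ([]byte, error) {
+//		return os.ReadFile("config.json")
+//	})
+//
+//	// All callers observe the bytes and error from the single read.
+//	data, err := readConfig()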
+func OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) {
+ var (
+ once Once
+ valid bool
+ p any
+ r1 T1
+ r2 T2
+ )
+ g := func() {
+ defer func() {
+ p = recover()
+ if !valid {
+ panic(p)
+ }
+ }()
+ r1, r2 = f()
+ valid = true
+ }
+ return func() (T1, T2) {
+ once.Do(g)
+ if !valid {
+ panic(p)
+ }
+ return r1, r2
+ }
+}
diff --git a/src/sync/oncefunc_test.go b/src/sync/oncefunc_test.go
new file mode 100644
index 0000000..3c523a5
--- /dev/null
+++ b/src/sync/oncefunc_test.go
@@ -0,0 +1,265 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "bytes"
+ "runtime"
+ "runtime/debug"
+ "sync"
+ "testing"
+)
+
+// We assume that the Once.Do tests have already covered parallelism.
+
+func TestOnceFunc(t *testing.T) {
+ calls := 0
+ f := sync.OnceFunc(func() { calls++ })
+ allocs := testing.AllocsPerRun(10, f)
+ if calls != 1 {
+ t.Errorf("want calls==1, got %d", calls)
+ }
+ if allocs != 0 {
+ t.Errorf("want 0 allocations per call, got %v", allocs)
+ }
+}
+
+func TestOnceValue(t *testing.T) {
+ calls := 0
+ f := sync.OnceValue(func() int {
+ calls++
+ return calls
+ })
+ allocs := testing.AllocsPerRun(10, func() { f() })
+ value := f()
+ if calls != 1 {
+ t.Errorf("want calls==1, got %d", calls)
+ }
+ if value != 1 {
+ t.Errorf("want value==1, got %d", value)
+ }
+ if allocs != 0 {
+ t.Errorf("want 0 allocations per call, got %v", allocs)
+ }
+}
+
+func TestOnceValues(t *testing.T) {
+ calls := 0
+ f := sync.OnceValues(func() (int, int) {
+ calls++
+ return calls, calls + 1
+ })
+ allocs := testing.AllocsPerRun(10, func() { f() })
+ v1, v2 := f()
+ if calls != 1 {
+ t.Errorf("want calls==1, got %d", calls)
+ }
+ if v1 != 1 || v2 != 2 {
+ t.Errorf("want v1==1 and v2==2, got %d and %d", v1, v2)
+ }
+ if allocs != 0 {
+ t.Errorf("want 0 allocations per call, got %v", allocs)
+ }
+}
+
+func testOncePanicX(t *testing.T, calls *int, f func()) {
+ testOncePanicWith(t, calls, f, func(label string, p any) {
+ if p != "x" {
+ t.Fatalf("%s: want panic %v, got %v", label, "x", p)
+ }
+ })
+}
+
+func testOncePanicWith(t *testing.T, calls *int, f func(), check func(label string, p any)) {
+	// Check that each call to f panics with the same value, but the
+ // underlying function is only called once.
+ for _, label := range []string{"first time", "second time"} {
+ var p any
+ panicked := true
+ func() {
+ defer func() {
+ p = recover()
+ }()
+ f()
+ panicked = false
+ }()
+ if !panicked {
+ t.Fatalf("%s: f did not panic", label)
+ }
+ check(label, p)
+ }
+ if *calls != 1 {
+ t.Errorf("want calls==1, got %d", *calls)
+ }
+}
+
+func TestOnceFuncPanic(t *testing.T) {
+ calls := 0
+ f := sync.OnceFunc(func() {
+ calls++
+ panic("x")
+ })
+ testOncePanicX(t, &calls, f)
+}
+
+func TestOnceValuePanic(t *testing.T) {
+ calls := 0
+ f := sync.OnceValue(func() int {
+ calls++
+ panic("x")
+ })
+ testOncePanicX(t, &calls, func() { f() })
+}
+
+func TestOnceValuesPanic(t *testing.T) {
+ calls := 0
+ f := sync.OnceValues(func() (int, int) {
+ calls++
+ panic("x")
+ })
+ testOncePanicX(t, &calls, func() { f() })
+}
+
+func TestOnceFuncPanicNil(t *testing.T) {
+ calls := 0
+ f := sync.OnceFunc(func() {
+ calls++
+ panic(nil)
+ })
+ testOncePanicWith(t, &calls, f, func(label string, p any) {
+ switch p.(type) {
+ case nil, *runtime.PanicNilError:
+ return
+ }
+ t.Fatalf("%s: want nil panic, got %v", label, p)
+ })
+}
+
+func TestOnceFuncGoexit(t *testing.T) {
+ // If f calls Goexit, the results are unspecified. But check that f doesn't
+ // get called twice.
+ calls := 0
+ f := sync.OnceFunc(func() {
+ calls++
+ runtime.Goexit()
+ })
+ var wg sync.WaitGroup
+ for i := 0; i < 2; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ defer func() { recover() }()
+ f()
+ }()
+ wg.Wait()
+ }
+ if calls != 1 {
+ t.Errorf("want calls==1, got %d", calls)
+ }
+}
+
+func TestOnceFuncPanicTraceback(t *testing.T) {
+ // Test that on the first invocation of a OnceFunc, the stack trace goes all
+ // the way to the origin of the panic.
+ f := sync.OnceFunc(onceFuncPanic)
+
+ defer func() {
+ if p := recover(); p != "x" {
+ t.Fatalf("want panic %v, got %v", "x", p)
+ }
+ stack := debug.Stack()
+ want := "sync_test.onceFuncPanic"
+ if !bytes.Contains(stack, []byte(want)) {
+ t.Fatalf("want stack containing %v, got:\n%s", want, string(stack))
+ }
+ }()
+ f()
+}
+
+func onceFuncPanic() {
+ panic("x")
+}
+
+var (
+ onceFunc = sync.OnceFunc(func() {})
+
+ onceFuncOnce sync.Once
+)
+
+func doOnceFunc() {
+ onceFuncOnce.Do(func() {})
+}
+
+func BenchmarkOnceFunc(b *testing.B) {
+ b.Run("v=Once", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ // The baseline is direct use of sync.Once.
+ doOnceFunc()
+ }
+ })
+ b.Run("v=Global", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ // As of 3/2023, the compiler doesn't recognize that onceFunc is
+ // never mutated and is a closure that could be inlined.
+ // Too bad, because this is how OnceFunc will usually be used.
+ onceFunc()
+ }
+ })
+ b.Run("v=Local", func(b *testing.B) {
+ b.ReportAllocs()
+ // As of 3/2023, the compiler *does* recognize this local binding as an
+ // inlinable closure. This is the best case for OnceFunc, but probably
+ // not typical usage.
+ f := sync.OnceFunc(func() {})
+ for i := 0; i < b.N; i++ {
+ f()
+ }
+ })
+}
+
+var (
+ onceValue = sync.OnceValue(func() int { return 42 })
+
+ onceValueOnce sync.Once
+ onceValueValue int
+)
+
+func doOnceValue() int {
+ onceValueOnce.Do(func() {
+ onceValueValue = 42
+ })
+ return onceValueValue
+}
+
+func BenchmarkOnceValue(b *testing.B) {
+ // See BenchmarkOnceFunc
+ b.Run("v=Once", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ if want, got := 42, doOnceValue(); want != got {
+ b.Fatalf("want %d, got %d", want, got)
+ }
+ }
+ })
+ b.Run("v=Global", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ if want, got := 42, onceValue(); want != got {
+ b.Fatalf("want %d, got %d", want, got)
+ }
+ }
+ })
+ b.Run("v=Local", func(b *testing.B) {
+ b.ReportAllocs()
+ onceValue := sync.OnceValue(func() int { return 42 })
+ for i := 0; i < b.N; i++ {
+ if want, got := 42, onceValue(); want != got {
+ b.Fatalf("want %d, got %d", want, got)
+ }
+ }
+ })
+}
diff --git a/src/sync/pool.go b/src/sync/pool.go
new file mode 100644
index 0000000..cf01e2e
--- /dev/null
+++ b/src/sync/pool.go
@@ -0,0 +1,297 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "internal/race"
+ "runtime"
+ "sync/atomic"
+ "unsafe"
+)
+
+// A Pool is a set of temporary objects that may be individually saved and
+// retrieved.
+//
+// Any item stored in the Pool may be removed automatically at any time without
+// notification. If the Pool holds the only reference when this happens, the
+// item might be deallocated.
+//
+// A Pool is safe for use by multiple goroutines simultaneously.
+//
+// Pool's purpose is to cache allocated but unused items for later reuse,
+// relieving pressure on the garbage collector. That is, it makes it easy to
+// build efficient, thread-safe free lists. However, it is not suitable for all
+// free lists.
+//
+// An appropriate use of a Pool is to manage a group of temporary items
+// silently shared among and potentially reused by concurrent independent
+// clients of a package. Pool provides a way to amortize allocation overhead
+// across many clients.
+//
+// An example of good use of a Pool is in the fmt package, which maintains a
+// dynamically-sized store of temporary output buffers. The store scales under
+// load (when many goroutines are actively printing) and shrinks when
+// quiescent.
+//
+// On the other hand, a free list maintained as part of a short-lived object is
+// not a suitable use for a Pool, since the overhead does not amortize well in
+// that scenario. It is more efficient to have such objects implement their own
+// free list.
+//
+// A Pool must not be copied after first use.
+//
+// In the terminology of the Go memory model, a call to Put(x) “synchronizes before”
+// a call to Get returning that same value x.
+// Similarly, a call to New returning x “synchronizes before”
+// a call to Get returning that same value x.
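+//
+// A minimal usage sketch (illustrative only; a buffer pool along these lines
+// is a common pattern, not code from this package):
+//
+//	var bufPool = Pool{
+//		New: func() any { return new(bytes.Buffer) },
+//	}
+//
+//	buf := bufPool.Get().(*bytes.Buffer)
+//	buf.Reset()
+//	// ... use buf ...
+//	bufPool.Put(buf)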
+type Pool struct {
+ noCopy noCopy
+
+ local unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
+ localSize uintptr // size of the local array
+
+ victim unsafe.Pointer // local from previous cycle
+ victimSize uintptr // size of victims array
+
+ // New optionally specifies a function to generate
+ // a value when Get would otherwise return nil.
+ // It may not be changed concurrently with calls to Get.
+ New func() any
+}
+
+// Local per-P Pool appendix.
+type poolLocalInternal struct {
+ private any // Can be used only by the respective P.
+ shared poolChain // Local P can pushHead/popHead; any P can popTail.
+}
+
+type poolLocal struct {
+ poolLocalInternal
+
+ // Prevents false sharing on widespread platforms with
+	// 128 mod (cache line size) = 0.
+ pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
+}
+
+// from runtime
+func fastrandn(n uint32) uint32
+
+var poolRaceHash [128]uint64
+
+// poolRaceAddr returns an address to use as the synchronization point
+// for race detector logic. We don't use the actual pointer stored in x
+// directly, for fear of conflicting with other synchronization on that address.
+// Instead, we hash the pointer to get an index into poolRaceHash.
+// See discussion on golang.org/cl/31589.
+func poolRaceAddr(x any) unsafe.Pointer {
+ ptr := uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&x))[1])
+ h := uint32((uint64(uint32(ptr)) * 0x85ebca6b) >> 16)
+ return unsafe.Pointer(&poolRaceHash[h%uint32(len(poolRaceHash))])
+}
+
+// Put adds x to the pool.
+func (p *Pool) Put(x any) {
+ if x == nil {
+ return
+ }
+ if race.Enabled {
+ if fastrandn(4) == 0 {
+ // Randomly drop x on floor.
+ return
+ }
+ race.ReleaseMerge(poolRaceAddr(x))
+ race.Disable()
+ }
+ l, _ := p.pin()
+ if l.private == nil {
+ l.private = x
+ } else {
+ l.shared.pushHead(x)
+ }
+ runtime_procUnpin()
+ if race.Enabled {
+ race.Enable()
+ }
+}
+
+// Get selects an arbitrary item from the Pool, removes it from the
+// Pool, and returns it to the caller.
+// Get may choose to ignore the pool and treat it as empty.
+// Callers should not assume any relation between values passed to Put and
+// the values returned by Get.
+//
+// If Get would otherwise return nil and p.New is non-nil, Get returns
+// the result of calling p.New.
+func (p *Pool) Get() any {
+ if race.Enabled {
+ race.Disable()
+ }
+ l, pid := p.pin()
+ x := l.private
+ l.private = nil
+ if x == nil {
+ // Try to pop the head of the local shard. We prefer
+ // the head over the tail for temporal locality of
+ // reuse.
+ x, _ = l.shared.popHead()
+ if x == nil {
+ x = p.getSlow(pid)
+ }
+ }
+ runtime_procUnpin()
+ if race.Enabled {
+ race.Enable()
+ if x != nil {
+ race.Acquire(poolRaceAddr(x))
+ }
+ }
+ if x == nil && p.New != nil {
+ x = p.New()
+ }
+ return x
+}
+
+func (p *Pool) getSlow(pid int) any {
+ // See the comment in pin regarding ordering of the loads.
+ size := runtime_LoadAcquintptr(&p.localSize) // load-acquire
+ locals := p.local // load-consume
+ // Try to steal one element from other procs.
+ for i := 0; i < int(size); i++ {
+ l := indexLocal(locals, (pid+i+1)%int(size))
+ if x, _ := l.shared.popTail(); x != nil {
+ return x
+ }
+ }
+
+ // Try the victim cache. We do this after attempting to steal
+ // from all primary caches because we want objects in the
+ // victim cache to age out if at all possible.
+ size = atomic.LoadUintptr(&p.victimSize)
+ if uintptr(pid) >= size {
+ return nil
+ }
+ locals = p.victim
+ l := indexLocal(locals, pid)
+ if x := l.private; x != nil {
+ l.private = nil
+ return x
+ }
+ for i := 0; i < int(size); i++ {
+ l := indexLocal(locals, (pid+i)%int(size))
+ if x, _ := l.shared.popTail(); x != nil {
+ return x
+ }
+ }
+
+	// Mark the victim cache as empty so that future gets don't
+	// bother with it.
+ atomic.StoreUintptr(&p.victimSize, 0)
+
+ return nil
+}
+
+// pin pins the current goroutine to P, disables preemption and
+// returns the poolLocal for the P and the P's id.
+// Caller must call runtime_procUnpin() when done with the pool.
+func (p *Pool) pin() (*poolLocal, int) {
+ pid := runtime_procPin()
+ // In pinSlow we store to local and then to localSize, here we load in opposite order.
+ // Since we've disabled preemption, GC cannot happen in between.
+	// Thus here we must observe local at least as large as localSize.
+ // We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
+ s := runtime_LoadAcquintptr(&p.localSize) // load-acquire
+ l := p.local // load-consume
+ if uintptr(pid) < s {
+ return indexLocal(l, pid), pid
+ }
+ return p.pinSlow()
+}
+
+func (p *Pool) pinSlow() (*poolLocal, int) {
+ // Retry under the mutex.
+ // Can not lock the mutex while pinned.
+ runtime_procUnpin()
+ allPoolsMu.Lock()
+ defer allPoolsMu.Unlock()
+ pid := runtime_procPin()
+ // poolCleanup won't be called while we are pinned.
+ s := p.localSize
+ l := p.local
+ if uintptr(pid) < s {
+ return indexLocal(l, pid), pid
+ }
+ if p.local == nil {
+ allPools = append(allPools, p)
+ }
+ // If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
+ size := runtime.GOMAXPROCS(0)
+ local := make([]poolLocal, size)
+ atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
+ runtime_StoreReluintptr(&p.localSize, uintptr(size)) // store-release
+ return &local[pid], pid
+}
+
+func poolCleanup() {
+ // This function is called with the world stopped, at the beginning of a garbage collection.
+ // It must not allocate and probably should not call any runtime functions.
+
+ // Because the world is stopped, no pool user can be in a
+ // pinned section (in effect, this has all Ps pinned).
+
+ // Drop victim caches from all pools.
+ for _, p := range oldPools {
+ p.victim = nil
+ p.victimSize = 0
+ }
+
+ // Move primary cache to victim cache.
+ for _, p := range allPools {
+ p.victim = p.local
+ p.victimSize = p.localSize
+ p.local = nil
+ p.localSize = 0
+ }
+
+ // The pools with non-empty primary caches now have non-empty
+ // victim caches and no pools have primary caches.
+ oldPools, allPools = allPools, nil
+}
+
+var (
+ allPoolsMu Mutex
+
+ // allPools is the set of pools that have non-empty primary
+ // caches. Protected by either 1) allPoolsMu and pinning or 2)
+ // STW.
+ allPools []*Pool
+
+ // oldPools is the set of pools that may have non-empty victim
+ // caches. Protected by STW.
+ oldPools []*Pool
+)
+
+func init() {
+ runtime_registerPoolCleanup(poolCleanup)
+}
+
+func indexLocal(l unsafe.Pointer, i int) *poolLocal {
+ lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{}))
+ return (*poolLocal)(lp)
+}
+
+// Implemented in runtime.
+func runtime_registerPoolCleanup(cleanup func())
+func runtime_procPin() int
+func runtime_procUnpin()
+
+// The below are implemented in runtime/internal/atomic and the
+// compiler also knows to intrinsify the symbol we linkname into this
+// package.
+
+//go:linkname runtime_LoadAcquintptr runtime/internal/atomic.LoadAcquintptr
+func runtime_LoadAcquintptr(ptr *uintptr) uintptr
+
+//go:linkname runtime_StoreReluintptr runtime/internal/atomic.StoreReluintptr
+func runtime_StoreReluintptr(ptr *uintptr, val uintptr) uintptr
diff --git a/src/sync/pool_test.go b/src/sync/pool_test.go
new file mode 100644
index 0000000..5e38597
--- /dev/null
+++ b/src/sync/pool_test.go
@@ -0,0 +1,373 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Pool is no-op under race detector, so all these tests do not work.
+//
+//go:build !race
+
+package sync_test
+
+import (
+ "runtime"
+ "runtime/debug"
+ "sort"
+ . "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+func TestPool(t *testing.T) {
+ // disable GC so we can control when it happens.
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
+ var p Pool
+ if p.Get() != nil {
+ t.Fatal("expected empty")
+ }
+
+ // Make sure that the goroutine doesn't migrate to another P
+ // between Put and Get calls.
+ Runtime_procPin()
+ p.Put("a")
+ p.Put("b")
+ if g := p.Get(); g != "a" {
+ t.Fatalf("got %#v; want a", g)
+ }
+ if g := p.Get(); g != "b" {
+ t.Fatalf("got %#v; want b", g)
+ }
+ if g := p.Get(); g != nil {
+ t.Fatalf("got %#v; want nil", g)
+ }
+ Runtime_procUnpin()
+
+ // Put in a large number of objects so they spill into
+ // stealable space.
+ for i := 0; i < 100; i++ {
+ p.Put("c")
+ }
+ // After one GC, the victim cache should keep them alive.
+ runtime.GC()
+ if g := p.Get(); g != "c" {
+ t.Fatalf("got %#v; want c after GC", g)
+ }
+ // A second GC should drop the victim cache.
+ runtime.GC()
+ if g := p.Get(); g != nil {
+ t.Fatalf("got %#v; want nil after second GC", g)
+ }
+}
+
+func TestPoolNew(t *testing.T) {
+ // disable GC so we can control when it happens.
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
+
+ i := 0
+ p := Pool{
+ New: func() any {
+ i++
+ return i
+ },
+ }
+ if v := p.Get(); v != 1 {
+ t.Fatalf("got %v; want 1", v)
+ }
+ if v := p.Get(); v != 2 {
+ t.Fatalf("got %v; want 2", v)
+ }
+
+ // Make sure that the goroutine doesn't migrate to another P
+ // between Put and Get calls.
+ Runtime_procPin()
+ p.Put(42)
+ if v := p.Get(); v != 42 {
+ t.Fatalf("got %v; want 42", v)
+ }
+ Runtime_procUnpin()
+
+ if v := p.Get(); v != 3 {
+ t.Fatalf("got %v; want 3", v)
+ }
+}
+
+// Test that Pool does not hold pointers to previously cached resources.
+func TestPoolGC(t *testing.T) {
+ testPool(t, true)
+}
+
+// Test that Pool releases resources on GC.
+func TestPoolRelease(t *testing.T) {
+ testPool(t, false)
+}
+
+func testPool(t *testing.T, drain bool) {
+ var p Pool
+ const N = 100
+loop:
+ for try := 0; try < 3; try++ {
+ if try == 1 && testing.Short() {
+ break
+ }
+ var fin, fin1 uint32
+ for i := 0; i < N; i++ {
+ v := new(string)
+ runtime.SetFinalizer(v, func(vv *string) {
+ atomic.AddUint32(&fin, 1)
+ })
+ p.Put(v)
+ }
+ if drain {
+ for i := 0; i < N; i++ {
+ p.Get()
+ }
+ }
+ for i := 0; i < 5; i++ {
+ runtime.GC()
+ time.Sleep(time.Duration(i*100+10) * time.Millisecond)
+ // 1 pointer can remain on stack or elsewhere
+ if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
+ continue loop
+ }
+ }
+ t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
+ }
+}
+
+func TestPoolStress(t *testing.T) {
+ const P = 10
+ N := int(1e6)
+ if testing.Short() {
+ N /= 100
+ }
+ var p Pool
+ done := make(chan bool)
+ for i := 0; i < P; i++ {
+ go func() {
+ var v any = 0
+ for j := 0; j < N; j++ {
+ if v == nil {
+ v = 0
+ }
+ p.Put(v)
+ v = p.Get()
+ if v != nil && v.(int) != 0 {
+ t.Errorf("expect 0, got %v", v)
+ break
+ }
+ }
+ done <- true
+ }()
+ }
+ for i := 0; i < P; i++ {
+ <-done
+ }
+}
+
+func TestPoolDequeue(t *testing.T) {
+ testPoolDequeue(t, NewPoolDequeue(16))
+}
+
+func TestPoolChain(t *testing.T) {
+ testPoolDequeue(t, NewPoolChain())
+}
+
+func testPoolDequeue(t *testing.T, d PoolDequeue) {
+ const P = 10
+ var N int = 2e6
+ if testing.Short() {
+ N = 1e3
+ }
+ have := make([]int32, N)
+ var stop int32
+ var wg WaitGroup
+ record := func(val int) {
+ atomic.AddInt32(&have[val], 1)
+ if val == N-1 {
+ atomic.StoreInt32(&stop, 1)
+ }
+ }
+
+ // Start P-1 consumers.
+ for i := 1; i < P; i++ {
+ wg.Add(1)
+ go func() {
+ fail := 0
+ for atomic.LoadInt32(&stop) == 0 {
+ val, ok := d.PopTail()
+ if ok {
+ fail = 0
+ record(val.(int))
+ } else {
+ // Speed up the test by
+ // allowing the pusher to run.
+ if fail++; fail%100 == 0 {
+ runtime.Gosched()
+ }
+ }
+ }
+ wg.Done()
+ }()
+ }
+
+ // Start 1 producer.
+ nPopHead := 0
+ wg.Add(1)
+ go func() {
+ for j := 0; j < N; j++ {
+ for !d.PushHead(j) {
+ // Allow a popper to run.
+ runtime.Gosched()
+ }
+ if j%10 == 0 {
+ val, ok := d.PopHead()
+ if ok {
+ nPopHead++
+ record(val.(int))
+ }
+ }
+ }
+ wg.Done()
+ }()
+ wg.Wait()
+
+ // Check results.
+ for i, count := range have {
+ if count != 1 {
+ t.Errorf("expected have[%d] = 1, got %d", i, count)
+ }
+ }
+ // Check that at least some PopHeads succeeded. We skip this
+ // check in short mode because it's common enough that the
+ // queue will stay nearly empty all the time and a PopTail
+ // will happen during the window between every PushHead and
+ // PopHead.
+ if !testing.Short() && nPopHead == 0 {
+ t.Errorf("popHead never succeeded")
+ }
+}
+
+func BenchmarkPool(b *testing.B) {
+ var p Pool
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ p.Put(1)
+ p.Get()
+ }
+ })
+}
+
+func BenchmarkPoolOverflow(b *testing.B) {
+ var p Pool
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ for b := 0; b < 100; b++ {
+ p.Put(1)
+ }
+ for b := 0; b < 100; b++ {
+ p.Get()
+ }
+ }
+ })
+}
+
+// Simulate object starvation in order to force Ps to steal objects
+// from other Ps.
+func BenchmarkPoolStarvation(b *testing.B) {
+ var p Pool
+ count := 100
+	// Reduce the number of objects put into the pool by 33%. This creates
+	// object starvation, which forces P-local storage to steal objects from other Ps.
+ countStarved := count - int(float32(count)*0.33)
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ for b := 0; b < countStarved; b++ {
+ p.Put(1)
+ }
+ for b := 0; b < count; b++ {
+ p.Get()
+ }
+ }
+ })
+}
+
+var globalSink any
+
+func BenchmarkPoolSTW(b *testing.B) {
+ // Take control of GC.
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
+
+ var mstats runtime.MemStats
+ var pauses []uint64
+
+ var p Pool
+ for i := 0; i < b.N; i++ {
+ // Put a large number of items into a pool.
+ const N = 100000
+ var item any = 42
+ for i := 0; i < N; i++ {
+ p.Put(item)
+ }
+ // Do a GC.
+ runtime.GC()
+ // Record pause time.
+ runtime.ReadMemStats(&mstats)
+ pauses = append(pauses, mstats.PauseNs[(mstats.NumGC+255)%256])
+ }
+
+ // Get pause time stats.
+ sort.Slice(pauses, func(i, j int) bool { return pauses[i] < pauses[j] })
+ var total uint64
+ for _, ns := range pauses {
+ total += ns
+ }
+ // ns/op for this benchmark is average STW time.
+ b.ReportMetric(float64(total)/float64(b.N), "ns/op")
+ b.ReportMetric(float64(pauses[len(pauses)*95/100]), "p95-ns/STW")
+ b.ReportMetric(float64(pauses[len(pauses)*50/100]), "p50-ns/STW")
+}
+
+func BenchmarkPoolExpensiveNew(b *testing.B) {
+ // Populate a pool with items that are expensive to construct
+ // to stress pool cleanup and subsequent reconstruction.
+
+ // Create a ballast so the GC has a non-zero heap size and
+ // runs at reasonable times.
+ globalSink = make([]byte, 8<<20)
+ defer func() { globalSink = nil }()
+
+ // Create a pool that's "expensive" to fill.
+ var p Pool
+ var nNew uint64
+ p.New = func() any {
+ atomic.AddUint64(&nNew, 1)
+ time.Sleep(time.Millisecond)
+ return 42
+ }
+ var mstats1, mstats2 runtime.MemStats
+ runtime.ReadMemStats(&mstats1)
+ b.RunParallel(func(pb *testing.PB) {
+ // Simulate 100X the number of goroutines having items
+ // checked out from the Pool simultaneously.
+ items := make([]any, 100)
+ var sink []byte
+ for pb.Next() {
+ // Stress the pool.
+ for i := range items {
+ items[i] = p.Get()
+ // Simulate doing some work with this
+ // item checked out.
+ sink = make([]byte, 32<<10)
+ }
+ for i, v := range items {
+ p.Put(v)
+ items[i] = nil
+ }
+ }
+ _ = sink
+ })
+ runtime.ReadMemStats(&mstats2)
+
+ b.ReportMetric(float64(mstats2.NumGC-mstats1.NumGC)/float64(b.N), "GCs/op")
+ b.ReportMetric(float64(nNew)/float64(b.N), "New/op")
+}
diff --git a/src/sync/poolqueue.go b/src/sync/poolqueue.go
new file mode 100644
index 0000000..631f2c1
--- /dev/null
+++ b/src/sync/poolqueue.go
@@ -0,0 +1,309 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// poolDequeue is a lock-free fixed-size single-producer,
+// multi-consumer queue. The single producer can both push and pop
+// from the head, and consumers can pop from the tail.
+//
+// It has the added feature that it nils out unused slots to avoid
+// unnecessary retention of objects. This is important for sync.Pool,
+// but not typically a property considered in the literature.
+type poolDequeue struct {
+ // headTail packs together a 32-bit head index and a 32-bit
+	// tail index. Both are indexes into vals, taken modulo len(vals).
+ //
+ // tail = index of oldest data in queue
+ // head = index of next slot to fill
+ //
+ // Slots in the range [tail, head) are owned by consumers.
+ // A consumer continues to own a slot outside this range until
+ // it nils the slot, at which point ownership passes to the
+ // producer.
+ //
+ // The head index is stored in the most-significant bits so
+ // that we can atomically add to it and the overflow is
+ // harmless.
+ headTail uint64
+
+ // vals is a ring buffer of interface{} values stored in this
+ // dequeue. The size of this must be a power of 2.
+ //
+ // vals[i].typ is nil if the slot is empty and non-nil
+ // otherwise. A slot is still in use until *both* the tail
+ // index has moved beyond it and typ has been set to nil. This
+ // is set to nil atomically by the consumer and read
+ // atomically by the producer.
+ vals []eface
+}
+
+type eface struct {
+ typ, val unsafe.Pointer
+}
+
+const dequeueBits = 32
+
+// dequeueLimit is the maximum size of a poolDequeue.
+//
+// This must be at most (1<<dequeueBits)/2 because detecting fullness
+// depends on wrapping around the ring buffer without wrapping around
+// the index. We divide by 4 so this fits in an int on 32-bit.
+const dequeueLimit = (1 << dequeueBits) / 4
+
+// dequeueNil is used in poolDequeue to represent interface{}(nil).
+// Since we use nil to represent empty slots, we need a sentinel value
+// to represent nil.
+type dequeueNil *struct{}
+
+func (d *poolDequeue) unpack(ptrs uint64) (head, tail uint32) {
+ const mask = 1<<dequeueBits - 1
+ head = uint32((ptrs >> dequeueBits) & mask)
+ tail = uint32(ptrs & mask)
+ return
+}
+
+func (d *poolDequeue) pack(head, tail uint32) uint64 {
+ const mask = 1<<dequeueBits - 1
+ return (uint64(head) << dequeueBits) |
+ uint64(tail&mask)
+}
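+
+// As an illustrative example (not part of the original source): pack(3, 1)
+// yields 0x0000000300000001, and unpack of that value returns head=3, tail=1.
+// This is why pushHead can advance the head alone by adding 1<<dequeueBits
+// to headTail.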
+
+// pushHead adds val at the head of the queue. It returns false if the
+// queue is full. It must only be called by a single producer.
+func (d *poolDequeue) pushHead(val any) bool {
+ ptrs := atomic.LoadUint64(&d.headTail)
+ head, tail := d.unpack(ptrs)
+ if (tail+uint32(len(d.vals)))&(1<<dequeueBits-1) == head {
+ // Queue is full.
+ return false
+ }
+ slot := &d.vals[head&uint32(len(d.vals)-1)]
+
+ // Check if the head slot has been released by popTail.
+ typ := atomic.LoadPointer(&slot.typ)
+ if typ != nil {
+ // Another goroutine is still cleaning up the tail, so
+ // the queue is actually still full.
+ return false
+ }
+
+ // The head slot is free, so we own it.
+ if val == nil {
+ val = dequeueNil(nil)
+ }
+ *(*any)(unsafe.Pointer(slot)) = val
+
+ // Increment head. This passes ownership of slot to popTail
+ // and acts as a store barrier for writing the slot.
+ atomic.AddUint64(&d.headTail, 1<<dequeueBits)
+ return true
+}
+
+// popHead removes and returns the element at the head of the queue.
+// It returns false if the queue is empty. It must only be called by a
+// single producer.
+func (d *poolDequeue) popHead() (any, bool) {
+ var slot *eface
+ for {
+ ptrs := atomic.LoadUint64(&d.headTail)
+ head, tail := d.unpack(ptrs)
+ if tail == head {
+ // Queue is empty.
+ return nil, false
+ }
+
+ // Confirm tail and decrement head. We do this before
+ // reading the value to take back ownership of this
+ // slot.
+ head--
+ ptrs2 := d.pack(head, tail)
+ if atomic.CompareAndSwapUint64(&d.headTail, ptrs, ptrs2) {
+ // We successfully took back slot.
+ slot = &d.vals[head&uint32(len(d.vals)-1)]
+ break
+ }
+ }
+
+ val := *(*any)(unsafe.Pointer(slot))
+ if val == dequeueNil(nil) {
+ val = nil
+ }
+ // Zero the slot. Unlike popTail, this isn't racing with
+ // pushHead, so we don't need to be careful here.
+ *slot = eface{}
+ return val, true
+}
+
+// popTail removes and returns the element at the tail of the queue.
+// It returns false if the queue is empty. It may be called by any
+// number of consumers.
+func (d *poolDequeue) popTail() (any, bool) {
+ var slot *eface
+ for {
+ ptrs := atomic.LoadUint64(&d.headTail)
+ head, tail := d.unpack(ptrs)
+ if tail == head {
+ // Queue is empty.
+ return nil, false
+ }
+
+ // Confirm head and tail (for our speculative check
+ // above) and increment tail. If this succeeds, then
+ // we own the slot at tail.
+ ptrs2 := d.pack(head, tail+1)
+ if atomic.CompareAndSwapUint64(&d.headTail, ptrs, ptrs2) {
+ // Success.
+ slot = &d.vals[tail&uint32(len(d.vals)-1)]
+ break
+ }
+ }
+
+ // We now own slot.
+ val := *(*any)(unsafe.Pointer(slot))
+ if val == dequeueNil(nil) {
+ val = nil
+ }
+
+ // Tell pushHead that we're done with this slot. Zeroing the
+ // slot is also important so we don't leave behind references
+ // that could keep this object live longer than necessary.
+ //
+ // We write to val first and then publish that we're done with
+ // this slot by atomically writing to typ.
+ slot.val = nil
+ atomic.StorePointer(&slot.typ, nil)
+ // At this point pushHead owns the slot.
+
+ return val, true
+}
+
+// poolChain is a dynamically-sized version of poolDequeue.
+//
+// This is implemented as a doubly-linked list queue of poolDequeues
+// where each dequeue is double the size of the previous one. Once a
+// dequeue fills up, this allocates a new one and only ever pushes to
+// the latest dequeue. Pops happen from the other end of the list and
+// once a dequeue is exhausted, it gets removed from the list.
+type poolChain struct {
+ // head is the poolDequeue to push to. This is only accessed
+ // by the producer, so doesn't need to be synchronized.
+ head *poolChainElt
+
+ // tail is the poolDequeue to popTail from. This is accessed
+ // by consumers, so reads and writes must be atomic.
+ tail *poolChainElt
+}
+
+type poolChainElt struct {
+ poolDequeue
+
+ // next and prev link to the adjacent poolChainElts in this
+ // poolChain.
+ //
+ // next is written atomically by the producer and read
+ // atomically by the consumer. It only transitions from nil to
+ // non-nil.
+ //
+ // prev is written atomically by the consumer and read
+ // atomically by the producer. It only transitions from
+ // non-nil to nil.
+ next, prev *poolChainElt
+}
+
+func storePoolChainElt(pp **poolChainElt, v *poolChainElt) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(pp)), unsafe.Pointer(v))
+}
+
+func loadPoolChainElt(pp **poolChainElt) *poolChainElt {
+ return (*poolChainElt)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(pp))))
+}
+
+func (c *poolChain) pushHead(val any) {
+ d := c.head
+ if d == nil {
+ // Initialize the chain.
+ const initSize = 8 // Must be a power of 2
+ d = new(poolChainElt)
+ d.vals = make([]eface, initSize)
+ c.head = d
+ storePoolChainElt(&c.tail, d)
+ }
+
+ if d.pushHead(val) {
+ return
+ }
+
+ // The current dequeue is full. Allocate a new one of twice
+ // the size.
+ newSize := len(d.vals) * 2
+ if newSize >= dequeueLimit {
+ // Can't make it any bigger.
+ newSize = dequeueLimit
+ }
+
+ d2 := &poolChainElt{prev: d}
+ d2.vals = make([]eface, newSize)
+ c.head = d2
+ storePoolChainElt(&d.next, d2)
+ d2.pushHead(val)
+}
+
+func (c *poolChain) popHead() (any, bool) {
+ d := c.head
+ for d != nil {
+ if val, ok := d.popHead(); ok {
+ return val, ok
+ }
+ // There may still be unconsumed elements in the
+ // previous dequeue, so try backing up.
+ d = loadPoolChainElt(&d.prev)
+ }
+ return nil, false
+}
+
+func (c *poolChain) popTail() (any, bool) {
+ d := loadPoolChainElt(&c.tail)
+ if d == nil {
+ return nil, false
+ }
+
+ for {
+ // It's important that we load the next pointer
+ // *before* popping the tail. In general, d may be
+ // transiently empty, but if next is non-nil before
+ // the pop and the pop fails, then d is permanently
+ // empty, which is the only condition under which it's
+ // safe to drop d from the chain.
+ d2 := loadPoolChainElt(&d.next)
+
+ if val, ok := d.popTail(); ok {
+ return val, ok
+ }
+
+ if d2 == nil {
+ // This is the only dequeue. It's empty right
+ // now, but could be pushed to in the future.
+ return nil, false
+ }
+
+ // The tail of the chain has been drained, so move on
+ // to the next dequeue. Try to drop it from the chain
+ // so the next pop doesn't have to look at the empty
+ // dequeue again.
+ if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&c.tail)), unsafe.Pointer(d), unsafe.Pointer(d2)) {
+ // We won the race. Clear the prev pointer so
+ // the garbage collector can collect the empty
+ // dequeue and so popHead doesn't back up
+ // further than necessary.
+ storePoolChainElt(&d2.prev, nil)
+ }
+ d = d2
+ }
+}
diff --git a/src/sync/runtime.go b/src/sync/runtime.go
new file mode 100644
index 0000000..5a90813
--- /dev/null
+++ b/src/sync/runtime.go
@@ -0,0 +1,63 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import "unsafe"
+
+// defined in package runtime
+
+// Semacquire waits until *s > 0 and then atomically decrements it.
+// It is intended as a simple sleep primitive for use by the synchronization
+// library and should not be used directly.
+func runtime_Semacquire(s *uint32)
+
+// Semacquire(RW)Mutex(R) is like Semacquire, but for profiling contended
+// Mutexes and RWMutexes.
+// If lifo is true, queue waiter at the head of wait queue.
+// skipframes is the number of frames to omit during tracing, counting from
+// runtime_SemacquireMutex's caller.
+// The different forms of this function just tell the runtime how to present
+// the reason for waiting in a backtrace, and is used to compute some metrics.
+// Otherwise they're functionally identical.
+func runtime_SemacquireMutex(s *uint32, lifo bool, skipframes int)
+func runtime_SemacquireRWMutexR(s *uint32, lifo bool, skipframes int)
+func runtime_SemacquireRWMutex(s *uint32, lifo bool, skipframes int)
+
+// Semrelease atomically increments *s and notifies a waiting goroutine
+// if one is blocked in Semacquire.
+// It is intended as a simple wakeup primitive for use by the synchronization
+// library and should not be used directly.
+// If handoff is true, pass count directly to the first waiter.
+// skipframes is the number of frames to omit during tracing, counting from
+// runtime_Semrelease's caller.
+func runtime_Semrelease(s *uint32, handoff bool, skipframes int)
+
+// See runtime/sema.go for documentation.
+func runtime_notifyListAdd(l *notifyList) uint32
+
+// See runtime/sema.go for documentation.
+func runtime_notifyListWait(l *notifyList, t uint32)
+
+// See runtime/sema.go for documentation.
+func runtime_notifyListNotifyAll(l *notifyList)
+
+// See runtime/sema.go for documentation.
+func runtime_notifyListNotifyOne(l *notifyList)
+
+// Ensure that sync and runtime agree on size of notifyList.
+func runtime_notifyListCheck(size uintptr)
+func init() {
+ var n notifyList
+ runtime_notifyListCheck(unsafe.Sizeof(n))
+}
+
+// Active spinning runtime support.
+// runtime_canSpin reports whether spinning makes sense at the moment.
+func runtime_canSpin(i int) bool
+
+// runtime_doSpin does active spinning.
+func runtime_doSpin()
+
+func runtime_nanotime() int64
diff --git a/src/sync/runtime2.go b/src/sync/runtime2.go
new file mode 100644
index 0000000..9b7e992
--- /dev/null
+++ b/src/sync/runtime2.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.staticlockranking
+
+package sync
+
+import "unsafe"
+
+// Approximation of notifyList in runtime/sema.go. Size and alignment must
+// agree.
+type notifyList struct {
+ wait uint32
+ notify uint32
+ lock uintptr // key field of the mutex
+ head unsafe.Pointer
+ tail unsafe.Pointer
+}
diff --git a/src/sync/runtime2_lockrank.go b/src/sync/runtime2_lockrank.go
new file mode 100644
index 0000000..cdb1af4
--- /dev/null
+++ b/src/sync/runtime2_lockrank.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.staticlockranking
+
+package sync
+
+import "unsafe"
+
+// Approximation of notifyList in runtime/sema.go. Size and alignment must
+// agree.
+type notifyList struct {
+ wait uint32
+ notify uint32
+ rank int // rank field of the mutex
+ pad int // pad field of the mutex
+ lock uintptr // key field of the mutex
+
+ head unsafe.Pointer
+ tail unsafe.Pointer
+}
diff --git a/src/sync/runtime_sema_test.go b/src/sync/runtime_sema_test.go
new file mode 100644
index 0000000..152cf0e
--- /dev/null
+++ b/src/sync/runtime_sema_test.go
@@ -0,0 +1,75 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "runtime"
+ . "sync"
+ "testing"
+)
+
+func BenchmarkSemaUncontended(b *testing.B) {
+ type PaddedSem struct {
+ sem uint32
+ pad [32]uint32
+ }
+ b.RunParallel(func(pb *testing.PB) {
+ sem := new(PaddedSem)
+ for pb.Next() {
+ Runtime_Semrelease(&sem.sem, false, 0)
+ Runtime_Semacquire(&sem.sem)
+ }
+ })
+}
+
+func benchmarkSema(b *testing.B, block, work bool) {
+ if b.N == 0 {
+ return
+ }
+ sem := uint32(0)
+ if block {
+ done := make(chan bool)
+ go func() {
+ for p := 0; p < runtime.GOMAXPROCS(0)/2; p++ {
+ Runtime_Semacquire(&sem)
+ }
+ done <- true
+ }()
+ defer func() {
+ <-done
+ }()
+ }
+ b.RunParallel(func(pb *testing.PB) {
+ foo := 0
+ for pb.Next() {
+ Runtime_Semrelease(&sem, false, 0)
+ if work {
+ for i := 0; i < 100; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ }
+ Runtime_Semacquire(&sem)
+ }
+ _ = foo
+ Runtime_Semrelease(&sem, false, 0)
+ })
+}
+
+func BenchmarkSemaSyntNonblock(b *testing.B) {
+ benchmarkSema(b, false, false)
+}
+
+func BenchmarkSemaSyntBlock(b *testing.B) {
+ benchmarkSema(b, true, false)
+}
+
+func BenchmarkSemaWorkNonblock(b *testing.B) {
+ benchmarkSema(b, false, true)
+}
+
+func BenchmarkSemaWorkBlock(b *testing.B) {
+ benchmarkSema(b, true, true)
+}
diff --git a/src/sync/rwmutex.go b/src/sync/rwmutex.go
new file mode 100644
index 0000000..1317624
--- /dev/null
+++ b/src/sync/rwmutex.go
@@ -0,0 +1,244 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "internal/race"
+ "sync/atomic"
+ "unsafe"
+)
+
+// There is a modified copy of this file in runtime/rwmutex.go.
+// If you make any changes here, see if you should make them there.
+
+// A RWMutex is a reader/writer mutual exclusion lock.
+// The lock can be held by an arbitrary number of readers or a single writer.
+// The zero value for a RWMutex is an unlocked mutex.
+//
+// A RWMutex must not be copied after first use.
+//
+// If a goroutine holds a RWMutex for reading and another goroutine might
+// call Lock, no goroutine should expect to be able to acquire a read lock
+// until the initial read lock is released. In particular, this prohibits
+// recursive read locking. This is to ensure that the lock eventually becomes
+// available; a blocked Lock call excludes new readers from acquiring the
+// lock.
+//
+// In the terminology of the Go memory model,
+// the n'th call to Unlock “synchronizes before” the m'th call to Lock
+// for any n < m, just as for Mutex.
+// For any call to RLock, there exists an n such that
+// the n'th call to Unlock “synchronizes before” that call to RLock,
+// and the corresponding call to RUnlock “synchronizes before”
+// the n+1'th call to Lock.
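+//
+// A minimal usage sketch (illustrative only; the map and key are arbitrary):
+//
+//	var mu RWMutex
+//	cache := make(map[string]int)
+//
+//	// Readers may hold the lock concurrently.
+//	mu.RLock()
+//	v := cache["k"]
+//	mu.RUnlock()
+//
+//	// A writer excludes both readers and other writers.
+//	mu.Lock()
+//	cache["k"] = v + 1
+//	mu.Unlock()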
+type RWMutex struct {
+ w Mutex // held if there are pending writers
+ writerSem uint32 // semaphore for writers to wait for completing readers
+ readerSem uint32 // semaphore for readers to wait for completing writers
+ readerCount atomic.Int32 // number of pending readers
+ readerWait atomic.Int32 // number of departing readers
+}
+
+const rwmutexMaxReaders = 1 << 30
+
+// Happens-before relationships are indicated to the race detector via:
+// - Unlock -> Lock: readerSem
+// - Unlock -> RLock: readerSem
+// - RUnlock -> Lock: writerSem
+//
+// The methods below temporarily disable handling of race synchronization
+// events in order to provide the more precise model above to the race
+// detector.
+//
+// For example, atomic.AddInt32 in RLock should not appear to provide
+// acquire-release semantics, which would incorrectly synchronize racing
+// readers, thus potentially missing races.
+
+// RLock locks rw for reading.
+//
+// It should not be used for recursive read locking; a blocked Lock
+// call excludes new readers from acquiring the lock. See the
+// documentation on the RWMutex type.
+func (rw *RWMutex) RLock() {
+ if race.Enabled {
+ _ = rw.w.state
+ race.Disable()
+ }
+ if rw.readerCount.Add(1) < 0 {
+ // A writer is pending, wait for it.
+ runtime_SemacquireRWMutexR(&rw.readerSem, false, 0)
+ }
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(&rw.readerSem))
+ }
+}
+
+// TryRLock tries to lock rw for reading and reports whether it succeeded.
+//
+// Note that while correct uses of TryRLock do exist, they are rare,
+// and use of TryRLock is often a sign of a deeper problem
+// in a particular use of mutexes.
+func (rw *RWMutex) TryRLock() bool {
+ if race.Enabled {
+ _ = rw.w.state
+ race.Disable()
+ }
+ for {
+ c := rw.readerCount.Load()
+ if c < 0 {
+ if race.Enabled {
+ race.Enable()
+ }
+ return false
+ }
+ if rw.readerCount.CompareAndSwap(c, c+1) {
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(&rw.readerSem))
+ }
+ return true
+ }
+ }
+}
+
+// RUnlock undoes a single RLock call;
+// it does not affect other simultaneous readers.
+// It is a run-time error if rw is not locked for reading
+// on entry to RUnlock.
+func (rw *RWMutex) RUnlock() {
+ if race.Enabled {
+ _ = rw.w.state
+ race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
+ race.Disable()
+ }
+ if r := rw.readerCount.Add(-1); r < 0 {
+ // Outlined slow-path to allow the fast-path to be inlined
+ rw.rUnlockSlow(r)
+ }
+ if race.Enabled {
+ race.Enable()
+ }
+}
+
+func (rw *RWMutex) rUnlockSlow(r int32) {
+ if r+1 == 0 || r+1 == -rwmutexMaxReaders {
+ race.Enable()
+ fatal("sync: RUnlock of unlocked RWMutex")
+ }
+ // A writer is pending.
+ if rw.readerWait.Add(-1) == 0 {
+ // The last reader unblocks the writer.
+ runtime_Semrelease(&rw.writerSem, false, 1)
+ }
+}
+
+// Lock locks rw for writing.
+// If the lock is already locked for reading or writing,
+// Lock blocks until the lock is available.
+func (rw *RWMutex) Lock() {
+ if race.Enabled {
+ _ = rw.w.state
+ race.Disable()
+ }
+ // First, resolve competition with other writers.
+ rw.w.Lock()
+ // Announce to readers there is a pending writer.
+ r := rw.readerCount.Add(-rwmutexMaxReaders) + rwmutexMaxReaders
+ // Wait for active readers.
+ if r != 0 && rw.readerWait.Add(r) != 0 {
+ runtime_SemacquireRWMutex(&rw.writerSem, false, 0)
+ }
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(&rw.readerSem))
+ race.Acquire(unsafe.Pointer(&rw.writerSem))
+ }
+}
+
+// TryLock tries to lock rw for writing and reports whether it succeeded.
+//
+// Note that while correct uses of TryLock do exist, they are rare,
+// and use of TryLock is often a sign of a deeper problem
+// in a particular use of mutexes.
+func (rw *RWMutex) TryLock() bool {
+ if race.Enabled {
+ _ = rw.w.state
+ race.Disable()
+ }
+ if !rw.w.TryLock() {
+ if race.Enabled {
+ race.Enable()
+ }
+ return false
+ }
+ if !rw.readerCount.CompareAndSwap(0, -rwmutexMaxReaders) {
+ rw.w.Unlock()
+ if race.Enabled {
+ race.Enable()
+ }
+ return false
+ }
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(&rw.readerSem))
+ race.Acquire(unsafe.Pointer(&rw.writerSem))
+ }
+ return true
+}
+
+// Unlock unlocks rw for writing. It is a run-time error if rw is
+// not locked for writing on entry to Unlock.
+//
+// As with Mutexes, a locked RWMutex is not associated with a particular
+// goroutine. One goroutine may RLock (Lock) a RWMutex and then
+// arrange for another goroutine to RUnlock (Unlock) it.
+func (rw *RWMutex) Unlock() {
+ if race.Enabled {
+ _ = rw.w.state
+ race.Release(unsafe.Pointer(&rw.readerSem))
+ race.Disable()
+ }
+
+ // Announce to readers there is no active writer.
+ r := rw.readerCount.Add(rwmutexMaxReaders)
+ if r >= rwmutexMaxReaders {
+ race.Enable()
+ fatal("sync: Unlock of unlocked RWMutex")
+ }
+ // Unblock blocked readers, if any.
+ for i := 0; i < int(r); i++ {
+ runtime_Semrelease(&rw.readerSem, false, 0)
+ }
+ // Allow other writers to proceed.
+ rw.w.Unlock()
+ if race.Enabled {
+ race.Enable()
+ }
+}
+
+// syscall_hasWaitingReaders reports whether any goroutine is waiting
+// to acquire a read lock on rw. This exists because syscall.ForkLock
+// is an RWMutex, and we can't change that without breaking compatibility.
+// We don't need or want RWMutex semantics for ForkLock, and we use
+// this private API to avoid having to change the type of ForkLock.
+// For more details see the syscall package.
+//
+//go:linkname syscall_hasWaitingReaders syscall.hasWaitingReaders
+func syscall_hasWaitingReaders(rw *RWMutex) bool {
+ r := rw.readerCount.Load()
+ return r < 0 && r+rwmutexMaxReaders > 0
+}
+
+// RLocker returns a Locker interface that implements
+// the Lock and Unlock methods by calling rw.RLock and rw.RUnlock.
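+//
+// For example, RLocker can supply the Locker that NewCond requires when
+// waiters only need read access (an illustrative sketch):
+//
+//	cond := NewCond(rw.RLocker())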
+func (rw *RWMutex) RLocker() Locker {
+ return (*rlocker)(rw)
+}
+
+type rlocker RWMutex
+
+func (r *rlocker) Lock() { (*RWMutex)(r).RLock() }
+func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() }
diff --git a/src/sync/rwmutex_test.go b/src/sync/rwmutex_test.go
new file mode 100644
index 0000000..dfbdd9b
--- /dev/null
+++ b/src/sync/rwmutex_test.go
@@ -0,0 +1,245 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// GOMAXPROCS=10 go test
+
+package sync_test
+
+import (
+ "fmt"
+ "runtime"
+ . "sync"
+ "sync/atomic"
+ "testing"
+)
+
+// There is a modified copy of this file in runtime/rwmutex_test.go.
+// If you make any changes here, see if you should make them there.
+
+func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
+ m.RLock()
+ clocked <- true
+ <-cunlock
+ m.RUnlock()
+ cdone <- true
+}
+
+func doTestParallelReaders(numReaders, gomaxprocs int) {
+ runtime.GOMAXPROCS(gomaxprocs)
+ var m RWMutex
+ clocked := make(chan bool)
+ cunlock := make(chan bool)
+ cdone := make(chan bool)
+ for i := 0; i < numReaders; i++ {
+ go parallelReader(&m, clocked, cunlock, cdone)
+ }
+ // Wait for all parallel RLock()s to succeed.
+ for i := 0; i < numReaders; i++ {
+ <-clocked
+ }
+ for i := 0; i < numReaders; i++ {
+ cunlock <- true
+ }
+ // Wait for the goroutines to finish.
+ for i := 0; i < numReaders; i++ {
+ <-cdone
+ }
+}
+
+func TestParallelReaders(t *testing.T) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
+ doTestParallelReaders(1, 4)
+ doTestParallelReaders(3, 4)
+ doTestParallelReaders(4, 2)
+}
+
+func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
+ for i := 0; i < num_iterations; i++ {
+ rwm.RLock()
+ n := atomic.AddInt32(activity, 1)
+ if n < 1 || n >= 10000 {
+ rwm.RUnlock()
+ panic(fmt.Sprintf("wlock(%d)\n", n))
+ }
+ for i := 0; i < 100; i++ {
+ }
+ atomic.AddInt32(activity, -1)
+ rwm.RUnlock()
+ }
+ cdone <- true
+}
+
+func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
+ for i := 0; i < num_iterations; i++ {
+ rwm.Lock()
+ n := atomic.AddInt32(activity, 10000)
+ if n != 10000 {
+ rwm.Unlock()
+ panic(fmt.Sprintf("wlock(%d)\n", n))
+ }
+ for i := 0; i < 100; i++ {
+ }
+ atomic.AddInt32(activity, -10000)
+ rwm.Unlock()
+ }
+ cdone <- true
+}
+
+func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
+ runtime.GOMAXPROCS(gomaxprocs)
+ // Number of active readers + 10000 * number of active writers.
+ var activity int32
+ var rwm RWMutex
+ cdone := make(chan bool)
+ go writer(&rwm, num_iterations, &activity, cdone)
+ var i int
+ for i = 0; i < numReaders/2; i++ {
+ go reader(&rwm, num_iterations, &activity, cdone)
+ }
+ go writer(&rwm, num_iterations, &activity, cdone)
+ for ; i < numReaders; i++ {
+ go reader(&rwm, num_iterations, &activity, cdone)
+ }
+ // Wait for the 2 writers and all readers to finish.
+ for i := 0; i < 2+numReaders; i++ {
+ <-cdone
+ }
+}
+
+func TestRWMutex(t *testing.T) {
+ var m RWMutex
+
+ m.Lock()
+ if m.TryLock() {
+ t.Fatalf("TryLock succeeded with mutex locked")
+ }
+ if m.TryRLock() {
+ t.Fatalf("TryRLock succeeded with mutex locked")
+ }
+ m.Unlock()
+
+ if !m.TryLock() {
+ t.Fatalf("TryLock failed with mutex unlocked")
+ }
+ m.Unlock()
+
+ if !m.TryRLock() {
+ t.Fatalf("TryRLock failed with mutex unlocked")
+ }
+ if !m.TryRLock() {
+ t.Fatalf("TryRLock failed with mutex rlocked")
+ }
+ if m.TryLock() {
+ t.Fatalf("TryLock succeeded with mutex rlocked")
+ }
+ m.RUnlock()
+ m.RUnlock()
+
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
+ n := 1000
+ if testing.Short() {
+ n = 5
+ }
+ HammerRWMutex(1, 1, n)
+ HammerRWMutex(1, 3, n)
+ HammerRWMutex(1, 10, n)
+ HammerRWMutex(4, 1, n)
+ HammerRWMutex(4, 3, n)
+ HammerRWMutex(4, 10, n)
+ HammerRWMutex(10, 1, n)
+ HammerRWMutex(10, 3, n)
+ HammerRWMutex(10, 10, n)
+ HammerRWMutex(10, 5, n)
+}
+
+func TestRLocker(t *testing.T) {
+ var wl RWMutex
+ var rl Locker
+ wlocked := make(chan bool, 1)
+ rlocked := make(chan bool, 1)
+ rl = wl.RLocker()
+ n := 10
+ go func() {
+ for i := 0; i < n; i++ {
+ rl.Lock()
+ rl.Lock()
+ rlocked <- true
+ wl.Lock()
+ wlocked <- true
+ }
+ }()
+ for i := 0; i < n; i++ {
+ <-rlocked
+ rl.Unlock()
+ select {
+ case <-wlocked:
+ t.Fatal("RLocker() didn't read-lock it")
+ default:
+ }
+ rl.Unlock()
+ <-wlocked
+ select {
+ case <-rlocked:
+ t.Fatal("RLocker() didn't respect the write lock")
+ default:
+ }
+ wl.Unlock()
+ }
+}
+
+func BenchmarkRWMutexUncontended(b *testing.B) {
+ type PaddedRWMutex struct {
+ RWMutex
+ pad [32]uint32
+ }
+ b.RunParallel(func(pb *testing.PB) {
+ var rwm PaddedRWMutex
+ for pb.Next() {
+ rwm.RLock()
+ rwm.RLock()
+ rwm.RUnlock()
+ rwm.RUnlock()
+ rwm.Lock()
+ rwm.Unlock()
+ }
+ })
+}
+
+func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
+ var rwm RWMutex
+ b.RunParallel(func(pb *testing.PB) {
+ foo := 0
+ for pb.Next() {
+ foo++
+ if foo%writeRatio == 0 {
+ rwm.Lock()
+ rwm.Unlock()
+ } else {
+ rwm.RLock()
+ for i := 0; i != localWork; i += 1 {
+ foo *= 2
+ foo /= 2
+ }
+ rwm.RUnlock()
+ }
+ }
+ _ = foo
+ })
+}
+
+func BenchmarkRWMutexWrite100(b *testing.B) {
+ benchmarkRWMutex(b, 0, 100)
+}
+
+func BenchmarkRWMutexWrite10(b *testing.B) {
+ benchmarkRWMutex(b, 0, 10)
+}
+
+func BenchmarkRWMutexWorkWrite100(b *testing.B) {
+ benchmarkRWMutex(b, 100, 100)
+}
+
+func BenchmarkRWMutexWorkWrite10(b *testing.B) {
+ benchmarkRWMutex(b, 100, 10)
+}
diff --git a/src/sync/waitgroup.go b/src/sync/waitgroup.go
new file mode 100644
index 0000000..be21417
--- /dev/null
+++ b/src/sync/waitgroup.go
@@ -0,0 +1,127 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "internal/race"
+ "sync/atomic"
+ "unsafe"
+)
+
+// A WaitGroup waits for a collection of goroutines to finish.
+// The main goroutine calls Add to set the number of
+// goroutines to wait for. Then each of the goroutines
+// runs and calls Done when finished. At the same time,
+// Wait can be used to block until all goroutines have finished.
+//
+// A WaitGroup must not be copied after first use.
+//
+// In the terminology of the Go memory model, a call to Done
+// “synchronizes before” the return of any Wait call that it unblocks.
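+//
+// A minimal usage sketch (illustrative only; worker is a hypothetical
+// function):
+//
+//	var wg WaitGroup
+//	for i := 0; i < 3; i++ {
+//		wg.Add(1)
+//		go func() {
+//			defer wg.Done()
+//			worker()
+//		}()
+//	}
+//	wg.Wait() // blocks until all three Done calls have happened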
+type WaitGroup struct {
+ noCopy noCopy
+
+ state atomic.Uint64 // high 32 bits are counter, low 32 bits are waiter count.
+ sema uint32
+}
+
+// Add adds delta, which may be negative, to the WaitGroup counter.
+// If the counter becomes zero, all goroutines blocked on Wait are released.
+// If the counter goes negative, Add panics.
+//
+// Note that calls with a positive delta that occur when the counter is zero
+// must happen before a Wait. Calls with a negative delta, or calls with a
+// positive delta that start when the counter is greater than zero, may happen
+// at any time.
+// Typically this means the calls to Add should execute before the statement
+// creating the goroutine or other event to be waited for.
+// If a WaitGroup is reused to wait for several independent sets of events,
+// new Add calls must happen after all previous Wait calls have returned.
+// See the WaitGroup example.
+func (wg *WaitGroup) Add(delta int) {
+ if race.Enabled {
+ if delta < 0 {
+ // Synchronize decrements with Wait.
+ race.ReleaseMerge(unsafe.Pointer(wg))
+ }
+ race.Disable()
+ defer race.Enable()
+ }
+ state := wg.state.Add(uint64(delta) << 32)
+ v := int32(state >> 32)
+ w := uint32(state)
+ if race.Enabled && delta > 0 && v == int32(delta) {
+ // The first increment must be synchronized with Wait.
+ // Need to model this as a read, because there can be
+ // several concurrent wg.counter transitions from 0.
+ race.Read(unsafe.Pointer(&wg.sema))
+ }
+ if v < 0 {
+ panic("sync: negative WaitGroup counter")
+ }
+ if w != 0 && delta > 0 && v == int32(delta) {
+ panic("sync: WaitGroup misuse: Add called concurrently with Wait")
+ }
+ if v > 0 || w == 0 {
+ return
+ }
+ // This goroutine has set counter to 0 when waiters > 0.
+ // Now there can't be concurrent mutations of state:
+ // - Adds must not happen concurrently with Wait,
+ // - Wait does not increment waiters if it sees counter == 0.
+ // Still do a cheap sanity check to detect WaitGroup misuse.
+ if wg.state.Load() != state {
+ panic("sync: WaitGroup misuse: Add called concurrently with Wait")
+ }
+ // Reset waiters count to 0.
+ wg.state.Store(0)
+ for ; w != 0; w-- {
+ runtime_Semrelease(&wg.sema, false, 0)
+ }
+}
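
Add's arithmetic depends on the packing noted on the state field: the counter occupies the high 32 bits and the waiter count the low 32 bits of one atomic.Uint64, so a single atomic Add both updates the counter and returns a consistent snapshot of both halves. A standalone sketch of that pack/unpack step (plain integers here, not the real atomic field):

package main

import "fmt"

func main() {
	var state uint64

	delta := 3
	state += uint64(delta) << 32 // counter += delta, as in wg.state.Add(uint64(delta) << 32)
	state += 2                   // pretend two waiters were recorded in the low half

	v := int32(state >> 32) // counter: high 32 bits
	w := uint32(state)      // waiters: low 32 bits (high bits truncated)
	fmt.Println(v, w)       // prints: 3 2
}

A negative delta works through the same conversion: converting a negative int to uint64 wraps modulo 2^64, so adding it shifted left by 32 subtracts from the high half without disturbing the waiter bits.
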
+
+// Done decrements the WaitGroup counter by one.
+func (wg *WaitGroup) Done() {
+ wg.Add(-1)
+}
+
+// Wait blocks until the WaitGroup counter is zero.
+func (wg *WaitGroup) Wait() {
+ if race.Enabled {
+ race.Disable()
+ }
+ for {
+ state := wg.state.Load()
+ v := int32(state >> 32)
+ w := uint32(state)
+ if v == 0 {
+ // Counter is 0, no need to wait.
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(wg))
+ }
+ return
+ }
+ // Increment waiters count.
+ if wg.state.CompareAndSwap(state, state+1) {
+ if race.Enabled && w == 0 {
+ // Wait must be synchronized with the first Add.
+ // Need to model this as a write to race with the read in Add.
+ // As a consequence, can do the write only for the first waiter,
+ // otherwise concurrent Waits will race with each other.
+ race.Write(unsafe.Pointer(&wg.sema))
+ }
+ runtime_Semacquire(&wg.sema)
+ if wg.state.Load() != 0 {
+ panic("sync: WaitGroup is reused before previous Wait has returned")
+ }
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(wg))
+ }
+ return
+ }
+ }
+}
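
Wait's panic ("WaitGroup is reused before previous Wait has returned") and Add's documented rule that new Add calls must happen after all previous Wait calls have returned together allow a WaitGroup to be reused across rounds, provided each round is fully drained first. A hedged sketch of that safe reuse pattern, with illustrative names:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for round := 0; round < 3; round++ {
		// Re-arm the WaitGroup only after the previous round's Wait has returned.
		for i := 0; i < 4; i++ {
			wg.Add(1) // all Adds for this round happen before this round's Wait
			go func() {
				defer wg.Done()
			}()
		}
		wg.Wait()
		fmt.Println("round", round, "done")
	}
}
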
diff --git a/src/sync/waitgroup_test.go b/src/sync/waitgroup_test.go
new file mode 100644
index 0000000..4ded218
--- /dev/null
+++ b/src/sync/waitgroup_test.go
@@ -0,0 +1,175 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ . "sync"
+ "sync/atomic"
+ "testing"
+)
+
+func testWaitGroup(t *testing.T, wg1 *WaitGroup, wg2 *WaitGroup) {
+ n := 16
+ wg1.Add(n)
+ wg2.Add(n)
+ exited := make(chan bool, n)
+ for i := 0; i != n; i++ {
+ go func() {
+ wg1.Done()
+ wg2.Wait()
+ exited <- true
+ }()
+ }
+ wg1.Wait()
+ for i := 0; i != n; i++ {
+ select {
+ case <-exited:
+ t.Fatal("WaitGroup released group too soon")
+ default:
+ }
+ wg2.Done()
+ }
+ for i := 0; i != n; i++ {
+ <-exited // Will block if the barrier fails to unlock someone.
+ }
+}
+
+func TestWaitGroup(t *testing.T) {
+ wg1 := &WaitGroup{}
+ wg2 := &WaitGroup{}
+
+ // Run the same test a few times to ensure the barrier is in a proper state.
+ for i := 0; i != 8; i++ {
+ testWaitGroup(t, wg1, wg2)
+ }
+}
+
+func TestWaitGroupMisuse(t *testing.T) {
+ defer func() {
+ err := recover()
+ if err != "sync: negative WaitGroup counter" {
+ t.Fatalf("Unexpected panic: %#v", err)
+ }
+ }()
+ wg := &WaitGroup{}
+ wg.Add(1)
+ wg.Done()
+ wg.Done()
+ t.Fatal("Should panic")
+}
+
+func TestWaitGroupRace(t *testing.T) {
+ // Run this test for about 1ms.
+ for i := 0; i < 1000; i++ {
+ wg := &WaitGroup{}
+ n := new(int32)
+ // spawn goroutine 1
+ wg.Add(1)
+ go func() {
+ atomic.AddInt32(n, 1)
+ wg.Done()
+ }()
+ // spawn goroutine 2
+ wg.Add(1)
+ go func() {
+ atomic.AddInt32(n, 1)
+ wg.Done()
+ }()
+ // Wait for goroutine 1 and 2
+ wg.Wait()
+ if atomic.LoadInt32(n) != 2 {
+ t.Fatal("Spurious wakeup from Wait")
+ }
+ }
+}
+
+func TestWaitGroupAlign(t *testing.T) {
+ type X struct {
+ x byte
+ wg WaitGroup
+ }
+ var x X
+ x.wg.Add(1)
+ go func(x *X) {
+ x.wg.Done()
+ }(&x)
+ x.wg.Wait()
+}
+
+func BenchmarkWaitGroupUncontended(b *testing.B) {
+ type PaddedWaitGroup struct {
+ WaitGroup
+ pad [128]uint8
+ }
+ b.RunParallel(func(pb *testing.PB) {
+ var wg PaddedWaitGroup
+ for pb.Next() {
+ wg.Add(1)
+ wg.Done()
+ wg.Wait()
+ }
+ })
+}
+
+func benchmarkWaitGroupAddDone(b *testing.B, localWork int) {
+ var wg WaitGroup
+ b.RunParallel(func(pb *testing.PB) {
+ foo := 0
+ for pb.Next() {
+ wg.Add(1)
+ for i := 0; i < localWork; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ wg.Done()
+ }
+ _ = foo
+ })
+}
+
+func BenchmarkWaitGroupAddDone(b *testing.B) {
+ benchmarkWaitGroupAddDone(b, 0)
+}
+
+func BenchmarkWaitGroupAddDoneWork(b *testing.B) {
+ benchmarkWaitGroupAddDone(b, 100)
+}
+
+func benchmarkWaitGroupWait(b *testing.B, localWork int) {
+ var wg WaitGroup
+ b.RunParallel(func(pb *testing.PB) {
+ foo := 0
+ for pb.Next() {
+ wg.Wait()
+ for i := 0; i < localWork; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ }
+ _ = foo
+ })
+}
+
+func BenchmarkWaitGroupWait(b *testing.B) {
+ benchmarkWaitGroupWait(b, 0)
+}
+
+func BenchmarkWaitGroupWaitWork(b *testing.B) {
+ benchmarkWaitGroupWait(b, 100)
+}
+
+func BenchmarkWaitGroupActuallyWait(b *testing.B) {
+ b.ReportAllocs()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ var wg WaitGroup
+ wg.Add(1)
+ go func() {
+ wg.Done()
+ }()
+ wg.Wait()
+ }
+ })
+}