Diffstat (limited to 'src/sync/atomic')
-rw-r--r--  src/sync/atomic/asm.s              85
-rw-r--r--  src/sync/atomic/atomic_test.go   2534
-rw-r--r--  src/sync/atomic/doc.go            192
-rw-r--r--  src/sync/atomic/example_test.go    76
-rw-r--r--  src/sync/atomic/race.s              8
-rw-r--r--  src/sync/atomic/type.go           200
-rw-r--r--  src/sync/atomic/value.go          194
-rw-r--r--  src/sync/atomic/value_test.go     274
8 files changed, 3563 insertions, 0 deletions
diff --git a/src/sync/atomic/asm.s b/src/sync/atomic/asm.s
new file mode 100644
index 0000000..2022304
--- /dev/null
+++ b/src/sync/atomic/asm.s
@@ -0,0 +1,85 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !race
+
+#include "textflag.h"
+
+TEXT ·SwapInt32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xchg(SB)
+
+TEXT ·SwapUint32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xchg(SB)
+
+TEXT ·SwapInt64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xchg64(SB)
+
+TEXT ·SwapUint64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xchg64(SB)
+
+TEXT ·SwapUintptr(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xchguintptr(SB)
+
+TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Cas(SB)
+
+TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Cas(SB)
+
+TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Casuintptr(SB)
+
+TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Cas64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Cas64(SB)
+
+TEXT ·AddInt32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xadd(SB)
+
+TEXT ·AddUint32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xadd(SB)
+
+TEXT ·AddUintptr(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xadduintptr(SB)
+
+TEXT ·AddInt64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xadd64(SB)
+
+TEXT ·AddUint64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Xadd64(SB)
+
+TEXT ·LoadInt32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Load(SB)
+
+TEXT ·LoadUint32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Load(SB)
+
+TEXT ·LoadInt64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Load64(SB)
+
+TEXT ·LoadUint64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Load64(SB)
+
+TEXT ·LoadUintptr(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Loaduintptr(SB)
+
+TEXT ·LoadPointer(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Loadp(SB)
+
+TEXT ·StoreInt32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Store(SB)
+
+TEXT ·StoreUint32(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Store(SB)
+
+TEXT ·StoreInt64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Store64(SB)
+
+TEXT ·StoreUint64(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Store64(SB)
+
+TEXT ·StoreUintptr(SB),NOSPLIT,$0
+ JMP runtime∕internal∕atomic·Storeuintptr(SB)
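
Note: asm.s contains no logic of its own. Each exported function is a NOSPLIT trampoline that jumps to the matching runtime∕internal∕atomic routine; the Go-side declarations and their documentation live in doc.go (see the diffstat above). A minimal sketch of what those body-less declarations presumably look like, using the public sync/atomic signatures:

    // Declarations only; the bodies are the JMP trampolines above.
    func SwapInt32(addr *int32, new int32) (old int32)
    func CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
    func AddInt32(addr *int32, delta int32) (new int32)
    func LoadInt32(addr *int32) (val int32)
    func StoreInt32(addr *int32, val int32)
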
diff --git a/src/sync/atomic/atomic_test.go b/src/sync/atomic/atomic_test.go
new file mode 100644
index 0000000..c3604ef
--- /dev/null
+++ b/src/sync/atomic/atomic_test.go
@@ -0,0 +1,2534 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic_test
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ . "sync/atomic"
+ "testing"
+ "unsafe"
+)
+
+// Tests of correct behavior, without contention.
+// (Does the function work as advertised?)
+//
+// Test that the Add functions add correctly.
+// Test that the CompareAndSwap functions actually
+// do the comparison and the swap correctly.
+//
+// The loop over power-of-two values is meant to
+// ensure that the operations apply to the full word size.
+// The struct fields x.before and x.after check that the
+// operations do not extend past the full word size.
+
+const (
+ magic32 = 0xdedbeef
+ magic64 = 0xdeddeadbeefbeef
+)
+
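
Note: the delta+delta > delta loop condition used throughout these tests sweeps every power of two up to the word size: doubling stops being monotonic exactly when it overflows, wrapping negative for signed types and to zero for unsigned ones. A standalone sketch of the idiom:

    package main

    import "fmt"

    func main() {
    	// Prints 1, 2, 4, ..., 1<<29 for int32: once delta reaches 1<<30,
    	// delta+delta overflows and the condition fails, ending the loop.
    	for delta := int32(1); delta+delta > delta; delta += delta {
    		fmt.Println(delta)
    	}
    }
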
+func TestSwapInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j int32
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ k := SwapInt32(&x.i, delta)
+ if x.i != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ j = delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestSwapInt32Method(t *testing.T) {
+ var x struct {
+ before int32
+ i Int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j int32
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ k := x.i.Swap(delta)
+ if x.i.Load() != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ j = delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestSwapUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j uint32
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ k := SwapUint32(&x.i, delta)
+ if x.i != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ j = delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestSwapUint32Method(t *testing.T) {
+ var x struct {
+ before uint32
+ i Uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j uint32
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ k := x.i.Swap(delta)
+ if x.i.Load() != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ j = delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestSwapInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j int64
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := SwapInt64(&x.i, delta)
+ if x.i != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ j = delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestSwapInt64Method(t *testing.T) {
+ var x struct {
+ before int64
+ i Int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j int64
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := x.i.Swap(delta)
+ if x.i.Load() != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ j = delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestSwapUint64(t *testing.T) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j uint64
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := SwapUint64(&x.i, delta)
+ if x.i != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ j = delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestSwapUint64Method(t *testing.T) {
+ var x struct {
+ before uint64
+ i Uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j uint64
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := x.i.Swap(delta)
+ if x.i.Load() != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ j = delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestSwapUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j uintptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := SwapUintptr(&x.i, delta)
+ if x.i != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ j = delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestSwapUintptrMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j uintptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := x.i.Swap(delta)
+ if x.i.Load() != delta || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ j = delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+var global [1024]byte
+
+func testPointers() []unsafe.Pointer {
+ var pointers []unsafe.Pointer
+ // globals
+ for i := 0; i < 10; i++ {
+ pointers = append(pointers, unsafe.Pointer(&global[1<<i-1]))
+ }
+ // heap
+ pointers = append(pointers, unsafe.Pointer(new(byte)))
+ // nil
+ pointers = append(pointers, nil)
+ return pointers
+}
+
+func TestSwapPointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j unsafe.Pointer
+
+ for _, p := range testPointers() {
+ k := SwapPointer(&x.i, p)
+ if x.i != p || k != j {
+ t.Fatalf("p=%p i=%p j=%p k=%p", p, x.i, j, k)
+ }
+ j = p
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestSwapPointerMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Pointer[byte]
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j *byte
+ for _, p := range testPointers() {
+ p := (*byte)(p)
+ k := x.i.Swap(p)
+ if x.i.Load() != p || k != j {
+ t.Fatalf("p=%p i=%p j=%p k=%p", p, x.i.Load(), j, k)
+ }
+ j = p
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestAddInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j int32
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ k := AddInt32(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAddInt32Method(t *testing.T) {
+ var x struct {
+ before int32
+ i Int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j int32
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ k := x.i.Add(delta)
+ j += delta
+ if x.i.Load() != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAddUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j uint32
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ k := AddUint32(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAddUint32Method(t *testing.T) {
+ var x struct {
+ before uint32
+ i Uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j uint32
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ k := x.i.Add(delta)
+ j += delta
+ if x.i.Load() != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAddInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j int64
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := AddInt64(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestAddInt64Method(t *testing.T) {
+ var x struct {
+ before int64
+ i Int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j int64
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := x.i.Add(delta)
+ j += delta
+ if x.i.Load() != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestAddUint64(t *testing.T) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j uint64
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := AddUint64(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestAddUint64Method(t *testing.T) {
+ var x struct {
+ before uint64
+ i Uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ var j uint64
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := x.i.Add(delta)
+ j += delta
+ if x.i.Load() != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestAddUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j uintptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := AddUintptr(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestAddUintptrMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j uintptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := x.i.Add(delta)
+ j += delta
+ if x.i.Load() != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestCompareAndSwapInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ for val := int32(1); val+val > val; val += val {
+ x.i = val
+ if !CompareAndSwapInt32(&x.i, val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if CompareAndSwapInt32(&x.i, val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestCompareAndSwapInt32Method(t *testing.T) {
+ var x struct {
+ before int32
+ i Int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ for val := int32(1); val+val > val; val += val {
+ x.i.Store(val)
+ if !x.i.CompareAndSwap(val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ x.i.Store(val + 1)
+ if x.i.CompareAndSwap(val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestCompareAndSwapUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ for val := uint32(1); val+val > val; val += val {
+ x.i = val
+ if !CompareAndSwapUint32(&x.i, val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if CompareAndSwapUint32(&x.i, val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestCompareAndSwapUint32Method(t *testing.T) {
+ var x struct {
+ before uint32
+ i Uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ for val := uint32(1); val+val > val; val += val {
+ x.i.Store(val)
+ if !x.i.CompareAndSwap(val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ x.i.Store(val + 1)
+ if x.i.CompareAndSwap(val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestCompareAndSwapInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ for val := int64(1); val+val > val; val += val {
+ x.i = val
+ if !CompareAndSwapInt64(&x.i, val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if CompareAndSwapInt64(&x.i, val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestCompareAndSwapInt64Method(t *testing.T) {
+ var x struct {
+ before int64
+ i Int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ for val := int64(1); val+val > val; val += val {
+ x.i.Store(val)
+ if !x.i.CompareAndSwap(val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ x.i.Store(val + 1)
+ if x.i.CompareAndSwap(val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func testCompareAndSwapUint64(t *testing.T, cas func(*uint64, uint64, uint64) bool) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ for val := uint64(1); val+val > val; val += val {
+ x.i = val
+ if !cas(&x.i, val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if cas(&x.i, val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestCompareAndSwapUint64(t *testing.T) {
+ testCompareAndSwapUint64(t, CompareAndSwapUint64)
+}
+
+func TestCompareAndSwapUint64Method(t *testing.T) {
+ var x struct {
+ before uint64
+ i Uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ for val := uint64(1); val+val > val; val += val {
+ x.i.Store(val)
+ if !x.i.CompareAndSwap(val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ x.i.Store(val + 1)
+ if x.i.CompareAndSwap(val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestCompareAndSwapUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for val := uintptr(1); val+val > val; val += val {
+ x.i = val
+ if !CompareAndSwapUintptr(&x.i, val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if CompareAndSwapUintptr(&x.i, val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestCompareAndSwapUintptrMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for val := uintptr(1); val+val > val; val += val {
+ x.i.Store(val)
+ if !x.i.CompareAndSwap(val, val+1) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ x.i.Store(val + 1)
+ if x.i.CompareAndSwap(val, val+2) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i.Load() != val+1 {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestCompareAndSwapPointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ q := unsafe.Pointer(new(byte))
+ for _, p := range testPointers() {
+ x.i = p
+ if !CompareAndSwapPointer(&x.i, p, q) {
+ t.Fatalf("should have swapped %p %p", p, q)
+ }
+ if x.i != q {
+ t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i, q)
+ }
+ if CompareAndSwapPointer(&x.i, p, nil) {
+ t.Fatalf("should not have swapped %p nil", p)
+ }
+ if x.i != q {
+ t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i, q)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestCompareAndSwapPointerMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Pointer[byte]
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ q := new(byte)
+ for _, p := range testPointers() {
+ p := (*byte)(p)
+ x.i.Store(p)
+ if !x.i.CompareAndSwap(p, q) {
+ t.Fatalf("should have swapped %p %p", p, q)
+ }
+ if x.i.Load() != q {
+ t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i.Load(), q)
+ }
+ if x.i.CompareAndSwap(p, nil) {
+ t.Fatalf("should not have swapped %p nil", p)
+ }
+ if x.i.Load() != q {
+ t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i.Load(), q)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestLoadInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ k := LoadInt32(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestLoadInt32Method(t *testing.T) {
+ var x struct {
+ before int32
+ i Int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ want := int32(0)
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ k := x.i.Load()
+ if k != want {
+ t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
+ }
+ x.i.Store(k + delta)
+ want = k + delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestLoadUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ k := LoadUint32(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestLoadUint32Method(t *testing.T) {
+ var x struct {
+ before uint32
+ i Uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ want := uint32(0)
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ k := x.i.Load()
+ if k != want {
+ t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
+ }
+ x.i.Store(k + delta)
+ want = k + delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestLoadInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := LoadInt64(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestLoadInt64Method(t *testing.T) {
+ var x struct {
+ before int64
+ i Int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ want := int64(0)
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := x.i.Load()
+ if k != want {
+ t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
+ }
+ x.i.Store(k + delta)
+ want = k + delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestLoadUint64(t *testing.T) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := LoadUint64(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestLoadUint64Method(t *testing.T) {
+ var x struct {
+ before uint64
+ i Uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ want := uint64(0)
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := x.i.Load()
+ if k != want {
+ t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
+ }
+ x.i.Store(k + delta)
+ want = k + delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestLoadUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := LoadUintptr(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestLoadUintptrMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ want := uintptr(0)
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := x.i.Load()
+ if k != want {
+ t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want)
+ }
+ x.i.Store(k + delta)
+ want = k + delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestLoadPointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for _, p := range testPointers() {
+ x.i = p
+ k := LoadPointer(&x.i)
+ if k != p {
+ t.Fatalf("p=%x k=%x", p, k)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestLoadPointerMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Pointer[byte]
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for _, p := range testPointers() {
+ p := (*byte)(p)
+ x.i.Store(p)
+ k := x.i.Load()
+ if k != p {
+ t.Fatalf("p=%x k=%x", p, k)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestStoreInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ v := int32(0)
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ StoreInt32(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestStoreInt32Method(t *testing.T) {
+ var x struct {
+ before int32
+ i Int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ v := int32(0)
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ x.i.Store(v)
+ if x.i.Load() != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
+ }
+ v += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestStoreUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ v := uint32(0)
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ StoreUint32(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestStoreUint32Method(t *testing.T) {
+ var x struct {
+ before uint32
+ i Uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ v := uint32(0)
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ x.i.Store(v)
+ if x.i.Load() != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
+ }
+ v += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestStoreInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ v := int64(0)
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ StoreInt64(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestStoreInt64Method(t *testing.T) {
+ var x struct {
+ before int64
+ i Int64
+ after int64
+ }
+ magic64 := int64(magic64)
+ x.before = magic64
+ x.after = magic64
+ v := int64(0)
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ x.i.Store(v)
+ if x.i.Load() != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
+ }
+ v += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestStoreUint64(t *testing.T) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ v := uint64(0)
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ StoreUint64(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestStoreUint64Method(t *testing.T) {
+ var x struct {
+ before uint64
+ i Uint64
+ after uint64
+ }
+ magic64 := uint64(magic64)
+ x.before = magic64
+ x.after = magic64
+ v := uint64(0)
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ x.i.Store(v)
+ if x.i.Load() != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
+ }
+ v += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64)
+ }
+}
+
+func TestStoreUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ v := uintptr(0)
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ StoreUintptr(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestStoreUintptrMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ v := uintptr(0)
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ x.i.Store(v)
+ if x.i.Load() != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v)
+ }
+ v += delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestStorePointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for _, p := range testPointers() {
+ StorePointer(&x.i, p)
+ if x.i != p {
+ t.Fatalf("x.i=%p p=%p", x.i, p)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestStorePointerMethod(t *testing.T) {
+ var x struct {
+ before uintptr
+ i Pointer[byte]
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for _, p := range testPointers() {
+ p := (*byte)(p)
+ x.i.Store(p)
+ if x.i.Load() != p {
+ t.Fatalf("x.i=%p p=%p", x.i.Load(), p)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+// Tests of correct behavior, with contention.
+// (Is the function atomic?)
+//
+// For each function, we write a "hammer" function that repeatedly
+// uses the atomic operation to add 1 to a value. After running
+// multiple hammers in parallel, check that we end with the correct
+// total.
+// Swap can't add 1, so it uses a different scheme.
+// The functions repeatedly generate a pseudo-random number such that
+// low bits are equal to high bits, swap, check that the old value
+// has low and high bits equal.
+
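
Note: the Swap hammers detect torn writes by only ever storing values whose high half mirrors the low half, so any old value whose halves disagree can only have come from a non-atomic swap. A minimal sketch of the 32-bit encoding used below:

    // pack mirrors the low 16 bits of x into the high half;
    // wellFormed holds for every value a Swap hammer ever stores.
    func pack(x uint32) uint32     { return x<<16 | x<<16>>16 }
    func wellFormed(v uint32) bool { return v>>16 == v<<16>>16 }
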
+var hammer32 = map[string]func(*uint32, int){
+ "SwapInt32": hammerSwapInt32,
+ "SwapUint32": hammerSwapUint32,
+ "SwapUintptr": hammerSwapUintptr32,
+ "AddInt32": hammerAddInt32,
+ "AddUint32": hammerAddUint32,
+ "AddUintptr": hammerAddUintptr32,
+ "CompareAndSwapInt32": hammerCompareAndSwapInt32,
+ "CompareAndSwapUint32": hammerCompareAndSwapUint32,
+ "CompareAndSwapUintptr": hammerCompareAndSwapUintptr32,
+
+ "SwapInt32Method": hammerSwapInt32Method,
+ "SwapUint32Method": hammerSwapUint32Method,
+ "SwapUintptrMethod": hammerSwapUintptr32Method,
+ "AddInt32Method": hammerAddInt32Method,
+ "AddUint32Method": hammerAddUint32Method,
+ "AddUintptrMethod": hammerAddUintptr32Method,
+ "CompareAndSwapInt32Method": hammerCompareAndSwapInt32Method,
+ "CompareAndSwapUint32Method": hammerCompareAndSwapUint32Method,
+ "CompareAndSwapUintptrMethod": hammerCompareAndSwapUintptr32Method,
+}
+
+func init() {
+ var v uint64 = 1 << 50
+ if uintptr(v) != 0 {
+ // 64-bit system; clear uintptr tests
+ delete(hammer32, "SwapUintptr")
+ delete(hammer32, "AddUintptr")
+ delete(hammer32, "CompareAndSwapUintptr")
+ delete(hammer32, "SwapUintptrMethod")
+ delete(hammer32, "AddUintptrMethod")
+ delete(hammer32, "CompareAndSwapUintptrMethod")
+ }
+}
+
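
Note: this init probes the width of uintptr at run time: converting 1<<50 to uintptr truncates to zero only on 32-bit targets. Later in the file the same question is answered at compile time by const arch32 = unsafe.Sizeof(uintptr(0)) == 4. Both probes side by side, as a sketch (is64 is a name invented here):

    var v uint64 = 1 << 50
    var is64 = uintptr(v) != 0                    // run-time probe: truncates to 0 on 32-bit
    const arch32 = unsafe.Sizeof(uintptr(0)) == 4 // compile-time probe used below
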
+func hammerSwapInt32(uaddr *uint32, count int) {
+ addr := (*int32)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
+ old := uint32(SwapInt32(addr, int32(new)))
+ if old>>16 != old<<16>>16 {
+ panic(fmt.Sprintf("SwapInt32 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapInt32Method(uaddr *uint32, count int) {
+ addr := (*Int32)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
+ old := uint32(addr.Swap(int32(new)))
+ if old>>16 != old<<16>>16 {
+ panic(fmt.Sprintf("SwapInt32 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapUint32(addr *uint32, count int) {
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
+ old := SwapUint32(addr, new)
+ if old>>16 != old<<16>>16 {
+ panic(fmt.Sprintf("SwapUint32 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapUint32Method(uaddr *uint32, count int) {
+ addr := (*Uint32)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
+ old := addr.Swap(new)
+ if old>>16 != old<<16>>16 {
+ panic(fmt.Sprintf("SwapUint32 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapUintptr32(uaddr *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16
+ old := SwapUintptr(addr, new)
+ if old>>16 != old<<16>>16 {
+ panic(fmt.Sprintf("SwapUintptr is not atomic: %#08x", old))
+ }
+ }
+}
+
+func hammerSwapUintptr32Method(uaddr *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ addr := (*Uintptr)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16
+ old := addr.Swap(new)
+ if old>>16 != old<<16>>16 {
+ panic(fmt.Sprintf("Uintptr.Swap is not atomic: %#08x", old))
+ }
+ }
+}
+
+func hammerAddInt32(uaddr *uint32, count int) {
+ addr := (*int32)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ AddInt32(addr, 1)
+ }
+}
+
+func hammerAddInt32Method(uaddr *uint32, count int) {
+ addr := (*Int32)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ addr.Add(1)
+ }
+}
+
+func hammerAddUint32(addr *uint32, count int) {
+ for i := 0; i < count; i++ {
+ AddUint32(addr, 1)
+ }
+}
+
+func hammerAddUint32Method(uaddr *uint32, count int) {
+ addr := (*Uint32)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ addr.Add(1)
+ }
+}
+
+func hammerAddUintptr32(uaddr *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ AddUintptr(addr, 1)
+ }
+}
+
+func hammerAddUintptr32Method(uaddr *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ addr := (*Uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ addr.Add(1)
+ }
+}
+
+func hammerCompareAndSwapInt32(uaddr *uint32, count int) {
+ addr := (*int32)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := LoadInt32(addr)
+ if CompareAndSwapInt32(addr, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapInt32Method(uaddr *uint32, count int) {
+ addr := (*Int32)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := addr.Load()
+ if addr.CompareAndSwap(v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUint32(addr *uint32, count int) {
+ for i := 0; i < count; i++ {
+ for {
+ v := LoadUint32(addr)
+ if CompareAndSwapUint32(addr, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUint32Method(uaddr *uint32, count int) {
+ addr := (*Uint32)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := addr.Load()
+ if addr.CompareAndSwap(v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUintptr32(uaddr *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := LoadUintptr(addr)
+ if CompareAndSwapUintptr(addr, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUintptr32Method(uaddr *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ addr := (*Uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := addr.Load()
+ if addr.CompareAndSwap(v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func TestHammer32(t *testing.T) {
+ const p = 4
+ n := 100000
+ if testing.Short() {
+ n = 1000
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
+
+ for name, testf := range hammer32 {
+ c := make(chan int)
+ var val uint32
+ for i := 0; i < p; i++ {
+ go func() {
+ defer func() {
+ if err := recover(); err != nil {
+ t.Error(err.(string))
+ }
+ c <- 1
+ }()
+ testf(&val, n)
+ }()
+ }
+ for i := 0; i < p; i++ {
+ <-c
+ }
+ if !strings.HasPrefix(name, "Swap") && val != uint32(n)*p {
+ t.Fatalf("%s: val=%d want %d", name, val, n*p)
+ }
+ }
+}
+
+var hammer64 = map[string]func(*uint64, int){
+ "SwapInt64": hammerSwapInt64,
+ "SwapUint64": hammerSwapUint64,
+ "SwapUintptr": hammerSwapUintptr64,
+ "AddInt64": hammerAddInt64,
+ "AddUint64": hammerAddUint64,
+ "AddUintptr": hammerAddUintptr64,
+ "CompareAndSwapInt64": hammerCompareAndSwapInt64,
+ "CompareAndSwapUint64": hammerCompareAndSwapUint64,
+ "CompareAndSwapUintptr": hammerCompareAndSwapUintptr64,
+
+ "SwapInt64Method": hammerSwapInt64Method,
+ "SwapUint64Method": hammerSwapUint64Method,
+ "SwapUintptrMethod": hammerSwapUintptr64Method,
+ "AddInt64Method": hammerAddInt64Method,
+ "AddUint64Method": hammerAddUint64Method,
+ "AddUintptrMethod": hammerAddUintptr64Method,
+ "CompareAndSwapInt64Method": hammerCompareAndSwapInt64Method,
+ "CompareAndSwapUint64Method": hammerCompareAndSwapUint64Method,
+ "CompareAndSwapUintptrMethod": hammerCompareAndSwapUintptr64Method,
+}
+
+func init() {
+ var v uint64 = 1 << 50
+ if uintptr(v) == 0 {
+ // 32-bit system; clear uintptr tests
+ delete(hammer64, "SwapUintptr")
+ delete(hammer64, "SwapUintptrMethod")
+ delete(hammer64, "AddUintptr")
+ delete(hammer64, "AddUintptrMethod")
+ delete(hammer64, "CompareAndSwapUintptr")
+ delete(hammer64, "CompareAndSwapUintptrMethod")
+ }
+}
+
+func hammerSwapInt64(uaddr *uint64, count int) {
+ addr := (*int64)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
+ old := uint64(SwapInt64(addr, int64(new)))
+ if old>>32 != old<<32>>32 {
+ panic(fmt.Sprintf("SwapInt64 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapInt64Method(uaddr *uint64, count int) {
+ addr := (*Int64)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
+ old := uint64(addr.Swap(int64(new)))
+ if old>>32 != old<<32>>32 {
+ panic(fmt.Sprintf("SwapInt64 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapUint64(addr *uint64, count int) {
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
+ old := SwapUint64(addr, new)
+ if old>>32 != old<<32>>32 {
+ panic(fmt.Sprintf("SwapUint64 is not atomic: %v", old))
+ }
+ }
+}
+
+func hammerSwapUint64Method(uaddr *uint64, count int) {
+ addr := (*Uint64)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
+ old := addr.Swap(new)
+ if old>>32 != old<<32>>32 {
+ panic(fmt.Sprintf("SwapUint64 is not atomic: %v", old))
+ }
+ }
+}
+
+const arch32 = unsafe.Sizeof(uintptr(0)) == 4
+
+func hammerSwapUintptr64(uaddr *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ if !arch32 {
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32
+ old := SwapUintptr(addr, new)
+ if old>>32 != old<<32>>32 {
+ panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old))
+ }
+ }
+ }
+}
+
+func hammerSwapUintptr64Method(uaddr *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ if !arch32 {
+ addr := (*Uintptr)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32
+ old := addr.Swap(new)
+ if old>>32 != old<<32>>32 {
+ panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old))
+ }
+ }
+ }
+}
+
+func hammerAddInt64(uaddr *uint64, count int) {
+ addr := (*int64)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ AddInt64(addr, 1)
+ }
+}
+
+func hammerAddInt64Method(uaddr *uint64, count int) {
+ addr := (*Int64)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ addr.Add(1)
+ }
+}
+
+func hammerAddUint64(addr *uint64, count int) {
+ for i := 0; i < count; i++ {
+ AddUint64(addr, 1)
+ }
+}
+
+func hammerAddUint64Method(uaddr *uint64, count int) {
+ addr := (*Uint64)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ addr.Add(1)
+ }
+}
+
+func hammerAddUintptr64(uaddr *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ AddUintptr(addr, 1)
+ }
+}
+
+func hammerAddUintptr64Method(uaddr *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ addr := (*Uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ addr.Add(1)
+ }
+}
+
+func hammerCompareAndSwapInt64(uaddr *uint64, count int) {
+ addr := (*int64)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := LoadInt64(addr)
+ if CompareAndSwapInt64(addr, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapInt64Method(uaddr *uint64, count int) {
+ addr := (*Int64)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := addr.Load()
+ if addr.CompareAndSwap(v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUint64(addr *uint64, count int) {
+ for i := 0; i < count; i++ {
+ for {
+ v := LoadUint64(addr)
+ if CompareAndSwapUint64(addr, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUint64Method(uaddr *uint64, count int) {
+ addr := (*Uint64)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := addr.Load()
+ if addr.CompareAndSwap(v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUintptr64(uaddr *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := LoadUintptr(addr)
+ if CompareAndSwapUintptr(addr, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUintptr64Method(uaddr *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ addr := (*Uintptr)(unsafe.Pointer(uaddr))
+ for i := 0; i < count; i++ {
+ for {
+ v := addr.Load()
+ if addr.CompareAndSwap(v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func TestHammer64(t *testing.T) {
+ const p = 4
+ n := 100000
+ if testing.Short() {
+ n = 1000
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
+
+ for name, testf := range hammer64 {
+ c := make(chan int)
+ var val uint64
+ for i := 0; i < p; i++ {
+ go func() {
+ defer func() {
+ if err := recover(); err != nil {
+ t.Error(err.(string))
+ }
+ c <- 1
+ }()
+ testf(&val, n)
+ }()
+ }
+ for i := 0; i < p; i++ {
+ <-c
+ }
+ if !strings.HasPrefix(name, "Swap") && val != uint64(n)*p {
+ t.Fatalf("%s: val=%d want %d", name, val, n*p)
+ }
+ }
+}
+
+func hammerStoreLoadInt32(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*int32)(paddr)
+ v := LoadInt32(addr)
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Int32: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ StoreInt32(addr, new)
+}
+
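
Note: the store/load hammers bump both 16-bit halves of the word in lockstep by adding 1 + 1<<16, and reset to zero once the low half reaches 1e4 so it can never carry into the high half and break the halves-equal invariant (the 64-bit variants skip the reset because 1<<32 additions are unreachable here). A tiny sketch:

    v := uint32(0x00030003)
    v += 1 + 1<<16 // 0x00040004: both halves advance together
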
+func hammerStoreLoadInt32Method(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*Int32)(paddr)
+ v := addr.Load()
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Int32: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ addr.Store(new)
+}
+
+func hammerStoreLoadUint32(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*uint32)(paddr)
+ v := LoadUint32(addr)
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Uint32: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ StoreUint32(addr, new)
+}
+
+func hammerStoreLoadUint32Method(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*Uint32)(paddr)
+ v := addr.Load()
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Uint32: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ addr.Store(new)
+}
+
+func hammerStoreLoadInt64(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*int64)(paddr)
+ v := LoadInt64(addr)
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Int64: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<32
+ StoreInt64(addr, new)
+}
+
+func hammerStoreLoadInt64Method(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*Int64)(paddr)
+ v := addr.Load()
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Int64: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<32
+ addr.Store(new)
+}
+
+func hammerStoreLoadUint64(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*uint64)(paddr)
+ v := LoadUint64(addr)
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Uint64: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<32
+ StoreUint64(addr, new)
+}
+
+func hammerStoreLoadUint64Method(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*Uint64)(paddr)
+ v := addr.Load()
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Uint64: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<32
+ addr.Store(new)
+}
+
+func hammerStoreLoadUintptr(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*uintptr)(paddr)
+ v := LoadUintptr(addr)
+ new := v
+ if arch32 {
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
+ }
+ new = v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ } else {
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
+ }
+ inc := uint64(1 + 1<<32)
+ new = v + uintptr(inc)
+ }
+ StoreUintptr(addr, new)
+}
+
+//go:nocheckptr
+func hammerStoreLoadUintptrMethod(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*Uintptr)(paddr)
+ v := addr.Load()
+ new := v
+ if arch32 {
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
+ }
+ new = v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ } else {
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
+ }
+ inc := uint64(1 + 1<<32)
+ new = v + uintptr(inc)
+ }
+ addr.Store(new)
+}
+
+// This code is just testing that LoadPointer/StorePointer operate
+// atomically; it's not actually calculating pointers.
+//
+//go:nocheckptr
+func hammerStoreLoadPointer(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*unsafe.Pointer)(paddr)
+ v := uintptr(LoadPointer(addr))
+ new := v
+ if arch32 {
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
+ }
+ new = v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ } else {
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
+ }
+ inc := uint64(1 + 1<<32)
+ new = v + uintptr(inc)
+ }
+ StorePointer(addr, unsafe.Pointer(new))
+}
+
+// This code is just testing that LoadPointer/StorePointer operate
+// atomically; it's not actually calculating pointers.
+//
+//go:nocheckptr
+func hammerStoreLoadPointerMethod(t *testing.T, paddr unsafe.Pointer) {
+ addr := (*Pointer[byte])(paddr)
+ v := uintptr(unsafe.Pointer(addr.Load()))
+ new := v
+ if arch32 {
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
+ }
+ new = v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ } else {
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
+ }
+ inc := uint64(1 + 1<<32)
+ new = v + uintptr(inc)
+ }
+ addr.Store((*byte)(unsafe.Pointer(new)))
+}
+
+func TestHammerStoreLoad(t *testing.T) {
+ tests := []func(*testing.T, unsafe.Pointer){
+ hammerStoreLoadInt32, hammerStoreLoadUint32,
+ hammerStoreLoadUintptr, hammerStoreLoadPointer,
+ hammerStoreLoadInt32Method, hammerStoreLoadUint32Method,
+ hammerStoreLoadUintptrMethod, hammerStoreLoadPointerMethod,
+ hammerStoreLoadInt64, hammerStoreLoadUint64,
+ hammerStoreLoadInt64Method, hammerStoreLoadUint64Method,
+ }
+ n := int(1e6)
+ if testing.Short() {
+ n = int(1e4)
+ }
+ const procs = 8
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(procs))
+ // Disable the GC because hammerStoreLoadPointer invokes
+ // write barriers on values that aren't real pointers.
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
+ // Ensure any in-progress GC is finished.
+ runtime.GC()
+ for _, tt := range tests {
+ c := make(chan int)
+ var val uint64
+ for p := 0; p < procs; p++ {
+ go func() {
+ for i := 0; i < n; i++ {
+ tt(t, unsafe.Pointer(&val))
+ }
+ c <- 1
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+ }
+}
+
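+// TestStoreLoadSeqCst32 is a store-buffering litmus test: each goroutine
+// atomically stores its iteration number and then loads its peer's.
+// Sequential consistency requires that at least one of the two observes
+// the other's store for the same iteration (my == i or his == i).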
+func TestStoreLoadSeqCst32(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int32(1e3)
+ if testing.Short() {
+ N = int32(1e2)
+ }
+ c := make(chan bool, 2)
+ X := [2]int32{}
+ ack := [2][3]int32{{-1, -1, -1}, {-1, -1, -1}}
+ for p := 0; p < 2; p++ {
+ go func(me int) {
+ he := 1 - me
+ for i := int32(1); i < N; i++ {
+ StoreInt32(&X[me], i)
+ my := LoadInt32(&X[he])
+ StoreInt32(&ack[me][i%3], my)
+ for w := 1; LoadInt32(&ack[he][i%3]) == -1; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ his := LoadInt32(&ack[he][i%3])
+ if (my != i && my != i-1) || (his != i && his != i-1) {
+ t.Errorf("invalid values: %d/%d (%d)", my, his, i)
+ break
+ }
+ if my != i && his != i {
+ t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+ break
+ }
+ StoreInt32(&ack[me][(i-1)%3], -1)
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
+func TestStoreLoadSeqCst64(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int64(1e3)
+ if testing.Short() {
+ N = int64(1e2)
+ }
+ c := make(chan bool, 2)
+ X := [2]int64{}
+ ack := [2][3]int64{{-1, -1, -1}, {-1, -1, -1}}
+ for p := 0; p < 2; p++ {
+ go func(me int) {
+ he := 1 - me
+ for i := int64(1); i < N; i++ {
+ StoreInt64(&X[me], i)
+ my := LoadInt64(&X[he])
+ StoreInt64(&ack[me][i%3], my)
+ for w := 1; LoadInt64(&ack[he][i%3]) == -1; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ his := LoadInt64(&ack[he][i%3])
+ if (my != i && my != i-1) || (his != i && his != i-1) {
+ t.Errorf("invalid values: %d/%d (%d)", my, his, i)
+ break
+ }
+ if my != i && his != i {
+ t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+ break
+ }
+ StoreInt64(&ack[me][(i-1)%3], -1)
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
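+// The RelAcq tests exercise the release/acquire half of the guarantee:
+// plain writes to data1/data2 made before the atomic store of signal must
+// be visible to the goroutine whose atomic load observes that signal.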
+func TestStoreLoadRelAcq32(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int32(1e3)
+ if testing.Short() {
+ N = int32(1e2)
+ }
+ c := make(chan bool, 2)
+ type Data struct {
+ signal int32
+ pad1 [128]int8
+ data1 int32
+ pad2 [128]int8
+ data2 float32
+ }
+ var X Data
+ for p := int32(0); p < 2; p++ {
+ go func(p int32) {
+ for i := int32(1); i < N; i++ {
+ if (i+p)%2 == 0 {
+ X.data1 = i
+ X.data2 = float32(i)
+ StoreInt32(&X.signal, i)
+ } else {
+ for w := 1; LoadInt32(&X.signal) != i; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ d1 := X.data1
+ d2 := X.data2
+ if d1 != i || d2 != float32(i) {
+ t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i)
+ break
+ }
+ }
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
+func TestStoreLoadRelAcq64(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ t.Skipf("Skipping test on %v processor machine", runtime.NumCPU())
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int64(1e3)
+ if testing.Short() {
+ N = int64(1e2)
+ }
+ c := make(chan bool, 2)
+ type Data struct {
+ signal int64
+ pad1 [128]int8
+ data1 int64
+ pad2 [128]int8
+ data2 float64
+ }
+ var X Data
+ for p := int64(0); p < 2; p++ {
+ go func(p int64) {
+ for i := int64(1); i < N; i++ {
+ if (i+p)%2 == 0 {
+ X.data1 = i
+ X.data2 = float64(i)
+ StoreInt64(&X.signal, i)
+ } else {
+ for w := 1; LoadInt64(&X.signal) != i; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ d1 := X.data1
+ d2 := X.data2
+ if d1 != i || d2 != float64(i) {
+ t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i)
+ break
+ }
+ }
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
+func shouldPanic(t *testing.T, name string, f func()) {
+ defer func() {
+ // Check that all GC maps are sane.
+ runtime.GC()
+
+ err := recover()
+ want := "unaligned 64-bit atomic operation"
+ if err == nil {
+ t.Errorf("%s did not panic", name)
+ } else if s, _ := err.(string); s != want {
+ t.Errorf("%s: wanted panic %q, got %q", name, want, err)
+ }
+ }()
+ f()
+}
+
+func TestUnaligned64(t *testing.T) {
+ // Unaligned 64-bit atomics on 32-bit systems are
+ // a continual source of pain. Test that on 32-bit systems they crash
+ // instead of failing silently.
+ if !arch32 {
+ t.Skip("test only runs on 32-bit systems")
+ }
+
+ x := make([]uint32, 4)
+ p := (*uint64)(unsafe.Pointer(&x[1])) // misaligned
+
+ shouldPanic(t, "LoadUint64", func() { LoadUint64(p) })
+ shouldPanic(t, "LoadUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Load() })
+ shouldPanic(t, "StoreUint64", func() { StoreUint64(p, 1) })
+ shouldPanic(t, "StoreUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Store(1) })
+ shouldPanic(t, "CompareAndSwapUint64", func() { CompareAndSwapUint64(p, 1, 2) })
+ shouldPanic(t, "CompareAndSwapUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).CompareAndSwap(1, 2) })
+ shouldPanic(t, "AddUint64", func() { AddUint64(p, 3) })
+ shouldPanic(t, "AddUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Add(3) })
+}
+
+func TestAutoAligned64(t *testing.T) {
+ var signed struct {
+ _ uint32
+ i Int64
+ }
+ if o := reflect.TypeOf(&signed).Elem().Field(1).Offset; o != 8 {
+ t.Fatalf("Int64 offset = %d, want 8", o)
+ }
+ if p := reflect.ValueOf(&signed).Elem().Field(1).Addr().Pointer(); p&7 != 0 {
+ t.Fatalf("Int64 pointer = %#x, want 8-aligned", p)
+ }
+
+ var unsigned struct {
+ _ uint32
+ i Uint64
+ }
+ if o := reflect.TypeOf(&unsigned).Elem().Field(1).Offset; o != 8 {
+ t.Fatalf("Uint64 offset = %d, want 8", o)
+ }
+ if p := reflect.ValueOf(&unsigned).Elem().Field(1).Addr().Pointer(); p&7 != 0 {
+ t.Fatalf("Int64 pointer = %#x, want 8-aligned", p)
+ }
+}
+
+func TestNilDeref(t *testing.T) {
+ funcs := [...]func(){
+ func() { CompareAndSwapInt32(nil, 0, 0) },
+ func() { (*Int32)(nil).CompareAndSwap(0, 0) },
+ func() { CompareAndSwapInt64(nil, 0, 0) },
+ func() { (*Int64)(nil).CompareAndSwap(0, 0) },
+ func() { CompareAndSwapUint32(nil, 0, 0) },
+ func() { (*Uint32)(nil).CompareAndSwap(0, 0) },
+ func() { CompareAndSwapUint64(nil, 0, 0) },
+ func() { (*Uint64)(nil).CompareAndSwap(0, 0) },
+ func() { CompareAndSwapUintptr(nil, 0, 0) },
+ func() { (*Uintptr)(nil).CompareAndSwap(0, 0) },
+ func() { CompareAndSwapPointer(nil, nil, nil) },
+ func() { (*Pointer[byte])(nil).CompareAndSwap(nil, nil) },
+ func() { SwapInt32(nil, 0) },
+ func() { (*Int32)(nil).Swap(0) },
+ func() { SwapUint32(nil, 0) },
+ func() { (*Uint32)(nil).Swap(0) },
+ func() { SwapInt64(nil, 0) },
+ func() { (*Int64)(nil).Swap(0) },
+ func() { SwapUint64(nil, 0) },
+ func() { (*Uint64)(nil).Swap(0) },
+ func() { SwapUintptr(nil, 0) },
+ func() { (*Uintptr)(nil).Swap(0) },
+ func() { SwapPointer(nil, nil) },
+ func() { (*Pointer[byte])(nil).Swap(nil) },
+ func() { AddInt32(nil, 0) },
+ func() { (*Int32)(nil).Add(0) },
+ func() { AddUint32(nil, 0) },
+ func() { (*Uint32)(nil).Add(0) },
+ func() { AddInt64(nil, 0) },
+ func() { (*Int64)(nil).Add(0) },
+ func() { AddUint64(nil, 0) },
+ func() { (*Uint64)(nil).Add(0) },
+ func() { AddUintptr(nil, 0) },
+ func() { (*Uintptr)(nil).Add(0) },
+ func() { LoadInt32(nil) },
+ func() { (*Int32)(nil).Load() },
+ func() { LoadInt64(nil) },
+ func() { (*Int64)(nil).Load() },
+ func() { LoadUint32(nil) },
+ func() { (*Uint32)(nil).Load() },
+ func() { LoadUint64(nil) },
+ func() { (*Uint64)(nil).Load() },
+ func() { LoadUintptr(nil) },
+ func() { (*Uintptr)(nil).Load() },
+ func() { LoadPointer(nil) },
+ func() { (*Pointer[byte])(nil).Load() },
+ func() { StoreInt32(nil, 0) },
+ func() { (*Int32)(nil).Store(0) },
+ func() { StoreInt64(nil, 0) },
+ func() { (*Int64)(nil).Store(0) },
+ func() { StoreUint32(nil, 0) },
+ func() { (*Uint32)(nil).Store(0) },
+ func() { StoreUint64(nil, 0) },
+ func() { (*Uint64)(nil).Store(0) },
+ func() { StoreUintptr(nil, 0) },
+ func() { (*Uintptr)(nil).Store(0) },
+ func() { StorePointer(nil, nil) },
+ func() { (*Pointer[byte])(nil).Store(nil) },
+ }
+ for _, f := range funcs {
+ func() {
+ defer func() {
+ runtime.GC()
+ recover()
+ }()
+ f()
+ }()
+ }
+}
+
+// Test that this type declaration compiles.
+// When atomic.Pointer used a _ [0]T field (rather than _ [0]*T), it did not.
+type List struct {
+ Next Pointer[List]
+}
diff --git a/src/sync/atomic/doc.go b/src/sync/atomic/doc.go
new file mode 100644
index 0000000..472ab9d
--- /dev/null
+++ b/src/sync/atomic/doc.go
@@ -0,0 +1,192 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package atomic provides low-level atomic memory primitives
+// useful for implementing synchronization algorithms.
+//
+// These functions require great care to be used correctly.
+// Except for special, low-level applications, synchronization is better
+// done with channels or the facilities of the sync package.
+// Share memory by communicating;
+// don't communicate by sharing memory.
+//
+// The swap operation, implemented by the SwapT functions, is the atomic
+// equivalent of:
+//
+// old = *addr
+// *addr = new
+// return old
+//
+// The compare-and-swap operation, implemented by the CompareAndSwapT
+// functions, is the atomic equivalent of:
+//
+// if *addr == old {
+// *addr = new
+// return true
+// }
+// return false
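+//
+// A common pattern built on compare-and-swap (an illustrative sketch,
+// where f is any update function) retries until the swap succeeds,
+// applying f atomically:
+//
+//	for {
+//		old := LoadUint32(&x)
+//		if CompareAndSwapUint32(&x, old, f(old)) {
+//			break
+//		}
+//	}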
+//
+// The add operation, implemented by the AddT functions, is the atomic
+// equivalent of:
+//
+// *addr += delta
+// return *addr
+//
+// The load and store operations, implemented by the LoadT and StoreT
+// functions, are the atomic equivalents of "return *addr" and
+// "*addr = val".
+//
+// In the terminology of the Go memory model, if the effect of
+// an atomic operation A is observed by atomic operation B,
+// then A “synchronizes before” B.
+// Additionally, all the atomic operations executed in a program
+// behave as though executed in some sequentially consistent order.
+// This definition provides the same semantics as
+// C++'s sequentially consistent atomics and Java's volatile variables.
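+//
+// For example, these guarantees make the following publication pattern
+// safe (an illustrative sketch; data and done are shared variables):
+//
+//	var data int
+//	var done int32
+//
+//	// writer:
+//	data = 42
+//	StoreInt32(&done, 1)
+//
+//	// reader:
+//	for LoadInt32(&done) == 0 {
+//	}
+//	println(data) // prints 42 once the loop exits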
+package atomic
+
+import (
+ "unsafe"
+)
+
+// BUG(rsc): On 386, the 64-bit functions use instructions unavailable before the Pentium MMX.
+//
+// On non-Linux ARM, the 64-bit functions use instructions unavailable before the ARMv6k core.
+//
+// On ARM, 386, and 32-bit MIPS, it is the caller's responsibility to arrange
+// for 64-bit alignment of 64-bit words accessed atomically via the primitive
+// atomic functions (types [Int64] and [Uint64] are automatically aligned).
+// The first word in an allocated struct, array, or slice; in a global
+// variable; or in a local variable (because the subject of all atomic operations
+// will escape to the heap) can be relied upon to be 64-bit aligned.
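+//
+// For example (an illustrative sketch), on 32-bit platforms a 64-bit
+// counter stays aligned when it is the first word of an allocated struct,
+// while the typed [Int64] needs no such care:
+//
+//	type counters struct {
+//		hits   int64 // first word: 64-bit aligned even on 32-bit targets
+//		misses Int64 // automatically aligned wherever it is placed
+//	}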
+
+// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
+// Consider using the more ergonomic and less error-prone [Int32.Swap] instead.
+func SwapInt32(addr *int32, new int32) (old int32)
+
+// SwapInt64 atomically stores new into *addr and returns the previous *addr value.
+// Consider using the more ergonomic and less error-prone [Int64.Swap] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func SwapInt64(addr *int64, new int64) (old int64)
+
+// SwapUint32 atomically stores new into *addr and returns the previous *addr value.
+// Consider using the more ergonomic and less error-prone [Uint32.Swap] instead.
+func SwapUint32(addr *uint32, new uint32) (old uint32)
+
+// SwapUint64 atomically stores new into *addr and returns the previous *addr value.
+// Consider using the more ergonomic and less error-prone [Uint64.Swap] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func SwapUint64(addr *uint64, new uint64) (old uint64)
+
+// SwapUintptr atomically stores new into *addr and returns the previous *addr value.
+// Consider using the more ergonomic and less error-prone [Uintptr.Swap] instead.
+func SwapUintptr(addr *uintptr, new uintptr) (old uintptr)
+
+// SwapPointer atomically stores new into *addr and returns the previous *addr value.
+// Consider using the more ergonomic and less error-prone [Pointer.Swap] instead.
+func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)
+
+// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
+// Consider using the more ergonomic and less error-prone [Int32.CompareAndSwap] instead.
+func CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
+
+// CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.
+// Consider using the more ergonomic and less error-prone [Int64.CompareAndSwap] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)
+
+// CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.
+// Consider using the more ergonomic and less error-prone [Uint32.CompareAndSwap] instead.
+func CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)
+
+// CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.
+// Consider using the more ergonomic and less error-prone [Uint64.CompareAndSwap] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
+
+// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
+// Consider using the more ergonomic and less error-prone [Uintptr.CompareAndSwap] instead.
+func CompareAndSwapUintptr(addr *uintptr, old, new uintptr) (swapped bool)
+
+// CompareAndSwapPointer executes the compare-and-swap operation for an unsafe.Pointer value.
+// Consider using the more ergonomic and less error-prone [Pointer.CompareAndSwap] instead.
+func CompareAndSwapPointer(addr *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool)
+
+// AddInt32 atomically adds delta to *addr and returns the new value.
+// Consider using the more ergonomic and less error-prone [Int32.Add] instead.
+func AddInt32(addr *int32, delta int32) (new int32)
+
+// AddUint32 atomically adds delta to *addr and returns the new value.
+// To subtract a signed positive constant value c from x, do AddUint32(&x, ^uint32(c-1)).
+// In particular, to decrement x, do AddUint32(&x, ^uint32(0)).
+// Consider using the more ergonomic and less error-prone [Uint32.Add] instead.
+func AddUint32(addr *uint32, delta uint32) (new uint32)
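+
+// For instance (illustrative), to subtract 3 from x:
+//
+//	AddUint32(&x, ^uint32(3-1)) // ^uint32(2) == 0xFFFFFFFD == -3 mod 2^32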
+
+// AddInt64 atomically adds delta to *addr and returns the new value.
+// Consider using the more ergonomic and less error-prone [Int64.Add] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func AddInt64(addr *int64, delta int64) (new int64)
+
+// AddUint64 atomically adds delta to *addr and returns the new value.
+// To subtract a signed positive constant value c from x, do AddUint64(&x, ^uint64(c-1)).
+// In particular, to decrement x, do AddUint64(&x, ^uint64(0)).
+// Consider using the more ergonomic and less error-prone [Uint64.Add] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func AddUint64(addr *uint64, delta uint64) (new uint64)
+
+// AddUintptr atomically adds delta to *addr and returns the new value.
+// Consider using the more ergonomic and less error-prone [Uintptr.Add] instead.
+func AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
+
+// LoadInt32 atomically loads *addr.
+// Consider using the more ergonomic and less error-prone [Int32.Load] instead.
+func LoadInt32(addr *int32) (val int32)
+
+// LoadInt64 atomically loads *addr.
+// Consider using the more ergonomic and less error-prone [Int64.Load] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func LoadInt64(addr *int64) (val int64)
+
+// LoadUint32 atomically loads *addr.
+// Consider using the more ergonomic and less error-prone [Uint32.Load] instead.
+func LoadUint32(addr *uint32) (val uint32)
+
+// LoadUint64 atomically loads *addr.
+// Consider using the more ergonomic and less error-prone [Uint64.Load] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func LoadUint64(addr *uint64) (val uint64)
+
+// LoadUintptr atomically loads *addr.
+// Consider using the more ergonomic and less error-prone [Uintptr.Load] instead.
+func LoadUintptr(addr *uintptr) (val uintptr)
+
+// LoadPointer atomically loads *addr.
+// Consider using the more ergonomic and less error-prone [Pointer.Load] instead.
+func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
+
+// StoreInt32 atomically stores val into *addr.
+// Consider using the more ergonomic and less error-prone [Int32.Store] instead.
+func StoreInt32(addr *int32, val int32)
+
+// StoreInt64 atomically stores val into *addr.
+// Consider using the more ergonomic and less error-prone [Int64.Store] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func StoreInt64(addr *int64, val int64)
+
+// StoreUint32 atomically stores val into *addr.
+// Consider using the more ergonomic and less error-prone [Uint32.Store] instead.
+func StoreUint32(addr *uint32, val uint32)
+
+// StoreUint64 atomically stores val into *addr.
+// Consider using the more ergonomic and less error-prone [Uint64.Store] instead
+// (particularly if you target 32-bit platforms; see the bugs section).
+func StoreUint64(addr *uint64, val uint64)
+
+// StoreUintptr atomically stores val into *addr.
+// Consider using the more ergonomic and less error-prone [Uintptr.Store] instead.
+func StoreUintptr(addr *uintptr, val uintptr)
+
+// StorePointer atomically stores val into *addr.
+// Consider using the more ergonomic and less error-prone [Pointer.Store] instead.
+func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer)
diff --git a/src/sync/atomic/example_test.go b/src/sync/atomic/example_test.go
new file mode 100644
index 0000000..09ae0aa
--- /dev/null
+++ b/src/sync/atomic/example_test.go
@@ -0,0 +1,76 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic_test
+
+import (
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+func loadConfig() map[string]string {
+ return make(map[string]string)
+}
+
+func requests() chan int {
+ return make(chan int)
+}
+
+// The following example shows how to use Value for periodic program config updates
+// and propagation of the changes to worker goroutines.
+func ExampleValue_config() {
+ var config atomic.Value // holds current server configuration
+ // Create initial config value and store into config.
+ config.Store(loadConfig())
+ go func() {
+ // Reload config every 10 seconds
+ // and update config value with the new version.
+ for {
+ time.Sleep(10 * time.Second)
+ config.Store(loadConfig())
+ }
+ }()
+ // Create worker goroutines that handle incoming requests
+ // using the latest config value.
+ for i := 0; i < 10; i++ {
+ go func() {
+ for r := range requests() {
+ c := config.Load()
+ // Handle request r using config c.
+ _, _ = r, c
+ }
+ }()
+ }
+}
+
+// The following example shows how to maintain a scalable, frequently read
+// but infrequently updated data structure using the copy-on-write idiom.
+func ExampleValue_readMostly() {
+ type Map map[string]string
+ var m atomic.Value
+ m.Store(make(Map))
+ var mu sync.Mutex // used only by writers
+ // read function can be used to read the data without further synchronization
+ read := func(key string) (val string) {
+ m1 := m.Load().(Map)
+ return m1[key]
+ }
+ // insert function can be used to update the data without further synchronization
+ insert := func(key, val string) {
+ mu.Lock() // synchronize with other potential writers
+ defer mu.Unlock()
+ m1 := m.Load().(Map) // load current value of the data structure
+ m2 := make(Map) // create a new value
+ for k, v := range m1 {
+ m2[k] = v // copy all data from the current object to the new one
+ }
+ m2[key] = val // do the update that we need
+ m.Store(m2) // atomically replace the current object with the new one
+ // At this point all new readers start working with the new version.
+ // The old version will be garbage collected once the existing readers
+ // (if any) are done with it.
+ }
+ _, _ = read, insert
+}
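+
+// The following example shows the typed atomic.Int64 used as a concurrent
+// counter. It is an illustrative sketch; the typed API also sidesteps the
+// 64-bit alignment pitfalls of the plain functions on 32-bit platforms.
+func ExampleInt64_counter() {
+	var ops atomic.Int64
+	var wg sync.WaitGroup
+	for i := 0; i < 4; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for j := 0; j < 1000; j++ {
+				ops.Add(1) // atomic increment; no mutex needed
+			}
+		}()
+	}
+	wg.Wait()
+	_ = ops.Load() // 4000 once all goroutines have finished
+}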
diff --git a/src/sync/atomic/race.s b/src/sync/atomic/race.s
new file mode 100644
index 0000000..90bd69f
--- /dev/null
+++ b/src/sync/atomic/race.s
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+// This file is here only to allow external functions.
+// The operations are implemented in src/runtime/race_amd64.s
diff --git a/src/sync/atomic/type.go b/src/sync/atomic/type.go
new file mode 100644
index 0000000..cc01683
--- /dev/null
+++ b/src/sync/atomic/type.go
@@ -0,0 +1,200 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import "unsafe"
+
+// A Bool is an atomic boolean value.
+// The zero value is false.
+type Bool struct {
+ _ noCopy
+ v uint32
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Bool) Load() bool { return LoadUint32(&x.v) != 0 }
+
+// Store atomically stores val into x.
+func (x *Bool) Store(val bool) { StoreUint32(&x.v, b32(val)) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Bool) Swap(new bool) (old bool) { return SwapUint32(&x.v, b32(new)) != 0 }
+
+// CompareAndSwap executes the compare-and-swap operation for the boolean value x.
+func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) {
+ return CompareAndSwapUint32(&x.v, b32(old), b32(new))
+}
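+
+// A Bool combined with CompareAndSwap makes a simple one-shot gate
+// (an illustrative sketch):
+//
+//	var started Bool
+//	if started.CompareAndSwap(false, true) {
+//		// exactly one caller reaches this point
+//	}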
+
+// b32 returns a uint32 0 or 1 representing b.
+func b32(b bool) uint32 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// For testing that *Pointer[T]'s methods can be inlined.
+// Keep in sync with cmd/compile/internal/test/inl_test.go:TestIntendedInlining.
+var _ = &Pointer[int]{}
+
+// A Pointer is an atomic pointer of type *T. The zero value is a nil *T.
+type Pointer[T any] struct {
+ // Mention *T in a field to disallow conversion between Pointer types.
+ // See go.dev/issue/56603 for more details.
+ // Use *T, not T, to avoid spurious recursive type definition errors.
+ _ [0]*T
+
+ _ noCopy
+ v unsafe.Pointer
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Pointer[T]) Load() *T { return (*T)(LoadPointer(&x.v)) }
+
+// Store atomically stores val into x.
+func (x *Pointer[T]) Store(val *T) { StorePointer(&x.v, unsafe.Pointer(val)) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Pointer[T]) Swap(new *T) (old *T) { return (*T)(SwapPointer(&x.v, unsafe.Pointer(new))) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
+ return CompareAndSwapPointer(&x.v, unsafe.Pointer(old), unsafe.Pointer(new))
+}
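+
+// A typical use of Pointer (an illustrative sketch) is a lock-free prepend
+// to a singly linked list, retrying until the head is unchanged:
+//
+//	type node struct{ next *node }
+//
+//	func push(head *Pointer[node], n *node) {
+//		for {
+//			old := head.Load()
+//			n.next = old
+//			if head.CompareAndSwap(old, n) {
+//				return
+//			}
+//		}
+//	}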
+
+// An Int32 is an atomic int32. The zero value is zero.
+type Int32 struct {
+ _ noCopy
+ v int32
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Int32) Load() int32 { return LoadInt32(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Int32) Store(val int32) { StoreInt32(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Int32) Swap(new int32) (old int32) { return SwapInt32(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Int32) CompareAndSwap(old, new int32) (swapped bool) {
+ return CompareAndSwapInt32(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Int32) Add(delta int32) (new int32) { return AddInt32(&x.v, delta) }
+
+// An Int64 is an atomic int64. The zero value is zero.
+type Int64 struct {
+ _ noCopy
+ _ align64
+ v int64
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Int64) Load() int64 { return LoadInt64(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Int64) Store(val int64) { StoreInt64(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Int64) Swap(new int64) (old int64) { return SwapInt64(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Int64) CompareAndSwap(old, new int64) (swapped bool) {
+ return CompareAndSwapInt64(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Int64) Add(delta int64) (new int64) { return AddInt64(&x.v, delta) }
+
+// A Uint32 is an atomic uint32. The zero value is zero.
+type Uint32 struct {
+ _ noCopy
+ v uint32
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Uint32) Load() uint32 { return LoadUint32(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Uint32) Store(val uint32) { StoreUint32(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Uint32) Swap(new uint32) (old uint32) { return SwapUint32(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Uint32) CompareAndSwap(old, new uint32) (swapped bool) {
+ return CompareAndSwapUint32(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Uint32) Add(delta uint32) (new uint32) { return AddUint32(&x.v, delta) }
+
+// A Uint64 is an atomic uint64. The zero value is zero.
+type Uint64 struct {
+ _ noCopy
+ _ align64
+ v uint64
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Uint64) Load() uint64 { return LoadUint64(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Uint64) Store(val uint64) { StoreUint64(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Uint64) Swap(new uint64) (old uint64) { return SwapUint64(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Uint64) CompareAndSwap(old, new uint64) (swapped bool) {
+ return CompareAndSwapUint64(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Uint64) Add(delta uint64) (new uint64) { return AddUint64(&x.v, delta) }
+
+// A Uintptr is an atomic uintptr. The zero value is zero.
+type Uintptr struct {
+ _ noCopy
+ v uintptr
+}
+
+// Load atomically loads and returns the value stored in x.
+func (x *Uintptr) Load() uintptr { return LoadUintptr(&x.v) }
+
+// Store atomically stores val into x.
+func (x *Uintptr) Store(val uintptr) { StoreUintptr(&x.v, val) }
+
+// Swap atomically stores new into x and returns the previous value.
+func (x *Uintptr) Swap(new uintptr) (old uintptr) { return SwapUintptr(&x.v, new) }
+
+// CompareAndSwap executes the compare-and-swap operation for x.
+func (x *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) {
+ return CompareAndSwapUintptr(&x.v, old, new)
+}
+
+// Add atomically adds delta to x and returns the new value.
+func (x *Uintptr) Add(delta uintptr) (new uintptr) { return AddUintptr(&x.v, delta) }
+
+// noCopy may be added to structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+//
+// Note that it must not be embedded, due to the Lock and Unlock methods.
+type noCopy struct{}
+
+// Lock is a no-op used by the -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+func (*noCopy) Unlock() {}
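+
+// Copying a value that contains noCopy is reported by `go vet`, e.g.
+// (an illustrative sketch):
+//
+//	var a Int64
+//	b := a // flagged by vet's copylocks check
+//	_ = b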
+
+// align64 may be added to structs that must be 64-bit aligned.
+// This struct is recognized by a special case in the compiler
+// and will not work if copied to any other package.
+type align64 struct{}
diff --git a/src/sync/atomic/value.go b/src/sync/atomic/value.go
new file mode 100644
index 0000000..a57b08a
--- /dev/null
+++ b/src/sync/atomic/value.go
@@ -0,0 +1,194 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import (
+ "unsafe"
+)
+
+// A Value provides an atomic load and store of a consistently typed value.
+// The zero value for a Value returns nil from Load.
+// Once Store has been called, a Value must not be copied.
+//
+// A Value must not be copied after first use.
+type Value struct {
+ v any
+}
+
+// efaceWords is the internal representation of an interface{} value.
+type efaceWords struct {
+ typ unsafe.Pointer
+ data unsafe.Pointer
+}
+
+// Load returns the value set by the most recent Store.
+// It returns nil if there has been no call to Store for this Value.
+func (v *Value) Load() (val any) {
+ vp := (*efaceWords)(unsafe.Pointer(v))
+ typ := LoadPointer(&vp.typ)
+ if typ == nil || typ == unsafe.Pointer(&firstStoreInProgress) {
+ // First store not yet completed.
+ return nil
+ }
+ data := LoadPointer(&vp.data)
+ vlp := (*efaceWords)(unsafe.Pointer(&val))
+ vlp.typ = typ
+ vlp.data = data
+ return
+}
+
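+// firstStoreInProgress is a sentinel: Store publishes its address in the
+// typ word while the data word is still being written, so concurrent
+// readers and writers can tell a half-initialized Value from a complete one.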
+var firstStoreInProgress byte
+
+// Store sets the value of the Value v to val.
+// All calls to Store for a given Value must use values of the same concrete type.
+// Store of an inconsistent type panics, as does Store(nil).
+func (v *Value) Store(val any) {
+ if val == nil {
+ panic("sync/atomic: store of nil value into Value")
+ }
+ vp := (*efaceWords)(unsafe.Pointer(v))
+ vlp := (*efaceWords)(unsafe.Pointer(&val))
+ for {
+ typ := LoadPointer(&vp.typ)
+ if typ == nil {
+ // Attempt to start first store.
+ // Disable preemption so that other goroutines can use
+ // active spin wait to wait for completion.
+ runtime_procPin()
+ if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
+ runtime_procUnpin()
+ continue
+ }
+ // Complete first store.
+ StorePointer(&vp.data, vlp.data)
+ StorePointer(&vp.typ, vlp.typ)
+ runtime_procUnpin()
+ return
+ }
+ if typ == unsafe.Pointer(&firstStoreInProgress) {
+ // First store in progress. Wait.
+ // Since we disable preemption around the first store,
+ // we can wait with active spinning.
+ continue
+ }
+ // First store completed. Check type and overwrite data.
+ if typ != vlp.typ {
+ panic("sync/atomic: store of inconsistently typed value into Value")
+ }
+ StorePointer(&vp.data, vlp.data)
+ return
+ }
+}
+
+// Swap stores new into Value and returns the previous value. It returns nil if
+// the Value is empty.
+//
+// All calls to Swap for a given Value must use values of the same concrete
+// type. Swap of an inconsistent type panics, as does Swap(nil).
+func (v *Value) Swap(new any) (old any) {
+ if new == nil {
+ panic("sync/atomic: swap of nil value into Value")
+ }
+ vp := (*efaceWords)(unsafe.Pointer(v))
+ np := (*efaceWords)(unsafe.Pointer(&new))
+ for {
+ typ := LoadPointer(&vp.typ)
+ if typ == nil {
+ // Attempt to start first store.
+ // Disable preemption so that other goroutines can use
+ // active spin wait to wait for completion; and so that
+ // GC does not see the fake type accidentally.
+ runtime_procPin()
+ if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
+ runtime_procUnpin()
+ continue
+ }
+ // Complete first store.
+ StorePointer(&vp.data, np.data)
+ StorePointer(&vp.typ, np.typ)
+ runtime_procUnpin()
+ return nil
+ }
+ if typ == unsafe.Pointer(&firstStoreInProgress) {
+ // First store in progress. Wait.
+ // Since we disable preemption around the first store,
+ // we can wait with active spinning.
+ continue
+ }
+ // First store completed. Check type and overwrite data.
+ if typ != np.typ {
+ panic("sync/atomic: swap of inconsistently typed value into Value")
+ }
+ op := (*efaceWords)(unsafe.Pointer(&old))
+ op.typ, op.data = np.typ, SwapPointer(&vp.data, np.data)
+ return old
+ }
+}
+
+// CompareAndSwap executes the compare-and-swap operation for the Value.
+//
+// All calls to CompareAndSwap for a given Value must use values of the same
+// concrete type. CompareAndSwap of an inconsistent type panics, as does
+// CompareAndSwap(old, nil).
+func (v *Value) CompareAndSwap(old, new any) (swapped bool) {
+ if new == nil {
+ panic("sync/atomic: compare and swap of nil value into Value")
+ }
+ vp := (*efaceWords)(unsafe.Pointer(v))
+ np := (*efaceWords)(unsafe.Pointer(&new))
+ op := (*efaceWords)(unsafe.Pointer(&old))
+ if op.typ != nil && np.typ != op.typ {
+ panic("sync/atomic: compare and swap of inconsistently typed values")
+ }
+ for {
+ typ := LoadPointer(&vp.typ)
+ if typ == nil {
+ if old != nil {
+ return false
+ }
+ // Attempt to start first store.
+ // Disable preemption so that other goroutines can use
+ // active spin wait to wait for completion; and so that
+ // GC does not see the fake type accidentally.
+ runtime_procPin()
+ if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
+ runtime_procUnpin()
+ continue
+ }
+ // Complete first store.
+ StorePointer(&vp.data, np.data)
+ StorePointer(&vp.typ, np.typ)
+ runtime_procUnpin()
+ return true
+ }
+ if typ == unsafe.Pointer(&firstStoreInProgress) {
+ // First store in progress. Wait.
+ // Since we disable preemption around the first store,
+ // we can wait with active spinning.
+ continue
+ }
+ // First store completed. Check type and overwrite data.
+ if typ != np.typ {
+ panic("sync/atomic: compare and swap of inconsistently typed value into Value")
+ }
+ // Compare old and current via runtime equality check.
+ // This allows value types to be compared, something
+ // not offered by the package functions.
+ // CompareAndSwapPointer below only ensures vp.data
+ // has not changed since LoadPointer.
+ data := LoadPointer(&vp.data)
+ var i any
+ (*efaceWords)(unsafe.Pointer(&i)).typ = typ
+ (*efaceWords)(unsafe.Pointer(&i)).data = data
+ if i != old {
+ return false
+ }
+ return CompareAndSwapPointer(&vp.data, data, np.data)
+ }
+}
+
+// Disable/enable preemption, implemented in runtime.
+func runtime_procPin() int
+func runtime_procUnpin()
diff --git a/src/sync/atomic/value_test.go b/src/sync/atomic/value_test.go
new file mode 100644
index 0000000..721da96
--- /dev/null
+++ b/src/sync/atomic/value_test.go
@@ -0,0 +1,274 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic_test
+
+import (
+ "math/rand"
+ "runtime"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ . "sync/atomic"
+ "testing"
+)
+
+func TestValue(t *testing.T) {
+ var v Value
+ if v.Load() != nil {
+ t.Fatal("initial Value is not nil")
+ }
+ v.Store(42)
+ x := v.Load()
+ if xx, ok := x.(int); !ok || xx != 42 {
+ t.Fatalf("wrong value: got %+v, want 42", x)
+ }
+ v.Store(84)
+ x = v.Load()
+ if xx, ok := x.(int); !ok || xx != 84 {
+ t.Fatalf("wrong value: got %+v, want 84", x)
+ }
+}
+
+func TestValueLarge(t *testing.T) {
+ var v Value
+ v.Store("foo")
+ x := v.Load()
+ if xx, ok := x.(string); !ok || xx != "foo" {
+ t.Fatalf("wrong value: got %+v, want foo", x)
+ }
+ v.Store("barbaz")
+ x = v.Load()
+ if xx, ok := x.(string); !ok || xx != "barbaz" {
+ t.Fatalf("wrong value: got %+v, want barbaz", x)
+ }
+}
+
+func TestValuePanic(t *testing.T) {
+ const nilErr = "sync/atomic: store of nil value into Value"
+ const badErr = "sync/atomic: store of inconsistently typed value into Value"
+ var v Value
+ func() {
+ defer func() {
+ err := recover()
+ if err != nilErr {
+ t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr)
+ }
+ }()
+ v.Store(nil)
+ }()
+ v.Store(42)
+ func() {
+ defer func() {
+ err := recover()
+ if err != badErr {
+ t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, badErr)
+ }
+ }()
+ v.Store("foo")
+ }()
+ func() {
+ defer func() {
+ err := recover()
+ if err != nilErr {
+ t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr)
+ }
+ }()
+ v.Store(nil)
+ }()
+}
+
+func TestValueConcurrent(t *testing.T) {
+ tests := [][]any{
+ {uint16(0), ^uint16(0), uint16(1 + 2<<8), uint16(3 + 4<<8)},
+ {uint32(0), ^uint32(0), uint32(1 + 2<<16), uint32(3 + 4<<16)},
+ {uint64(0), ^uint64(0), uint64(1 + 2<<32), uint64(3 + 4<<32)},
+ {complex(0, 0), complex(1, 2), complex(3, 4), complex(5, 6)},
+ }
+ p := 4 * runtime.GOMAXPROCS(0)
+ N := int(1e5)
+ if testing.Short() {
+ p /= 2
+ N = 1e3
+ }
+ for _, test := range tests {
+ var v Value
+ done := make(chan bool, p)
+ for i := 0; i < p; i++ {
+ go func() {
+ r := rand.New(rand.NewSource(rand.Int63()))
+ expected := true
+ loop:
+ for j := 0; j < N; j++ {
+ x := test[r.Intn(len(test))]
+ v.Store(x)
+ x = v.Load()
+ for _, x1 := range test {
+ if x == x1 {
+ continue loop
+ }
+ }
+ t.Logf("loaded unexpected value %+v, want %+v", x, test)
+ expected = false
+ break
+ }
+ done <- expected
+ }()
+ }
+ for i := 0; i < p; i++ {
+ if !<-done {
+ t.FailNow()
+ }
+ }
+ }
+}
+
+func BenchmarkValueRead(b *testing.B) {
+ var v Value
+ v.Store(new(int))
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ x := v.Load().(*int)
+ if *x != 0 {
+ b.Fatalf("wrong value: got %v, want 0", *x)
+ }
+ }
+ })
+}
+
+var Value_SwapTests = []struct {
+ init any
+ new any
+ want any
+ err any
+}{
+ {init: nil, new: nil, err: "sync/atomic: swap of nil value into Value"},
+ {init: nil, new: true, want: nil, err: nil},
+ {init: true, new: "", err: "sync/atomic: swap of inconsistently typed value into Value"},
+ {init: true, new: false, want: true, err: nil},
+}
+
+func TestValue_Swap(t *testing.T) {
+ for i, tt := range Value_SwapTests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ var v Value
+ if tt.init != nil {
+ v.Store(tt.init)
+ }
+ defer func() {
+ err := recover()
+ switch {
+ case tt.err == nil && err != nil:
+ t.Errorf("should not panic, got %v", err)
+ case tt.err != nil && err == nil:
+ t.Errorf("should panic %v, got <nil>", tt.err)
+ }
+ }()
+ if got := v.Swap(tt.new); got != tt.want {
+ t.Errorf("got %v, want %v", got, tt.want)
+ }
+ if got := v.Load(); got != tt.new {
+ t.Errorf("got %v, want %v", got, tt.new)
+ }
+ })
+ }
+}
+
+func TestValueSwapConcurrent(t *testing.T) {
+ var v Value
+ var count uint64
+ var g sync.WaitGroup
+ var m, n uint64 = 10000, 10000
+ if testing.Short() {
+ m = 1000
+ n = 1000
+ }
+ for i := uint64(0); i < m*n; i += n {
+ i := i
+ g.Add(1)
+ go func() {
+ var c uint64
+ for new := i; new < i+n; new++ {
+ if old := v.Swap(new); old != nil {
+ c += old.(uint64)
+ }
+ }
+ atomic.AddUint64(&count, c)
+ g.Done()
+ }()
+ }
+ g.Wait()
+ if want, got := (m*n-1)*(m*n)/2, count+v.Load().(uint64); got != want {
+ t.Errorf("sum from 0 to %d was %d, want %v", m*n-1, got, want)
+ }
+}
+
+var heapA, heapB = struct{ uint }{0}, struct{ uint }{0}
+
+var Value_CompareAndSwapTests = []struct {
+ init any
+ new any
+ old any
+ want bool
+ err any
+}{
+ {init: nil, new: nil, old: nil, err: "sync/atomic: compare and swap of nil value into Value"},
+ {init: nil, new: true, old: "", err: "sync/atomic: compare and swap of inconsistently typed values into Value"},
+ {init: nil, new: true, old: true, want: false, err: nil},
+ {init: nil, new: true, old: nil, want: true, err: nil},
+ {init: true, new: "", err: "sync/atomic: compare and swap of inconsistently typed value into Value"},
+ {init: true, new: true, old: false, want: false, err: nil},
+ {init: true, new: true, old: true, want: true, err: nil},
+ {init: heapA, new: struct{ uint }{1}, old: heapB, want: true, err: nil},
+}
+
+func TestValue_CompareAndSwap(t *testing.T) {
+ for i, tt := range Value_CompareAndSwapTests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ var v Value
+ if tt.init != nil {
+ v.Store(tt.init)
+ }
+ defer func() {
+ err := recover()
+ switch {
+ case tt.err == nil && err != nil:
+ t.Errorf("got %v, wanted no panic", err)
+ case tt.err != nil && err == nil:
+ t.Errorf("did not panic, want %v", tt.err)
+ }
+ }()
+ if got := v.CompareAndSwap(tt.old, tt.new); got != tt.want {
+ t.Errorf("got %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestValueCompareAndSwapConcurrent(t *testing.T) {
+ var v Value
+ var w sync.WaitGroup
+ v.Store(0)
+ m, n := 1000, 100
+ if testing.Short() {
+ m = 100
+ n = 100
+ }
+ for i := 0; i < m; i++ {
+ i := i
+ w.Add(1)
+ go func() {
+ for j := i; j < m*n; runtime.Gosched() {
+ if v.CompareAndSwap(j, j+1) {
+ j += m
+ }
+ }
+ w.Done()
+ }()
+ }
+ w.Wait()
+ if stop := v.Load().(int); stop != m*n {
+ t.Errorf("did not get to %v, stopped at %v", m*n, stop)
+ }
+}