Diffstat:
 -rw-r--r--  src/sync/atomic/asm.s           |   85
 -rw-r--r--  src/sync/atomic/atomic_test.go  | 1472
 -rw-r--r--  src/sync/atomic/doc.go          |  144
 -rw-r--r--  src/sync/atomic/example_test.go |   76
 -rw-r--r--  src/sync/atomic/race.s          |    8
 -rw-r--r--  src/sync/atomic/value.go        |   86
 -rw-r--r--  src/sync/atomic/value_test.go   |  135
 7 files changed, 2006 insertions, 0 deletions
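The seven new files below add the complete public sync/atomic API: assembly trampolines into runtime∕internal∕atomic (asm.s), documented declarations (doc.go), the Value type (value.go), and their tests. As a quick orientation, here is a minimal usage sketch of that API. It is not part of the diff, and the counter name and loop bounds are illustrative only.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var ops int64 // shared counter, updated without a mutex
	var wg sync.WaitGroup
	for g := 0; g < 4; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				atomic.AddInt64(&ops, 1) // atomic read-modify-write
			}
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt64(&ops)) // always 4000; a plain ops++ would race
}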
diff --git a/src/sync/atomic/asm.s b/src/sync/atomic/asm.s
new file mode 100644
index 0000000..f86726f
--- /dev/null
+++ b/src/sync/atomic/asm.s
@@ -0,0 +1,85 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !race
+
+#include "textflag.h"
+
+TEXT ·SwapInt32(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Xchg(SB)
+
+TEXT ·SwapUint32(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Xchg(SB)
+
+TEXT ·SwapInt64(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Xchg64(SB)
+
+TEXT ·SwapUint64(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Xchg64(SB)
+
+TEXT ·SwapUintptr(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Xchguintptr(SB)
+
+TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Cas(SB)
+
+TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Cas(SB)
+
+TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Casuintptr(SB)
+
+TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Cas64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Cas64(SB)
+
+TEXT ·AddInt32(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Xadd(SB)
+
+TEXT ·AddUint32(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Xadd(SB)
+
+TEXT ·AddUintptr(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Xadduintptr(SB)
+
+TEXT ·AddInt64(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Xadd64(SB)
+
+TEXT ·AddUint64(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Xadd64(SB)
+
+TEXT ·LoadInt32(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Load(SB)
+
+TEXT ·LoadUint32(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Load(SB)
+
+TEXT ·LoadInt64(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Load64(SB)
+
+TEXT ·LoadUint64(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Load64(SB)
+
+TEXT ·LoadUintptr(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Loaduintptr(SB)
+
+TEXT ·LoadPointer(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Loadp(SB)
+
+TEXT ·StoreInt32(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Store(SB)
+
+TEXT ·StoreUint32(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Store(SB)
+
+TEXT ·StoreInt64(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Store64(SB)
+
+TEXT ·StoreUint64(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Store64(SB)
+
+TEXT ·StoreUintptr(SB),NOSPLIT,$0
+	JMP	runtime∕internal∕atomic·Storeuintptr(SB)
diff --git a/src/sync/atomic/atomic_test.go b/src/sync/atomic/atomic_test.go
new file mode 100644
index 0000000..eadc962
--- /dev/null
+++ b/src/sync/atomic/atomic_test.go
@@ -0,0 +1,1472 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic_test
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+	. "sync/atomic"
+	"testing"
+	"unsafe"
+)
+
+// Tests of correct behavior, without contention.
+// (Does the function work as advertised?)
+//
+// Test that the Add functions add correctly.
+// Test that the CompareAndSwap functions actually
+// do the comparison and the swap correctly.
+//
+// The loop over power-of-two values is meant to
+// ensure that the operations apply to the full word size.
+// The struct fields x.before and x.after check that the
+// operations do not extend past the full word size.
+
+const (
+	magic32 = 0xdedbeef
+	magic64 = 0xdeddeadbeefbeef
+)
+
+// Do the 64-bit functions panic? If so, don't bother testing.
+var test64err = func() (err interface{}) { + defer func() { + err = recover() + }() + var x int64 + AddInt64(&x, 1) + return nil +}() + +func TestSwapInt32(t *testing.T) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + var j int32 + for delta := int32(1); delta+delta > delta; delta += delta { + k := SwapInt32(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestSwapUint32(t *testing.T) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + var j uint32 + for delta := uint32(1); delta+delta > delta; delta += delta { + k := SwapUint32(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestSwapInt64(t *testing.T) { + if test64err != nil { + t.Skipf("Skipping 64-bit tests: %v", test64err) + } + var x struct { + before int64 + i int64 + after int64 + } + x.before = magic64 + x.after = magic64 + var j int64 + for delta := int64(1); delta+delta > delta; delta += delta { + k := SwapInt64(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64)) + } +} + +func TestSwapUint64(t *testing.T) { + if test64err != nil { + t.Skipf("Skipping 64-bit tests: %v", test64err) + } + var x struct { + before uint64 + i uint64 + after uint64 + } + x.before = magic64 + x.after = magic64 + var j uint64 + for delta := uint64(1); delta+delta > delta; delta += delta { + k := SwapUint64(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64)) + } +} + +func TestSwapUintptr(t *testing.T) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + var j uintptr + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := SwapUintptr(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +var global [1024]byte + +func testPointers() []unsafe.Pointer { + var pointers []unsafe.Pointer + // globals + for i := 0; i < 10; i++ { + pointers = append(pointers, unsafe.Pointer(&global[1<<i-1])) + } + // heap + pointers = append(pointers, unsafe.Pointer(new(byte))) + // nil + pointers = append(pointers, nil) + return pointers +} + +func TestSwapPointer(t *testing.T) { + var x struct { + before uintptr + i unsafe.Pointer + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + var j unsafe.Pointer + + for _, p := range testPointers() { + k := SwapPointer(&x.i, p) + if x.i != p || k != j { + 
t.Fatalf("p=%p i=%p j=%p k=%p", p, x.i, j, k) + } + j = p + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestAddInt32(t *testing.T) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + var j int32 + for delta := int32(1); delta+delta > delta; delta += delta { + k := AddInt32(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAddUint32(t *testing.T) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + var j uint32 + for delta := uint32(1); delta+delta > delta; delta += delta { + k := AddUint32(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAddInt64(t *testing.T) { + if test64err != nil { + t.Skipf("Skipping 64-bit tests: %v", test64err) + } + var x struct { + before int64 + i int64 + after int64 + } + x.before = magic64 + x.after = magic64 + var j int64 + for delta := int64(1); delta+delta > delta; delta += delta { + k := AddInt64(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, int64(magic64), int64(magic64)) + } +} + +func TestAddUint64(t *testing.T) { + if test64err != nil { + t.Skipf("Skipping 64-bit tests: %v", test64err) + } + var x struct { + before uint64 + i uint64 + after uint64 + } + x.before = magic64 + x.after = magic64 + var j uint64 + for delta := uint64(1); delta+delta > delta; delta += delta { + k := AddUint64(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64)) + } +} + +func TestAddUintptr(t *testing.T) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + var j uintptr + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := AddUintptr(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestCompareAndSwapInt32(t *testing.T) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + for val := int32(1); val+val > val; val += val { + x.i = val + if !CompareAndSwapInt32(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if CompareAndSwapInt32(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + 
if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestCompareAndSwapUint32(t *testing.T) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + for val := uint32(1); val+val > val; val += val { + x.i = val + if !CompareAndSwapUint32(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if CompareAndSwapUint32(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestCompareAndSwapInt64(t *testing.T) { + if test64err != nil { + t.Skipf("Skipping 64-bit tests: %v", test64err) + } + var x struct { + before int64 + i int64 + after int64 + } + x.before = magic64 + x.after = magic64 + for val := int64(1); val+val > val; val += val { + x.i = val + if !CompareAndSwapInt64(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if CompareAndSwapInt64(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64)) + } +} + +func testCompareAndSwapUint64(t *testing.T, cas func(*uint64, uint64, uint64) bool) { + if test64err != nil { + t.Skipf("Skipping 64-bit tests: %v", test64err) + } + var x struct { + before uint64 + i uint64 + after uint64 + } + x.before = magic64 + x.after = magic64 + for val := uint64(1); val+val > val; val += val { + x.i = val + if !cas(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if cas(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64)) + } +} + +func TestCompareAndSwapUint64(t *testing.T) { + testCompareAndSwapUint64(t, CompareAndSwapUint64) +} + +func TestCompareAndSwapUintptr(t *testing.T) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for val := uintptr(1); val+val > val; val += val { + x.i = val + if !CompareAndSwapUintptr(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if CompareAndSwapUintptr(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, 
x.after, magicptr, magicptr) + } +} + +func TestCompareAndSwapPointer(t *testing.T) { + var x struct { + before uintptr + i unsafe.Pointer + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + q := unsafe.Pointer(new(byte)) + for _, p := range testPointers() { + x.i = p + if !CompareAndSwapPointer(&x.i, p, q) { + t.Fatalf("should have swapped %p %p", p, q) + } + if x.i != q { + t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i, q) + } + if CompareAndSwapPointer(&x.i, p, nil) { + t.Fatalf("should not have swapped %p nil", p) + } + if x.i != q { + t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i, q) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestLoadInt32(t *testing.T) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + for delta := int32(1); delta+delta > delta; delta += delta { + k := LoadInt32(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestLoadUint32(t *testing.T) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + for delta := uint32(1); delta+delta > delta; delta += delta { + k := LoadUint32(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestLoadInt64(t *testing.T) { + if test64err != nil { + t.Skipf("Skipping 64-bit tests: %v", test64err) + } + var x struct { + before int64 + i int64 + after int64 + } + x.before = magic64 + x.after = magic64 + for delta := int64(1); delta+delta > delta; delta += delta { + k := LoadInt64(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64)) + } +} + +func TestLoadUint64(t *testing.T) { + if test64err != nil { + t.Skipf("Skipping 64-bit tests: %v", test64err) + } + var x struct { + before uint64 + i uint64 + after uint64 + } + x.before = magic64 + x.after = magic64 + for delta := uint64(1); delta+delta > delta; delta += delta { + k := LoadUint64(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64)) + } +} + +func TestLoadUintptr(t *testing.T) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := LoadUintptr(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestLoadPointer(t *testing.T) { + var x struct { + before uintptr + i unsafe.Pointer + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = 
magicptr + x.after = magicptr + for _, p := range testPointers() { + x.i = p + k := LoadPointer(&x.i) + if k != p { + t.Fatalf("p=%x k=%x", p, k) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestStoreInt32(t *testing.T) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + v := int32(0) + for delta := int32(1); delta+delta > delta; delta += delta { + StoreInt32(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestStoreUint32(t *testing.T) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + v := uint32(0) + for delta := uint32(1); delta+delta > delta; delta += delta { + StoreUint32(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestStoreInt64(t *testing.T) { + if test64err != nil { + t.Skipf("Skipping 64-bit tests: %v", test64err) + } + var x struct { + before int64 + i int64 + after int64 + } + x.before = magic64 + x.after = magic64 + v := int64(0) + for delta := int64(1); delta+delta > delta; delta += delta { + StoreInt64(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64)) + } +} + +func TestStoreUint64(t *testing.T) { + if test64err != nil { + t.Skipf("Skipping 64-bit tests: %v", test64err) + } + var x struct { + before uint64 + i uint64 + after uint64 + } + x.before = magic64 + x.after = magic64 + v := uint64(0) + for delta := uint64(1); delta+delta > delta; delta += delta { + StoreUint64(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64)) + } +} + +func TestStoreUintptr(t *testing.T) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + v := uintptr(0) + for delta := uintptr(1); delta+delta > delta; delta += delta { + StoreUintptr(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestStorePointer(t *testing.T) { + var x struct { + before uintptr + i unsafe.Pointer + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for _, p := range testPointers() { + StorePointer(&x.i, p) + if x.i != p { + t.Fatalf("x.i=%p p=%p", x.i, p) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +// Tests of correct behavior, with contention. +// (Is the function atomic?) 
+// +// For each function, we write a "hammer" function that repeatedly +// uses the atomic operation to add 1 to a value. After running +// multiple hammers in parallel, check that we end with the correct +// total. +// Swap can't add 1, so it uses a different scheme. +// The functions repeatedly generate a pseudo-random number such that +// low bits are equal to high bits, swap, check that the old value +// has low and high bits equal. + +var hammer32 = map[string]func(*uint32, int){ + "SwapInt32": hammerSwapInt32, + "SwapUint32": hammerSwapUint32, + "SwapUintptr": hammerSwapUintptr32, + "AddInt32": hammerAddInt32, + "AddUint32": hammerAddUint32, + "AddUintptr": hammerAddUintptr32, + "CompareAndSwapInt32": hammerCompareAndSwapInt32, + "CompareAndSwapUint32": hammerCompareAndSwapUint32, + "CompareAndSwapUintptr": hammerCompareAndSwapUintptr32, +} + +func init() { + var v uint64 = 1 << 50 + if uintptr(v) != 0 { + // 64-bit system; clear uintptr tests + delete(hammer32, "SwapUintptr") + delete(hammer32, "AddUintptr") + delete(hammer32, "CompareAndSwapUintptr") + } +} + +func hammerSwapInt32(uaddr *uint32, count int) { + addr := (*int32)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16 + old := uint32(SwapInt32(addr, int32(new))) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("SwapInt32 is not atomic: %v", old)) + } + } +} + +func hammerSwapUint32(addr *uint32, count int) { + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16 + old := SwapUint32(addr, new) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("SwapUint32 is not atomic: %v", old)) + } + } +} + +func hammerSwapUintptr32(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. + addr := (*uintptr)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16 + old := SwapUintptr(addr, new) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("SwapUintptr is not atomic: %#08x", old)) + } + } +} + +func hammerAddInt32(uaddr *uint32, count int) { + addr := (*int32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + AddInt32(addr, 1) + } +} + +func hammerAddUint32(addr *uint32, count int) { + for i := 0; i < count; i++ { + AddUint32(addr, 1) + } +} + +func hammerAddUintptr32(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. + addr := (*uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + AddUintptr(addr, 1) + } +} + +func hammerCompareAndSwapInt32(uaddr *uint32, count int) { + addr := (*int32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := LoadInt32(addr) + if CompareAndSwapInt32(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUint32(addr *uint32, count int) { + for i := 0; i < count; i++ { + for { + v := LoadUint32(addr) + if CompareAndSwapUint32(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUintptr32(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. 
+ addr := (*uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := LoadUintptr(addr) + if CompareAndSwapUintptr(addr, v, v+1) { + break + } + } + } +} + +func TestHammer32(t *testing.T) { + const p = 4 + n := 100000 + if testing.Short() { + n = 1000 + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p)) + + for name, testf := range hammer32 { + c := make(chan int) + var val uint32 + for i := 0; i < p; i++ { + go func() { + defer func() { + if err := recover(); err != nil { + t.Error(err.(string)) + } + c <- 1 + }() + testf(&val, n) + }() + } + for i := 0; i < p; i++ { + <-c + } + if !strings.HasPrefix(name, "Swap") && val != uint32(n)*p { + t.Fatalf("%s: val=%d want %d", name, val, n*p) + } + } +} + +var hammer64 = map[string]func(*uint64, int){ + "SwapInt64": hammerSwapInt64, + "SwapUint64": hammerSwapUint64, + "SwapUintptr": hammerSwapUintptr64, + "AddInt64": hammerAddInt64, + "AddUint64": hammerAddUint64, + "AddUintptr": hammerAddUintptr64, + "CompareAndSwapInt64": hammerCompareAndSwapInt64, + "CompareAndSwapUint64": hammerCompareAndSwapUint64, + "CompareAndSwapUintptr": hammerCompareAndSwapUintptr64, +} + +func init() { + var v uint64 = 1 << 50 + if uintptr(v) == 0 { + // 32-bit system; clear uintptr tests + delete(hammer64, "SwapUintptr") + delete(hammer64, "AddUintptr") + delete(hammer64, "CompareAndSwapUintptr") + } +} + +func hammerSwapInt64(uaddr *uint64, count int) { + addr := (*int64)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32 + old := uint64(SwapInt64(addr, int64(new))) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapInt64 is not atomic: %v", old)) + } + } +} + +func hammerSwapUint64(addr *uint64, count int) { + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32 + old := SwapUint64(addr, new) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapUint64 is not atomic: %v", old)) + } + } +} + +const arch32 = unsafe.Sizeof(uintptr(0)) == 4 + +func hammerSwapUintptr64(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. + if !arch32 { + addr := (*uintptr)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32 + old := SwapUintptr(addr, new) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old)) + } + } + } +} + +func hammerAddInt64(uaddr *uint64, count int) { + addr := (*int64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + AddInt64(addr, 1) + } +} + +func hammerAddUint64(addr *uint64, count int) { + for i := 0; i < count; i++ { + AddUint64(addr, 1) + } +} + +func hammerAddUintptr64(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. 
+ addr := (*uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + AddUintptr(addr, 1) + } +} + +func hammerCompareAndSwapInt64(uaddr *uint64, count int) { + addr := (*int64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := LoadInt64(addr) + if CompareAndSwapInt64(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUint64(addr *uint64, count int) { + for i := 0; i < count; i++ { + for { + v := LoadUint64(addr) + if CompareAndSwapUint64(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUintptr64(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. + addr := (*uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := LoadUintptr(addr) + if CompareAndSwapUintptr(addr, v, v+1) { + break + } + } + } +} + +func TestHammer64(t *testing.T) { + if test64err != nil { + t.Skipf("Skipping 64-bit tests: %v", test64err) + } + const p = 4 + n := 100000 + if testing.Short() { + n = 1000 + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p)) + + for name, testf := range hammer64 { + c := make(chan int) + var val uint64 + for i := 0; i < p; i++ { + go func() { + defer func() { + if err := recover(); err != nil { + t.Error(err.(string)) + } + c <- 1 + }() + testf(&val, n) + }() + } + for i := 0; i < p; i++ { + <-c + } + if !strings.HasPrefix(name, "Swap") && val != uint64(n)*p { + t.Fatalf("%s: val=%d want %d", name, val, n*p) + } + } +} + +func hammerStoreLoadInt32(t *testing.T, paddr unsafe.Pointer) { + addr := (*int32)(paddr) + v := LoadInt32(addr) + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Int32: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + StoreInt32(addr, new) +} + +func hammerStoreLoadUint32(t *testing.T, paddr unsafe.Pointer) { + addr := (*uint32)(paddr) + v := LoadUint32(addr) + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Uint32: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + StoreUint32(addr, new) +} + +func hammerStoreLoadInt64(t *testing.T, paddr unsafe.Pointer) { + addr := (*int64)(paddr) + v := LoadInt64(addr) + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Int64: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<32 + StoreInt64(addr, new) +} + +func hammerStoreLoadUint64(t *testing.T, paddr unsafe.Pointer) { + addr := (*uint64)(paddr) + v := LoadUint64(addr) + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Uint64: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<32 + StoreUint64(addr, new) +} + +func hammerStoreLoadUintptr(t *testing.T, paddr unsafe.Pointer) { + addr := (*uintptr)(paddr) + v := LoadUintptr(addr) + new := v + if arch32 { + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Uintptr: %#x != %#x", vlo, vhi) + } + new = v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + } else { + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Uintptr: %#x != %#x", vlo, vhi) + } + inc := uint64(1 + 1<<32) + new = v + uintptr(inc) + } + StoreUintptr(addr, new) +} + +//go:nocheckptr +// This code is just testing that LoadPointer/StorePointer operate +// atomically; it's not actually calculating pointers. 
+func hammerStoreLoadPointer(t *testing.T, paddr unsafe.Pointer) { + addr := (*unsafe.Pointer)(paddr) + v := uintptr(LoadPointer(addr)) + new := v + if arch32 { + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Pointer: %#x != %#x", vlo, vhi) + } + new = v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + } else { + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Pointer: %#x != %#x", vlo, vhi) + } + inc := uint64(1 + 1<<32) + new = v + uintptr(inc) + } + StorePointer(addr, unsafe.Pointer(new)) +} + +func TestHammerStoreLoad(t *testing.T) { + var tests []func(*testing.T, unsafe.Pointer) + tests = append(tests, hammerStoreLoadInt32, hammerStoreLoadUint32, + hammerStoreLoadUintptr, hammerStoreLoadPointer) + if test64err == nil { + tests = append(tests, hammerStoreLoadInt64, hammerStoreLoadUint64) + } + n := int(1e6) + if testing.Short() { + n = int(1e4) + } + const procs = 8 + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(procs)) + for _, tt := range tests { + c := make(chan int) + var val uint64 + for p := 0; p < procs; p++ { + go func() { + for i := 0; i < n; i++ { + tt(t, unsafe.Pointer(&val)) + } + c <- 1 + }() + } + for p := 0; p < procs; p++ { + <-c + } + } +} + +func TestStoreLoadSeqCst32(t *testing.T) { + if runtime.NumCPU() == 1 { + t.Skipf("Skipping test on %v processor machine", runtime.NumCPU()) + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + N := int32(1e3) + if testing.Short() { + N = int32(1e2) + } + c := make(chan bool, 2) + X := [2]int32{} + ack := [2][3]int32{{-1, -1, -1}, {-1, -1, -1}} + for p := 0; p < 2; p++ { + go func(me int) { + he := 1 - me + for i := int32(1); i < N; i++ { + StoreInt32(&X[me], i) + my := LoadInt32(&X[he]) + StoreInt32(&ack[me][i%3], my) + for w := 1; LoadInt32(&ack[he][i%3]) == -1; w++ { + if w%1000 == 0 { + runtime.Gosched() + } + } + his := LoadInt32(&ack[he][i%3]) + if (my != i && my != i-1) || (his != i && his != i-1) { + t.Errorf("invalid values: %d/%d (%d)", my, his, i) + break + } + if my != i && his != i { + t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i) + break + } + StoreInt32(&ack[me][(i-1)%3], -1) + } + c <- true + }(p) + } + <-c + <-c +} + +func TestStoreLoadSeqCst64(t *testing.T) { + if runtime.NumCPU() == 1 { + t.Skipf("Skipping test on %v processor machine", runtime.NumCPU()) + } + if test64err != nil { + t.Skipf("Skipping 64-bit tests: %v", test64err) + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + N := int64(1e3) + if testing.Short() { + N = int64(1e2) + } + c := make(chan bool, 2) + X := [2]int64{} + ack := [2][3]int64{{-1, -1, -1}, {-1, -1, -1}} + for p := 0; p < 2; p++ { + go func(me int) { + he := 1 - me + for i := int64(1); i < N; i++ { + StoreInt64(&X[me], i) + my := LoadInt64(&X[he]) + StoreInt64(&ack[me][i%3], my) + for w := 1; LoadInt64(&ack[he][i%3]) == -1; w++ { + if w%1000 == 0 { + runtime.Gosched() + } + } + his := LoadInt64(&ack[he][i%3]) + if (my != i && my != i-1) || (his != i && his != i-1) { + t.Errorf("invalid values: %d/%d (%d)", my, his, i) + break + } + if my != i && his != i { + t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i) + break + } + StoreInt64(&ack[me][(i-1)%3], -1) + } + c <- true + }(p) + } + <-c + <-c +} + +func TestStoreLoadRelAcq32(t *testing.T) { + if runtime.NumCPU() == 1 { + t.Skipf("Skipping test on %v processor machine", runtime.NumCPU()) + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + N := int32(1e3) + if testing.Short() { + N = int32(1e2) + } + c := make(chan 
bool, 2) + type Data struct { + signal int32 + pad1 [128]int8 + data1 int32 + pad2 [128]int8 + data2 float32 + } + var X Data + for p := int32(0); p < 2; p++ { + go func(p int32) { + for i := int32(1); i < N; i++ { + if (i+p)%2 == 0 { + X.data1 = i + X.data2 = float32(i) + StoreInt32(&X.signal, i) + } else { + for w := 1; LoadInt32(&X.signal) != i; w++ { + if w%1000 == 0 { + runtime.Gosched() + } + } + d1 := X.data1 + d2 := X.data2 + if d1 != i || d2 != float32(i) { + t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i) + break + } + } + } + c <- true + }(p) + } + <-c + <-c +} + +func TestStoreLoadRelAcq64(t *testing.T) { + if runtime.NumCPU() == 1 { + t.Skipf("Skipping test on %v processor machine", runtime.NumCPU()) + } + if test64err != nil { + t.Skipf("Skipping 64-bit tests: %v", test64err) + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + N := int64(1e3) + if testing.Short() { + N = int64(1e2) + } + c := make(chan bool, 2) + type Data struct { + signal int64 + pad1 [128]int8 + data1 int64 + pad2 [128]int8 + data2 float64 + } + var X Data + for p := int64(0); p < 2; p++ { + go func(p int64) { + for i := int64(1); i < N; i++ { + if (i+p)%2 == 0 { + X.data1 = i + X.data2 = float64(i) + StoreInt64(&X.signal, i) + } else { + for w := 1; LoadInt64(&X.signal) != i; w++ { + if w%1000 == 0 { + runtime.Gosched() + } + } + d1 := X.data1 + d2 := X.data2 + if d1 != i || d2 != float64(i) { + t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i) + break + } + } + } + c <- true + }(p) + } + <-c + <-c +} + +func shouldPanic(t *testing.T, name string, f func()) { + defer func() { + // Check that all GC maps are sane. + runtime.GC() + + err := recover() + want := "unaligned 64-bit atomic operation" + if err == nil { + t.Errorf("%s did not panic", name) + } else if s, _ := err.(string); s != want { + t.Errorf("%s: wanted panic %q, got %q", name, want, err) + } + }() + f() +} + +func TestUnaligned64(t *testing.T) { + // Unaligned 64-bit atomics on 32-bit systems are + // a continual source of pain. Test that on 32-bit systems they crash + // instead of failing silently. 
+	if !arch32 {
+		t.Skip("test only runs on 32-bit systems")
+	}
+
+	x := make([]uint32, 4)
+	p := (*uint64)(unsafe.Pointer(&x[1])) // misaligned
+
+	shouldPanic(t, "LoadUint64", func() { LoadUint64(p) })
+	shouldPanic(t, "StoreUint64", func() { StoreUint64(p, 1) })
+	shouldPanic(t, "CompareAndSwapUint64", func() { CompareAndSwapUint64(p, 1, 2) })
+	shouldPanic(t, "AddUint64", func() { AddUint64(p, 3) })
+}
+
+func TestNilDeref(t *testing.T) {
+	funcs := [...]func(){
+		func() { CompareAndSwapInt32(nil, 0, 0) },
+		func() { CompareAndSwapInt64(nil, 0, 0) },
+		func() { CompareAndSwapUint32(nil, 0, 0) },
+		func() { CompareAndSwapUint64(nil, 0, 0) },
+		func() { CompareAndSwapUintptr(nil, 0, 0) },
+		func() { CompareAndSwapPointer(nil, nil, nil) },
+		func() { SwapInt32(nil, 0) },
+		func() { SwapUint32(nil, 0) },
+		func() { SwapInt64(nil, 0) },
+		func() { SwapUint64(nil, 0) },
+		func() { SwapUintptr(nil, 0) },
+		func() { SwapPointer(nil, nil) },
+		func() { AddInt32(nil, 0) },
+		func() { AddUint32(nil, 0) },
+		func() { AddInt64(nil, 0) },
+		func() { AddUint64(nil, 0) },
+		func() { AddUintptr(nil, 0) },
+		func() { LoadInt32(nil) },
+		func() { LoadInt64(nil) },
+		func() { LoadUint32(nil) },
+		func() { LoadUint64(nil) },
+		func() { LoadUintptr(nil) },
+		func() { LoadPointer(nil) },
+		func() { StoreInt32(nil, 0) },
+		func() { StoreInt64(nil, 0) },
+		func() { StoreUint32(nil, 0) },
+		func() { StoreUint64(nil, 0) },
+		func() { StoreUintptr(nil, 0) },
+		func() { StorePointer(nil, nil) },
+	}
+	for _, f := range funcs {
+		func() {
+			defer func() {
+				runtime.GC()
+				recover()
+			}()
+			f()
+		}()
+	}
+}
diff --git a/src/sync/atomic/doc.go b/src/sync/atomic/doc.go
new file mode 100644
index 0000000..805ef95
--- /dev/null
+++ b/src/sync/atomic/doc.go
@@ -0,0 +1,144 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package atomic provides low-level atomic memory primitives
+// useful for implementing synchronization algorithms.
+//
+// These functions require great care to be used correctly.
+// Except for special, low-level applications, synchronization is better
+// done with channels or the facilities of the sync package.
+// Share memory by communicating;
+// don't communicate by sharing memory.
+//
+// The swap operation, implemented by the SwapT functions, is the atomic
+// equivalent of:
+//
+//	old = *addr
+//	*addr = new
+//	return old
+//
+// The compare-and-swap operation, implemented by the CompareAndSwapT
+// functions, is the atomic equivalent of:
+//
+//	if *addr == old {
+//		*addr = new
+//		return true
+//	}
+//	return false
+//
+// The add operation, implemented by the AddT functions, is the atomic
+// equivalent of:
+//
+//	*addr += delta
+//	return *addr
+//
+// The load and store operations, implemented by the LoadT and StoreT
+// functions, are the atomic equivalents of "return *addr" and
+// "*addr = val".
+//
+package atomic
+
+import (
+	"unsafe"
+)
+
+// BUG(rsc): On 386, the 64-bit functions use instructions unavailable before the Pentium MMX.
+//
+// On non-Linux ARM, the 64-bit functions use instructions unavailable before the ARMv6k core.
+//
+// On ARM, 386, and 32-bit MIPS, it is the caller's responsibility
+// to arrange for 64-bit alignment of 64-bit words accessed atomically.
+// The first word in a variable or in an allocated struct, array, or slice can
+// be relied upon to be 64-bit aligned.
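Aside: the semantics documented above map onto a few standard idioms. The sketch below is not part of the diff; the counters type and the casAdd helper are hypothetical. It shows the retry loop that a compare-and-swap-based add amounts to, the ^uint32(c-1) subtraction trick from the AddUint32 docs, and a struct layout that honors the 64-bit alignment rule quoted above.

package main

import (
	"fmt"
	"sync/atomic"
)

// counters puts the int64 first so it is 64-bit aligned even on
// ARM, 386, and 32-bit MIPS: the first word of an allocated struct
// can be relied upon to be 64-bit aligned.
type counters struct {
	hits  int64 // must come first for atomic access on 32-bit systems
	flags uint32
}

// casAdd adds delta using a compare-and-swap retry loop, the atomic
// equivalent of the AddT semantics described in the package docs.
func casAdd(addr *int64, delta int64) int64 {
	for {
		old := atomic.LoadInt64(addr)
		if atomic.CompareAndSwapInt64(addr, old, old+delta) {
			return old + delta
		}
	}
}

func main() {
	var c counters
	fmt.Println(casAdd(&c.hits, 5)) // 5

	var x uint32 = 10
	atomic.AddUint32(&x, ^uint32(3-1)) // subtract 3: x == 7
	atomic.AddUint32(&x, ^uint32(0))   // decrement: x == 6
	fmt.Println(x)                     // 6
}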
+
+// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
+func SwapInt32(addr *int32, new int32) (old int32)
+
+// SwapInt64 atomically stores new into *addr and returns the previous *addr value.
+func SwapInt64(addr *int64, new int64) (old int64)
+
+// SwapUint32 atomically stores new into *addr and returns the previous *addr value.
+func SwapUint32(addr *uint32, new uint32) (old uint32)
+
+// SwapUint64 atomically stores new into *addr and returns the previous *addr value.
+func SwapUint64(addr *uint64, new uint64) (old uint64)
+
+// SwapUintptr atomically stores new into *addr and returns the previous *addr value.
+func SwapUintptr(addr *uintptr, new uintptr) (old uintptr)
+
+// SwapPointer atomically stores new into *addr and returns the previous *addr value.
+func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)
+
+// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
+func CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
+
+// CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.
+func CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)
+
+// CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.
+func CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)
+
+// CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.
+func CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
+
+// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
+func CompareAndSwapUintptr(addr *uintptr, old, new uintptr) (swapped bool)
+
+// CompareAndSwapPointer executes the compare-and-swap operation for an unsafe.Pointer value.
+func CompareAndSwapPointer(addr *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool)
+
+// AddInt32 atomically adds delta to *addr and returns the new value.
+func AddInt32(addr *int32, delta int32) (new int32)
+
+// AddUint32 atomically adds delta to *addr and returns the new value.
+// To subtract a signed positive constant value c from x, do AddUint32(&x, ^uint32(c-1)).
+// In particular, to decrement x, do AddUint32(&x, ^uint32(0)).
+func AddUint32(addr *uint32, delta uint32) (new uint32)
+
+// AddInt64 atomically adds delta to *addr and returns the new value.
+func AddInt64(addr *int64, delta int64) (new int64)
+
+// AddUint64 atomically adds delta to *addr and returns the new value.
+// To subtract a signed positive constant value c from x, do AddUint64(&x, ^uint64(c-1)).
+// In particular, to decrement x, do AddUint64(&x, ^uint64(0)).
+func AddUint64(addr *uint64, delta uint64) (new uint64)
+
+// AddUintptr atomically adds delta to *addr and returns the new value.
+func AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
+
+// LoadInt32 atomically loads *addr.
+func LoadInt32(addr *int32) (val int32)
+
+// LoadInt64 atomically loads *addr.
+func LoadInt64(addr *int64) (val int64)
+
+// LoadUint32 atomically loads *addr.
+func LoadUint32(addr *uint32) (val uint32)
+
+// LoadUint64 atomically loads *addr.
+func LoadUint64(addr *uint64) (val uint64)
+
+// LoadUintptr atomically loads *addr.
+func LoadUintptr(addr *uintptr) (val uintptr)
+
+// LoadPointer atomically loads *addr.
+func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
+
+// StoreInt32 atomically stores val into *addr.
+func StoreInt32(addr *int32, val int32)
+
+// StoreInt64 atomically stores val into *addr.
+func StoreInt64(addr *int64, val int64)
+
+// StoreUint32 atomically stores val into *addr.
+func StoreUint32(addr *uint32, val uint32)
+
+// StoreUint64 atomically stores val into *addr.
+func StoreUint64(addr *uint64, val uint64)
+
+// StoreUintptr atomically stores val into *addr.
+func StoreUintptr(addr *uintptr, val uintptr)
+
+// StorePointer atomically stores val into *addr.
+func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer)
diff --git a/src/sync/atomic/example_test.go b/src/sync/atomic/example_test.go
new file mode 100644
index 0000000..09ae0aa
--- /dev/null
+++ b/src/sync/atomic/example_test.go
@@ -0,0 +1,76 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic_test
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+func loadConfig() map[string]string {
+	return make(map[string]string)
+}
+
+func requests() chan int {
+	return make(chan int)
+}
+
+// The following example shows how to use Value for periodic program config updates
+// and propagation of the changes to worker goroutines.
+func ExampleValue_config() {
+	var config atomic.Value // holds current server configuration
+	// Create initial config value and store into config.
+	config.Store(loadConfig())
+	go func() {
+		// Reload config every 10 seconds
+		// and update config value with the new version.
+		for {
+			time.Sleep(10 * time.Second)
+			config.Store(loadConfig())
+		}
+	}()
+	// Create worker goroutines that handle incoming requests
+	// using the latest config value.
+	for i := 0; i < 10; i++ {
+		go func() {
+			for r := range requests() {
+				c := config.Load()
+				// Handle request r using config c.
+				_, _ = r, c
+			}
+		}()
+	}
+}
+
+// The following example shows how to maintain a scalable frequently read,
+// but infrequently updated data structure using copy-on-write idiom.
+func ExampleValue_readMostly() {
+	type Map map[string]string
+	var m atomic.Value
+	m.Store(make(Map))
+	var mu sync.Mutex // used only by writers
+	// read function can be used to read the data without further synchronization
+	read := func(key string) (val string) {
+		m1 := m.Load().(Map)
+		return m1[key]
+	}
+	// insert function can be used to update the data without further synchronization
+	insert := func(key, val string) {
+		mu.Lock() // synchronize with other potential writers
+		defer mu.Unlock()
+		m1 := m.Load().(Map) // load current value of the data structure
+		m2 := make(Map)      // create a new value
+		for k, v := range m1 {
+			m2[k] = v // copy all data from the current object to the new one
+		}
+		m2[key] = val // do the update that we need
+		m.Store(m2)   // atomically replace the current object with the new one
+		// At this point all new readers start working with the new version.
+		// The old version will be garbage collected once the existing readers
+		// (if any) are done with it.
+	}
+	_, _ = read, insert
+}
diff --git a/src/sync/atomic/race.s b/src/sync/atomic/race.s
new file mode 100644
index 0000000..fd6ca22
--- /dev/null
+++ b/src/sync/atomic/race.s
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build race
+
+// This file is here only to allow external functions.
+// The operations are implemented in src/runtime/race_amd64.s
diff --git a/src/sync/atomic/value.go b/src/sync/atomic/value.go
new file mode 100644
index 0000000..eab7e70
--- /dev/null
+++ b/src/sync/atomic/value.go
@@ -0,0 +1,86 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import (
+	"unsafe"
+)
+
+// A Value provides an atomic load and store of a consistently typed value.
+// The zero value for a Value returns nil from Load.
+// Once Store has been called, a Value must not be copied.
+//
+// A Value must not be copied after first use.
+type Value struct {
+	v interface{}
+}
+
+// ifaceWords is interface{} internal representation.
+type ifaceWords struct {
+	typ  unsafe.Pointer
+	data unsafe.Pointer
+}
+
+// Load returns the value set by the most recent Store.
+// It returns nil if there has been no call to Store for this Value.
+func (v *Value) Load() (x interface{}) {
+	vp := (*ifaceWords)(unsafe.Pointer(v))
+	typ := LoadPointer(&vp.typ)
+	if typ == nil || uintptr(typ) == ^uintptr(0) {
+		// First store not yet completed.
+		return nil
+	}
+	data := LoadPointer(&vp.data)
+	xp := (*ifaceWords)(unsafe.Pointer(&x))
+	xp.typ = typ
+	xp.data = data
+	return
+}
+
+// Store sets the value of the Value to x.
+// All calls to Store for a given Value must use values of the same concrete type.
+// Store of an inconsistent type panics, as does Store(nil).
+func (v *Value) Store(x interface{}) {
+	if x == nil {
+		panic("sync/atomic: store of nil value into Value")
+	}
+	vp := (*ifaceWords)(unsafe.Pointer(v))
+	xp := (*ifaceWords)(unsafe.Pointer(&x))
+	for {
+		typ := LoadPointer(&vp.typ)
+		if typ == nil {
+			// Attempt to start first store.
+			// Disable preemption so that other goroutines can use
+			// active spin wait to wait for completion; and so that
+			// GC does not see the fake type accidentally.
+			runtime_procPin()
+			if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(^uintptr(0))) {
+				runtime_procUnpin()
+				continue
+			}
+			// Complete first store.
+			StorePointer(&vp.data, xp.data)
+			StorePointer(&vp.typ, xp.typ)
+			runtime_procUnpin()
+			return
+		}
+		if uintptr(typ) == ^uintptr(0) {
+			// First store in progress. Wait.
+			// Since we disable preemption around the first store,
+			// we can wait with active spinning.
+			continue
+		}
+		// First store completed. Check type and overwrite data.
+		if typ != xp.typ {
+			panic("sync/atomic: store of inconsistently typed value into Value")
+		}
+		StorePointer(&vp.data, xp.data)
+		return
+	}
+}
+
+// Disable/enable preemption, implemented in runtime.
+func runtime_procPin()
+func runtime_procUnpin()
diff --git a/src/sync/atomic/value_test.go b/src/sync/atomic/value_test.go
new file mode 100644
index 0000000..f289766
--- /dev/null
+++ b/src/sync/atomic/value_test.go
@@ -0,0 +1,135 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic_test
+
+import (
+	"math/rand"
+	"runtime"
"sync/atomic" + "testing" +) + +func TestValue(t *testing.T) { + var v Value + if v.Load() != nil { + t.Fatal("initial Value is not nil") + } + v.Store(42) + x := v.Load() + if xx, ok := x.(int); !ok || xx != 42 { + t.Fatalf("wrong value: got %+v, want 42", x) + } + v.Store(84) + x = v.Load() + if xx, ok := x.(int); !ok || xx != 84 { + t.Fatalf("wrong value: got %+v, want 84", x) + } +} + +func TestValueLarge(t *testing.T) { + var v Value + v.Store("foo") + x := v.Load() + if xx, ok := x.(string); !ok || xx != "foo" { + t.Fatalf("wrong value: got %+v, want foo", x) + } + v.Store("barbaz") + x = v.Load() + if xx, ok := x.(string); !ok || xx != "barbaz" { + t.Fatalf("wrong value: got %+v, want barbaz", x) + } +} + +func TestValuePanic(t *testing.T) { + const nilErr = "sync/atomic: store of nil value into Value" + const badErr = "sync/atomic: store of inconsistently typed value into Value" + var v Value + func() { + defer func() { + err := recover() + if err != nilErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr) + } + }() + v.Store(nil) + }() + v.Store(42) + func() { + defer func() { + err := recover() + if err != badErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, badErr) + } + }() + v.Store("foo") + }() + func() { + defer func() { + err := recover() + if err != nilErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr) + } + }() + v.Store(nil) + }() +} + +func TestValueConcurrent(t *testing.T) { + tests := [][]interface{}{ + {uint16(0), ^uint16(0), uint16(1 + 2<<8), uint16(3 + 4<<8)}, + {uint32(0), ^uint32(0), uint32(1 + 2<<16), uint32(3 + 4<<16)}, + {uint64(0), ^uint64(0), uint64(1 + 2<<32), uint64(3 + 4<<32)}, + {complex(0, 0), complex(1, 2), complex(3, 4), complex(5, 6)}, + } + p := 4 * runtime.GOMAXPROCS(0) + N := int(1e5) + if testing.Short() { + p /= 2 + N = 1e3 + } + for _, test := range tests { + var v Value + done := make(chan bool, p) + for i := 0; i < p; i++ { + go func() { + r := rand.New(rand.NewSource(rand.Int63())) + expected := true + loop: + for j := 0; j < N; j++ { + x := test[r.Intn(len(test))] + v.Store(x) + x = v.Load() + for _, x1 := range test { + if x == x1 { + continue loop + } + } + t.Logf("loaded unexpected value %+v, want %+v", x, test) + expected = false + break + } + done <- expected + }() + } + for i := 0; i < p; i++ { + if !<-done { + t.FailNow() + } + } + } +} + +func BenchmarkValueRead(b *testing.B) { + var v Value + v.Store(new(int)) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + x := v.Load().(*int) + if *x != 0 { + b.Fatalf("wrong value: got %v, want 0", *x) + } + } + }) +} |