author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:23:18 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:23:18 +0000
commit    43a123c1ae6613b3efeed291fa552ecd909d3acf (patch)
tree      fd92518b7024bc74031f78a1cf9e454b65e73665  /src/runtime/internal/atomic/atomic_arm.go
parent    Initial commit. (diff)
Adding upstream version 1.20.14. (upstream/1.20.14, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/runtime/internal/atomic/atomic_arm.go')
-rw-r--r--  src/runtime/internal/atomic/atomic_arm.go | 244
1 file changed, 244 insertions(+), 0 deletions(-)
diff --git a/src/runtime/internal/atomic/atomic_arm.go b/src/runtime/internal/atomic/atomic_arm.go
new file mode 100644
index 0000000..bdb1847
--- /dev/null
+++ b/src/runtime/internal/atomic/atomic_arm.go
@@ -0,0 +1,244 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm
+
+package atomic
+
+import (
+ "internal/cpu"
+ "unsafe"
+)
+
+// Export some functions via linkname to assembly in sync/atomic.
+//
+//go:linkname Xchg
+//go:linkname Xchguintptr
+
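+// spinlock is a minimal test-and-set lock built on Cas; it serializes the
+// software-emulated 64-bit operations below.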
+type spinlock struct {
+ v uint32
+}
+
+//go:nosplit
+func (l *spinlock) lock() {
+ for {
+ if Cas(&l.v, 0, 1) {
+ return
+ }
+ }
+}
+
+//go:nosplit
+func (l *spinlock) unlock() {
+ Store(&l.v, 0)
+}
+
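+// locktab is a fixed table of cache-line-padded spinlocks. addrLock maps a
+// uint64 address to one of them by hashing its 8-byte-aligned index, so
+// distinct addresses usually take distinct locks.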
+var locktab [57]struct {
+ l spinlock
+ pad [cpu.CacheLinePadSize - unsafe.Sizeof(spinlock{})]byte
+}
+
+func addrLock(addr *uint64) *spinlock {
+ return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
+}
+
+// Atomic add and return new value.
+//
+//go:nosplit
+func Xadd(val *uint32, delta int32) uint32 {
+ for {
+ oval := *val
+ nval := oval + uint32(delta)
+ if Cas(val, oval, nval) {
+ return nval
+ }
+ }
+}
+
+//go:noescape
+func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
+//go:nosplit
+func Xchg(addr *uint32, v uint32) uint32 {
+ for {
+ old := *addr
+ if Cas(addr, old, v) {
+ return old
+ }
+ }
+}
+
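+// Xchguintptr defers to the 32-bit Xchg; uintptr is 32 bits wide on arm.
+//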
+//go:nosplit
+func Xchguintptr(addr *uintptr, v uintptr) uintptr {
+ return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
+}
+
+// Not noescape -- it installs a pointer to addr.
+func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer)
+
+//go:noescape
+func Store(addr *uint32, v uint32)
+
+//go:noescape
+func StoreRel(addr *uint32, v uint32)
+
+//go:noescape
+func StoreReluintptr(addr *uintptr, v uintptr)
+
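+// goCas64 and the go* helpers below emulate 64-bit atomics in software: they
+// fault deliberately on nil or misaligned addresses, then perform the
+// operation while holding the per-address spinlock from locktab. They are
+// used on ARM cores without native 64-bit atomic instructions.
+//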
+//go:nosplit
+func goCas64(addr *uint64, old, new uint64) bool {
+ if uintptr(unsafe.Pointer(addr))&7 != 0 {
+ *(*int)(nil) = 0 // crash on unaligned uint64
+ }
+ _ = *addr // if nil, fault before taking the lock
+ var ok bool
+ addrLock(addr).lock()
+ if *addr == old {
+ *addr = new
+ ok = true
+ }
+ addrLock(addr).unlock()
+ return ok
+}
+
+//go:nosplit
+func goXadd64(addr *uint64, delta int64) uint64 {
+ if uintptr(unsafe.Pointer(addr))&7 != 0 {
+ *(*int)(nil) = 0 // crash on unaligned uint64
+ }
+ _ = *addr // if nil, fault before taking the lock
+ var r uint64
+ addrLock(addr).lock()
+ r = *addr + uint64(delta)
+ *addr = r
+ addrLock(addr).unlock()
+ return r
+}
+
+//go:nosplit
+func goXchg64(addr *uint64, v uint64) uint64 {
+ if uintptr(unsafe.Pointer(addr))&7 != 0 {
+ *(*int)(nil) = 0 // crash on unaligned uint64
+ }
+ _ = *addr // if nil, fault before taking the lock
+ var r uint64
+ addrLock(addr).lock()
+ r = *addr
+ *addr = v
+ addrLock(addr).unlock()
+ return r
+}
+
+//go:nosplit
+func goLoad64(addr *uint64) uint64 {
+ if uintptr(unsafe.Pointer(addr))&7 != 0 {
+ *(*int)(nil) = 0 // crash on unaligned uint64
+ }
+ _ = *addr // if nil, fault before taking the lock
+ var r uint64
+ addrLock(addr).lock()
+ r = *addr
+ addrLock(addr).unlock()
+ return r
+}
+
+//go:nosplit
+func goStore64(addr *uint64, v uint64) {
+ if uintptr(unsafe.Pointer(addr))&7 != 0 {
+ *(*int)(nil) = 0 // crash on unaligned uint64
+ }
+ _ = *addr // if nil, fault before taking the lock
+ addrLock(addr).lock()
+ *addr = v
+ addrLock(addr).unlock()
+}
+
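+// Or8 and And8 emulate byte-wide atomics by widening to the containing
+// 32-bit word and looping on Cas.
+//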
+//go:nosplit
+func Or8(addr *uint8, v uint8) {
+ // Align down to 4 bytes and use 32-bit CAS.
+ uaddr := uintptr(unsafe.Pointer(addr))
+ addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
+ word := uint32(v) << ((uaddr & 3) * 8) // little endian
+ for {
+ old := *addr32
+ if Cas(addr32, old, old|word) {
+ return
+ }
+ }
+}
+
+//go:nosplit
+func And8(addr *uint8, v uint8) {
+ // Align down to 4 bytes and use 32-bit CAS.
+ uaddr := uintptr(unsafe.Pointer(addr))
+ addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
+ word := uint32(v) << ((uaddr & 3) * 8) // little endian
+ mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian
+ word |= ^mask
+ for {
+ old := *addr32
+ if Cas(addr32, old, old&word) {
+ return
+ }
+ }
+}
+
+//go:nosplit
+func Or(addr *uint32, v uint32) {
+ for {
+ old := *addr
+ if Cas(addr, old, old|v) {
+ return
+ }
+ }
+}
+
+//go:nosplit
+func And(addr *uint32, v uint32) {
+ for {
+ old := *addr
+ if Cas(addr, old, old&v) {
+ return
+ }
+ }
+}
+
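+// armcas and the declarations below have no Go bodies; they are implemented
+// in assembly.
+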
+//go:nosplit
+func armcas(ptr *uint32, old, new uint32) bool
+
+//go:noescape
+func Load(addr *uint32) uint32
+
+// NO go:noescape annotation; *addr escapes if result escapes (#31525)
+func Loadp(addr unsafe.Pointer) unsafe.Pointer
+
+//go:noescape
+func Load8(addr *uint8) uint8
+
+//go:noescape
+func LoadAcq(addr *uint32) uint32
+
+//go:noescape
+func LoadAcquintptr(ptr *uintptr) uintptr
+
+//go:noescape
+func Cas64(addr *uint64, old, new uint64) bool
+
+//go:noescape
+func CasRel(addr *uint32, old, new uint32) bool
+
+//go:noescape
+func Xadd64(addr *uint64, delta int64) uint64
+
+//go:noescape
+func Xchg64(addr *uint64, v uint64) uint64
+
+//go:noescape
+func Load64(addr *uint64) uint64
+
+//go:noescape
+func Store8(addr *uint8, v uint8)
+
+//go:noescape
+func Store64(addr *uint64, v uint64)
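
For readers unfamiliar with the pattern above: the 64-bit fallbacks are an instance of lock striping, where an address is hashed to one lock in a small padded table so unrelated addresses rarely contend. The following is a minimal, self-contained sketch of that idea in ordinary Go, with hypothetical names and built on sync/atomic rather than the runtime-internal Cas and Store; it is not part of this file.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"unsafe"
)

// stripedLocks mirrors locktab: a fixed table of spinlocks, each padded out
// to roughly a cache line, selected by hashing an address's 8-byte index.
type stripedLocks [57]struct {
	v   uint32
	pad [60]byte
}

// lockFor acquires and returns the spinlock covering addr.
func (t *stripedLocks) lockFor(addr *uint64) *uint32 {
	l := &t[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(t))].v
	for !atomic.CompareAndSwapUint32(l, 0, 1) {
		// spin, as the runtime's spinlock.lock does
	}
	return l
}

func unlock(l *uint32) { atomic.StoreUint32(l, 0) }

// load64 reads a 64-bit value under the striped lock, analogous to goLoad64.
func (t *stripedLocks) load64(addr *uint64) uint64 {
	l := t.lockFor(addr)
	defer unlock(l)
	return *addr
}

// cas64 emulates a 64-bit compare-and-swap under the striped lock,
// analogous to goCas64.
func (t *stripedLocks) cas64(addr *uint64, old, new uint64) bool {
	l := t.lockFor(addr)
	defer unlock(l)
	if *addr == old {
		*addr = new
		return true
	}
	return false
}

func main() {
	var locks stripedLocks
	var x uint64
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				for {
					old := locks.load64(&x)
					if locks.cas64(&x, old, old+1) {
						break
					}
				}
			}
		}()
	}
	wg.Wait()
	fmt.Println(x) // 4000
}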