path: root/lib/locks/exclusive
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:13:47 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:13:47 +0000
commit     102b0d2daa97dae68d3eed54d8fe37a9cc38a892 (patch)
tree       bcf648efac40ca6139842707f0eba5a4496a6dd2  /lib/locks/exclusive
parent     Initial commit. (diff)
download   arm-trusted-firmware-102b0d2daa97dae68d3eed54d8fe37a9cc38a892.tar.xz
           arm-trusted-firmware-102b0d2daa97dae68d3eed54d8fe37a9cc38a892.zip

Adding upstream version 2.8.0+dfsg. (tags: upstream/2.8.0+dfsg, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'lib/locks/exclusive')
-rw-r--r--  lib/locks/exclusive/aarch32/spinlock.S  43
-rw-r--r--  lib/locks/exclusive/aarch64/spinlock.S  75
2 files changed, 118 insertions(+), 0 deletions(-)
diff --git a/lib/locks/exclusive/aarch32/spinlock.S b/lib/locks/exclusive/aarch32/spinlock.S
new file mode 100644
index 0000000..9492cc0
--- /dev/null
+++ b/lib/locks/exclusive/aarch32/spinlock.S
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+        .globl  spin_lock
+        .globl  spin_unlock
+
+#if ARM_ARCH_AT_LEAST(8, 0)
+/*
+ * According to the ARMv8-A Architecture Reference Manual, "when the global
+ * monitor for a PE changes from Exclusive Access state to Open Access state,
+ * an event is generated". This applies to both AArch32 and AArch64 modes of
+ * ARMv8-A. As a result, no explicit SEV on unlock is required.
+ */
+#define COND_SEV()
+#else
+#define COND_SEV() sev
+#endif
+
+func spin_lock
+        mov     r2, #1
+1:
+        ldrex   r1, [r0]        /* read the lock word, arm the exclusive monitor */
+        cmp     r1, #0
+        wfene                   /* lock held: sleep until an event arrives */
+        strexeq r1, r2, [r0]    /* lock was free: try to claim it (r1 = status) */
+        cmpeq   r1, #0
+        bne     1b              /* held, or the exclusive store failed: retry */
+        dmb                     /* barrier: critical section cannot start early */
+        bx      lr
+endfunc spin_lock
+
+
+func spin_unlock
+        mov     r1, #0
+        stl     r1, [r0]        /* store-release: clear the lock word */
+        COND_SEV()              /* pre-v8 only: explicitly wake WFE waiters */
+        bx      lr
+endfunc spin_unlock
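
[Editor's note] Both files implement the same C-callable interface. The
caller-side sketch below is illustrative, not part of this commit: it assumes
TF-A's spinlock_t layout from include/lib/spinlock.h (a single 32-bit lock
word, which is what the [r0]/[x0] accesses above operate on), and
console_putc is a hypothetical output routine used only for the example.

#include <stdint.h>

/* Assumed layout, per include/lib/spinlock.h: one 32-bit word, 0 = free. */
typedef struct spinlock {
        volatile uint32_t lock;
} spinlock_t;

void spin_lock(spinlock_t *lock);
void spin_unlock(spinlock_t *lock);

extern void console_putc(int c);        /* hypothetical, for illustration */

static spinlock_t console_lock;         /* statically zero-initialised: free */

static void console_puts_serialised(const char *s)
{
        spin_lock(&console_lock);       /* spins/sleeps in WFE until acquired */
        while (*s != '\0') {
                console_putc(*s++);
        }
        spin_unlock(&console_lock);     /* store-release wakes any waiters */
}

Note that the lock variable must live in memory covered by the global
exclusive monitor (normal cacheable shared memory) for the WFE wake-up
behaviour described above to apply.
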
diff --git a/lib/locks/exclusive/aarch64/spinlock.S b/lib/locks/exclusive/aarch64/spinlock.S
new file mode 100644
index 0000000..e941b8a
--- /dev/null
+++ b/lib/locks/exclusive/aarch64/spinlock.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+        .globl  spin_lock
+        .globl  spin_unlock
+
+#if USE_SPINLOCK_CAS
+#if !ARM_ARCH_AT_LEAST(8, 1)
+#error USE_SPINLOCK_CAS option requires at least an ARMv8.1 platform
+#endif
+
+/*
+ * When compiled for ARMv8.1 or later, build the spin lock on the Compare and
+ * Swap instruction.
+ */
+
+/*
+ * Acquire the lock using the Compare and Swap instruction.
+ *
+ * Compare against 0 with acquire semantics and swap in 1. If the lock is held,
+ * use load-exclusive semantics to monitor the address and enter WFE.
+ *
+ * void spin_lock(spinlock_t *lock);
+ */
+func spin_lock
+        mov     w2, #1
+1:      mov     w1, wzr
+2:      casa    w1, w2, [x0]    /* if lock == 0, atomically store 1 (acquire) */
+        cbz     w1, 3f          /* w1 holds the old value: 0 means we own it */
+        ldxr    w1, [x0]        /* arm the exclusive monitor on the lock word */
+        cbz     w1, 2b          /* released in the meantime: retry the CAS */
+        wfe                     /* sleep; the unlock clears the monitor and wakes us */
+        b       1b
+3:
+        ret
+endfunc spin_lock
+
+#else /* !USE_SPINLOCK_CAS */
+
+/*
+ * Acquire lock using load-/store-exclusive instruction pair.
+ *
+ * void spin_lock(spinlock_t *lock);
+ */
+func spin_lock
+        mov     w2, #1
+        sevl                    /* prime the event register: first WFE falls through */
+l1:     wfe                     /* wait for a release (or the primed event) */
+l2:     ldaxr   w1, [x0]        /* load-acquire exclusive of the lock word */
+        cbnz    w1, l1          /* lock held: go back to sleep */
+        stxr    w1, w2, [x0]    /* try to claim it; w1 = 0 on success */
+        cbnz    w1, l2          /* exclusive store failed: re-check */
+        ret
+endfunc spin_lock
+
+#endif /* USE_SPINLOCK_CAS */
+
+/*
+ * Release a lock previously acquired by spin_lock.
+ *
+ * Use a store-release to unconditionally clear the spinlock variable.
+ * The store generates an event for all cores waiting in WFE while the
+ * address is monitored by the global monitor, waking them.
+ *
+ * void spin_unlock(spinlock_t *lock);
+ */
+func spin_unlock
+        stlr    wzr, [x0]       /* store-release zero: clear lock, wake WFE waiters */
+        ret
+endfunc spin_unlock
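
[Editor's note] To make the acquire/release protocol concrete, here is the
same algorithm restated with C11 atomics. This is an editorial sketch, not
TF-A code: C has no portable equivalent of the WFE/SEV event machinery, so
the wait is shown as a plain polling loop at the point where the assembly
sleeps in WFE. All names are invented for the example.

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
        atomic_uint_fast32_t lock;      /* 0 = free, 1 = held */
} sketch_spinlock_t;

static void sketch_spin_lock(sketch_spinlock_t *l)
{
        uint_fast32_t expected;

        for (;;) {
                /* CASA: compare against 0, swap in 1, acquire on success. */
                expected = 0;
                if (atomic_compare_exchange_strong_explicit(
                                &l->lock, &expected, 1,
                                memory_order_acquire, memory_order_relaxed)) {
                        return;         /* lock acquired */
                }
                /* LDXR + WFE stand-in: the assembly sleeps here instead of
                 * burning cycles; wake-up comes from the releasing store. */
                while (atomic_load_explicit(&l->lock,
                                memory_order_relaxed) != 0) {
                        /* spin */
                }
        }
}

static void sketch_spin_unlock(sketch_spinlock_t *l)
{
        /* STLR: store-release; on ARMv8 this also wakes WFE waiters. */
        atomic_store_explicit(&l->lock, 0, memory_order_release);
}

Acquire/release ordering is all the algorithm needs: the acquire on the
successful CAS (or LDAXR, or the DMB in the AArch32 path) keeps critical-
section accesses from moving before the lock is taken, and the release store
keeps them from moving after the unlock.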