author		Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-07 18:49:45 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-07 18:49:45 +0000
commit		2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree		848558de17fb3008cdf4d861b01ac7781903ce39 /arch/arc/include/asm/atomic-spinlock.h
parent		Initial commit. (diff)
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arc/include/asm/atomic-spinlock.h')
-rw-r--r--	arch/arc/include/asm/atomic-spinlock.h	102
1 file changed, 102 insertions(+), 0 deletions(-)
diff --git a/arch/arc/include/asm/atomic-spinlock.h b/arch/arc/include/asm/atomic-spinlock.h
new file mode 100644
index 000000000..2c830347b
--- /dev/null
+++ b/arch/arc/include/asm/atomic-spinlock.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARC_ATOMIC_SPLOCK_H
+#define _ASM_ARC_ATOMIC_SPLOCK_H
+
+/*
+ * Non-hardware-assisted atomic R-M-W
+ * Locking falls back to irq-disabling alone (UP) or a spinlock (SMP)
+ */
+
+static inline void arch_atomic_set(atomic_t *v, int i)
+{
+ /*
+ * Independent of hardware support, all of the atomic_xxx() APIs need
+ * to follow the same locking rules to ensure that a "hardware"
+ * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+ * sequence.
+ *
+ * Thus atomic_set(), despite being a single insn (and seemingly
+ * atomic), still requires the locking.
+ */
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ WRITE_ONCE(v->counter, i);
+ atomic_ops_unlock(flags);
+}
+
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
+
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ atomic_ops_lock(flags); \
+ v->counter c_op i; \
+ atomic_ops_unlock(flags); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ unsigned int temp; \
+ \
+ /* \
+ * spin lock/unlock provides the needed smp_mb() before/after \
+ */ \
+ atomic_ops_lock(flags); \
+ temp = v->counter; \
+ temp c_op i; \
+ v->counter = temp; \
+ atomic_ops_unlock(flags); \
+ \
+ return temp; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ unsigned int orig; \
+ \
+ /* \
+ * spin lock/unlock provides the needed smp_mb() before/after \
+ */ \
+ atomic_ops_lock(flags); \
+ orig = v->counter; \
+ v->counter c_op i; \
+ atomic_ops_unlock(flags); \
+ \
+ return orig; \
+}
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
+
+#define arch_atomic_andnot arch_atomic_andnot
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#endif
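
The atomic_ops_lock()/atomic_ops_unlock() helpers used throughout the new file are not defined in this patch. For reference, a sketch of how the ARC tree defines them, paraphrased from arch/arc/include/asm/smp.h (consult that header in the tree for the authoritative definitions): on UP, masking local interrupts is sufficient; on SMP, a single global spinlock additionally serializes the emulated atomics across CPUs.

#ifdef CONFIG_SMP
#include <asm/spinlock.h>

extern arch_spinlock_t smp_atomic_ops_lock;

/* Serialize against both local IRQs and other CPUs. */
#define atomic_ops_lock(flags)	do {		\
	local_irq_save(flags);			\
	arch_spin_lock(&smp_atomic_ops_lock);	\
} while (0)

#define atomic_ops_unlock(flags)	do {	\
	arch_spin_unlock(&smp_atomic_ops_lock);	\
	local_irq_restore(flags);		\
} while (0)
#else
/* UP: no other CPUs, so masking local IRQs alone is enough. */
#define atomic_ops_lock(flags)		local_irq_save(flags)
#define atomic_ops_unlock(flags)	local_irq_restore(flags)
#endif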
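
To make the macro machinery concrete, here is ATOMIC_OPS(add, +=, add) expanded by hand (illustration only; this is what the preprocessor generates, modulo whitespace):

static inline void arch_atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter += i;
	atomic_ops_unlock(flags);
}

static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned int temp;

	atomic_ops_lock(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	atomic_ops_unlock(flags);

	return temp;	/* new value */
}

static inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned int orig;

	atomic_ops_lock(flags);
	orig = v->counter;
	v->counter += i;
	atomic_ops_unlock(flags);

	return orig;	/* old value */
}

The later #define arch_atomic_andnot arch_atomic_andnot lines signal to the generic atomic fallback layer that the architecture provides these ops directly, so no generic fallback is synthesized for them.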
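
The lost-update race that the arch_atomic_set() comment warns about can be reproduced in a plain userspace analogue. A hypothetical sketch (a pthread mutex stands in for atomic_ops_lock(); all names here are invented for illustration):

#include <pthread.h>

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

/* Emulated R-M-W, as in ATOMIC_OP_RETURN(): load, modify, store under the lock. */
static void emulated_add(int i)
{
	int temp;

	pthread_mutex_lock(&ops_lock);
	temp = counter;		/* load   */
	temp += i;		/* modify */
	counter = temp;		/* store  */
	pthread_mutex_unlock(&ops_lock);
}

/*
 * BROKEN set: this single store can land between the load and the
 * store of emulated_add() running on another thread; the stale "temp"
 * then overwrites it, and the value being set is silently lost.
 */
static void broken_set(int i)
{
	counter = i;
}

/* Correct set, mirroring arch_atomic_set(): same lock, same rules. */
static void locked_set(int i)
{
	pthread_mutex_lock(&ops_lock);
	counter = i;
	pthread_mutex_unlock(&ops_lock);
}

This is exactly why the file locks even the single-insn atomic_set(): every accessor of v->counter, emulated or not, must follow the same locking protocol.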