From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Thu, 11 Apr 2024 10:27:49 +0200
Subject: Adding upstream version 6.6.15.

Signed-off-by: Daniel Baumann
---
 arch/m68k/include/asm/atomic.h | 221 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 221 insertions(+)
 create mode 100644 arch/m68k/include/asm/atomic.h

diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
new file mode 100644
index 0000000000..4bfbc25f6e
--- /dev/null
+++ b/arch/m68k/include/asm/atomic.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ARCH_M68K_ATOMIC__
+#define __ARCH_M68K_ATOMIC__
+
+#include <linux/types.h>
+#include <linux/irqflags.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ */
+
+/*
+ * We do not have SMP m68k systems, so we don't have to deal with that.
+ */
+
+#define arch_atomic_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))
+
+/*
+ * The ColdFire parts cannot do some immediate to memory operations,
+ * so for them we do not specify the "i" asm constraint.
+ */
+#ifdef CONFIG_COLDFIRE
+#define	ASM_DI	"d"
+#else
+#define	ASM_DI	"di"
+#endif
+
+#define ATOMIC_OP(op, c_op, asm_op)					\
+static inline void arch_atomic_##op(int i, atomic_t *v)		\
+{									\
+	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
+}									\
+
+#ifdef CONFIG_RMW_INSNS
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
+{									\
+	int t, tmp;							\
+									\
+	__asm__ __volatile__(						\
+			"1:	movel %2,%1\n"				\
+			"	" #asm_op "l %3,%1\n"			\
+			"	casl %2,%1,%0\n"			\
+			"	jne 1b"					\
+			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
+			: "di" (i), "2" (arch_atomic_read(v)));		\
+	return t;							\
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	int t, tmp;							\
+									\
+	__asm__ __volatile__(						\
+			"1:	movel %2,%1\n"				\
+			"	" #asm_op "l %3,%1\n"			\
+			"	casl %2,%1,%0\n"			\
+			"	jne 1b"					\
+			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
+			: "di" (i), "2" (arch_atomic_read(v)));		\
+	return tmp;							\
+}
+
+#else
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+static inline int arch_atomic_##op##_return(int i, atomic_t * v)	\
+{									\
+	unsigned long flags;						\
+	int t;								\
+									\
+	local_irq_save(flags);						\
+	t = (v->counter c_op i);					\
+	local_irq_restore(flags);					\
+									\
+	return t;							\
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v)		\
+{									\
+	unsigned long flags;						\
+	int t;								\
+									\
+	local_irq_save(flags);						\
+	t = v->counter;							\
+	v->counter c_op i;						\
+	local_irq_restore(flags);					\
+									\
+	return t;							\
+}
+
+#endif /* CONFIG_RMW_INSNS */
+
+#define ATOMIC_OPS(op, c_op, asm_op)					\
+	ATOMIC_OP(op, c_op, asm_op)					\
+	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#define arch_atomic_add_return	arch_atomic_add_return
+#define arch_atomic_sub_return	arch_atomic_sub_return
+#define arch_atomic_fetch_add	arch_atomic_fetch_add
+#define arch_atomic_fetch_sub	arch_atomic_fetch_sub
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)					\
+	ATOMIC_OP(op, c_op, asm_op)					\
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, eor)
+
+#define arch_atomic_fetch_and	arch_atomic_fetch_and
+#define arch_atomic_fetch_or	arch_atomic_fetch_or
+#define arch_atomic_fetch_xor	arch_atomic_fetch_xor
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+static inline void arch_atomic_inc(atomic_t *v)
+{
+	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
+}
+#define arch_atomic_inc arch_atomic_inc
+
+static inline void arch_atomic_dec(atomic_t *v)
+{
+	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
+}
+#define arch_atomic_dec arch_atomic_dec
+
+static inline int arch_atomic_dec_and_test(atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
+	return c != 0;
+}
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
+
+static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__(
+		"subql #1,%1; slt %0"
+		: "=d" (c), "=m" (*v)
+		: "m" (*v));
+	return c != 0;
+}
+
+static inline int arch_atomic_inc_and_test(atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
+	return c != 0;
+}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
+
+#ifndef CONFIG_RMW_INSNS
+
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	unsigned long flags;
+	int prev;
+
+	local_irq_save(flags);
+	prev = arch_atomic_read(v);
+	if (prev == old)
+		arch_atomic_set(v, new);
+	local_irq_restore(flags);
+	return prev;
+}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
+
+static inline int arch_atomic_xchg(atomic_t *v, int new)
+{
+	unsigned long flags;
+	int prev;
+
+	local_irq_save(flags);
+	prev = arch_atomic_read(v);
+	arch_atomic_set(v, new);
+	local_irq_restore(flags);
+	return prev;
+}
+#define arch_atomic_xchg arch_atomic_xchg
+
+#endif /* !CONFIG_RMW_INSNS */
+
+static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("subl %2,%1; seq %0"
+			     : "=d" (c), "+m" (*v)
+			     : ASM_DI (i));
+	return c != 0;
+}
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
+
+static inline int arch_atomic_add_negative(int i, atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__("addl %2,%1; smi %0"
+			     : "=d" (c), "+m" (*v)
+			     : ASM_DI (i));
+	return c != 0;
+}
+#define arch_atomic_add_negative arch_atomic_add_negative
+
+#endif /* __ARCH_M68K_ATOMIC __ */
--
cgit v1.2.3
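
For readers without an m68k background: the CONFIG_RMW_INSNS variants in the patch implement arch_atomic_add_return() and friends as a load / modify / compare-and-swap retry loop ("casl ...; jne 1b"), while the fallback variants simply mask interrupts around a plain read-modify-write, which suffices because there are no SMP m68k systems. The sketch below restates the CAS retry pattern in portable C11. It is illustrative only, not part of the patch or of the kernel API, and the function name sketch_atomic_add_return is hypothetical.

	#include <stdatomic.h>

	/* Portable restatement of the CONFIG_RMW_INSNS arch_atomic_add_return():
	 * read the counter, compute the new value, and publish it with a
	 * compare-and-swap, retrying if another context raced in between. */
	static int sketch_atomic_add_return(int i, _Atomic int *v)
	{
		int old = atomic_load(v);

		/* On failure, atomic_compare_exchange_weak() reloads the
		 * current value of *v into old, so each retry recomputes
		 * old + i with fresh data -- the same effect as the
		 * "jne 1b" backward branch in the inline assembly. */
		while (!atomic_compare_exchange_weak(v, &old, old + i))
			;

		return old + i;	/* the value after the update, like "return t" */
	}

The interrupt-masking fallback needs no loop at all: with interrupts off on a uniprocessor, nothing can interleave with the read-modify-write, so a plain "v->counter += i" between local_irq_save() and local_irq_restore() is already atomic with respect to everything that could observe it.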