author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000
---|---|---
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000
commit | 5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch) |
tree | a94efe259b9009378be6d90eb30d2b019d95c194 /arch/sparc/lib/atomic32.c |
parent | Initial commit. (diff) |
download | linux-430c2fc249ea5c0536abd21c23382884005c9093.tar.xz, linux-430c2fc249ea5c0536abd21c23382884005c9093.zip |
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/sparc/lib/atomic32.c')
-rw-r--r-- | arch/sparc/lib/atomic32.c | 202 |
1 file changed, 202 insertions, 0 deletions
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
new file mode 100644
index 000000000..281fa634b
--- /dev/null
+++ b/arch/sparc/lib/atomic32.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * atomic32.c: 32-bit atomic_t implementation
+ *
+ * Copyright (C) 2004 Keith M Wesolowski
+ * Copyright (C) 2007 Kyle McMartin
+ *
+ * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
+ */
+
+#include <linux/atomic.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#ifdef CONFIG_SMP
+#define ATOMIC_HASH_SIZE	4
+#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
+
+spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
+	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
+};
+
+#else /* SMP */
+
+static DEFINE_SPINLOCK(dummy);
+#define ATOMIC_HASH_SIZE	1
+#define ATOMIC_HASH(a)		(&dummy)
+
+#endif /* SMP */
+
+#define ATOMIC_FETCH_OP(op, c_op)					\
+int atomic_fetch_##op(int i, atomic_t *v)				\
+{									\
+	int ret;							\
+	unsigned long flags;						\
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
+									\
+	ret = v->counter;						\
+	v->counter c_op i;						\
+									\
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
+	return ret;							\
+}									\
+EXPORT_SYMBOL(atomic_fetch_##op);
+
+#define ATOMIC_OP_RETURN(op, c_op)					\
+int atomic_##op##_return(int i, atomic_t *v)				\
+{									\
+	int ret;							\
+	unsigned long flags;						\
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
+									\
+	ret = (v->counter c_op i);					\
+									\
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
+	return ret;							\
+}									\
+EXPORT_SYMBOL(atomic_##op##_return);
+
+ATOMIC_OP_RETURN(add, +=)
+
+ATOMIC_FETCH_OP(add, +=)
+ATOMIC_FETCH_OP(and, &=)
+ATOMIC_FETCH_OP(or, |=)
+ATOMIC_FETCH_OP(xor, ^=)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+
+int atomic_xchg(atomic_t *v, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	ret = v->counter;
+	v->counter = new;
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	return ret;
+}
+EXPORT_SYMBOL(atomic_xchg);
+
+int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	return ret;
+}
+EXPORT_SYMBOL(atomic_cmpxchg);
+
+int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	ret = v->counter;
+	if (ret != u)
+		v->counter += a;
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	return ret;
+}
+EXPORT_SYMBOL(atomic_fetch_add_unless);
+
+/* Atomic operations are already serializing */
+void atomic_set(atomic_t *v, int i)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	v->counter = i;
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+}
+EXPORT_SYMBOL(atomic_set);
+
+unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
+{
+	unsigned long old, flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	old = *addr;
+	*addr = old | mask;
+	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+	return old & mask;
+}
+EXPORT_SYMBOL(___set_bit);
+
+unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
+{
+	unsigned long old, flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	old = *addr;
+	*addr = old & ~mask;
+	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+	return old & mask;
+}
+EXPORT_SYMBOL(___clear_bit);
+
+unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
+{
+	unsigned long old, flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	old = *addr;
+	*addr = old ^ mask;
+	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+	return old & mask;
+}
+EXPORT_SYMBOL(___change_bit);
+
+unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
+{
+	unsigned long flags;
+	u32 prev;
+
+	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+	if ((prev = *ptr) == old)
+		*ptr = new;
+	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+
+	return (unsigned long)prev;
+}
+EXPORT_SYMBOL(__cmpxchg_u32);
+
+u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
+{
+	unsigned long flags;
+	u64 prev;
+
+	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+	if ((prev = *ptr) == old)
+		*ptr = new;
+	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+
+	return prev;
+}
+EXPORT_SYMBOL(__cmpxchg_u64);
+
+unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
+{
+	unsigned long flags;
+	u32 prev;
+
+	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+	prev = *ptr;
+	*ptr = new;
+	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+
+	return (unsigned long)prev;
+}
+EXPORT_SYMBOL(__xchg_u32);
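
For context, the file added above implements atomic operations for 32-bit SPARC CPUs that lack the needed hardware instructions: each variable's address is hashed by ATOMIC_HASH() to one of a small array of spinlocks, and every operation runs under that lock with interrupts disabled. Below is a minimal user-space sketch of the same hashed-lock technique, with pthread mutexes standing in for kernel spinlocks; the names lock_for and emulated_fetch_add are illustrative, not part of any kernel or libc API:

```c
#include <pthread.h>
#include <stdio.h>

/* Hypothetical user-space analogue of the kernel's __atomic_hash table:
 * a small, fixed pool of locks shared by all "atomic" variables. */
#define HASH_SIZE 4

static pthread_mutex_t lock_hash[HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

/* Same address hash as ATOMIC_HASH() above: take bits 8 and up of the
 * address, masked to the pool size, so unrelated variables usually
 * land on different locks. */
static pthread_mutex_t *lock_for(const void *addr)
{
	return &lock_hash[((unsigned long)addr >> 8) & (HASH_SIZE - 1)];
}

/* Lock-based fetch-and-add, mirroring what ATOMIC_FETCH_OP(add, +=)
 * expands to in the file above (minus the kernel's irq handling). */
static int emulated_fetch_add(int *v, int i)
{
	pthread_mutex_t *m = lock_for(v);
	int ret;

	pthread_mutex_lock(m);
	ret = *v;	/* remember the old value... */
	*v += i;	/* ...and apply the update under the lock */
	pthread_mutex_unlock(m);
	return ret;
}

int main(void)
{
	int counter = 40;
	int old = emulated_fetch_add(&counter, 2);

	printf("old=%d new=%d\n", old, counter);	/* old=40 new=42 */
	return 0;
}
```

The hash keeps memory overhead fixed at HASH_SIZE locks no matter how many variables exist, at the cost of occasional false sharing when two addresses hash to the same slot. The kernel version additionally uses spin_lock_irqsave() so the operations remain safe against interrupt handlers running on the same CPU.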