author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 18:00:34 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 18:00:34 +0000
commit     3f619478f796eddbba6e39502fe941b285dd97b1 (patch)
tree       e2c7b5777f728320e5b5542b6213fd3591ba51e2 /include/atomic
parent     Initial commit. (diff)
download   mariadb-3f619478f796eddbba6e39502fe941b285dd97b1.tar.xz
           mariadb-3f619478f796eddbba6e39502fe941b285dd97b1.zip
Adding upstream version 1:10.11.6. (upstream/1%10.11.6, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include/atomic')
-rw-r--r--  include/atomic/gcc_builtins.h   78
-rw-r--r--  include/atomic/generic-msvc.h  140
-rw-r--r--  include/atomic/solaris.h       117

3 files changed, 335 insertions(+), 0 deletions(-)
diff --git a/include/atomic/gcc_builtins.h b/include/atomic/gcc_builtins.h
new file mode 100644
index 00000000..5b2b2c90
--- /dev/null
+++ b/include/atomic/gcc_builtins.h
@@ -0,0 +1,78 @@
+#ifndef ATOMIC_GCC_BUILTINS_INCLUDED
+#define ATOMIC_GCC_BUILTINS_INCLUDED
+
+/* Copyright (c) 2017 MariaDB Foundation
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
+
+
+#define MY_MEMORY_ORDER_RELAXED __ATOMIC_RELAXED
+#define MY_MEMORY_ORDER_CONSUME __ATOMIC_CONSUME
+#define MY_MEMORY_ORDER_ACQUIRE __ATOMIC_ACQUIRE
+#define MY_MEMORY_ORDER_RELEASE __ATOMIC_RELEASE
+#define MY_MEMORY_ORDER_ACQ_REL __ATOMIC_ACQ_REL
+#define MY_MEMORY_ORDER_SEQ_CST __ATOMIC_SEQ_CST
+
+#define my_atomic_store32_explicit(P, D, O) __atomic_store_n((P), (D), (O))
+#define my_atomic_store64_explicit(P, D, O) __atomic_store_n((P), (D), (O))
+#define my_atomic_storeptr_explicit(P, D, O) __atomic_store_n((P), (D), (O))
+
+#define my_atomic_load32_explicit(P, O) __atomic_load_n((P), (O))
+#define my_atomic_load64_explicit(P, O) __atomic_load_n((P), (O))
+#define my_atomic_loadptr_explicit(P, O) __atomic_load_n((P), (O))
+
+#define my_atomic_fas32_explicit(P, D, O) __atomic_exchange_n((P), (D), (O))
+#define my_atomic_fas64_explicit(P, D, O) __atomic_exchange_n((P), (D), (O))
+#define my_atomic_fasptr_explicit(P, D, O) __atomic_exchange_n((P), (D), (O))
+
+#define my_atomic_add32_explicit(P, A, O) __atomic_fetch_add((P), (A), (O))
+#define my_atomic_add64_explicit(P, A, O) __atomic_fetch_add((P), (A), (O))
+
+#define my_atomic_cas32_weak_explicit(P, E, D, S, F) \
+  __atomic_compare_exchange_n((P), (E), (D), 1, (S), (F))
+#define my_atomic_cas64_weak_explicit(P, E, D, S, F) \
+  __atomic_compare_exchange_n((P), (E), (D), 1, (S), (F))
+#define my_atomic_casptr_weak_explicit(P, E, D, S, F) \
+  __atomic_compare_exchange_n((P), (E), (D), 1, (S), (F))
+
+#define my_atomic_cas32_strong_explicit(P, E, D, S, F) \
+  __atomic_compare_exchange_n((P), (E), (D), 0, (S), (F))
+#define my_atomic_cas64_strong_explicit(P, E, D, S, F) \
+  __atomic_compare_exchange_n((P), (E), (D), 0, (S), (F))
+#define my_atomic_casptr_strong_explicit(P, E, D, S, F) \
+  __atomic_compare_exchange_n((P), (E), (D), 0, (S), (F))
+
+#define my_atomic_store32(P, D) __atomic_store_n((P), (D), __ATOMIC_SEQ_CST)
+#define my_atomic_store64(P, D) __atomic_store_n((P), (D), __ATOMIC_SEQ_CST)
+#define my_atomic_storeptr(P, D) __atomic_store_n((P), (D), __ATOMIC_SEQ_CST)
+
+#define my_atomic_load32(P) __atomic_load_n((P), __ATOMIC_SEQ_CST)
+#define my_atomic_load64(P) __atomic_load_n((P), __ATOMIC_SEQ_CST)
+#define my_atomic_loadptr(P) __atomic_load_n((P), __ATOMIC_SEQ_CST)
+
+#define my_atomic_fas32(P, D) __atomic_exchange_n((P), (D), __ATOMIC_SEQ_CST)
+#define my_atomic_fas64(P, D) __atomic_exchange_n((P), (D), __ATOMIC_SEQ_CST)
+#define my_atomic_fasptr(P, D) __atomic_exchange_n((P), (D), __ATOMIC_SEQ_CST)
+
+#define my_atomic_add32(P, A) __atomic_fetch_add((P), (A), __ATOMIC_SEQ_CST)
+#define my_atomic_add64(P, A) __atomic_fetch_add((P), (A), __ATOMIC_SEQ_CST)
+
+#define my_atomic_cas32(P, E, D) \
+  __atomic_compare_exchange_n((P), (E), (D), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+#define my_atomic_cas64(P, E, D) \
+  __atomic_compare_exchange_n((P), (E), (D), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+#define my_atomic_casptr(P, E, D) \
+  __atomic_compare_exchange_n((P), (E), (D), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+#endif /* ATOMIC_GCC_BUILTINS_INCLUDED */
diff --git a/include/atomic/generic-msvc.h b/include/atomic/generic-msvc.h
new file mode 100644
index 00000000..ff2a5434
--- /dev/null
+++ b/include/atomic/generic-msvc.h
@@ -0,0 +1,140 @@
+#ifndef ATOMIC_MSC_INCLUDED
+#define ATOMIC_MSC_INCLUDED
+
+/* Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
+
+#include <windows.h>
+
+static inline int my_atomic_cas32(int32 volatile *a, int32 *cmp, int32 set)
+{
+  int32 initial_cmp= *cmp;
+  int32 initial_a= InterlockedCompareExchange((volatile LONG*)a,
+                                              set, initial_cmp);
+  int ret= (initial_a == initial_cmp);
+  if (!ret)
+    *cmp= initial_a;
+  return ret;
+}
+
+static inline int my_atomic_cas64(int64 volatile *a, int64 *cmp, int64 set)
+{
+  int64 initial_cmp= *cmp;
+  int64 initial_a= InterlockedCompareExchange64((volatile LONGLONG*)a,
+                                                (LONGLONG)set,
+                                                (LONGLONG)initial_cmp);
+  int ret= (initial_a == initial_cmp);
+  if (!ret)
+    *cmp= initial_a;
+  return ret;
+}
+
+static inline int my_atomic_casptr(void * volatile *a, void **cmp, void *set)
+{
+  void *initial_cmp= *cmp;
+  void *initial_a= InterlockedCompareExchangePointer(a, set, initial_cmp);
+  int ret= (initial_a == initial_cmp);
+  if (!ret)
+    *cmp= initial_a;
+  return ret;
+}
+
+static inline int32 my_atomic_add32(int32 volatile *a, int32 v)
+{
+  return (int32)InterlockedExchangeAdd((volatile LONG*)a, v);
+}
+
+static inline int64 my_atomic_add64(int64 volatile *a, int64 v)
+{
+  return (int64)InterlockedExchangeAdd64((volatile LONGLONG*)a, (LONGLONG)v);
+}
+
+
+/*
+  According to MSDN:
+
+  Simple reads and writes to properly-aligned 32-bit variables are atomic
+  operations.
+  ...
+  Simple reads and writes to properly aligned 64-bit variables are atomic on
+  64-bit Windows. Reads and writes to 64-bit values are not guaranteed to be
+  atomic on 32-bit Windows.
+
+  https://msdn.microsoft.com/en-us/library/windows/desktop/ms684122(v=vs.85).aspx
+*/
+
+static inline int32 my_atomic_load32(int32 volatile *a)
+{
+  int32 value= *a;
+  MemoryBarrier();
+  return value;
+}
+
+static inline int64 my_atomic_load64(int64 volatile *a)
+{
+#ifdef _M_X64
+  int64 value= *a;
+  MemoryBarrier();
+  return value;
+#else
+  return (int64) InterlockedCompareExchange64((volatile LONGLONG *) a, 0, 0);
+#endif
+}
+
+static inline void* my_atomic_loadptr(void * volatile *a)
+{
+  void *value= *a;
+  MemoryBarrier();
+  return value;
+}
+
+static inline int32 my_atomic_fas32(int32 volatile *a, int32 v)
+{
+  return (int32)InterlockedExchange((volatile LONG*)a, v);
+}
+
+static inline int64 my_atomic_fas64(int64 volatile *a, int64 v)
+{
+  return (int64)InterlockedExchange64((volatile LONGLONG*)a, v);
+}
+
+static inline void * my_atomic_fasptr(void * volatile *a, void * v)
+{
+  return InterlockedExchangePointer(a, v);
+}
+
+static inline void my_atomic_store32(int32 volatile *a, int32 v)
+{
+  MemoryBarrier();
+  *a= v;
+}
+
+static inline void my_atomic_store64(int64 volatile *a, int64 v)
+{
+#ifdef _M_X64
+  MemoryBarrier();
+  *a= v;
+#else
+  (void) InterlockedExchange64((volatile LONGLONG *) a, v);
+#endif
+}
+
+static inline void my_atomic_storeptr(void * volatile *a, void *v)
+{
+  MemoryBarrier();
+  *a= v;
+}
+
+#endif /* ATOMIC_MSC_INCLUDED */
diff --git a/include/atomic/solaris.h b/include/atomic/solaris.h
new file mode 100644
index 00000000..b48ab46f
--- /dev/null
+++ b/include/atomic/solaris.h
@@ -0,0 +1,117 @@
+#ifndef ATOMIC_SOLARIS_INCLUDED
+#define ATOMIC_SOLARIS_INCLUDED
+
+/* Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
+
+#include <atomic.h>
+
+#if defined(__GNUC__)
+#define atomic_typeof(T,V) __typeof__(V)
+#else
+#define atomic_typeof(T,V) T
+#endif
+
+static inline int my_atomic_cas32(int32 volatile *a, int32 *cmp, int32 set)
+{
+  int ret;
+  atomic_typeof(uint32_t, *cmp) sav;
+  sav= atomic_cas_32((volatile uint32_t *)a, (uint32_t)*cmp, (uint32_t)set);
+  ret= (sav == *cmp);
+  if (!ret)
+    *cmp= sav;
+  return ret;
+}
+
+static inline int my_atomic_cas64(int64 volatile *a, int64 *cmp, int64 set)
+{
+  int ret;
+  atomic_typeof(uint64_t, *cmp) sav;
+  sav= atomic_cas_64((volatile uint64_t *)a, (uint64_t)*cmp, (uint64_t)set);
+  ret= (sav == *cmp);
+  if (!ret)
+    *cmp= sav;
+  return ret;
+}
+
+static inline int my_atomic_casptr(void * volatile *a, void **cmp, void *set)
+{
+  int ret;
+  atomic_typeof(void *, *cmp) sav;
+  sav= atomic_cas_ptr((volatile void **)a, (void *)*cmp, (void *)set);
+  ret= (sav == *cmp);
+  if (!ret)
+    *cmp= sav;
+  return ret;
+}
+
+static inline int32 my_atomic_add32(int32 volatile *a, int32 v)
+{
+  int32 nv= atomic_add_32_nv((volatile uint32_t *)a, v);
+  return nv - v;
+}
+
+static inline int64 my_atomic_add64(int64 volatile *a, int64 v)
+{
+  int64 nv= atomic_add_64_nv((volatile uint64_t *)a, v);
+  return nv - v;
+}
+
+static inline int32 my_atomic_fas32(int32 volatile *a, int32 v)
+{
+  return atomic_swap_32((volatile uint32_t *)a, (uint32_t)v);
+}
+
+static inline int64 my_atomic_fas64(int64 volatile *a, int64 v)
+{
+  return atomic_swap_64((volatile uint64_t *)a, (uint64_t)v);
+}
+
+static inline void * my_atomic_fasptr(void * volatile *a, void * v)
+{
+  return atomic_swap_ptr(a, v);
+}
+
+static inline int32 my_atomic_load32(int32 volatile *a)
+{
+  return atomic_or_32_nv((volatile uint32_t *)a, 0);
+}
+
+static inline int64 my_atomic_load64(int64 volatile *a)
+{
+  return atomic_or_64_nv((volatile uint64_t *)a, 0);
+}
+
+static inline void* my_atomic_loadptr(void * volatile *a)
+{
+  return atomic_add_ptr_nv(a, 0);
+}
+
+static inline void my_atomic_store32(int32 volatile *a, int32 v)
+{
+  (void) atomic_swap_32((volatile uint32_t *)a, (uint32_t)v);
+}
+
+static inline void my_atomic_store64(int64 volatile *a, int64 v)
+{
+  (void) atomic_swap_64((volatile uint64_t *)a, (uint64_t)v);
+}
+
+static inline void my_atomic_storeptr(void * volatile *a, void *v)
+{
+  (void) atomic_swap_ptr((volatile void **)a, (void *)v);
+}
+
+#endif /* ATOMIC_SOLARIS_INCLUDED */
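
All three headers implement the same my_atomic interface (sequentially consistent operations plus _explicit variants that take a MY_MEMORY_ORDER_* argument), so callers are written once and compiled against whichever backend the build selects. The following is a minimal usage sketch, not part of the commit: it assumes the GCC/Clang backend, an include path rooted at include/, and stands in MariaDB's int32 typedef with <stdint.h> (in the real tree both come via my_global.h).

#include <stdint.h>
typedef int32_t int32;               /* hypothetical stand-in for MariaDB's int32 typedef */
#include "atomic/gcc_builtins.h"     /* include path assumed; normally pulled in via my_atomic.h */

static int32 hits= 0;
static int32 peak= 0;

/* Fetch-and-add: my_atomic_add32() returns the value held *before*
   the addition, in all three backends. */
int32 record_hit(void)
{
  return my_atomic_add32(&hits, 1) + 1;   /* value after this increment */
}

/* CAS retry loop: on failure my_atomic_cas32() writes the freshly
   observed value back into `expected`, so no explicit reload is needed. */
void update_peak(int32 candidate)
{
  int32 expected= my_atomic_load32(&peak);
  while (expected < candidate &&
         !my_atomic_cas32(&peak, &expected, candidate))
  { /* expected was refreshed by the failed CAS; re-test and retry */ }
}

/* The _explicit variants take memory orders, e.g. a release store that
   publishes data for a matching acquire load in a reader thread. */
static int32 ready= 0;

void publish(void)
{
  my_atomic_store32_explicit(&ready, 1, MY_MEMORY_ORDER_RELEASE);
}

int is_published(void)
{
  return my_atomic_load32_explicit(&ready, MY_MEMORY_ORDER_ACQUIRE);
}

Note the CAS convention shared by all three files: the second argument is a pointer to the expected value, the return value is nonzero on success, and on failure the expected value is overwritten with what was actually observed, which is what makes the compact retry loop above work.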