Diffstat (limited to 'atomic/unix')
-rw-r--r--  atomic/unix/builtins.c    | 134
-rw-r--r--  atomic/unix/builtins64.c  | 109
-rw-r--r--  atomic/unix/ia32.c        | 131
-rw-r--r--  atomic/unix/mutex.c       | 206
-rw-r--r--  atomic/unix/mutex64.c     | 178
-rw-r--r--  atomic/unix/ppc.c         | 242
-rw-r--r--  atomic/unix/s390.c        | 159
-rw-r--r--  atomic/unix/solaris.c     |  83
8 files changed, 1242 insertions(+), 0 deletions(-)
diff --git a/atomic/unix/builtins.c b/atomic/unix/builtins.c
new file mode 100644
index 0000000..22b828c
--- /dev/null
+++ b/atomic/unix/builtins.c
@@ -0,0 +1,134 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_arch_atomic.h"
+
+#ifdef USE_ATOMICS_BUILTINS
+
+#if defined(__arm__) || defined(__powerpc__) || defined(__powerpc64__)
+#define WEAK_MEMORY_ORDERING 1
+#else
+#define WEAK_MEMORY_ORDERING 0
+#endif
+
+APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
+{
+ return APR_SUCCESS;
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_read32(volatile apr_uint32_t *mem)
+{
+#if HAVE__ATOMIC_BUILTINS
+ return __atomic_load_n(mem, __ATOMIC_SEQ_CST);
+#elif WEAK_MEMORY_ORDERING
+ /* No __sync_load() available => apr_atomic_add32(mem, 0) */
+ return __sync_fetch_and_add(mem, 0);
+#else
+ return *mem;
+#endif
+}
+
+APR_DECLARE(void) apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+#if HAVE__ATOMIC_BUILTINS
+ __atomic_store_n(mem, val, __ATOMIC_SEQ_CST);
+#elif WEAK_MEMORY_ORDERING
+ /* No __sync_store() available => apr_atomic_xchg32(mem, val) */
+ __sync_synchronize();
+ __sync_lock_test_and_set(mem, val);
+#else
+ *mem = val;
+#endif
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+#if HAVE__ATOMIC_BUILTINS
+ return __atomic_fetch_add(mem, val, __ATOMIC_SEQ_CST);
+#else
+ return __sync_fetch_and_add(mem, val);
+#endif
+}
+
+APR_DECLARE(void) apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+#if HAVE__ATOMIC_BUILTINS
+ __atomic_fetch_sub(mem, val, __ATOMIC_SEQ_CST);
+#else
+ __sync_fetch_and_sub(mem, val);
+#endif
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem)
+{
+#if HAVE__ATOMIC_BUILTINS
+ return __atomic_fetch_add(mem, 1, __ATOMIC_SEQ_CST);
+#else
+ return __sync_fetch_and_add(mem, 1);
+#endif
+}
+
+APR_DECLARE(int) apr_atomic_dec32(volatile apr_uint32_t *mem)
+{
+#if HAVE__ATOMIC_BUILTINS
+ return __atomic_sub_fetch(mem, 1, __ATOMIC_SEQ_CST);
+#else
+ return __sync_sub_and_fetch(mem, 1);
+#endif
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem, apr_uint32_t val,
+ apr_uint32_t cmp)
+{
+#if HAVE__ATOMIC_BUILTINS
+ __atomic_compare_exchange_n(mem, &cmp, val, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return cmp;
+#else
+ return __sync_val_compare_and_swap(mem, cmp, val);
+#endif
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+#if HAVE__ATOMIC_BUILTINS
+ return __atomic_exchange_n(mem, val, __ATOMIC_SEQ_CST);
+#else
+ __sync_synchronize();
+ return __sync_lock_test_and_set(mem, val);
+#endif
+}
+
+APR_DECLARE(void*) apr_atomic_casptr(volatile void **mem, void *ptr, const void *cmp)
+{
+#if HAVE__ATOMIC_BUILTINS
+ __atomic_compare_exchange_n(mem, (void **)&cmp, ptr, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return (void *)cmp;
+#else
+ return (void *)__sync_val_compare_and_swap(mem, (void *)cmp, ptr);
+#endif
+}
+
+APR_DECLARE(void*) apr_atomic_xchgptr(volatile void **mem, void *ptr)
+{
+#if HAVE__ATOMIC_BUILTINS
+ return (void *)__atomic_exchange_n(mem, ptr, __ATOMIC_SEQ_CST);
+#else
+ __sync_synchronize();
+ return (void *)__sync_lock_test_and_set(mem, ptr);
+#endif
+}
+
+#endif /* USE_ATOMICS_BUILTINS */
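
The builtins above back the public apr_atomic_*32() API wherever the compiler provides __atomic/__sync intrinsics. As a usage illustration (not part of this patch), here is a minimal sketch of the reference-counting pattern these functions are designed for; the refobj_t type and helper names are hypothetical:

    #include <stdlib.h>
    #include "apr_atomic.h"

    /* Illustrative only: apr_atomic_dec32() returns zero exactly when the
     * counter reaches zero, matching the __atomic_sub_fetch()/
     * __sync_sub_and_fetch() contract implemented above. */
    typedef struct refobj {
        apr_uint32_t refcount;
        /* ... payload ... */
    } refobj_t;

    static void refobj_hold(refobj_t *obj)
    {
        apr_atomic_inc32(&obj->refcount);
    }

    static void refobj_release(refobj_t *obj)
    {
        if (apr_atomic_dec32(&obj->refcount) == 0) {
            free(obj);   /* last reference gone */
        }
    }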
diff --git a/atomic/unix/builtins64.c b/atomic/unix/builtins64.c
new file mode 100644
index 0000000..7d84225
--- /dev/null
+++ b/atomic/unix/builtins64.c
@@ -0,0 +1,109 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_arch_atomic.h"
+
+#ifdef USE_ATOMICS_BUILTINS64
+
+#if defined(__arm__) || defined(__powerpc__) || defined(__powerpc64__)
+#define WEAK_MEMORY_ORDERING 1
+#else
+#define WEAK_MEMORY_ORDERING 0
+#endif
+
+APR_DECLARE(apr_uint64_t) apr_atomic_read64(volatile apr_uint64_t *mem)
+{
+#if HAVE__ATOMIC_BUILTINS
+ return __atomic_load_n(mem, __ATOMIC_SEQ_CST);
+#elif WEAK_MEMORY_ORDERING
+ /* No __sync_load() available => apr_atomic_add64(mem, 0) */
+ return __sync_fetch_and_add(mem, 0);
+#else
+ return *mem;
+#endif
+}
+
+APR_DECLARE(void) apr_atomic_set64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+#if HAVE__ATOMIC_BUILTINS
+ __atomic_store_n(mem, val, __ATOMIC_SEQ_CST);
+#elif WEAK_MEMORY_ORDERING
+ /* No __sync_store() available => apr_atomic_xchg64(mem, val) */
+ __sync_synchronize();
+ __sync_lock_test_and_set(mem, val);
+#else
+ *mem = val;
+#endif
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_add64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+#if HAVE__ATOMIC_BUILTINS
+ return __atomic_fetch_add(mem, val, __ATOMIC_SEQ_CST);
+#else
+ return __sync_fetch_and_add(mem, val);
+#endif
+}
+
+APR_DECLARE(void) apr_atomic_sub64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+#if HAVE__ATOMIC_BUILTINS
+ __atomic_fetch_sub(mem, val, __ATOMIC_SEQ_CST);
+#else
+ __sync_fetch_and_sub(mem, val);
+#endif
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_inc64(volatile apr_uint64_t *mem)
+{
+#if HAVE__ATOMIC_BUILTINS
+ return __atomic_fetch_add(mem, 1, __ATOMIC_SEQ_CST);
+#else
+ return __sync_fetch_and_add(mem, 1);
+#endif
+}
+
+APR_DECLARE(int) apr_atomic_dec64(volatile apr_uint64_t *mem)
+{
+#if HAVE__ATOMIC_BUILTINS
+ return __atomic_sub_fetch(mem, 1, __ATOMIC_SEQ_CST);
+#else
+ return __sync_sub_and_fetch(mem, 1);
+#endif
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_cas64(volatile apr_uint64_t *mem, apr_uint64_t val,
+ apr_uint64_t cmp)
+{
+#if HAVE__ATOMIC_BUILTINS
+ __atomic_compare_exchange_n(mem, &cmp, val, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return cmp;
+#else
+ return __sync_val_compare_and_swap(mem, cmp, val);
+#endif
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_xchg64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+#if HAVE__ATOMIC_BUILTINS
+ return __atomic_exchange_n(mem, val, __ATOMIC_SEQ_CST);
+#else
+ __sync_synchronize();
+ return __sync_lock_test_and_set(mem, val);
+#endif
+}
+
+#endif /* USE_ATOMICS_BUILTINS64 */
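
The 64-bit variants follow the same contract: apr_atomic_add64() is a fetch-and-add returning the value before the addition. A minimal sketch of a shared statistics counter built on them (the names are illustrative, not part of this patch):

    #include "apr_atomic.h"

    /* Illustrative only: a shared byte counter. apr_atomic_read64() above
     * degrades to a plain load only on strongly ordered targets. */
    static apr_uint64_t bytes_total;

    static void account_bytes(apr_uint64_t n)
    {
        apr_atomic_add64(&bytes_total, n);   /* returns old value, ignored */
    }

    static apr_uint64_t bytes_snapshot(void)
    {
        return apr_atomic_read64(&bytes_total);
    }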
diff --git a/atomic/unix/ia32.c b/atomic/unix/ia32.c
new file mode 100644
index 0000000..cdb7a52
--- /dev/null
+++ b/atomic/unix/ia32.c
@@ -0,0 +1,131 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_arch_atomic.h"
+
+#ifdef USE_ATOMICS_IA32
+
+APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
+{
+#if defined (NEED_ATOMICS_GENERIC64)
+ return apr__atomic_generic64_init(p);
+#else
+ return APR_SUCCESS;
+#endif
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_read32(volatile apr_uint32_t *mem)
+{
+ return *mem;
+}
+
+APR_DECLARE(void) apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ *mem = val;
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ asm volatile ("lock; xaddl %0,%1"
+ : "=r" (val), "=m" (*mem)
+ : "0" (val), "m" (*mem)
+ : "memory", "cc");
+ return val;
+}
+
+APR_DECLARE(void) apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ asm volatile ("lock; subl %1, %0"
+ : /* no output */
+ : "m" (*(mem)), "r" (val)
+ : "memory", "cc");
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem)
+{
+ return apr_atomic_add32(mem, 1);
+}
+
+APR_DECLARE(int) apr_atomic_dec32(volatile apr_uint32_t *mem)
+{
+ unsigned char prev;
+
+ asm volatile ("lock; decl %0; setnz %1"
+ : "=m" (*mem), "=qm" (prev)
+ : "m" (*mem)
+ : "memory");
+
+ return prev;
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem, apr_uint32_t with,
+ apr_uint32_t cmp)
+{
+ apr_uint32_t prev;
+
+ asm volatile ("lock; cmpxchgl %1, %2"
+ : "=a" (prev)
+ : "r" (with), "m" (*(mem)), "0"(cmp)
+ : "memory", "cc");
+ return prev;
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ apr_uint32_t prev = val;
+
+ asm volatile ("xchgl %0, %1"
+ : "=r" (prev), "+m" (*mem)
+ : "0" (prev));
+ return prev;
+}
+
+APR_DECLARE(void*) apr_atomic_casptr(volatile void **mem, void *with, const void *cmp)
+{
+ void *prev;
+#if APR_SIZEOF_VOIDP == 4
+ asm volatile ("lock; cmpxchgl %2, %1"
+ : "=a" (prev), "=m" (*mem)
+ : "r" (with), "m" (*mem), "0" (cmp));
+#elif APR_SIZEOF_VOIDP == 8
+ asm volatile ("lock; cmpxchgq %q2, %1"
+ : "=a" (prev), "=m" (*mem)
+ : "r" ((unsigned long)with), "m" (*mem),
+ "0" ((unsigned long)cmp));
+#else
+#error APR_SIZEOF_VOIDP value not supported
+#endif
+ return prev;
+}
+
+APR_DECLARE(void*) apr_atomic_xchgptr(volatile void **mem, void *with)
+{
+ void *prev;
+#if APR_SIZEOF_VOIDP == 4
+ asm volatile ("xchgl %2, %1"
+ : "=a" (prev), "+m" (*mem)
+ : "0" (with));
+#elif APR_SIZEOF_VOIDP == 8
+ asm volatile ("xchgq %q2, %1"
+ : "=a" (prev), "+m" (*mem)
+ : "0" (with));
+#else
+#error APR_SIZEOF_VOIDP value not supported
+#endif
+ return prev;
+}
+
+#endif /* USE_ATOMICS_IA32 */
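
The cmpxchg-based apr_atomic_cas32() returns the value it observed at *mem, so the swap happened exactly when that observed value equals the comparand. That makes the usual CAS retry loop straightforward; a sketch of a saturating increment (saturating_inc32 is a hypothetical helper, not part of this patch):

    #include "apr_atomic.h"

    /* Illustrative only: increment *mem atomically but never past 'limit'. */
    static apr_uint32_t saturating_inc32(volatile apr_uint32_t *mem,
                                         apr_uint32_t limit)
    {
        apr_uint32_t old = apr_atomic_read32(mem);
        apr_uint32_t prev;

        for (;;) {
            if (old >= limit)
                return old;                      /* at the cap, no store */
            prev = apr_atomic_cas32(mem, old + 1, old);
            if (prev == old)
                return old + 1;                  /* our CAS won */
            old = prev;                          /* lost the race, retry */
        }
    }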
diff --git a/atomic/unix/mutex.c b/atomic/unix/mutex.c
new file mode 100644
index 0000000..45bc5e2
--- /dev/null
+++ b/atomic/unix/mutex.c
@@ -0,0 +1,206 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_arch_atomic.h"
+#include "apr_thread_mutex.h"
+
+#ifdef USE_ATOMICS_GENERIC
+
+#include <stdlib.h>
+
+#if APR_HAS_THREADS
+# define DECLARE_MUTEX_LOCKED(name, mem) \
+ apr_thread_mutex_t *name = mutex_hash(mem)
+# define MUTEX_UNLOCK(name) \
+ do { \
+ if (apr_thread_mutex_unlock(name) != APR_SUCCESS) \
+ abort(); \
+ } while (0)
+#else
+# define DECLARE_MUTEX_LOCKED(name, mem)
+# define MUTEX_UNLOCK(name)
+# warning Be warned: using stubs for all atomic operations
+#endif
+
+#if APR_HAS_THREADS
+
+static apr_thread_mutex_t **hash_mutex;
+
+#define NUM_ATOMIC_HASH 7
+/* shift by 2 to get rid of alignment issues */
+#define ATOMIC_HASH(x) (unsigned int)(((unsigned long)(x)>>2)%(unsigned int)NUM_ATOMIC_HASH)
+
+static apr_status_t atomic_cleanup(void *data)
+{
+ if (hash_mutex == data)
+ hash_mutex = NULL;
+
+ return APR_SUCCESS;
+}
+
+APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
+{
+ int i;
+ apr_status_t rv;
+
+ if (hash_mutex != NULL)
+ return APR_SUCCESS;
+
+ hash_mutex = apr_palloc(p, sizeof(apr_thread_mutex_t*) * NUM_ATOMIC_HASH);
+ apr_pool_cleanup_register(p, hash_mutex, atomic_cleanup,
+ apr_pool_cleanup_null);
+
+ for (i = 0; i < NUM_ATOMIC_HASH; i++) {
+ rv = apr_thread_mutex_create(&(hash_mutex[i]),
+ APR_THREAD_MUTEX_DEFAULT, p);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+
+ return apr__atomic_generic64_init(p);
+}
+
+static APR_INLINE apr_thread_mutex_t *mutex_hash(volatile apr_uint32_t *mem)
+{
+ apr_thread_mutex_t *mutex = hash_mutex[ATOMIC_HASH(mem)];
+
+ if (apr_thread_mutex_lock(mutex) != APR_SUCCESS) {
+ abort();
+ }
+
+ return mutex;
+}
+
+#else
+
+APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
+{
+ return apr__atomic_generic64_init(p);
+}
+
+#endif /* APR_HAS_THREADS */
+
+APR_DECLARE(apr_uint32_t) apr_atomic_read32(volatile apr_uint32_t *mem)
+{
+ return *mem;
+}
+
+APR_DECLARE(void) apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ DECLARE_MUTEX_LOCKED(mutex, mem);
+
+ *mem = val;
+
+ MUTEX_UNLOCK(mutex);
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ apr_uint32_t old_value;
+ DECLARE_MUTEX_LOCKED(mutex, mem);
+
+ old_value = *mem;
+ *mem += val;
+
+ MUTEX_UNLOCK(mutex);
+
+ return old_value;
+}
+
+APR_DECLARE(void) apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ DECLARE_MUTEX_LOCKED(mutex, mem);
+ *mem -= val;
+ MUTEX_UNLOCK(mutex);
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem)
+{
+ return apr_atomic_add32(mem, 1);
+}
+
+APR_DECLARE(int) apr_atomic_dec32(volatile apr_uint32_t *mem)
+{
+ apr_uint32_t new;
+ DECLARE_MUTEX_LOCKED(mutex, mem);
+
+ (*mem)--;
+ new = *mem;
+
+ MUTEX_UNLOCK(mutex);
+
+ return new;
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem, apr_uint32_t with,
+ apr_uint32_t cmp)
+{
+ apr_uint32_t prev;
+ DECLARE_MUTEX_LOCKED(mutex, mem);
+
+ prev = *mem;
+ if (prev == cmp) {
+ *mem = with;
+ }
+
+ MUTEX_UNLOCK(mutex);
+
+ return prev;
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ apr_uint32_t prev;
+ DECLARE_MUTEX_LOCKED(mutex, mem);
+
+ prev = *mem;
+ *mem = val;
+
+ MUTEX_UNLOCK(mutex);
+
+ return prev;
+}
+
+APR_DECLARE(void*) apr_atomic_casptr(volatile void **mem, void *with, const void *cmp)
+{
+ void *prev;
+ DECLARE_MUTEX_LOCKED(mutex, (volatile apr_uint32_t *)mem); /* hash on the address, not the stored pointer */
+
+ prev = *(void **)mem;
+ if (prev == cmp) {
+ *mem = with;
+ }
+
+ MUTEX_UNLOCK(mutex);
+
+ return prev;
+}
+
+APR_DECLARE(void*) apr_atomic_xchgptr(volatile void **mem, void *with)
+{
+ void *prev;
+ DECLARE_MUTEX_LOCKED(mutex, (volatile apr_uint32_t *)mem); /* hash on the address, not the stored pointer */
+
+ prev = *(void **)mem;
+ *mem = with;
+
+ MUTEX_UNLOCK(mutex);
+
+ return prev;
+}
+
+#endif /* USE_ATOMICS_GENERIC */
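
Unlike the hardware back ends, this generic fallback has real state: apr_atomic_init() allocates the hash_mutex table from the supplied pool, and a second call is a no-op once the table exists (apr_initialize() normally performs the call for you). A minimal usage sketch under that assumption:

    #include "apr_general.h"
    #include "apr_pools.h"
    #include "apr_atomic.h"

    /* Illustrative only: the pool passed to apr_atomic_init() must outlive
     * every atomic operation, since it owns the NUM_ATOMIC_HASH mutexes. */
    int main(void)
    {
        apr_pool_t *pool;
        apr_uint32_t counter = 0;

        apr_initialize();
        apr_pool_create(&pool, NULL);
        apr_atomic_init(pool);        /* harmless repeat of apr_initialize() */

        apr_atomic_inc32(&counter);
        apr_atomic_sub32(&counter, 1);

        apr_pool_destroy(pool);
        apr_terminate();
        return 0;
    }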
diff --git a/atomic/unix/mutex64.c b/atomic/unix/mutex64.c
new file mode 100644
index 0000000..9fc44af
--- /dev/null
+++ b/atomic/unix/mutex64.c
@@ -0,0 +1,178 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_arch_atomic.h"
+#include "apr_thread_mutex.h"
+
+#if defined(USE_ATOMICS_GENERIC) || defined (NEED_ATOMICS_GENERIC64)
+
+#include <stdlib.h>
+
+#if APR_HAS_THREADS
+# define DECLARE_MUTEX_LOCKED(name, mem) \
+ apr_thread_mutex_t *name = mutex_hash(mem)
+# define MUTEX_UNLOCK(name) \
+ do { \
+ if (apr_thread_mutex_unlock(name) != APR_SUCCESS) \
+ abort(); \
+ } while (0)
+#else
+# define DECLARE_MUTEX_LOCKED(name, mem)
+# define MUTEX_UNLOCK(name)
+# warning Be warned: using stubs for all atomic operations
+#endif
+
+#if APR_HAS_THREADS
+
+static apr_thread_mutex_t **hash_mutex;
+
+#define NUM_ATOMIC_HASH 7
+/* shift by 2 to get rid of alignment issues */
+#define ATOMIC_HASH(x) (unsigned int)(((unsigned long)(x)>>2)%(unsigned int)NUM_ATOMIC_HASH)
+
+static apr_status_t atomic_cleanup(void *data)
+{
+ if (hash_mutex == data)
+ hash_mutex = NULL;
+
+ return APR_SUCCESS;
+}
+
+apr_status_t apr__atomic_generic64_init(apr_pool_t *p)
+{
+ int i;
+ apr_status_t rv;
+
+ if (hash_mutex != NULL)
+ return APR_SUCCESS;
+
+ hash_mutex = apr_palloc(p, sizeof(apr_thread_mutex_t*) * NUM_ATOMIC_HASH);
+ apr_pool_cleanup_register(p, hash_mutex, atomic_cleanup,
+ apr_pool_cleanup_null);
+
+ for (i = 0; i < NUM_ATOMIC_HASH; i++) {
+ rv = apr_thread_mutex_create(&(hash_mutex[i]),
+ APR_THREAD_MUTEX_DEFAULT, p);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+static APR_INLINE apr_thread_mutex_t *mutex_hash(volatile apr_uint64_t *mem)
+{
+ apr_thread_mutex_t *mutex = hash_mutex[ATOMIC_HASH(mem)];
+
+ if (apr_thread_mutex_lock(mutex) != APR_SUCCESS) {
+ abort();
+ }
+
+ return mutex;
+}
+
+#else
+
+apr_status_t apr__atomic_generic64_init(apr_pool_t *p)
+{
+ return APR_SUCCESS;
+}
+
+#endif /* APR_HAS_THREADS */
+
+APR_DECLARE(apr_uint64_t) apr_atomic_read64(volatile apr_uint64_t *mem)
+{
+ return *mem;
+}
+
+APR_DECLARE(void) apr_atomic_set64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+ DECLARE_MUTEX_LOCKED(mutex, mem);
+
+ *mem = val;
+
+ MUTEX_UNLOCK(mutex);
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_add64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+ apr_uint64_t old_value;
+ DECLARE_MUTEX_LOCKED(mutex, mem);
+
+ old_value = *mem;
+ *mem += val;
+
+ MUTEX_UNLOCK(mutex);
+
+ return old_value;
+}
+
+APR_DECLARE(void) apr_atomic_sub64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+ DECLARE_MUTEX_LOCKED(mutex, mem);
+ *mem -= val;
+ MUTEX_UNLOCK(mutex);
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_inc64(volatile apr_uint64_t *mem)
+{
+ return apr_atomic_add64(mem, 1);
+}
+
+APR_DECLARE(int) apr_atomic_dec64(volatile apr_uint64_t *mem)
+{
+ apr_uint64_t new;
+ DECLARE_MUTEX_LOCKED(mutex, mem);
+
+ (*mem)--;
+ new = *mem;
+
+ MUTEX_UNLOCK(mutex);
+
+ return new;
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_cas64(volatile apr_uint64_t *mem, apr_uint64_t with,
+ apr_uint64_t cmp)
+{
+ apr_uint64_t prev;
+ DECLARE_MUTEX_LOCKED(mutex, mem);
+
+ prev = *mem;
+ if (prev == cmp) {
+ *mem = with;
+ }
+
+ MUTEX_UNLOCK(mutex);
+
+ return prev;
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_xchg64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+ apr_uint64_t prev;
+ DECLARE_MUTEX_LOCKED(mutex, mem);
+
+ prev = *mem;
+ *mem = val;
+
+ MUTEX_UNLOCK(mutex);
+
+ return prev;
+}
+
+#endif /* USE_ATOMICS_GENERIC || NEED_ATOMICS_GENERIC64 */
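
Built on the cas64 above (in either its mutexed or native form), a high-water-mark update is a typical retry loop; update_max64 below is a hypothetical helper, not part of this patch:

    #include "apr_atomic.h"

    /* Illustrative only: every path through the loop either observes a
     * value >= candidate or installs candidate atomically. */
    static void update_max64(volatile apr_uint64_t *mem, apr_uint64_t candidate)
    {
        apr_uint64_t old = apr_atomic_read64(mem);
        apr_uint64_t prev;

        while (old < candidate) {
            prev = apr_atomic_cas64(mem, candidate, old);
            if (prev == old)
                break;          /* we installed the new maximum */
            old = prev;         /* someone else stored; re-check against it */
        }
    }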
diff --git a/atomic/unix/ppc.c b/atomic/unix/ppc.c
new file mode 100644
index 0000000..46554af
--- /dev/null
+++ b/atomic/unix/ppc.c
@@ -0,0 +1,242 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_arch_atomic.h"
+
+#ifdef USE_ATOMICS_PPC
+
+#ifdef PPC405_ERRATA
+# define PPC405_ERR77_SYNC " sync\n"
+#else
+# define PPC405_ERR77_SYNC
+#endif
+
+APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
+{
+#if defined (NEED_ATOMICS_GENERIC64)
+ return apr__atomic_generic64_init(p);
+#else
+ return APR_SUCCESS;
+#endif
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_read32(volatile apr_uint32_t *mem)
+{
+ apr_uint32_t val;
+ asm volatile (" sync\n" /* full barrier */
+ " lwz %0,%1\n" /* load */
+ " cmpw 7,%0,%0\n" /* compare (always equal) */
+ " bne- 7,$+4\n" /* goto next in any case */
+ " isync" /* acquire barrier (bc+isync) */
+ : "=r"(val)
+ : "m"(*mem)
+ : "cc", "memory");
+ return val;
+}
+
+APR_DECLARE(void) apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ asm volatile (" sync\n" /* full barrier */
+ " stw %1,%0" /* store */
+ : "=m"(*mem)
+ : "r"(val)
+ : "memory");
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ apr_uint32_t prev, temp;
+
+ asm volatile (" sync\n" /* full barrier */
+ "1:\n" /* lost reservation */
+ " lwarx %0,0,%3\n" /* load and reserve */
+ " add %1,%0,%4\n" /* add val and prev */
+ PPC405_ERR77_SYNC /* ppc405 Erratum 77 */
+ " stwcx. %1,0,%3\n" /* store if still reserved */
+ " bne- 1b\n" /* loop if lost */
+ " isync\n" /* acquire barrier (bc+isync) */
+ : "=&r" (prev), "=&r" (temp), "=m" (*mem)
+ : "b" (mem), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+
+APR_DECLARE(void) apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ apr_uint32_t temp;
+
+ asm volatile (" sync\n" /* full barrier */
+ "1:\n" /* lost reservation */
+ " lwarx %0,0,%2\n" /* load and reserve */
+ " subf %0,%3,%0\n" /* subtract val */
+ PPC405_ERR77_SYNC /* ppc405 Erratum 77 */
+ " stwcx. %0,0,%2\n" /* store new value */
+ " bne- 1b\n" /* loop if lost */
+ " isync\n" /* acquire barrier (bc+isync) */
+ : "=&r" (temp), "=m" (*mem)
+ : "b" (mem), "r" (val)
+ : "cc", "memory");
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem)
+{
+ apr_uint32_t prev;
+
+ asm volatile (" sync\n" /* full barrier */
+ "1:\n" /* lost reservation */
+ " lwarx %0,0,%2\n" /* load and reserve */
+ " addi %0,%0,1\n" /* add immediate */
+ PPC405_ERR77_SYNC /* ppc405 Erratum 77 */
+ " stwcx. %0,0,%2\n" /* store new value */
+ " bne- 1b\n" /* loop if lost */
+ " subi %0,%0,1\n" /* return old value */
+ " isync\n" /* acquire barrier (bc+isync) */
+ : "=&b" (prev), "=m" (*mem)
+ : "b" (mem), "m" (*mem)
+ : "cc", "memory");
+
+ return prev;
+}
+
+APR_DECLARE(int) apr_atomic_dec32(volatile apr_uint32_t *mem)
+{
+ apr_uint32_t prev;
+
+ asm volatile (" sync\n" /* full barrier */
+ "1:\n" /* lost reservation */
+ " lwarx %0,0,%2\n" /* load and reserve */
+ " subi %0,%0,1\n" /* subtract immediate */
+ PPC405_ERR77_SYNC /* ppc405 Erratum 77 */
+ " stwcx. %0,0,%2\n" /* store new value */
+ " bne- 1b\n" /* loop if lost */
+ " isync\n" /* acquire barrier (bc+isync) */
+ : "=&b" (prev), "=m" (*mem)
+ : "b" (mem), "m" (*mem)
+ : "cc", "memory");
+
+ return prev;
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem, apr_uint32_t with,
+ apr_uint32_t cmp)
+{
+ apr_uint32_t prev;
+
+ asm volatile (" sync\n" /* full barrier */
+ "1:\n" /* lost reservation */
+ " lwarx %0,0,%1\n" /* load and reserve */
+ " cmpw %0,%3\n" /* compare operands */
+ " bne- exit_%=\n" /* skip if not equal */
+ PPC405_ERR77_SYNC /* ppc405 Erratum 77 */
+ " stwcx. %2,0,%1\n" /* store new value */
+ " bne- 1b\n" /* loop if lost */
+ "exit_%=:\n" /* not equal */
+ " isync\n" /* acquire barrier (bc+isync) */
+ : "=&r" (prev)
+ : "b" (mem), "r" (with), "r" (cmp)
+ : "cc", "memory");
+
+ return prev;
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ apr_uint32_t prev;
+
+ asm volatile (" sync\n" /* full barrier */
+ "1:\n" /* lost reservation */
+ " lwarx %0,0,%1\n" /* load and reserve */
+ PPC405_ERR77_SYNC /* ppc405 Erratum 77 */
+ " stwcx. %2,0,%1\n" /* store new value */
+ " bne- 1b\n" /* loop if lost */
+ " isync\n" /* acquire barrier (bc+isync) */
+ : "=&r" (prev)
+ : "b" (mem), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+
+APR_DECLARE(void*) apr_atomic_casptr(volatile void **mem, void *with, const void *cmp)
+{
+ void *prev;
+#if APR_SIZEOF_VOIDP == 4
+ asm volatile (" sync\n" /* full barrier */
+ "1:\n" /* lost reservation */
+ " lwarx %0,0,%1\n" /* load and reserve */
+ " cmpw %0,%3\n" /* compare operands */
+ " bne- 2f\n" /* skip if not equal */
+ PPC405_ERR77_SYNC /* ppc405 Erratum 77 */
+ " stwcx. %2,0,%1\n" /* store new value */
+ " bne- 1b\n" /* loop if lost */
+ "2:\n" /* not equal */
+ " isync\n" /* acquire barrier (bc+isync) */
+ : "=&r" (prev)
+ : "b" (mem), "r" (with), "r" (cmp)
+ : "cc", "memory");
+#elif APR_SIZEOF_VOIDP == 8
+ asm volatile (" sync\n" /* full barrier */
+ "1:\n" /* lost reservation */
+ " ldarx %0,0,%1\n" /* load and reserve */
+ " cmpd %0,%3\n" /* compare operands */
+ " bne- 2f\n" /* skip if not equal */
+ PPC405_ERR77_SYNC /* ppc405 Erratum 77 */
+ " stdcx. %2,0,%1\n" /* store new value */
+ " bne- 1b\n" /* loop if lost */
+ "2:\n" /* not equal */
+ " isync\n" /* acquire barrier (bc+isync) */
+ : "=&r" (prev)
+ : "b" (mem), "r" (with), "r" (cmp)
+ : "cc", "memory");
+#else
+#error APR_SIZEOF_VOIDP value not supported
+#endif
+ return prev;
+}
+
+APR_DECLARE(void*) apr_atomic_xchgptr(volatile void **mem, void *with)
+{
+ void *prev;
+#if APR_SIZEOF_VOIDP == 4
+ asm volatile (" sync\n" /* full barrier */
+ "1:\n" /* lost reservation */
+ " lwarx %0,0,%1\n" /* load and reserve */
+ PPC405_ERR77_SYNC /* ppc405 Erratum 77 */
+ " stwcx. %2,0,%1\n" /* store new value */
+ " bne- 1b\n" /* loop if lost */
+ " isync\n" /* acquire barrier (bc+isync) */
+ : "=&r" (prev)
+ : "b" (mem), "r" (with)
+ : "cc", "memory");
+#elif APR_SIZEOF_VOIDP == 8
+ asm volatile (" sync\n" /* full barrier */
+ "1:\n" /* lost reservation */
+ " ldarx %0,0,%1\n" /* load and reserve */
+ PPC405_ERR77_SYNC /* ppc405 Erratum 77 */
+ " stdcx. %2,0,%1\n" /* store new value */
+ " bne- 1b\n" /* loop if lost */
+ " isync\n" /* acquire barrier (bc+isync) */
+ : "=&r" (prev)
+ : "b" (mem), "r" (with)
+ : "cc", "memory");
+#else
+#error APR_SIZEOF_VOIDP value not supported
+#endif
+ return prev;
+}
+
+#endif /* USE_ATOMICS_PPC */
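
Because apr_atomic_xchg32() on PowerPC issues sync before and isync after the lwarx/stwcx. loop, an exchange-based test-and-set lock gets acquire semantics for free, and the sync in apr_atomic_set32() provides release on unlock. A deliberately naive sketch (spinlock_t and the helpers are illustrative; production code wants backoff, or simply apr_thread_mutex_t):

    #include "apr_atomic.h"

    /* Illustrative only: busy-waiting test-and-set lock. */
    typedef struct { volatile apr_uint32_t locked; } spinlock_t;

    static void spin_lock(spinlock_t *l)
    {
        while (apr_atomic_xchg32(&l->locked, 1) != 0) {
            /* spin until the previous holder stores 0 */
        }
    }

    static void spin_unlock(spinlock_t *l)
    {
        apr_atomic_set32(&l->locked, 0);  /* sync precedes the store above */
    }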
diff --git a/atomic/unix/s390.c b/atomic/unix/s390.c
new file mode 100644
index 0000000..ae5d040
--- /dev/null
+++ b/atomic/unix/s390.c
@@ -0,0 +1,159 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_arch_atomic.h"
+
+#ifdef USE_ATOMICS_S390
+
+APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
+{
+#if defined (NEED_ATOMICS_GENERIC64)
+ return apr__atomic_generic64_init(p);
+#else
+ return APR_SUCCESS;
+#endif
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_read32(volatile apr_uint32_t *mem)
+{
+ return *mem;
+}
+
+APR_DECLARE(void) apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ *mem = val;
+}
+
+static APR_INLINE apr_uint32_t atomic_add(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ apr_uint32_t prev = *mem, temp;
+
+ asm volatile ("loop_%=:\n"
+ " lr %1,%0\n"
+ " alr %1,%3\n"
+ " cs %0,%1,%2\n"
+ " jl loop_%=\n"
+ : "+d" (prev), "+d" (temp), "=Q" (*mem)
+ : "d" (val), "m" (*mem)
+ : "cc", "memory");
+
+ return prev;
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ return atomic_add(mem, val);
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem)
+{
+ return atomic_add(mem, 1);
+}
+
+static APR_INLINE apr_uint32_t atomic_sub(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ apr_uint32_t prev = *mem, temp;
+
+ asm volatile ("loop_%=:\n"
+ " lr %1,%0\n"
+ " slr %1,%3\n"
+ " cs %0,%1,%2\n"
+ " jl loop_%=\n"
+ : "+d" (prev), "+d" (temp), "=Q" (*mem)
+ : "d" (val), "m" (*mem)
+ : "cc", "memory");
+
+ return temp;
+}
+
+APR_DECLARE(void) apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ atomic_sub(mem, val);
+}
+
+APR_DECLARE(int) apr_atomic_dec32(volatile apr_uint32_t *mem)
+{
+ return atomic_sub(mem, 1);
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem, apr_uint32_t with,
+ apr_uint32_t cmp)
+{
+ asm volatile (" cs %0,%2,%1\n"
+ : "+d" (cmp), "=Q" (*mem)
+ : "d" (with), "m" (*mem)
+ : "cc", "memory");
+
+ return cmp;
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ apr_uint32_t prev = *mem;
+
+ asm volatile ("loop_%=:\n"
+ " cs %0,%2,%1\n"
+ " jl loop_%=\n"
+ : "+d" (prev), "=Q" (*mem)
+ : "d" (val), "m" (*mem)
+ : "cc", "memory");
+
+ return prev;
+}
+
+APR_DECLARE(void*) apr_atomic_casptr(volatile void **mem, void *with, const void *cmp)
+{
+ void *prev = (void *) cmp;
+#if APR_SIZEOF_VOIDP == 4
+ asm volatile (" cs %0,%2,%1\n"
+ : "+d" (prev), "=Q" (*mem)
+ : "d" (with), "m" (*mem)
+ : "cc", "memory");
+#elif APR_SIZEOF_VOIDP == 8
+ asm volatile (" csg %0,%2,%1\n"
+ : "+d" (prev), "=Q" (*mem)
+ : "d" (with), "m" (*mem)
+ : "cc", "memory");
+#else
+#error APR_SIZEOF_VOIDP value not supported
+#endif
+ return prev;
+}
+
+APR_DECLARE(void*) apr_atomic_xchgptr(volatile void **mem, void *with)
+{
+ void *prev = (void *) *mem;
+#if APR_SIZEOF_VOIDP == 4
+ asm volatile ("loop_%=:\n"
+ " cs %0,%2,%1\n"
+ " jl loop_%=\n"
+ : "+d" (prev), "=Q" (*mem)
+ : "d" (with), "m" (*mem)
+ : "cc", "memory");
+#elif APR_SIZEOF_VOIDP == 8
+ asm volatile ("loop_%=:\n"
+ " csg %0,%2,%1\n"
+ " jl loop_%=\n"
+ : "+d" (prev), "=Q" (*mem)
+ : "d" (with), "m" (*mem)
+ : "cc", "memory");
+#else
+#error APR_SIZEOF_VOIDP value not supported
+#endif
+ return prev;
+}
+
+#endif /* USE_ATOMICS_S390 */
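
The cs/csg instruction used above is a full compare-and-swap, which is all a lock-free stack push needs; pop would additionally require ABA protection and is omitted. A sketch with hypothetical node_t/stack_push names, not part of this patch:

    #include "apr_atomic.h"

    /* Illustrative only: intrusive lock-free stack push via casptr. */
    typedef struct node {
        struct node *next;
    } node_t;

    static void stack_push(volatile void **top, node_t *n)
    {
        void *old = *(void **)top;
        void *prev;

        for (;;) {
            n->next = (node_t *)old;            /* link before publishing */
            prev = apr_atomic_casptr(top, n, old);
            if (prev == old)
                return;                         /* n is now the top */
            old = prev;                         /* top moved under us; retry */
        }
    }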
diff --git a/atomic/unix/solaris.c b/atomic/unix/solaris.c
new file mode 100644
index 0000000..9bc2de2
--- /dev/null
+++ b/atomic/unix/solaris.c
@@ -0,0 +1,83 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_arch_atomic.h"
+
+#ifdef USE_ATOMICS_SOLARIS
+
+#include <atomic.h>
+
+APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
+{
+#if defined (NEED_ATOMICS_GENERIC64)
+ return apr__atomic_generic64_init(p);
+#else
+ return APR_SUCCESS;
+#endif
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_read32(volatile apr_uint32_t *mem)
+{
+ return *mem;
+}
+
+APR_DECLARE(void) apr_atomic_set32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ *mem = val;
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_add32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ return atomic_add_32_nv(mem, val) - val;
+}
+
+APR_DECLARE(void) apr_atomic_sub32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ atomic_add_32(mem, -val);
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem)
+{
+ return atomic_inc_32_nv(mem) - 1;
+}
+
+APR_DECLARE(int) apr_atomic_dec32(volatile apr_uint32_t *mem)
+{
+ return atomic_dec_32_nv(mem);
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem, apr_uint32_t with,
+ apr_uint32_t cmp)
+{
+ return atomic_cas_32(mem, cmp, with);
+}
+
+APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val)
+{
+ return atomic_swap_32(mem, val);
+}
+
+APR_DECLARE(void*) apr_atomic_casptr(volatile void **mem, void *with, const void *cmp)
+{
+ return atomic_cas_ptr(mem, (void*) cmp, with);
+}
+
+APR_DECLARE(void*) apr_atomic_xchgptr(volatile void **mem, void *with)
+{
+ return atomic_swap_ptr(mem, with);
+}
+
+#endif /* USE_ATOMICS_SOLARIS */
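
Taken together, cas32 plus read32/set32 also cover one-shot initialization: exactly one caller wins the 0 -> 1 transition. A sketch under that assumption (init_once and once_state are illustrative names; on the strongly ordered targets these back ends serve, the plain read32/set32 suffice for the flag):

    #include "apr_atomic.h"

    /* Illustrative only: 0 = new, 1 = busy, 2 = done. */
    static volatile apr_uint32_t once_state;

    static void init_once(void (*init)(void))
    {
        if (apr_atomic_cas32(&once_state, 1, 0) == 0) {
            init();                              /* we won the transition */
            apr_atomic_set32(&once_state, 2);
        }
        else {
            while (apr_atomic_read32(&once_state) != 2) {
                /* another thread is initializing; wait for it to finish */
            }
        }
    }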