/*-------------------------------------------------------------------------
 *
 * fallback.h
 *    Fallback for platforms without spinlock and/or atomics support. Slower
 *    than native atomics support, but not unusably slow.
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics/fallback.h
 *
 *-------------------------------------------------------------------------
 */

/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
#	error "should be included via atomics.h"
#endif

#ifndef pg_memory_barrier_impl
/*
 * If we have no memory barrier implementation for this architecture, we
 * fall back to acquiring and releasing a spinlock.  This might, in turn,
 * fall back to the semaphore-based spinlock implementation, which will be
 * amazingly slow.
 *
 * It's not self-evident that every possible legal implementation of a
 * spinlock acquire-and-release would be equivalent to a full memory barrier.
 * For example, I'm not sure that Itanium's acq and rel add up to a full
 * fence.  But all of our actual implementations seem OK in this regard.
 */
#define PG_HAVE_MEMORY_BARRIER_EMULATION

extern void pg_spinlock_barrier(void);
#define pg_memory_barrier_impl pg_spinlock_barrier
#endif
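
/*
 * Illustration, not part of this header: pg_spinlock_barrier() is defined
 * in a separate translation unit.  A minimal sketch of what's described
 * above, assuming a process-local dummy spinlock and the S_LOCK()/
 * S_UNLOCK() primitives from s_lock.h, is to acquire and release that
 * lock, relying on the acquire+release pair acting as a full barrier:
 *
 *	static slock_t dummy_spinlock;
 *
 *	void
 *	pg_spinlock_barrier(void)
 *	{
 *		S_LOCK(&dummy_spinlock);
 *		S_UNLOCK(&dummy_spinlock);
 *	}
 *
 * Note that a production version must also be safe to call from signal
 * handlers, which a plain non-reentrant spinlock may not be.
 */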

#ifndef pg_compiler_barrier_impl
/*
 * If the compiler/arch combination does not provide compiler barriers,
 * provide a fallback.  The fallback simply consists of a function call into
 * an externally defined function.  That should guarantee compiler barrier
 * semantics except for compilers that do inter-translation-unit/global
 * optimization; those had better provide an actual compiler barrier.
 *
 * A native compiler barrier is of course a lot faster than this...
 */
#define PG_HAVE_COMPILER_BARRIER_EMULATION
extern void pg_extern_compiler_barrier(void);
#define pg_compiler_barrier_impl pg_extern_compiler_barrier
#endif
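
/*
 * Illustration, not part of this header: a sketch of
 * pg_extern_compiler_barrier(), defined in a separate translation unit.
 * The body can be empty; the point is that the compiler cannot see through
 * an out-of-line call and therefore cannot reorder memory accesses across
 * it (which is exactly what breaks under link-time/global optimization,
 * as noted above):
 *
 *	void
 *	pg_extern_compiler_barrier(void)
 *	{
 *	}
 */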


/*
 * If we don't have an atomics implementation for this platform, fall back
 * to providing the atomics API using a spinlock to protect the internal
 * state. Possibly the spinlock implementation uses semaphores internally...
 *
 * We have to be a bit careful here, as it's not guaranteed that atomic
 * variables are mapped to the same address in every process (e.g. dynamic
 * shared memory segments). We can't just hash the address and use that to map
 * to a spinlock. Instead assign a spinlock on initialization of the atomic
 * variable.
 */
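
/*
 * Illustration, not part of this header: a sketch of how initialization
 * could assign the per-variable spinlock, assuming SpinLockInit() from
 * spin.h and the struct layouts defined below.  The static assertion
 * checks that the reserved space really is large enough for slock_t:
 *
 *	void
 *	pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
 *	{
 *		StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
 *						 "size mismatch of atomic_flag vs slock_t");
 *		SpinLockInit((slock_t *) &ptr->sema);
 *		ptr->value = false;
 *	}
 */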
#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) && !defined(PG_HAVE_ATOMIC_U32_SUPPORT)

#define PG_HAVE_ATOMIC_FLAG_SIMULATION
#define PG_HAVE_ATOMIC_FLAG_SUPPORT

typedef struct pg_atomic_flag
{
	/*
	 * To avoid circular includes we can't use slock_t (from s_lock.h) as
	 * the type here. Instead just reserve enough space for all spinlock
	 * types. Some platforms would be content with just one byte instead
	 * of 4, but that's not too much waste.
	 */
#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
	int			sema[4];
#else
	int			sema;
#endif
	volatile bool value;
} pg_atomic_flag;

#endif /* PG_HAVE_ATOMIC_FLAG_SUPPORT */

#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)

#define PG_HAVE_ATOMIC_U32_SIMULATION

#define PG_HAVE_ATOMIC_U32_SUPPORT
typedef struct pg_atomic_uint32
{
	/* Check pg_atomic_flag's definition above for an explanation */
#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
	int			sema[4];
#else
	int			sema;
#endif
	volatile uint32 value;
} pg_atomic_uint32;

#endif /* PG_HAVE_ATOMIC_U32_SUPPORT */

#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT)

#define PG_HAVE_ATOMIC_U64_SIMULATION

#define PG_HAVE_ATOMIC_U64_SUPPORT
typedef struct pg_atomic_uint64
{
	/* Check pg_atomic_flag's definition above for an explanation */
#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
	int			sema[4];
#else
	int			sema;
#endif
	volatile uint64 value;
} pg_atomic_uint64;

#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */

#ifdef PG_HAVE_ATOMIC_FLAG_SIMULATION

#define PG_HAVE_ATOMIC_INIT_FLAG
extern void pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr);

#define PG_HAVE_ATOMIC_TEST_SET_FLAG
extern bool pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr);

#define PG_HAVE_ATOMIC_CLEAR_FLAG
extern void pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr);

#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
extern bool pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr);

#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
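
/*
 * Illustration, not part of this header: the flag operations can be
 * emulated by protecting the plain bool with the embedded spinlock.  A
 * sketch of test-and-set, assuming SpinLockAcquire()/SpinLockRelease()
 * from spin.h; it returns true iff the flag was previously clear:
 *
 *	bool
 *	pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
 *	{
 *		bool		oldval;
 *
 *		SpinLockAcquire((slock_t *) &ptr->sema);
 *		oldval = ptr->value;
 *		ptr->value = true;
 *		SpinLockRelease((slock_t *) &ptr->sema);
 *
 *		return !oldval;
 *	}
 */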

#ifdef PG_HAVE_ATOMIC_U32_SIMULATION

#define PG_HAVE_ATOMIC_INIT_U32
extern void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_);

#define PG_HAVE_ATOMIC_WRITE_U32
extern void pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val);

#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
extern bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
												uint32 *expected, uint32 newval);

#define PG_HAVE_ATOMIC_FETCH_ADD_U32
extern uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_);

#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
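
/*
 * Illustration, not part of this header: a sketch of the spinlock-based
 * compare-and-exchange.  The lock must be taken unconditionally; bailing
 * out when it's contended would emulate only a "weak" cmpxchg, which is
 * not what callers expect:
 *
 *	bool
 *	pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 *										uint32 *expected, uint32 newval)
 *	{
 *		bool		ret;
 *
 *		SpinLockAcquire((slock_t *) &ptr->sema);
 *		ret = ptr->value == *expected;
 *		*expected = ptr->value;
 *		if (ret)
 *			ptr->value = newval;
 *		SpinLockRelease((slock_t *) &ptr->sema);
 *
 *		return ret;
 *	}
 */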


#ifdef PG_HAVE_ATOMIC_U64_SIMULATION

#define PG_HAVE_ATOMIC_INIT_U64
extern void pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_);

#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
extern bool pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
												uint64 *expected, uint64 newval);

#define PG_HAVE_ATOMIC_FETCH_ADD_U64
extern uint64 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_);

#endif /* PG_HAVE_ATOMIC_U64_SIMULATION */
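
/*
 * Illustration, not part of this header: fetch-and-add follows the same
 * pattern, returning the value the variable held before the addition.
 * The 32-bit variant is analogous:
 *
 *	uint64
 *	pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 *	{
 *		uint64		oldval;
 *
 *		SpinLockAcquire((slock_t *) &ptr->sema);
 *		oldval = ptr->value;
 *		ptr->value += add_;
 *		SpinLockRelease((slock_t *) &ptr->sema);
 *
 *		return oldval;
 *	}
 */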