/*-------------------------------------------------------------------------
 *
 * atomics.c
 *    Non-Inline parts of the atomics implementation
 *
 * Portions Copyright (c) 2013-2020, PostgreSQL Global Development Group
 *
 *
 * IDENTIFICATION
 *    src/backend/port/atomics.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"
#include "miscadmin.h"
#include "port/atomics.h"
#include "storage/spin.h"
#ifdef PG_HAVE_MEMORY_BARRIER_EMULATION
#ifdef WIN32
#error "barriers are required (and provided) on WIN32 platforms"
#endif
#include <signal.h>
#endif

#ifdef PG_HAVE_MEMORY_BARRIER_EMULATION
void
pg_spinlock_barrier(void)
{
    /*
     * NB: we have to be reentrant here, some barriers are placed in signal
     * handlers.
     *
     * We use kill(0) for the fallback barrier as we assume that kernels on
     * systems old enough to require fallback barrier support will include an
     * appropriate barrier while checking the existence of the postmaster pid.
     */
    (void) kill(PostmasterPid, 0);
}
#endif

#ifdef PG_HAVE_COMPILER_BARRIER_EMULATION
void
pg_extern_compiler_barrier(void)
{
    /* do nothing */
}
#endif

#ifdef PG_HAVE_ATOMIC_FLAG_SIMULATION
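
/*
 * Initialize an emulated atomic flag.  The locked operations below are
 * serialized through the per-variable lock (a spinlock or, when spinlocks
 * themselves are emulated with semaphores, a dedicated semaphore)
 * initialized here.
 */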
void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
    StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
                     "size mismatch of atomic_flag vs slock_t");

#ifndef HAVE_SPINLOCKS

    /*
     * NB: If we're using semaphore based TAS emulation, be careful to use a
     * separate set of semaphores. Otherwise we'd get in trouble if an atomic
     * var would be manipulated while spinlock is held.
     */
    s_init_lock_sema((slock_t *) &ptr->sema, true);
#else
    SpinLockInit((slock_t *) &ptr->sema);
#endif

    ptr->value = false;
}
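
/*
 * Test-and-set: atomically set the flag and report whether it was
 * previously clear (i.e. whether this caller "won" the flag).
 */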
bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
    uint32      oldval;

    SpinLockAcquire((slock_t *) &ptr->sema);
    oldval = ptr->value;
    ptr->value = true;
    SpinLockRelease((slock_t *) &ptr->sema);

    return oldval == 0;
}
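
/*
 * Clear the flag again, under the same lock that test-and-set uses.
 */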
void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
    SpinLockAcquire((slock_t *) &ptr->sema);
    ptr->value = false;
    SpinLockRelease((slock_t *) &ptr->sema);
}
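
/*
 * Unlocked test: reads the flag without taking the lock, so the result is
 * only a hint and carries no memory-ordering guarantees.  Returns true if
 * the flag is not currently set.
 */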
bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
    return ptr->value == 0;
}

#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */

#ifdef PG_HAVE_ATOMIC_U32_SIMULATION

void
pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
{
    StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
                     "size mismatch of atomic_uint32 vs slock_t");

    /*
     * If we're using semaphore based atomic flags, be careful about nested
     * usage of atomics while a spinlock is held.
     */
#ifndef HAVE_SPINLOCKS
    s_init_lock_sema((slock_t *) &ptr->sema, true);
#else
    SpinLockInit((slock_t *) &ptr->sema);
#endif

    ptr->value = val_;
}

void
pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    /*
     * One might think that an unlocked write doesn't need to acquire the
     * spinlock, but one would be wrong. Even an unlocked write has to cause a
     * concurrent pg_atomic_compare_exchange_u32() (et al) to fail.
     */
    SpinLockAcquire((slock_t *) &ptr->sema);
    ptr->value = val;
    SpinLockRelease((slock_t *) &ptr->sema);
}
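
/*
 * Compare-and-exchange: if *ptr equals *expected, store newval and return
 * true; otherwise leave *ptr unchanged and return false.  In both cases
 * *expected is updated to the value that was found in *ptr.
 */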
bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                    uint32 *expected, uint32 newval)
{
    bool        ret;

    /*
     * Do atomic op under a spinlock. It might look like we could just skip
     * the cmpxchg if the lock isn't available, but that'd just emulate a
     * 'weak' compare and swap. I.e. one that allows spurious failures. Since
     * several algorithms rely on a strong variant and that is efficiently
     * implementable on most major architectures let's emulate it here as
     * well.
     */
    SpinLockAcquire((slock_t *) &ptr->sema);

    /* perform compare/exchange logic */
    ret = ptr->value == *expected;
    *expected = ptr->value;
    if (ret)
        ptr->value = newval;

    /* and release lock */
    SpinLockRelease((slock_t *) &ptr->sema);

    return ret;
}
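
/*
 * Illustrative sketch (not part of this file's API): callers needing other
 * read-modify-write operations typically build them on the strong
 * compare-exchange above with a retry loop, along these lines ('var' and
 * 'mask' are hypothetical):
 *
 *     uint32 old = pg_atomic_read_u32(var);
 *     while (!pg_atomic_compare_exchange_u32(var, &old, old | mask))
 *         ;   // 'old' was refreshed on failure; retry with the new value
 */

/*
 * Fetch-and-add: atomically add add_ to *ptr and return the value it held
 * beforehand.
 */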
uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    uint32      oldval;

    SpinLockAcquire((slock_t *) &ptr->sema);
    oldval = ptr->value;
    ptr->value += add_;
    SpinLockRelease((slock_t *) &ptr->sema);

    return oldval;
}

#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */

#ifdef PG_HAVE_ATOMIC_U64_SIMULATION

void
pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
{
    StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
                     "size mismatch of atomic_uint64 vs slock_t");

    /*
     * If we're using semaphore based atomic flags, be careful about nested
     * usage of atomics while a spinlock is held.
     */
#ifndef HAVE_SPINLOCKS
    s_init_lock_sema((slock_t *) &ptr->sema, true);
#else
    SpinLockInit((slock_t *) &ptr->sema);
#endif

    ptr->value = val_;
}
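
/*
 * 64-bit variant of the compare-and-exchange above; same return value and
 * *expected semantics as the 32-bit version.
 */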
bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                    uint64 *expected, uint64 newval)
{
    bool        ret;

    /*
     * Do atomic op under a spinlock. It might look like we could just skip
     * the cmpxchg if the lock isn't available, but that'd just emulate a
     * 'weak' compare and swap. I.e. one that allows spurious failures. Since
     * several algorithms rely on a strong variant and that is efficiently
     * implementable on most major architectures let's emulate it here as
     * well.
     */
    SpinLockAcquire((slock_t *) &ptr->sema);

    /* perform compare/exchange logic */
    ret = ptr->value == *expected;
    *expected = ptr->value;
    if (ret)
        ptr->value = newval;

    /* and release lock */
    SpinLockRelease((slock_t *) &ptr->sema);

    return ret;
}
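
/*
 * Fetch-and-add, 64-bit variant: atomically add add_ to *ptr and return the
 * previous value.
 */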
uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
    uint64      oldval;

    SpinLockAcquire((slock_t *) &ptr->sema);
    oldval = ptr->value;
    ptr->value += add_;
    SpinLockRelease((slock_t *) &ptr->sema);

    return oldval;
}

#endif /* PG_HAVE_ATOMIC_U64_SIMULATION */