summaryrefslogtreecommitdiffstats
path: root/src/include/port/atomics/generic-acc.h
blob: 842b2dec6ed5870d5f8f21582c1f07a6be4af20d (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
/*-------------------------------------------------------------------------
 *
 * generic-acc.h
 *	  Atomic operations support when using HP's aCC compiler on HP-UX
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * NOTES:
 *
 * Documentation:
 * * inline assembly for Itanium-based HP-UX:
 *   http://h21007.www2.hp.com/portal/download/files/unprot/Itanium/inline_assem_ERS.pdf
 * * Implementing Spinlocks on the Intel (R) Itanium (R) Architecture and PA-RISC
 *   http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
 *
 * Itanium only supports a small set of numbers (-16, -8, -4, -1, 1, 4, 8, 16)
 * for atomic add/sub, so we just implement everything but compare_exchange
 * via the compare_exchange fallbacks in atomics/generic.h.
 *
 * src/include/port/atomics/generic-acc.h
 *
 * -------------------------------------------------------------------------
 */

#include <machine/sys/inline.h>

/*
 * Compiler-only barrier: _Asm_sched_fence() prevents the compiler from
 * reordering memory accesses across this point, but emits no hardware
 * fence instruction.
 */
#define pg_compiler_barrier_impl()	_Asm_sched_fence()

#if defined(HAVE_ATOMICS)

/* IA64 always has 32/64 bit atomics */

#define PG_HAVE_ATOMIC_U32_SUPPORT
/* 32 bit atomic value; access only through the pg_atomic_* API */
typedef struct pg_atomic_uint32
{
	volatile uint32 value;
} pg_atomic_uint32;

#define PG_HAVE_ATOMIC_U64_SUPPORT
/* 64 bit atomic value; access only through the pg_atomic_* API */
typedef struct pg_atomic_uint64
{
	/*
	 * Alignment is guaranteed to be 64bit. Search for "Well-behaved
	 * application restrictions" => "Data alignment and data sharing" on HP's
	 * website. Unfortunately the URL doesn't seem to be stable enough to
	 * include.
	 */
	volatile uint64 value;
} pg_atomic_uint64;


/*
 * A "minor" fence for the _Asm_mov_to_ar() calls below.  Per the flag
 * names, this orders the operation with respect to calls and system
 * instructions in both directions, without acting as a full memory
 * fence (NOTE(review): exact semantics are defined by HP's inline
 * assembly ERS — confirm there).
 */
#define MINOR_FENCE (_Asm_fence) (_UP_CALL_FENCE | _UP_SYS_FENCE | \
								 _DOWN_CALL_FENCE | _DOWN_SYS_FENCE )

#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
/*
 * Atomically compare-and-swap a 32 bit value.
 *
 * If ptr->value equals *expected it is replaced by newval and true is
 * returned; otherwise *expected is overwritten with the value actually
 * found and false is returned.  Acts as a full memory barrier (explicit
 * mf plus a release-semantics cmpxchg) in either case.
 *
 * Itanium's cmpxchg instruction compares the memory operand against the
 * ar.ccv application register, so the expected value must be loaded into
 * ar.ccv before issuing the cmpxchg.
 */
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
									uint32 *expected, uint32 newval)
{
	bool	ret;
	uint32	current;

	/* load the comparison value into ar.ccv for the cmpxchg below */
	_Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
	/*
	 * We want a barrier, not just release/acquire semantics.
	 */
	_Asm_mf();
	/*
	 * Notes:
	 * _DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the compiler
	 */
	current =  _Asm_cmpxchg(_SZ_W, /* word */
							_SEM_REL,
							&ptr->value,
							newval, _LDHINT_NONE,
							_DOWN_MEM_FENCE | _UP_MEM_FENCE);
	/* success iff the value we found matched the caller's expectation */
	ret = current == *expected;
	/* report the observed value back to the caller, as the API requires */
	*expected = current;
	return ret;
}


#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
/*
 * Atomically compare-and-swap a 64 bit value.
 *
 * If ptr->value equals *expected it is replaced by newval and true is
 * returned; otherwise *expected is overwritten with the value actually
 * found and false is returned.  Acts as a full memory barrier (explicit
 * mf plus a release-semantics cmpxchg) in either case.
 *
 * As with the 32 bit variant, cmpxchg compares against the ar.ccv
 * application register, which therefore has to be loaded with the
 * expected value first.
 */
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
									uint64 *expected, uint64 newval)
{
	bool	ret;
	uint64	current;

	/* load the comparison value into ar.ccv for the cmpxchg below */
	_Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
	/* full barrier wanted, not just the cmpxchg's release semantics */
	_Asm_mf();
	/* _DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the compiler */
	current =  _Asm_cmpxchg(_SZ_D, /* doubleword */
							_SEM_REL,
							&ptr->value,
							newval, _LDHINT_NONE,
							_DOWN_MEM_FENCE | _UP_MEM_FENCE);
	/* success iff the value we found matched the caller's expectation */
	ret = current == *expected;
	/* report the observed value back to the caller, as the API requires */
	*expected = current;
	return ret;
}

#undef MINOR_FENCE

#endif /* defined(HAVE_ATOMICS) */