/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CSKY_ATOMIC_H
#define __ASM_CSKY_ATOMIC_H
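/*
 * SMP kernels implement the 32-bit atomics with ldex.w/stex.w
 * (load-/store-exclusive) retry loops; 64-bit atomics and UP kernels
 * use the asm-generic fallbacks.
 */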
#ifdef CONFIG_SMP
#include <asm-generic/atomic64.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
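/*
 * Hook the generic acquire/release fences up to the C-SKY barrier
 * primitives from <asm/barrier.h>: acquire orders the prior load
 * before all later accesses, release orders all prior accesses before
 * the later store.
 */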
#define __atomic_acquire_fence() __bar_brarw()
#define __atomic_release_fence() __bar_brwaw()
static __always_inline int arch_atomic_read(const atomic_t *v)
{
return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
WRITE_ONCE(v->counter, i);
}
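/*
 * Relaxed read-modify-write without a return value: ldex.w gains
 * exclusive access to the word, stex.w stores it back only if the
 * reservation still holds and leaves 1 (success) or 0 (failure) in
 * its source register, so "bez %0, 1b" loops until the update lands.
 */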
#define ATOMIC_OP(op) \
static __always_inline \
void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
__asm__ __volatile__ ( \
"1: ldex.w %0, (%2) \n" \
" " #op " %0, %1 \n" \
" stex.w %0, (%2) \n" \
" bez %0, 1b \n" \
: "=&r" (tmp) \
: "r" (i), "r" (&v->counter) \
: "memory"); \
}
ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP( or)
ATOMIC_OP(xor)
#undef ATOMIC_OP
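/*
 * Relaxed fetch-and-op: the same retry loop, but the old value is
 * copied to %1 ("mov %1, %0") before the operation and returned.
 */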
#define ATOMIC_FETCH_OP(op) \
static __always_inline \
int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
register int ret, tmp; \
__asm__ __volatile__ ( \
"1: ldex.w %0, (%3) \n" \
" mov %1, %0 \n" \
" " #op " %0, %2 \n" \
" stex.w %0, (%3) \n" \
" bez %0, 1b \n" \
: "=&r" (tmp), "=&r" (ret) \
: "r" (i), "r"(&v->counter) \
: "memory"); \
return ret; \
}
#define ATOMIC_OP_RETURN(op, c_op) \
static __always_inline \
int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
return arch_atomic_fetch_##op##_relaxed(i, v) c_op i; \
}
#define ATOMIC_OPS(op, c_op) \
ATOMIC_FETCH_OP(op) \
ATOMIC_OP_RETURN(op, c_op)
ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#define ATOMIC_OPS(op) \
ATOMIC_FETCH_OP(op)
ATOMIC_OPS(and)
ATOMIC_OPS( or)
ATOMIC_OPS(xor)
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
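/*
 * The conditional operations below bracket the LL/SC loop with
 * RELEASE_FENCE and FULL_FENCE (shared with <asm/cmpxchg.h>), making
 * a successful update fully ordered; the early exit to label 2 skips
 * the trailing fence when no store is performed.
 * arch_atomic_fetch_add_unless() adds @a to @v unless @v equals @u
 * and returns the original value.
 */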
static __always_inline int
arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, tmp;
__asm__ __volatile__ (
RELEASE_FENCE
"1: ldex.w %0, (%3) \n"
" cmpne %0, %4 \n"
" bf 2f \n"
" mov %1, %0 \n"
" add %1, %2 \n"
" stex.w %1, (%3) \n"
" bez %1, 1b \n"
FULL_FENCE
"2:\n"
: "=&r" (prev), "=&r" (tmp)
: "r" (a), "r" (&v->counter), "r" (u)
: "memory");
return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
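/*
 * arch_atomic_inc_unless_negative() increments @v and returns true
 * unless the current value is negative. %1 (rc) is the result flag:
 * cleared before the sign test and set to 1 only when the increment
 * is attempted, so rc (not the scratch register) is what gets
 * returned.
 */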
static __always_inline bool
arch_atomic_inc_unless_negative(atomic_t *v)
{
int rc, tmp;
__asm__ __volatile__ (
RELEASE_FENCE
"1: ldex.w %0, (%2) \n"
" movi %1, 0 \n"
" blz %0, 2f \n"
" movi %1, 1 \n"
" addi %0, 1 \n"
" stex.w %0, (%2) \n"
" bez %0, 1b \n"
FULL_FENCE
"2:\n"
: "=&r" (tmp), "=&r" (rc)
: "r" (&v->counter)
: "memory");
return rc ? true : false;
}
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
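/*
 * Mirror image of the above: decrement @v and return true unless the
 * current value is positive; rc again carries the result flag.
 */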
static __always_inline bool
arch_atomic_dec_unless_positive(atomic_t *v)
{
int rc, tmp;
__asm__ __volatile__ (
RELEASE_FENCE
"1: ldex.w %0, (%2) \n"
" movi %1, 0 \n"
" bhz %0, 2f \n"
" movi %1, 1 \n"
" subi %0, 1 \n"
" stex.w %0, (%2) \n"
" bez %0, 1b \n"
FULL_FENCE
"2:\n"
: "=&r" (tmp), "=&r" (rc)
: "r" (&v->counter)
: "memory");
return rc ? true : false;
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
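/*
 * arch_atomic_dec_if_positive() returns the decremented value even
 * when it is negative and the store was skipped: after a successful
 * stex.w, %1 holds the success flag rather than the new value, so
 * "dec - 1" recomputes it from %0 (the last value loaded).
 */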
static __always_inline int
arch_atomic_dec_if_positive(atomic_t *v)
{
int dec, tmp;
__asm__ __volatile__ (
RELEASE_FENCE
"1: ldex.w %0, (%2) \n"
" subi %1, %0, 1 \n"
" blz %1, 2f \n"
" stex.w %1, (%2) \n"
" bez %1, 1b \n"
FULL_FENCE
"2:\n"
: "=&r" (dec), "=&r" (tmp)
: "r" (&v->counter)
: "memory");
return dec - 1;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
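/*
 * xchg/cmpxchg are thin wrappers around the helpers in
 * <asm/cmpxchg.h>. Only the relaxed, acquire and fully-ordered
 * variants are spelled out; the generic atomic layer derives the rest
 * from the _relaxed versions and the fences defined above.
 */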
#define ATOMIC_OP() \
static __always_inline \
int arch_atomic_xchg_relaxed(atomic_t *v, int n) \
{ \
return __xchg_relaxed(n, &(v->counter), 4); \
} \
static __always_inline \
int arch_atomic_cmpxchg_relaxed(atomic_t *v, int o, int n) \
{ \
return __cmpxchg_relaxed(&(v->counter), o, n, 4); \
} \
static __always_inline \
int arch_atomic_cmpxchg_acquire(atomic_t *v, int o, int n) \
{ \
return __cmpxchg_acquire(&(v->counter), o, n, 4); \
} \
static __always_inline \
int arch_atomic_cmpxchg(atomic_t *v, int o, int n) \
{ \
return __cmpxchg(&(v->counter), o, n, 4); \
}
#define ATOMIC_OPS() \
ATOMIC_OP()
ATOMIC_OPS()
#define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed
#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#undef ATOMIC_OPS
#undef ATOMIC_OP
#else
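/* UP kernels: use the generated generic atomics. */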
#include <asm-generic/atomic.h>
#endif
#endif /* __ASM_CSKY_ATOMIC_H */