GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/include/asm/atomic_lse.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H
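
/*
 * Unordered LSE "ST*" atomics (STCLR, STSET, STEOR, STADD): read-modify-write
 * instructions with no result register. __LSE_PREAMBLE enables the LSE
 * instruction set extension for the assembler.
 */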
#define ATOMIC_OP(op, asm_op) \
static __always_inline void \
__lse_atomic_##op(int i, atomic_t *v) \
{ \
	asm volatile( \
	__LSE_PREAMBLE \
	" " #asm_op " %w[i], %[v]\n" \
	: [v] "+Q" (v->counter) \
	: [i] "r" (i)); \
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)
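
/* LSE has no atomic subtract; implement sub as an add of the negated value. */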
static __always_inline void __lse_atomic_sub(int i, atomic_t *v)
{
	__lse_atomic_add(-i, v);
}

#undef ATOMIC_OP
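
/*
 * LSE "LD*" fetch atomics (LDCLR, LDSET, LDEOR, LDADD): return the value the
 * variable held before the operation. The "mb" argument selects the ordering
 * suffix (none, "a" acquire, "l" release, "al" fully ordered) and "cl" lists
 * the extra clobbers ("memory") needed by the ordered variants.
 */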
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static __always_inline int \
__lse_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
	int old; \
\
	asm volatile( \
	__LSE_PREAMBLE \
	" " #asm_op #mb " %w[i], %w[old], %[v]" \
	: [v] "+Q" (v->counter), \
	  [old] "=r" (old) \
	: [i] "r" (i) \
	: cl); \
\
	return old; \
}

#define ATOMIC_FETCH_OPS(op, asm_op) \
	ATOMIC_FETCH_OP(_relaxed, , op, asm_op) \
	ATOMIC_FETCH_OP(_acquire, a, op, asm_op, "memory") \
	ATOMIC_FETCH_OP(_release, l, op, asm_op, "memory") \
	ATOMIC_FETCH_OP( , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS
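
/* fetch_sub is fetch_add of the negated value, for each ordering variant. */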
#define ATOMIC_FETCH_OP_SUB(name) \
static __always_inline int \
__lse_atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
	return __lse_atomic_fetch_add##name(-i, v); \
}

ATOMIC_FETCH_OP_SUB(_relaxed)
ATOMIC_FETCH_OP_SUB(_acquire)
ATOMIC_FETCH_OP_SUB(_release)
ATOMIC_FETCH_OP_SUB( )

#undef ATOMIC_FETCH_OP_SUB
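
/*
 * add_return/sub_return are built from the fetch ops: the new value is
 * recomputed from the returned old value and the operand.
 */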
#define ATOMIC_OP_ADD_SUB_RETURN(name) \
static __always_inline int \
__lse_atomic_add_return##name(int i, atomic_t *v) \
{ \
	return __lse_atomic_fetch_add##name(i, v) + i; \
} \
\
static __always_inline int \
__lse_atomic_sub_return##name(int i, atomic_t *v) \
{ \
	return __lse_atomic_fetch_sub##name(i, v) - i; \
}

ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC_OP_ADD_SUB_RETURN(_acquire)
ATOMIC_OP_ADD_SUB_RETURN(_release)
ATOMIC_OP_ADD_SUB_RETURN( )

#undef ATOMIC_OP_ADD_SUB_RETURN
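
/*
 * There is no LSE AND instruction, but there is a bit-clear (LDCLR/STCLR), so
 * and/fetch_and are implemented as andnot/fetch_andnot of the complement.
 */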
static __always_inline void __lse_atomic_and(int i, atomic_t *v)
{
	return __lse_atomic_andnot(~i, v);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static __always_inline int \
__lse_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
	return __lse_atomic_fetch_andnot##name(~i, v); \
}

ATOMIC_FETCH_OP_AND(_relaxed, )
ATOMIC_FETCH_OP_AND(_acquire, a, "memory")
ATOMIC_FETCH_OP_AND(_release, l, "memory")
ATOMIC_FETCH_OP_AND( , al, "memory")

#undef ATOMIC_FETCH_OP_AND
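
/*
 * 64-bit counterparts of the above, operating on atomic64_t with full-width
 * (X-register) operands.
 */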
#define ATOMIC64_OP(op, asm_op) \
static __always_inline void \
__lse_atomic64_##op(s64 i, atomic64_t *v) \
{ \
	asm volatile( \
	__LSE_PREAMBLE \
	" " #asm_op " %[i], %[v]\n" \
	: [v] "+Q" (v->counter) \
	: [i] "r" (i)); \
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

static __always_inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
	__lse_atomic64_add(-i, v);
}

#undef ATOMIC64_OP
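
/* 64-bit LD* fetch ops; same ordering-suffix scheme as the 32-bit versions. */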
#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static __always_inline long \
__lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
{ \
	s64 old; \
\
	asm volatile( \
	__LSE_PREAMBLE \
	" " #asm_op #mb " %[i], %[old], %[v]" \
	: [v] "+Q" (v->counter), \
	  [old] "=r" (old) \
	: [i] "r" (i) \
	: cl); \
\
	return old; \
}

#define ATOMIC64_FETCH_OPS(op, asm_op) \
	ATOMIC64_FETCH_OP(_relaxed, , op, asm_op) \
	ATOMIC64_FETCH_OP(_acquire, a, op, asm_op, "memory") \
	ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \
	ATOMIC64_FETCH_OP( , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_FETCH_OP_SUB(name) \
static __always_inline long \
__lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
{ \
	return __lse_atomic64_fetch_add##name(-i, v); \
}

ATOMIC64_FETCH_OP_SUB(_relaxed)
ATOMIC64_FETCH_OP_SUB(_acquire)
ATOMIC64_FETCH_OP_SUB(_release)
ATOMIC64_FETCH_OP_SUB( )

#undef ATOMIC64_FETCH_OP_SUB

#define ATOMIC64_OP_ADD_SUB_RETURN(name) \
static __always_inline long \
__lse_atomic64_add_return##name(s64 i, atomic64_t *v) \
{ \
	return __lse_atomic64_fetch_add##name(i, v) + i; \
} \
\
static __always_inline long \
__lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
{ \
	return __lse_atomic64_fetch_sub##name(i, v) - i; \
}

ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
ATOMIC64_OP_ADD_SUB_RETURN(_acquire)
ATOMIC64_OP_ADD_SUB_RETURN(_release)
ATOMIC64_OP_ADD_SUB_RETURN( )

#undef ATOMIC64_OP_ADD_SUB_RETURN

static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
	return __lse_atomic64_andnot(~i, v);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static __always_inline long \
__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{ \
	return __lse_atomic64_fetch_andnot##name(~i, v); \
}

ATOMIC64_FETCH_OP_AND(_relaxed, )
ATOMIC64_FETCH_OP_AND(_acquire, a, "memory")
ATOMIC64_FETCH_OP_AND(_release, l, "memory")
ATOMIC64_FETCH_OP_AND( , al, "memory")

#undef ATOMIC64_FETCH_OP_AND
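
/*
 * Decrement v->counter only if the result stays non-negative, using a CASAL
 * retry loop: load the current value, compute value - 1, bail out if that is
 * negative, then CAS the decremented value in and retry on contention. The
 * register holding the v pointer is reused to carry the return value.
 */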
static __always_inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long tmp;

	asm volatile(
	__LSE_PREAMBLE
	"1: ldr %x[tmp], %[v]\n"
	" subs %[ret], %x[tmp], #1\n"
	" b.lt 2f\n"
	" casal %x[tmp], %[ret], %[v]\n"
	" sub %x[tmp], %x[tmp], #1\n"
	" sub %x[tmp], %x[tmp], %[ret]\n"
	" cbnz %x[tmp], 1b\n"
	"2:"
	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	:
	: "cc", "memory");

	return (long)v;
}
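
/*
 * cmpxchg for 8/16/32/64-bit values using the CAS instruction. "w"/"x" picks
 * the register width, "sfx" the size suffix (b/h), "mb" the ordering suffix,
 * and "cl" the clobber list for the ordered variants.
 */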
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
static __always_inline u##sz \
__lse__cmpxchg_case_##name##sz(volatile void *ptr, \
			       u##sz old, \
			       u##sz new) \
{ \
	asm volatile( \
	__LSE_PREAMBLE \
	" cas" #mb #sfx " %" #w "[old], %" #w "[new], %[v]\n" \
	: [v] "+Q" (*(u##sz *)ptr), \
	  [old] "+r" (old) \
	: [new] "rZ" (new) \
	: cl); \
\
	return old; \
}

__CMPXCHG_CASE(w, b, , 8, )
__CMPXCHG_CASE(w, h, , 16, )
__CMPXCHG_CASE(w, , , 32, )
__CMPXCHG_CASE(x, , , 64, )
__CMPXCHG_CASE(w, b, acq_, 8, a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16, a, "memory")
__CMPXCHG_CASE(w, , acq_, 32, a, "memory")
__CMPXCHG_CASE(x, , acq_, 64, a, "memory")
__CMPXCHG_CASE(w, b, rel_, 8, l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16, l, "memory")
__CMPXCHG_CASE(w, , rel_, 32, l, "memory")
__CMPXCHG_CASE(x, , rel_, 64, l, "memory")
__CMPXCHG_CASE(w, b, mb_, 8, al, "memory")
__CMPXCHG_CASE(w, h, mb_, 16, al, "memory")
__CMPXCHG_CASE(w, , mb_, 32, al, "memory")
__CMPXCHG_CASE(x, , mb_, 64, al, "memory")

#undef __CMPXCHG_CASE
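
/*
 * 128-bit cmpxchg using CASP, which needs an even/odd pair of consecutive
 * registers for each of the compare and swap values; x0/x1 and x2/x3 are
 * pinned explicitly for that reason.
 */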
#define __CMPXCHG128(name, mb, cl...) \
static __always_inline u128 \
__lse__cmpxchg128##name(volatile u128 *ptr, u128 old, u128 new) \
{ \
	union __u128_halves r, o = { .full = (old) }, \
			       n = { .full = (new) }; \
	register unsigned long x0 asm ("x0") = o.low; \
	register unsigned long x1 asm ("x1") = o.high; \
	register unsigned long x2 asm ("x2") = n.low; \
	register unsigned long x3 asm ("x3") = n.high; \
	register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
\
	asm volatile( \
	__LSE_PREAMBLE \
	" casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n" \
	: [old1] "+&r" (x0), [old2] "+&r" (x1), \
	  [v] "+Q" (*(u128 *)ptr) \
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
	  [oldval1] "r" (o.low), [oldval2] "r" (o.high) \
	: cl); \
\
	r.low = x0; r.high = x1; \
\
	return r.full; \
}

__CMPXCHG128( , )
__CMPXCHG128(_mb, al, "memory")

#undef __CMPXCHG128

#endif /* __ASM_ATOMIC_LSE_H */