GitHub Repository: torvalds/linux
Path: arch/loongarch/include/asm/atomic.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Atomic operations.
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
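
/*
 * Pick the LL/SC and atomic memory-operation (AM*) mnemonics matching
 * the native word size: the ".w" (32-bit) forms when long is 4 bytes,
 * the ".d" (64-bit) forms when it is 8 bytes.  The "_db" variants are
 * the same AM* operations with full-barrier ordering (roughly the
 * plain form plus an implicit barrier).
 */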
#if __SIZEOF_LONG__ == 4
#define __LL		"ll.w "
#define __SC		"sc.w "
#define __AMADD		"amadd.w "
#define __AMOR		"amor.w "
#define __AMAND_DB	"amand_db.w "
#define __AMOR_DB	"amor_db.w "
#define __AMXOR_DB	"amxor_db.w "
#elif __SIZEOF_LONG__ == 8
#define __LL		"ll.d "
#define __SC		"sc.d "
#define __AMADD		"amadd.d "
#define __AMOR		"amor.d "
#define __AMAND_DB	"amand_db.d "
#define __AMOR_DB	"amor_db.d "
#define __AMXOR_DB	"amxor_db.d "
#endif

#define ATOMIC_INIT(i)		{ (i) }

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
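
/*
 * The three macros below generate the 32-bit atomic primitives from a
 * single AM* instruction.  "am"#asm_op builds the mnemonic (e.g.
 * "amadd.w"), and the mb argument pastes in "_db" for the fully
 * ordered variants or nothing for the _relaxed ones.  ATOMIC_OP
 * discards the old value by targeting $zero; ATOMIC_OP_RETURN fetches
 * the old value and returns old c_op I (i.e. the new value), while
 * ATOMIC_FETCH_OP returns the old value itself.  "ZB" constrains the
 * counter to a register-addressed memory operand with zero offset,
 * which is what the AM* instructions expect.
 */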
#define ATOMIC_OP(op, I, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	__asm__ __volatile__( \
	"am"#asm_op".w" " $zero, %1, %0 \n" \
	: "+ZB" (v->counter) \
	: "r" (I) \
	: "memory"); \
}

#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \
{ \
	int result; \
 \
	__asm__ __volatile__( \
	"am"#asm_op#mb".w" " %1, %2, %0 \n" \
	: "+ZB" (v->counter), "=&r" (result) \
	: "r" (I) \
	: "memory"); \
 \
	return result c_op I; \
}

#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \
static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \
{ \
	int result; \
 \
	__asm__ __volatile__( \
	"am"#asm_op#mb".w" " %1, %2, %0 \n" \
	: "+ZB" (v->counter), "=&r" (result) \
	: "r" (I) \
	: "memory"); \
 \
	return result; \
}

#define ATOMIC_OPS(op, I, asm_op, c_op) \
	ATOMIC_OP(op, I, asm_op) \
	ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \
	ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
	ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
	ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
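
/*
 * Generate add and sub.  There is no "amsub" instruction, so sub is
 * obtained by instantiating the add operation with a negated operand
 * (an amadd of -i).
 */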
ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)
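
/*
 * The _db forms are fully ordered, so the fully ordered functions are
 * reused for the _acquire and _release variants; only _relaxed maps to
 * the barrier-less implementations.
 */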
#define arch_atomic_add_return		arch_atomic_add_return
#define arch_atomic_add_return_acquire	arch_atomic_add_return
#define arch_atomic_add_return_release	arch_atomic_add_return
#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return		arch_atomic_sub_return
#define arch_atomic_sub_return_acquire	arch_atomic_sub_return
#define arch_atomic_sub_return_release	arch_atomic_sub_return
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add		arch_atomic_fetch_add
#define arch_atomic_fetch_add_acquire	arch_atomic_fetch_add
#define arch_atomic_fetch_add_release	arch_atomic_fetch_add
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub		arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_acquire	arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_release	arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS

#define ATOMIC_OPS(op, I, asm_op) \
	ATOMIC_OP(op, I, asm_op) \
	ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
	ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)

ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)

#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_and_acquire	arch_atomic_fetch_and
#define arch_atomic_fetch_and_release	arch_atomic_fetch_and
#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_or_acquire	arch_atomic_fetch_or
#define arch_atomic_fetch_or_release	arch_atomic_fetch_or
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_acquire	arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_release	arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
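
/*
 * Classic LL/SC loop: reload the counter, bail out to label 1 if it
 * equals @u, otherwise try to store the sum and retry from 0 if the
 * store-conditional fails.  The old value is returned, which is what
 * generic helpers such as atomic_inc_not_zero() build on.
 * __WEAK_LLSC_MB provides the ordering needed on the early-exit path,
 * where no sc.w was executed.
 */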
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0: ll.w %[p], %[c]\n"
		" beq %[p], %[u], 1f\n"
		" add.w %[rc], %[p], %[a]\n"
		" sc.w %[rc], %[c]\n"
		" beqz %[rc], 0b\n"
		" b 2f\n"
		"1:\n"
		__WEAK_LLSC_MB
		"2:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc),
		  [c]"=ZB" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");

	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
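
/*
 * Subtract @i and store the result only if it stays non-negative; the
 * computed value is returned either way.  When @i is a compile-time
 * constant that fits the "I" constraint (a signed 12-bit immediate),
 * a single addi.w on the negated constant is used; otherwise the
 * subtraction goes through a register with sub.w.
 */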
static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
{
	int result;
	int temp;

	if (__builtin_constant_p(i)) {
		__asm__ __volatile__(
		"1: ll.w %1, %2 # atomic_sub_if_positive\n"
		" addi.w %0, %1, %3 \n"
		" move %1, %0 \n"
		" bltz %0, 2f \n"
		" sc.w %1, %2 \n"
		" beqz %1, 1b \n"
		"2: \n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1: ll.w %1, %2 # atomic_sub_if_positive\n"
		" sub.w %0, %1, %3 \n"
		" move %1, %0 \n"
		" bltz %0, 2f \n"
		" sc.w %1, %2 \n"
		" beqz %1, 1b \n"
		"2: \n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "r" (i));
	}

	return result;
}

#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT
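
/*
 * 64-bit (atomic64_t) operations.  These mirror the 32-bit
 * implementations above, using the ".d" forms of the same instructions
 * and long operands.
 */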

#define ATOMIC64_INIT(i)	  { (i) }

#define arch_atomic64_read(v)	READ_ONCE((v)->counter)
#define arch_atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))

#define ATOMIC64_OP(op, I, asm_op) \
static inline void arch_atomic64_##op(long i, atomic64_t *v) \
{ \
	__asm__ __volatile__( \
	"am"#asm_op".d " " $zero, %1, %0 \n" \
	: "+ZB" (v->counter) \
	: "r" (I) \
	: "memory"); \
}

#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \
{ \
	long result; \
	__asm__ __volatile__( \
	"am"#asm_op#mb".d " " %1, %2, %0 \n" \
	: "+ZB" (v->counter), "=&r" (result) \
	: "r" (I) \
	: "memory"); \
 \
	return result c_op I; \
}

#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \
static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \
{ \
	long result; \
 \
	__asm__ __volatile__( \
	"am"#asm_op#mb".d " " %1, %2, %0 \n" \
	: "+ZB" (v->counter), "=&r" (result) \
	: "r" (I) \
	: "memory"); \
 \
	return result; \
}

#define ATOMIC64_OPS(op, I, asm_op, c_op) \
	ATOMIC64_OP(op, I, asm_op) \
	ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \
	ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
	ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
	ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)

ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)

#define arch_atomic64_add_return		arch_atomic64_add_return
#define arch_atomic64_add_return_acquire	arch_atomic64_add_return
#define arch_atomic64_add_return_release	arch_atomic64_add_return
#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return		arch_atomic64_sub_return
#define arch_atomic64_sub_return_acquire	arch_atomic64_sub_return
#define arch_atomic64_sub_return_release	arch_atomic64_sub_return
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add			arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_acquire		arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_release		arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub			arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_acquire		arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_release		arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS

#define ATOMIC64_OPS(op, I, asm_op) \
	ATOMIC64_OP(op, I, asm_op) \
	ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
	ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)

ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)

#define arch_atomic64_fetch_and			arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_acquire		arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_release		arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or			arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_acquire		arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_release		arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor			arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_acquire		arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_release		arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0: ll.d %[p], %[c]\n"
		" beq %[p], %[u], 1f\n"
		" add.d %[rc], %[p], %[a]\n"
		" sc.d %[rc], %[c]\n"
		" beqz %[rc], 0b\n"
		" b 2f\n"
		"1:\n"
		__WEAK_LLSC_MB
		"2:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc),
		  [c] "=ZB" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");

	return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
{
	long result;
	long temp;

	if (__builtin_constant_p(i)) {
		__asm__ __volatile__(
		"1: ll.d %1, %2 # atomic64_sub_if_positive \n"
		" addi.d %0, %1, %3 \n"
		" move %1, %0 \n"
		" bltz %0, 2f \n"
		" sc.d %1, %2 \n"
		" beqz %1, 1b \n"
		"2: \n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1: ll.d %1, %2 # atomic64_sub_if_positive \n"
		" sub.d %0, %1, %3 \n"
		" move %1, %0 \n"
		" bltz %0, 2f \n"
		" sc.d %1, %2 \n"
		" beqz %1, 1b \n"
		"2: \n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "r" (i));
	}

	return result;
}

#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */