GitHub Repository: torvalds/linux
Path: blob/master/arch/mips/include/asm/atomic.h
/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/sync.h>

#define ATOMIC_OPS(pfx, type)						\
static __always_inline type arch_##pfx##_read(const pfx##_t *v)	\
{									\
	return READ_ONCE(v->counter);					\
}									\
									\
static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)	\
{									\
	WRITE_ONCE(v->counter, i);					\
}

ATOMIC_OPS(atomic, int)

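/*
 * For reference, a sketch of what ATOMIC_OPS(atomic, int) above
 * expands to (illustrative only, not part of the original header):
 *
 *	static __always_inline int arch_atomic_read(const atomic_t *v)
 *	{
 *		return READ_ONCE(v->counter);
 *	}
 *
 *	static __always_inline void arch_atomic_set(atomic_t *v, int i)
 *	{
 *		WRITE_ONCE(v->counter, i);
 *	}
 */
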
#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i)	{ (i) }
ATOMIC_OPS(atomic64, s64)
#endif

#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)		\
{									\
	type temp;							\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return;							\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push				\n"		\
	"	.set	" MIPS_ISA_LEVEL "		\n"		\
	"	" __SYNC(full, loongson3_war) "		\n"		\
	"1:	" #ll "	%0, %1		# " #pfx "_" #op "\n"		\
	"	" #asm_op " %0, %2			\n"		\
	"	" #sc "	%0, %1				\n"		\
	"\t" __stringify(SC_BEQZ) "	%0, 1b		\n"		\
	"	.set	pop				\n"		\
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)		\
	: "Ir" (i) : __LLSC_CLOBBER);					\
}

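/*
 * The LL/SC loop above, sketched for the ATOMIC_OPS(atomic, add, ...)
 * instantiation below (illustrative assembly; register names and the
 * addressing of v->counter are simplified):
 *
 *	1:	ll	$t0, 0(v)	# load-linked the counter
 *		addu	$t0, $t0, i	# apply the operation
 *		sc	$t0, 0(v)	# store-conditional; $t0 = success flag
 *		beqz	$t0, 1b		# retry if another CPU wrote first
 */
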
#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)		\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push				\n"		\
	"	.set	" MIPS_ISA_LEVEL "		\n"		\
	"	" __SYNC(full, loongson3_war) "		\n"		\
	"1:	" #ll "	%1, %2		# " #pfx "_" #op "_return\n"	\
	"	" #asm_op " %0, %1, %3			\n"		\
	"	" #sc "	%0, %2				\n"		\
	"\t" __stringify(SC_BEQZ) "	%0, 1b		\n"		\
	"	" #asm_op " %0, %1, %3			\n"		\
	"	.set	pop				\n"		\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}

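/*
 * Note on ATOMIC_OP_RETURN: the second "#asm_op" after the loop
 * recomputes the result from the old value still held in %1, because
 * the store-conditional overwrote %0 with its success flag. The
 * function therefore returns the new value of the counter.
 */
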
#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)			\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push				\n"		\
	"	.set	" MIPS_ISA_LEVEL "		\n"		\
	"	" __SYNC(full, loongson3_war) "		\n"		\
	"1:	" #ll "	%1, %2		# " #pfx "_fetch_" #op "\n"	\
	"	" #asm_op " %0, %1, %3			\n"		\
	"	" #sc "	%0, %2				\n"		\
	"\t" __stringify(SC_BEQZ) "	%0, 1b		\n"		\
	"	.set	pop				\n"		\
	"	move	%0, %1				\n"		\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}

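/*
 * Note on ATOMIC_FETCH_OP: the final "move %0, %1" copies the value
 * loaded by ll/lld into the result register, so fetch_##op returns the
 * counter's value from *before* the operation was applied.
 */
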
#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

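/*
 * Usage sketch (illustrative only; kernel code normally reaches these
 * through the generic <linux/atomic.h> wrappers, which layer the
 * ordered variants on top of the _relaxed ops defined here):
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 *
 *	arch_atomic_add(2, &refs);			    // counter is now 3
 *	int old = arch_atomic_fetch_sub_relaxed(1, &refs);  // old == 3
 *	int new = arch_atomic_sub_return_relaxed(2, &refs); // new == 0
 */
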
#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
# define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
# define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
# define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

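/*
 * Note: the bitwise ops below are generated without ATOMIC_OP_RETURN,
 * matching the generic atomic API, which provides fetch_and/or/xor but
 * no and/or/xor_return variants.
 */
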
ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
# define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
# define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

188
/*
189
* atomic_sub_if_positive - conditionally subtract integer from atomic variable
190
* @i: integer value to subtract
191
* @v: pointer of type atomic_t
192
*
193
* Atomically test @v and subtract @i if @v is greater or equal than @i.
194
* The function returns the old value of @v minus @i.
195
*/
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{									\
	type temp, result;						\
									\
	smp_mb__before_atomic();					\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result -= i;						\
		if (result >= 0)					\
			v->counter = result;				\
		raw_local_irq_restore(flags);				\
		smp_mb__after_atomic();					\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push				\n"		\
	"	.set	" MIPS_ISA_LEVEL "		\n"		\
	"	" __SYNC(full, loongson3_war) "		\n"		\
	"1:	" #ll "	%1, %2		# atomic_sub_if_positive\n"	\
	"	.set	pop				\n"		\
	"	" #op "	%0, %1, %3			\n"		\
	"	move	%1, %0				\n"		\
	"	bltz	%0, 2f				\n"		\
	"	.set	push				\n"		\
	"	.set	" MIPS_ISA_LEVEL "		\n"		\
	"	" #sc "	%1, %2				\n"		\
	"	" __stringify(SC_BEQZ) "	%1, 1b	\n"		\
	"2:	" __SYNC(full, loongson3_war) "		\n"		\
	"	.set	pop				\n"		\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i)							\
	: __LLSC_CLOBBER);						\
									\
	/*								\
	 * In the Loongson3 workaround case we already have a		\
	 * completion barrier at 2: above, which is needed due to the	\
	 * bltz that can branch to code outside of the LL/SC loop. As	\
	 * such, we don't need to emit another barrier here.		\
	 */								\
	if (__SYNC_loongson3_war == 0)					\
		smp_mb__after_atomic();					\
									\
	return result;							\
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)

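/*
 * Semantics sketch (illustrative, mirroring the irq-disabled fallback
 * path above): arch_atomic_dec_if_positive(&v) behaves like
 *
 *	int result = v->counter - 1;
 *	if (result >= 0)
 *		v->counter = result;
 *	return result;	// new value if stored, negative otherwise
 *
 * executed atomically.
 */
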
#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)
#endif

#undef ATOMIC_SIP_OP

#endif /* _ASM_ATOMIC_H */