GitHub Repository: torvalds/linux
Path: blob/master/arch/xtensa/include/asm/atomic.h
/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is for locking interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a14, TOPLEVEL
 *    <code>
 *    wsr  a14, PS
 *    rsync
 *
 * Note that a14 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * must not occur between the rsil and wsr instructions.  By using
 * a14 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */
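
/*
 * Illustrative sketch only (not part of the upstream file): the
 * interrupt-locking pattern above, written out as one complete critical
 * section, mirroring the fallback ATOMIC_OP below with op = add.  The
 * function name is hypothetical and the block is deliberately kept out
 * of compilation.
 */
#if 0
static inline void example_locked_add(int *p, int i)
{
	int val;

	__asm__ __volatile__(
			"       rsil    a14, "__stringify(TOPLEVEL)"\n"
			"       l32i    %[val], %[mem]\n"
			"       add     %[val], %[val], %[i]\n"
			"       s32i    %[val], %[mem]\n"
			"       wsr     a14, ps\n"
			"       rsync\n"
			: [val] "=&a" (val), [mem] "+m" (*p)
			: [i] "a" (i)
			: "a14", "memory");
}
#endif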

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define arch_atomic_read(v)		READ_ONCE((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define arch_atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
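
/*
 * Usage sketch (illustrative, not part of the upstream file): callers
 * normally reach these through the generic <linux/atomic.h> wrappers:
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *
 *	atomic_set(&refs, 2);
 *	pr_info("refs = %d\n", atomic_read(&refs));
 */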

#if XCHAL_HAVE_EXCLUSIVE
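/*
 * With the Xtensa exclusive access option, l32ex loads a word and marks
 * the address for exclusive access, s32ex stores only while that
 * exclusivity is still held, and getex retrieves the result of the
 * preceding s32ex so the loop can retry until the store succeeds.
 */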
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %[tmp], %[addr]\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32ex   %[result], %[addr]\n"		\
			"       getex   %[result]\n"			\
			"       beqz    %[result], 1b\n"		\
			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
			: [i] "a" (i), [addr] "a" (v)			\
			: "memory"					\
			);						\
}
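
/*
 * In the _return variant below, getex has overwritten %[result] with the
 * store-exclusive success flag, so once the loop exits the operation is
 * applied one more time to %[tmp] (the old value) to reconstruct the new
 * value that was stored.
 */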

#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %[tmp], %[addr]\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32ex   %[result], %[addr]\n"		\
			"       getex   %[result]\n"			\
			"       beqz    %[result], 1b\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
			: [i] "a" (i), [addr] "a" (v)			\
			: "memory"					\
			);						\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %[tmp], %[addr]\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32ex   %[result], %[addr]\n"		\
			"       getex   %[result]\n"			\
			"       beqz    %[result], 1b\n"		\
			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
			: [i] "a" (i), [addr] "a" (v)			\
			: "memory"					\
			);						\
									\
	return tmp;							\
}

#elif XCHAL_HAVE_S32C1I
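/*
 * With the S32C1I option, writing scompare1 sets the expected old value;
 * s32c1i then stores its operand only if the word in memory still equals
 * scompare1, and in either case leaves the value it observed in memory
 * in the source register, so the bne can detect a lost race and retry.
 */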
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %[tmp], %[mem]\n"		\
			"       wsr     %[tmp], scompare1\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32c1i  %[result], %[mem]\n"		\
			"       bne     %[result], %[tmp], 1b\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "memory"					\
			);						\
}
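
/*
 * After a successful s32c1i, %[result] holds the value that was in
 * memory (the old value): the _return variant below applies the
 * operation once more to produce the new value, while the fetch variant
 * returns it as-is.
 */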

#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t * v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %[tmp], %[mem]\n"		\
			"       wsr     %[tmp], scompare1\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32c1i  %[result], %[mem]\n"		\
			"       bne     %[result], %[tmp], 1b\n"	\
			"       " #op " %[result], %[result], %[i]\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "memory"					\
			);						\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t * v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %[tmp], %[mem]\n"		\
			"       wsr     %[tmp], scompare1\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32c1i  %[result], %[mem]\n"		\
			"       bne     %[result], %[tmp], 1b\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "memory"					\
			);						\
									\
	return result;							\
}

#else /* XCHAL_HAVE_S32C1I */
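
/*
 * Without either hardware option, fall back to the interrupt-locking
 * scheme described at the top of this file: raise the interrupt level
 * with rsil, do a plain load/op/store, then restore PS and rsync.
 */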

#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned int vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a14, "__stringify(TOPLEVEL)"\n"\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[result], %[result], %[i]\n"	\
			"       s32i    %[result], %[mem]\n"		\
			"       wsr     a14, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [mem] "+m" (*v)	\
			: [i] "a" (i)					\
			: "a14", "memory"				\
			);						\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t * v)	\
{									\
	unsigned int vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a14, "__stringify(TOPLEVEL)"\n"\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[result], %[result], %[i]\n"	\
			"       s32i    %[result], %[mem]\n"		\
			"       wsr     a14, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [mem] "+m" (*v)	\
			: [i] "a" (i)					\
			: "a14", "memory"				\
			);						\
									\
	return vval;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t * v)		\
{									\
	unsigned int tmp, vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a14, "__stringify(TOPLEVEL)"\n"\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[tmp], %[result], %[i]\n"	\
			"       s32i    %[tmp], %[mem]\n"		\
			"       wsr     a14, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "a14", "memory"				\
			);						\
									\
	return vval;							\
}

#endif /* XCHAL_HAVE_S32C1I */

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)
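
/*
 * ATOMIC_OPS(add), for example, instantiates arch_atomic_add(),
 * arch_atomic_fetch_add() and arch_atomic_add_return() from whichever
 * macro set was selected above.
 */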

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
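
/*
 * The self-referential defines below advertise to the generic
 * <linux/atomic.h> layer that this architecture supplies its own
 * implementations of the named operations, so no fallbacks are
 * generated for them.
 */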

#define arch_atomic_add_return			arch_atomic_add_return
#define arch_atomic_sub_return			arch_atomic_sub_return
#define arch_atomic_fetch_add			arch_atomic_fetch_add
#define arch_atomic_fetch_sub			arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and			arch_atomic_fetch_and
#define arch_atomic_fetch_or			arch_atomic_fetch_or
#define arch_atomic_fetch_xor			arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif /* _XTENSA_ATOMIC_H */