GitHub Repository: torvalds/linux
Path: blob/master/arch/xtensa/include/asm/cmpxchg.h
/*
 * Atomic xchg and cmpxchg operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_CMPXCHG_H
#define _XTENSA_CMPXCHG_H

#ifndef __ASSEMBLER__

#include <linux/bits.h>
#include <linux/stringify.h>
#include <linux/cmpxchg-emu.h>

/*
 * cmpxchg
 */

static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
#if XCHAL_HAVE_EXCLUSIVE
        unsigned long tmp, result;

        __asm__ __volatile__(
                        "1:     l32ex   %[result], %[addr]\n"
                        "       bne     %[result], %[cmp], 2f\n"
                        "       mov     %[tmp], %[new]\n"
                        "       s32ex   %[tmp], %[addr]\n"
                        "       getex   %[tmp]\n"
                        "       beqz    %[tmp], 1b\n"
                        "2:\n"
                        : [result] "=&a" (result), [tmp] "=&a" (tmp)
                        : [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
                        : "memory"
                        );

        return result;
#elif XCHAL_HAVE_S32C1I
        __asm__ __volatile__(
                        "       wsr     %[cmp], scompare1\n"
                        "       s32c1i  %[new], %[mem]\n"
                        : [new] "+a" (new), [mem] "+m" (*p)
                        : [cmp] "a" (old)
                        : "memory"
                        );

        return new;
#else
        __asm__ __volatile__(
                        "       rsil    a14, "__stringify(TOPLEVEL)"\n"
                        "       l32i    %[old], %[mem]\n"
                        "       bne     %[old], %[cmp], 1f\n"
                        "       s32i    %[new], %[mem]\n"
                        "1:\n"
                        "       wsr     a14, ps\n"
                        "       rsync\n"
                        : [old] "=&a" (old), [mem] "+m" (*p)
                        : [cmp] "a" (old), [new] "r" (new)
                        : "a14", "memory");
        return old;
#endif
}
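/*
 * Editorial sketch, not part of the original header: all three assembly
 * variants above (l32ex/s32ex, s32c1i, and the interrupt-disabling fallback)
 * implement the same contract, shown here as a hypothetical, non-atomic
 * plain-C reference model.  The name __cmpxchg_u32_model is an assumption
 * for illustration only.
 */
#if 0   /* illustration only, never compiled */
static inline unsigned long __cmpxchg_u32_model(volatile int *p, int old, int new)
{
        int cur = *p;           /* l32ex / l32i: read the current value     */
        if (cur == old)         /* bne: skip the store on a mismatch        */
                *p = new;       /* s32ex / s32c1i / s32i: conditional store */
        return cur;             /* caller compares the result against 'old' */
}
#endif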
/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */

extern void __cmpxchg_called_with_bad_pointer(void);
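/*
 * Editorial note, not part of the original header: because __cmpxchg() below
 * is inlined with a compile-time constant size, the unsupported-size branch
 * is the only one left standing for, say, a 2-byte operand, and the
 * deliberately undefined symbol above turns that mistake into a link-time
 * failure instead of silently non-atomic code.  Roughly:
 */
#if 0   /* illustration only */
        u16 bad;
        /* compiles, but linking fails with an undefined reference to
         * __cmpxchg_called_with_bad_pointer() */
        arch_cmpxchg(&bad, 0, 1);
#endif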
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        switch (size) {
        case 1: return cmpxchg_emu_u8(ptr, old, new);
        case 4: return __cmpxchg_u32(ptr, old, new);
        default: __cmpxchg_called_with_bad_pointer();
                 return old;
        }
}

#define arch_cmpxchg(ptr,o,n)                                           \
        ({ __typeof__(*(ptr)) _o_ = (o);                                \
           __typeof__(*(ptr)) _n_ = (n);                                \
           (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,    \
                        (unsigned long)_n_, sizeof (*(ptr)));           \
        })
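/*
 * Editorial example, not part of the original header: a typical caller-side
 * retry loop built on arch_cmpxchg().  The helper example_bounded_inc and
 * its counter are assumptions for illustration; real kernel code would
 * normally go through the generic cmpxchg()/atomic_*() wrappers rather than
 * calling arch_cmpxchg() directly.
 */
#if 0   /* illustration only */
static inline int example_bounded_inc(int *counter, int limit)
{
        int old, new;

        do {
                old = READ_ONCE(*counter);
                if (old >= limit)
                        return old;     /* already at the limit, give up */
                new = old + 1;
                /* the store wins only if *counter still holds 'old' */
        } while (arch_cmpxchg(counter, old, new) != old);

        return new;
}
#endif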
#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        default:
                return __generic_cmpxchg_local(ptr, old, new, size);
        }

        return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define arch_cmpxchg_local(ptr, o, n)                                          \
        ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
                        (unsigned long)(n), sizeof(*(ptr))))
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n)       arch_cmpxchg64_local((ptr), (o), (n))
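/*
 * Editorial note, not part of the original header: per the comment above,
 * the _local variants are only atomic with respect to the current CPU
 * (e.g. against interrupts), not against other processors, which is why the
 * generic software fallback is acceptable for them.  A hypothetical helper,
 * assumed for illustration only:
 */
#if 0   /* illustration only */
static inline unsigned long example_claim_local_slot(unsigned long *slot)
{
        /* only code on this CPU can race with us here */
        return arch_cmpxchg_local(slot, 0UL, 1UL);
}
#endif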
/*
 * xchg_u32
 *
 * Note that a14 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * must not occur between the rsil and wsr instructions. By using
 * a14 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */
static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
#if XCHAL_HAVE_EXCLUSIVE
        unsigned long tmp, result;

        __asm__ __volatile__(
                        "1:     l32ex   %[result], %[addr]\n"
                        "       mov     %[tmp], %[val]\n"
                        "       s32ex   %[tmp], %[addr]\n"
                        "       getex   %[tmp]\n"
                        "       beqz    %[tmp], 1b\n"
                        : [result] "=&a" (result), [tmp] "=&a" (tmp)
                        : [val] "a" (val), [addr] "a" (m)
                        : "memory"
                        );

        return result;
#elif XCHAL_HAVE_S32C1I
        unsigned long tmp, result;
        __asm__ __volatile__(
                        "1:     l32i    %[tmp], %[mem]\n"
                        "       mov     %[result], %[val]\n"
                        "       wsr     %[tmp], scompare1\n"
                        "       s32c1i  %[result], %[mem]\n"
                        "       bne     %[result], %[tmp], 1b\n"
                        : [result] "=&a" (result), [tmp] "=&a" (tmp),
                          [mem] "+m" (*m)
                        : [val] "a" (val)
                        : "memory"
                        );
        return result;
#else
        unsigned long tmp;
        __asm__ __volatile__(
                        "       rsil    a14, "__stringify(TOPLEVEL)"\n"
                        "       l32i    %[tmp], %[mem]\n"
                        "       s32i    %[val], %[mem]\n"
                        "       wsr     a14, ps\n"
                        "       rsync\n"
                        : [tmp] "=&a" (tmp), [mem] "+m" (*m)
                        : [val] "a" (val)
                        : "a14", "memory");
        return tmp;
#endif
}

#define arch_xchg(ptr,x) \
        ((__typeof__(*(ptr)))__arch_xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
{
        int off = (unsigned long)ptr % sizeof(u32);
        volatile u32 *p = ptr - off;
#ifdef __BIG_ENDIAN
        int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
#else
        int bitoff = off * BITS_PER_BYTE;
#endif
        u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
        u32 oldv, newv;
        u32 ret;

        do {
                oldv = READ_ONCE(*p);
                ret = (oldv & bitmask) >> bitoff;
                newv = (oldv & ~bitmask) | (x << bitoff);
        } while (__cmpxchg_u32(p, oldv, newv) != oldv);

        return ret;
}
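/*
 * Editorial worked example, not part of the original header: for a 1-byte
 * exchange at ptr = word_base + 2 on a little-endian configuration,
 * xchg_small() computes
 *      off     = 2
 *      bitoff  = 2 * BITS_PER_BYTE            = 16
 *      bitmask = ((1 << (1 * 8)) - 1) << 16   = 0x00ff0000
 * so the __cmpxchg_u32() loop rewrites only bits 16..23 of the containing
 * aligned 32-bit word and returns the byte that previously lived there.
 */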
/*
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
 * be dead anyway.
 */

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__arch_xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
        case 1:
                return xchg_small(ptr, x, 1);
        case 2:
                return xchg_small(ptr, x, 2);
        case 4:
                return xchg_u32(ptr, x);
        default:
                __xchg_called_with_bad_pointer();
                return x;
        }
}
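/*
 * Editorial example, not part of the original header: arch_xchg() dispatches
 * on the operand size, so a byte-sized operand routes through xchg_small()
 * and a 32-bit operand through xchg_u32().  The helper name
 * example_test_and_set_flag is an assumption for illustration only.
 */
#if 0   /* illustration only */
static inline u8 example_test_and_set_flag(u8 *flag)
{
        /* sizeof(*flag) == 1, so this ends up in xchg_small(flag, 1, 1) */
        return arch_xchg(flag, 1);
}
#endif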
#endif /* __ASSEMBLER__ */

#endif /* _XTENSA_CMPXCHG_H */