GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/s390/include/asm/cmpxchg.h
/*
 * Copyright IBM Corp. 1999, 2011
 *
 * Author(s): Martin Schwidefsky <[email protected]>,
 */

#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/types.h>

extern void __xchg_called_with_bad_pointer(void);
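
/*
 * __xchg_called_with_bad_pointer() is deliberately never defined: a call
 * with an unsupported operand size survives to link time and breaks the
 * build, instead of failing silently at run time.
 */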

static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
{
	unsigned long addr, old;
	int shift;

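	/*
	 * COMPARE AND SWAP (CS) only operates on an aligned 4-byte
	 * operand.  The 1- and 2-byte cases therefore work on the
	 * aligned word containing the operand: s390 is big-endian, so
	 * "shift" is the bit position of the operand within that word,
	 * and the CS loop re-reads the word and splices the new value
	 * in until the swap succeeds.
	 */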
	switch (size) {
	case 1:
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,%4\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,%4\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) addr)
			: "d" (x << shift), "d" (~(255 << shift)),
			  "Q" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,%4\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,%4\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) addr)
			: "d" (x << shift), "d" (~(65535 << shift)),
			  "Q" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 4:
		asm volatile(
			"	l	%0,%3\n"
			"0:	cs	%0,%2,%3\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) ptr)
			: "d" (x), "Q" (*(int *) ptr)
			: "memory", "cc");
		return old;
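	/* 64-bit kernels can use CSG for a direct 8-byte exchange. */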
#ifdef CONFIG_64BIT
	case 8:
		asm volatile(
			"	lg	%0,%3\n"
			"0:	csg	%0,%2,%3\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(long *) ptr)
			: "d" (x), "Q" (*(long *) ptr)
			: "memory", "cc");
		return old;
#endif /* CONFIG_64BIT */
	}
	__xchg_called_with_bad_pointer();
	return x;
}

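/*
 * xchg() accepts a pointer to any 1-, 2-, 4- or, on 64-bit, 8-byte
 * scalar; the __typeof__ casts hand the old value back with the type
 * of *ptr.
 */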
#define xchg(ptr, x)						\
({								\
	__typeof__(*(ptr)) __ret;				\
	__ret = (__typeof__(*(ptr)))				\
		__xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
	__ret;							\
})

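/*
 * Illustrative use only (the flag variable is hypothetical): atomically
 * set a one-byte flag and observe its previous value in a single step.
 *
 *	static unsigned char flag;
 *
 *	if (xchg(&flag, 1) == 0)
 *		... this caller was the first to set the flag ...
 */
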
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG

extern void __cmpxchg_called_with_bad_pointer(void);

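/*
 * For the 1- and 2-byte cases the CS loop below distinguishes two
 * failure modes: if the target byte/halfword itself differs from OLD,
 * the operation is done and reports failure through the return value;
 * if only the neighbouring bytes within the aligned word changed, the
 * compare is retried with the freshly fetched word.
 */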
static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long addr, prev, tmp;
	int shift;

	switch (size) {
	case 1:
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,%2\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%3\n"
			"	or	%1,%4\n"
			"	cs	%0,%1,%2\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) addr)
			: "d" (old << shift), "d" (new << shift),
			  "d" (~(255 << shift)), "Q" (*(int *) addr)
			: "memory", "cc");
		return prev >> shift;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,%2\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%3\n"
			"	or	%1,%4\n"
			"	cs	%0,%1,%2\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) addr)
			: "d" (old << shift), "d" (new << shift),
			  "d" (~(65535 << shift)), "Q" (*(int *) addr)
			: "memory", "cc");
		return prev >> shift;
	case 4:
		asm volatile(
			"	cs	%0,%3,%1\n"
			: "=&d" (prev), "=Q" (*(int *) ptr)
			: "0" (old), "d" (new), "Q" (*(int *) ptr)
			: "memory", "cc");
		return prev;
#ifdef CONFIG_64BIT
	case 8:
		asm volatile(
			"	csg	%0,%3,%1\n"
			: "=&d" (prev), "=Q" (*(long *) ptr)
			: "0" (old), "d" (new), "Q" (*(long *) ptr)
			: "memory", "cc");
		return prev;
#endif /* CONFIG_64BIT */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				       (unsigned long)(n), sizeof(*(ptr))))

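/*
 * Illustrative use only (the counter is hypothetical): a lock-free
 * increment keeps retrying until no other CPU updated the value between
 * the read and the compare-and-swap.
 *
 *	static unsigned int counter;
 *	unsigned int old;
 *
 *	do {
 *		old = counter;
 *	} while (cmpxchg(&counter, old, old + 1) != old);
 */
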
#ifdef CONFIG_64BIT
#define cmpxchg64(ptr, o, n)					\
({								\
	cmpxchg((ptr), (o), (n));				\
})
#else /* CONFIG_64BIT */
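/*
 * 31-bit kernels have no CSG, so the 8-byte case uses COMPARE DOUBLE
 * AND SWAP (CDS), which operates on an even/odd register pair as
 * described by the register_pair union.
 */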
static inline unsigned long long __cmpxchg64(void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%2,%1"
		: "+&d" (rp_old), "=Q" (*(unsigned long long *) ptr)
		: "d" (rp_new), "Q" (*(unsigned long long *) ptr)
		: "cc");
	return rp_old.pair;
}
#define cmpxchg64(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),			\
					 (unsigned long long)(o),	\
					 (unsigned long long)(n)))
#endif /* CONFIG_64BIT */

#include <asm-generic/cmpxchg-local.h>

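/*
 * The natively handled sizes simply reuse the fully atomic __cmpxchg(),
 * which is trivially also atomic with respect to the local CPU; any
 * other size falls back to the generic local emulation from
 * <asm-generic/cmpxchg-local.h>.
 */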
static inline unsigned long __cmpxchg_local(void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 1:
	case 2:
	case 4:
#ifdef CONFIG_64BIT
	case 8:
#endif
		return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU.  Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)				\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n), sizeof(*(ptr))))

#define cmpxchg64_local(ptr, o, n)	cmpxchg64((ptr), (o), (n))

#endif /* __ASM_CMPXCHG_H */