GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/alpha/include/asm/atomic.h
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))
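
/*
 * Illustrative sketch (not part of the original header): minimal use of
 * the accessors above.  'example_count' and the function name are
 * hypothetical.
 */
#if 0	/* example only -- never compiled */
static atomic_t example_count = ATOMIC_INIT(0);

static __inline__ int atomic_example(void)
{
	atomic_set(&example_count, 5);		/* plain store */
	return atomic_read(&example_count);	/* volatile load; yields 5 */
}
#endif
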
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"		/* load-locked the 32-bit counter */
	"	addl %0,%2,%0\n"	/* add the increment */
	"	stl_c %0,%1\n"		/* store-conditional: %0 becomes 0 on failure */
	"	beq %0,2f\n"		/* lost the reservation - retry out of line */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the result value
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
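
/*
 * Illustrative sketch (not part of the original header): because the
 * *_return variants issue smp_mb() on both sides, the returned value is
 * fully ordered and can be used directly, e.g. to hand out monotonic
 * IDs.  'example_next_id' is a hypothetical variable.
 */
#if 0	/* example only -- never compiled */
static atomic_t example_next_id = ATOMIC_INIT(0);

static __inline__ int example_alloc_id(void)
{
	return atomic_add_return(1, &example_next_id);	/* first caller sees 1 */
}
#endif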

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
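
/*
 * Illustrative sketch (not part of the original header):
 * atomic_inc_not_zero() is the usual way to take a reference only while
 * an object is still live.  'struct example_obj' is hypothetical.
 */
#if 0	/* example only -- never compiled */
struct example_obj {
	atomic_t refcnt;
	int dead;	/* used by the barrier sketch near the bottom */
};

static __inline__ int example_obj_tryget(struct example_obj *obj)
{
	/* Returns 0 once the refcount has already dropped to zero. */
	return atomic_inc_not_zero(&obj->refcnt);
}
#endif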

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
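
/*
 * Illustrative sketch (not part of the original header): the matching
 * release path.  Exactly one caller observes the count hit zero and may
 * free the object.  'example_obj_free' is a hypothetical helper.
 */
#if 0	/* example only -- never compiled */
static __inline__ void example_obj_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcnt))
		example_obj_free(obj);	/* last reference is gone */
}
#endif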

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
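
/*
 * Illustrative sketch (not part of the original header): the
 * __before/__after hooks order surrounding accesses against a plain
 * atomic op, which by itself implies no barrier on Alpha (compare
 * atomic_sub() above with atomic_sub_return()).  The 'dead' flag is the
 * hypothetical field from the sketch further up.
 */
#if 0	/* example only -- never compiled */
static __inline__ void example_mark_dead(struct example_obj *obj)
{
	obj->dead = 1;			/* must be visible before the decrement */
	smp_mb__before_atomic_dec();
	atomic_dec(&obj->refcnt);
}
#endif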

#include <asm-generic/atomic-long.h>
#endif /* _ALPHA_ATOMIC_H */