GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/ia64/include/asm/atomic.h
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below! The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <[email protected]>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/system.h>


#define ATOMIC_INIT(i)		((atomic_t) { (i) })
#define ATOMIC64_INIT(i)	((atomic64_t) { (i) })

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))
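
/*
 * Usage sketch (illustrative only; the variable name is made up):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 1);
 *	if (atomic_read(&nr_users) > 0)
 *		...
 *
 * atomic_read()/atomic_set() are single volatile loads/stores: they
 * are atomic because aligned int/long accesses are atomic on ia64,
 * but they imply no memory ordering by themselves.
 */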
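
/*
 * The four helpers below share one lock-free pattern: sample the
 * counter, compute the desired result, then try to publish it with a
 * compare-and-exchange (acquire semantics).  If another CPU updated
 * the counter in the meantime, ia64_cmpxchg() returns something other
 * than `old' and the loop retries.  Each helper returns the new value
 * it installed, which is what the *_return macros further down rely
 * on.  CMPXCHG_BUGCHECK()/CMPXCHG_BUGCHECK_DECL are debugging hooks
 * from <asm/intrinsics.h> that compile away unless cmpxchg debugging
 * is enabled.
 */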
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}
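
/*
 * Single-shot exchange operations on the counter word, wrapping the
 * generic cmpxchg()/xchg(): atomic_cmpxchg() stores `new' only if the
 * counter still holds `old' and returns the prior value either way
 * (the caller compares it with `old' to tell whether the swap
 * happened); atomic_xchg() swaps unconditionally and returns the
 * prior value.
 */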
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
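
/*
 * atomic_add_unless() adds `a' to the counter unless the counter is
 * `u', returning non-zero iff the addition was performed.  Note the
 * retry loop feeds the value returned by the failed cmpxchg back in
 * as the next expected value rather than re-reading the counter, so
 * each retry costs a single atomic operation.
 */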
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
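
/*
 * Usage sketch (illustrative; the object and field names are made
 * up): atomic_inc_not_zero() is the "take a reference only if the
 * object is still live" primitive used in lookup paths where a zero
 * refcount means teardown has begun:
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;	// object already dying
 *	return obj;
 */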
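
/*
 * ia64 has a fetchadd instruction that is cheaper than a cmpxchg
 * loop but only accepts the immediates +/-1, +/-4, +/-8 and +/-16.
 * The *_return macros below test for exactly those compile-time
 * constants and use ia64_fetch_and_add() in that case, falling back
 * to the cmpxchg-based helpers above for any other increment.
 */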
#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}
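
/*
 * Subtraction reuses the fetchadd fast path by negating the constant:
 * atomic_sub_return(4, v) turns into a fetchadd of -4.  Since the
 * legal fetchadd immediates come in +/- pairs, any constant that
 * passes the test below is still legal after negation.
 */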
#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))
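
/*
 * Usage sketch (illustrative; object and helper names are made up):
 * the inc/dec_and_test family supports the usual reference-counting
 * idiom, where whichever thread drops the last reference frees the
 * object:
 *
 *	atomic_inc(&obj->refcount);		// take a reference
 *	...
 *	if (atomic_dec_and_test(&obj->refcount))	// drop it
 *		free_object(obj);
 */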
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
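
/*
 * barrier() is a compiler barrier only: since the ia64 atomic
 * operations above already carry the required memory-ordering
 * semantics, no extra fence instruction is needed on either side of
 * an atomic_inc()/atomic_dec().
 */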
#include <asm-generic/atomic-long.h>
#endif /* _ASM_IA64_ATOMIC_H */