GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/mn10300/include/asm/atomic.h
/* MN10300 Atomic counter operations
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <asm/irqflags.h>

#ifndef __ASSEMBLY__

#ifdef CONFIG_SMP
#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
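/*
 * The inline assembly sequences below drive the MN10300 atomic operations
 * unit: the target address is written to the _AAR register, the operand is
 * read and/or written through _ADR, a dummy read of _ADR flushes the write,
 * and the _ASR status register is then read and tested so the sequence can
 * branch back and retry if the status reports that the operation must be
 * replayed.  The same pattern recurs in atomic_add_return(),
 * atomic_sub_return() and the mask helpers further down this file.
 */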
static inline
unsigned long __xchg(volatile unsigned long *m, unsigned long val)
{
	unsigned long status;
	unsigned long oldval;

	asm volatile(
		"1:	mov	%4,(_AAR,%3)	\n"
		"	mov	(_ADR,%3),%1	\n"
		"	mov	%5,(_ADR,%3)	\n"
		"	mov	(_ADR,%3),%0	\n"	/* flush */
		"	mov	(_ASR,%3),%0	\n"
		"	or	%0,%0		\n"
		"	bne	1b		\n"
		: "=&r"(status), "=&r"(oldval), "=m"(*m)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
		: "memory", "cc");

	return oldval;
}

static inline unsigned long __cmpxchg(volatile unsigned long *m,
				      unsigned long old, unsigned long new)
{
	unsigned long status;
	unsigned long oldval;

	asm volatile(
		"1:	mov	%4,(_AAR,%3)	\n"
		"	mov	(_ADR,%3),%1	\n"
		"	cmp	%5,%1		\n"
		"	bne	2f		\n"
		"	mov	%6,(_ADR,%3)	\n"
		"2:	mov	(_ADR,%3),%0	\n"	/* flush */
		"	mov	(_ASR,%3),%0	\n"
		"	or	%0,%0		\n"
		"	bne	1b		\n"
		: "=&r"(status), "=&r"(oldval), "=m"(*m)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
		  "r"(old), "r"(new)
		: "memory", "cc");

	return oldval;
}
#else /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
#error "No SMP atomic operation support!"
#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */

#else /* CONFIG_SMP */

/*
 * Emulate xchg for non-SMP MN10300
 */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
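/*
 * __xchg_dummy/__xg() follow the casting idiom other architectures use to
 * hand inline assembly a full-width memory operand; note that the plain-C,
 * interrupt-disabling fallbacks below do not reference this helper.
 */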

static inline
unsigned long __xchg(volatile unsigned long *m, unsigned long val)
{
	unsigned long oldval;
	unsigned long flags;

	flags = arch_local_cli_save();
	oldval = *m;
	*m = val;
	arch_local_irq_restore(flags);
	return oldval;
}

/*
 * Emulate cmpxchg for non-SMP MN10300
 */
static inline unsigned long __cmpxchg(volatile unsigned long *m,
				      unsigned long old, unsigned long new)
{
	unsigned long oldval;
	unsigned long flags;

	flags = arch_local_cli_save();
	oldval = *m;
	if (oldval == old)
		*m = new;
	arch_local_irq_restore(flags);
	return oldval;
}

#endif /* CONFIG_SMP */

#define xchg(ptr, v)						\
	((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),	\
				     (unsigned long)(v)))

#define cmpxchg(ptr, o, n)					\
	((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
					(unsigned long)(o),	\
					(unsigned long)(n)))

#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
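/*
 * Illustrative sketch (not part of the original header): a typical
 * compare-and-swap retry loop built on cmpxchg().  The variable and bit
 * names are hypothetical.
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = *word;
 *		new = old | FLAG_BIT;
 *	} while (cmpxchg(word, old, new) != old);
 *
 * cmpxchg() returns the value that was in *word when the attempt was made,
 * so the store is known to have happened only when that value equals 'old'.
 */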

#endif /* !__ASSEMBLY__ */

#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
#else

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 */

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)	(ACCESS_ONCE((v)->counter))

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v, i) (((v)->counter) = (i))
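/*
 * Illustrative sketch (not part of the original header): declaring and
 * initialising a counter with this API.  The variable name is hypothetical.
 *
 *	static atomic_t nr_widgets = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_widgets, 16);
 *	pr_info("widgets: %d\n", atomic_read(&nr_widgets));
 */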

/**
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the result
 * Note that the guaranteed useful range of an atomic_t is only 24 bits.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int retval;
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1:	mov	%4,(_AAR,%3)	\n"
		"	mov	(_ADR,%3),%1	\n"
		"	add	%5,%1		\n"
		"	mov	%1,(_ADR,%3)	\n"
		"	mov	(_ADR,%3),%0	\n"	/* flush */
		"	mov	(_ASR,%3),%0	\n"
		"	or	%0,%0		\n"
		"	bne	1b		\n"
		: "=&r"(status), "=&r"(retval), "=m"(v->counter)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
		: "memory", "cc");

#else
	unsigned long flags;

	flags = arch_local_cli_save();
	retval = v->counter;
	retval += i;
	v->counter = retval;
	arch_local_irq_restore(flags);
#endif
	return retval;
}

/**
 * atomic_sub_return - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the result
 * Note that the guaranteed useful range of an atomic_t is only 24 bits.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int retval;
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1:	mov	%4,(_AAR,%3)	\n"
		"	mov	(_ADR,%3),%1	\n"
		"	sub	%5,%1		\n"
		"	mov	%1,(_ADR,%3)	\n"
		"	mov	(_ADR,%3),%0	\n"	/* flush */
		"	mov	(_ASR,%3),%0	\n"
		"	or	%0,%0		\n"
		"	bne	1b		\n"
		: "=&r"(status), "=&r"(retval), "=m"(v->counter)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
		: "memory", "cc");

#else
	unsigned long flags;
	flags = arch_local_cli_save();
	retval = v->counter;
	retval -= i;
	v->counter = retval;
	arch_local_irq_restore(flags);
#endif
	return retval;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
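/*
 * Illustrative sketch (not part of the original header): the usual
 * reference-count release pattern built on atomic_dec_and_test().  The
 * object type and free routine are hypothetical.
 *
 *	void widget_put(struct widget *w)
 *	{
 *		if (atomic_dec_and_test(&w->refcount))
 *			kfree(w);
 *	}
 */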

#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
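/*
 * Illustrative sketch (not part of the original header): atomic_add_unless()
 * evaluates to non-zero only if it managed to add @a, which it refuses to do
 * once it reads the value @u.  atomic_inc_not_zero() is the common special
 * case used to take a reference only while an object is still live; the
 * lookup helper below is hypothetical.
 *
 *	struct widget *w = widget_lookup(id);
 *
 *	if (w && !atomic_inc_not_zero(&w->refcount))
 *		w = NULL;	/- object already being torn down -/
 */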

/**
 * atomic_clear_mask - Atomically clear bits in memory
 * @mask: Mask of the bits to be cleared
 * @addr: pointer to word in memory
 *
 * Atomically clears the bits set in mask from the memory word specified.
 */
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1:	mov	%3,(_AAR,%2)	\n"
		"	mov	(_ADR,%2),%0	\n"
		"	and	%4,%0		\n"
		"	mov	%0,(_ADR,%2)	\n"
		"	mov	(_ADR,%2),%0	\n"	/* flush */
		"	mov	(_ASR,%2),%0	\n"
		"	or	%0,%0		\n"
		"	bne	1b		\n"
		: "=&r"(status), "=m"(*addr)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
		: "memory", "cc");
#else
	unsigned long flags;

	mask = ~mask;
	flags = arch_local_cli_save();
	*addr &= mask;
	arch_local_irq_restore(flags);
#endif
}

/**
 * atomic_set_mask - Atomically set bits in memory
 * @mask: Mask of the bits to be set
 * @addr: pointer to word in memory
 *
 * Atomically sets the bits given in mask in the memory word specified.
 */
static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1:	mov	%3,(_AAR,%2)	\n"
		"	mov	(_ADR,%2),%0	\n"
		"	or	%4,%0		\n"
		"	mov	%0,(_ADR,%2)	\n"
		"	mov	(_ADR,%2),%0	\n"	/* flush */
		"	mov	(_ASR,%2),%0	\n"
		"	or	%0,%0		\n"
		"	bne	1b		\n"
		: "=&r"(status), "=m"(*addr)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
		: "memory", "cc");
#else
	unsigned long flags;

	flags = arch_local_cli_save();
	*addr |= mask;
	arch_local_irq_restore(flags);
#endif
}
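/*
 * Illustrative sketch (not part of the original header): using the mask
 * helpers to flip flag bits in a shared status word.  The variable and bit
 * names are hypothetical.
 *
 *	static unsigned long device_status;
 *
 *	atomic_set_mask(STATUS_BUSY, &device_status);
 *	...
 *	atomic_clear_mask(STATUS_BUSY, &device_status);
 */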

/* Atomic operations are already serializing on MN10300??? */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic-long.h>

#endif /* __KERNEL__ */
#endif /* CONFIG_SMP */
#endif /* _ASM_ATOMIC_H */