GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/m68k/include/asm/bitops_no.h
#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

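/*
 * On ColdFire parts with ISA_A+ or ISA_C, ffs()/__ffs() come down to two
 * instructions: "bitrev" reverses the 32-bit operand and "ff1" then reports
 * the offset of the first set bit counting from the MSB, which after the
 * reversal is the index of the least significant set bit of the original
 * value.  For example, val = 0x18 (bits 3 and 4 set) reverses to 0x18000000,
 * ff1 yields 3, and ffs() returns 4.  ffs() is 1-based and returns 0 for a
 * zero argument; __ffs() is 0-based and undefined for zero.
 */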
#if defined (__mcfisaaplus__) || defined (__mcfisac__)
static inline int ffs(unsigned int val)
{
	if (!val)
		return 0;

	asm volatile(
		"bitrev %0\n\t"
		"ff1 %0\n\t"
		: "=d" (val)
		: "0" (val)
	);
	val++;
	return val;
}

static inline int __ffs(unsigned int val)
{
	asm volatile(
		"bitrev %0\n\t"
		"ff1 %0\n\t"
		: "=d" (val)
		: "0" (val)
	);
	return val;
}

#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

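/*
 * The bset/bclr/bchg/btst instructions operate on a single byte when given a
 * memory operand, using only the low three bits of the bit-number register.
 * The generic kernel bitops number bits within each 32-bit long starting at
 * its least significant bit, so on this big-endian CPU the byte holding bit
 * 'nr' is byte ((nr ^ 31) >> 3) of the long: nr = 0 lives in byte 3, nr = 31
 * in bit 7 of byte 0.
 */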
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)

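/*
 * The test-and-modify variants rely on the condition codes: bset/bclr/bchg
 * set Z according to the *previous* state of the bit, and "sne" then writes
 * 0xff to the result byte if the bit was set (Z clear) or 0x00 if it was
 * clear.  The return value is therefore non-zero iff the bit was already set
 * before the operation.
 */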
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

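/*
 * Little-endian bitmap helpers (for ext2-style on-disk bitmaps).  With
 * BITS_PER_LONG == 32, BITOP_LE_SWIZZLE is 24, so nr ^ BITOP_LE_SWIZZLE
 * remaps a little-endian bit number onto the big-endian long-based numbering
 * used by __set_bit()/__clear_bit() above; the result still addresses bit
 * (nr & 7) of byte (nr >> 3) from the start of the bitmap.
 */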
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)

static inline void __set_bit_le(int nr, void *addr)
{
	__set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void __clear_bit_le(int nr, void *addr)
{
	__clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int __test_and_set_bit_le(int nr, volatile void *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#include <asm-generic/bitops/ext2-atomic.h>

static inline int test_bit_le(int nr, const volatile void *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define find_first_zero_bit_le(addr, size)	\
	find_next_zero_bit_le((addr), (size), 0)

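/*
 * Scan a little-endian bitmap for the next zero bit.  Whole 32-bit words
 * with no zero bit can be skipped without byte-swapping (all ones is all
 * ones in either byte order); the only swabs needed are on the mask for a
 * partial first word and on the word finally handed to ffz().
 */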
static inline unsigned long find_next_zero_bit_le(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if(offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
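		/*
		 * For example, with offset == 4 the swabbed mask is
		 * __swab32(0x0000000f) == 0x0f000000, which covers exactly
		 * little-endian bits 0-3 of this word as it sits in a
		 * big-endian register (LE byte 0 is loaded into bits 24-31).
		 */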
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if(size < 32)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while(size & ~31UL) {
		if(~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if(!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
#define find_next_zero_bit_le find_next_zero_bit_le

extern unsigned long find_next_bit_le(const void *addr,
		unsigned long size, unsigned long offset);

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */