Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/m68k/include/asm/bitops_mm.h
10820 views
1
#ifndef _M68K_BITOPS_H
2
#define _M68K_BITOPS_H
3
/*
4
* Copyright 1992, Linus Torvalds.
5
*
6
* This file is subject to the terms and conditions of the GNU General Public
7
* License. See the file COPYING in the main directory of this archive
8
* for more details.
9
*/
10
11
#ifndef _LINUX_BITOPS_H
12
#error only <linux/bitops.h> can be included directly
13
#endif
14
15
#include <linux/compiler.h>
16
17
/*
18
* Require 68020 or better.
19
*
20
* They use the standard big-endian m680x0 bit ordering.
21
*/
22
23
/*
 * test_and_set_bit() - atomically set bit nr in *vaddr and return its
 * previous value (non-zero if it was already set).
 *
 * Dispatches at compile time: a constant bit number can be encoded into
 * a single-byte "bset" instruction, a variable bit number uses the
 * 68020+ bitfield instruction instead.
 */
#define test_and_set_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_and_set_bit(nr, vaddr) : \
	 __generic_test_and_set_bit(nr, vaddr))

/* On m68k the "non-atomic" variant is simply the atomic one. */
#define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr)

static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr)
{
	/*
	 * Big-endian m680x0 bit numbering: bit nr of the 32-bit word
	 * lives in byte (nr ^ 31) / 8, at position nr & 7 in that byte.
	 */
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	/* bset records the old bit in the Z condition code; sne turns
	 * "bit was already set" into a non-zero byte in retval. */
	__asm__ __volatile__ ("bset %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}

static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr)
{
	char retval;

	/* bfset on a one-bit field; nr ^ 31 converts the little-endian
	 * bit number into the MSB-relative bitfield offset. */
	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}
51
52
/*
 * set_bit() - atomically set bit nr in *vaddr.
 *
 * Constant bit numbers compile to a plain "bset" on the addressed byte,
 * variable ones use the 68020+ bitfield instruction.
 */
#define set_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_set_bit(nr, vaddr) : \
	 __generic_set_bit(nr, vaddr))

/* Non-atomic variant is identical to the atomic one on m68k. */
#define __set_bit(nr,vaddr) set_bit(nr,vaddr)

static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
{
	/* Byte containing bit nr under big-endian bit numbering. */
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	__asm__ __volatile__ ("bset %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}

static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
{
	/* bfset of a 1-bit field at MSB-relative offset nr ^ 31. */
	__asm__ __volatile__ ("bfset %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}
71
72
/*
 * test_and_clear_bit() - atomically clear bit nr in *vaddr and return
 * its previous value (non-zero if it was set).
 */
#define test_and_clear_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_and_clear_bit(nr, vaddr) : \
	 __generic_test_and_clear_bit(nr, vaddr))

/* Non-atomic variant is identical to the atomic one on m68k. */
#define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr)

static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr)
{
	/* Byte containing bit nr under big-endian bit numbering. */
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	/* bclr leaves the old bit value in the Z flag; sne converts
	 * "bit was set" into a non-zero byte. */
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}

static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr)
{
	char retval;

	/* bfclr of a 1-bit field at MSB-relative offset nr ^ 31. */
	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}
100
101
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/*
 * clear_bit() - atomically clear bit nr in *vaddr.
 */
#define clear_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_clear_bit(nr, vaddr) : \
	 __generic_clear_bit(nr, vaddr))
/* Non-atomic variant is identical to the atomic one on m68k. */
#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)

static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
{
	/* Byte containing bit nr under big-endian bit numbering. */
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	__asm__ __volatile__ ("bclr %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}

static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
{
	/* bfclr of a 1-bit field at MSB-relative offset nr ^ 31. */
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}
125
126
/*
 * test_and_change_bit() - atomically toggle bit nr in *vaddr and return
 * its previous value (non-zero if it was set).
 */
#define test_and_change_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_and_change_bit(nr, vaddr) : \
	 __generic_test_and_change_bit(nr, vaddr))

/* Non-atomic variants are identical to the atomic ones on m68k. */
#define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr)
#define __change_bit(nr,vaddr) change_bit(nr,vaddr)

static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr)
{
	/* Byte containing bit nr under big-endian bit numbering. */
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	/* bchg toggles the bit and records the old value in the Z flag;
	 * sne turns "bit was set" into a non-zero byte. */
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
			: "=d" (retval), "+m" (*p)
			: "di" (nr & 7));

	return retval;
}

static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr)
{
	char retval;

	/* bfchg of a 1-bit field at MSB-relative offset nr ^ 31. */
	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
			: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");

	return retval;
}

/*
 * change_bit() - atomically toggle bit nr in *vaddr.
 */
#define change_bit(nr,vaddr) \
	(__builtin_constant_p(nr) ? \
	 __constant_change_bit(nr, vaddr) : \
	 __generic_change_bit(nr, vaddr))

static inline void __constant_change_bit(int nr, unsigned long *vaddr)
{
	/* Byte containing bit nr under big-endian bit numbering. */
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	__asm__ __volatile__ ("bchg %1,%0"
			: "+m" (*p) : "di" (nr & 7));
}

static inline void __generic_change_bit(int nr, unsigned long *vaddr)
{
	/* bfchg of a 1-bit field at MSB-relative offset nr ^ 31. */
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
			: : "d" (nr^31), "o" (*vaddr) : "memory");
}
173
174
/*
 * test_bit() - return non-zero iff bit nr is set in the bitmap.
 * Plain C: index the 32-bit word, mask the bit within it.
 */
static inline int test_bit(int nr, const unsigned long *vaddr)
{
	unsigned long word = vaddr[nr >> 5];
	unsigned long mask = 1UL << (nr & 31);

	return (word & mask) ? 1 : 0;
}
178
179
/*
 * find_first_zero_bit() - return the index of the first zero bit in the
 * bitmap, or size if none is found within the first size bits.
 */
static inline int find_first_zero_bit(const unsigned long *vaddr,
				      unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	/* Number of 32-bit words to scan, rounded up. */
	words = (size + 31) >> 5;
	/* Skip words that are all ones (their complement is zero). */
	while (!(num = ~*p++)) {
		if (!--words)
			goto out;
	}

	/* bfffo finds the first set bit counting from the MSB; isolating
	 * the lowest set bit first (num & -num) and XOR-ing with 31
	 * yields the little-endian bit number. */
	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			: "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	/* p was post-incremented past the word examined, hence the -4. */
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit
204
205
/*
 * find_next_zero_bit() - return the index of the first zero bit at or
 * after offset, or size if there is none before size.
 */
static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
				     int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		/* Mask off bits below the starting position. */
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				: "=d" (res) : "d" (num & -num));
		if (res < 32) {
			/* MSB-relative offset -> little-endian bit number. */
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No zero yet, search remaining full bytes for a zero */
	return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit
234
235
/*
 * find_first_bit() - return the index of the first set bit in the
 * bitmap, or size if none is found within the first size bits.
 */
static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	/* Number of 32-bit words to scan, rounded up. */
	words = (size + 31) >> 5;
	/* Skip words that are entirely zero. */
	while (!(num = *p++)) {
		if (!--words)
			goto out;
	}

	/* bfffo counts from the MSB; isolate the lowest set bit and
	 * XOR with 31 to get the little-endian bit number. */
	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			: "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	/* p was post-incremented past the word examined, hence the -4. */
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_bit find_first_bit
259
260
/*
 * find_next_bit() - return the index of the first set bit at or after
 * offset, or size if there is none before size.
 */
static inline int find_next_bit(const unsigned long *vaddr, int size,
				int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		/* Mask off bits below the starting position. */
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				: "=d" (res) : "d" (num & -num));
		if (res < 32) {
			/* MSB-relative offset -> little-endian bit number. */
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No one yet, search remaining full bytes for a one */
	return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit
289
290
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
static inline unsigned long ffz(unsigned long word)
{
	int res;

	/* Isolate the lowest clear bit (as a set bit in ~word), locate
	 * it MSB-relative with bfffo, and XOR with 31 to convert to the
	 * little-endian bit number. */
	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			: "=d" (res) : "d" (~word & -~word));
	return res ^ 31;
}
302
303
#ifdef __KERNEL__
304
305
/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 *
 * Returns 1-based bit position of the lowest set bit, or 0 if x == 0
 * (bfffo of 0 yields 32, so 32 - cnt == 0).
 */

static inline int ffs(int x)
{
	int cnt;

	/* NOTE(review): the bitfield separator is written ':' here but
	 * ',' in fls() below — confirm both spellings assemble identically. */
	asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x));

	return 32 - cnt;
}
/* 0-based variant; undefined for x == 0, as per the generic __ffs. */
#define __ffs(x) (ffs(x) - 1)
320
321
/*
 * fls: find last bit set.
 *
 * Returns the 1-based position of the highest set bit, or 0 if x == 0
 * (bfffo of 0 yields 32, so 32 - cnt == 0).
 */

static inline int fls(int x)
{
	int cnt;

	/* bfffo returns the MSB-relative offset of the highest set bit. */
	asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x));

	return 32 - cnt;
}
333
334
/*
 * __fls() - 0-based index of the highest set bit.
 * Undefined for x == 0 (fls() returns 0 there, giving -1).
 */
static inline int __fls(int x)
{
	int msb = fls(x);

	return msb - 1;
}
338
339
#include <asm-generic/bitops/fls64.h>
340
#include <asm-generic/bitops/sched.h>
341
#include <asm-generic/bitops/hweight.h>
342
#include <asm-generic/bitops/lock.h>
343
344
/* Bitmap functions for the little endian bitmap. */

/*
 * nr ^ 24 complements bits 3-4 of the bit number, i.e. the byte index
 * within the 32-bit word (byte 0 <-> 3, 1 <-> 2), while leaving the bit
 * position inside the byte untouched. This maps little-endian bit
 * numbering onto the big-endian bit operations above.
 */

static inline void __set_bit_le(int nr, void *addr)
{
	__set_bit(nr ^ 24, addr);
}

static inline void __clear_bit_le(int nr, void *addr)
{
	__clear_bit(nr ^ 24, addr);
}

static inline int __test_and_set_bit_le(int nr, void *addr)
{
	return __test_and_set_bit(nr ^ 24, addr);
}

static inline int test_and_set_bit_le(int nr, void *addr)
{
	return test_and_set_bit(nr ^ 24, addr);
}

static inline int __test_and_clear_bit_le(int nr, void *addr)
{
	return __test_and_clear_bit(nr ^ 24, addr);
}

static inline int test_and_clear_bit_le(int nr, void *addr)
{
	return test_and_clear_bit(nr ^ 24, addr);
}
375
376
/*
 * test_bit_le() - return non-zero iff bit nr is set, using
 * little-endian bit numbering (bit 0 = LSB of byte 0).
 */
static inline int test_bit_le(int nr, const void *vaddr)
{
	const unsigned char *bytes = vaddr;
	unsigned int mask = 1U << (nr & 7);

	return (bytes[nr >> 3] & mask) ? 1 : 0;
}
381
382
/*
 * find_first_zero_bit_le() - first zero bit in little-endian numbering,
 * or size if none is found within the first size bits.
 */
static inline int find_first_zero_bit_le(const void *vaddr, unsigned size)
{
	const unsigned long *p = vaddr, *addr = vaddr;
	int res = 0;
	unsigned int words;

	if (!size)
		return 0;

	/* Number of 32-bit words to scan, rounded up. */
	words = (size >> 5) + ((size & 31) > 0);
	/* Skip words that are all ones. */
	while (*p++ == ~0UL) {
		if (--words == 0)
			goto out;
	}

	/* Step back to the word that contained a zero and scan its bits
	 * in little-endian order. */
	--p;
	for (res = 0; res < 32; res++)
		if (!test_bit_le(res, p))
			break;
out:
	res += (p - addr) * 32;
	return res < size ? res : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le
406
407
/*
 * find_next_zero_bit_le() - first zero bit at or after offset in
 * little-endian numbering, or size if there is none before size.
 */
static inline unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr;
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	p += offset >> 5;

	if (bit) {
		offset -= bit;
		/* Look for zero in first longword */
		for (res = bit; res < 32; res++)
			if (!test_bit_le(res, p)) {
				offset += res;
				return offset < size ? offset : size;
			}
		p++;
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No zero yet, search remaining full bytes for a zero */
	return offset + find_first_zero_bit_le(p, size - offset);
}
#define find_next_zero_bit_le find_next_zero_bit_le
436
437
/*
 * find_first_bit_le() - first set bit in little-endian numbering, or
 * size if none is found within the first size bits.
 */
static inline int find_first_bit_le(const void *vaddr, unsigned size)
{
	const unsigned long *p = vaddr, *addr = vaddr;
	int res = 0;
	unsigned int words;

	if (!size)
		return 0;

	/* Number of 32-bit words to scan, rounded up. */
	words = (size >> 5) + ((size & 31) > 0);
	/* Skip words that are entirely zero. */
	while (*p++ == 0UL) {
		if (--words == 0)
			goto out;
	}

	/* Step back to the word with a set bit and scan its bits in
	 * little-endian order. */
	--p;
	for (res = 0; res < 32; res++)
		if (test_bit_le(res, p))
			break;
out:
	res += (p - addr) * 32;
	return res < size ? res : size;
}
#define find_first_bit_le find_first_bit_le
461
462
/*
 * find_next_bit_le() - first set bit at or after offset in
 * little-endian numbering, or size if there is none before size.
 */
static inline unsigned long find_next_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr;
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	p += offset >> 5;

	if (bit) {
		offset -= bit;
		/* Look for one in first longword */
		for (res = bit; res < 32; res++)
			if (test_bit_le(res, p)) {
				offset += res;
				return offset < size ? offset : size;
			}
		p++;
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No set bit yet, search remaining full bytes for a set bit */
	return offset + find_first_bit_le(p, size - offset);
}
#define find_next_bit_le find_next_bit_le
491
492
/* Bitmap functions for the ext2 filesystem. */

/*
 * The lock argument is unused: the underlying little-endian bit
 * operations are already atomic on m68k.
 */
#define ext2_set_bit_atomic(lock, nr, addr) \
	test_and_set_bit_le(nr, addr)
#define ext2_clear_bit_atomic(lock, nr, addr) \
	test_and_clear_bit_le(nr, addr)
498
499
#endif /* __KERNEL__ */
500
501
#endif /* _M68K_BITOPS_H */
502
503