GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/alpha/include/asm/bitops.h
#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/compiler.h>
#include <asm/barrier.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */

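/*
 * A reader-oriented sketch (not part of this header): the ldl_l/stl_c
 * pairs below form Alpha's load-locked/store-conditional retry loop.
 * In rough C, set_bit's assembly behaves like the following, where
 * load_locked() and store_conditional() are hypothetical helpers named
 * only for illustration:
 *
 *	do {
 *		old = load_locked(m);			// ldl_l
 *		new = old | (1UL << (nr & 31));		// bis
 *	} while (!store_conditional(m, new));		// stl_c; 0 => retry
 *
 * The retry branch lives in .subsection 2 so that the fall-through
 * (success) path predicts as the common case.
 */
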
static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non-atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}

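/*
 * Illustrative usage (hypothetical bitmap, not part of this header):
 * set_bit() is safe against concurrent updates of the same word;
 * __set_bit() is only safe when the caller already excludes other
 * writers, e.g. under a lock or before the bitmap is visible to other
 * CPUs.
 *
 *	static unsigned long flags[1];	// example storage
 *
 *	set_bit(3, flags);	// atomic read-modify-write
 *	__set_bit(4, flags);	// plain |=, caller must serialize
 */
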
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	clear_bit(nr, addr);
}

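/*
 * Sketch of the release ordering above (hypothetical lock word, not
 * part of this header): the smp_mb() makes every store in the critical
 * section visible before the bit clears, so clear_bit_unlock() can
 * publish an unlock:
 *
 *	shared_data = result;			// critical-section store
 *	clear_bit_unlock(0, &lock_word);	// mb, then atomic clear
 */
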
/*
 * WARNING: non-atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}

static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non-atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

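/*
 * Illustrative once-only pattern (hypothetical names, not part of this
 * header): because test_and_set_bit() atomically returns the old bit
 * value, exactly one caller observes 0 and wins:
 *
 *	if (!test_and_set_bit(INIT_DONE_BIT, &state))
 *		do_one_time_init();	// only the winner gets here
 */
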
static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

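/*
 * Sketch of a bit spinlock built from the pair above (hypothetical
 * lock word, not part of this header): acquire with
 * test_and_set_bit_lock(), release with clear_bit_unlock().
 *
 *	while (test_and_set_bit_lock(0, &lock_word))
 *		;			// spin until the old bit was 0
 *	// ... critical section ...
 *	clear_bit_unlock(0, &lock_word);
 *
 * Note that the acquire variant keeps only the trailing mb, which is
 * presumably why it omits the leading CONFIG_SMP barrier that
 * test_and_set_bit() carries.
 */
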
/*
 * WARNING: non-atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non-atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non-atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}

static inline int
test_bit(int nr, const volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}

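/*
 * Worked example (values invented for illustration): with the word at
 * addr holding 0x12 (binary 10010), test_bit(1, addr) and
 * test_bit(4, addr) return nonzero, while test_bit(0, addr) returns 0.
 * This is a plain load; it makes no atomicity or ordering guarantee.
 */
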
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits. Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;
	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}

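/*
 * Worked example (illustrative only): ffz_b(0x07). The expression
 * ~x & -~x isolates the lowest clear bit, giving 0x08. Then
 * 0x08 & 0xCC is nonzero (+2), 0x08 & 0xF0 is zero (+0), and
 * 0x08 & 0xAA is nonzero (+1), so the result is 3, the index of the
 * first zero bit of 0x07. The masks 0xAA/0xCC/0xF0 test bit-index
 * weights 1, 2, and 4 respectively.
 */
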
static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee. EV67 can calculate it directly. */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}

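/*
 * How the generic path narrows the search (illustrative walk-through):
 * cmpbge compares all eight bytes of word against 0xff at once, so
 * ffz_b on its result picks the first byte that is not all-ones
 * (qofs), extbl extracts that byte, and a second ffz_b finds the zero
 * bit within it. For word = ~0UL ^ (1UL << 12), byte 1 is the first
 * non-0xff byte and bit 4 within it is clear, giving 1*8 + 4 = 12.
 */
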
/*
 * __ffs = Find First set bit in word. Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee. EV67 can calculate it directly. */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}

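/*
 * Note the duality (illustrative): __ffs(word) equals ffz(~word), and
 * the generic path above mirrors ffz with the operands inverted. For
 * example, __ffs(0x80) == 7 and __ffs(1UL << 40) == 40.
 */
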
#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}

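/*
 * Illustration of the two numbering conventions: ffs() is 1-based and
 * defines ffs(0) == 0, while __ffs() is 0-based and undefined for 0.
 * So ffs(0x10) == 5 but __ffs(0x10) == 4.
 */
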
/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
	return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
	unsigned long t, a, r;

	t = __kernel_cmpbge (x, 0x0101010101010101UL);
	a = __flsm1_tab[t];
	t = __kernel_extbl (x, a);
	r = a*8 + __flsm1_tab[t] + (x != 0);

	return r;
}
#endif

static inline unsigned long __fls(unsigned long x)
{
	return fls64(x) - 1;
}

static inline int fls(int x)
{
	return fls64((unsigned int) x);
}

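/*
 * Worked values (illustrative): fls64(1) == 1, fls64(0x8000) == 16,
 * and fls64(0) == 0; the (x != 0) term supplies the zero-input case in
 * the table-driven variant. __fls() is the 0-based counterpart, so
 * __fls(0x8000) == 15.
 */
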
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee. EV67 can calculate it directly. */
static inline unsigned long __arch_hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
	return __arch_hweight64(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
	return __arch_hweight64(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
	return __arch_hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/arch_hweight.h>
#endif

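/*
 * Example values (illustrative): __arch_hweight8(0xff) == 8,
 * __arch_hweight16(0xf0f0) == 8, __arch_hweight32(0x1) == 1. The
 * narrower variants simply mask and defer to the 64-bit population
 * count.
 */
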
#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 100-bit bitmap. It's guaranteed that at least
 * one of the 100 bits is set.
 */
static inline unsigned long
sched_find_first_bit(const unsigned long b[2])
{
	unsigned long b0, b1, ofs, tmp;

	b0 = b[0];
	b1 = b[1];
	ofs = (b0 ? 0 : 64);
	tmp = (b0 ? b0 : b1);

	return __ffs(tmp) + ofs;
}

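/*
 * Illustrative trace (invented values): with b[0] == 0 and
 * b[1] == 0x4, b0 is zero, so ofs = 64 and tmp = b[1]; __ffs(0x4) is
 * 2, so the function returns 66, the first set bit counting across
 * both 64-bit words.
 */
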
#include <asm-generic/bitops/le.h>

#define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
#define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */