GitHub Repository: torvalds/linux
Path: blob/master/arch/hexagon/include/asm/bitops.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Bit operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/barrier.h>

#ifdef __KERNEL__

/*
 * The offset calculations for these are based on BITS_PER_LONG == 32:
 * shift right by #5 for the word index, shift that left by #2 for the
 * byte offset (32 bits per long, 4 bytes per access), and mask by
 * 0x0000001F for the bit number within the word.
 *
 * Typically, R10 is clobbered for the address, R11 for the bit nr, and
 * R12 as a temp.
 */
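
/*
 * Worked example of the decomposition above (illustrative): for nr = 37,
 * asr(37,#5) = 1 selects word 1, asl(1,#2) = 4 advances the address by
 * four bytes, and and(37,#0x1f) = 5 selects bit 5 within that word.
 */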

/**
 * test_and_clear_bit - clear a bit and return its old value
 * @nr: bit number to clear
 * @addr: pointer to memory
 */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
	int oldval;

	__asm__ __volatile__ (
	"	{R10 = %1; R11 = asr(%2,#5); }\n"
	"	{R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
	"1:	R12 = memw_locked(R10);\n"
	"	{ P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
	"	memw_locked(R10,P1) = R12;\n"
	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
	: "=&r" (oldval)
	: "r" (addr), "r" (nr)
	: "r10", "r11", "r12", "p0", "p1", "memory"
	);

	return oldval;
}
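
/*
 * C-level sketch of the load-locked/store-conditional loop shared by
 * test_and_clear_bit() above and the test_and_set_bit() and
 * test_and_change_bit() variants below (illustrative only; the real
 * sequence must stay in asm to be atomic):
 *
 *	do {
 *		old = *word;		// R12 = memw_locked(R10)
 *		new = old & ~BIT(bit);	// clrbit / setbit / togglebit
 *	} while (store-conditional of new fails);
 *	return (old >> bit) & 1;	// P0 = tstbit; %0 = mux(P0,#1,#0)
 */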

/**
 * test_and_set_bit - set a bit and return its old value
 * @nr: bit number to set
 * @addr: pointer to memory
 */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
	int oldval;

	__asm__ __volatile__ (
	"	{R10 = %1; R11 = asr(%2,#5); }\n"
	"	{R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
	"1:	R12 = memw_locked(R10);\n"
	"	{ P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
	"	memw_locked(R10,P1) = R12;\n"
	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
	: "=&r" (oldval)
	: "r" (addr), "r" (nr)
	: "r10", "r11", "r12", "p0", "p1", "memory"
	);

	return oldval;
}

/**
 * test_and_change_bit - toggle a bit and return its old value
 * @nr: bit number to toggle
 * @addr: pointer to memory
 */
static inline int test_and_change_bit(int nr, volatile void *addr)
{
	int oldval;

	__asm__ __volatile__ (
	"	{R10 = %1; R11 = asr(%2,#5); }\n"
	"	{R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
	"1:	R12 = memw_locked(R10);\n"
	"	{ P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
	"	memw_locked(R10,P1) = R12;\n"
	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
	: "=&r" (oldval)
	: "r" (addr), "r" (nr)
	: "r10", "r11", "r12", "p0", "p1", "memory"
	);

	return oldval;
}

/*
 * Atomic, but doesn't care about the return value.
 * Rewrite later to save a cycle or two.
 */

static inline void clear_bit(int nr, volatile void *addr)
{
	test_and_clear_bit(nr, addr);
}

static inline void set_bit(int nr, volatile void *addr)
{
	test_and_set_bit(nr, addr);
}

static inline void change_bit(int nr, volatile void *addr)
{
	test_and_change_bit(nr, addr);
}
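
/*
 * Illustrative usage of the atomic wrappers above on a kernel bitmap;
 * the bitmap and handler names here are hypothetical:
 *
 *	DECLARE_BITMAP(pending, 64);
 *
 *	set_bit(37, pending);		// atomically sets bit 5 of word 1
 *	if (test_and_clear_bit(37, pending))
 *		handle_event();		// bit was set and is now clear
 */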

/*
 * These are allowed to be non-atomic.  In fact the generic flavors are
 * in non-atomic.h.  Would it be better to use intrinsics for this?
 *
 * OK, writes in our architecture do not invalidate LL/SC, so this has to
 * be atomic, particularly for things like slab_lock and slab_unlock.
 */
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	test_and_clear_bit(nr, addr);
}

static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	test_and_set_bit(nr, addr);
}

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	test_and_change_bit(nr, addr);
}

/* Apparently, at least some of these are allowed to be non-atomic */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_clear_bit(nr, addr);
}

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_change_bit(nr, addr);
}

static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	int retval;

	asm volatile(
	"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
	: "=&r" (retval)
	: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
	: "p0"
	);

	return retval;
}
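
/*
 * Note on the sequence above: P0.new reads the predicate value that
 * tstbit produces earlier in the same packet, so the test and both
 * conditional transfers of #1/#0 issue together in a single packet.
 */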

static __always_inline bool
arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
{
	int retval;

	asm volatile(
	"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
	: "=&r" (retval)
	: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
	: "p0", "memory"
	);

	return retval;
}
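
/*
 * This differs from arch_test_bit() only in the "memory" clobber, a
 * compiler barrier that keeps later accesses from being reordered before
 * the load; presumably that suffices for acquire ordering here.
 */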

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline long ffz(int x)
{
	int r;

	asm("%0 = ct1(%1);\n"
	: "=&r" (r)
	: "r" (x));
	return r;
}
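
/*
 * ct1 counts trailing ones, which is exactly the index of the first zero
 * bit: for example, ct1(0x0000000f) = 4, so ffz(0x0000000f) == 4.
 */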

/*
 * fls - find last (most-significant) bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	asm("{ %0 = cl0(%1);}\n"
	"%0 = sub(#32,%0);\n"
	: "=&r" (r)
	: "r" (x)
	: "p0");

	return r;
}
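
/*
 * cl0 counts leading zeros, so fls is 32 minus that count: cl0(1) = 31
 * gives fls(1) = 1, and cl0(0) = 32 gives the documented fls(0) = 0.
 */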

/*
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	int r;

	asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
	"{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
	: "=&r" (r)
	: "r" (x)
	: "p0");

	return r;
}
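
/*
 * ct0 counts trailing zeros and the second packet converts to 1-based
 * numbering: ffs(0x10) = 4 + 1 = 5, while the cmp.eq guard yields
 * ffs(0) = 0.
 */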

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 *
 * BITS_PER_LONG assumed to be 32.
 * Numbering starts at 0 (instead of 1 like ffs).
 */
static inline unsigned long __ffs(unsigned long word)
{
	int num;

	asm("%0 = ct0(%1);\n"
	: "=&r" (num)
	: "r" (word));

	return num;
}
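
/*
 * Same ct0 as ffs, but without the +1 adjustment: __ffs(0x10) == 4.
 */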

/*
 * __fls - find last (most-significant) set bit in a long word
 * @word: the word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 * BITS_PER_LONG assumed to be 32.
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	asm("%0 = cl0(%1);\n"
	"%0 = sub(#31,%0);\n"
	: "=&r" (num)
	: "r" (word));

	return num;
}
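
/*
 * Zero-based counterpart of fls: 31 - cl0, e.g. __fls(0x80000000) == 31.
 */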

#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/non-instrumented-non-atomic.h>

#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */
#endif /* _ASM_BITOPS_H */