GitHub Repository: torvalds/linux
Path: blob/master/tools/include/asm-generic/bitops/non-atomic.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_

#include <linux/bits.h>
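
/*
 * All of the helpers below locate the target bit with BIT_MASK() and
 * BIT_WORD() from <linux/bits.h>, which essentially expand to
 *
 *	BIT_MASK(nr) -> 1UL << (nr % BITS_PER_LONG)
 *	BIT_WORD(nr) -> nr / BITS_PER_LONG
 *
 * i.e. @nr is split into a word index into the bitmap and a bit offset
 * within that word.
 */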

/**
 * ___set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}
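
/**
 * ___clear_bit - Clear a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */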
static __always_inline void
___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p &= ~mask;
}
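
/*
 * Illustrative example (assuming a 64-bit BITS_PER_LONG): the helpers above
 * index a multi-word bitmap as follows.
 *
 *	unsigned long map[2] = { 0, 0 };
 *
 *	___set_bit(70, map);	// sets bit 70 % 64 = 6 of word 70 / 64 = map[1]
 *	___clear_bit(70, map);	// clears that same bit again
 */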

/**
 * ___change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p ^= mask;
}

/**
 * ___test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __always_inline bool
___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}
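
/*
 * Illustrative example (sketch only; map, map_lock and already_set are
 * placeholder names): because ___test_and_set_bit() is non-atomic,
 * concurrent users must provide their own serialization, e.g. by holding
 * a lock around the read-modify-write:
 *
 *	spin_lock(&map_lock);
 *	already_set = ___test_and_set_bit(nr, map);
 *	spin_unlock(&map_lock);
 *
 * The return value tells the caller whether the bit was already set.
 */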

/**
 * ___test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __always_inline bool
___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}
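
/**
 * ___test_and_change_bit - Toggle a bit and return its old value
 * @nr: Bit to toggle
 * @addr: Address to count from
 */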
/* WARNING: non-atomic and it can be reordered! */
static __always_inline bool
___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}

/**
 * _test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __always_inline bool
_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
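
/*
 * Note: since BITS_PER_LONG is a power of two, nr & (BITS_PER_LONG - 1)
 * equals nr % BITS_PER_LONG, so the check above is equivalent to testing
 *
 *	(addr[BIT_WORD(nr)] & BIT_MASK(nr)) != 0
 *
 * which matches the mask-based style of the helpers above.
 */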

#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */