GitHub Repository: torvalds/linux
Path: blob/master/include/asm-generic/bitops/lock.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/**
 * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @p: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics if
 * the returned value is 0.
 * It can be used to implement bit locks.
 */
static __always_inline int
arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
{
	long old;
	unsigned long mask = BIT_MASK(nr);

	p += BIT_WORD(nr);
	if (READ_ONCE(*p) & mask)
		return 1;

	old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
	return !!(old & mask);
}
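
/*
 * Editorial sketch, not part of the upstream header: a minimal bit
 * spinlock acquire built on arch_test_and_set_bit_lock().  The helper
 * name is hypothetical; cpu_relax() (from <asm/processor.h>) is
 * assumed as the spin-wait hint.  A return of 0 means the fetch-or
 * set the bit and provided the acquire ordering a lock needs.
 */
static __always_inline void example_bit_lock(unsigned int nr,
					     volatile unsigned long *p)
{
	/* Spin until we observe the bit clear and atomically set it. */
	while (arch_test_and_set_bit_lock(nr, p))
		cpu_relax();
}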

/**
 * arch_clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @p: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static __always_inline void
arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
	p += BIT_WORD(nr);
	raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
}
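
/*
 * Editorial sketch: the matching unlock for example_bit_lock() above.
 * The release semantics of the fetch-andnot ensure that every store
 * made inside the critical section is visible before another CPU can
 * observe the bit clear and take the lock.
 */
static __always_inline void example_bit_unlock(unsigned int nr,
					       volatile unsigned long *p)
{
	arch_clear_bit_unlock(nr, p);
}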

/**
 * arch___clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @p: the address to start counting from
 *
 * A weaker form of clear_bit_unlock() as used by __bit_spin_unlock(). If all
 * the bits in the word are protected by this lock some archs can use weaker
 * ops to safely unlock.
 *
 * See for example x86's implementation.
 */
static inline void
arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
	unsigned long old;

	p += BIT_WORD(nr);
	old = READ_ONCE(*p);
	old &= ~BIT_MASK(nr);
	raw_atomic_long_set_release((atomic_long_t *)p, old);
}
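
/*
 * Editorial sketch: the weaker unlock is only safe when *every* bit in
 * the word is protected by the lock bit, so the plain read-modify-write
 * above cannot race with an update to a neighbouring bit.  The bit
 * layout and helper below are hypothetical; __set_bit() is the kernel's
 * non-atomic set, legal here because the lock is still held.
 */
#define EXAMPLE_LOCK_BIT	0
#define EXAMPLE_DIRTY_BIT	1

static inline void example_mark_dirty_and_unlock(volatile unsigned long *word)
{
	__set_bit(EXAMPLE_DIRTY_BIT, word);	/* lock held: no race */
	arch___clear_bit_unlock(EXAMPLE_LOCK_BIT, word);
}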

/*
 * XOR @mask into *p with release semantics and return whether bit 7 of
 * the old value was set, i.e. whether the low byte was negative when
 * read as a signed 8-bit value.  Architectures may override this with
 * a cheaper implementation.
 */
#ifndef arch_xor_unlock_is_negative_byte
static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *p)
{
	long old;

	old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p);
	return !!(old & BIT(7));
}
#endif
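
/*
 * Editorial sketch: the op above lets a caller drop a lock bit and, in
 * the same atomic operation, learn whether bit 7 of the word was set.
 * With a hypothetical layout that keeps a waiters flag in bit 7, one
 * call both unlocks and reports whether sleepers need waking.
 */
#define EXAMPLE_LOCKED_BIT	0	/* hypothetical lock bit    */
#define EXAMPLE_WAITERS_BIT	7	/* hypothetical waiters bit */

static inline bool example_unlock_and_check_waiters(volatile unsigned long *word)
{
	/* Clears the lock bit; true means the waiters bit was set. */
	return arch_xor_unlock_is_negative_byte(BIT(EXAMPLE_LOCKED_BIT), word);
}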

#include <asm-generic/bitops/instrumented-lock.h>

#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */