Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/sparc/lib/atomic32.c
10817 views
1
/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */
9
10
#include <asm/atomic.h>
11
#include <linux/spinlock.h>
12
#include <linux/module.h>
13
14
#ifdef CONFIG_SMP
/*
 * On SMP, spread the serializing locks across a small array so that
 * concurrent atomics on unrelated addresses rarely contend on the
 * same spinlock.  ATOMIC_HASH() picks a lock from the address: bits
 * [9:8] of the pointer index one of the ATOMIC_HASH_SIZE locks.
 */
#define ATOMIC_HASH_SIZE 4
#define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

/* All hash buckets start out unlocked (GNU range-designated initializer). */
spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

/*
 * On UP there is no cross-CPU contention; a single dummy lock suffices.
 * The lock still matters because spin_lock_irqsave() disables local
 * IRQs, which is what makes the operations atomic w.r.t. interrupts.
 */
static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE 1
#define ATOMIC_HASH(a) (&dummy)

#endif /* SMP */
29
30
/*
 * Atomically add @i to @v->counter and return the resulting value.
 *
 * The update is performed under the hashed spinlock with local IRQs
 * disabled, which is how this file implements 32-bit atomics.
 */
int __atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int new;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter += i;
	new = v->counter;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);

	return new;
}
EXPORT_SYMBOL(__atomic_add_return);
42
43
/*
 * Compare-and-exchange on @v: if @v->counter equals @old, store @new.
 *
 * Returns the value @v->counter held before the (possible) store, so
 * callers can check success with "return value == old".
 */
int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	prev = v->counter;
	if (likely(prev == old))
		v->counter = new;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);

	return prev;
}
EXPORT_SYMBOL(atomic_cmpxchg);
57
58
/*
 * Add @a to @v->counter unless the counter currently equals @u.
 *
 * Returns non-zero if the addition was performed, zero if the counter
 * already held @u (in which case it is left untouched).
 */
int atomic_add_unless(atomic_t *v, int a, int u)
{
	unsigned long flags;
	int old;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	old = v->counter;
	if (old != u)
		v->counter += a;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);

	return old != u;
}
EXPORT_SYMBOL(atomic_add_unless);
71
72
/* Atomic operations are already serializing */
73
void atomic_set(atomic_t *v, int i)
74
{
75
unsigned long flags;
76
77
spin_lock_irqsave(ATOMIC_HASH(v), flags);
78
v->counter = i;
79
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
80
}
81
EXPORT_SYMBOL(atomic_set);
82
83
/*
 * Atomically set the bits of @mask in *@addr.
 *
 * Returns the old value of *@addr masked with @mask — non-zero iff
 * any of the requested bits were already set (test-and-set semantics).
 */
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long prev, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	prev = *addr;
	*addr = prev | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return prev & mask;
}
EXPORT_SYMBOL(___set_bit);
95
96
/*
 * Atomically clear the bits of @mask in *@addr.
 *
 * Returns the old value of *@addr masked with @mask — non-zero iff
 * any of the requested bits were set before clearing.
 */
unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long prev, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	prev = *addr;
	*addr = prev & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return prev & mask;
}
EXPORT_SYMBOL(___clear_bit);
108
109
/*
 * Atomically toggle the bits of @mask in *@addr.
 *
 * Returns the old value of *@addr masked with @mask — non-zero iff
 * any of the toggled bits were set before the flip.
 */
unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long prev, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	prev = *addr;
	*addr = prev ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return prev & mask;
}
EXPORT_SYMBOL(___change_bit);
121
122
/*
 * Compare-and-exchange on an arbitrary 32-bit word: if *@ptr equals
 * @old, store @new into it.
 *
 * Returns the value *@ptr held before the (possible) store, widened
 * to unsigned long.
 */
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
	unsigned long flags;
	u32 seen;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	seen = *ptr;
	if (seen == old)
		*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)seen;
}
EXPORT_SYMBOL(__cmpxchg_u32);
135
136