GitHub Repository: awilliam/linux-vfio
Path: blob/master/lib/atomic64.c
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable. Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
        spinlock_t lock;
        char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;

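/*
 * Hash an atomic64_t's address to one of the NR_LOCKS spinlocks.
 * The low L1_CACHE_SHIFT bits are discarded (variables within one
 * cacheline share a lock anyway), and higher address bits are folded
 * in with XOR so the locks get used reasonably evenly.
 */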
static inline spinlock_t *lock_addr(const atomic64_t *v)
{
        unsigned long addr = (unsigned long) v;

        addr >>= L1_CACHE_SHIFT;
        addr ^= (addr >> 8) ^ (addr >> 16);
        return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

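/*
 * Every operation below takes the hashed lock with interrupts
 * disabled (spin_lock_irqsave), so a 64-bit access cannot be torn
 * by another CPU or by an interrupt handler on the same CPU.
 */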
long long atomic64_read(const atomic64_t *v)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        long long val;

        spin_lock_irqsave(lock, flags);
        val = v->counter;
        spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);

        spin_lock_irqsave(lock, flags);
        v->counter = i;
        spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

void atomic64_add(long long a, atomic64_t *v)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);

        spin_lock_irqsave(lock, flags);
        v->counter += a;
        spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);

long long atomic64_add_return(long long a, atomic64_t *v)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        long long val;

        spin_lock_irqsave(lock, flags);
        val = v->counter += a;
        spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_add_return);

void atomic64_sub(long long a, atomic64_t *v)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);

        spin_lock_irqsave(lock, flags);
        v->counter -= a;
        spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);

long long atomic64_sub_return(long long a, atomic64_t *v)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        long long val;

        spin_lock_irqsave(lock, flags);
        val = v->counter -= a;
        spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_sub_return);

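/*
 * Decrement only if the result stays non-negative.  Note that the
 * decremented value is returned even when it is negative and the
 * store is skipped, so callers test the sign of the return value
 * to learn whether the decrement happened.
 */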
long long atomic64_dec_if_positive(atomic64_t *v)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        long long val;

        spin_lock_irqsave(lock, flags);
        val = v->counter - 1;
        if (val >= 0)
                v->counter = val;
        spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);

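/*
 * Compare-and-exchange: 'n' is stored only if the counter still
 * equals 'o'.  The old value is returned either way, so the swap
 * succeeded iff the return value equals 'o'.
 */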
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        long long val;

        spin_lock_irqsave(lock, flags);
        val = v->counter;
        if (val == o)
                v->counter = n;
        spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);

long long atomic64_xchg(atomic64_t *v, long long new)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        long long val;

        spin_lock_irqsave(lock, flags);
        val = v->counter;
        v->counter = new;
        spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_xchg);

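/*
 * Add 'a' to the counter unless it currently holds 'u'.  Returns 1
 * if the addition was performed, 0 if it was skipped.
 */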
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
        unsigned long flags;
        spinlock_t *lock = lock_addr(v);
        int ret = 0;

        spin_lock_irqsave(lock, flags);
        if (v->counter != u) {
                v->counter += a;
                ret = 1;
        }
        spin_unlock_irqrestore(lock, flags);
        return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);

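/*
 * Initialise the lock array.  pure_initcall runs at the earliest
 * initcall level, so the locks are ready before ordinary initcalls
 * can touch an atomic64_t.
 */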
static int init_atomic64_lock(void)
{
        int i;

        for (i = 0; i < NR_LOCKS; ++i)
                spin_lock_init(&atomic64_lock[i].lock);
        return 0;
}

pure_initcall(init_atomic64_lock);
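
/*
 * Hypothetical usage sketch (not part of the original file): a
 * 64-bit statistics counter that stays correct on 32-bit SMP
 * machines without native 64-bit atomics.  ATOMIC64_INIT and
 * atomic64_add are the standard kernel interfaces implemented
 * above; the counter and function names here are made up.
 *
 *	static atomic64_t bytes_received = ATOMIC64_INIT(0);
 *
 *	static void account_rx(unsigned long len)
 *	{
 *		atomic64_add(len, &bytes_received);
 *	}
 */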