GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/openzfs/lib/libspl/atomic.c
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2009 by Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <atomic.h>
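
/*
 * This file supplies out-of-line implementations of the atomic_*()
 * interfaces declared in <atomic.h>, built on the GCC/Clang __atomic
 * builtins; sequentially consistent ordering is used except where a
 * weaker order is spelled out below.
 */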

/*
 * These are the void returning variants
 */
#define	ATOMIC_INC(name, type) \
	void atomic_inc_##name(volatile type *target) \
	{ \
		(void) __atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST); \
	}

ATOMIC_INC(8, uint8_t)
ATOMIC_INC(16, uint16_t)
ATOMIC_INC(32, uint32_t)
ATOMIC_INC(64, uint64_t)
ATOMIC_INC(uchar, uchar_t)
ATOMIC_INC(ushort, ushort_t)
ATOMIC_INC(uint, uint_t)
ATOMIC_INC(ulong, ulong_t)
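
/*
 * For reference, a sketch of what one instantiation above expands to
 * after preprocessing (whitespace aside):
 *
 *	void atomic_inc_32(volatile uint32_t *target)
 *	{
 *		(void) __atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST);
 *	}
 */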


#define	ATOMIC_DEC(name, type) \
	void atomic_dec_##name(volatile type *target) \
	{ \
		(void) __atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST); \
	}

ATOMIC_DEC(8, uint8_t)
ATOMIC_DEC(16, uint16_t)
ATOMIC_DEC(32, uint32_t)
ATOMIC_DEC(64, uint64_t)
ATOMIC_DEC(uchar, uchar_t)
ATOMIC_DEC(ushort, ushort_t)
ATOMIC_DEC(uint, uint_t)
ATOMIC_DEC(ulong, ulong_t)


#define	ATOMIC_ADD(name, type1, type2) \
	void atomic_add_##name(volatile type1 *target, type2 bits) \
	{ \
		(void) __atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST); \
	}
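
/*
 * Note: the __atomic builtins operate on pointer operands as if they
 * were uintptr_t values, without scaling by the pointed-to size, so the
 * pointer variants below adjust the stored pointer by "bits" bytes.
 */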
void
atomic_add_ptr(volatile void *target, ssize_t bits)
{
	(void) __atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
}

ATOMIC_ADD(8, uint8_t, int8_t)
ATOMIC_ADD(16, uint16_t, int16_t)
ATOMIC_ADD(32, uint32_t, int32_t)
ATOMIC_ADD(64, uint64_t, int64_t)
ATOMIC_ADD(char, uchar_t, signed char)
ATOMIC_ADD(short, ushort_t, short)
ATOMIC_ADD(int, uint_t, int)
ATOMIC_ADD(long, ulong_t, long)


#define	ATOMIC_SUB(name, type1, type2) \
	void atomic_sub_##name(volatile type1 *target, type2 bits) \
	{ \
		(void) __atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST); \
	}

void
atomic_sub_ptr(volatile void *target, ssize_t bits)
{
	(void) __atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
}

ATOMIC_SUB(8, uint8_t, int8_t)
ATOMIC_SUB(16, uint16_t, int16_t)
ATOMIC_SUB(32, uint32_t, int32_t)
ATOMIC_SUB(64, uint64_t, int64_t)
ATOMIC_SUB(char, uchar_t, signed char)
ATOMIC_SUB(short, ushort_t, short)
ATOMIC_SUB(int, uint_t, int)
ATOMIC_SUB(long, ulong_t, long)


#define	ATOMIC_OR(name, type) \
	void atomic_or_##name(volatile type *target, type bits) \
	{ \
		(void) __atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST); \
	}

ATOMIC_OR(8, uint8_t)
ATOMIC_OR(16, uint16_t)
ATOMIC_OR(32, uint32_t)
ATOMIC_OR(64, uint64_t)
ATOMIC_OR(uchar, uchar_t)
ATOMIC_OR(ushort, ushort_t)
ATOMIC_OR(uint, uint_t)
ATOMIC_OR(ulong, ulong_t)


#define	ATOMIC_AND(name, type) \
	void atomic_and_##name(volatile type *target, type bits) \
	{ \
		(void) __atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST); \
	}

ATOMIC_AND(8, uint8_t)
ATOMIC_AND(16, uint16_t)
ATOMIC_AND(32, uint32_t)
ATOMIC_AND(64, uint64_t)
ATOMIC_AND(uchar, uchar_t)
ATOMIC_AND(ushort, ushort_t)
ATOMIC_AND(uint, uint_t)
ATOMIC_AND(ulong, ulong_t)
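
/*
 * Usage sketch for the OR/AND pairs above: set and clear flag bits
 * atomically ("flags" and FLAG_BUSY are hypothetical):
 *
 *	atomic_or_uint(&flags, FLAG_BUSY);	set the bit
 *	atomic_and_uint(&flags, ~FLAG_BUSY);	clear the bit
 */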


/*
 * New value returning variants
 */

#define	ATOMIC_INC_NV(name, type) \
	type atomic_inc_##name##_nv(volatile type *target) \
	{ \
		return (__atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST)); \
	}

ATOMIC_INC_NV(8, uint8_t)
ATOMIC_INC_NV(16, uint16_t)
ATOMIC_INC_NV(32, uint32_t)
ATOMIC_INC_NV(64, uint64_t)
ATOMIC_INC_NV(uchar, uchar_t)
ATOMIC_INC_NV(ushort, ushort_t)
ATOMIC_INC_NV(uint, uint_t)
ATOMIC_INC_NV(ulong, ulong_t)


#define	ATOMIC_DEC_NV(name, type) \
	type atomic_dec_##name##_nv(volatile type *target) \
	{ \
		return (__atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST)); \
	}

ATOMIC_DEC_NV(8, uint8_t)
ATOMIC_DEC_NV(16, uint16_t)
ATOMIC_DEC_NV(32, uint32_t)
ATOMIC_DEC_NV(64, uint64_t)
ATOMIC_DEC_NV(uchar, uchar_t)
ATOMIC_DEC_NV(ushort, ushort_t)
ATOMIC_DEC_NV(uint, uint_t)
ATOMIC_DEC_NV(ulong, ulong_t)
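
/*
 * The _nv forms return the value produced by this thread's update, so
 * exactly one thread can observe a transition of interest.  A typical
 * reference-count release (names hypothetical):
 *
 *	if (atomic_dec_32_nv(&obj->ref) == 0)
 *		obj_free(obj);
 */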


#define	ATOMIC_ADD_NV(name, type1, type2) \
	type1 atomic_add_##name##_nv(volatile type1 *target, type2 bits) \
	{ \
		return (__atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST)); \
	}

void *
atomic_add_ptr_nv(volatile void *target, ssize_t bits)
{
	return (__atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST));
}

ATOMIC_ADD_NV(8, uint8_t, int8_t)
ATOMIC_ADD_NV(16, uint16_t, int16_t)
ATOMIC_ADD_NV(32, uint32_t, int32_t)
ATOMIC_ADD_NV(64, uint64_t, int64_t)
ATOMIC_ADD_NV(char, uchar_t, signed char)
ATOMIC_ADD_NV(short, ushort_t, short)
ATOMIC_ADD_NV(int, uint_t, int)
ATOMIC_ADD_NV(long, ulong_t, long)


#define	ATOMIC_SUB_NV(name, type1, type2) \
	type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits) \
	{ \
		return (__atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST)); \
	}

void *
atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
{
	return (__atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST));
}

ATOMIC_SUB_NV(8, uint8_t, int8_t)
ATOMIC_SUB_NV(char, uchar_t, signed char)
ATOMIC_SUB_NV(16, uint16_t, int16_t)
ATOMIC_SUB_NV(short, ushort_t, short)
ATOMIC_SUB_NV(32, uint32_t, int32_t)
ATOMIC_SUB_NV(int, uint_t, int)
ATOMIC_SUB_NV(long, ulong_t, long)
ATOMIC_SUB_NV(64, uint64_t, int64_t)


#define	ATOMIC_OR_NV(name, type) \
	type atomic_or_##name##_nv(volatile type *target, type bits) \
	{ \
		return (__atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST)); \
	}

ATOMIC_OR_NV(8, uint8_t)
ATOMIC_OR_NV(16, uint16_t)
ATOMIC_OR_NV(32, uint32_t)
ATOMIC_OR_NV(64, uint64_t)
ATOMIC_OR_NV(uchar, uchar_t)
ATOMIC_OR_NV(ushort, ushort_t)
ATOMIC_OR_NV(uint, uint_t)
ATOMIC_OR_NV(ulong, ulong_t)


#define	ATOMIC_AND_NV(name, type) \
	type atomic_and_##name##_nv(volatile type *target, type bits) \
	{ \
		return (__atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST)); \
	}

ATOMIC_AND_NV(8, uint8_t)
ATOMIC_AND_NV(16, uint16_t)
ATOMIC_AND_NV(32, uint32_t)
ATOMIC_AND_NV(64, uint64_t)
ATOMIC_AND_NV(uchar, uchar_t)
ATOMIC_AND_NV(ushort, ushort_t)
ATOMIC_AND_NV(uint, uint_t)
ATOMIC_AND_NV(ulong, ulong_t)


/*
 * If *tgt == exp, set *tgt = des; return old value
 *
 * This may not look right on the first pass (or the sixteenth), but,
 * from https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html:
 * > If they are not equal, the operation is a read
 * > and the current contents of *ptr are written into *expected.
 * And, in the converse case, exp is already *target by definition.
 */

#define	ATOMIC_CAS(name, type) \
	type atomic_cas_##name(volatile type *target, type exp, type des) \
	{ \
		__atomic_compare_exchange_n(target, &exp, des, B_FALSE, \
		    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
		return (exp); \
	}

void *
atomic_cas_ptr(volatile void *target, void *exp, void *des)
{
	__atomic_compare_exchange_n((void **)target, &exp, des, B_FALSE,
	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return (exp);
}

ATOMIC_CAS(8, uint8_t)
ATOMIC_CAS(16, uint16_t)
ATOMIC_CAS(32, uint32_t)
ATOMIC_CAS(64, uint64_t)
ATOMIC_CAS(uchar, uchar_t)
ATOMIC_CAS(ushort, ushort_t)
ATOMIC_CAS(uint, uint_t)
ATOMIC_CAS(ulong, ulong_t)
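
/*
 * Usage sketch: because atomic_cas_*() returns the old value, a retry
 * loop succeeds exactly when the value it read is the value the CAS
 * found.  A saturating increment ("counter" is hypothetical):
 *
 *	uint32_t old, new;
 *	do {
 *		old = *counter;
 *		if (old == UINT32_MAX)
 *			break;
 *		new = old + 1;
 *	} while (atomic_cas_32(counter, old, new) != old);
 */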


/*
 * Swap target and return old value
 */

#define	ATOMIC_SWAP(name, type) \
	type atomic_swap_##name(volatile type *target, type bits) \
	{ \
		return (__atomic_exchange_n(target, bits, __ATOMIC_SEQ_CST)); \
	}

ATOMIC_SWAP(8, uint8_t)
ATOMIC_SWAP(16, uint16_t)
ATOMIC_SWAP(32, uint32_t)
ATOMIC_SWAP(64, uint64_t)
ATOMIC_SWAP(uchar, uchar_t)
ATOMIC_SWAP(ushort, ushort_t)
ATOMIC_SWAP(uint, uint_t)
ATOMIC_SWAP(ulong, ulong_t)
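
/*
 * One common swap pattern: atomically take ownership of a pointer,
 * leaving NULL behind, so exactly one caller obtains the old non-NULL
 * value ("slot" is hypothetical):
 *
 *	void *mine = atomic_swap_ptr(&slot, NULL);
 */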

void *
atomic_swap_ptr(volatile void *target, void *bits)
{
	return (__atomic_exchange_n((void **)target, bits, __ATOMIC_SEQ_CST));
}
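
/*
 * On 32-bit targets a plain 64-bit load or store may be performed as
 * two 32-bit accesses and can tear, so it is routed through the
 * builtins here.  __ATOMIC_RELAXED provides atomicity only, with no
 * ordering guarantees.
 */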
#ifndef _LP64
uint64_t
atomic_load_64(volatile uint64_t *target)
{
	return (__atomic_load_n(target, __ATOMIC_RELAXED));
}

void
atomic_store_64(volatile uint64_t *target, uint64_t bits)
{
	__atomic_store_n(target, bits, __ATOMIC_RELAXED);
}
#endif

int
atomic_set_long_excl(volatile ulong_t *target, uint_t value)
{
	ulong_t bit = 1UL << value;
	ulong_t old = __atomic_fetch_or(target, bit, __ATOMIC_SEQ_CST);
	return ((old & bit) ? -1 : 0);
}

int
atomic_clear_long_excl(volatile ulong_t *target, uint_t value)
{
	ulong_t bit = 1UL << value;
	ulong_t old = __atomic_fetch_and(target, ~bit, __ATOMIC_SEQ_CST);
	return ((old & bit) ? 0 : -1);
}
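
/*
 * Usage sketch: the pair above implements an exclusive per-bit flag;
 * only the caller whose set succeeds may do the work and clear the bit
 * ("bitmap" and "id" are hypothetical):
 *
 *	if (atomic_set_long_excl(&bitmap, id) == 0) {
 *		...do the exclusive work...
 *		(void) atomic_clear_long_excl(&bitmap, id);
 *	}
 */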

void
membar_enter(void)
{
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}

void
membar_exit(void)
{
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}

void
membar_sync(void)
{
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}

void
membar_producer(void)
{
	__atomic_thread_fence(__ATOMIC_RELEASE);
}

void
membar_consumer(void)
{
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
}
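
/*
 * Pairing sketch for the two fences above (names hypothetical): a
 * producer stores its payload, calls membar_producer(), then sets a
 * ready flag; a consumer that sees the flag set calls membar_consumer()
 * before loading the payload.  The release fence keeps the payload
 * store from being reordered after the flag store; the acquire fence
 * keeps the payload load from being reordered before the flag load.
 */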