GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/amd64/include/atomic.h
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef __i386__
#include <i386/atomic.h>
#else /* !__i386__ */

#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define mb()	__asm __volatile("mfence;" : : : "memory")
#define wmb()	__asm __volatile("sfence;" : : : "memory")
#define rmb()	__asm __volatile("lfence;" : : : "memory")
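
/*
 * Example (an illustrative sketch, not from the FreeBSD sources): per the
 * comment above, interprocessor ordering should be expressed with the
 * acquire/release atomics defined later in this file rather than with the
 * *mb() macros.  A hypothetical producer/consumer handshake:
 *
 *	static u_int data, ready;
 *
 *	static void
 *	producer(void)
 *	{
 *		data = 42;
 *		atomic_store_rel_int(&ready, 1);  // release: data visible first
 *	}
 *
 *	static void
 *	consumer(void)
 *	{
 *		while (atomic_load_acq_int(&ready) == 0)
 *			cpu_spinwait();           // acquire: data read after ready
 *		(void)data;                       // guaranteed to observe 42
 *	}
 */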

#ifdef _KERNEL
/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define OFFSETOF_MONITORBUF	0x100
#endif
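
/*
 * Example (an illustrative sketch, not from the FreeBSD sources): the
 * assertion mentioned above could be written as a compile-time check along
 * these lines in a file where sys/pcpu.h is visible, which this header
 * deliberately avoids including:
 *
 *	_Static_assert(OFFSETOF_MONITORBUF ==
 *	    offsetof(struct pcpu, pc_monitorbuf),
 *	    "OFFSETOF_MONITORBUF is out of sync with struct pcpu");
 */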

#if defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME)
#include <sys/atomic_san.h>
#else
#include <sys/atomic_common.h>

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
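
/*
 * Example (an illustrative sketch, not from the FreeBSD sources): these
 * plain, unordered operations suit statistics counters and flag words where
 * only atomicity, not ordering, is required.  Hypothetical names:
 *
 *	static u_int npackets;
 *	static u_int flags;
 *	#define F_RUNNING	0x01
 *
 *	atomic_add_int(&npackets, 1);		// lossless SMP-safe counter
 *	atomic_set_int(&flags, F_RUNNING);	// atomic bit set
 *	atomic_clear_int(&flags, F_RUNNING);	// atomic bit clear
 */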

/*
 * Always use lock prefixes.  The result is slightly less optimal for
 * UP systems, but it matters less now, and sometimes UP is emulated
 * over SMP.
 *
 * The assembly is volatilized to avoid code chunk removal by the compiler.
 * GCC aggressively reorders operations and memory clobbering is necessary
 * in order to avoid that for memory barriers.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("lock; " OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("lock; " OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack
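
/*
 * Example (an illustrative expansion sketch, not from the FreeBSD sources):
 * ATOMIC_ASM(add, int, "addl %1,%0", "ir", v), instantiated further down,
 * expands to roughly:
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock; addl %1,%0"
 *		: "+m" (*p)
 *		: "ir" (v)
 *		: "cc");
 *	}
 *
 * plus an atomic_add_barr_int() variant whose extra "memory" clobber makes
 * it a compiler barrier as well, for the acquire/release aliases below.
 * The trailing struct __hack declaration consumes the semicolon written
 * after each macro invocation.
 */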

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define ATOMIC_CMPSET(TYPE)				\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	" lock; cmpxchg %3,%1 ; "			\
	"# atomic_cmpset_" #TYPE " "			\
	: "=@cce" (res),	/* 0 */			\
	  "+m" (*dst),		/* 1 */			\
	  "+a" (expect)		/* 2 */			\
	: "r" (src)		/* 3 */			\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	" lock; cmpxchg %3,%1 ; "			\
	"# atomic_fcmpset_" #TYPE " "			\
	: "=@cce" (res),	/* 0 */			\
	  "+m" (*dst),		/* 1 */			\
	  "+a" (*expect)	/* 2 */			\
	: "r" (src)		/* 3 */			\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char);
ATOMIC_CMPSET(short);
ATOMIC_CMPSET(int);
ATOMIC_CMPSET(long);
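
/*
 * Example (an illustrative sketch, not from the FreeBSD sources):
 * atomic_fcmpset_*() is intended for CAS loops, since on failure it writes
 * the observed value back into *expect and so avoids a separate reload per
 * iteration.  A hypothetical saturating increment:
 *
 *	static __inline void
 *	saturating_inc(volatile u_int *p, u_int max)
 *	{
 *		u_int old;
 *
 *		old = *p;
 *		do {
 *			if (old == max)
 *				return;		// already saturated
 *		} while (!atomic_fcmpset_int(p, &old, old + 1));
 *	}
 */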

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	" lock; xaddl %0,%1 ; "
	"# atomic_fetchadd_int"
	: "+r" (v),		/* 0 */
	  "+m" (*p)		/* 1 */
	: : "cc");
	return (v);
}
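
/*
 * Example (an illustrative sketch, not from the FreeBSD sources):
 * atomic_fetchadd_int() returns the pre-increment value, which is exactly
 * what a ticket-style sequencer needs.  A hypothetical ID allocator:
 *
 *	static u_int next_id;
 *
 *	static __inline u_int
 *	alloc_id(void)
 *	{
 *		return (atomic_fetchadd_int(&next_id, 1));  // old value
 *	}
 */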

/*
 * Atomically add the value of v to the long integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	" lock; xaddq %0,%1 ; "
	"# atomic_fetchadd_long"
	: "+r" (v),		/* 0 */
	  "+m" (*p)		/* 1 */
	: : "cc");
	return (v);
}

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" lock; btsl %2,%1 ; "
	"# atomic_testandset_int"
	: "=@ccc" (res),	/* 0 */
	  "+m" (*p)		/* 1 */
	: "Ir" (v & 0x1f)	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" lock; btsq %2,%1 ; "
	"# atomic_testandset_long"
	: "=@ccc" (res),	/* 0 */
	  "+m" (*p)		/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}
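
/*
 * Example (an illustrative sketch, not from the FreeBSD sources): the
 * return value is the previous state of the bit, so testandset can claim a
 * slot in a bitmap in one atomic step.  Note that the bit index is taken
 * modulo the word width (v & 0x3f above).  A hypothetical single-word slot
 * allocator:
 *
 *	static u_long slot_mask;
 *
 *	static __inline int
 *	alloc_slot(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 64; i++)
 *			if (atomic_testandset_long(&slot_mask, i) == 0)
 *				return (i);	// bit was clear; now ours
 *		return (-1);			// all slots taken
 *	}
 */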

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" lock; btrl %2,%1 ; "
	"# atomic_testandclear_int"
	: "=@ccc" (res),	/* 0 */
	  "+m" (*p)		/* 1 */
	: "Ir" (v & 0x1f)	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" lock; btrq %2,%1 ; "
	"# atomic_testandclear_long"
	: "=@ccc" (res),	/* 0 */
	  "+m" (*p)		/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 */

static __inline void
__storeload_barrier(void)
{
#if defined(_KERNEL)
	__asm __volatile("lock; addl $0,%%gs:%c0"
	    : : "i" (OFFSETOF_MONITORBUF) : "memory", "cc");
#else /* !_KERNEL */
	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
#endif /* _KERNEL */
}

#define ATOMIC_LOAD(TYPE)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(const volatile u_##TYPE *p)	\
{							\
	u_##TYPE res;					\
							\
	res = *p;					\
	__compiler_membar();				\
	return (res);					\
}							\
struct __hack

#define ATOMIC_STORE(TYPE)				\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v) \
{							\
							\
	__compiler_membar();				\
	*p = v;						\
}							\
struct __hack
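
/*
 * Example (an illustrative expansion sketch, not from the FreeBSD sources):
 * because amd64 loads already have acquire semantics and stores have
 * release semantics, the macros above need only a compiler barrier, not a
 * fence instruction.  ATOMIC_LOAD(int) and ATOMIC_STORE(int), instantiated
 * below, expand to roughly:
 *
 *	static __inline u_int
 *	atomic_load_acq_int(const volatile u_int *p)
 *	{
 *		u_int res;
 *
 *		res = *p;
 *		__compiler_membar();	// compiler-only barrier
 *		return (res);
 *	}
 *
 *	static __inline void
 *	atomic_store_rel_int(volatile u_int *p, u_int v)
 *	{
 *		__compiler_membar();
 *		*p = v;
 *	}
 */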

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}
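
/*
 * Example (an illustrative sketch, not from the FreeBSD sources): only the
 * seq_cst fence emits an instruction, because the one reordering amd64
 * permits is a store followed by a load from a different location.  The
 * classic store-buffering pattern shows why the Store/Load barrier is
 * needed (x, y, r0, r1 are hypothetical):
 *
 *	// CPU 0				// CPU 1
 *	x = 1;					y = 1;
 *	atomic_thread_fence_seq_cst();		atomic_thread_fence_seq_cst();
 *	r0 = y;					r1 = x;
 *
 * Without the fences, r0 == 0 && r1 == 0 is a permitted outcome; with
 * them, at least one CPU observes the other's store.
 */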

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq", v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq", v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq", v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir", v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir", v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir", v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir", v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir", v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir", v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "er", v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "er", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "er", v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "er", v);

#define ATOMIC_LOADSTORE(TYPE)				\
	ATOMIC_LOAD(TYPE);				\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

/* Read the current value and store a new value in the destination. */
static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	" xchgl %1,%0 ; "
	"# atomic_swap_int"
	: "+r" (v),		/* 0 */
	  "+m" (*p));		/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	" xchgq %1,%0 ; "
	"# atomic_swap_long"
	: "+r" (v),		/* 0 */
	  "+m" (*p));		/* 1 */
	return (v);
}
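
/*
 * Example (an illustrative sketch, not from the FreeBSD sources): xchg
 * carries an implicit lock prefix, so atomic_swap_int() can implement a
 * minimal test-and-set spinlock.  cpu_spinwait() here is assumed to come
 * from <machine/cpufunc.h>:
 *
 *	static u_int lockword;		// 0 = free, 1 = held
 *
 *	static __inline void
 *	lock_acquire(void)
 *	{
 *		while (atomic_swap_int(&lockword, 1) != 0)
 *			cpu_spinwait();	// previous value 1: still held
 *	}
 *
 *	static __inline void
 *	lock_release(void)
 *	{
 *		atomic_store_rel_int(&lockword, 0);
 *	}
 */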

#define atomic_set_acq_char		atomic_set_barr_char
#define atomic_set_rel_char		atomic_set_barr_char
#define atomic_clear_acq_char		atomic_clear_barr_char
#define atomic_clear_rel_char		atomic_clear_barr_char
#define atomic_add_acq_char		atomic_add_barr_char
#define atomic_add_rel_char		atomic_add_barr_char
#define atomic_subtract_acq_char	atomic_subtract_barr_char
#define atomic_subtract_rel_char	atomic_subtract_barr_char
#define atomic_cmpset_acq_char		atomic_cmpset_char
#define atomic_cmpset_rel_char		atomic_cmpset_char
#define atomic_fcmpset_acq_char		atomic_fcmpset_char
#define atomic_fcmpset_rel_char		atomic_fcmpset_char

#define atomic_set_acq_short		atomic_set_barr_short
#define atomic_set_rel_short		atomic_set_barr_short
#define atomic_clear_acq_short		atomic_clear_barr_short
#define atomic_clear_rel_short		atomic_clear_barr_short
#define atomic_add_acq_short		atomic_add_barr_short
#define atomic_add_rel_short		atomic_add_barr_short
#define atomic_subtract_acq_short	atomic_subtract_barr_short
#define atomic_subtract_rel_short	atomic_subtract_barr_short
#define atomic_cmpset_acq_short		atomic_cmpset_short
#define atomic_cmpset_rel_short		atomic_cmpset_short
#define atomic_fcmpset_acq_short	atomic_fcmpset_short
#define atomic_fcmpset_rel_short	atomic_fcmpset_short

#define atomic_set_acq_int		atomic_set_barr_int
#define atomic_set_rel_int		atomic_set_barr_int
#define atomic_clear_acq_int		atomic_clear_barr_int
#define atomic_clear_rel_int		atomic_clear_barr_int
#define atomic_add_acq_int		atomic_add_barr_int
#define atomic_add_rel_int		atomic_add_barr_int
#define atomic_subtract_acq_int		atomic_subtract_barr_int
#define atomic_subtract_rel_int		atomic_subtract_barr_int
#define atomic_cmpset_acq_int		atomic_cmpset_int
#define atomic_cmpset_rel_int		atomic_cmpset_int
#define atomic_fcmpset_acq_int		atomic_fcmpset_int
#define atomic_fcmpset_rel_int		atomic_fcmpset_int

#define atomic_set_acq_long		atomic_set_barr_long
#define atomic_set_rel_long		atomic_set_barr_long
#define atomic_clear_acq_long		atomic_clear_barr_long
#define atomic_clear_rel_long		atomic_clear_barr_long
#define atomic_add_acq_long		atomic_add_barr_long
#define atomic_add_rel_long		atomic_add_barr_long
#define atomic_subtract_acq_long	atomic_subtract_barr_long
#define atomic_subtract_rel_long	atomic_subtract_barr_long
#define atomic_cmpset_acq_long		atomic_cmpset_long
#define atomic_cmpset_rel_long		atomic_cmpset_long
#define atomic_fcmpset_acq_long		atomic_fcmpset_long
#define atomic_fcmpset_rel_long		atomic_fcmpset_long

#define atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define atomic_readandclear_long(p)	atomic_swap_long(p, 0)
#define atomic_testandset_acq_long	atomic_testandset_long

/* Operations on 8-bit bytes. */
#define atomic_set_8		atomic_set_char
#define atomic_set_acq_8	atomic_set_acq_char
#define atomic_set_rel_8	atomic_set_rel_char
#define atomic_clear_8		atomic_clear_char
#define atomic_clear_acq_8	atomic_clear_acq_char
#define atomic_clear_rel_8	atomic_clear_rel_char
#define atomic_add_8		atomic_add_char
#define atomic_add_acq_8	atomic_add_acq_char
#define atomic_add_rel_8	atomic_add_rel_char
#define atomic_subtract_8	atomic_subtract_char
#define atomic_subtract_acq_8	atomic_subtract_acq_char
#define atomic_subtract_rel_8	atomic_subtract_rel_char
#define atomic_load_acq_8	atomic_load_acq_char
#define atomic_store_rel_8	atomic_store_rel_char
#define atomic_cmpset_8		atomic_cmpset_char
#define atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define atomic_fcmpset_8	atomic_fcmpset_char
#define atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16		atomic_set_short
#define atomic_set_acq_16	atomic_set_acq_short
#define atomic_set_rel_16	atomic_set_rel_short
#define atomic_clear_16		atomic_clear_short
#define atomic_clear_acq_16	atomic_clear_acq_short
#define atomic_clear_rel_16	atomic_clear_rel_short
#define atomic_add_16		atomic_add_short
#define atomic_add_acq_16	atomic_add_acq_short
#define atomic_add_rel_16	atomic_add_rel_short
#define atomic_subtract_16	atomic_subtract_short
#define atomic_subtract_acq_16	atomic_subtract_acq_short
#define atomic_subtract_rel_16	atomic_subtract_rel_short
#define atomic_load_acq_16	atomic_load_acq_short
#define atomic_store_rel_16	atomic_store_rel_short
#define atomic_cmpset_16	atomic_cmpset_short
#define atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define atomic_fcmpset_16	atomic_fcmpset_short
#define atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32		atomic_set_int
#define atomic_set_acq_32	atomic_set_acq_int
#define atomic_set_rel_32	atomic_set_rel_int
#define atomic_clear_32		atomic_clear_int
#define atomic_clear_acq_32	atomic_clear_acq_int
#define atomic_clear_rel_32	atomic_clear_rel_int
#define atomic_add_32		atomic_add_int
#define atomic_add_acq_32	atomic_add_acq_int
#define atomic_add_rel_32	atomic_add_rel_int
#define atomic_subtract_32	atomic_subtract_int
#define atomic_subtract_acq_32	atomic_subtract_acq_int
#define atomic_subtract_rel_32	atomic_subtract_rel_int
#define atomic_load_acq_32	atomic_load_acq_int
#define atomic_store_rel_32	atomic_store_rel_int
#define atomic_cmpset_32	atomic_cmpset_int
#define atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define atomic_fcmpset_32	atomic_fcmpset_int
#define atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define atomic_swap_32		atomic_swap_int
#define atomic_readandclear_32	atomic_readandclear_int
#define atomic_fetchadd_32	atomic_fetchadd_int
#define atomic_testandset_32	atomic_testandset_int
#define atomic_testandclear_32	atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define atomic_set_64		atomic_set_long
#define atomic_set_acq_64	atomic_set_acq_long
#define atomic_set_rel_64	atomic_set_rel_long
#define atomic_clear_64		atomic_clear_long
#define atomic_clear_acq_64	atomic_clear_acq_long
#define atomic_clear_rel_64	atomic_clear_rel_long
#define atomic_add_64		atomic_add_long
#define atomic_add_acq_64	atomic_add_acq_long
#define atomic_add_rel_64	atomic_add_rel_long
#define atomic_subtract_64	atomic_subtract_long
#define atomic_subtract_acq_64	atomic_subtract_acq_long
#define atomic_subtract_rel_64	atomic_subtract_rel_long
#define atomic_load_acq_64	atomic_load_acq_long
#define atomic_store_rel_64	atomic_store_rel_long
#define atomic_cmpset_64	atomic_cmpset_long
#define atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define atomic_fcmpset_64	atomic_fcmpset_long
#define atomic_fcmpset_acq_64	atomic_fcmpset_acq_long
#define atomic_fcmpset_rel_64	atomic_fcmpset_rel_long
#define atomic_swap_64		atomic_swap_long
#define atomic_readandclear_64	atomic_readandclear_long
#define atomic_fetchadd_64	atomic_fetchadd_long
#define atomic_testandset_64	atomic_testandset_long
#define atomic_testandclear_64	atomic_testandclear_long

/* Operations on pointers. */
#define atomic_set_ptr		atomic_set_long
#define atomic_set_acq_ptr	atomic_set_acq_long
#define atomic_set_rel_ptr	atomic_set_rel_long
#define atomic_clear_ptr	atomic_clear_long
#define atomic_clear_acq_ptr	atomic_clear_acq_long
#define atomic_clear_rel_ptr	atomic_clear_rel_long
#define atomic_add_ptr		atomic_add_long
#define atomic_add_acq_ptr	atomic_add_acq_long
#define atomic_add_rel_ptr	atomic_add_rel_long
#define atomic_subtract_ptr	atomic_subtract_long
#define atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define atomic_load_acq_ptr	atomic_load_acq_long
#define atomic_store_rel_ptr	atomic_store_rel_long
#define atomic_cmpset_ptr	atomic_cmpset_long
#define atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define atomic_fcmpset_ptr	atomic_fcmpset_long
#define atomic_fcmpset_acq_ptr	atomic_fcmpset_acq_long
#define atomic_fcmpset_rel_ptr	atomic_fcmpset_rel_long
#define atomic_swap_ptr		atomic_swap_long
#define atomic_readandclear_ptr	atomic_readandclear_long
#define atomic_testandset_ptr	atomic_testandset_long
#define atomic_testandclear_ptr	atomic_testandclear_long

#endif /* !SAN_NEEDS_INTERCEPTORS || SAN_RUNTIME */

#endif /* !_MACHINE_ATOMIC_H_ */

#endif /* __i386__ */