GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h

//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of the ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H

namespace __sanitizer {

// We use the compiler builtin atomic operations for loads and stores, which
// generate correct code for all architectures, but may require libatomic
// on platforms where e.g. 64-bit atomics are not supported natively.

// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.
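
// Illustrative sketch (not part of the original header): what the builtin
// mapping means at a call site. Per the cppmappings table above, on x86 an
// acquire load is a plain MOV and a seq_cst store is XCHG (or MOV+MFENCE);
// weaker architectures get explicit barriers, and widths the target cannot
// handle natively become libatomic calls. `x` is a hypothetical variable.
//
//   unsigned v = __atomic_load_n(&x, __ATOMIC_ACQUIRE);
//   __atomic_store_n(&x, v + 1, __ATOMIC_SEQ_CST);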

inline void atomic_signal_fence(memory_order mo) { __atomic_signal_fence(mo); }

inline void atomic_thread_fence(memory_order mo) { __atomic_thread_fence(mo); }
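
// Illustrative sketch (not part of the original header): the signal fence
// only constrains compiler reordering (enough when synchronizing with a
// signal handler on the same thread), while the thread fence also emits a
// hardware barrier. `g_handler_flag` is a hypothetical variable.
//
//   g_handler_flag = 1;
//   atomic_signal_fence(memory_order_release);  // compiler barrier only
//   atomic_thread_fence(memory_order_seq_cst);  // full CPU fence as well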

// Spin-wait hint: on x86, execute the PAUSE instruction cnt times so the
// CPU knows this is a busy-wait loop; on other architectures this is only
// a compiler barrier.
inline void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
#if defined(__i386__) || defined(__x86_64__)
  for (int i = 0; i < cnt; i++) __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
#endif
}
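
// Illustrative sketch (not part of the original header): proc_yield in a
// polling loop. PAUSE reduces power use and avoids pipeline flushes on SMT
// siblings while spinning. `flag` is a hypothetical variable.
//
//   while (__atomic_load_n(&flag, __ATOMIC_ACQUIRE) == 0)
//     proc_yield(10);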

template <typename T>
inline typename T::Type atomic_load(const volatile T *a, memory_order mo) {
  DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
         mo == memory_order_acquire || mo == memory_order_seq_cst);
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_load_n(&a->val_dont_use, mo);
}

template <typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
         mo == memory_order_seq_cst);
  DCHECK(!((uptr)a % sizeof(*a)));
  __atomic_store_n(&a->val_dont_use, v, mo);
}
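
// Illustrative sketch (not part of the original header): release/acquire
// publication with these wrappers, assuming the atomic_uint32_t type that
// sanitizer_atomic.h defines around this header. The release store keeps
// the write of `data` before the flag write; the acquire load keeps the
// read of `data` after the flag read, so the consumer sees data == 42.
//
//   int data;                // hypothetical payload
//   atomic_uint32_t ready;
//   // producer:
//   data = 42;
//   atomic_store(&ready, 1, memory_order_release);
//   // consumer:
//   while (atomic_load(&ready, memory_order_acquire) == 0) proc_yield(1);
//   // here data == 42 is guaranteed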

template <typename T>
inline typename T::Type atomic_fetch_add(volatile T *a, typename T::Type v,
                                         memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_fetch_add(&a->val_dont_use, v, mo);
}

template <typename T>
inline typename T::Type atomic_fetch_sub(volatile T *a, typename T::Type v,
                                         memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_fetch_sub(&a->val_dont_use, v, mo);
}
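
// Illustrative sketch (not part of the original header): fetch_add and
// fetch_sub return the value held *before* the update, so a reference
// count can detect the final release. `refcount` is hypothetical.
//
//   atomic_uint32_t refcount;  // starts at 1 for the creating owner
//   atomic_fetch_add(&refcount, 1, memory_order_relaxed);   // take a ref
//   if (atomic_fetch_sub(&refcount, 1, memory_order_acq_rel) == 1) {
//     // last reference dropped; safe to destroy the object
//   }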

template <typename T>
inline typename T::Type atomic_exchange(volatile T *a, typename T::Type v,
                                        memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_exchange_n(&a->val_dont_use, v, mo);
}
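
// Illustrative sketch (not part of the original header): a minimal
// test-and-set spin lock built from atomic_exchange, with proc_yield as
// the backoff hint. `lock` is hypothetical.
//
//   atomic_uint8_t lock;
//   while (atomic_exchange(&lock, 1, memory_order_acquire) != 0)
//     proc_yield(10);
//   // ...critical section...
//   atomic_store(&lock, 0, memory_order_release);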

template <typename T>
inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
                                           typename T::Type xchg,
                                           memory_order mo) {
  // Transitioned from __sync_val_compare_and_swap to support targets like
  // SPARC V8 that cannot inline atomic cmpxchg. __atomic_compare_exchange
  // can then be resolved from libatomic. __ATOMIC_SEQ_CST is used to best
  // match the __sync builtin memory order.
  return __atomic_compare_exchange(&a->val_dont_use, cmp, &xchg, false,
                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

template <typename T>
inline bool atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}
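
// Illustrative sketch (not part of the original header): the standard CAS
// retry loop. On failure, *cmp is rewritten with the value actually seen,
// so the loop re-reads for free. Since _weak here forwards to _strong,
// spurious failures cannot occur, but the loop remains the idiomatic way
// to apply an arbitrary read-modify-write. `counter` is hypothetical.
//
//   atomic_uint32_t counter;
//   u32 cmp = atomic_load(&counter, memory_order_relaxed);
//   while (!atomic_compare_exchange_weak(&counter, &cmp, cmp * 2,
//                                        memory_order_acq_rel)) {
//   }
//   // counter was atomically doubled exactly once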

}  // namespace __sanitizer

#undef ATOMIC_ORDER

#endif  // SANITIZER_ATOMIC_CLANG_H