Path: blob/main/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H

namespace __sanitizer {

// We use the compiler builtin atomic operations for loads and stores, which
// generate correct code for all architectures, but may require libatomic
// on platforms where e.g. 64-bit atomics are not supported natively.

// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.

inline void atomic_signal_fence(memory_order mo) { __atomic_signal_fence(mo); }

inline void atomic_thread_fence(memory_order mo) { __atomic_thread_fence(mo); }

// Spin-wait hint: a compiler barrier on all targets, plus the x86 "pause"
// instruction to reduce power use and contention while busy-waiting.
inline void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
#if defined(__i386__) || defined(__x86_64__)
  for (int i = 0; i < cnt; i++) __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
#endif
}

template <typename T>
inline typename T::Type atomic_load(const volatile T *a, memory_order mo) {
  DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
         mo == memory_order_acquire || mo == memory_order_seq_cst);
  DCHECK(!((uptr)a % sizeof(*a)));  // Atomics must be naturally aligned.
  return __atomic_load_n(&a->val_dont_use, mo);
}

template <typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
         mo == memory_order_seq_cst);
  DCHECK(!((uptr)a % sizeof(*a)));
  __atomic_store_n(&a->val_dont_use, v, mo);
}

template <typename T>
inline typename T::Type atomic_fetch_add(volatile T *a, typename T::Type v,
                                         memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_fetch_add(&a->val_dont_use, v, mo);
}

template <typename T>
inline typename T::Type atomic_fetch_sub(volatile T *a, typename T::Type v,
                                         memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_fetch_sub(&a->val_dont_use, v, mo);
}

template <typename T>
inline typename T::Type atomic_exchange(volatile T *a, typename T::Type v,
                                        memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_exchange_n(&a->val_dont_use, v, mo);
}

template <typename T>
inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
                                           typename T::Type xchg,
                                           memory_order mo) {
  // Transitioned from __sync_val_compare_and_swap to support targets like
  // SPARC V8 that cannot inline atomic cmpxchg. __atomic_compare_exchange
  // can then be resolved from libatomic. __ATOMIC_SEQ_CST is used to best
  // match the __sync builtin memory order.
  return __atomic_compare_exchange(&a->val_dont_use, cmp, &xchg, false,
                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

// The weak form is allowed to fail spuriously; forwarding to the strong
// version is a conservative, correct implementation.
template <typename T>
inline bool atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

}  // namespace __sanitizer

#undef ATOMIC_ORDER

#endif  // SANITIZER_ATOMIC_CLANG_H
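
Usage sketch (not part of the header above): the wrappers compose naturally into a test-and-set spin lock, in the spirit of sanitizer_common's StaticSpinMutex. ExampleSpinLock below is a hypothetical name for illustration; atomic_uint8_t, its val_dont_use field, and the memory_order constants are declared in sanitizer_atomic.h.

#include "sanitizer_atomic.h"

namespace __sanitizer {

// Hypothetical example, not part of the runtime: a test-and-set spin lock
// built from the atomic wrappers and proc_yield() defined in this header.
class ExampleSpinLock {
 public:
  void Lock() {
    // Fast path: an acquire exchange grabs the lock if it is free.
    if (atomic_exchange(&state_, 1, memory_order_acquire) == 0)
      return;
    // Slow path: spin on relaxed loads and retry the exchange only when the
    // lock looks free, yielding the core between probes.
    for (;;) {
      proc_yield(10);  // "pause" on x86; plain compiler barrier elsewhere.
      if (atomic_load(&state_, memory_order_relaxed) == 0 &&
          atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
  void Unlock() {
    // The release store pairs with the acquire exchange in Lock().
    atomic_store(&state_, 0, memory_order_release);
  }

 private:
  atomic_uint8_t state_;  // 0 = unlocked, 1 = locked.
};

}  // namespace __sanitizer

The relaxed load in the slow path keeps the spinning thread reading a shared cache line rather than hammering it with exchanges, and the acquire exchange plus release store order the critical section between threads.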