Path: blob/main/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
//===-- tsan_interface_atomic.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on C++11/C1x standards.
// For background see C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

#if SANITIZER_DEBUG
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}
#endif

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

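// Note: the func_* wrappers above only perform the raw atomic memory access
// (via the __sync_* builtins or, for the 128-bit overloads below, under a
// spin lock); the happens-before modeling for the requested memory order is
// layered on top of them in AtomicLoad/AtomicStore/AtomicRMW/AtomicCAS
// further down.
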
// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template <typename T>
static int AccessSize() {
  if (sizeof(T) <= 1)
    return 1;
  else if (sizeof(T) <= 2)
    return 2;
  else if (sizeof(T) <= 4)
    return 4;
  else
    return 8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}

#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
    case mo_relaxed: return memory_order_relaxed;
    case mo_consume: return memory_order_consume;
    case mo_acquire: return memory_order_acquire;
    case mo_release: return memory_order_release;
    case mo_acq_rel: return memory_order_acq_rel;
    case mo_seq_cst: return memory_order_seq_cst;
  }
  DCHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

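// The instrumented operations below share a common structure: the access is
// reported to the race detector as an atomic read or write of AccessSize<T>()
// bytes, and when the memory order requires synchronization, the thread's
// vector clock is acquired from / released to the SyncVar associated with the
// address of the atomic variable.
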
template <typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  DCHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessRead | kAccessAtomic);
    return NoTsanAtomicLoad(a, mo);
  }
  // Don't create sync object if it does not exist yet. For example, an atomic
  // pointer is initialized to nullptr and then periodically acquire-loaded.
  T v = NoTsanAtomicLoad(a, mo);
  SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
  if (s) {
    SlotLocker locker(thr);
    ReadLock lock(&s->mtx);
    thr->clock.Acquire(s->clock);
    // Re-read under sync mutex because we need a consistent snapshot
    // of the value and the clock we acquire.
    v = NoTsanAtomicLoad(a, mo);
  }
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template <typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  DCHECK(IsStoreOrder(mo));
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    Lock lock(&s->mtx);
    thr->clock.ReleaseStore(&s->clock);
    NoTsanAtomicStore(a, v, mo);
  }
  IncrementEpoch(thr);
}

template <typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  if (LIKELY(mo == mo_relaxed))
    return F(a, v);
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    RWLock lock(&s->mtx, IsReleaseOrder(mo));
    if (IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);
    v = F(a, v);
  }
  if (IsReleaseOrder(mo))
    IncrementEpoch(thr);
  return v;
}

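// Each operation below comes in two flavors: a NoTsan* variant that performs
// just the raw atomic operation (used when instrumentation is disabled), and
// an instrumented variant that funnels through AtomicRMW above with the
// matching func_* primitive.
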
template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
                       morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
                         morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
                            morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template <typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
                      morder mo, morder fmo) {
  // 31.7.2.18: "The failure argument shall not be memory_order_release
  // nor memory_order_acq_rel". LLVM (2021-05) falls back to Monotonic
  // (mo_relaxed) when those are used.
  DCHECK(IsLoadOrder(fmo));

  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
    T cc = *c;
    T pr = func_cas(a, cc, v);
    if (pr == cc)
      return true;
    *c = pr;
    return false;
  }
  SlotLocker locker(thr);
  bool release = IsReleaseOrder(mo);
  bool success;
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    RWLock lock(&s->mtx, release);
    T cc = *c;
    T pr = func_cas(a, cc, v);
    success = pr == cc;
    if (!success) {
      *c = pr;
      mo = fmo;
    }
    if (success && IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (success && IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);
  }
  if (success && release)
    IncrementEpoch(thr);
  return success;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
                   volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;

  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics;
  // since we use __sync_ atomics for the actual atomic operations,
  // we can safely ignore it as well. It also subtly affects semantics,
  // but we don't model the difference.
  return (morder)(mo & 0x7fff);
}

#  define ATOMIC_IMPL(func, ...)                               \
  ThreadState *const thr = cur_thread();                       \
  ProcessPendingSignals(thr);                                  \
  if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))  \
    return NoTsanAtomic##func(__VA_ARGS__);                    \
  mo = convert_morder(mo);                                     \
  return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__);

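// For example, __tsan_atomic8_load(a, mo) below expands to roughly:
//   ThreadState *const thr = cur_thread();
//   ProcessPendingSignals(thr);
//   if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
//     return NoTsanAtomicLoad(a, mo);
//   mo = convert_morder(mo);
//   return AtomicLoad(thr, GET_CALLER_PC(), a, mo);
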
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}
#endif

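// Compare-and-swap comes in three interface flavors: *_compare_exchange_strong
// and *_compare_exchange_weak both map onto the same AtomicCAS implementation
// here (spurious failures are not modeled), and *_compare_exchange_val returns
// the previous value instead of a success flag.
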
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
                                           morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
                                            morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
                                            morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
                                            morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
                                             morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
                                          morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
                                          morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
                                          morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
                                           morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
                                       morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
                                           morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"

#else  // #if !SANITIZER_GO

// Go

#  define ATOMIC(func, ...)               \
    if (thr->ignore_sync) {               \
      NoTsanAtomic##func(__VA_ARGS__);    \
    } else {                              \
      FuncEntry(thr, cpc);                \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr);                      \
    }

#  define ATOMIC_RET(func, ret, ...)              \
    if (thr->ignore_sync) {                       \
      (ret) = NoTsanAtomic##func(__VA_ARGS__);    \
    } else {                                      \
      FuncEntry(thr, cpc);                        \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr);                              \
    }

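// The Go runtime passes a pointer to a single flat argument buffer: the first
// word holds the address of the atomic variable, the following slots hold the
// operands, and results (the loaded/old value or the CAS success flag) are
// written back into the buffer at the offsets used by the accessors below.
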
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAnd, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8),
             mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAnd, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8),
             mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchOr, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8),
             mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchOr, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8),
             mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #if !SANITIZER_GO