// Path: blob/master/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp
/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP
#define OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP

#include <intrin.h>
#include "runtime/os.hpp"

// Note that in MSVC, volatile memory accesses are explicitly
// guaranteed to have acquire release semantics (w.r.t. compiler
// reordering) and therefore does not even need a compiler barrier
// for normal acquire release accesses. And all generalized
// bound calls like release_store go through Atomic::load
// and Atomic::store which do volatile memory accesses.
template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }

// Atomic add for this platform: add_and_fetch is specialized per operand
// size by the macro below; fetch_and_add is derived from it by subtracting
// the addend back out of the returned (new) value.
template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;

  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    return add_and_fetch(dest, add_value, order) - add_value;
  }
};

// The Interlocked* APIs only take long and will not accept __int32. That is
// acceptable on Windows, since long is a 32-bits integer type.

#define DEFINE_INTRINSIC_ADD(IntrinsicName, IntrinsicType)                     \
template<>                                                                     \
template<typename D, typename I>                                               \
inline D Atomic::PlatformAdd<sizeof(IntrinsicType)>::add_and_fetch(D volatile* dest, \
                                                                   I add_value, \
                                                                   atomic_memory_order order) const { \
  STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(D));                           \
  return PrimitiveConversions::cast<D>(                                        \
    IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest),            \
                  PrimitiveConversions::cast<IntrinsicType>(add_value)));      \
}

DEFINE_INTRINSIC_ADD(InterlockedAdd,   long)
DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64)

#undef DEFINE_INTRINSIC_ADD

#define DEFINE_INTRINSIC_XCHG(IntrinsicName, IntrinsicType)                    \
template<>                                                                     \
template<typename T>                                                           \
inline T Atomic::PlatformXchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
                                                                 T exchange_value, \
                                                                 atomic_memory_order order) const { \
  STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T));                           \
  return PrimitiveConversions::cast<T>(                                        \
    IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest),            \
                  PrimitiveConversions::cast<IntrinsicType>(exchange_value))); \
}

DEFINE_INTRINSIC_XCHG(InterlockedExchange,   long)
DEFINE_INTRINSIC_XCHG(InterlockedExchange64, __int64)

#undef DEFINE_INTRINSIC_XCHG

// Note: the order of the parameters is different between
// Atomic::PlatformCmpxchg<*>::operator() and the
// InterlockedCompareExchange* API.

#define DEFINE_INTRINSIC_CMPXCHG(IntrinsicName, IntrinsicType)                 \
template<>                                                                     \
template<typename T>                                                           \
inline T Atomic::PlatformCmpxchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
                                                                    T compare_value, \
                                                                    T exchange_value, \
                                                                    atomic_memory_order order) const { \
  STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T));                           \
  return PrimitiveConversions::cast<T>(                                        \
    IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest),            \
                  PrimitiveConversions::cast<IntrinsicType>(exchange_value),   \
                  PrimitiveConversions::cast<IntrinsicType>(compare_value)));  \
}

DEFINE_INTRINSIC_CMPXCHG(_InterlockedCompareExchange8, char) // Use the intrinsic as InterlockedCompareExchange8 does not exist
DEFINE_INTRINSIC_CMPXCHG(InterlockedCompareExchange,   long)
DEFINE_INTRINSIC_CMPXCHG(InterlockedCompareExchange64, __int64)

#undef DEFINE_INTRINSIC_CMPXCHG

// 32-bit x86 only: 8-byte atomic load/store and ordered stores need
// hand-written assembly, since no 64-bit Interlocked path applies here.
#ifndef AMD64

#pragma warning(disable: 4035) // Disables warnings reporting missing return statement

// 64-bit load on 32-bit x86. The fild/fistp pair copies all 8 bytes with a
// single x87 read and a single x87 write; x86 performs aligned 64-bit x87
// memory accesses atomically, so the value cannot be torn.
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile T dest;
  volatile T* pdest = &dest;
  __asm {
    mov eax, src
    fild     qword ptr [eax]
    mov eax, pdest
    fistp    qword ptr [eax]
  }
  return dest;
}

// 64-bit store on 32-bit x86, using the same fild/fistp technique: the
// 8-byte value is written to *dest in one atomic x87 store.
template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
                                                 T store_value) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile T* src = &store_value;
  __asm {
    mov eax, src
    fild     qword ptr [eax]
    mov eax, dest
    fistp    qword ptr [eax]
  }
}

#pragma warning(default: 4035) // Enables warnings reporting missing return statement

// Ordered (release + fence) stores: xchg with a memory operand has an
// implicit LOCK prefix on x86, so the single instruction is both the store
// and a full two-way fence - no separate trailing fence is required.
template<>
struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm {
      mov edx, p;
      mov al, v;
      xchg al, byte ptr [edx];
    }
  }
};

// 2-byte variant of the xchg-based ordered store above.
template<>
struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm {
      mov edx, p;
      mov ax, v;
      xchg ax, word ptr [edx];
    }
  }
};

// 4-byte variant of the xchg-based ordered store above.
template<>
struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm {
      mov edx, p;
      mov eax, v;
      xchg eax, dword ptr [edx];
    }
  }
};
#endif // AMD64

#endif // OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP