GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp
/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP
#define OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP

#include <intrin.h>
#include "runtime/os.hpp"

// Note that in MSVC, volatile memory accesses are explicitly
// guaranteed to have acquire release semantics (w.r.t. compiler
// reordering) and therefore do not even need a compiler barrier
// for normal acquire release accesses. And all generalized
// bound calls like release_store go through Atomic::load
// and Atomic::store which do volatile memory accesses.
template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
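
// Illustrative sketch (not part of the upstream header; assumes the generic
// ScopedFence-based path in atomic.hpp): a generalized call such as
//
//   Atomic::release_store_fence(&_flag, 1);
//
// wraps a plain volatile store in a ScopedFence<RELEASE_X_FENCE>, so with the
// specializations above it runs no barrier before the store and an
// OrderAccess::fence() after it. (On 32-bit builds the PlatformOrderedStore
// specializations near the end of this file are used instead.)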

template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;

  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    return add_and_fetch(dest, add_value, order) - add_value;
  }
};
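
// A quick worked example (illustration only): if *dest starts at 5 and
// add_value is 3, add_and_fetch stores 8 and returns 8, while fetch_and_add
// above recovers the old value by subtracting add_value again and returns 5.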

// The Interlocked* APIs only take long and will not accept __int32. That is
// acceptable on Windows, since long is a 32-bit integer type.

#define DEFINE_INTRINSIC_ADD(IntrinsicName, IntrinsicType) \
  template<> \
  template<typename D, typename I> \
  inline D Atomic::PlatformAdd<sizeof(IntrinsicType)>::add_and_fetch(D volatile* dest, \
                                                                     I add_value, \
                                                                     atomic_memory_order order) const { \
    STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(D)); \
    return PrimitiveConversions::cast<D>( \
      IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \
                    PrimitiveConversions::cast<IntrinsicType>(add_value))); \
  }

DEFINE_INTRINSIC_ADD(InterlockedAdd, long)
DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64)

#undef DEFINE_INTRINSIC_ADD
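
// For illustration only (a hand-written sketch of the expansion, not upstream
// code): DEFINE_INTRINSIC_ADD(InterlockedAdd, long) defines, roughly,
//
//   template<>
//   template<typename D, typename I>
//   inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest,
//                                                  I add_value,
//                                                  atomic_memory_order order) const {
//     STATIC_ASSERT(sizeof(long) == sizeof(D));
//     return PrimitiveConversions::cast<D>(
//       InterlockedAdd(reinterpret_cast<long volatile*>(dest),
//                      PrimitiveConversions::cast<long>(add_value)));
//   }
//
// i.e. the 4-byte platform specialization forwards straight to the Windows
// InterlockedAdd API, which returns the resulting (post-add) value.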

#define DEFINE_INTRINSIC_XCHG(IntrinsicName, IntrinsicType) \
  template<> \
  template<typename T> \
  inline T Atomic::PlatformXchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
                                                                   T exchange_value, \
                                                                   atomic_memory_order order) const { \
    STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \
    return PrimitiveConversions::cast<T>( \
      IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \
                    PrimitiveConversions::cast<IntrinsicType>(exchange_value))); \
  }

DEFINE_INTRINSIC_XCHG(InterlockedExchange, long)
DEFINE_INTRINSIC_XCHG(InterlockedExchange64, __int64)

#undef DEFINE_INTRINSIC_XCHG
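
// Behavior sketch (illustration only, not upstream code): the 4-byte
// specialization defined above swaps in the new value and returns the old
// one, e.g. with *dest == 1, PlatformXchg<4>()(dest, 7, order) leaves
// *dest == 7 and returns 1. On x86 InterlockedExchange compiles to an xchg
// instruction, whose implicit lock prefix already gives the full two-way
// fence that HotSpot's conservative ordering asks for.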

// Note: the order of the parameters is different between
// Atomic::PlatformCmpxchg<*>::operator() and the
// InterlockedCompareExchange* API.

#define DEFINE_INTRINSIC_CMPXCHG(IntrinsicName, IntrinsicType) \
  template<> \
  template<typename T> \
  inline T Atomic::PlatformCmpxchg<sizeof(IntrinsicType)>::operator()(T volatile* dest, \
                                                                      T compare_value, \
                                                                      T exchange_value, \
                                                                      atomic_memory_order order) const { \
    STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \
    return PrimitiveConversions::cast<T>( \
      IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest), \
                    PrimitiveConversions::cast<IntrinsicType>(exchange_value), \
                    PrimitiveConversions::cast<IntrinsicType>(compare_value))); \
  }

DEFINE_INTRINSIC_CMPXCHG(_InterlockedCompareExchange8, char) // Use the intrinsic as InterlockedCompareExchange8 does not exist
DEFINE_INTRINSIC_CMPXCHG(InterlockedCompareExchange, long)
DEFINE_INTRINSIC_CMPXCHG(InterlockedCompareExchange64, __int64)

#undef DEFINE_INTRINSIC_CMPXCHG
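
// Illustration of the argument swap noted above (a sketch, not upstream
// code): a call like
//
//   PlatformCmpxchg<4>()(dest, compare_value, exchange_value, order)
//
// becomes
//
//   InterlockedCompareExchange(dest, exchange_value, compare_value)
//
// because the Windows API takes the new value before the comparand. Both
// return the previous contents of *dest, so the exchange succeeded exactly
// when the returned value equals compare_value.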

#ifndef AMD64

#pragma warning(disable: 4035) // Disables warnings reporting missing return statement

template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile T dest;
  volatile T* pdest = &dest;
  __asm {
    mov eax, src
    fild qword ptr [eax]   // read the source with a single 64-bit x87 load
    mov eax, pdest
    fistp qword ptr [eax]  // write the local copy with a single 64-bit store
  }
  return dest;
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
                                                 T store_value) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile T* src = &store_value;
  __asm {
    mov eax, src
    fild qword ptr [eax]   // read the new value with a single 64-bit x87 load
    mov eax, dest
    fistp qword ptr [eax]  // publish it to *dest with a single 64-bit store
  }
}

#pragma warning(default: 4035) // Enables warnings reporting missing return statement
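
// Background note (illustration, not part of the upstream sources): on
// 32-bit x86 a pair of 32-bit moves is not atomic, but an aligned 64-bit
// x87 load or store is performed as one memory access, so the fild/fistp
// sequences above give atomic 8-byte loads and stores without resorting to
// cmpxchg8b.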

template<>
struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm {
      mov edx, p;
      mov al, v;
      xchg al, byte ptr [edx];
    }
  }
};

template<>
struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm {
      mov edx, p;
      mov ax, v;
      xchg ax, word ptr [edx];
    }
  }
};

template<>
struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm {
      mov edx, p;
      mov eax, v;
      xchg eax, dword ptr [edx];
    }
  }
};
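
// Background note (illustration, not part of the upstream sources): these
// ordered stores use xchg with a memory operand, which on x86 carries an
// implicit lock prefix. The store therefore also acts as a full fence, which
// is what RELEASE_X_FENCE requires, without a separate OrderAccess::fence().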
#endif // AMD64

#endif // OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP