GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp

/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP
#define OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP

// Implementation of class atomic

template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;

  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
    return fetch_and_add(dest, add_value, order) + add_value;
  }
};
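
// Explanatory comment (not part of the upstream file): only fetch_and_add is
// platform-specific here; add_and_fetch is derived from it by re-adding the
// addend to the returned old value.  A minimal usage sketch, assuming the
// size-dispatching Atomic::add front end in share/runtime/atomic.hpp routes
// to the specializations below (exact signatures vary by JDK version):
//
//   volatile int counter = 0;
//   int new_value = Atomic::add(&counter, 1);  // lock xadd below, then +1 locally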

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
                                               atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
  D old_value;
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (old_value)
                    : "0" (add_value), "r" (dest)
                    : "cc", "memory");
  return old_value;
}
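
// Explanatory comment (not part of the upstream file): "=r" (old_value) makes
// %0 a general-register output, the matching constraint "0" (add_value) loads
// add_value into that same register, and "r" (dest) passes the address as %2.
// "lock xaddl %0,(%2)" atomically adds the register to *dest and writes the
// previous contents of *dest back into the register, which is returned as
// old_value.  The "cc" and "memory" clobbers tell the compiler that the flags
// change and that the statement is also a compiler-level memory barrier.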

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}
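
// Explanatory comment (not part of the upstream file): no "lock" prefix is
// needed above because an xchg with a memory operand is implicitly locked on
// x86, so the swap is atomic and already acts as a full memory barrier.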

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(1 == sizeof(T));
  __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(4 == sizeof(T));
  __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}
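
// Explanatory comment (not part of the upstream file): cmpxchg keeps the
// expected value in eax ("a" (compare_value)); on failure the instruction
// loads the value it actually found into eax, so the "=a" output returns the
// witnessed value, which equals compare_value exactly when the swap happened.
// A typical retry loop on top of this, sketched under the assumption that the
// Atomic::cmpxchg front end in share/runtime/atomic.hpp dispatches here
// (argument order differs across JDK versions):
//
//   volatile int flags = 0;   // some shared word
//   int old;
//   do {
//     old = flags;
//   } while (Atomic::cmpxchg(&flags, old, old | 0x1) != old);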

#ifdef AMD64
template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
                                               atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D old_value;
  __asm__ __volatile__ (  "lock xaddq %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
  return old_value;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ (  "xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}
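
// Explanatory comment (not part of the upstream file): the 8-byte
// specializations above mirror the 4-byte ones, using the q-suffixed forms
// (xaddq, xchgq, cmpxchgq) that need 64-bit registers and therefore exist
// only when building for AMD64.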

#else // !AMD64

extern "C" {
  // defined in bsd_x86.s
  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}
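
// Explanatory comment (not part of the upstream file): on 32-bit x86 a 64-bit
// value cannot be loaded, stored, or compared-and-swapped atomically with the
// plain 32-bit instructions used above, so the work is delegated to the
// hand-written assembly helpers declared here.  _Atomic_cmpxchg_long is
// adapted to the templated type below via cmpxchg_using_helper, and
// _Atomic_move_long provides the atomic 64-bit load and store.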

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}

template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
                                                 T store_value) const {
  STATIC_ASSERT(8 == sizeof(T));
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
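
// Explanatory comment (not part of the upstream file): PlatformLoad<8> and
// PlatformStore<8> go through _Atomic_move_long because a pair of ordinary
// 32-bit moves would not be atomic -- another thread could observe half of
// the value.  The helper performs the transfer as a single 64-bit access.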

#endif // AMD64

template<>
struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm__ volatile (  "xchgb (%2),%0"
                      : "=q" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
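
// Explanatory comment (not part of the upstream file): the RELEASE_X_FENCE
// specializations implement "store with release semantics, then full fence"
// with a single instruction.  Because an xchg to memory is implicitly locked
// it already acts as a full barrier on x86, so no separate mfence is needed;
// the old value swapped back into the register is simply discarded.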

template<>
struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm__ volatile (  "xchgw (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

#ifdef AMD64
template<>
struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const {
    __asm__ volatile (  "xchgq (%2), %0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
#endif // AMD64
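
// Explanatory comment (not part of the upstream file): the 8-byte ordered
// store is guarded by AMD64 because 32-bit x86 has no single 64-bit xchg;
// on 32-bit builds 64-bit ordered stores presumably fall back to the generic
// fence-plus-store path in share/runtime/atomic.hpp together with the
// PlatformStore<8> helper defined earlier in this file.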

#endif // OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP