GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp
/*
 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP
#define OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP

#include "orderAccess_bsd_zero.hpp"
#include "runtime/os.hpp"

// Implementation of class atomic

#ifdef M68K

/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Returns newval on success and oldval if no exchange happened.
 * This implementation is processor specific and works on
 * 68020, 68030, 68040 and 68060.
 *
 * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
 * instruction.
 * Using a kernel helper would be better for an architecture-complete
 * implementation.
 *
 */

static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  __asm __volatile ("cas%.l %0,%2,%1"
                    : "=d" (ret), "+m" (*(ptr))
                    : "d" (newval), "0" (oldval));
  return ret;
}

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation. */
static inline int m68k_compare_and_swap(int newval,
                                        volatile int *ptr,
                                        int oldval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        return prev;

      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
        // Success.
        return prev;

      // We failed even though prev == oldval.  Try again.
    }
}

/* Atomically add an int to memory. */
static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
  for (;;) {
      // Loop until success.

      int prev = *ptr;

      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
        return prev + add_value;
    }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int m68k_lock_test_and_set(int newval, volatile int *ptr) {
  for (;;) {
      // Loop until success.
      int prev = *ptr;

      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
        return prev;
    }
}
#endif // M68K
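#ifdef M68K
// Illustrative sketch only (not part of the original port; the helper name
// below is hypothetical).  Per the documented contract of
// m68k_compare_and_swap -- it returns the prior contents of *ptr -- the
// usual CAS retry pattern extends to arbitrary read-modify-write operations,
// e.g. a capped increment.
static inline int m68k_increment_below_example(volatile int *ptr, int cap) {
  for (;;) {
    int prev = *ptr;
    if (prev >= cap)
      return prev;                  // already at the cap; leave *ptr alone

    if (m68k_compare_and_swap(prev + 1, ptr, prev) == prev)
      return prev + 1;              // swap succeeded

    // Another thread updated *ptr between the load and the CAS; retry.
  }
}
#endif // M68K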

#ifdef ARM

/*
 * __kernel_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 */

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation. */
static inline int arm_compare_and_swap(int newval,
                                       volatile int *ptr,
                                       int oldval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        return prev;

      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        // Success.
        return prev;

      // We failed even though prev == oldval.  Try again.
    }
}

/* Atomically add an int to memory. */
static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.

      int prev = *ptr;

      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
        return prev + add_value;
    }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.
      int prev = *ptr;

      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        return prev;
    }
}
#endif // ARM
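#ifdef ARM
// Illustrative sketch only (not part of the original port; the helper name
// below is hypothetical): the __kernel_cmpxchg retry pattern above
// generalizes to any read-modify-write, for example an atomic bitwise OR.
static inline int arm_or_and_fetch_example(int bits, volatile int *ptr) {
  for (;;) {
    // Loop until a __kernel_cmpxchg succeeds.
    int prev = *ptr;

    if (__kernel_cmpxchg (prev, prev | bits, ptr) == 0)
      return prev | bits;
  }
}
#endif // ARM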

template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;

  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    return add_and_fetch(dest, add_value, order) - add_value;
  }
};
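// Worked example of the relationship above: with *dest == 5 and
// add_value == 3, add_and_fetch() returns the updated value 8, while
// fetch_and_add() returns the original value 5 (i.e. 8 - 3).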

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));

#ifdef ARM
  return add_using_helper<int>(arm_add_and_fetch, dest, add_value);
#else
#ifdef M68K
  return add_using_helper<int>(m68k_add_and_fetch, dest, add_value);
#else
  D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
#endif // M68K
#endif // ARM
}

template<>
template<typename D, typename I>
inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));

  D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
  return xchg_using_helper<int>(arm_lock_test_and_set, dest, exchange_value);
#else
#ifdef M68K
  return xchg_using_helper<int>(m68k_lock_test_and_set, dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1.  There is a test for this in JNI_CreateJavaVM().
  T result = __sync_lock_test_and_set (dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp).  However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier.  Hence, this added
  // barrier.  Some platforms (notably ARM) have peculiarities with
  // their barrier implementations, so delegate it to OrderAccess.
  OrderAccess::fence();
  return result;
#endif // M68K
#endif // ARM
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
                                             T exchange_value,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  T result = __sync_lock_test_and_set (dest, exchange_value);
  OrderAccess::fence();
  return result;
}

// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
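// Illustrative sketch only -- the actual emulation lives in the shared
// CmpxchgByteUsingInt helper (runtime/atomic.hpp).  The idea is to CAS the
// aligned 32-bit word that contains the byte, retrying if an unrelated byte
// in that word changes concurrently.  The function name below is
// hypothetical, the fixed-width types are assumed to come from headers
// HotSpot already includes, and the shift assumes a little-endian layout.
static inline unsigned char example_cmpxchg_byte_using_int(volatile unsigned char* dest,
                                                           unsigned char compare_value,
                                                           unsigned char exchange_value) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(dest);
  volatile uint32_t* word = reinterpret_cast<volatile uint32_t*>(addr & ~uintptr_t(3));
  const int shift = (addr & 3) * 8;
  const uint32_t mask = uint32_t(0xff) << shift;

  for (;;) {
    uint32_t old_word = *word;
    unsigned char old_byte = (unsigned char)((old_word & mask) >> shift);
    if (old_byte != compare_value)
      return old_byte;                   // compare failed; report the current byte

    uint32_t new_word = (old_word & ~mask) | (uint32_t(exchange_value) << shift);
    uint32_t expected = old_word;
    if (__atomic_compare_exchange_n(word, &expected, new_word, /*weak*/false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
      return old_byte;                   // word-sized CAS succeeded

    // Some other byte in the same word changed concurrently; retry.
  }
}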

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
  return cmpxchg_using_helper<int>(arm_compare_and_swap, dest, compare_value, exchange_value);
#else
#ifdef M68K
  return cmpxchg_using_helper<int>(m68k_compare_and_swap, dest, compare_value, exchange_value);
#else
  T value = compare_value;
  FULL_MEM_BARRIER;
  __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  FULL_MEM_BARRIER;
  return value;
#endif // M68K
#endif // ARM
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                T compare_value,
                                                T exchange_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));

  T value = compare_value;
  FULL_MEM_BARRIER;
  __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  FULL_MEM_BARRIER;
  return value;
}
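// On 32-bit Zero targets a plain 64-bit load or store is not guaranteed to
// be single-copy atomic (it may tear), so the 8-byte loads and stores below
// are routed through os::atomic_copy64().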

template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
                                                 T store_value) const {
  STATIC_ASSERT(8 == sizeof(T));
  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}

#endif // OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP