GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os_cpu/windows_aarch64/atomic_windows_aarch64.hpp
/*
 * Copyright (c) 2020, Microsoft Corporation. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_WINDOWS_AARCH64_ATOMIC_WINDOWS_AARCH64_HPP
#define OS_CPU_WINDOWS_AARCH64_ATOMIC_WINDOWS_AARCH64_HPP

#include <intrin.h>
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"

// As per atomic.hpp, all read-modify-write operations have to provide two-way
// barrier semantics. The memory_order parameter is ignored - we always provide
// the strongest/most-conservative ordering.
//
// For AARCH64 we add explicit barriers in the stubs.

template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;

  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    return add_and_fetch(dest, add_value, order) - add_value;
  }
};
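
// Illustrative note (a sketch, not part of the upstream header): fetch_and_add
// recovers the pre-add value by subtracting the addend from add_and_fetch's
// "new value" result. For a hypothetical counter currently holding 5:
//
//   add_and_fetch(&counter, 3, order)   // counter becomes 8, returns 8 (new value)
//   fetch_and_add(&counter, 3, order)   // counter becomes 8, returns 8 - 3 == 5 (old value)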

// The Interlocked* APIs only take long and will not accept __int32. That is
// acceptable on Windows, since long is a 32-bit integer type.

#define DEFINE_INTRINSIC_ADD(IntrinsicName, IntrinsicType)                             \
  template<>                                                                            \
  template<typename D, typename I>                                                      \
  inline D Atomic::PlatformAdd<sizeof(IntrinsicType)>::add_and_fetch(D volatile* dest,  \
                                                                     I add_value,       \
                                                                     atomic_memory_order order) const { \
    STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(D));                                  \
    return PrimitiveConversions::cast<D>(                                               \
      IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest),                   \
                    PrimitiveConversions::cast<IntrinsicType>(add_value)));             \
  }

DEFINE_INTRINSIC_ADD(InterlockedAdd, long)
DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64)

#undef DEFINE_INTRINSIC_ADD
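
// Illustrative expansion (a sketch, not part of the upstream file): the long
// variant above, DEFINE_INTRINSIC_ADD(InterlockedAdd, long), expands to roughly
// the following specialization. InterlockedAdd returns the resulting (new)
// value, which is why the primitive is named add_and_fetch:
//
//   template<>
//   template<typename D, typename I>
//   inline D Atomic::PlatformAdd<sizeof(long)>::add_and_fetch(D volatile* dest,
//                                                             I add_value,
//                                                             atomic_memory_order order) const {
//     STATIC_ASSERT(sizeof(long) == sizeof(D));
//     return PrimitiveConversions::cast<D>(
//       InterlockedAdd(reinterpret_cast<long volatile *>(dest),
//                      PrimitiveConversions::cast<long>(add_value)));
//   }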

#define DEFINE_INTRINSIC_XCHG(IntrinsicName, IntrinsicType)                             \
  template<>                                                                             \
  template<typename T>                                                                   \
  inline T Atomic::PlatformXchg<sizeof(IntrinsicType)>::operator()(T volatile* dest,     \
                                                                   T exchange_value,     \
                                                                   atomic_memory_order order) const { \
    STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T));                                   \
    return PrimitiveConversions::cast<T>(                                                \
      IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest),                    \
                    PrimitiveConversions::cast<IntrinsicType>(exchange_value)));         \
  }

DEFINE_INTRINSIC_XCHG(InterlockedExchange, long)
DEFINE_INTRINSIC_XCHG(InterlockedExchange64, __int64)

#undef DEFINE_INTRINSIC_XCHG
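
// Illustrative note (a sketch, not part of the upstream file): InterlockedExchange
// and InterlockedExchange64 return the value previously stored at the destination,
// which matches PlatformXchg's contract of returning the old value. For a
// hypothetical 32-bit slot currently holding 1:
//
//   Atomic::PlatformXchg<4>()(&slot, 2, order)   // slot becomes 2, the call returns 1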

// Note: the order of the parameters is different between
// Atomic::PlatformCmpxchg<*>::operator() and the
// InterlockedCompareExchange* API.

#define DEFINE_INTRINSIC_CMPXCHG(IntrinsicName, IntrinsicType)                           \
  template<>                                                                              \
  template<typename T>                                                                    \
  inline T Atomic::PlatformCmpxchg<sizeof(IntrinsicType)>::operator()(T volatile* dest,   \
                                                                      T compare_value,    \
                                                                      T exchange_value,   \
                                                                      atomic_memory_order order) const { \
    STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T));                                    \
    return PrimitiveConversions::cast<T>(                                                 \
      IntrinsicName(reinterpret_cast<IntrinsicType volatile *>(dest),                     \
                    PrimitiveConversions::cast<IntrinsicType>(exchange_value),            \
                    PrimitiveConversions::cast<IntrinsicType>(compare_value)));           \
  }

DEFINE_INTRINSIC_CMPXCHG(_InterlockedCompareExchange8, char) // Use the intrinsic as InterlockedCompareExchange8 does not exist
DEFINE_INTRINSIC_CMPXCHG(InterlockedCompareExchange, long)
DEFINE_INTRINSIC_CMPXCHG(InterlockedCompareExchange64, __int64)

#undef DEFINE_INTRINSIC_CMPXCHG
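
// Illustrative note (a sketch, not part of the upstream file): the macro swaps
// the last two arguments so that HotSpot's (dest, compare_value, exchange_value)
// ordering maps onto the Windows (Destination, ExChange, Comparand) ordering:
//
//   // HotSpot:  Atomic::PlatformCmpxchg<4>()(dest, compare_value, exchange_value, order)
//   // Windows:  InterlockedCompareExchange(dest, exchange_value, compare_value)
//
// Both return the value that was at *dest before the call, so a result equal to
// compare_value indicates the exchange took place.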

#endif // OS_CPU_WINDOWS_AARCH64_ATOMIC_WINDOWS_AARCH64_HPP