GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp
/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
#define OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP

#include "utilities/debug.hpp"

// Implementation of class atomic
// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/

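// A brief sketch of the fencing idiom used throughout this file: FULL_MEM_BARRIER
// comes from the platform's orderAccess header and is assumed here to be a full
// two-way fence (typically dmb ish on AArch64).  Pairing a release-ordered
// __atomic builtin with a trailing FULL_MEM_BARRIER is what yields the
// conservative (fully fenced) semantics the shared code expects: the release
// half orders earlier accesses before the operation, and the trailing fence
// keeps later loads and stores from being reordered above it.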

template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
    FULL_MEM_BARRIER;
    return res;
  }

  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    return add_and_fetch(dest, add_value, order) - add_value;
  }
};
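
// Illustrative usage sketch, not part of this header: callers normally go
// through the shared Atomic front end (atomic.hpp), which defaults to
// memory_order_conservative, e.g.
//
//   volatile int _count = 0;
//   int updated = Atomic::add(&_count, 1);  // dispatches to PlatformAdd<4>::add_and_fetch
//
// The release add plus FULL_MEM_BARRIER above is what backs that conservative
// default on this platform; fetch_and_add derives the old value by simply
// subtracting add_value again.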

template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
                                                     T exchange_value,
                                                     atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}

template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
                                                        T compare_value,
                                                        T exchange_value,
                                                        atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  if (order == memory_order_conservative) {
    T value = compare_value;
    FULL_MEM_BARRIER;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    FULL_MEM_BARRIER;
    return value;
  } else {
    STATIC_ASSERT (
      // The modes that align with C++11 are intended to
      // follow the same semantics.
      memory_order_relaxed == __ATOMIC_RELAXED &&
      memory_order_acquire == __ATOMIC_ACQUIRE &&
      memory_order_release == __ATOMIC_RELEASE &&
      memory_order_acq_rel == __ATOMIC_ACQ_REL &&
      memory_order_seq_cst == __ATOMIC_SEQ_CST);

    // Some sanity checking on the memory order.  It makes no
    // sense to have a release operation for a store that never
    // happens.
    int failure_memory_order;
    switch (order) {
    case memory_order_release:
      failure_memory_order = memory_order_relaxed; break;
    case memory_order_acq_rel:
      failure_memory_order = memory_order_acquire; break;
    default:
      failure_memory_order = order;
    }
    assert(failure_memory_order <= order, "must be");

    T value = compare_value;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              order, failure_memory_order);
    return value;
  }
}
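
// Illustrative usage sketch, not part of this header: through the shared
// Atomic front end a compare-and-exchange looks like
//
//   T observed = Atomic::cmpxchg(&_field, expected, desired);  // conservative by default
//   bool swapped = (observed == expected);
//
// For the non-conservative path above, the failure ordering is clamped to
// something no stronger than the success ordering (release falls back to
// relaxed, acq_rel to acquire), mirroring the C++11 rule that a failed
// compare-exchange performs no store and therefore cannot carry release
// semantics.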

template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
  template <typename T>
  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};

template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};

template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
};
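
// Illustrative usage sketch, not part of this header: these specializations
// are assumed to back the acquire/release accessors of the shared Atomic
// front end, e.g.
//
//   Atomic::release_store(&_flag, 1);        // PlatformOrderedStore<.., RELEASE_X>
//   int f = Atomic::load_acquire(&_flag);    // PlatformOrderedLoad<.., X_ACQUIRE>
//   Atomic::release_store_fence(&_flag, 1);  // release store, then OrderAccess::fence()
//
// The RELEASE_X_FENCE variant reuses the plain release store and then issues a
// full fence, which is what the conservative store ordering requires.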

#endif // OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP