GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp
/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
#define OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP

// Implementation of class atomic
// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/

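// The specializations below map HotSpot's Atomic layer onto the GCC/Clang
// __atomic built-ins. Conservative (fully fenced) ordering is obtained by
// combining release/relaxed built-ins with explicit FULL_MEM_BARRIERs, per
// the note above; the `order` argument is ignored by the add and exchange
// operations, which always execute conservatively.
//
// Minimal usage sketch (assuming the usual HotSpot front end in
// runtime/atomic.hpp, which dispatches to these platform specializations):
//
//   volatile int _count = 0;
//   int updated = Atomic::add(&_count, 1);        // returns the new value
//   int old     = Atomic::cmpxchg(&_count, 1, 2); // returns the observed value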
template<size_t byte_size>
struct Atomic::PlatformAdd {
  template<typename D, typename I>
  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
    FULL_MEM_BARRIER;
    return res;
  }

  template<typename D, typename I>
  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    return add_and_fetch(dest, add_value, order) - add_value;
  }
};

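// Atomic exchange: a releasing __atomic_exchange_n followed by a full two-way
// barrier, so the swap is fully fenced regardless of the requested order.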
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
                                                     T exchange_value,
                                                     atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}

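// Compare-and-exchange. Both branches issue a relaxed __atomic_compare_exchange;
// the conservative branch adds a full barrier before and after. On success
// `value` still holds compare_value, on failure it is overwritten with the
// conflicting contents of *dest, so returning `value` yields the previously
// observed value in either case, as the cmpxchg contract requires.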
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
                                                         T compare_value,
                                                         T exchange_value,
                                                         atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  if (order == memory_order_relaxed) {
    T value = compare_value;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return value;
  } else {
    T value = compare_value;
    FULL_MEM_BARRIER;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    FULL_MEM_BARRIER;
    return value;
  }
}

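// Load-acquire via the type-generic __atomic_load built-in. The const_cast
// only adjusts the pointer type for the built-in; the access itself remains
// an acquire load of *p.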
template<size_t byte_size>
struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
{
  template <typename T>
  T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
};

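// Store-release via the type-generic __atomic_store built-in.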
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
};

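// Release store followed by OrderAccess::fence(), so the store cannot be
// reordered with any later memory access.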
template<size_t byte_size>
struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
};


#endif // OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP