GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os_cpu/linux_zero/os_linux_zero.hpp
/*
 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2010, 2018, Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP
#define OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP

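  // Zero is the interpreter-only port, so there is no CPU-specific FPU
  // state to initialize here; the empty body is deliberate.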
  static void setup_fpu() {}

  // Used to register dynamic code cache area with the OS
  // Note: Currently only used in 64 bit Windows implementations
  static bool register_code_area(char *low, char *high) { return true; }

  /*
   * Work-around for broken NX emulation using CS limit, Red Hat patch "Exec-Shield"
   * (IA32 only).
   *
   * Map and execute at a high VA to prevent CS lazy updates race with SMP MM
   * invalidation. Further code generation by the JVM will no longer cause CS limit
   * updates.
   *
   * Affects IA32: RHEL 5 & 6, Ubuntu 10.04 (LTS), 10.10, 11.04, 11.10, 12.04.
   * @see JDK-8023956
   */
  static void workaround_expand_exec_shield_cs_limit();

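  // Java requires that loads and stores of volatile jlong/jdouble fields be
  // atomic even on 32-bit platforms (JLS 17.7), which is why a plain
  // two-word copy is not sufficient on the targets handled below.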
  // Atomically copy 64 bits of data
  static void atomic_copy64(const volatile void *src, volatile void *dst) {
#if defined(PPC32) && !defined(__SPE__)
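    // lfd/stfd move the value through an FP register in a single 64-bit
    // memory access, which is atomic on 32-bit PowerPC.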
    double tmp;
    asm volatile ("lfd  %0, %2\n"
                  "stfd %0, %1\n"
                  : "=&f"(tmp), "=Q"(*(volatile double*)dst)
                  : "Q"(*(volatile double*)src));
#elif defined(PPC32) && defined(__SPE__)
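    // e500/SPE cores lack the classic FPU; evldd/evstdd provide a single
    // 64-bit load/store through the SPE-extended GPRs instead.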
    long tmp;
    asm volatile ("evldd  %0, %2\n"
                  "evstdd %0, %1\n"
                  : "=&r"(tmp), "=Q"(*(volatile long*)dst)
                  : "Q"(*(volatile long*)src));
#elif defined(S390) && !defined(_LP64)
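    // 31-bit s390: ld/std through an FP register performs one 64-bit access.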
    double tmp;
    asm volatile ("ld  %0, %2\n"
                  "std %0, %1\n"
                  : "=&f"(tmp), "=Q"(*(volatile double*)dst)
                  : "Q"(*(volatile double*)src));
#elif defined(__ARM_ARCH_7A__)
    // The only way to perform the atomic 64-bit load/store
    // is to use ldrexd/strexd for both reads and writes.
    // For store, we need to have the matching (fake) load first.
    // Put clrex between exclusive ops on src and dst for clarity.
    uint64_t tmp_r, tmp_w;
    uint32_t flag_w;
    asm volatile ("ldrexd %[tmp_r], [%[src]]\n"
                  "clrex\n"
                  "1:\n"
                  "ldrexd %[tmp_w], [%[dst]]\n"
                  "strexd %[flag_w], %[tmp_r], [%[dst]]\n"
                  "cmp %[flag_w], 0\n"
                  "bne 1b\n"
                  : [tmp_r] "=&r" (tmp_r), [tmp_w] "=&r" (tmp_w),
                    [flag_w] "=&r" (flag_w)
                  : [src] "r" (src), [dst] "r" (dst)
                  : "cc", "memory");
#else
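    // On the remaining targets a plain, aligned 64-bit load/store is
    // assumed to be atomic.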
    *(jlong *) dst = *(const jlong *) src;
#endif
  }
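
  // Illustrative usage sketch (not part of the upstream header): copying a
  // 64-bit value without tearing on a 32-bit target.
  //
  //   volatile jlong shared;            // e.g. backing a volatile Java long
  //   jlong local;
  //   atomic_copy64(&shared, &local);   // atomic read
  //   local += 1;
  //   atomic_copy64(&local, &shared);   // atomic write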

#endif // OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP