Path: blob/master/src/hotspot/os_cpu/linux_zero/os_linux_zero.hpp
/*
 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2010, 2018, Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP
#define OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP

  static void setup_fpu() {}

  // Used to register dynamic code cache area with the OS
  // Note: Currently only used in 64 bit Windows implementations
  static bool register_code_area(char *low, char *high) { return true; }

  /*
   * Work-around for broken NX emulation using CS limit, Red Hat patch "Exec-Shield"
   * (IA32 only).
   *
   * Map and execute at a high VA to prevent CS lazy updates race with SMP MM
   * invalidation. Further code generation by the JVM will no longer cause CS limit
   * updates.
   *
   * Affects IA32: RHEL 5 & 6, Ubuntu 10.04 (LTS), 10.10, 11.04, 11.10, 12.04.
   * @see JDK-8023956
   */
  static void workaround_expand_exec_shield_cs_limit();

  // Atomically copy 64 bits of data
  static void atomic_copy64(const volatile void *src, volatile void *dst) {
#if defined(PPC32) && !defined(__SPE__)
    double tmp;
    asm volatile ("lfd  %0, %2\n"
                  "stfd %0, %1\n"
                  : "=&f"(tmp), "=Q"(*(volatile double*)dst)
                  : "Q"(*(volatile double*)src));
#elif defined(PPC32) && defined(__SPE__)
    long tmp;
    asm volatile ("evldd  %0, %2\n"
                  "evstdd %0, %1\n"
                  : "=&r"(tmp), "=Q"(*(volatile long*)dst)
                  : "Q"(*(volatile long*)src));
#elif defined(S390) && !defined(_LP64)
    double tmp;
    asm volatile ("ld  %0, %2\n"
                  "std %0, %1\n"
                  : "=&f"(tmp), "=Q"(*(volatile double*)dst)
                  : "Q"(*(volatile double*)src));
#elif defined(__ARM_ARCH_7A__)
    // The only way to perform the atomic 64-bit load/store
    // is to use ldrexd/strexd for both reads and writes.
    // For store, we need to have the matching (fake) load first.
    // Put clrex between exclusive ops on src and dst for clarity.
    uint64_t tmp_r, tmp_w;
    uint32_t flag_w;
    asm volatile ("ldrexd %[tmp_r], [%[src]]\n"
                  "clrex\n"
                  "1:\n"
                  "ldrexd %[tmp_w], [%[dst]]\n"
                  "strexd %[flag_w], %[tmp_r], [%[dst]]\n"
                  "cmp    %[flag_w], 0\n"
                  "bne    1b\n"
                  : [tmp_r] "=&r" (tmp_r), [tmp_w] "=&r" (tmp_w),
                    [flag_w] "=&r" (flag_w)
                  : [src] "r" (src), [dst] "r" (dst)
                  : "cc", "memory");
#else
    *(jlong *) dst = *(const jlong *) src;
#endif
  }

#endif // OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP
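
// ---------------------------------------------------------------------------
// Usage sketch (not part of the original header). A minimal standalone
// program exercising the generic #else fallback above, renamed
// atomic_copy64_generic so it is self-contained, with int64_t standing in
// for HotSpot's jlong. The per-architecture asm branches exist because
// 32-bit PPC, 31-bit S390 and ARMv7 need special instructions to move
// 64 bits indivisibly; on a 64-bit target a naturally aligned 8-byte
// load/store is already a single access, so the plain assignment suffices.
// Guarded by #if 0 so it never compiles as part of this header; copy it
// into its own .cpp file to build and run it.
#if 0
#include <stdint.h>
#include <stdio.h>

// Stand-in for the generic fallback branch: a plain 64-bit assignment
// through volatile pointers.
static void atomic_copy64_generic(const volatile void *src, volatile void *dst) {
  *(volatile int64_t *) dst = *(const volatile int64_t *) src;
}

int main() {
  volatile int64_t source = INT64_C(0x1122334455667788);
  volatile int64_t target = 0;
  atomic_copy64_generic(&source, &target);
  printf("copied: 0x%016llx\n", (unsigned long long) target);  // 0x1122334455667788
  return 0;
}
#endif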