Path: blob/master/src/hotspot/os/linux/os_linux.cpp
40951 views
/*1* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324// no precompiled headers25#include "jvm.h"26#include "classfile/vmSymbols.hpp"27#include "code/icBuffer.hpp"28#include "code/vtableStubs.hpp"29#include "compiler/compileBroker.hpp"30#include "compiler/disassembler.hpp"31#include "interpreter/interpreter.hpp"32#include "jvmtifiles/jvmti.h"33#include "logging/log.hpp"34#include "logging/logStream.hpp"35#include "memory/allocation.inline.hpp"36#include "oops/oop.inline.hpp"37#include "os_linux.inline.hpp"38#include "os_posix.inline.hpp"39#include "os_share_linux.hpp"40#include "osContainer_linux.hpp"41#include "prims/jniFastGetField.hpp"42#include "prims/jvm_misc.hpp"43#include "runtime/arguments.hpp"44#include "runtime/atomic.hpp"45#include "runtime/globals.hpp"46#include "runtime/globals_extension.hpp"47#include "runtime/interfaceSupport.inline.hpp"48#include "runtime/init.hpp"49#include "runtime/java.hpp"50#include "runtime/javaCalls.hpp"51#include "runtime/mutexLocker.hpp"52#include "runtime/objectMonitor.hpp"53#include "runtime/osThread.hpp"54#include "runtime/perfMemory.hpp"55#include "runtime/sharedRuntime.hpp"56#include "runtime/statSampler.hpp"57#include "runtime/stubRoutines.hpp"58#include "runtime/thread.inline.hpp"59#include "runtime/threadCritical.hpp"60#include "runtime/threadSMR.hpp"61#include "runtime/timer.hpp"62#include "runtime/vm_version.hpp"63#include "signals_posix.hpp"64#include "semaphore_posix.hpp"65#include "services/memTracker.hpp"66#include "services/runtimeService.hpp"67#include "utilities/align.hpp"68#include "utilities/decoder.hpp"69#include "utilities/defaultStream.hpp"70#include "utilities/events.hpp"71#include "utilities/elfFile.hpp"72#include "utilities/growableArray.hpp"73#include "utilities/macros.hpp"74#include "utilities/powerOfTwo.hpp"75#include "utilities/vmError.hpp"7677// put OS-includes here78# include <sys/types.h>79# include <sys/mman.h>80# include <sys/stat.h>81# include <sys/select.h>82# include <pthread.h>83# include <signal.h>84# include <endian.h>85# include <errno.h>86# include <dlfcn.h>87# include <stdio.h>88# include <unistd.h>89# include <sys/resource.h>90# include <pthread.h>91# include <sys/stat.h>92# include <sys/time.h>93# include <sys/times.h>94# include <sys/utsname.h>95# include <sys/socket.h>96# include <pwd.h>97# include <poll.h>98# include <fcntl.h>99# include <string.h>100# include <syscall.h>101# include <sys/sysinfo.h>102# include <sys/ipc.h>103# include <sys/shm.h>104# include <link.h>105# include <stdint.h>106# include <inttypes.h>107# include <sys/ioctl.h>108# include <linux/elf-em.h>109#ifdef __GLIBC__110# include <malloc.h>111#endif112113#ifndef _GNU_SOURCE114#define _GNU_SOURCE115#include <sched.h>116#undef _GNU_SOURCE117#else118#include <sched.h>119#endif120121// if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling122// getrusage() is prepared to handle the associated failure.123#ifndef RUSAGE_THREAD124#define RUSAGE_THREAD (1) /* only the calling thread */125#endif126127#define MAX_PATH (2 * K)128129#define MAX_SECS 100000000130131// for timer info max values which include all bits132#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)133134#if defined(MUSL_LIBC) || defined(__ANDROID__)135// dlvsym is not a part of POSIX136// and musl libc doesn't implement it.137static void *dlvsym(void *handle,138const char *symbol,139const char *version) {140// load the latest version of symbol141return dlsym(handle, symbol);142}143#endif144145enum CoredumpFilterBit {146FILE_BACKED_PVT_BIT = 1 << 2,147FILE_BACKED_SHARED_BIT = 1 << 3,148LARGEPAGES_BIT = 1 << 6,149DAX_SHARED_BIT = 1 << 8150};151152////////////////////////////////////////////////////////////////////////////////153// global variables154julong os::Linux::_physical_memory = 0;155156address os::Linux::_initial_thread_stack_bottom = NULL;157uintptr_t os::Linux::_initial_thread_stack_size = 0;158159int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;160int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;161pthread_t os::Linux::_main_thread;162int os::Linux::_page_size = -1;163bool os::Linux::_supports_fast_thread_cpu_time = false;164const char * os::Linux::_libc_version = NULL;165const char * os::Linux::_libpthread_version = NULL;166size_t os::Linux::_default_large_page_size = 0;167168#ifdef __GLIBC__169os::Linux::mallinfo_func_t os::Linux::_mallinfo = NULL;170os::Linux::mallinfo2_func_t os::Linux::_mallinfo2 = NULL;171#endif // __GLIBC__172173static jlong initial_time_count=0;174175static int clock_tics_per_sec = 100;176177// If the VM might have been created on the primordial thread, we need to resolve the178// primordial thread stack bounds and check if the current thread might be the179// primordial thread in places. If we know that the primordial thread is never used,180// such as when the VM was created by one of the standard java launchers, we can181// avoid this182static bool suppress_primordial_thread_resolution = false;183184static bool read_so_path_from_maps(const char* so_name, char* buf, int buflen);185186// utility functions187188julong os::available_memory() {189return Linux::available_memory();190}191192julong os::Linux::available_memory() {193// values in struct sysinfo are "unsigned long"194struct sysinfo si;195julong avail_mem;196197if (OSContainer::is_containerized()) {198jlong mem_limit, mem_usage;199if ((mem_limit = OSContainer::memory_limit_in_bytes()) < 1) {200log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value",201mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);202}203if (mem_limit > 0 && (mem_usage = OSContainer::memory_usage_in_bytes()) < 1) {204log_debug(os, container)("container memory usage failed: " JLONG_FORMAT ", using host value", mem_usage);205}206if (mem_limit > 0 && mem_usage > 0 ) {207avail_mem = mem_limit > mem_usage ? (julong)mem_limit - (julong)mem_usage : 0;208log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);209return avail_mem;210}211}212213sysinfo(&si);214avail_mem = (julong)si.freeram * si.mem_unit;215log_trace(os)("available memory: " JULONG_FORMAT, avail_mem);216return avail_mem;217}218219julong os::physical_memory() {220jlong phys_mem = 0;221if (OSContainer::is_containerized()) {222jlong mem_limit;223if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {224log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit);225return mem_limit;226}227log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value",228mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);229}230231phys_mem = Linux::physical_memory();232log_trace(os)("total system memory: " JLONG_FORMAT, phys_mem);233return phys_mem;234}235236static uint64_t initial_total_ticks = 0;237static uint64_t initial_steal_ticks = 0;238static bool has_initial_tick_info = false;239240static void next_line(FILE *f) {241int c;242do {243c = fgetc(f);244} while (c != '\n' && c != EOF);245}246247bool os::Linux::get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu) {248FILE* fh;249uint64_t userTicks, niceTicks, systemTicks, idleTicks;250// since at least kernel 2.6 : iowait: time waiting for I/O to complete251// irq: time servicing interrupts; softirq: time servicing softirqs252uint64_t iowTicks = 0, irqTicks = 0, sirqTicks= 0;253// steal (since kernel 2.6.11): time spent in other OS when running in a virtualized environment254uint64_t stealTicks = 0;255// guest (since kernel 2.6.24): time spent running a virtual CPU for guest OS under the256// control of the Linux kernel257uint64_t guestNiceTicks = 0;258int logical_cpu = -1;259const int required_tickinfo_count = (which_logical_cpu == -1) ? 4 : 5;260int n;261262memset(pticks, 0, sizeof(CPUPerfTicks));263264if ((fh = fopen("/proc/stat", "r")) == NULL) {265return false;266}267268if (which_logical_cpu == -1) {269n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "270UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "271UINT64_FORMAT " " UINT64_FORMAT " ",272&userTicks, &niceTicks, &systemTicks, &idleTicks,273&iowTicks, &irqTicks, &sirqTicks,274&stealTicks, &guestNiceTicks);275} else {276// Move to next line277next_line(fh);278279// find the line for requested cpu faster to just iterate linefeeds?280for (int i = 0; i < which_logical_cpu; i++) {281next_line(fh);282}283284n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "285UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "286UINT64_FORMAT " " UINT64_FORMAT " ",287&logical_cpu, &userTicks, &niceTicks,288&systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks,289&stealTicks, &guestNiceTicks);290}291292fclose(fh);293if (n < required_tickinfo_count || logical_cpu != which_logical_cpu) {294return false;295}296pticks->used = userTicks + niceTicks;297pticks->usedKernel = systemTicks + irqTicks + sirqTicks;298pticks->total = userTicks + niceTicks + systemTicks + idleTicks +299iowTicks + irqTicks + sirqTicks + stealTicks + guestNiceTicks;300301if (n > required_tickinfo_count + 3) {302pticks->steal = stealTicks;303pticks->has_steal_ticks = true;304} else {305pticks->steal = 0;306pticks->has_steal_ticks = false;307}308309return true;310}311312// Return true if user is running as root.313314bool os::have_special_privileges() {315static bool init = false;316static bool privileges = false;317if (!init) {318privileges = (getuid() != geteuid()) || (getgid() != getegid());319init = true;320}321return privileges;322}323324325#ifndef SYS_gettid326// i386 & arm: 224, ia64: 1105, amd64: 186, sparc: 143, aarch64: 178327#ifdef __ia64__328#define SYS_gettid 1105329#else330#if defined(__i386__) || defined(__arm__)331#define SYS_gettid 224332#else333#ifdef __amd64__334#define SYS_gettid 186335#else336#ifdef __sparc__337#define SYS_gettid 143338#elif defined(__arm64__) || defined(__aarch64__)339#define SYS_gettid 178340#else341#error define gettid for the arch342#endif343#endif344#endif345#endif346#endif347348349// pid_t gettid()350//351// Returns the kernel thread id of the currently running thread. Kernel352// thread id is used to access /proc.353pid_t os::Linux::gettid() {354int rslt = syscall(SYS_gettid);355assert(rslt != -1, "must be."); // old linuxthreads implementation?356return (pid_t)rslt;357}358359// Most versions of linux have a bug where the number of processors are360// determined by looking at the /proc file system. In a chroot environment,361// the system call returns 1.362static bool unsafe_chroot_detected = false;363static const char *unstable_chroot_error = "/proc file system not found.\n"364"Java may be unstable running multithreaded in a chroot "365"environment on Linux when /proc filesystem is not mounted.";366367void os::Linux::initialize_system_info() {368set_processor_count(sysconf(_SC_NPROCESSORS_CONF));369if (processor_count() == 1) {370pid_t pid = os::Linux::gettid();371char fname[32];372jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);373FILE *fp = fopen(fname, "r");374if (fp == NULL) {375unsafe_chroot_detected = true;376} else {377fclose(fp);378}379}380_physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);381assert(processor_count() > 0, "linux error");382}383384void os::init_system_properties_values() {385// The next steps are taken in the product version:386//387// Obtain the JAVA_HOME value from the location of libjvm.so.388// This library should be located at:389// <JAVA_HOME>/lib/{client|server}/libjvm.so.390//391// If "/jre/lib/" appears at the right place in the path, then we392// assume libjvm.so is installed in a JDK and we use this path.393//394// Otherwise exit with message: "Could not create the Java virtual machine."395//396// The following extra steps are taken in the debugging version:397//398// If "/jre/lib/" does NOT appear at the right place in the path399// instead of exit check for $JAVA_HOME environment variable.400//401// If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,402// then we append a fake suffix "hotspot/libjvm.so" to this path so403// it looks like libjvm.so is installed there404// <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.405//406// Otherwise exit.407//408// Important note: if the location of libjvm.so changes this409// code needs to be changed accordingly.410411// See ld(1):412// The linker uses the following search paths to locate required413// shared libraries:414// 1: ...415// ...416// 7: The default directories, normally /lib and /usr/lib.417#ifndef OVERRIDE_LIBPATH418#if defined(AMD64) || (defined(_LP64) && defined(SPARC)) || defined(PPC64) || defined(S390)419#define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"420#else421#define DEFAULT_LIBPATH "/lib:/usr/lib"422#endif423#else424#define DEFAULT_LIBPATH OVERRIDE_LIBPATH425#endif426427// Base path of extensions installed on the system.428#define SYS_EXT_DIR "/usr/java/packages"429#define EXTENSIONS_DIR "/lib/ext"430431// Buffer that fits several sprintfs.432// Note that the space for the colon and the trailing null are provided433// by the nulls included by the sizeof operator.434const size_t bufsize =435MAX2((size_t)MAXPATHLEN, // For dll_dir & friends.436(size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir437char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);438439// sysclasspath, java_home, dll_dir440{441char *pslash;442os::jvm_path(buf, bufsize);443444// Found the full path to libjvm.so.445// Now cut the path to <java_home>/jre if we can.446pslash = strrchr(buf, '/');447if (pslash != NULL) {448*pslash = '\0'; // Get rid of /libjvm.so.449}450pslash = strrchr(buf, '/');451if (pslash != NULL) {452*pslash = '\0'; // Get rid of /{client|server|hotspot}.453}454Arguments::set_dll_dir(buf);455456if (pslash != NULL) {457pslash = strrchr(buf, '/');458if (pslash != NULL) {459*pslash = '\0'; // Get rid of /lib.460}461}462Arguments::set_java_home(buf);463if (!set_boot_path('/', ':')) {464vm_exit_during_initialization("Failed setting boot class path.", NULL);465}466}467468// Where to look for native libraries.469//470// Note: Due to a legacy implementation, most of the library path471// is set in the launcher. This was to accomodate linking restrictions472// on legacy Linux implementations (which are no longer supported).473// Eventually, all the library path setting will be done here.474//475// However, to prevent the proliferation of improperly built native476// libraries, the new path component /usr/java/packages is added here.477// Eventually, all the library path setting will be done here.478{479// Get the user setting of LD_LIBRARY_PATH, and prepended it. It480// should always exist (until the legacy problem cited above is481// addressed).482const char *v = ::getenv("LD_LIBRARY_PATH");483const char *v_colon = ":";484if (v == NULL) { v = ""; v_colon = ""; }485// That's +1 for the colon and +1 for the trailing '\0'.486char *ld_library_path = NEW_C_HEAP_ARRAY(char,487strlen(v) + 1 +488sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(DEFAULT_LIBPATH) + 1,489mtInternal);490sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib:" DEFAULT_LIBPATH, v, v_colon);491Arguments::set_library_path(ld_library_path);492FREE_C_HEAP_ARRAY(char, ld_library_path);493}494495// Extensions directories.496sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());497Arguments::set_ext_dirs(buf);498499FREE_C_HEAP_ARRAY(char, buf);500501#undef DEFAULT_LIBPATH502#undef SYS_EXT_DIR503#undef EXTENSIONS_DIR504}505506////////////////////////////////////////////////////////////////////////////////507// breakpoint support508509void os::breakpoint() {510BREAKPOINT;511}512513extern "C" void breakpoint() {514// use debugger to set breakpoint here515}516517//////////////////////////////////////////////////////////////////////////////518// detecting pthread library519520void os::Linux::libpthread_init() {521#ifndef __ANDROID__522// Save glibc and pthread version strings.523#if !defined(_CS_GNU_LIBC_VERSION) || \524!defined(_CS_GNU_LIBPTHREAD_VERSION)525#error "glibc too old (< 2.3.2)"526#endif527528#ifdef MUSL_LIBC529// confstr() from musl libc returns EINVAL for530// _CS_GNU_LIBC_VERSION and _CS_GNU_LIBPTHREAD_VERSION531os::Linux::set_libc_version("musl - unknown");532os::Linux::set_libpthread_version("musl - unknown");533#else534size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);535assert(n > 0, "cannot retrieve glibc version");536char *str = (char *)malloc(n, mtInternal);537confstr(_CS_GNU_LIBC_VERSION, str, n);538os::Linux::set_libc_version(str);539540n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);541assert(n > 0, "cannot retrieve pthread version");542str = (char *)malloc(n, mtInternal);543confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);544os::Linux::set_libpthread_version(str);545#endif546#else547os::Linux::set_libpthread_version("NPTL");548#endif549}550551/////////////////////////////////////////////////////////////////////////////552// thread stack expansion553554// os::Linux::manually_expand_stack() takes care of expanding the thread555// stack. Note that this is normally not needed: pthread stacks allocate556// thread stack using mmap() without MAP_NORESERVE, so the stack is already557// committed. Therefore it is not necessary to expand the stack manually.558//559// Manually expanding the stack was historically needed on LinuxThreads560// thread stacks, which were allocated with mmap(MAP_GROWSDOWN). Nowadays561// it is kept to deal with very rare corner cases:562//563// For one, user may run the VM on an own implementation of threads564// whose stacks are - like the old LinuxThreads - implemented using565// mmap(MAP_GROWSDOWN).566//567// Also, this coding may be needed if the VM is running on the primordial568// thread. Normally we avoid running on the primordial thread; however,569// user may still invoke the VM on the primordial thread.570//571// The following historical comment describes the details about running572// on a thread stack allocated with mmap(MAP_GROWSDOWN):573574575// Force Linux kernel to expand current thread stack. If "bottom" is close576// to the stack guard, caller should block all signals.577//578// MAP_GROWSDOWN:579// A special mmap() flag that is used to implement thread stacks. It tells580// kernel that the memory region should extend downwards when needed. This581// allows early versions of LinuxThreads to only mmap the first few pages582// when creating a new thread. Linux kernel will automatically expand thread583// stack as needed (on page faults).584//585// However, because the memory region of a MAP_GROWSDOWN stack can grow on586// demand, if a page fault happens outside an already mapped MAP_GROWSDOWN587// region, it's hard to tell if the fault is due to a legitimate stack588// access or because of reading/writing non-exist memory (e.g. buffer589// overrun). As a rule, if the fault happens below current stack pointer,590// Linux kernel does not expand stack, instead a SIGSEGV is sent to the591// application (see Linux kernel fault.c).592//593// This Linux feature can cause SIGSEGV when VM bangs thread stack for594// stack overflow detection.595//596// Newer version of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do597// not use MAP_GROWSDOWN.598//599// To get around the problem and allow stack banging on Linux, we need to600// manually expand thread stack after receiving the SIGSEGV.601//602// There are two ways to expand thread stack to address "bottom", we used603// both of them in JVM before 1.5:604// 1. adjust stack pointer first so that it is below "bottom", and then605// touch "bottom"606// 2. mmap() the page in question607//608// Now alternate signal stack is gone, it's harder to use 2. For instance,609// if current sp is already near the lower end of page 101, and we need to610// call mmap() to map page 100, it is possible that part of the mmap() frame611// will be placed in page 100. When page 100 is mapped, it is zero-filled.612// That will destroy the mmap() frame and cause VM to crash.613//614// The following code works by adjusting sp first, then accessing the "bottom"615// page to force a page fault. Linux kernel will then automatically expand the616// stack mapping.617//618// _expand_stack_to() assumes its frame size is less than page size, which619// should always be true if the function is not inlined.620621static void NOINLINE _expand_stack_to(address bottom) {622address sp;623size_t size;624volatile char *p;625626// Adjust bottom to point to the largest address within the same page, it627// gives us a one-page buffer if alloca() allocates slightly more memory.628bottom = (address)align_down((uintptr_t)bottom, os::Linux::page_size());629bottom += os::Linux::page_size() - 1;630631// sp might be slightly above current stack pointer; if that's the case, we632// will alloca() a little more space than necessary, which is OK. Don't use633// os::current_stack_pointer(), as its result can be slightly below current634// stack pointer, causing us to not alloca enough to reach "bottom".635sp = (address)&sp;636637if (sp > bottom) {638size = sp - bottom;639p = (volatile char *)alloca(size);640assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");641p[0] = '\0';642}643}644645void os::Linux::expand_stack_to(address bottom) {646_expand_stack_to(bottom);647}648649bool os::Linux::manually_expand_stack(JavaThread * t, address addr) {650assert(t!=NULL, "just checking");651assert(t->osthread()->expanding_stack(), "expand should be set");652653if (t->is_in_usable_stack(addr)) {654sigset_t mask_all, old_sigset;655sigfillset(&mask_all);656pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);657_expand_stack_to(addr);658pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);659return true;660}661return false;662}663664//////////////////////////////////////////////////////////////////////////////665// create new thread666667// Thread start routine for all newly created threads668static void *thread_native_entry(Thread *thread) {669670thread->record_stack_base_and_size();671672#ifndef __GLIBC__673// Try to randomize the cache line index of hot stack frames.674// This helps when threads of the same stack traces evict each other's675// cache lines. The threads can be either from the same JVM instance, or676// from different JVM instances. The benefit is especially true for677// processors with hyperthreading technology.678// This code is not needed anymore in glibc because it has MULTI_PAGE_ALIASING679// and we did not see any degradation in performance without `alloca()`.680static int counter = 0;681int pid = os::current_process_id();682void *stackmem = alloca(((pid ^ counter++) & 7) * 128);683// Ensure the alloca result is used in a way that prevents the compiler from eliding it.684*(char *)stackmem = 1;685#endif686687thread->initialize_thread_current();688689OSThread* osthread = thread->osthread();690Monitor* sync = osthread->startThread_lock();691692osthread->set_thread_id(os::current_thread_id());693694log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",695os::current_thread_id(), (uintx) pthread_self());696697if (UseNUMA) {698int lgrp_id = os::numa_get_group_id();699if (lgrp_id != -1) {700thread->set_lgrp_id(lgrp_id);701}702}703// initialize signal mask for this thread704PosixSignals::hotspot_sigmask(thread);705706// initialize floating point control register707os::Linux::init_thread_fpu_state();708709// handshaking with parent thread710{711MutexLocker ml(sync, Mutex::_no_safepoint_check_flag);712713// notify parent thread714osthread->set_state(INITIALIZED);715sync->notify_all();716717// wait until os::start_thread()718while (osthread->get_state() == INITIALIZED) {719sync->wait_without_safepoint_check();720}721}722723assert(osthread->pthread_id() != 0, "pthread_id was not set as expected");724725// call one more level start routine726thread->call_run();727728// Note: at this point the thread object may already have deleted itself.729// Prevent dereferencing it from here on out.730thread = NULL;731732log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",733os::current_thread_id(), (uintx) pthread_self());734735return 0;736}737738// On Linux, glibc places static TLS blocks (for __thread variables) on739// the thread stack. This decreases the stack size actually available740// to threads.741//742// For large static TLS sizes, this may cause threads to malfunction due743// to insufficient stack space. This is a well-known issue in glibc:744// http://sourceware.org/bugzilla/show_bug.cgi?id=11787.745//746// As a workaround, we call a private but assumed-stable glibc function,747// __pthread_get_minstack() to obtain the minstack size and derive the748// static TLS size from it. We then increase the user requested stack749// size by this TLS size.750//751// Due to compatibility concerns, this size adjustment is opt-in and752// controlled via AdjustStackSizeForTLS.753typedef size_t (*GetMinStack)(const pthread_attr_t *attr);754755GetMinStack _get_minstack_func = NULL;756757static void get_minstack_init() {758_get_minstack_func =759(GetMinStack)dlsym(RTLD_DEFAULT, "__pthread_get_minstack");760log_info(os, thread)("Lookup of __pthread_get_minstack %s",761_get_minstack_func == NULL ? "failed" : "succeeded");762}763764// Returns the size of the static TLS area glibc puts on thread stacks.765// The value is cached on first use, which occurs when the first thread766// is created during VM initialization.767static size_t get_static_tls_area_size(const pthread_attr_t *attr) {768size_t tls_size = 0;769if (_get_minstack_func != NULL) {770// Obtain the pthread minstack size by calling __pthread_get_minstack.771size_t minstack_size = _get_minstack_func(attr);772773// Remove non-TLS area size included in minstack size returned774// by __pthread_get_minstack() to get the static TLS size.775// In glibc before 2.27, minstack size includes guard_size.776// In glibc 2.27 and later, guard_size is automatically added777// to the stack size by pthread_create and is no longer included778// in minstack size. In both cases, the guard_size is taken into779// account, so there is no need to adjust the result for that.780//781// Although __pthread_get_minstack() is a private glibc function,782// it is expected to have a stable behavior across future glibc783// versions while glibc still allocates the static TLS blocks off784// the stack. Following is glibc 2.28 __pthread_get_minstack():785//786// size_t787// __pthread_get_minstack (const pthread_attr_t *attr)788// {789// return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;790// }791//792//793// The following 'minstack_size > os::vm_page_size() + PTHREAD_STACK_MIN'794// if check is done for precaution.795if (minstack_size > (size_t)os::vm_page_size() + PTHREAD_STACK_MIN) {796tls_size = minstack_size - os::vm_page_size() - PTHREAD_STACK_MIN;797}798}799800log_info(os, thread)("Stack size adjustment for TLS is " SIZE_FORMAT,801tls_size);802return tls_size;803}804805bool os::create_thread(Thread* thread, ThreadType thr_type,806size_t req_stack_size) {807assert(thread->osthread() == NULL, "caller responsible");808809// Allocate the OSThread object810OSThread* osthread = new OSThread(NULL, NULL);811if (osthread == NULL) {812return false;813}814815// set the correct thread state816osthread->set_thread_type(thr_type);817818// Initial state is ALLOCATED but not INITIALIZED819osthread->set_state(ALLOCATED);820821thread->set_osthread(osthread);822823// init thread attributes824pthread_attr_t attr;825pthread_attr_init(&attr);826pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);827828// Calculate stack size if it's not specified by caller.829size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);830// In glibc versions prior to 2.7 the guard size mechanism831// is not implemented properly. The posix standard requires adding832// the size of the guard pages to the stack size, instead Linux833// takes the space out of 'stacksize'. Thus we adapt the requested834// stack_size by the size of the guard pages to mimick proper835// behaviour. However, be careful not to end up with a size836// of zero due to overflow. Don't add the guard page in that case.837size_t guard_size = os::Linux::default_guard_size(thr_type);838// Configure glibc guard page. Must happen before calling839// get_static_tls_area_size(), which uses the guard_size.840pthread_attr_setguardsize(&attr, guard_size);841842size_t stack_adjust_size = 0;843if (AdjustStackSizeForTLS) {844// Adjust the stack_size for on-stack TLS - see get_static_tls_area_size().845stack_adjust_size += get_static_tls_area_size(&attr);846} else {847stack_adjust_size += guard_size;848}849850stack_adjust_size = align_up(stack_adjust_size, os::vm_page_size());851if (stack_size <= SIZE_MAX - stack_adjust_size) {852stack_size += stack_adjust_size;853}854assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");855856int status = pthread_attr_setstacksize(&attr, stack_size);857if (status != 0) {858// pthread_attr_setstacksize() function can fail859// if the stack size exceeds a system-imposed limit.860assert_status(status == EINVAL, status, "pthread_attr_setstacksize");861log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k",862(thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? "" : "VM "),863stack_size / K);864thread->set_osthread(NULL);865delete osthread;866return false;867}868869ThreadState state;870871{872pthread_t tid;873int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);874875char buf[64];876if (ret == 0) {877log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",878(uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));879} else {880log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",881os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));882// Log some OS information which might explain why creating the thread failed.883log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());884LogStream st(Log(os, thread)::info());885os::Posix::print_rlimit_info(&st);886os::print_memory_info(&st);887os::Linux::print_proc_sys_info(&st);888os::Linux::print_container_info(&st);889}890891pthread_attr_destroy(&attr);892893if (ret != 0) {894// Need to clean up stuff we've allocated so far895thread->set_osthread(NULL);896delete osthread;897return false;898}899900// Store pthread info into the OSThread901osthread->set_pthread_id(tid);902903// Wait until child thread is either initialized or aborted904{905Monitor* sync_with_child = osthread->startThread_lock();906MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);907while ((state = osthread->get_state()) == ALLOCATED) {908sync_with_child->wait_without_safepoint_check();909}910}911}912913// The thread is returned suspended (in state INITIALIZED),914// and is started higher up in the call chain915assert(state == INITIALIZED, "race condition");916return true;917}918919/////////////////////////////////////////////////////////////////////////////920// attach existing thread921922// bootstrap the main thread923bool os::create_main_thread(JavaThread* thread) {924assert(os::Linux::_main_thread == pthread_self(), "should be called inside main thread");925return create_attached_thread(thread);926}927928bool os::create_attached_thread(JavaThread* thread) {929#ifdef ASSERT930thread->verify_not_published();931#endif932933// Allocate the OSThread object934OSThread* osthread = new OSThread(NULL, NULL);935936if (osthread == NULL) {937return false;938}939940// Store pthread info into the OSThread941osthread->set_thread_id(os::Linux::gettid());942osthread->set_pthread_id(::pthread_self());943944// initialize floating point control register945os::Linux::init_thread_fpu_state();946947// Initial thread state is RUNNABLE948osthread->set_state(RUNNABLE);949950thread->set_osthread(osthread);951952if (UseNUMA) {953int lgrp_id = os::numa_get_group_id();954if (lgrp_id != -1) {955thread->set_lgrp_id(lgrp_id);956}957}958959if (os::is_primordial_thread()) {960// If current thread is primordial thread, its stack is mapped on demand,961// see notes about MAP_GROWSDOWN. Here we try to force kernel to map962// the entire stack region to avoid SEGV in stack banging.963// It is also useful to get around the heap-stack-gap problem on SuSE964// kernel (see 4821821 for details). We first expand stack to the top965// of yellow zone, then enable stack yellow zone (order is significant,966// enabling yellow zone first will crash JVM on SuSE Linux), so there967// is no gap between the last two virtual memory regions.968969StackOverflow* overflow_state = thread->stack_overflow_state();970address addr = overflow_state->stack_reserved_zone_base();971assert(addr != NULL, "initialization problem?");972assert(overflow_state->stack_available(addr) > 0, "stack guard should not be enabled");973974osthread->set_expanding_stack();975os::Linux::manually_expand_stack(thread, addr);976osthread->clear_expanding_stack();977}978979// initialize signal mask for this thread980// and save the caller's signal mask981PosixSignals::hotspot_sigmask(thread);982983log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",984os::current_thread_id(), (uintx) pthread_self());985986return true;987}988989void os::pd_start_thread(Thread* thread) {990OSThread * osthread = thread->osthread();991assert(osthread->get_state() != INITIALIZED, "just checking");992Monitor* sync_with_child = osthread->startThread_lock();993MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);994sync_with_child->notify();995}996997// Free Linux resources related to the OSThread998void os::free_thread(OSThread* osthread) {999assert(osthread != NULL, "osthread not set");10001001// We are told to free resources of the argument thread,1002// but we can only really operate on the current thread.1003assert(Thread::current()->osthread() == osthread,1004"os::free_thread but not current thread");10051006#ifdef ASSERT1007sigset_t current;1008sigemptyset(¤t);1009pthread_sigmask(SIG_SETMASK, NULL, ¤t);1010assert(!sigismember(¤t, PosixSignals::SR_signum), "SR signal should not be blocked!");1011#endif10121013// Restore caller's signal mask1014sigset_t sigmask = osthread->caller_sigmask();1015pthread_sigmask(SIG_SETMASK, &sigmask, NULL);10161017delete osthread;1018}10191020//////////////////////////////////////////////////////////////////////////////1021// primordial thread10221023// Check if current thread is the primordial thread, similar to Solaris thr_main.1024bool os::is_primordial_thread(void) {1025if (suppress_primordial_thread_resolution) {1026return false;1027}1028char dummy;1029// If called before init complete, thread stack bottom will be null.1030// Can be called if fatal error occurs before initialization.1031if (os::Linux::initial_thread_stack_bottom() == NULL) return false;1032assert(os::Linux::initial_thread_stack_bottom() != NULL &&1033os::Linux::initial_thread_stack_size() != 0,1034"os::init did not locate primordial thread's stack region");1035if ((address)&dummy >= os::Linux::initial_thread_stack_bottom() &&1036(address)&dummy < os::Linux::initial_thread_stack_bottom() +1037os::Linux::initial_thread_stack_size()) {1038return true;1039} else {1040return false;1041}1042}10431044// Find the virtual memory area that contains addr1045static bool find_vma(address addr, address* vma_low, address* vma_high) {1046FILE *fp = fopen("/proc/self/maps", "r");1047if (fp) {1048address low, high;1049while (!feof(fp)) {1050if (fscanf(fp, "%p-%p", &low, &high) == 2) {1051if (low <= addr && addr < high) {1052if (vma_low) *vma_low = low;1053if (vma_high) *vma_high = high;1054fclose(fp);1055return true;1056}1057}1058for (;;) {1059int ch = fgetc(fp);1060if (ch == EOF || ch == (int)'\n') break;1061}1062}1063fclose(fp);1064}1065return false;1066}10671068// Locate primordial thread stack. This special handling of primordial thread stack1069// is needed because pthread_getattr_np() on most (all?) Linux distros returns1070// bogus value for the primordial process thread. While the launcher has created1071// the VM in a new thread since JDK 6, we still have to allow for the use of the1072// JNI invocation API from a primordial thread.1073void os::Linux::capture_initial_stack(size_t max_size) {10741075// max_size is either 0 (which means accept OS default for thread stacks) or1076// a user-specified value known to be at least the minimum needed. If we1077// are actually on the primordial thread we can make it appear that we have a1078// smaller max_size stack by inserting the guard pages at that location. But we1079// cannot do anything to emulate a larger stack than what has been provided by1080// the OS or threading library. In fact if we try to use a stack greater than1081// what is set by rlimit then we will crash the hosting process.10821083// Maximum stack size is the easy part, get it from RLIMIT_STACK.1084// If this is "unlimited" then it will be a huge value.1085struct rlimit rlim;1086getrlimit(RLIMIT_STACK, &rlim);1087size_t stack_size = rlim.rlim_cur;10881089// 6308388: a bug in ld.so will relocate its own .data section to the1090// lower end of primordial stack; reduce ulimit -s value a little bit1091// so we won't install guard page on ld.so's data section.1092// But ensure we don't underflow the stack size - allow 1 page spare1093if (stack_size >= (size_t)(3 * page_size())) {1094stack_size -= 2 * page_size();1095}10961097// Try to figure out where the stack base (top) is. This is harder.1098//1099// When an application is started, glibc saves the initial stack pointer in1100// a global variable "__libc_stack_end", which is then used by system1101// libraries. __libc_stack_end should be pretty close to stack top. The1102// variable is available since the very early days. However, because it is1103// a private interface, it could disappear in the future.1104//1105// Linux kernel saves start_stack information in /proc/<pid>/stat. Similar1106// to __libc_stack_end, it is very close to stack top, but isn't the real1107// stack top. Note that /proc may not exist if VM is running as a chroot1108// program, so reading /proc/<pid>/stat could fail. Also the contents of1109// /proc/<pid>/stat could change in the future (though unlikely).1110//1111// We try __libc_stack_end first. If that doesn't work, look for1112// /proc/<pid>/stat. If neither of them works, we use current stack pointer1113// as a hint, which should work well in most cases.11141115uintptr_t stack_start;11161117// try __libc_stack_end first1118uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");1119if (p && *p) {1120stack_start = *p;1121} else {1122// see if we can get the start_stack field from /proc/self/stat1123FILE *fp;1124int pid;1125char state;1126int ppid;1127int pgrp;1128int session;1129int nr;1130int tpgrp;1131unsigned long flags;1132unsigned long minflt;1133unsigned long cminflt;1134unsigned long majflt;1135unsigned long cmajflt;1136unsigned long utime;1137unsigned long stime;1138long cutime;1139long cstime;1140long prio;1141long nice;1142long junk;1143long it_real;1144uintptr_t start;1145uintptr_t vsize;1146intptr_t rss;1147uintptr_t rsslim;1148uintptr_t scodes;1149uintptr_t ecode;1150int i;11511152// Figure what the primordial thread stack base is. Code is inspired1153// by email from Hans Boehm. /proc/self/stat begins with current pid,1154// followed by command name surrounded by parentheses, state, etc.1155char stat[2048];1156int statlen;11571158fp = fopen("/proc/self/stat", "r");1159if (fp) {1160statlen = fread(stat, 1, 2047, fp);1161stat[statlen] = '\0';1162fclose(fp);11631164// Skip pid and the command string. Note that we could be dealing with1165// weird command names, e.g. user could decide to rename java launcher1166// to "java 1.4.2 :)", then the stat file would look like1167// 1234 (java 1.4.2 :)) R ... ...1168// We don't really need to know the command string, just find the last1169// occurrence of ")" and then start parsing from there. See bug 4726580.1170char * s = strrchr(stat, ')');11711172i = 0;1173if (s) {1174// Skip blank chars1175do { s++; } while (s && isspace(*s));11761177#define _UFM UINTX_FORMAT1178#define _DFM INTX_FORMAT11791180// 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 21181// 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 81182i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,1183&state, // 3 %c1184&ppid, // 4 %d1185&pgrp, // 5 %d1186&session, // 6 %d1187&nr, // 7 %d1188&tpgrp, // 8 %d1189&flags, // 9 %lu1190&minflt, // 10 %lu1191&cminflt, // 11 %lu1192&majflt, // 12 %lu1193&cmajflt, // 13 %lu1194&utime, // 14 %lu1195&stime, // 15 %lu1196&cutime, // 16 %ld1197&cstime, // 17 %ld1198&prio, // 18 %ld1199&nice, // 19 %ld1200&junk, // 20 %ld1201&it_real, // 21 %ld1202&start, // 22 UINTX_FORMAT1203&vsize, // 23 UINTX_FORMAT1204&rss, // 24 INTX_FORMAT1205&rsslim, // 25 UINTX_FORMAT1206&scodes, // 26 UINTX_FORMAT1207&ecode, // 27 UINTX_FORMAT1208&stack_start); // 28 UINTX_FORMAT1209}12101211#undef _UFM1212#undef _DFM12131214if (i != 28 - 2) {1215assert(false, "Bad conversion from /proc/self/stat");1216// product mode - assume we are the primordial thread, good luck in the1217// embedded case.1218warning("Can't detect primordial thread stack location - bad conversion");1219stack_start = (uintptr_t) &rlim;1220}1221} else {1222// For some reason we can't open /proc/self/stat (for example, running on1223// FreeBSD with a Linux emulator, or inside chroot), this should work for1224// most cases, so don't abort:1225warning("Can't detect primordial thread stack location - no /proc/self/stat");1226stack_start = (uintptr_t) &rlim;1227}1228}12291230// Now we have a pointer (stack_start) very close to the stack top, the1231// next thing to do is to figure out the exact location of stack top. We1232// can find out the virtual memory area that contains stack_start by1233// reading /proc/self/maps, it should be the last vma in /proc/self/maps,1234// and its upper limit is the real stack top. (again, this would fail if1235// running inside chroot, because /proc may not exist.)12361237uintptr_t stack_top;1238address low, high;1239if (find_vma((address)stack_start, &low, &high)) {1240// success, "high" is the true stack top. (ignore "low", because initial1241// thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)1242stack_top = (uintptr_t)high;1243} else {1244// failed, likely because /proc/self/maps does not exist1245warning("Can't detect primordial thread stack location - find_vma failed");1246// best effort: stack_start is normally within a few pages below the real1247// stack top, use it as stack top, and reduce stack size so we won't put1248// guard page outside stack.1249stack_top = stack_start;1250stack_size -= 16 * page_size();1251}12521253// stack_top could be partially down the page so align it1254stack_top = align_up(stack_top, page_size());12551256// Allowed stack value is minimum of max_size and what we derived from rlimit1257if (max_size > 0) {1258_initial_thread_stack_size = MIN2(max_size, stack_size);1259} else {1260// Accept the rlimit max, but if stack is unlimited then it will be huge, so1261// clamp it at 8MB as we do on Solaris1262_initial_thread_stack_size = MIN2(stack_size, 8*M);1263}1264_initial_thread_stack_size = align_down(_initial_thread_stack_size, page_size());1265_initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;12661267assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!");12681269if (log_is_enabled(Info, os, thread)) {1270// See if we seem to be on primordial process thread1271bool primordial = uintptr_t(&rlim) > uintptr_t(_initial_thread_stack_bottom) &&1272uintptr_t(&rlim) < stack_top;12731274log_info(os, thread)("Capturing initial stack in %s thread: req. size: " SIZE_FORMAT "K, actual size: "1275SIZE_FORMAT "K, top=" INTPTR_FORMAT ", bottom=" INTPTR_FORMAT,1276primordial ? "primordial" : "user", max_size / K, _initial_thread_stack_size / K,1277stack_top, intptr_t(_initial_thread_stack_bottom));1278}1279}12801281////////////////////////////////////////////////////////////////////////////////1282// time support12831284// Time since start-up in seconds to a fine granularity.1285double os::elapsedTime() {1286return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution1287}12881289jlong os::elapsed_counter() {1290return javaTimeNanos() - initial_time_count;1291}12921293jlong os::elapsed_frequency() {1294return NANOSECS_PER_SEC; // nanosecond resolution1295}12961297bool os::supports_vtime() { return true; }12981299double os::elapsedVTime() {1300struct rusage usage;1301int retval = getrusage(RUSAGE_THREAD, &usage);1302if (retval == 0) {1303return (double) (usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) + (double) (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000);1304} else {1305// better than nothing, but not much1306return elapsedTime();1307}1308}13091310void os::Linux::fast_thread_clock_init() {1311if (!UseLinuxPosixThreadCPUClocks) {1312return;1313}1314clockid_t clockid;1315struct timespec tp;1316int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =1317(int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");13181319// Switch to using fast clocks for thread cpu time if1320// the clock_getres() returns 0 error code.1321// Note, that some kernels may support the current thread1322// clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks1323// returned by the pthread_getcpuclockid().1324// If the fast Posix clocks are supported then the clock_getres()1325// must return at least tp.tv_sec == 0 which means a resolution1326// better than 1 sec. This is extra check for reliability.13271328if (pthread_getcpuclockid_func &&1329pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&1330clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {1331_supports_fast_thread_cpu_time = true;1332_pthread_getcpuclockid = pthread_getcpuclockid_func;1333}1334}13351336// Return the real, user, and system times in seconds from an1337// arbitrary fixed point in the past.1338bool os::getTimesSecs(double* process_real_time,1339double* process_user_time,1340double* process_system_time) {1341struct tms ticks;1342clock_t real_ticks = times(&ticks);13431344if (real_ticks == (clock_t) (-1)) {1345return false;1346} else {1347double ticks_per_second = (double) clock_tics_per_sec;1348*process_user_time = ((double) ticks.tms_utime) / ticks_per_second;1349*process_system_time = ((double) ticks.tms_stime) / ticks_per_second;1350*process_real_time = ((double) real_ticks) / ticks_per_second;13511352return true;1353}1354}135513561357char * os::local_time_string(char *buf, size_t buflen) {1358struct tm t;1359time_t long_time;1360time(&long_time);1361localtime_r(&long_time, &t);1362jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",1363t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,1364t.tm_hour, t.tm_min, t.tm_sec);1365return buf;1366}13671368struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {1369return localtime_r(clock, res);1370}13711372// thread_id is kernel thread id (similar to Solaris LWP id)1373intx os::current_thread_id() { return os::Linux::gettid(); }1374int os::current_process_id() {1375return ::getpid();1376}13771378// DLL functions13791380const char* os::dll_file_extension() { return ".so"; }13811382// This must be hard coded because it's the system's temporary1383// directory not the java application's temp directory, ala java.io.tmpdir.1384const char* os::get_temp_directory() {1385#ifndef __ANDROID__1386return "/tmp";1387#else1388return "/data/tmp";1389#endif1390}13911392static bool file_exists(const char* filename) {1393struct stat statbuf;1394if (filename == NULL || strlen(filename) == 0) {1395return false;1396}1397return os::stat(filename, &statbuf) == 0;1398}13991400// check if addr is inside libjvm.so1401bool os::address_is_in_vm(address addr) {1402static address libjvm_base_addr;1403Dl_info dlinfo;14041405if (libjvm_base_addr == NULL) {1406if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {1407libjvm_base_addr = (address)dlinfo.dli_fbase;1408}1409assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");1410}14111412if (dladdr((void *)addr, &dlinfo) != 0) {1413if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;1414}14151416return false;1417}14181419bool os::dll_address_to_function_name(address addr, char *buf,1420int buflen, int *offset,1421bool demangle) {1422// buf is not optional, but offset is optional1423assert(buf != NULL, "sanity check");14241425Dl_info dlinfo;14261427if (dladdr((void*)addr, &dlinfo) != 0) {1428// see if we have a matching symbol1429if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {1430if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {1431jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);1432}1433if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;1434return true;1435}1436// no matching symbol so try for just file info1437if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {1438if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),1439buf, buflen, offset, dlinfo.dli_fname, demangle)) {1440return true;1441}1442}1443}14441445buf[0] = '\0';1446if (offset != NULL) *offset = -1;1447return false;1448}14491450struct _address_to_library_name {1451address addr; // input : memory address1452size_t buflen; // size of fname1453char* fname; // output: library name1454address base; // library base addr1455};14561457static int address_to_library_name_callback(struct dl_phdr_info *info,1458size_t size, void *data) {1459int i;1460bool found = false;1461address libbase = NULL;1462struct _address_to_library_name * d = (struct _address_to_library_name *)data;14631464// iterate through all loadable segments1465for (i = 0; i < info->dlpi_phnum; i++) {1466address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);1467if (info->dlpi_phdr[i].p_type == PT_LOAD) {1468// base address of a library is the lowest address of its loaded1469// segments.1470if (libbase == NULL || libbase > segbase) {1471libbase = segbase;1472}1473// see if 'addr' is within current segment1474if (segbase <= d->addr &&1475d->addr < segbase + info->dlpi_phdr[i].p_memsz) {1476found = true;1477}1478}1479}14801481// dlpi_name is NULL or empty if the ELF file is executable, return 01482// so dll_address_to_library_name() can fall through to use dladdr() which1483// can figure out executable name from argv[0].1484if (found && info->dlpi_name && info->dlpi_name[0]) {1485d->base = libbase;1486if (d->fname) {1487jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name);1488}1489return 1;1490}1491return 0;1492}14931494bool os::dll_address_to_library_name(address addr, char* buf,1495int buflen, int* offset) {1496// buf is not optional, but offset is optional1497assert(buf != NULL, "sanity check");14981499Dl_info dlinfo;1500struct _address_to_library_name data;15011502// There is a bug in old glibc dladdr() implementation that it could resolve1503// to wrong library name if the .so file has a base address != NULL. Here1504// we iterate through the program headers of all loaded libraries to find1505// out which library 'addr' really belongs to. This workaround can be1506// removed once the minimum requirement for glibc is moved to 2.3.x.1507data.addr = addr;1508data.fname = buf;1509data.buflen = buflen;1510data.base = NULL;1511int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);15121513if (rslt) {1514// buf already contains library name1515if (offset) *offset = addr - data.base;1516return true;1517}1518if (dladdr((void*)addr, &dlinfo) != 0) {1519if (dlinfo.dli_fname != NULL) {1520jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);1521}1522if (dlinfo.dli_fbase != NULL && offset != NULL) {1523*offset = addr - (address)dlinfo.dli_fbase;1524}1525return true;1526}15271528buf[0] = '\0';1529if (offset) *offset = -1;1530return false;1531}15321533static bool read_so_path_from_maps(const char* so_name, char* buf, int buflen) {1534FILE *fp = fopen("/proc/self/maps", "r");1535assert(fp, "Failed to open /proc/self/maps");1536if (!fp) {1537return false;1538}15391540char maps_buffer[2048];1541while (fgets(maps_buffer, 2048, fp) != NULL) {1542if (strstr(maps_buffer, so_name) == NULL) {1543continue;1544}15451546char *so_path = strchr(maps_buffer, '/');1547so_path[strlen(so_path) - 1] = '\0'; // Cut trailing \n1548jio_snprintf(buf, buflen, "%s", so_path);1549fclose(fp);1550return true;1551}15521553fclose(fp);1554return false;1555}15561557// Loads .dll/.so and1558// in case of error it checks if .dll/.so was built for the1559// same architecture as Hotspot is running on156015611562// Remember the stack's state. The Linux dynamic linker will change1563// the stack to 'executable' at most once, so we must safepoint only once.1564bool os::Linux::_stack_is_executable = false;15651566// VM operation that loads a library. This is necessary if stack protection1567// of the Java stacks can be lost during loading the library. If we1568// do not stop the Java threads, they can stack overflow before the stacks1569// are protected again.1570class VM_LinuxDllLoad: public VM_Operation {1571private:1572const char *_filename;1573char *_ebuf;1574int _ebuflen;1575void *_lib;1576public:1577VM_LinuxDllLoad(const char *fn, char *ebuf, int ebuflen) :1578_filename(fn), _ebuf(ebuf), _ebuflen(ebuflen), _lib(NULL) {}1579VMOp_Type type() const { return VMOp_LinuxDllLoad; }1580void doit() {1581_lib = os::Linux::dll_load_in_vmthread(_filename, _ebuf, _ebuflen);1582os::Linux::_stack_is_executable = true;1583}1584void* loaded_library() { return _lib; }1585};15861587void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {1588void * result = NULL;1589bool load_attempted = false;15901591log_info(os)("attempting shared library load of %s", filename);15921593// Check whether the library to load might change execution rights1594// of the stack. If they are changed, the protection of the stack1595// guard pages will be lost. We need a safepoint to fix this.1596//1597// See Linux man page execstack(8) for more info.1598if (os::uses_stack_guard_pages() && !os::Linux::_stack_is_executable) {1599if (!ElfFile::specifies_noexecstack(filename)) {1600if (!is_init_completed()) {1601os::Linux::_stack_is_executable = true;1602// This is OK - No Java threads have been created yet, and hence no1603// stack guard pages to fix.1604//1605// Dynamic loader will make all stacks executable after1606// this function returns, and will not do that again.1607assert(Threads::number_of_threads() == 0, "no Java threads should exist yet.");1608} else {1609warning("You have loaded library %s which might have disabled stack guard. "1610"The VM will try to fix the stack guard now.\n"1611"It's highly recommended that you fix the library with "1612"'execstack -c <libfile>', or link it with '-z noexecstack'.",1613filename);16141615JavaThread *jt = JavaThread::current();1616if (jt->thread_state() != _thread_in_native) {1617// This happens when a compiler thread tries to load a hsdis-<arch>.so file1618// that requires ExecStack. Cannot enter safe point. Let's give up.1619warning("Unable to fix stack guard. Giving up.");1620} else {1621if (!LoadExecStackDllInVMThread) {1622// This is for the case where the DLL has an static1623// constructor function that executes JNI code. We cannot1624// load such DLLs in the VMThread.1625result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);1626}16271628ThreadInVMfromNative tiv(jt);1629debug_only(VMNativeEntryWrapper vew;)16301631VM_LinuxDllLoad op(filename, ebuf, ebuflen);1632VMThread::execute(&op);1633if (LoadExecStackDllInVMThread) {1634result = op.loaded_library();1635}1636load_attempted = true;1637}1638}1639}1640}16411642if (!load_attempted) {1643result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);1644}16451646if (result != NULL) {1647// Successful loading1648return result;1649}16501651Elf32_Ehdr elf_head;1652int diag_msg_max_length=ebuflen-strlen(ebuf);1653char* diag_msg_buf=ebuf+strlen(ebuf);16541655if (diag_msg_max_length==0) {1656// No more space in ebuf for additional diagnostics message1657return NULL;1658}165916601661int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);16621663if (file_descriptor < 0) {1664// Can't open library, report dlerror() message1665return NULL;1666}16671668bool failed_to_read_elf_head=1669(sizeof(elf_head)!=1670(::read(file_descriptor, &elf_head,sizeof(elf_head))));16711672::close(file_descriptor);1673if (failed_to_read_elf_head) {1674// file i/o error - report dlerror() msg1675return NULL;1676}16771678if (elf_head.e_ident[EI_DATA] != LITTLE_ENDIAN_ONLY(ELFDATA2LSB) BIG_ENDIAN_ONLY(ELFDATA2MSB)) {1679// handle invalid/out of range endianness values1680if (elf_head.e_ident[EI_DATA] == 0 || elf_head.e_ident[EI_DATA] > 2) {1681return NULL;1682}16831684#if defined(VM_LITTLE_ENDIAN)1685// VM is LE, shared object BE1686elf_head.e_machine = be16toh(elf_head.e_machine);1687#else1688// VM is BE, shared object LE1689elf_head.e_machine = le16toh(elf_head.e_machine);1690#endif1691}16921693typedef struct {1694Elf32_Half code; // Actual value as defined in elf.h1695Elf32_Half compat_class; // Compatibility of archs at VM's sense1696unsigned char elf_class; // 32 or 64 bit1697unsigned char endianness; // MSB or LSB1698char* name; // String representation1699} arch_t;17001701#ifndef EM_AARCH641702#define EM_AARCH64 183 /* ARM AARCH64 */1703#endif1704#ifndef EM_RISCV1705#define EM_RISCV 243 /* RISC-V */1706#endif17071708static const arch_t arch_array[]={1709{EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},1710{EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},1711{EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},1712{EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},1713{EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},1714{EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},1715{EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},1716{EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},1717#if defined(VM_LITTLE_ENDIAN)1718{EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64 LE"},1719{EM_SH, EM_SH, ELFCLASS32, ELFDATA2LSB, (char*)"SuperH"},1720#else1721{EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},1722{EM_SH, EM_SH, ELFCLASS32, ELFDATA2MSB, (char*)"SuperH BE"},1723#endif1724{EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},1725// we only support 64 bit z architecture1726{EM_S390, EM_S390, ELFCLASS64, ELFDATA2MSB, (char*)"IBM System/390"},1727{EM_ALPHA, EM_ALPHA, ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},1728{EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"},1729{EM_MIPS, EM_MIPS, ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"},1730{EM_PARISC, EM_PARISC, ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"},1731{EM_68K, EM_68K, ELFCLASS32, ELFDATA2MSB, (char*)"M68k"},1732{EM_AARCH64, EM_AARCH64, ELFCLASS64, ELFDATA2LSB, (char*)"AARCH64"},1733{EM_RISCV, EM_RISCV, ELFCLASS64, ELFDATA2LSB, (char*)"RISC-V"},1734};17351736#if (defined IA32)1737static Elf32_Half running_arch_code=EM_386;1738#elif (defined AMD64) || (defined X32)1739static Elf32_Half running_arch_code=EM_X86_64;1740#elif (defined IA64)1741static Elf32_Half running_arch_code=EM_IA_64;1742#elif (defined __sparc) && (defined _LP64)1743static Elf32_Half running_arch_code=EM_SPARCV9;1744#elif (defined __sparc) && (!defined _LP64)1745static Elf32_Half running_arch_code=EM_SPARC;1746#elif (defined __powerpc64__)1747static Elf32_Half running_arch_code=EM_PPC64;1748#elif (defined __powerpc__)1749static Elf32_Half running_arch_code=EM_PPC;1750#elif (defined AARCH64)1751static Elf32_Half running_arch_code=EM_AARCH64;1752#elif (defined ARM)1753static Elf32_Half running_arch_code=EM_ARM;1754#elif (defined S390)1755static Elf32_Half running_arch_code=EM_S390;1756#elif (defined ALPHA)1757static Elf32_Half running_arch_code=EM_ALPHA;1758#elif (defined MIPSEL)1759static Elf32_Half running_arch_code=EM_MIPS_RS3_LE;1760#elif (defined PARISC)1761static Elf32_Half running_arch_code=EM_PARISC;1762#elif (defined MIPS)1763static Elf32_Half running_arch_code=EM_MIPS;1764#elif (defined M68K)1765static Elf32_Half running_arch_code=EM_68K;1766#elif (defined SH)1767static Elf32_Half running_arch_code=EM_SH;1768#elif (defined RISCV)1769static Elf32_Half running_arch_code=EM_RISCV;1770#else1771#error Method os::dll_load requires that one of following is defined:\1772AARCH64, ALPHA, ARM, AMD64, IA32, IA64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc1773#endif17741775// Identify compatibility class for VM's architecture and library's architecture1776// Obtain string descriptions for architectures17771778arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};1779int running_arch_index=-1;17801781for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {1782if (running_arch_code == arch_array[i].code) {1783running_arch_index = i;1784}1785if (lib_arch.code == arch_array[i].code) {1786lib_arch.compat_class = arch_array[i].compat_class;1787lib_arch.name = arch_array[i].name;1788}1789}17901791assert(running_arch_index != -1,1792"Didn't find running architecture code (running_arch_code) in arch_array");1793if (running_arch_index == -1) {1794// Even though running architecture detection failed1795// we may still continue with reporting dlerror() message1796return NULL;1797}17981799if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {1800if (lib_arch.name != NULL) {1801::snprintf(diag_msg_buf, diag_msg_max_length-1,1802" (Possible cause: can't load %s .so on a %s platform)",1803lib_arch.name, arch_array[running_arch_index].name);1804} else {1805::snprintf(diag_msg_buf, diag_msg_max_length-1,1806" (Possible cause: can't load this .so (machine code=0x%x) on a %s platform)",1807lib_arch.code, arch_array[running_arch_index].name);1808}1809return NULL;1810}18111812if (lib_arch.endianness != arch_array[running_arch_index].endianness) {1813::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: endianness mismatch)");1814return NULL;1815}18161817// ELF file class/capacity : 0 - invalid, 1 - 32bit, 2 - 64bit1818if (lib_arch.elf_class > 2 || lib_arch.elf_class < 1) {1819::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: invalid ELF file class)");1820return NULL;1821}18221823if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {1824::snprintf(diag_msg_buf, diag_msg_max_length-1,1825" (Possible cause: architecture word width mismatch, can't load %d-bit .so on a %d-bit platform)",1826(int) lib_arch.elf_class * 32, arch_array[running_arch_index].elf_class * 32);1827return NULL;1828}18291830return NULL;1831}18321833void * os::Linux::dlopen_helper(const char *filename, char *ebuf,1834int ebuflen) {1835void * result = ::dlopen(filename, RTLD_LAZY);1836if (result == NULL) {1837const char* error_report = ::dlerror();1838if (error_report == NULL) {1839error_report = "dlerror returned no error description";1840}1841if (ebuf != NULL && ebuflen > 0) {1842::strncpy(ebuf, error_report, ebuflen-1);1843ebuf[ebuflen-1]='\0';1844}1845Events::log(NULL, "Loading shared library %s failed, %s", filename, error_report);1846log_info(os)("shared library load of %s failed, %s", filename, error_report);1847} else {1848Events::log(NULL, "Loaded shared library %s", filename);1849log_info(os)("shared library load of %s was successful", filename);1850}1851return result;1852}18531854void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,1855int ebuflen) {1856void * result = NULL;1857if (LoadExecStackDllInVMThread) {1858result = dlopen_helper(filename, ebuf, ebuflen);1859}18601861// Since 7019808, libjvm.so is linked with -noexecstack. If the VM loads a1862// library that requires an executable stack, or which does not have this1863// stack attribute set, dlopen changes the stack attribute to executable. The1864// read protection of the guard pages gets lost.1865//1866// Need to check _stack_is_executable again as multiple VM_LinuxDllLoad1867// may have been queued at the same time.18681869if (!_stack_is_executable) {1870for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {1871StackOverflow* overflow_state = jt->stack_overflow_state();1872if (!overflow_state->stack_guard_zone_unused() && // Stack not yet fully initialized1873overflow_state->stack_guards_enabled()) { // No pending stack overflow exceptions1874if (!os::guard_memory((char *)jt->stack_end(), StackOverflow::stack_guard_zone_size())) {1875warning("Attempt to reguard stack yellow zone failed.");1876}1877}1878}1879}18801881return result;1882}18831884void* os::dll_lookup(void* handle, const char* name) {1885void* res = dlsym(handle, name);1886return res;1887}18881889void* os::get_default_process_handle() {1890return (void*)::dlopen(NULL, RTLD_LAZY);1891}18921893static bool _print_ascii_file(const char* filename, outputStream* st, const char* hdr = NULL) {1894int fd = ::open(filename, O_RDONLY);1895if (fd == -1) {1896return false;1897}18981899if (hdr != NULL) {1900st->print_cr("%s", hdr);1901}19021903char buf[33];1904int bytes;1905buf[32] = '\0';1906while ((bytes = ::read(fd, buf, sizeof(buf)-1)) > 0) {1907st->print_raw(buf, bytes);1908}19091910::close(fd);19111912return true;1913}19141915static void _print_ascii_file_h(const char* header, const char* filename, outputStream* st, bool same_line = true) {1916st->print("%s:%c", header, same_line ? ' ' : '\n');1917if (!_print_ascii_file(filename, st)) {1918st->print_cr("<Not Available>");1919}1920}19211922void os::print_dll_info(outputStream *st) {1923st->print_cr("Dynamic libraries:");19241925char fname[32];1926pid_t pid = os::Linux::gettid();19271928jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);19291930if (!_print_ascii_file(fname, st)) {1931st->print_cr("Can not get library information for pid = %d", pid);1932}1933}19341935struct loaded_modules_info_param {1936os::LoadedModulesCallbackFunc callback;1937void *param;1938};19391940static int dl_iterate_callback(struct dl_phdr_info *info, size_t size, void *data) {1941if ((info->dlpi_name == NULL) || (*info->dlpi_name == '\0')) {1942return 0;1943}19441945struct loaded_modules_info_param *callback_param = reinterpret_cast<struct loaded_modules_info_param *>(data);1946address base = NULL;1947address top = NULL;1948for (int idx = 0; idx < info->dlpi_phnum; idx++) {1949const ElfW(Phdr) *phdr = info->dlpi_phdr + idx;1950if (phdr->p_type == PT_LOAD) {1951address raw_phdr_base = reinterpret_cast<address>(info->dlpi_addr + phdr->p_vaddr);19521953address phdr_base = align_down(raw_phdr_base, phdr->p_align);1954if ((base == NULL) || (base > phdr_base)) {1955base = phdr_base;1956}19571958address phdr_top = align_up(raw_phdr_base + phdr->p_memsz, phdr->p_align);1959if ((top == NULL) || (top < phdr_top)) {1960top = phdr_top;1961}1962}1963}19641965return callback_param->callback(info->dlpi_name, base, top, callback_param->param);1966}19671968int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {1969struct loaded_modules_info_param callback_param = {callback, param};1970return dl_iterate_phdr(&dl_iterate_callback, &callback_param);1971}19721973void os::print_os_info_brief(outputStream* st) {1974os::Linux::print_distro_info(st);19751976os::Posix::print_uname_info(st);19771978os::Linux::print_libversion_info(st);19791980}19811982void os::print_os_info(outputStream* st) {1983st->print_cr("OS:");19841985os::Linux::print_distro_info(st);19861987os::Posix::print_uname_info(st);19881989os::Linux::print_uptime_info(st);19901991// Print warning if unsafe chroot environment detected1992if (unsafe_chroot_detected) {1993st->print_cr("WARNING!! %s", unstable_chroot_error);1994}19951996os::Linux::print_libversion_info(st);19971998os::Posix::print_rlimit_info(st);19992000os::Posix::print_load_average(st);2001st->cr();20022003os::Linux::print_system_memory_info(st);2004st->cr();20052006os::Linux::print_process_memory_info(st);2007st->cr();20082009os::Linux::print_proc_sys_info(st);2010st->cr();20112012if (os::Linux::print_ld_preload_file(st)) {2013st->cr();2014}20152016if (os::Linux::print_container_info(st)) {2017st->cr();2018}20192020VM_Version::print_platform_virtualization_info(st);20212022os::Linux::print_steal_info(st);2023}20242025// Try to identify popular distros.2026// Most Linux distributions have a /etc/XXX-release file, which contains2027// the OS version string. Newer Linux distributions have a /etc/lsb-release2028// file that also contains the OS version string. Some have more than one2029// /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and2030// /etc/redhat-release.), so the order is important.2031// Any Linux that is based on Redhat (i.e. Oracle, Mandrake, Sun JDS...) have2032// their own specific XXX-release file as well as a redhat-release file.2033// Because of this the XXX-release file needs to be searched for before the2034// redhat-release file.2035// Since Red Hat and SuSE have an lsb-release file that is not very descriptive the2036// search for redhat-release / SuSE-release needs to be before lsb-release.2037// Since the lsb-release file is the new standard it needs to be searched2038// before the older style release files.2039// Searching system-release (Red Hat) and os-release (other Linuxes) are a2040// next to last resort. The os-release file is a new standard that contains2041// distribution information and the system-release file seems to be an old2042// standard that has been replaced by the lsb-release and os-release files.2043// Searching for the debian_version file is the last resort. It contains2044// an informative string like "6.0.6" or "wheezy/sid". Because of this2045// "Debian " is printed before the contents of the debian_version file.20462047const char* distro_files[] = {2048"/etc/oracle-release",2049"/etc/mandriva-release",2050"/etc/mandrake-release",2051"/etc/sun-release",2052"/etc/redhat-release",2053"/etc/SuSE-release",2054"/etc/lsb-release",2055"/etc/turbolinux-release",2056"/etc/gentoo-release",2057"/etc/ltib-release",2058"/etc/angstrom-version",2059"/etc/system-release",2060"/etc/os-release",2061NULL };20622063void os::Linux::print_distro_info(outputStream* st) {2064for (int i = 0;; i++) {2065const char* file = distro_files[i];2066if (file == NULL) {2067break; // done2068}2069// If file prints, we found it.2070if (_print_ascii_file(file, st)) {2071return;2072}2073}20742075if (file_exists("/etc/debian_version")) {2076st->print("Debian ");2077_print_ascii_file("/etc/debian_version", st);2078} else {2079st->print_cr("Linux");2080}2081}20822083static void parse_os_info_helper(FILE* fp, char* distro, size_t length, bool get_first_line) {2084char buf[256];2085while (fgets(buf, sizeof(buf), fp)) {2086// Edit out extra stuff in expected format2087if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL || strstr(buf, "PRETTY_NAME=") != NULL) {2088char* ptr = strstr(buf, "\""); // the name is in quotes2089if (ptr != NULL) {2090ptr++; // go beyond first quote2091char* nl = strchr(ptr, '\"');2092if (nl != NULL) *nl = '\0';2093strncpy(distro, ptr, length);2094} else {2095ptr = strstr(buf, "=");2096ptr++; // go beyond equals then2097char* nl = strchr(ptr, '\n');2098if (nl != NULL) *nl = '\0';2099strncpy(distro, ptr, length);2100}2101return;2102} else if (get_first_line) {2103char* nl = strchr(buf, '\n');2104if (nl != NULL) *nl = '\0';2105strncpy(distro, buf, length);2106return;2107}2108}2109// print last line and close2110char* nl = strchr(buf, '\n');2111if (nl != NULL) *nl = '\0';2112strncpy(distro, buf, length);2113}21142115static void parse_os_info(char* distro, size_t length, const char* file) {2116FILE* fp = fopen(file, "r");2117if (fp != NULL) {2118// if suse format, print out first line2119bool get_first_line = (strcmp(file, "/etc/SuSE-release") == 0);2120parse_os_info_helper(fp, distro, length, get_first_line);2121fclose(fp);2122}2123}21242125void os::get_summary_os_info(char* buf, size_t buflen) {2126for (int i = 0;; i++) {2127const char* file = distro_files[i];2128if (file == NULL) {2129break; // ran out of distro_files2130}2131if (file_exists(file)) {2132parse_os_info(buf, buflen, file);2133return;2134}2135}2136// special case for debian2137if (file_exists("/etc/debian_version")) {2138strncpy(buf, "Debian ", buflen);2139if (buflen > 7) {2140parse_os_info(&buf[7], buflen-7, "/etc/debian_version");2141}2142} else {2143strncpy(buf, "Linux", buflen);2144}2145}21462147void os::Linux::print_libversion_info(outputStream* st) {2148// libc, pthread2149st->print("libc: ");2150st->print("%s ", os::Linux::libc_version());2151st->print("%s ", os::Linux::libpthread_version());2152st->cr();2153}21542155void os::Linux::print_proc_sys_info(outputStream* st) {2156_print_ascii_file_h("/proc/sys/kernel/threads-max (system-wide limit on the number of threads)",2157"/proc/sys/kernel/threads-max", st);2158_print_ascii_file_h("/proc/sys/vm/max_map_count (maximum number of memory map areas a process may have)",2159"/proc/sys/vm/max_map_count", st);2160_print_ascii_file_h("/proc/sys/kernel/pid_max (system-wide limit on number of process identifiers)",2161"/proc/sys/kernel/pid_max", st);2162}21632164void os::Linux::print_system_memory_info(outputStream* st) {2165_print_ascii_file_h("/proc/meminfo", "/proc/meminfo", st, false);2166st->cr();21672168// some information regarding THPs; for details see2169// https://www.kernel.org/doc/Documentation/vm/transhuge.txt2170_print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/enabled",2171"/sys/kernel/mm/transparent_hugepage/enabled", st);2172_print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/defrag (defrag/compaction efforts parameter)",2173"/sys/kernel/mm/transparent_hugepage/defrag", st);2174}21752176void os::Linux::print_process_memory_info(outputStream* st) {21772178st->print_cr("Process Memory:");21792180// Print virtual and resident set size; peak values; swap; and for2181// rss its components if the kernel is recent enough.2182ssize_t vmsize = -1, vmpeak = -1, vmswap = -1,2183vmrss = -1, vmhwm = -1, rssanon = -1, rssfile = -1, rssshmem = -1;2184const int num_values = 8;2185int num_found = 0;2186FILE* f = ::fopen("/proc/self/status", "r");2187char buf[256];2188if (f != NULL) {2189while (::fgets(buf, sizeof(buf), f) != NULL && num_found < num_values) {2190if ( (vmsize == -1 && sscanf(buf, "VmSize: " SSIZE_FORMAT " kB", &vmsize) == 1) ||2191(vmpeak == -1 && sscanf(buf, "VmPeak: " SSIZE_FORMAT " kB", &vmpeak) == 1) ||2192(vmswap == -1 && sscanf(buf, "VmSwap: " SSIZE_FORMAT " kB", &vmswap) == 1) ||2193(vmhwm == -1 && sscanf(buf, "VmHWM: " SSIZE_FORMAT " kB", &vmhwm) == 1) ||2194(vmrss == -1 && sscanf(buf, "VmRSS: " SSIZE_FORMAT " kB", &vmrss) == 1) ||2195(rssanon == -1 && sscanf(buf, "RssAnon: " SSIZE_FORMAT " kB", &rssanon) == 1) ||2196(rssfile == -1 && sscanf(buf, "RssFile: " SSIZE_FORMAT " kB", &rssfile) == 1) ||2197(rssshmem == -1 && sscanf(buf, "RssShmem: " SSIZE_FORMAT " kB", &rssshmem) == 1)2198)2199{2200num_found ++;2201}2202}2203fclose(f);22042205st->print_cr("Virtual Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", vmsize, vmpeak);2206st->print("Resident Set Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", vmrss, vmhwm);2207if (rssanon != -1) { // requires kernel >= 4.52208st->print(" (anon: " SSIZE_FORMAT "K, file: " SSIZE_FORMAT "K, shmem: " SSIZE_FORMAT "K)",2209rssanon, rssfile, rssshmem);2210}2211st->cr();2212if (vmswap != -1) { // requires kernel >= 2.6.342213st->print_cr("Swapped out: " SSIZE_FORMAT "K", vmswap);2214}2215} else {2216st->print_cr("Could not open /proc/self/status to get process memory related information");2217}22182219// Print glibc outstanding allocations.2220// (note: there is no implementation of mallinfo for muslc)2221#ifdef __GLIBC__2222size_t total_allocated = 0;2223bool might_have_wrapped = false;2224if (_mallinfo2 != NULL) {2225struct glibc_mallinfo2 mi = _mallinfo2();2226total_allocated = mi.uordblks;2227} else if (_mallinfo != NULL) {2228// mallinfo is an old API. Member names mean next to nothing and, beyond that, are int.2229// So values may have wrapped around. Still useful enough to see how much glibc thinks2230// we allocated.2231struct glibc_mallinfo mi = _mallinfo();2232total_allocated = (size_t)(unsigned)mi.uordblks;2233// Since mallinfo members are int, glibc values may have wrapped. Warn about this.2234might_have_wrapped = (vmrss * K) > UINT_MAX && (vmrss * K) > (total_allocated + UINT_MAX);2235}2236if (_mallinfo2 != NULL || _mallinfo != NULL) {2237st->print_cr("C-Heap outstanding allocations: " SIZE_FORMAT "K%s",2238total_allocated / K,2239might_have_wrapped ? " (may have wrapped)" : "");2240}2241#endif // __GLIBC__22422243}22442245bool os::Linux::print_ld_preload_file(outputStream* st) {2246return _print_ascii_file("/etc/ld.so.preload", st, "/etc/ld.so.preload:");2247}22482249void os::Linux::print_uptime_info(outputStream* st) {2250struct sysinfo sinfo;2251int ret = sysinfo(&sinfo);2252if (ret == 0) {2253os::print_dhm(st, "OS uptime:", (long) sinfo.uptime);2254}2255}22562257bool os::Linux::print_container_info(outputStream* st) {2258if (!OSContainer::is_containerized()) {2259return false;2260}22612262st->print_cr("container (cgroup) information:");22632264const char *p_ct = OSContainer::container_type();2265st->print_cr("container_type: %s", p_ct != NULL ? p_ct : "not supported");22662267char *p = OSContainer::cpu_cpuset_cpus();2268st->print_cr("cpu_cpuset_cpus: %s", p != NULL ? p : "not supported");2269free(p);22702271p = OSContainer::cpu_cpuset_memory_nodes();2272st->print_cr("cpu_memory_nodes: %s", p != NULL ? p : "not supported");2273free(p);22742275int i = OSContainer::active_processor_count();2276st->print("active_processor_count: ");2277if (i > 0) {2278st->print_cr("%d", i);2279} else {2280st->print_cr("not supported");2281}22822283i = OSContainer::cpu_quota();2284st->print("cpu_quota: ");2285if (i > 0) {2286st->print_cr("%d", i);2287} else {2288st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no quota");2289}22902291i = OSContainer::cpu_period();2292st->print("cpu_period: ");2293if (i > 0) {2294st->print_cr("%d", i);2295} else {2296st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no period");2297}22982299i = OSContainer::cpu_shares();2300st->print("cpu_shares: ");2301if (i > 0) {2302st->print_cr("%d", i);2303} else {2304st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no shares");2305}23062307jlong j = OSContainer::memory_limit_in_bytes();2308st->print("memory_limit_in_bytes: ");2309if (j > 0) {2310st->print_cr(JLONG_FORMAT, j);2311} else {2312st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");2313}23142315j = OSContainer::memory_and_swap_limit_in_bytes();2316st->print("memory_and_swap_limit_in_bytes: ");2317if (j > 0) {2318st->print_cr(JLONG_FORMAT, j);2319} else {2320st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");2321}23222323j = OSContainer::memory_soft_limit_in_bytes();2324st->print("memory_soft_limit_in_bytes: ");2325if (j > 0) {2326st->print_cr(JLONG_FORMAT, j);2327} else {2328st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");2329}23302331j = OSContainer::OSContainer::memory_usage_in_bytes();2332st->print("memory_usage_in_bytes: ");2333if (j > 0) {2334st->print_cr(JLONG_FORMAT, j);2335} else {2336st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");2337}23382339j = OSContainer::OSContainer::memory_max_usage_in_bytes();2340st->print("memory_max_usage_in_bytes: ");2341if (j > 0) {2342st->print_cr(JLONG_FORMAT, j);2343} else {2344st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");2345}23462347return true;2348}23492350void os::Linux::print_steal_info(outputStream* st) {2351if (has_initial_tick_info) {2352CPUPerfTicks pticks;2353bool res = os::Linux::get_tick_information(&pticks, -1);23542355if (res && pticks.has_steal_ticks) {2356uint64_t steal_ticks_difference = pticks.steal - initial_steal_ticks;2357uint64_t total_ticks_difference = pticks.total - initial_total_ticks;2358double steal_ticks_perc = 0.0;2359if (total_ticks_difference != 0) {2360steal_ticks_perc = (double) steal_ticks_difference / total_ticks_difference;2361}2362st->print_cr("Steal ticks since vm start: " UINT64_FORMAT, steal_ticks_difference);2363st->print_cr("Steal ticks percentage since vm start:%7.3f", steal_ticks_perc);2364}2365}2366}23672368void os::print_memory_info(outputStream* st) {23692370st->print("Memory:");2371st->print(" %dk page", os::vm_page_size()>>10);23722373// values in struct sysinfo are "unsigned long"2374struct sysinfo si;2375sysinfo(&si);23762377st->print(", physical " UINT64_FORMAT "k",2378os::physical_memory() >> 10);2379st->print("(" UINT64_FORMAT "k free)",2380os::available_memory() >> 10);2381st->print(", swap " UINT64_FORMAT "k",2382((jlong)si.totalswap * si.mem_unit) >> 10);2383st->print("(" UINT64_FORMAT "k free)",2384((jlong)si.freeswap * si.mem_unit) >> 10);2385st->cr();2386st->print("Page Sizes: ");2387_page_sizes.print_on(st);2388st->cr();2389}23902391// Print the first "model name" line and the first "flags" line2392// that we find and nothing more. We assume "model name" comes2393// before "flags" so if we find a second "model name", then the2394// "flags" field is considered missing.2395static bool print_model_name_and_flags(outputStream* st, char* buf, size_t buflen) {2396#if defined(IA32) || defined(AMD64)2397// Other platforms have less repetitive cpuinfo files2398FILE *fp = fopen("/proc/cpuinfo", "r");2399if (fp) {2400bool model_name_printed = false;2401while (!feof(fp)) {2402if (fgets(buf, buflen, fp)) {2403// Assume model name comes before flags2404if (strstr(buf, "model name") != NULL) {2405if (!model_name_printed) {2406st->print_raw("CPU Model and flags from /proc/cpuinfo:\n");2407st->print_raw(buf);2408model_name_printed = true;2409} else {2410// model name printed but not flags? Odd, just return2411fclose(fp);2412return true;2413}2414}2415// print the flags line too2416if (strstr(buf, "flags") != NULL) {2417st->print_raw(buf);2418fclose(fp);2419return true;2420}2421}2422}2423fclose(fp);2424}2425#endif // x86 platforms2426return false;2427}24282429// additional information about CPU e.g. available frequency ranges2430static void print_sys_devices_cpu_info(outputStream* st, char* buf, size_t buflen) {2431_print_ascii_file_h("Online cpus", "/sys/devices/system/cpu/online", st);2432_print_ascii_file_h("Offline cpus", "/sys/devices/system/cpu/offline", st);24332434if (ExtensiveErrorReports) {2435// cache related info (cpu 0, should be similar for other CPUs)2436for (unsigned int i=0; i < 10; i++) { // handle max. 10 cache entries2437char hbuf_level[60];2438char hbuf_type[60];2439char hbuf_size[60];2440char hbuf_coherency_line_size[80];2441snprintf(hbuf_level, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/level", i);2442snprintf(hbuf_type, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/type", i);2443snprintf(hbuf_size, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/size", i);2444snprintf(hbuf_coherency_line_size, 80, "/sys/devices/system/cpu/cpu0/cache/index%u/coherency_line_size", i);2445if (file_exists(hbuf_level)) {2446_print_ascii_file_h("cache level", hbuf_level, st);2447_print_ascii_file_h("cache type", hbuf_type, st);2448_print_ascii_file_h("cache size", hbuf_size, st);2449_print_ascii_file_h("cache coherency line size", hbuf_coherency_line_size, st);2450}2451}2452}24532454// we miss the cpufreq entries on Power and s390x2455#if defined(IA32) || defined(AMD64)2456_print_ascii_file_h("BIOS frequency limitation", "/sys/devices/system/cpu/cpu0/cpufreq/bios_limit", st);2457_print_ascii_file_h("Frequency switch latency (ns)", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_transition_latency", st);2458_print_ascii_file_h("Available cpu frequencies", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", st);2459// min and max should be in the Available range but still print them (not all info might be available for all kernels)2460if (ExtensiveErrorReports) {2461_print_ascii_file_h("Maximum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", st);2462_print_ascii_file_h("Minimum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq", st);2463_print_ascii_file_h("Current cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", st);2464}2465// governors are power schemes, see https://wiki.archlinux.org/index.php/CPU_frequency_scaling2466if (ExtensiveErrorReports) {2467_print_ascii_file_h("Available governors", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors", st);2468}2469_print_ascii_file_h("Current governor", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", st);2470// Core performance boost, see https://www.kernel.org/doc/Documentation/cpu-freq/boost.txt2471// Raise operating frequency of some cores in a multi-core package if certain conditions apply, e.g.2472// whole chip is not fully utilized2473_print_ascii_file_h("Core performance/turbo boost", "/sys/devices/system/cpu/cpufreq/boost", st);2474#endif2475}24762477void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {2478// Only print the model name if the platform provides this as a summary2479if (!print_model_name_and_flags(st, buf, buflen)) {2480_print_ascii_file_h("/proc/cpuinfo", "/proc/cpuinfo", st, false);2481}2482st->cr();2483print_sys_devices_cpu_info(st, buf, buflen);2484}24852486#if defined(AMD64) || defined(IA32) || defined(X32)2487const char* search_string = "model name";2488#elif defined(M68K)2489const char* search_string = "CPU";2490#elif defined(PPC64)2491const char* search_string = "cpu";2492#elif defined(S390)2493const char* search_string = "machine =";2494#elif defined(SPARC)2495const char* search_string = "cpu";2496#else2497const char* search_string = "Processor";2498#endif24992500// Parses the cpuinfo file for string representing the model name.2501void os::get_summary_cpu_info(char* cpuinfo, size_t length) {2502FILE* fp = fopen("/proc/cpuinfo", "r");2503if (fp != NULL) {2504while (!feof(fp)) {2505char buf[256];2506if (fgets(buf, sizeof(buf), fp)) {2507char* start = strstr(buf, search_string);2508if (start != NULL) {2509char *ptr = start + strlen(search_string);2510char *end = buf + strlen(buf);2511while (ptr != end) {2512// skip whitespace and colon for the rest of the name.2513if (*ptr != ' ' && *ptr != '\t' && *ptr != ':') {2514break;2515}2516ptr++;2517}2518if (ptr != end) {2519// reasonable string, get rid of newline and keep the rest2520char* nl = strchr(buf, '\n');2521if (nl != NULL) *nl = '\0';2522strncpy(cpuinfo, ptr, length);2523fclose(fp);2524return;2525}2526}2527}2528}2529fclose(fp);2530}2531// cpuinfo not found or parsing failed, just print generic string. The entire2532// /proc/cpuinfo file will be printed later in the file (or enough of it for x86)2533#if defined(AARCH64)2534strncpy(cpuinfo, "AArch64", length);2535#elif defined(AMD64)2536strncpy(cpuinfo, "x86_64", length);2537#elif defined(ARM) // Order wrt. AARCH64 is relevant!2538strncpy(cpuinfo, "ARM", length);2539#elif defined(IA32)2540strncpy(cpuinfo, "x86_32", length);2541#elif defined(IA64)2542strncpy(cpuinfo, "IA64", length);2543#elif defined(PPC)2544strncpy(cpuinfo, "PPC64", length);2545#elif defined(S390)2546strncpy(cpuinfo, "S390", length);2547#elif defined(SPARC)2548strncpy(cpuinfo, "sparcv9", length);2549#elif defined(ZERO_LIBARCH)2550strncpy(cpuinfo, ZERO_LIBARCH, length);2551#else2552strncpy(cpuinfo, "unknown", length);2553#endif2554}25552556static char saved_jvm_path[MAXPATHLEN] = {0};25572558// Find the full path to the current module, libjvm.so2559void os::jvm_path(char *buf, jint buflen) {2560// Error checking.2561if (buflen < MAXPATHLEN) {2562assert(false, "must use a large-enough buffer");2563buf[0] = '\0';2564return;2565}2566// Lazy resolve the path to current module.2567if (saved_jvm_path[0] != 0) {2568strcpy(buf, saved_jvm_path);2569return;2570}25712572char dli_fname[MAXPATHLEN];2573dli_fname[0] = '\0';2574bool ret = dll_address_to_library_name(2575CAST_FROM_FN_PTR(address, os::jvm_path),2576dli_fname, sizeof(dli_fname), NULL);2577assert(ret, "cannot locate libjvm");2578#ifdef __ANDROID__2579if (dli_fname[0] == '\0') {2580return;2581}25822583if (strchr(dli_fname, '/') == NULL) {2584bool ok = read_so_path_from_maps(dli_fname, buf, buflen);2585assert(ok, "unable to turn relative libjvm.so path into absolute");2586return;2587}25882589snprintf(buf, buflen, /* "%s/lib/%s/server/%s", java_home_var, cpu_arch, */ "%s", dli_fname);2590#else // !__ANDROID__2591char *rp = NULL;2592if (ret && dli_fname[0] != '\0') {2593rp = os::Posix::realpath(dli_fname, buf, buflen);2594}2595if (rp == NULL) {2596return;2597}25982599if (Arguments::sun_java_launcher_is_altjvm()) {2600// Support for the java launcher's '-XXaltjvm=<path>' option. Typical2601// value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".2602// If "/jre/lib/" appears at the right place in the string, then2603// assume we are installed in a JDK and we're done. Otherwise, check2604// for a JAVA_HOME environment variable and fix up the path so it2605// looks like libjvm.so is installed there (append a fake suffix2606// hotspot/libjvm.so).2607const char *p = buf + strlen(buf) - 1;2608for (int count = 0; p > buf && count < 5; ++count) {2609for (--p; p > buf && *p != '/'; --p)2610/* empty */ ;2611}26122613if (strncmp(p, "/jre/lib/", 9) != 0) {2614// Look for JAVA_HOME in the environment.2615char* java_home_var = ::getenv("JAVA_HOME");2616if (java_home_var != NULL && java_home_var[0] != 0) {2617char* jrelib_p;2618int len;26192620// Check the current module name "libjvm.so".2621p = strrchr(buf, '/');2622if (p == NULL) {2623return;2624}2625assert(strstr(p, "/libjvm") == p, "invalid library name");26262627rp = os::Posix::realpath(java_home_var, buf, buflen);2628if (rp == NULL) {2629return;2630}26312632// determine if this is a legacy image or modules image2633// modules image doesn't have "jre" subdirectory2634len = strlen(buf);2635assert(len < buflen, "Ran out of buffer room");2636jrelib_p = buf + len;2637snprintf(jrelib_p, buflen-len, "/jre/lib");2638if (0 != access(buf, F_OK)) {2639snprintf(jrelib_p, buflen-len, "/lib");2640}26412642if (0 == access(buf, F_OK)) {2643// Use current module name "libjvm.so"2644len = strlen(buf);2645snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");2646} else {2647// Go back to path of .so2648rp = os::Posix::realpath(dli_fname, buf, buflen);2649if (rp == NULL) {2650return;2651}2652}2653}2654}2655}2656#endif26572658strncpy(saved_jvm_path, buf, MAXPATHLEN);2659saved_jvm_path[MAXPATHLEN - 1] = '\0';2660}26612662void os::print_jni_name_prefix_on(outputStream* st, int args_size) {2663// no prefix required, not even "_"2664}26652666void os::print_jni_name_suffix_on(outputStream* st, int args_size) {2667// no suffix required2668}26692670////////////////////////////////////////////////////////////////////////////////2671// Virtual Memory26722673int os::vm_page_size() {2674// Seems redundant as all get out2675assert(os::Linux::page_size() != -1, "must call os::init");2676return os::Linux::page_size();2677}26782679// Solaris allocates memory by pages.2680int os::vm_allocation_granularity() {2681assert(os::Linux::page_size() != -1, "must call os::init");2682return os::Linux::page_size();2683}26842685// Rationale behind this function:2686// current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable2687// mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get2688// samples for JITted code. Here we create private executable mapping over the code cache2689// and then we can use standard (well, almost, as mapping can change) way to provide2690// info for the reporting script by storing timestamp and location of symbol2691void linux_wrap_code(char* base, size_t size) {2692static volatile jint cnt = 0;26932694if (!UseOprofile) {2695return;2696}26972698char buf[PATH_MAX+1];2699int num = Atomic::add(&cnt, 1);27002701snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",2702os::get_temp_directory(), os::current_process_id(), num);2703unlink(buf);27042705int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);27062707if (fd != -1) {2708off_t rv = ::lseek(fd, size-2, SEEK_SET);2709if (rv != (off_t)-1) {2710if (::write(fd, "", 1) == 1) {2711mmap(base, size,2712PROT_READ|PROT_WRITE|PROT_EXEC,2713MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);2714}2715}2716::close(fd);2717unlink(buf);2718}2719}27202721static bool recoverable_mmap_error(int err) {2722// See if the error is one we can let the caller handle. This2723// list of errno values comes from JBS-6843484. I can't find a2724// Linux man page that documents this specific set of errno2725// values so while this list currently matches Solaris, it may2726// change as we gain experience with this failure mode.2727switch (err) {2728case EBADF:2729case EINVAL:2730case ENOTSUP:2731// let the caller deal with these errors2732return true;27332734default:2735// Any remaining errors on this OS can cause our reserved mapping2736// to be lost. That can cause confusion where different data2737// structures think they have the same memory mapped. The worst2738// scenario is if both the VM and a library think they have the2739// same memory mapped.2740return false;2741}2742}27432744static void warn_fail_commit_memory(char* addr, size_t size, bool exec,2745int err) {2746warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT2747", %d) failed; error='%s' (errno=%d)", p2i(addr), size, exec,2748os::strerror(err), err);2749}27502751static void warn_fail_commit_memory(char* addr, size_t size,2752size_t alignment_hint, bool exec,2753int err) {2754warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT2755", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", p2i(addr), size,2756alignment_hint, exec, os::strerror(err), err);2757}27582759// NOTE: Linux kernel does not really reserve the pages for us.2760// All it does is to check if there are enough free pages2761// left at the time of mmap(). This could be a potential2762// problem.2763int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {2764int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;2765uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,2766MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);2767if (res != (uintptr_t) MAP_FAILED) {2768if (UseNUMAInterleaving) {2769numa_make_global(addr, size);2770}2771return 0;2772}27732774int err = errno; // save errno from mmap() call above27752776if (!recoverable_mmap_error(err)) {2777warn_fail_commit_memory(addr, size, exec, err);2778vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory.");2779}27802781return err;2782}27832784bool os::pd_commit_memory(char* addr, size_t size, bool exec) {2785return os::Linux::commit_memory_impl(addr, size, exec) == 0;2786}27872788void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,2789const char* mesg) {2790assert(mesg != NULL, "mesg must be specified");2791int err = os::Linux::commit_memory_impl(addr, size, exec);2792if (err != 0) {2793// the caller wants all commit errors to exit with the specified mesg:2794warn_fail_commit_memory(addr, size, exec, err);2795vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);2796}2797}27982799// Define MAP_HUGETLB here so we can build HotSpot on old systems.2800#ifndef MAP_HUGETLB2801#define MAP_HUGETLB 0x400002802#endif28032804// If mmap flags are set with MAP_HUGETLB and the system supports multiple2805// huge page sizes, flag bits [26:31] can be used to encode the log2 of the2806// desired huge page size. Otherwise, the system's default huge page size will be used.2807// See mmap(2) man page for more info (since Linux 3.8).2808// https://lwn.net/Articles/533499/2809#ifndef MAP_HUGE_SHIFT2810#define MAP_HUGE_SHIFT 262811#endif28122813// Define MADV_HUGEPAGE here so we can build HotSpot on old systems.2814#ifndef MADV_HUGEPAGE2815#define MADV_HUGEPAGE 142816#endif28172818int os::Linux::commit_memory_impl(char* addr, size_t size,2819size_t alignment_hint, bool exec) {2820int err = os::Linux::commit_memory_impl(addr, size, exec);2821if (err == 0) {2822realign_memory(addr, size, alignment_hint);2823}2824return err;2825}28262827bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,2828bool exec) {2829return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;2830}28312832void os::pd_commit_memory_or_exit(char* addr, size_t size,2833size_t alignment_hint, bool exec,2834const char* mesg) {2835assert(mesg != NULL, "mesg must be specified");2836int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);2837if (err != 0) {2838// the caller wants all commit errors to exit with the specified mesg:2839warn_fail_commit_memory(addr, size, alignment_hint, exec, err);2840vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);2841}2842}28432844void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {2845if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {2846// We don't check the return value: madvise(MADV_HUGEPAGE) may not2847// be supported or the memory may already be backed by huge pages.2848::madvise(addr, bytes, MADV_HUGEPAGE);2849}2850}28512852void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {2853// This method works by doing an mmap over an existing mmaping and effectively discarding2854// the existing pages. However it won't work for SHM-based large pages that cannot be2855// uncommitted at all. We don't do anything in this case to avoid creating a segment with2856// small pages on top of the SHM segment. This method always works for small pages, so we2857// allow that in any case.2858if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {2859commit_memory(addr, bytes, alignment_hint, !ExecMem);2860}2861}28622863void os::numa_make_global(char *addr, size_t bytes) {2864Linux::numa_interleave_memory(addr, bytes);2865}28662867// Define for numa_set_bind_policy(int). Setting the argument to 0 will set the2868// bind policy to MPOL_PREFERRED for the current thread.2869#define USE_MPOL_PREFERRED 028702871void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {2872// To make NUMA and large pages more robust when both enabled, we need to ease2873// the requirements on where the memory should be allocated. MPOL_BIND is the2874// default policy and it will force memory to be allocated on the specified2875// node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on2876// the specified node, but will not force it. Using this policy will prevent2877// getting SIGBUS when trying to allocate large pages on NUMA nodes with no2878// free large pages.2879Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);2880Linux::numa_tonode_memory(addr, bytes, lgrp_hint);2881}28822883bool os::numa_topology_changed() { return false; }28842885size_t os::numa_get_groups_num() {2886// Return just the number of nodes in which it's possible to allocate memory2887// (in numa terminology, configured nodes).2888return Linux::numa_num_configured_nodes();2889}28902891int os::numa_get_group_id() {2892int cpu_id = Linux::sched_getcpu();2893if (cpu_id != -1) {2894int lgrp_id = Linux::get_node_by_cpu(cpu_id);2895if (lgrp_id != -1) {2896return lgrp_id;2897}2898}2899return 0;2900}29012902int os::numa_get_group_id_for_address(const void* address) {2903void** pages = const_cast<void**>(&address);2904int id = -1;29052906if (os::Linux::numa_move_pages(0, 1, pages, NULL, &id, 0) == -1) {2907return -1;2908}2909if (id < 0) {2910return -1;2911}2912return id;2913}29142915int os::Linux::get_existing_num_nodes() {2916int node;2917int highest_node_number = Linux::numa_max_node();2918int num_nodes = 0;29192920// Get the total number of nodes in the system including nodes without memory.2921for (node = 0; node <= highest_node_number; node++) {2922if (is_node_in_existing_nodes(node)) {2923num_nodes++;2924}2925}2926return num_nodes;2927}29282929size_t os::numa_get_leaf_groups(int *ids, size_t size) {2930int highest_node_number = Linux::numa_max_node();2931size_t i = 0;29322933// Map all node ids in which it is possible to allocate memory. Also nodes are2934// not always consecutively available, i.e. available from 0 to the highest2935// node number. If the nodes have been bound explicitly using numactl membind,2936// then allocate memory from those nodes only.2937for (int node = 0; node <= highest_node_number; node++) {2938if (Linux::is_node_in_bound_nodes((unsigned int)node)) {2939ids[i++] = node;2940}2941}2942return i;2943}29442945bool os::get_page_info(char *start, page_info* info) {2946return false;2947}29482949char *os::scan_pages(char *start, char* end, page_info* page_expected,2950page_info* page_found) {2951return end;2952}295329542955int os::Linux::sched_getcpu_syscall(void) {2956unsigned int cpu = 0;2957int retval = -1;29582959#if defined(IA32)2960#ifndef SYS_getcpu2961#define SYS_getcpu 3182962#endif2963retval = syscall(SYS_getcpu, &cpu, NULL, NULL);2964#elif defined(AMD64)2965// Unfortunately we have to bring all these macros here from vsyscall.h2966// to be able to compile on old linuxes.2967#define __NR_vgetcpu 22968#define VSYSCALL_START (-10UL << 20)2969#define VSYSCALL_SIZE 10242970#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))2971typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);2972vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);2973retval = vgetcpu(&cpu, NULL, NULL);2974#endif29752976return (retval == -1) ? retval : cpu;2977}29782979void os::Linux::sched_getcpu_init() {2980// sched_getcpu() should be in libc.2981set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,2982dlsym(RTLD_DEFAULT, "sched_getcpu")));29832984// If it's not, try a direct syscall.2985if (sched_getcpu() == -1) {2986set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,2987(void*)&sched_getcpu_syscall));2988}29892990if (sched_getcpu() == -1) {2991// vm_exit_during_initialization2992warning("getcpu(2) system call not supported by kernel");2993}2994}29952996// Something to do with the numa-aware allocator needs these symbols2997extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }2998extern "C" JNIEXPORT void numa_error(char *where) { }29993000// Handle request to load libnuma symbol version 1.1 (API v1). If it fails3001// load symbol from base version instead.3002void* os::Linux::libnuma_dlsym(void* handle, const char *name) {3003void *f = dlvsym(handle, name, "libnuma_1.1");3004if (f == NULL) {3005f = dlsym(handle, name);3006}3007return f;3008}30093010// Handle request to load libnuma symbol version 1.2 (API v2) only.3011// Return NULL if the symbol is not defined in this particular version.3012void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) {3013return dlvsym(handle, name, "libnuma_1.2");3014}30153016// Check numa dependent syscalls3017static bool numa_syscall_check() {3018// NUMA APIs depend on several syscalls. E.g., get_mempolicy is required for numa_get_membind and3019// numa_get_interleave_mask. But these dependent syscalls can be unsupported for various reasons.3020// Especially in dockers, get_mempolicy is not allowed with the default configuration. So it's necessary3021// to check whether the syscalls are available. Currently, only get_mempolicy is checked since checking3022// others like mbind would cause unexpected side effects.3023#ifdef SYS_get_mempolicy3024int dummy = 0;3025if (syscall(SYS_get_mempolicy, &dummy, NULL, 0, (void*)&dummy, 3) == -1) {3026return false;3027}3028#endif30293030return true;3031}30323033bool os::Linux::libnuma_init() {3034// Requires sched_getcpu() and numa dependent syscalls support3035if ((sched_getcpu() != -1) && numa_syscall_check()) {3036void *handle = dlopen("libnuma.so.1", RTLD_LAZY);3037if (handle != NULL) {3038set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,3039libnuma_dlsym(handle, "numa_node_to_cpus")));3040set_numa_node_to_cpus_v2(CAST_TO_FN_PTR(numa_node_to_cpus_v2_func_t,3041libnuma_v2_dlsym(handle, "numa_node_to_cpus")));3042set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,3043libnuma_dlsym(handle, "numa_max_node")));3044set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,3045libnuma_dlsym(handle, "numa_num_configured_nodes")));3046set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,3047libnuma_dlsym(handle, "numa_available")));3048set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,3049libnuma_dlsym(handle, "numa_tonode_memory")));3050set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,3051libnuma_dlsym(handle, "numa_interleave_memory")));3052set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,3053libnuma_v2_dlsym(handle, "numa_interleave_memory")));3054set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,3055libnuma_dlsym(handle, "numa_set_bind_policy")));3056set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,3057libnuma_dlsym(handle, "numa_bitmask_isbitset")));3058set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,3059libnuma_dlsym(handle, "numa_distance")));3060set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t,3061libnuma_v2_dlsym(handle, "numa_get_membind")));3062set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,3063libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));3064set_numa_move_pages(CAST_TO_FN_PTR(numa_move_pages_func_t,3065libnuma_dlsym(handle, "numa_move_pages")));3066set_numa_set_preferred(CAST_TO_FN_PTR(numa_set_preferred_func_t,3067libnuma_dlsym(handle, "numa_set_preferred")));30683069if (numa_available() != -1) {3070set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));3071set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));3072set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));3073set_numa_interleave_bitmask(_numa_get_interleave_mask());3074set_numa_membind_bitmask(_numa_get_membind());3075// Create an index -> node mapping, since nodes are not always consecutive3076_nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, mtInternal);3077rebuild_nindex_to_node_map();3078// Create a cpu -> node mapping3079_cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, mtInternal);3080rebuild_cpu_to_node_map();3081return true;3082}3083}3084}3085return false;3086}30873088size_t os::Linux::default_guard_size(os::ThreadType thr_type) {3089// Creating guard page is very expensive. Java thread has HotSpot3090// guard pages, only enable glibc guard page for non-Java threads.3091// (Remember: compiler thread is a Java thread, too!)3092return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : page_size());3093}30943095void os::Linux::rebuild_nindex_to_node_map() {3096int highest_node_number = Linux::numa_max_node();30973098nindex_to_node()->clear();3099for (int node = 0; node <= highest_node_number; node++) {3100if (Linux::is_node_in_existing_nodes(node)) {3101nindex_to_node()->append(node);3102}3103}3104}31053106// rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id.3107// The table is later used in get_node_by_cpu().3108void os::Linux::rebuild_cpu_to_node_map() {3109const size_t NCPUS = 32768; // Since the buffer size computation is very obscure3110// in libnuma (possible values are starting from 16,3111// and continuing up with every other power of 2, but less3112// than the maximum number of CPUs supported by kernel), and3113// is a subject to change (in libnuma version 2 the requirements3114// are more reasonable) we'll just hardcode the number they use3115// in the library.3116const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;31173118size_t cpu_num = processor_count();3119size_t cpu_map_size = NCPUS / BitsPerCLong;3120size_t cpu_map_valid_size =3121MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);31223123cpu_to_node()->clear();3124cpu_to_node()->at_grow(cpu_num - 1);31253126size_t node_num = get_existing_num_nodes();31273128int distance = 0;3129int closest_distance = INT_MAX;3130int closest_node = 0;3131unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal);3132for (size_t i = 0; i < node_num; i++) {3133// Check if node is configured (not a memory-less node). If it is not, find3134// the closest configured node. Check also if node is bound, i.e. it's allowed3135// to allocate memory from the node. If it's not allowed, map cpus in that node3136// to the closest node from which memory allocation is allowed.3137if (!is_node_in_configured_nodes(nindex_to_node()->at(i)) ||3138!is_node_in_bound_nodes(nindex_to_node()->at(i))) {3139closest_distance = INT_MAX;3140// Check distance from all remaining nodes in the system. Ignore distance3141// from itself, from another non-configured node, and from another non-bound3142// node.3143for (size_t m = 0; m < node_num; m++) {3144if (m != i &&3145is_node_in_configured_nodes(nindex_to_node()->at(m)) &&3146is_node_in_bound_nodes(nindex_to_node()->at(m))) {3147distance = numa_distance(nindex_to_node()->at(i), nindex_to_node()->at(m));3148// If a closest node is found, update. There is always at least one3149// configured and bound node in the system so there is always at least3150// one node close.3151if (distance != 0 && distance < closest_distance) {3152closest_distance = distance;3153closest_node = nindex_to_node()->at(m);3154}3155}3156}3157} else {3158// Current node is already a configured node.3159closest_node = nindex_to_node()->at(i);3160}31613162// Get cpus from the original node and map them to the closest node. If node3163// is a configured node (not a memory-less node), then original node and3164// closest node are the same.3165if (numa_node_to_cpus(nindex_to_node()->at(i), cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {3166for (size_t j = 0; j < cpu_map_valid_size; j++) {3167if (cpu_map[j] != 0) {3168for (size_t k = 0; k < BitsPerCLong; k++) {3169if (cpu_map[j] & (1UL << k)) {3170int cpu_index = j * BitsPerCLong + k;31713172#ifndef PRODUCT3173if (UseDebuggerErgo1 && cpu_index >= (int)cpu_num) {3174// Some debuggers limit the processor count without3175// intercepting the NUMA APIs. Just fake the values.3176cpu_index = 0;3177}3178#endif31793180cpu_to_node()->at_put(cpu_index, closest_node);3181}3182}3183}3184}3185}3186}3187FREE_C_HEAP_ARRAY(unsigned long, cpu_map);3188}31893190int os::Linux::numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {3191// use the latest version of numa_node_to_cpus if available3192if (_numa_node_to_cpus_v2 != NULL) {31933194// libnuma bitmask struct3195struct bitmask {3196unsigned long size; /* number of bits in the map */3197unsigned long *maskp;3198};31993200struct bitmask mask;3201mask.maskp = (unsigned long *)buffer;3202mask.size = bufferlen * 8;3203return _numa_node_to_cpus_v2(node, &mask);3204} else if (_numa_node_to_cpus != NULL) {3205return _numa_node_to_cpus(node, buffer, bufferlen);3206}3207return -1;3208}32093210int os::Linux::get_node_by_cpu(int cpu_id) {3211if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {3212return cpu_to_node()->at(cpu_id);3213}3214return -1;3215}32163217GrowableArray<int>* os::Linux::_cpu_to_node;3218GrowableArray<int>* os::Linux::_nindex_to_node;3219os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;3220os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;3221os::Linux::numa_node_to_cpus_v2_func_t os::Linux::_numa_node_to_cpus_v2;3222os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;3223os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;3224os::Linux::numa_available_func_t os::Linux::_numa_available;3225os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;3226os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;3227os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;3228os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;3229os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;3230os::Linux::numa_distance_func_t os::Linux::_numa_distance;3231os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;3232os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;3233os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages;3234os::Linux::numa_set_preferred_func_t os::Linux::_numa_set_preferred;3235os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;3236unsigned long* os::Linux::_numa_all_nodes;3237struct bitmask* os::Linux::_numa_all_nodes_ptr;3238struct bitmask* os::Linux::_numa_nodes_ptr;3239struct bitmask* os::Linux::_numa_interleave_bitmask;3240struct bitmask* os::Linux::_numa_membind_bitmask;32413242bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {3243uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,3244MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);3245return res != (uintptr_t) MAP_FAILED;3246}32473248static address get_stack_commited_bottom(address bottom, size_t size) {3249address nbot = bottom;3250address ntop = bottom + size;32513252size_t page_sz = os::vm_page_size();3253unsigned pages = size / page_sz;32543255unsigned char vec[1];3256unsigned imin = 1, imax = pages + 1, imid;3257int mincore_return_value = 0;32583259assert(imin <= imax, "Unexpected page size");32603261while (imin < imax) {3262imid = (imax + imin) / 2;3263nbot = ntop - (imid * page_sz);32643265// Use a trick with mincore to check whether the page is mapped or not.3266// mincore sets vec to 1 if page resides in memory and to 0 if page3267// is swapped output but if page we are asking for is unmapped3268// it returns -1,ENOMEM3269mincore_return_value = mincore(nbot, page_sz, vec);32703271if (mincore_return_value == -1) {3272// Page is not mapped go up3273// to find first mapped page3274if (errno != EAGAIN) {3275assert(errno == ENOMEM, "Unexpected mincore errno");3276imax = imid;3277}3278} else {3279// Page is mapped go down3280// to find first not mapped page3281imin = imid + 1;3282}3283}32843285nbot = nbot + page_sz;32863287// Adjust stack bottom one page up if last checked page is not mapped3288if (mincore_return_value == -1) {3289nbot = nbot + page_sz;3290}32913292return nbot;3293}32943295bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {3296int mincore_return_value;3297const size_t stripe = 1024; // query this many pages each time3298unsigned char vec[stripe + 1];3299// set a guard3300vec[stripe] = 'X';33013302const size_t page_sz = os::vm_page_size();3303size_t pages = size / page_sz;33043305assert(is_aligned(start, page_sz), "Start address must be page aligned");3306assert(is_aligned(size, page_sz), "Size must be page aligned");33073308committed_start = NULL;33093310int loops = (pages + stripe - 1) / stripe;3311int committed_pages = 0;3312address loop_base = start;3313bool found_range = false;33143315for (int index = 0; index < loops && !found_range; index ++) {3316assert(pages > 0, "Nothing to do");3317int pages_to_query = (pages >= stripe) ? stripe : pages;3318pages -= pages_to_query;33193320// Get stable read3321while ((mincore_return_value = mincore(loop_base, pages_to_query * page_sz, vec)) == -1 && errno == EAGAIN);33223323// During shutdown, some memory goes away without properly notifying NMT,3324// E.g. ConcurrentGCThread/WatcherThread can exit without deleting thread object.3325// Bailout and return as not committed for now.3326if (mincore_return_value == -1 && errno == ENOMEM) {3327return false;3328}33293330assert(vec[stripe] == 'X', "overflow guard");3331assert(mincore_return_value == 0, "Range must be valid");3332// Process this stripe3333for (int vecIdx = 0; vecIdx < pages_to_query; vecIdx ++) {3334if ((vec[vecIdx] & 0x01) == 0) { // not committed3335// End of current contiguous region3336if (committed_start != NULL) {3337found_range = true;3338break;3339}3340} else { // committed3341// Start of region3342if (committed_start == NULL) {3343committed_start = loop_base + page_sz * vecIdx;3344}3345committed_pages ++;3346}3347}33483349loop_base += pages_to_query * page_sz;3350}33513352if (committed_start != NULL) {3353assert(committed_pages > 0, "Must have committed region");3354assert(committed_pages <= int(size / page_sz), "Can not commit more than it has");3355assert(committed_start >= start && committed_start < start + size, "Out of range");3356committed_size = page_sz * committed_pages;3357return true;3358} else {3359assert(committed_pages == 0, "Should not have committed region");3360return false;3361}3362}336333643365// Linux uses a growable mapping for the stack, and if the mapping for3366// the stack guard pages is not removed when we detach a thread the3367// stack cannot grow beyond the pages where the stack guard was3368// mapped. If at some point later in the process the stack expands to3369// that point, the Linux kernel cannot expand the stack any further3370// because the guard pages are in the way, and a segfault occurs.3371//3372// However, it's essential not to split the stack region by unmapping3373// a region (leaving a hole) that's already part of the stack mapping,3374// so if the stack mapping has already grown beyond the guard pages at3375// the time we create them, we have to truncate the stack mapping.3376// So, we need to know the extent of the stack mapping when3377// create_stack_guard_pages() is called.33783379// We only need this for stacks that are growable: at the time of3380// writing thread stacks don't use growable mappings (i.e. those3381// creeated with MAP_GROWSDOWN), and aren't marked "[stack]", so this3382// only applies to the main thread.33833384// If the (growable) stack mapping already extends beyond the point3385// where we're going to put our guard pages, truncate the mapping at3386// that point by munmap()ping it. This ensures that when we later3387// munmap() the guard pages we don't leave a hole in the stack3388// mapping. This only affects the main/primordial thread33893390bool os::pd_create_stack_guard_pages(char* addr, size_t size) {3391if (os::is_primordial_thread()) {3392// As we manually grow stack up to bottom inside create_attached_thread(),3393// it's likely that os::Linux::initial_thread_stack_bottom is mapped and3394// we don't need to do anything special.3395// Check it first, before calling heavy function.3396uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom();3397unsigned char vec[1];33983399if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {3400// Fallback to slow path on all errors, including EAGAIN3401stack_extent = (uintptr_t) get_stack_commited_bottom(3402os::Linux::initial_thread_stack_bottom(),3403(size_t)addr - stack_extent);3404}34053406if (stack_extent < (uintptr_t)addr) {3407::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent));3408}3409}34103411return os::commit_memory(addr, size, !ExecMem);3412}34133414// If this is a growable mapping, remove the guard pages entirely by3415// munmap()ping them. If not, just call uncommit_memory(). This only3416// affects the main/primordial thread, but guard against future OS changes.3417// It's safe to always unmap guard pages for primordial thread because we3418// always place it right after end of the mapped region.34193420bool os::remove_stack_guard_pages(char* addr, size_t size) {3421uintptr_t stack_extent, stack_base;34223423if (os::is_primordial_thread()) {3424return ::munmap(addr, size) == 0;3425}34263427return os::uncommit_memory(addr, size);3428}34293430// 'requested_addr' is only treated as a hint, the return value may or3431// may not start from the requested address. Unlike Linux mmap(), this3432// function returns NULL to indicate failure.3433static char* anon_mmap(char* requested_addr, size_t bytes) {3434// MAP_FIXED is intentionally left out, to leave existing mappings intact.3435const int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;34363437// Map reserved/uncommitted pages PROT_NONE so we fail early if we3438// touch an uncommitted page. Otherwise, the read/write might3439// succeed if we have enough swap space to back the physical page.3440char* addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0);34413442return addr == MAP_FAILED ? NULL : addr;3443}34443445// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address3446// (req_addr != NULL) or with a given alignment.3447// - bytes shall be a multiple of alignment.3448// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.3449// - alignment sets the alignment at which memory shall be allocated.3450// It must be a multiple of allocation granularity.3451// Returns address of memory or NULL. If req_addr was not NULL, will only return3452// req_addr or NULL.3453static char* anon_mmap_aligned(char* req_addr, size_t bytes, size_t alignment) {3454size_t extra_size = bytes;3455if (req_addr == NULL && alignment > 0) {3456extra_size += alignment;3457}34583459char* start = anon_mmap(req_addr, extra_size);3460if (start != NULL) {3461if (req_addr != NULL) {3462if (start != req_addr) {3463::munmap(start, extra_size);3464start = NULL;3465}3466} else {3467char* const start_aligned = align_up(start, alignment);3468char* const end_aligned = start_aligned + bytes;3469char* const end = start + extra_size;3470if (start_aligned > start) {3471::munmap(start, start_aligned - start);3472}3473if (end_aligned < end) {3474::munmap(end_aligned, end - end_aligned);3475}3476start = start_aligned;3477}3478}3479return start;3480}34813482static int anon_munmap(char * addr, size_t size) {3483return ::munmap(addr, size) == 0;3484}34853486char* os::pd_reserve_memory(size_t bytes, bool exec) {3487return anon_mmap(NULL, bytes);3488}34893490bool os::pd_release_memory(char* addr, size_t size) {3491return anon_munmap(addr, size);3492}34933494#ifdef CAN_SHOW_REGISTERS_ON_ASSERT3495extern char* g_assert_poison; // assertion poison page address3496#endif34973498static bool linux_mprotect(char* addr, size_t size, int prot) {3499// Linux wants the mprotect address argument to be page aligned.3500char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size());35013502// According to SUSv3, mprotect() should only be used with mappings3503// established by mmap(), and mmap() always maps whole pages. Unaligned3504// 'addr' likely indicates problem in the VM (e.g. trying to change3505// protection of malloc'ed or statically allocated memory). Check the3506// caller if you hit this assert.3507assert(addr == bottom, "sanity check");35083509size = align_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());3510// Don't log anything if we're executing in the poison page signal handling3511// context. It can lead to reentrant use of other parts of the VM code.3512#ifdef CAN_SHOW_REGISTERS_ON_ASSERT3513if (addr != g_assert_poison)3514#endif3515Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);3516return ::mprotect(bottom, size, prot) == 0;3517}35183519// Set protections specified3520bool os::protect_memory(char* addr, size_t bytes, ProtType prot,3521bool is_committed) {3522unsigned int p = 0;3523switch (prot) {3524case MEM_PROT_NONE: p = PROT_NONE; break;3525case MEM_PROT_READ: p = PROT_READ; break;3526case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break;3527case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;3528default:3529ShouldNotReachHere();3530}3531// is_committed is unused.3532return linux_mprotect(addr, bytes, p);3533}35343535bool os::guard_memory(char* addr, size_t size) {3536return linux_mprotect(addr, size, PROT_NONE);3537}35383539bool os::unguard_memory(char* addr, size_t size) {3540return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);3541}35423543bool os::Linux::transparent_huge_pages_sanity_check(bool warn,3544size_t page_size) {3545bool result = false;3546void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,3547MAP_ANONYMOUS|MAP_PRIVATE,3548-1, 0);3549if (p != MAP_FAILED) {3550void *aligned_p = align_up(p, page_size);35513552result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;35533554munmap(p, page_size * 2);3555}35563557if (warn && !result) {3558warning("TransparentHugePages is not supported by the operating system.");3559}35603561return result;3562}35633564int os::Linux::hugetlbfs_page_size_flag(size_t page_size) {3565if (page_size != default_large_page_size()) {3566return (exact_log2(page_size) << MAP_HUGE_SHIFT);3567}3568return 0;3569}35703571bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {3572// Include the page size flag to ensure we sanity check the correct page size.3573int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);3574void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE, flags, -1, 0);35753576if (p != MAP_FAILED) {3577// Mapping succeeded, sanity check passed.3578munmap(p, page_size);3579return true;3580} else {3581log_info(pagesize)("Large page size (" SIZE_FORMAT "%s) failed sanity check, "3582"checking if smaller large page sizes are usable",3583byte_size_in_exact_unit(page_size),3584exact_unit_for_byte_size(page_size));3585for (size_t page_size_ = _page_sizes.next_smaller(page_size);3586page_size_ != (size_t)os::vm_page_size();3587page_size_ = _page_sizes.next_smaller(page_size_)) {3588flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size_);3589p = mmap(NULL, page_size_, PROT_READ|PROT_WRITE, flags, -1, 0);3590if (p != MAP_FAILED) {3591// Mapping succeeded, sanity check passed.3592munmap(p, page_size_);3593log_info(pagesize)("Large page size (" SIZE_FORMAT "%s) passed sanity check",3594byte_size_in_exact_unit(page_size_),3595exact_unit_for_byte_size(page_size_));3596return true;3597}3598}3599}36003601if (warn) {3602warning("HugeTLBFS is not configured or not supported by the operating system.");3603}36043605return false;3606}36073608bool os::Linux::shm_hugetlbfs_sanity_check(bool warn, size_t page_size) {3609#ifndef __ANDROID__3610// Try to create a large shared memory segment.3611int shmid = shmget(IPC_PRIVATE, page_size, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);3612if (shmid == -1) {3613// Possible reasons for shmget failure:3614// 1. shmmax is too small for the request.3615// > check shmmax value: cat /proc/sys/kernel/shmmax3616// > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax3617// 2. not enough large page memory.3618// > check available large pages: cat /proc/meminfo3619// > increase amount of large pages:3620// sysctl -w vm.nr_hugepages=new_value3621// > For more information regarding large pages please refer to:3622// https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt3623if (warn) {3624warning("Large pages using UseSHM are not configured on this system.");3625}3626return false;3627}3628// Managed to create a segment, now delete it.3629shmctl(shmid, IPC_RMID, NULL);3630return true;3631#else3632warning("UseSHM not supported on this platform");3633return false;3634#endif3635}36363637// From the coredump_filter documentation:3638//3639// - (bit 0) anonymous private memory3640// - (bit 1) anonymous shared memory3641// - (bit 2) file-backed private memory3642// - (bit 3) file-backed shared memory3643// - (bit 4) ELF header pages in file-backed private memory areas (it is3644// effective only if the bit 2 is cleared)3645// - (bit 5) hugetlb private memory3646// - (bit 6) hugetlb shared memory3647// - (bit 7) dax private memory3648// - (bit 8) dax shared memory3649//3650static void set_coredump_filter(CoredumpFilterBit bit) {3651FILE *f;3652long cdm;36533654if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {3655return;3656}36573658if (fscanf(f, "%lx", &cdm) != 1) {3659fclose(f);3660return;3661}36623663long saved_cdm = cdm;3664rewind(f);3665cdm |= bit;36663667if (cdm != saved_cdm) {3668fprintf(f, "%#lx", cdm);3669}36703671fclose(f);3672}36733674// Large page support36753676static size_t _large_page_size = 0;36773678static size_t scan_default_large_page_size() {3679size_t default_large_page_size = 0;36803681// large_page_size on Linux is used to round up heap size. x86 uses either3682// 2M or 4M page, depending on whether PAE (Physical Address Extensions)3683// mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use3684// page as large as 1G.3685//3686// Here we try to figure out page size by parsing /proc/meminfo and looking3687// for a line with the following format:3688// Hugepagesize: 2048 kB3689//3690// If we can't determine the value (e.g. /proc is not mounted, or the text3691// format has been changed), we'll set largest page size to 036923693FILE *fp = fopen("/proc/meminfo", "r");3694if (fp) {3695while (!feof(fp)) {3696int x = 0;3697char buf[16];3698if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {3699if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {3700default_large_page_size = x * K;3701break;3702}3703} else {3704// skip to next line3705for (;;) {3706int ch = fgetc(fp);3707if (ch == EOF || ch == (int)'\n') break;3708}3709}3710}3711fclose(fp);3712}37133714return default_large_page_size;3715}37163717static os::PageSizes scan_multiple_page_support() {3718// Scan /sys/kernel/mm/hugepages3719// to discover the available page sizes3720const char* sys_hugepages = "/sys/kernel/mm/hugepages";3721os::PageSizes page_sizes;37223723DIR *dir = opendir(sys_hugepages);37243725struct dirent *entry;3726size_t page_size;3727while ((entry = readdir(dir)) != NULL) {3728if (entry->d_type == DT_DIR &&3729sscanf(entry->d_name, "hugepages-%zukB", &page_size) == 1) {3730// The kernel is using kB, hotspot uses bytes3731// Add each found Large Page Size to page_sizes3732page_sizes.add(page_size * K);3733}3734}3735closedir(dir);37363737LogTarget(Debug, pagesize) lt;3738if (lt.is_enabled()) {3739LogStream ls(lt);3740ls.print("Large Page sizes: ");3741page_sizes.print_on(&ls);3742}37433744return page_sizes;3745}37463747size_t os::Linux::default_large_page_size() {3748return _default_large_page_size;3749}37503751void warn_no_large_pages_configured() {3752if (!FLAG_IS_DEFAULT(UseLargePages)) {3753log_warning(pagesize)("UseLargePages disabled, no large pages configured and available on the system.");3754}3755}37563757bool os::Linux::setup_large_page_type(size_t page_size) {3758if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&3759FLAG_IS_DEFAULT(UseSHM) &&3760FLAG_IS_DEFAULT(UseTransparentHugePages)) {37613762// The type of large pages has not been specified by the user.37633764// Try UseHugeTLBFS and then UseSHM.3765UseHugeTLBFS = UseSHM = true;37663767// Don't try UseTransparentHugePages since there are known3768// performance issues with it turned on. This might change in the future.3769UseTransparentHugePages = false;3770}37713772if (UseTransparentHugePages) {3773bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);3774if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {3775UseHugeTLBFS = false;3776UseSHM = false;3777return true;3778}3779UseTransparentHugePages = false;3780}37813782if (UseHugeTLBFS) {3783bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);3784if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {3785UseSHM = false;3786return true;3787}3788UseHugeTLBFS = false;3789}37903791if (UseSHM) {3792bool warn_on_failure = !FLAG_IS_DEFAULT(UseSHM);3793if (shm_hugetlbfs_sanity_check(warn_on_failure, page_size)) {3794return true;3795}3796UseSHM = false;3797}37983799warn_no_large_pages_configured();3800return false;3801}38023803void os::large_page_init() {3804// 1) Handle the case where we do not want to use huge pages and hence3805// there is no need to scan the OS for related info3806if (!UseLargePages &&3807!UseTransparentHugePages &&3808!UseHugeTLBFS &&3809!UseSHM) {3810// Not using large pages.3811return;3812}38133814if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {3815// The user explicitly turned off large pages.3816// Ignore the rest of the large pages flags.3817UseTransparentHugePages = false;3818UseHugeTLBFS = false;3819UseSHM = false;3820return;3821}38223823// 2) Scan OS info3824size_t default_large_page_size = scan_default_large_page_size();3825os::Linux::_default_large_page_size = default_large_page_size;3826if (default_large_page_size == 0) {3827// No large pages configured, return.3828warn_no_large_pages_configured();3829UseLargePages = false;3830UseTransparentHugePages = false;3831UseHugeTLBFS = false;3832UseSHM = false;3833return;3834}3835os::PageSizes all_large_pages = scan_multiple_page_support();38363837// 3) Consistency check and post-processing38383839// It is unclear if /sys/kernel/mm/hugepages/ and /proc/meminfo could disagree. Manually3840// re-add the default page size to the list of page sizes to be sure.3841all_large_pages.add(default_large_page_size);38423843// Check LargePageSizeInBytes matches an available page size and if so set _large_page_size3844// using LargePageSizeInBytes as the maximum allowed large page size. If LargePageSizeInBytes3845// doesn't match an available page size set _large_page_size to default_large_page_size3846// and use it as the maximum.3847if (FLAG_IS_DEFAULT(LargePageSizeInBytes) ||3848LargePageSizeInBytes == 0 ||3849LargePageSizeInBytes == default_large_page_size) {3850_large_page_size = default_large_page_size;3851log_info(pagesize)("Using the default large page size: " SIZE_FORMAT "%s",3852byte_size_in_exact_unit(_large_page_size),3853exact_unit_for_byte_size(_large_page_size));3854} else {3855if (all_large_pages.contains(LargePageSizeInBytes)) {3856_large_page_size = LargePageSizeInBytes;3857log_info(pagesize)("Overriding default large page size (" SIZE_FORMAT "%s) "3858"using LargePageSizeInBytes: " SIZE_FORMAT "%s",3859byte_size_in_exact_unit(default_large_page_size),3860exact_unit_for_byte_size(default_large_page_size),3861byte_size_in_exact_unit(_large_page_size),3862exact_unit_for_byte_size(_large_page_size));3863} else {3864_large_page_size = default_large_page_size;3865log_info(pagesize)("LargePageSizeInBytes is not a valid large page size (" SIZE_FORMAT "%s) "3866"using the default large page size: " SIZE_FORMAT "%s",3867byte_size_in_exact_unit(LargePageSizeInBytes),3868exact_unit_for_byte_size(LargePageSizeInBytes),3869byte_size_in_exact_unit(_large_page_size),3870exact_unit_for_byte_size(_large_page_size));3871}3872}38733874// Populate _page_sizes with large page sizes less than or equal to3875// _large_page_size.3876for (size_t page_size = _large_page_size; page_size != 0;3877page_size = all_large_pages.next_smaller(page_size)) {3878_page_sizes.add(page_size);3879}38803881LogTarget(Info, pagesize) lt;3882if (lt.is_enabled()) {3883LogStream ls(lt);3884ls.print("Usable page sizes: ");3885_page_sizes.print_on(&ls);3886}38873888// Now determine the type of large pages to use:3889UseLargePages = os::Linux::setup_large_page_type(_large_page_size);38903891set_coredump_filter(LARGEPAGES_BIT);3892}38933894#ifndef SHM_HUGETLB3895#define SHM_HUGETLB 040003896#endif38973898#ifndef __ANDROID__38993900#define shm_warning_format(format, ...) \3901do { \3902if (UseLargePages && \3903(!FLAG_IS_DEFAULT(UseLargePages) || \3904!FLAG_IS_DEFAULT(UseSHM) || \3905!FLAG_IS_DEFAULT(LargePageSizeInBytes))) { \3906warning(format, __VA_ARGS__); \3907} \3908} while (0)39093910#define shm_warning(str) shm_warning_format("%s", str)39113912#define shm_warning_with_errno(str) \3913do { \3914int err = errno; \3915shm_warning_format(str " (error = %d)", err); \3916} while (0)39173918static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {3919assert(is_aligned(bytes, alignment), "Must be divisible by the alignment");39203921if (!is_aligned(alignment, SHMLBA)) {3922assert(false, "Code below assumes that alignment is at least SHMLBA aligned");3923return NULL;3924}39253926// To ensure that we get 'alignment' aligned memory from shmat,3927// we pre-reserve aligned virtual memory and then attach to that.39283929char* pre_reserved_addr = anon_mmap_aligned(NULL /* req_addr */, bytes, alignment);3930if (pre_reserved_addr == NULL) {3931// Couldn't pre-reserve aligned memory.3932shm_warning("Failed to pre-reserve aligned memory for shmat.");3933return NULL;3934}39353936// SHM_REMAP is needed to allow shmat to map over an existing mapping.3937char* addr = (char*)shmat(shmid, pre_reserved_addr, SHM_REMAP);39383939if ((intptr_t)addr == -1) {3940int err = errno;3941shm_warning_with_errno("Failed to attach shared memory.");39423943assert(err != EACCES, "Unexpected error");3944assert(err != EIDRM, "Unexpected error");3945assert(err != EINVAL, "Unexpected error");39463947// Since we don't know if the kernel unmapped the pre-reserved memory area3948// we can't unmap it, since that would potentially unmap memory that was3949// mapped from other threads.3950return NULL;3951}39523953return addr;3954}39553956static char* shmat_at_address(int shmid, char* req_addr) {3957if (!is_aligned(req_addr, SHMLBA)) {3958assert(false, "Requested address needs to be SHMLBA aligned");3959return NULL;3960}39613962char* addr = (char*)shmat(shmid, req_addr, 0);39633964if ((intptr_t)addr == -1) {3965shm_warning_with_errno("Failed to attach shared memory.");3966return NULL;3967}39683969return addr;3970}39713972static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {3973// If a req_addr has been provided, we assume that the caller has already aligned the address.3974if (req_addr != NULL) {3975assert(is_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");3976assert(is_aligned(req_addr, alignment), "Must be divisible by given alignment");3977return shmat_at_address(shmid, req_addr);3978}39793980// Since shmid has been setup with SHM_HUGETLB, shmat will automatically3981// return large page size aligned memory addresses when req_addr == NULL.3982// However, if the alignment is larger than the large page size, we have3983// to manually ensure that the memory returned is 'alignment' aligned.3984if (alignment > os::large_page_size()) {3985assert(is_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");3986return shmat_with_alignment(shmid, bytes, alignment);3987} else {3988return shmat_at_address(shmid, NULL);3989}3990}39913992#endif // !__ANDROID__39933994char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,3995char* req_addr, bool exec) {3996#ifndef __ANDROID__3997// "exec" is passed in but not used. Creating the shared image for3998// the code cache doesn't have an SHM_X executable permission to check.3999assert(UseLargePages && UseSHM, "only for SHM large pages");4000assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");4001assert(is_aligned(req_addr, alignment), "Unaligned address");40024003if (!is_aligned(bytes, os::large_page_size())) {4004return NULL; // Fallback to small pages.4005}40064007// Create a large shared memory region to attach to based on size.4008// Currently, size is the total size of the heap.4009int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);4010if (shmid == -1) {4011// Possible reasons for shmget failure:4012// 1. shmmax is too small for the request.4013// > check shmmax value: cat /proc/sys/kernel/shmmax4014// > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax4015// 2. not enough large page memory.4016// > check available large pages: cat /proc/meminfo4017// > increase amount of large pages:4018// sysctl -w vm.nr_hugepages=new_value4019// > For more information regarding large pages please refer to:4020// https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt4021// Note 1: different Linux may use different name for this property,4022// e.g. on Redhat AS-3 it is "hugetlb_pool".4023// Note 2: it's possible there's enough physical memory available but4024// they are so fragmented after a long run that they can't4025// coalesce into large pages. Try to reserve large pages when4026// the system is still "fresh".4027shm_warning_with_errno("Failed to reserve shared memory.");4028return NULL;4029}40304031// Attach to the region.4032char* addr = shmat_large_pages(shmid, bytes, alignment, req_addr);40334034// Remove shmid. If shmat() is successful, the actual shared memory segment4035// will be deleted when it's detached by shmdt() or when the process4036// terminates. If shmat() is not successful this will remove the shared4037// segment immediately.4038shmctl(shmid, IPC_RMID, NULL);40394040return addr;4041#else4042assert(0, "SHM not supported on this platform");4043return NULL;4044#endif // !__ANDROID__4045}40464047static void warn_on_commit_special_failure(char* req_addr, size_t bytes,4048size_t page_size, int error) {4049assert(error == ENOMEM, "Only expect to fail if no memory is available");40504051bool warn_on_failure = UseLargePages &&4052(!FLAG_IS_DEFAULT(UseLargePages) ||4053!FLAG_IS_DEFAULT(UseHugeTLBFS) ||4054!FLAG_IS_DEFAULT(LargePageSizeInBytes));40554056if (warn_on_failure) {4057char msg[128];4058jio_snprintf(msg, sizeof(msg), "Failed to reserve and commit memory. req_addr: "4059PTR_FORMAT " bytes: " SIZE_FORMAT " page size: "4060SIZE_FORMAT " (errno = %d).",4061req_addr, bytes, page_size, error);4062warning("%s", msg);4063}4064}40654066bool os::Linux::commit_memory_special(size_t bytes,4067size_t page_size,4068char* req_addr,4069bool exec) {4070assert(UseLargePages && UseHugeTLBFS, "Should only get here when HugeTLBFS large pages are used");4071assert(is_aligned(bytes, page_size), "Unaligned size");4072assert(is_aligned(req_addr, page_size), "Unaligned address");4073assert(req_addr != NULL, "Must have a requested address for special mappings");40744075int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;4076int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED;40774078// For large pages additional flags are required.4079if (page_size > (size_t) os::vm_page_size()) {4080flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);4081}4082char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0);40834084if (addr == MAP_FAILED) {4085warn_on_commit_special_failure(req_addr, bytes, page_size, errno);4086return false;4087}40884089log_debug(pagesize)("Commit special mapping: " PTR_FORMAT ", size=" SIZE_FORMAT "%s, page size="4090SIZE_FORMAT "%s",4091p2i(addr), byte_size_in_exact_unit(bytes),4092exact_unit_for_byte_size(bytes),4093byte_size_in_exact_unit(page_size),4094exact_unit_for_byte_size(page_size));4095assert(is_aligned(addr, page_size), "Must be");4096return true;4097}40984099char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,4100size_t alignment,4101size_t page_size,4102char* req_addr,4103bool exec) {4104assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");4105assert(is_aligned(req_addr, alignment), "Must be");4106assert(is_aligned(req_addr, page_size), "Must be");4107assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");4108assert(_page_sizes.contains(page_size), "Must be a valid page size");4109assert(page_size > (size_t)os::vm_page_size(), "Must be a large page size");4110assert(bytes >= page_size, "Shouldn't allocate large pages for small sizes");41114112// We only end up here when at least 1 large page can be used.4113// If the size is not a multiple of the large page size, we4114// will mix the type of pages used, but in a decending order.4115// Start off by reserving a range of the given size that is4116// properly aligned. At this point no pages are committed. If4117// a requested address is given it will be used and it must be4118// aligned to both the large page size and the given alignment.4119// The larger of the two will be used.4120size_t required_alignment = MAX(page_size, alignment);4121char* const aligned_start = anon_mmap_aligned(req_addr, bytes, required_alignment);4122if (aligned_start == NULL) {4123return NULL;4124}41254126// First commit using large pages.4127size_t large_bytes = align_down(bytes, page_size);4128bool large_committed = commit_memory_special(large_bytes, page_size, aligned_start, exec);41294130if (large_committed && bytes == large_bytes) {4131// The size was large page aligned so no additional work is4132// needed even if the commit failed.4133return aligned_start;4134}41354136// The requested size requires some small pages as well.4137char* small_start = aligned_start + large_bytes;4138size_t small_size = bytes - large_bytes;4139if (!large_committed) {4140// Failed to commit large pages, so we need to unmap the4141// reminder of the orinal reservation.4142::munmap(small_start, small_size);4143return NULL;4144}41454146// Commit the remaining bytes using small pages.4147bool small_committed = commit_memory_special(small_size, os::vm_page_size(), small_start, exec);4148if (!small_committed) {4149// Failed to commit the remaining size, need to unmap4150// the large pages part of the reservation.4151::munmap(aligned_start, large_bytes);4152return NULL;4153}4154return aligned_start;4155}41564157char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size,4158char* req_addr, bool exec) {4159assert(UseLargePages, "only for large pages");41604161char* addr;4162if (UseSHM) {4163// No support for using specific page sizes with SHM.4164addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);4165} else {4166assert(UseHugeTLBFS, "must be");4167addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, page_size, req_addr, exec);4168}41694170if (addr != NULL) {4171if (UseNUMAInterleaving) {4172numa_make_global(addr, bytes);4173}4174}41754176return addr;4177}41784179bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {4180#ifndef __ANDROID__4181// detaching the SHM segment will also delete it, see reserve_memory_special_shm()4182return shmdt(base) == 0;4183#else4184assert(0, "SHM not supported on this platform");4185return false;4186#endif // SUPPORTS_SHM4187}41884189bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {4190return pd_release_memory(base, bytes);4191}41924193bool os::pd_release_memory_special(char* base, size_t bytes) {4194assert(UseLargePages, "only for large pages");4195bool res;41964197if (UseSHM) {4198res = os::Linux::release_memory_special_shm(base, bytes);4199} else {4200assert(UseHugeTLBFS, "must be");4201res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);4202}4203return res;4204}42054206size_t os::large_page_size() {4207return _large_page_size;4208}42094210// With SysV SHM the entire memory region must be allocated as shared4211// memory.4212// HugeTLBFS allows application to commit large page memory on demand.4213// However, when committing memory with HugeTLBFS fails, the region4214// that was supposed to be committed will lose the old reservation4215// and allow other threads to steal that memory region. Because of this4216// behavior we can't commit HugeTLBFS memory.4217bool os::can_commit_large_page_memory() {4218return UseTransparentHugePages;4219}42204221bool os::can_execute_large_page_memory() {4222return UseTransparentHugePages || UseHugeTLBFS;4223}42244225char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {4226assert(file_desc >= 0, "file_desc is not valid");4227char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem);4228if (result != NULL) {4229if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {4230vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));4231}4232}4233return result;4234}42354236// Reserve memory at an arbitrary address, only if that area is4237// available (and not reserved for something else).42384239char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {4240// Assert only that the size is a multiple of the page size, since4241// that's all that mmap requires, and since that's all we really know4242// about at this low abstraction level. If we need higher alignment,4243// we can either pass an alignment to this method or verify alignment4244// in one of the methods further up the call chain. See bug 5044738.4245assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");42464247// Repeatedly allocate blocks until the block is allocated at the4248// right spot.42494250// Linux mmap allows caller to pass an address as hint; give it a try first,4251// if kernel honors the hint then we can return immediately.4252char * addr = anon_mmap(requested_addr, bytes);4253if (addr == requested_addr) {4254return requested_addr;4255}42564257if (addr != NULL) {4258// mmap() is successful but it fails to reserve at the requested address4259anon_munmap(addr, bytes);4260}42614262return NULL;4263}42644265// Sleep forever; naked call to OS-specific sleep; use with CAUTION4266void os::infinite_sleep() {4267while (true) { // sleep forever ...4268::sleep(100); // ... 100 seconds at a time4269}4270}42714272// Used to convert frequent JVM_Yield() to nops4273bool os::dont_yield() {4274return DontYieldALot;4275}42764277// Linux CFS scheduler (since 2.6.23) does not guarantee sched_yield(2) will4278// actually give up the CPU. Since skip buddy (v2.6.28):4279//4280// * Sets the yielding task as skip buddy for current CPU's run queue.4281// * Picks next from run queue, if empty, picks a skip buddy (can be the yielding task).4282// * Clears skip buddies for this run queue (yielding task no longer a skip buddy).4283//4284// An alternative is calling os::naked_short_nanosleep with a small number to avoid4285// getting re-scheduled immediately.4286//4287void os::naked_yield() {4288sched_yield();4289}42904291////////////////////////////////////////////////////////////////////////////////4292// thread priority support42934294// Note: Normal Linux applications are run with SCHED_OTHER policy. SCHED_OTHER4295// only supports dynamic priority, static priority must be zero. For real-time4296// applications, Linux supports SCHED_RR which allows static priority (1-99).4297// However, for large multi-threaded applications, SCHED_RR is not only slower4298// than SCHED_OTHER, but also very unstable (my volano tests hang hard 4 out4299// of 5 runs - Sep 2005).4300//4301// The following code actually changes the niceness of kernel-thread/LWP. It4302// has an assumption that setpriority() only modifies one kernel-thread/LWP,4303// not the entire user process, and user level threads are 1:1 mapped to kernel4304// threads. It has always been the case, but could change in the future. For4305// this reason, the code should not be used as default (ThreadPriorityPolicy=0).4306// It is only used when ThreadPriorityPolicy=1 and may require system level permission4307// (e.g., root privilege or CAP_SYS_NICE capability).43084309int os::java_to_os_priority[CriticalPriority + 1] = {431019, // 0 Entry should never be used431143124, // 1 MinPriority43133, // 243142, // 3431543161, // 443170, // 5 NormPriority4318-1, // 643194320-2, // 74321-3, // 84322-4, // 9 NearMaxPriority43234324-5, // 10 MaxPriority43254326-5 // 11 CriticalPriority4327};43284329static int prio_init() {4330if (ThreadPriorityPolicy == 1) {4331if (geteuid() != 0) {4332if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy) && !FLAG_IS_JIMAGE_RESOURCE(ThreadPriorityPolicy)) {4333warning("-XX:ThreadPriorityPolicy=1 may require system level permission, " \4334"e.g., being the root user. If the necessary permission is not " \4335"possessed, changes to priority will be silently ignored.");4336}4337}4338}4339if (UseCriticalJavaThreadPriority) {4340os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];4341}4342return 0;4343}43444345OSReturn os::set_native_priority(Thread* thread, int newpri) {4346if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK;43474348int ret = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri);4349return (ret == 0) ? OS_OK : OS_ERR;4350}43514352OSReturn os::get_native_priority(const Thread* const thread,4353int *priority_ptr) {4354if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {4355*priority_ptr = java_to_os_priority[NormPriority];4356return OS_OK;4357}43584359errno = 0;4360*priority_ptr = getpriority(PRIO_PROCESS, thread->osthread()->thread_id());4361return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);4362}43634364// This is the fastest way to get thread cpu time on Linux.4365// Returns cpu time (user+sys) for any thread, not only for current.4366// POSIX compliant clocks are implemented in the kernels 2.6.16+.4367// It might work on 2.6.10+ with a special kernel/glibc patch.4368// For reference, please, see IEEE Std 1003.1-2004:4369// http://www.unix.org/single_unix_specification43704371jlong os::Linux::fast_thread_cpu_time(clockid_t clockid) {4372struct timespec tp;4373int status = clock_gettime(clockid, &tp);4374assert(status == 0, "clock_gettime error: %s", os::strerror(errno));4375return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;4376}43774378// Determine if the vmid is the parent pid for a child in a PID namespace.4379// Return the namespace pid if so, otherwise -1.4380int os::Linux::get_namespace_pid(int vmid) {4381char fname[24];4382int retpid = -1;43834384snprintf(fname, sizeof(fname), "/proc/%d/status", vmid);4385FILE *fp = fopen(fname, "r");43864387if (fp) {4388int pid, nspid;4389int ret;4390while (!feof(fp) && !ferror(fp)) {4391ret = fscanf(fp, "NSpid: %d %d", &pid, &nspid);4392if (ret == 1) {4393break;4394}4395if (ret == 2) {4396retpid = nspid;4397break;4398}4399for (;;) {4400int ch = fgetc(fp);4401if (ch == EOF || ch == (int)'\n') break;4402}4403}4404fclose(fp);4405}4406return retpid;4407}44084409extern void report_error(char* file_name, int line_no, char* title,4410char* format, ...);44114412// Some linux distributions (notably: Alpine Linux) include the4413// grsecurity in the kernel. Of particular interest from a JVM perspective4414// is PaX (https://pax.grsecurity.net/), which adds some security features4415// related to page attributes. Specifically, the MPROTECT PaX functionality4416// (https://pax.grsecurity.net/docs/mprotect.txt) prevents dynamic4417// code generation by disallowing a (previously) writable page to be4418// marked as executable. This is, of course, exactly what HotSpot does4419// for both JIT compiled method, as well as for stubs, adapters, etc.4420//4421// Instead of crashing "lazily" when trying to make a page executable,4422// this code probes for the presence of PaX and reports the failure4423// eagerly.4424static void check_pax(void) {4425// Zero doesn't generate code dynamically, so no need to perform the PaX check4426#ifndef ZERO4427size_t size = os::Linux::page_size();44284429void* p = ::mmap(NULL, size, PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);4430if (p == MAP_FAILED) {4431log_debug(os)("os_linux.cpp: check_pax: mmap failed (%s)" , os::strerror(errno));4432vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "failed to allocate memory for PaX check.");4433}44344435int res = ::mprotect(p, size, PROT_WRITE|PROT_EXEC);4436if (res == -1) {4437log_debug(os)("os_linux.cpp: check_pax: mprotect failed (%s)" , os::strerror(errno));4438vm_exit_during_initialization(4439"Failed to mark memory page as executable - check if grsecurity/PaX is enabled");4440}44414442::munmap(p, size);4443#endif4444}44454446// this is called _before_ most of the global arguments have been parsed4447void os::init(void) {4448char dummy; // used to get a guess on initial stack address44494450clock_tics_per_sec = sysconf(_SC_CLK_TCK);44514452Linux::set_page_size(sysconf(_SC_PAGESIZE));4453if (Linux::page_size() == -1) {4454fatal("os_linux.cpp: os::init: sysconf failed (%s)",4455os::strerror(errno));4456}4457_page_sizes.add(Linux::page_size());44584459Linux::initialize_system_info();44604461#ifdef __GLIBC__4462Linux::_mallinfo = CAST_TO_FN_PTR(Linux::mallinfo_func_t, dlsym(RTLD_DEFAULT, "mallinfo"));4463Linux::_mallinfo2 = CAST_TO_FN_PTR(Linux::mallinfo2_func_t, dlsym(RTLD_DEFAULT, "mallinfo2"));4464#endif // __GLIBC__44654466os::Linux::CPUPerfTicks pticks;4467bool res = os::Linux::get_tick_information(&pticks, -1);44684469if (res && pticks.has_steal_ticks) {4470has_initial_tick_info = true;4471initial_total_ticks = pticks.total;4472initial_steal_ticks = pticks.steal;4473}44744475// _main_thread points to the thread that created/loaded the JVM.4476Linux::_main_thread = pthread_self();44774478// retrieve entry point for pthread_setname_np4479Linux::_pthread_setname_np =4480(int(*)(pthread_t, const char*))dlsym(RTLD_DEFAULT, "pthread_setname_np");44814482check_pax();44834484os::Posix::init();44854486initial_time_count = javaTimeNanos();4487}44884489// To install functions for atexit system call4490extern "C" {4491static void perfMemory_exit_helper() {4492perfMemory_exit();4493}4494}44954496void os::pd_init_container_support() {4497OSContainer::init();4498}44994500void os::Linux::numa_init() {45014502// Java can be invoked as4503// 1. Without numactl and heap will be allocated/configured on all nodes as4504// per the system policy.4505// 2. With numactl --interleave:4506// Use numa_get_interleave_mask(v2) API to get nodes bitmask. The same4507// API for membind case bitmask is reset.4508// Interleave is only hint and Kernel can fallback to other nodes if4509// no memory is available on the target nodes.4510// 3. With numactl --membind:4511// Use numa_get_membind(v2) API to get nodes bitmask. The same API for4512// interleave case returns bitmask of all nodes.4513// numa_all_nodes_ptr holds bitmask of all nodes.4514// numa_get_interleave_mask(v2) and numa_get_membind(v2) APIs returns correct4515// bitmask when externally configured to run on all or fewer nodes.45164517if (!Linux::libnuma_init()) {4518FLAG_SET_ERGO(UseNUMA, false);4519FLAG_SET_ERGO(UseNUMAInterleaving, false); // Also depends on libnuma.4520} else {4521if ((Linux::numa_max_node() < 1) || Linux::is_bound_to_single_node()) {4522// If there's only one node (they start from 0) or if the process4523// is bound explicitly to a single node using membind, disable NUMA4524UseNUMA = false;4525} else {4526LogTarget(Info,os) log;4527LogStream ls(log);45284529Linux::set_configured_numa_policy(Linux::identify_numa_policy());45304531struct bitmask* bmp = Linux::_numa_membind_bitmask;4532const char* numa_mode = "membind";45334534if (Linux::is_running_in_interleave_mode()) {4535bmp = Linux::_numa_interleave_bitmask;4536numa_mode = "interleave";4537}45384539ls.print("UseNUMA is enabled and invoked in '%s' mode."4540" Heap will be configured using NUMA memory nodes:", numa_mode);45414542for (int node = 0; node <= Linux::numa_max_node(); node++) {4543if (Linux::_numa_bitmask_isbitset(bmp, node)) {4544ls.print(" %d", node);4545}4546}4547}4548}45494550// When NUMA requested, not-NUMA-aware allocations default to interleaving.4551if (UseNUMA && !UseNUMAInterleaving) {4552FLAG_SET_ERGO_IF_DEFAULT(UseNUMAInterleaving, true);4553}45544555if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {4556// With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way4557// we can make the adaptive lgrp chunk resizing work. If the user specified both4558// UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn4559// and disable adaptive resizing.4560if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {4561warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, "4562"disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");4563UseAdaptiveSizePolicy = false;4564UseAdaptiveNUMAChunkSizing = false;4565}4566}4567}45684569// this is called _after_ the global arguments have been parsed4570jint os::init_2(void) {45714572// This could be set after os::Posix::init() but all platforms4573// have to set it the same so we have to mirror Solaris.4574DEBUG_ONLY(os::set_mutex_init_done();)45754576os::Posix::init_2();45774578Linux::fast_thread_clock_init();45794580if (PosixSignals::init() == JNI_ERR) {4581return JNI_ERR;4582}45834584if (AdjustStackSizeForTLS) {4585get_minstack_init();4586}45874588// Check and sets minimum stack sizes against command line options4589if (Posix::set_minimum_stack_sizes() == JNI_ERR) {4590return JNI_ERR;4591}45924593#if defined(IA32) && !defined(ZERO)4594// Need to ensure we've determined the process's initial stack to4595// perform the workaround4596Linux::capture_initial_stack(JavaThread::stack_size_at_create());4597workaround_expand_exec_shield_cs_limit();4598#else4599suppress_primordial_thread_resolution = Arguments::created_by_java_launcher();4600if (!suppress_primordial_thread_resolution) {4601Linux::capture_initial_stack(JavaThread::stack_size_at_create());4602}4603#endif46044605Linux::libpthread_init();4606Linux::sched_getcpu_init();4607log_info(os)("HotSpot is running with %s, %s",4608Linux::libc_version(), Linux::libpthread_version());46094610if (UseNUMA || UseNUMAInterleaving) {4611Linux::numa_init();4612}46134614if (MaxFDLimit) {4615// set the number of file descriptors to max. print out error4616// if getrlimit/setrlimit fails but continue regardless.4617struct rlimit nbr_files;4618int status = getrlimit(RLIMIT_NOFILE, &nbr_files);4619if (status != 0) {4620log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));4621} else {4622nbr_files.rlim_cur = nbr_files.rlim_max;4623status = setrlimit(RLIMIT_NOFILE, &nbr_files);4624if (status != 0) {4625log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));4626}4627}4628}46294630// at-exit methods are called in the reverse order of their registration.4631// atexit functions are called on return from main or as a result of a4632// call to exit(3C). There can be only 32 of these functions registered4633// and atexit() does not set errno.46344635if (PerfAllowAtExitRegistration) {4636// only register atexit functions if PerfAllowAtExitRegistration is set.4637// atexit functions can be delayed until process exit time, which4638// can be problematic for embedded VM situations. Embedded VMs should4639// call DestroyJavaVM() to assure that VM resources are released.46404641// note: perfMemory_exit_helper atexit function may be removed in4642// the future if the appropriate cleanup code can be added to the4643// VM_Exit VMOperation's doit method.4644if (atexit(perfMemory_exit_helper) != 0) {4645warning("os::init_2 atexit(perfMemory_exit_helper) failed");4646}4647}46484649// initialize thread priority policy4650prio_init();46514652if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {4653set_coredump_filter(DAX_SHARED_BIT);4654}46554656if (DumpPrivateMappingsInCore) {4657set_coredump_filter(FILE_BACKED_PVT_BIT);4658}46594660if (DumpSharedMappingsInCore) {4661set_coredump_filter(FILE_BACKED_SHARED_BIT);4662}46634664if (DumpPerfMapAtExit && FLAG_IS_DEFAULT(UseCodeCacheFlushing)) {4665// Disable code cache flushing to ensure the map file written at4666// exit contains all nmethods generated during execution.4667FLAG_SET_DEFAULT(UseCodeCacheFlushing, false);4668}46694670return JNI_OK;4671}46724673// older glibc versions don't have this macro (which expands to4674// an optimized bit-counting function) so we have to roll our own4675#ifndef CPU_COUNT46764677static int _cpu_count(const cpu_set_t* cpus) {4678int count = 0;4679// only look up to the number of configured processors4680for (int i = 0; i < os::processor_count(); i++) {4681if (CPU_ISSET(i, cpus)) {4682count++;4683}4684}4685return count;4686}46874688#define CPU_COUNT(cpus) _cpu_count(cpus)46894690#endif // CPU_COUNT46914692// Get the current number of available processors for this process.4693// This value can change at any time during a process's lifetime.4694// sched_getaffinity gives an accurate answer as it accounts for cpusets.4695// If it appears there may be more than 1024 processors then we do a4696// dynamic check - see 6515172 for details.4697// If anything goes wrong we fallback to returning the number of online4698// processors - which can be greater than the number available to the process.4699int os::Linux::active_processor_count() {4700cpu_set_t cpus; // can represent at most 1024 (CPU_SETSIZE) processors4701cpu_set_t* cpus_p = &cpus;4702int cpus_size = sizeof(cpu_set_t);47034704int configured_cpus = os::processor_count(); // upper bound on available cpus4705int cpu_count = 0;47064707// old build platforms may not support dynamic cpu sets4708#ifdef CPU_ALLOC47094710// To enable easy testing of the dynamic path on different platforms we4711// introduce a diagnostic flag: UseCpuAllocPath4712if (configured_cpus >= CPU_SETSIZE || UseCpuAllocPath) {4713// kernel may use a mask bigger than cpu_set_t4714log_trace(os)("active_processor_count: using dynamic path %s"4715"- configured processors: %d",4716UseCpuAllocPath ? "(forced) " : "",4717configured_cpus);4718cpus_p = CPU_ALLOC(configured_cpus);4719if (cpus_p != NULL) {4720cpus_size = CPU_ALLOC_SIZE(configured_cpus);4721// zero it just to be safe4722CPU_ZERO_S(cpus_size, cpus_p);4723}4724else {4725// failed to allocate so fallback to online cpus4726int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);4727log_trace(os)("active_processor_count: "4728"CPU_ALLOC failed (%s) - using "4729"online processor count: %d",4730os::strerror(errno), online_cpus);4731return online_cpus;4732}4733}4734else {4735log_trace(os)("active_processor_count: using static path - configured processors: %d",4736configured_cpus);4737}4738#else // CPU_ALLOC4739// these stubs won't be executed4740#define CPU_COUNT_S(size, cpus) -14741#define CPU_FREE(cpus)47424743log_trace(os)("active_processor_count: only static path available - configured processors: %d",4744configured_cpus);4745#endif // CPU_ALLOC47464747// pid 0 means the current thread - which we have to assume represents the process4748if (sched_getaffinity(0, cpus_size, cpus_p) == 0) {4749if (cpus_p != &cpus) { // can only be true when CPU_ALLOC used4750cpu_count = CPU_COUNT_S(cpus_size, cpus_p);4751}4752else {4753cpu_count = CPU_COUNT(cpus_p);4754}4755log_trace(os)("active_processor_count: sched_getaffinity processor count: %d", cpu_count);4756}4757else {4758cpu_count = ::sysconf(_SC_NPROCESSORS_ONLN);4759warning("sched_getaffinity failed (%s)- using online processor count (%d) "4760"which may exceed available processors", os::strerror(errno), cpu_count);4761}47624763if (cpus_p != &cpus) { // can only be true when CPU_ALLOC used4764CPU_FREE(cpus_p);4765}47664767assert(cpu_count > 0 && cpu_count <= os::processor_count(), "sanity check");4768return cpu_count;4769}47704771// Determine the active processor count from one of4772// three different sources:4773//4774// 1. User option -XX:ActiveProcessorCount4775// 2. kernel os calls (sched_getaffinity or sysconf(_SC_NPROCESSORS_ONLN)4776// 3. extracted from cgroup cpu subsystem (shares and quotas)4777//4778// Option 1, if specified, will always override.4779// If the cgroup subsystem is active and configured, we4780// will return the min of the cgroup and option 2 results.4781// This is required since tools, such as numactl, that4782// alter cpu affinity do not update cgroup subsystem4783// cpuset configuration files.4784int os::active_processor_count() {4785// User has overridden the number of active processors4786if (ActiveProcessorCount > 0) {4787log_trace(os)("active_processor_count: "4788"active processor count set by user : %d",4789ActiveProcessorCount);4790return ActiveProcessorCount;4791}47924793int active_cpus;4794if (OSContainer::is_containerized()) {4795active_cpus = OSContainer::active_processor_count();4796log_trace(os)("active_processor_count: determined by OSContainer: %d",4797active_cpus);4798} else {4799active_cpus = os::Linux::active_processor_count();4800}48014802return active_cpus;4803}48044805static bool should_warn_invalid_processor_id() {4806if (os::processor_count() == 1) {4807// Don't warn if we only have one processor4808return false;4809}48104811static volatile int warn_once = 1;48124813if (Atomic::load(&warn_once) == 0 ||4814Atomic::xchg(&warn_once, 0) == 0) {4815// Don't warn more than once4816return false;4817}48184819return true;4820}48214822uint os::processor_id() {4823const int id = Linux::sched_getcpu();48244825if (id < processor_count()) {4826return (uint)id;4827}48284829// Some environments (e.g. openvz containers and the rr debugger) incorrectly4830// report a processor id that is higher than the number of processors available.4831// This is problematic, for example, when implementing CPU-local data structures,4832// where the processor id is used to index into an array of length processor_count().4833// If this happens we return 0 here. This is is safe since we always have at least4834// one processor, but it's not optimal for performance if we're actually executing4835// in an environment with more than one processor.4836if (should_warn_invalid_processor_id()) {4837log_warning(os)("Invalid processor id reported by the operating system "4838"(got processor id %d, valid processor id range is 0-%d)",4839id, processor_count() - 1);4840log_warning(os)("Falling back to assuming processor id is 0. "4841"This could have a negative impact on performance.");4842}48434844return 0;4845}48464847void os::set_native_thread_name(const char *name) {4848if (Linux::_pthread_setname_np) {4849char buf [16]; // according to glibc manpage, 16 chars incl. '/0'4850snprintf(buf, sizeof(buf), "%s", name);4851buf[sizeof(buf) - 1] = '\0';4852const int rc = Linux::_pthread_setname_np(pthread_self(), buf);4853// ERANGE should not happen; all other errors should just be ignored.4854assert(rc != ERANGE, "pthread_setname_np failed");4855}4856}48574858bool os::bind_to_processor(uint processor_id) {4859// Not yet implemented.4860return false;4861}48624863////////////////////////////////////////////////////////////////////////////////4864// debug support48654866bool os::find(address addr, outputStream* st) {4867Dl_info dlinfo;4868memset(&dlinfo, 0, sizeof(dlinfo));4869if (dladdr(addr, &dlinfo) != 0) {4870st->print(PTR_FORMAT ": ", p2i(addr));4871if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {4872st->print("%s+" PTR_FORMAT, dlinfo.dli_sname,4873p2i(addr) - p2i(dlinfo.dli_saddr));4874} else if (dlinfo.dli_fbase != NULL) {4875st->print("<offset " PTR_FORMAT ">", p2i(addr) - p2i(dlinfo.dli_fbase));4876} else {4877st->print("<absolute address>");4878}4879if (dlinfo.dli_fname != NULL) {4880st->print(" in %s", dlinfo.dli_fname);4881}4882if (dlinfo.dli_fbase != NULL) {4883st->print(" at " PTR_FORMAT, p2i(dlinfo.dli_fbase));4884}4885st->cr();48864887if (Verbose) {4888// decode some bytes around the PC4889address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());4890address end = clamp_address_in_page(addr+40, addr, os::vm_page_size());4891address lowest = (address) dlinfo.dli_sname;4892if (!lowest) lowest = (address) dlinfo.dli_fbase;4893if (begin < lowest) begin = lowest;4894Dl_info dlinfo2;4895if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr4896&& end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {4897end = (address) dlinfo2.dli_saddr;4898}4899Disassembler::decode(begin, end, st);4900}4901return true;4902}4903return false;4904}49054906////////////////////////////////////////////////////////////////////////////////4907// misc49084909// This does not do anything on Linux. This is basically a hook for being4910// able to use structured exception handling (thread-local exception filters)4911// on, e.g., Win32.4912void4913os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,4914JavaCallArguments* args, JavaThread* thread) {4915f(value, method, args, thread);4916}49174918void os::print_statistics() {4919}49204921bool os::message_box(const char* title, const char* message) {4922int i;4923fdStream err(defaultStream::error_fd());4924for (i = 0; i < 78; i++) err.print_raw("=");4925err.cr();4926err.print_raw_cr(title);4927for (i = 0; i < 78; i++) err.print_raw("-");4928err.cr();4929err.print_raw_cr(message);4930for (i = 0; i < 78; i++) err.print_raw("=");4931err.cr();49324933char buf[16];4934// Prevent process from exiting upon "read error" without consuming all CPU4935while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }49364937return buf[0] == 'y' || buf[0] == 'Y';4938}49394940// Is a (classpath) directory empty?4941bool os::dir_is_empty(const char* path) {4942DIR *dir = NULL;4943struct dirent *ptr;49444945dir = opendir(path);4946if (dir == NULL) return true;49474948// Scan the directory4949bool result = true;4950while (result && (ptr = readdir(dir)) != NULL) {4951if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {4952result = false;4953}4954}4955closedir(dir);4956return result;4957}49584959// This code originates from JDK's sysOpen and open64_w4960// from src/solaris/hpi/src/system_md.c49614962int os::open(const char *path, int oflag, int mode) {4963if (strlen(path) > MAX_PATH - 1) {4964errno = ENAMETOOLONG;4965return -1;4966}49674968// All file descriptors that are opened in the Java process and not4969// specifically destined for a subprocess should have the close-on-exec4970// flag set. If we don't set it, then careless 3rd party native code4971// might fork and exec without closing all appropriate file descriptors4972// (e.g. as we do in closeDescriptors in UNIXProcess.c), and this in4973// turn might:4974//4975// - cause end-of-file to fail to be detected on some file4976// descriptors, resulting in mysterious hangs, or4977//4978// - might cause an fopen in the subprocess to fail on a system4979// suffering from bug 1085341.4980//4981// (Yes, the default setting of the close-on-exec flag is a Unix4982// design flaw)4983//4984// See:4985// 1085341: 32-bit stdio routines should support file descriptors >2554986// 4843136: (process) pipe file descriptor from Runtime.exec not being closed4987// 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 94988//4989// Modern Linux kernels (after 2.6.23 2007) support O_CLOEXEC with open().4990// O_CLOEXEC is preferable to using FD_CLOEXEC on an open file descriptor4991// because it saves a system call and removes a small window where the flag4992// is unset. On ancient Linux kernels the O_CLOEXEC flag will be ignored4993// and we fall back to using FD_CLOEXEC (see below).4994#ifdef O_CLOEXEC4995oflag |= O_CLOEXEC;4996#endif49974998int fd = ::open64(path, oflag, mode);4999if (fd == -1) return -1;50005001//If the open succeeded, the file might still be a directory5002{5003struct stat64 buf64;5004int ret = ::fstat64(fd, &buf64);5005int st_mode = buf64.st_mode;50065007if (ret != -1) {5008if ((st_mode & S_IFMT) == S_IFDIR) {5009errno = EISDIR;5010::close(fd);5011return -1;5012}5013} else {5014::close(fd);5015return -1;5016}5017}50185019#ifdef FD_CLOEXEC5020// Validate that the use of the O_CLOEXEC flag on open above worked.5021// With recent kernels, we will perform this check exactly once.5022static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;5023if (!O_CLOEXEC_is_known_to_work) {5024int flags = ::fcntl(fd, F_GETFD);5025if (flags != -1) {5026if ((flags & FD_CLOEXEC) != 0)5027O_CLOEXEC_is_known_to_work = 1;5028else5029::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);5030}5031}5032#endif50335034return fd;5035}503650375038// create binary file, rewriting existing file if required5039int os::create_binary_file(const char* path, bool rewrite_existing) {5040int oflags = O_WRONLY | O_CREAT;5041if (!rewrite_existing) {5042oflags |= O_EXCL;5043}5044return ::open64(path, oflags, S_IREAD | S_IWRITE);5045}50465047// return current position of file pointer5048jlong os::current_file_offset(int fd) {5049return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);5050}50515052// move file pointer to the specified offset5053jlong os::seek_to_file_offset(int fd, jlong offset) {5054return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);5055}50565057// This code originates from JDK's sysAvailable5058// from src/solaris/hpi/src/native_threads/src/sys_api_td.c50595060int os::available(int fd, jlong *bytes) {5061jlong cur, end;5062int mode;5063struct stat64 buf64;50645065if (::fstat64(fd, &buf64) >= 0) {5066mode = buf64.st_mode;5067if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {5068int n;5069if (::ioctl(fd, FIONREAD, &n) >= 0) {5070*bytes = n;5071return 1;5072}5073}5074}5075if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {5076return 0;5077} else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {5078return 0;5079} else if (::lseek64(fd, cur, SEEK_SET) == -1) {5080return 0;5081}5082*bytes = end - cur;5083return 1;5084}50855086// Map a block of memory.5087char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,5088char *addr, size_t bytes, bool read_only,5089bool allow_exec) {5090int prot;5091int flags = MAP_PRIVATE;50925093if (read_only) {5094prot = PROT_READ;5095} else {5096prot = PROT_READ | PROT_WRITE;5097}50985099if (allow_exec) {5100prot |= PROT_EXEC;5101}51025103if (addr != NULL) {5104flags |= MAP_FIXED;5105}51065107char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,5108fd, file_offset);5109if (mapped_address == MAP_FAILED) {5110return NULL;5111}5112return mapped_address;5113}511451155116// Remap a block of memory.5117char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,5118char *addr, size_t bytes, bool read_only,5119bool allow_exec) {5120// same as map_memory() on this OS5121return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,5122allow_exec);5123}512451255126// Unmap a block of memory.5127bool os::pd_unmap_memory(char* addr, size_t bytes) {5128return munmap(addr, bytes) == 0;5129}51305131static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);51325133static jlong fast_cpu_time(Thread *thread) {5134clockid_t clockid;5135int rc = os::Linux::pthread_getcpuclockid(thread->osthread()->pthread_id(),5136&clockid);5137if (rc == 0) {5138return os::Linux::fast_thread_cpu_time(clockid);5139} else {5140// It's possible to encounter a terminated native thread that failed5141// to detach itself from the VM - which should result in ESRCH.5142assert_status(rc == ESRCH, rc, "pthread_getcpuclockid failed");5143return -1;5144}5145}51465147// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)5148// are used by JVM M&M and JVMTI to get user+sys or user CPU time5149// of a thread.5150//5151// current_thread_cpu_time() and thread_cpu_time(Thread*) returns5152// the fast estimate available on the platform.51535154jlong os::current_thread_cpu_time() {5155if (os::Linux::supports_fast_thread_cpu_time()) {5156return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);5157} else {5158// return user + sys since the cost is the same5159return slow_thread_cpu_time(Thread::current(), true /* user + sys */);5160}5161}51625163jlong os::thread_cpu_time(Thread* thread) {5164// consistent with what current_thread_cpu_time() returns5165if (os::Linux::supports_fast_thread_cpu_time()) {5166return fast_cpu_time(thread);5167} else {5168return slow_thread_cpu_time(thread, true /* user + sys */);5169}5170}51715172jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {5173if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {5174return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);5175} else {5176return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time);5177}5178}51795180jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {5181if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {5182return fast_cpu_time(thread);5183} else {5184return slow_thread_cpu_time(thread, user_sys_cpu_time);5185}5186}51875188// -1 on error.5189static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {5190pid_t tid = thread->osthread()->thread_id();5191char *s;5192char stat[2048];5193int statlen;5194char proc_name[64];5195int count;5196long sys_time, user_time;5197char cdummy;5198int idummy;5199long ldummy;5200FILE *fp;52015202snprintf(proc_name, 64, "/proc/self/task/%d/stat", tid);5203fp = fopen(proc_name, "r");5204if (fp == NULL) return -1;5205statlen = fread(stat, 1, 2047, fp);5206stat[statlen] = '\0';5207fclose(fp);52085209// Skip pid and the command string. Note that we could be dealing with5210// weird command names, e.g. user could decide to rename java launcher5211// to "java 1.4.2 :)", then the stat file would look like5212// 1234 (java 1.4.2 :)) R ... ...5213// We don't really need to know the command string, just find the last5214// occurrence of ")" and then start parsing from there. See bug 4726580.5215s = strrchr(stat, ')');5216if (s == NULL) return -1;52175218// Skip blank chars5219do { s++; } while (s && isspace(*s));52205221count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",5222&cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,5223&ldummy, &ldummy, &ldummy, &ldummy, &ldummy,5224&user_time, &sys_time);5225if (count != 13) return -1;5226if (user_sys_cpu_time) {5227return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);5228} else {5229return (jlong)user_time * (1000000000 / clock_tics_per_sec);5230}5231}52325233void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {5234info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits5235info_ptr->may_skip_backward = false; // elapsed time not wall time5236info_ptr->may_skip_forward = false; // elapsed time not wall time5237info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned5238}52395240void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {5241info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits5242info_ptr->may_skip_backward = false; // elapsed time not wall time5243info_ptr->may_skip_forward = false; // elapsed time not wall time5244info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned5245}52465247bool os::is_thread_cpu_time_supported() {5248return true;5249}52505251// System loadavg support. Returns -1 if load average cannot be obtained.5252// Linux doesn't yet have a (official) notion of processor sets,5253// so just return the system wide load average.5254int os::loadavg(double loadavg[], int nelem) {5255#ifndef __ANDROID__5256return ::getloadavg(loadavg, nelem);5257#else5258/*5259* Copyright (C) 2018 The Android Open Source Project5260* All rights reserved.5261*5262* Redistribution and use in source and binary forms, with or without5263* modification, are permitted provided that the following conditions5264* are met:5265* * Redistributions of source code must retain the above copyright5266* notice, this list of conditions and the following disclaimer.5267* * Redistributions in binary form must reproduce the above copyright5268* notice, this list of conditions and the following disclaimer in5269* the documentation and/or other materials provided with the5270* distribution.5271*5272* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS5273* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT5274* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS5275* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE5276* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,5277* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,5278* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS5279* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED5280* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,5281* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT5282* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF5283* SUCH DAMAGE.5284*/52855286if (nelem < 0) return -1;5287if (nelem > 3) nelem = 3;5288struct sysinfo si;5289if (sysinfo(&si) == -1) return -1;5290for (int i = 0; i < nelem; ++i) {5291loadavg[i] = static_cast<double>(si.loads[i]) / static_cast<double>(1 << SI_LOAD_SHIFT);5292}5293return nelem;5294#endif5295}52965297void os::pause() {5298char filename[MAX_PATH];5299if (PauseAtStartupFile && PauseAtStartupFile[0]) {5300jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);5301} else {5302jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());5303}53045305int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);5306if (fd != -1) {5307struct stat buf;5308::close(fd);5309while (::stat(filename, &buf) == 0) {5310(void)::poll(NULL, 0, 100);5311}5312} else {5313jio_fprintf(stderr,5314"Could not open pause file '%s', continuing immediately.\n", filename);5315}5316}53175318// Get the default path to the core file5319// Returns the length of the string5320int os::get_core_path(char* buffer, size_t bufferSize) {5321/*5322* Max length of /proc/sys/kernel/core_pattern is 128 characters.5323* See https://www.kernel.org/doc/Documentation/sysctl/kernel.txt5324*/5325const int core_pattern_len = 129;5326char core_pattern[core_pattern_len] = {0};53275328int core_pattern_file = ::open("/proc/sys/kernel/core_pattern", O_RDONLY);5329if (core_pattern_file == -1) {5330return -1;5331}53325333ssize_t ret = ::read(core_pattern_file, core_pattern, core_pattern_len);5334::close(core_pattern_file);5335if (ret <= 0 || ret >= core_pattern_len || core_pattern[0] == '\n') {5336return -1;5337}5338if (core_pattern[ret-1] == '\n') {5339core_pattern[ret-1] = '\0';5340} else {5341core_pattern[ret] = '\0';5342}53435344// Replace the %p in the core pattern with the process id. NOTE: we do this5345// only if the pattern doesn't start with "|", and we support only one %p in5346// the pattern.5347char *pid_pos = strstr(core_pattern, "%p");5348const char* tail = (pid_pos != NULL) ? (pid_pos + 2) : ""; // skip over the "%p"5349int written;53505351if (core_pattern[0] == '/') {5352if (pid_pos != NULL) {5353*pid_pos = '\0';5354written = jio_snprintf(buffer, bufferSize, "%s%d%s", core_pattern,5355current_process_id(), tail);5356} else {5357written = jio_snprintf(buffer, bufferSize, "%s", core_pattern);5358}5359} else {5360char cwd[PATH_MAX];53615362const char* p = get_current_directory(cwd, PATH_MAX);5363if (p == NULL) {5364return -1;5365}53665367if (core_pattern[0] == '|') {5368written = jio_snprintf(buffer, bufferSize,5369"\"%s\" (or dumping to %s/core.%d)",5370&core_pattern[1], p, current_process_id());5371} else if (pid_pos != NULL) {5372*pid_pos = '\0';5373written = jio_snprintf(buffer, bufferSize, "%s/%s%d%s", p, core_pattern,5374current_process_id(), tail);5375} else {5376written = jio_snprintf(buffer, bufferSize, "%s/%s", p, core_pattern);5377}5378}53795380if (written < 0) {5381return -1;5382}53835384if (((size_t)written < bufferSize) && (pid_pos == NULL) && (core_pattern[0] != '|')) {5385int core_uses_pid_file = ::open("/proc/sys/kernel/core_uses_pid", O_RDONLY);53865387if (core_uses_pid_file != -1) {5388char core_uses_pid = 0;5389ssize_t ret = ::read(core_uses_pid_file, &core_uses_pid, 1);5390::close(core_uses_pid_file);53915392if (core_uses_pid == '1') {5393jio_snprintf(buffer + written, bufferSize - written,5394".%d", current_process_id());5395}5396}5397}53985399return strlen(buffer);5400}54015402bool os::start_debugging(char *buf, int buflen) {5403int len = (int)strlen(buf);5404char *p = &buf[len];54055406jio_snprintf(p, buflen-len,5407"\n\n"5408"Do you want to debug the problem?\n\n"5409"To debug, run 'gdb /proc/%d/exe %d'; then switch to thread " UINTX_FORMAT " (" INTPTR_FORMAT ")\n"5410"Enter 'yes' to launch gdb automatically (PATH must include gdb)\n"5411"Otherwise, press RETURN to abort...",5412os::current_process_id(), os::current_process_id(),5413os::current_thread_id(), os::current_thread_id());54145415bool yes = os::message_box("Unexpected Error", buf);54165417if (yes) {5418// yes, user asked VM to launch debugger5419jio_snprintf(buf, sizeof(char)*buflen, "gdb /proc/%d/exe %d",5420os::current_process_id(), os::current_process_id());54215422os::fork_and_exec(buf);5423yes = false;5424}5425return yes;5426}542754285429// Java/Compiler thread:5430//5431// Low memory addresses5432// P0 +------------------------+5433// | |\ Java thread created by VM does not have glibc5434// | glibc guard page | - guard page, attached Java thread usually has5435// | |/ 1 glibc guard page.5436// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()5437// | |\5438// | HotSpot Guard Pages | - red, yellow and reserved pages5439// | |/5440// +------------------------+ StackOverflow::stack_reserved_zone_base()5441// | |\5442// | Normal Stack | -5443// | |/5444// P2 +------------------------+ Thread::stack_base()5445//5446// Non-Java thread:5447//5448// Low memory addresses5449// P0 +------------------------+5450// | |\5451// | glibc guard page | - usually 1 page5452// | |/5453// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()5454// | |\5455// | Normal Stack | -5456// | |/5457// P2 +------------------------+ Thread::stack_base()5458//5459// ** P1 (aka bottom) and size (P2 = P1 - size) are the address and stack size5460// returned from pthread_attr_getstack().5461// ** Due to NPTL implementation error, linux takes the glibc guard page out5462// of the stack size given in pthread_attr. We work around this for5463// threads created by the VM. (We adapt bottom to be P1 and size accordingly.)5464//5465#ifndef ZERO5466static void current_stack_region(address * bottom, size_t * size) {5467if (os::is_primordial_thread()) {5468// primordial thread needs special handling because pthread_getattr_np()5469// may return bogus value.5470*bottom = os::Linux::initial_thread_stack_bottom();5471*size = os::Linux::initial_thread_stack_size();5472} else {5473pthread_attr_t attr;54745475int rslt = pthread_getattr_np(pthread_self(), &attr);54765477// JVM needs to know exact stack location, abort if it fails5478if (rslt != 0) {5479if (rslt == ENOMEM) {5480vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");5481} else {5482fatal("pthread_getattr_np failed with error = %d", rslt);5483}5484}54855486if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {5487fatal("Cannot locate current stack attributes!");5488}54895490// Work around NPTL stack guard error.5491size_t guard_size = 0;5492rslt = pthread_attr_getguardsize(&attr, &guard_size);5493if (rslt != 0) {5494fatal("pthread_attr_getguardsize failed with error = %d", rslt);5495}5496*bottom += guard_size;5497*size -= guard_size;54985499pthread_attr_destroy(&attr);55005501}5502assert(os::current_stack_pointer() >= *bottom &&5503os::current_stack_pointer() < *bottom + *size, "just checking");5504}55055506address os::current_stack_base() {5507address bottom;5508size_t size;5509current_stack_region(&bottom, &size);5510return (bottom + size);5511}55125513size_t os::current_stack_size() {5514// This stack size includes the usable stack and HotSpot guard pages5515// (for the threads that have Hotspot guard pages).5516address bottom;5517size_t size;5518current_stack_region(&bottom, &size);5519return size;5520}5521#endif55225523static inline struct timespec get_mtime(const char* filename) {5524struct stat st;5525int ret = os::stat(filename, &st);5526assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));5527return st.st_mtim;5528}55295530int os::compare_file_modified_times(const char* file1, const char* file2) {5531struct timespec filetime1 = get_mtime(file1);5532struct timespec filetime2 = get_mtime(file2);5533int diff = filetime1.tv_sec - filetime2.tv_sec;5534if (diff == 0) {5535return filetime1.tv_nsec - filetime2.tv_nsec;5536}5537return diff;5538}55395540bool os::supports_map_sync() {5541return true;5542}55435544void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {5545unsigned long long start = (unsigned long long)addr;5546unsigned long long end = start + bytes;5547FILE* f = ::fopen("/proc/self/maps", "r");5548int num_found = 0;5549if (f != NULL) {5550st->print("Range [%llx-%llx) contains: ", start, end);5551char line[512];5552while(fgets(line, sizeof(line), f) == line) {5553unsigned long long a1 = 0;5554unsigned long long a2 = 0;5555if (::sscanf(line, "%llx-%llx", &a1, &a2) == 2) {5556// Lets print out every range which touches ours.5557if ((a1 >= start && a1 < end) || // left leg in5558(a2 >= start && a2 < end) || // right leg in5559(a1 < start && a2 >= end)) { // superimposition5560num_found ++;5561st->print("%s", line); // line includes \n5562}5563}5564}5565::fclose(f);5566if (num_found == 0) {5567st->print("nothing.");5568}5569st->cr();5570}5571}557255735574