Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/os/aix/vm/os_aix.cpp
32284 views
/*1* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.2* Copyright 2012, 2014 SAP AG. All rights reserved.3* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.4*5* This code is free software; you can redistribute it and/or modify it6* under the terms of the GNU General Public License version 2 only, as7* published by the Free Software Foundation.8*9* This code is distributed in the hope that it will be useful, but WITHOUT10* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License12* version 2 for more details (a copy is included in the LICENSE file that13* accompanied this code).14*15* You should have received a copy of the GNU General Public License version16* 2 along with this work; if not, write to the Free Software Foundation,17* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.18*19* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA20* or visit www.oracle.com if you need additional information or have any21* questions.22*23*/2425// According to the AIX OS doc #pragma alloca must be used26// with C++ compiler before referencing the function alloca()27#pragma alloca2829// no precompiled headers30#include "classfile/classLoader.hpp"31#include "classfile/systemDictionary.hpp"32#include "classfile/vmSymbols.hpp"33#include "code/icBuffer.hpp"34#include "code/vtableStubs.hpp"35#include "compiler/compileBroker.hpp"36#include "interpreter/interpreter.hpp"37#include "jvm_aix.h"38#include "libperfstat_aix.hpp"39#include "loadlib_aix.hpp"40#include "memory/allocation.inline.hpp"41#include "memory/filemap.hpp"42#include "mutex_aix.inline.hpp"43#include "oops/oop.inline.hpp"44#include "os_share_aix.hpp"45#include "porting_aix.hpp"46#include "prims/jniFastGetField.hpp"47#include "prims/jvm.h"48#include "prims/jvm_misc.hpp"49#include "runtime/arguments.hpp"50#include "runtime/extendedPC.hpp"51#include "runtime/globals.hpp"52#include "runtime/interfaceSupport.hpp"53#include "runtime/java.hpp"54#include "runtime/javaCalls.hpp"55#include "runtime/mutexLocker.hpp"56#include "runtime/objectMonitor.hpp"57#include "runtime/orderAccess.inline.hpp"58#include "runtime/osThread.hpp"59#include "runtime/perfMemory.hpp"60#include "runtime/sharedRuntime.hpp"61#include "runtime/statSampler.hpp"62#include "runtime/stubRoutines.hpp"63#include "runtime/thread.inline.hpp"64#include "runtime/threadCritical.hpp"65#include "runtime/timer.hpp"66#include "services/attachListener.hpp"67#include "services/runtimeService.hpp"68#include "utilities/decoder.hpp"69#include "utilities/defaultStream.hpp"70#include "utilities/events.hpp"71#include "utilities/growableArray.hpp"72#include "utilities/vmError.hpp"7374// put OS-includes here (sorted alphabetically)75#include <errno.h>76#include <fcntl.h>77#include <inttypes.h>78#include <poll.h>79#include <procinfo.h>80#include <pthread.h>81#include <pwd.h>82#include <semaphore.h>83#include <signal.h>84#include <stdint.h>85#include <stdio.h>86#include <string.h>87#include <unistd.h>88#include <sys/ioctl.h>89#include <sys/ipc.h>90#include <sys/mman.h>91#include <sys/resource.h>92#include <sys/select.h>93#include <sys/shm.h>94#include <sys/socket.h>95#include <sys/stat.h>96#include <sys/sysinfo.h>97#include <sys/systemcfg.h>98#include <sys/time.h>99#include <sys/times.h>100#include <sys/types.h>101#include <sys/utsname.h>102#include <sys/vminfo.h>103#include <sys/wait.h>104105// Add missing declarations (should be in procinfo.h but 
isn't until AIX 6.1).106#if !defined(_AIXVERSION_610)107extern "C" {108int getthrds64(pid_t ProcessIdentifier,109struct thrdentry64* ThreadBuffer,110int ThreadSize,111tid64_t* IndexPointer,112int Count);113}114#endif115116#define MAX_PATH (2 * K)117118// for timer info max values which include all bits119#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)120// for multipage initialization error analysis (in 'g_multipage_error')121#define ERROR_MP_OS_TOO_OLD 100122#define ERROR_MP_EXTSHM_ACTIVE 101123#define ERROR_MP_VMGETINFO_FAILED 102124#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103125126// The semantics in this file are thus that codeptr_t is a *real code ptr*.127// This means that any function taking codeptr_t as arguments will assume128// a real codeptr and won't handle function descriptors (eg getFuncName),129// whereas functions taking address as args will deal with function130// descriptors (eg os::dll_address_to_library_name).131typedef unsigned int* codeptr_t;132133// Typedefs for stackslots, stack pointers, pointers to op codes.134typedef unsigned long stackslot_t;135typedef stackslot_t* stackptr_t;136137// Excerpts from systemcfg.h definitions newer than AIX 5.3.138#ifndef PV_7139#define PV_7 0x200000 /* Power PC 7 */140#define PV_7_Compat 0x208000 /* Power PC 7 */141#endif142#ifndef PV_8143#define PV_8 0x300000 /* Power PC 8 */144#define PV_8_Compat 0x308000 /* Power PC 8 */145#endif146147#define trcVerbose(fmt, ...) { /* PPC port */ \148if (Verbose) { \149fprintf(stderr, fmt, ##__VA_ARGS__); \150fputc('\n', stderr); fflush(stderr); \151} \152}153#define trc(fmt, ...) /* PPC port */154155#define ERRBYE(s) { \156trcVerbose(s); \157return -1; \158}159160// query dimensions of the stack of the calling thread161static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);162163// function to check a given stack pointer against given stack limits164inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {165if (((uintptr_t)sp) & 0x7) {166return false;167}168if (sp > stack_base) {169return false;170}171if (sp < (stackptr_t) ((address)stack_base - stack_size)) {172return false;173}174return true;175}176177// returns true if function is a valid codepointer178inline bool is_valid_codepointer(codeptr_t p) {179if (!p) {180return false;181}182if (((uintptr_t)p) & 0x3) {183return false;184}185if (LoadedLibraries::find_for_text_address((address)p) == NULL) {186return false;187}188return true;189}190191// Macro to check a given stack pointer against given stack limits and to die if test fails.192#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \193guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \194}195196// Macro to check the current stack pointer against given stacklimits.197#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \198address sp; \199sp = os::current_stack_pointer(); \200CHECK_STACK_PTR(sp, stack_base, stack_size); \201}202203////////////////////////////////////////////////////////////////////////////////204// global variables (for a description see os_aix.hpp)205206julong os::Aix::_physical_memory = 0;207pthread_t os::Aix::_main_thread = ((pthread_t)0);208int os::Aix::_page_size = -1;209int os::Aix::_on_pase = -1;210int os::Aix::_os_version = -1;211int os::Aix::_stack_page_size = -1;212size_t os::Aix::_shm_default_page_size = -1;213int os::Aix::_can_use_64K_pages = -1;214int os::Aix::_can_use_16M_pages = -1;215int os::Aix::_xpg_sus_mode = 
-1;216int os::Aix::_extshm = -1;217int os::Aix::_logical_cpus = -1;218219////////////////////////////////////////////////////////////////////////////////220// local variables221222static int g_multipage_error = -1; // error analysis for multipage initialization223static jlong initial_time_count = 0;224static int clock_tics_per_sec = 100;225static sigset_t check_signal_done; // For diagnostics to print a message once (see run_periodic_checks)226static bool check_signals = true;227static pid_t _initial_pid = 0;228static int SR_signum = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)229static sigset_t SR_sigset;230static pthread_mutex_t dl_mutex; // Used to protect dlsym() calls.231232julong os::available_memory() {233return Aix::available_memory();234}235236julong os::Aix::available_memory() {237os::Aix::meminfo_t mi;238if (os::Aix::get_meminfo(&mi)) {239return mi.real_free;240} else {241return 0xFFFFFFFFFFFFFFFFLL;242}243}244245julong os::physical_memory() {246return Aix::physical_memory();247}248249////////////////////////////////////////////////////////////////////////////////250// environment support251252bool os::getenv(const char* name, char* buf, int len) {253const char* val = ::getenv(name);254if (val != NULL && strlen(val) < (size_t)len) {255strcpy(buf, val);256return true;257}258if (len > 0) buf[0] = 0; // return a null string259return false;260}261262// Return true if user is running as root.263264bool os::have_special_privileges() {265static bool init = false;266static bool privileges = false;267if (!init) {268privileges = (getuid() != geteuid()) || (getgid() != getegid());269init = true;270}271return privileges;272}273274// Helper function, emulates disclaim64 using multiple 32bit disclaims275// because we cannot use disclaim64() on AS/400 and old AIX releases.276static bool my_disclaim64(char* addr, size_t size) {277278if (size == 0) {279return true;280}281282// Maximum size 32bit disclaim() accepts. 
(Theoretically 4GB, but I just do not trust that.)283const unsigned int maxDisclaimSize = 0x80000000;284285const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);286const unsigned int lastDisclaimSize = (size % maxDisclaimSize);287288char* p = addr;289290for (int i = 0; i < numFullDisclaimsNeeded; i ++) {291if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {292trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);293return false;294}295p += maxDisclaimSize;296}297298if (lastDisclaimSize > 0) {299if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {300trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);301return false;302}303}304305return true;306}307308// Cpu architecture string309#if defined(PPC32)310static char cpu_arch[] = "ppc";311#elif defined(PPC64)312static char cpu_arch[] = "ppc64";313#else314#error Add appropriate cpu_arch setting315#endif316317318// Given an address, returns the size of the page backing that address.319size_t os::Aix::query_pagesize(void* addr) {320321vm_page_info pi;322pi.addr = (uint64_t)addr;323if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {324return pi.pagesize;325} else {326fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);327assert(false, "vmgetinfo failed to retrieve page size");328return SIZE_4K;329}330331}332333// Returns the kernel thread id of the currently running thread.334pid_t os::Aix::gettid() {335return (pid_t) thread_self();336}337338void os::Aix::initialize_system_info() {339340// Get the number of online(logical) cpus instead of configured.341os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);342assert(_processor_count > 0, "_processor_count must be > 0");343344// Retrieve total physical storage.345os::Aix::meminfo_t mi;346if (!os::Aix::get_meminfo(&mi)) {347fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);348assert(false, "os::Aix::get_meminfo failed.");349}350_physical_memory = (julong) mi.real_total;351}352353// Helper function for tracing page sizes.354static const char* describe_pagesize(size_t pagesize) {355switch (pagesize) {356case SIZE_4K : return "4K";357case SIZE_64K: return "64K";358case SIZE_16M: return "16M";359case SIZE_16G: return "16G";360default:361assert(false, "surprise");362return "??";363}364}365366// Retrieve information about multipage size support. Will initialize367// Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,368// Aix::_can_use_16M_pages.369// Must be called before calling os::large_page_init().370void os::Aix::query_multipage_support() {371372guarantee(_page_size == -1 &&373_stack_page_size == -1 &&374_can_use_64K_pages == -1 &&375_can_use_16M_pages == -1 &&376g_multipage_error == -1,377"do not call twice");378379_page_size = ::sysconf(_SC_PAGESIZE);380381// This really would surprise me.382assert(_page_size == SIZE_4K, "surprise!");383384385// Query default data page size (default page size for C-Heap, pthread stacks and .bss).386// Default data page size is influenced either by linker options (-bdatapsize)387// or by environment variable LDR_CNTRL (suboption DATAPSIZE). 
If none is given,388// default should be 4K.389size_t data_page_size = SIZE_4K;390{391void* p = ::malloc(SIZE_16M);392guarantee(p != NULL, "malloc failed");393data_page_size = os::Aix::query_pagesize(p);394::free(p);395}396397// query default shm page size (LDR_CNTRL SHMPSIZE)398{399const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);400guarantee(shmid != -1, "shmget failed");401void* p = ::shmat(shmid, NULL, 0);402::shmctl(shmid, IPC_RMID, NULL);403guarantee(p != (void*) -1, "shmat failed");404_shm_default_page_size = os::Aix::query_pagesize(p);405::shmdt(p);406}407408// before querying the stack page size, make sure we are not running as primordial409// thread (because primordial thread's stack may have different page size than410// pthread thread stacks). Running a VM on the primordial thread won't work for a411// number of reasons so we may just as well guarantee it here412guarantee(!os::is_primordial_thread(), "Must not be called for primordial thread");413414// query stack page size415{416int dummy = 0;417_stack_page_size = os::Aix::query_pagesize(&dummy);418// everything else would surprise me and should be looked into419guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");420// also, just for completeness: pthread stacks are allocated from C heap, so421// stack page size should be the same as data page size422guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");423}424425// EXTSHM is bad: among other things, it prevents setting pagesize dynamically426// for system V shm.427if (Aix::extshm()) {428if (Verbose) {429fprintf(stderr, "EXTSHM is active - will disable large page support.\n"430"Please make sure EXTSHM is OFF for large page support.\n");431}432g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;433_can_use_64K_pages = _can_use_16M_pages = 0;434goto query_multipage_support_end;435}436437// now check which page sizes the OS claims it supports, and of those, which actually can be used.438{439const int MAX_PAGE_SIZES = 4;440psize_t sizes[MAX_PAGE_SIZES];441const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);442if (num_psizes == -1) {443if (Verbose) {444fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);445fprintf(stderr, "disabling multipage support.\n");446}447g_multipage_error = ERROR_MP_VMGETINFO_FAILED;448_can_use_64K_pages = _can_use_16M_pages = 0;449goto query_multipage_support_end;450}451guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");452assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");453if (Verbose) {454fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) 
returns %d supported page sizes: ", num_psizes);455for (int i = 0; i < num_psizes; i ++) {456fprintf(stderr, " %s ", describe_pagesize(sizes[i]));457}458fprintf(stderr, " .\n");459}460461// Can we use 64K, 16M pages?462_can_use_64K_pages = 0;463_can_use_16M_pages = 0;464for (int i = 0; i < num_psizes; i ++) {465if (sizes[i] == SIZE_64K) {466_can_use_64K_pages = 1;467} else if (sizes[i] == SIZE_16M) {468_can_use_16M_pages = 1;469}470}471472if (!_can_use_64K_pages) {473g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;474}475476// Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,477// there must be an actual 16M page pool, and we must run with enough rights.478if (_can_use_16M_pages) {479const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);480guarantee(shmid != -1, "shmget failed");481struct shmid_ds shm_buf = { 0 };482shm_buf.shm_pagesize = SIZE_16M;483const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0 ? true : false;484const int en = errno;485::shmctl(shmid, IPC_RMID, NULL);486if (!can_set_pagesize) {487if (Verbose) {488fprintf(stderr, "Failed to allocate even one misely 16M page. shmctl failed with %d (%s).\n"489"Will deactivate 16M support.\n", en, strerror(en));490}491_can_use_16M_pages = 0;492}493}494495} // end: check which pages can be used for shared memory496497query_multipage_support_end:498499guarantee(_page_size != -1 &&500_stack_page_size != -1 &&501_can_use_64K_pages != -1 &&502_can_use_16M_pages != -1, "Page sizes not properly initialized");503504if (_can_use_64K_pages) {505g_multipage_error = 0;506}507508if (Verbose) {509fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));510fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));511fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));512fprintf(stderr, "Can use 64K pages dynamically with shared meory: %s\n", (_can_use_64K_pages ? "yes" :"no"));513fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? 
"yes" :"no"));514fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);515}516517} // end os::Aix::query_multipage_support()518519void os::init_system_properties_values() {520521#define DEFAULT_LIBPATH "/usr/lib:/lib"522#define EXTENSIONS_DIR "/lib/ext"523#define ENDORSED_DIR "/lib/endorsed"524525// Buffer that fits several sprintfs.526// Note that the space for the trailing null is provided527// by the nulls included by the sizeof operator.528const size_t bufsize =529MAX3((size_t)MAXPATHLEN, // For dll_dir & friends.530(size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR), // extensions dir531(size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir532char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);533534// sysclasspath, java_home, dll_dir535{536char *pslash;537os::jvm_path(buf, bufsize);538539// Found the full path to libjvm.so.540// Now cut the path to <java_home>/jre if we can.541*(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.542pslash = strrchr(buf, '/');543if (pslash != NULL) {544*pslash = '\0'; // Get rid of /{client|server|hotspot}.545}546Arguments::set_dll_dir(buf);547548if (pslash != NULL) {549pslash = strrchr(buf, '/');550if (pslash != NULL) {551*pslash = '\0'; // Get rid of /<arch>.552pslash = strrchr(buf, '/');553if (pslash != NULL) {554*pslash = '\0'; // Get rid of /lib.555}556}557}558Arguments::set_java_home(buf);559set_boot_path('/', ':');560}561562// Where to look for native libraries.563564// On Aix we get the user setting of LIBPATH.565// Eventually, all the library path setting will be done here.566// Get the user setting of LIBPATH.567const char *v = ::getenv("LIBPATH");568const char *v_colon = ":";569if (v == NULL) { v = ""; v_colon = ""; }570571// Concatenate user and invariant part of ld_library_path.572// That's +1 for the colon and +1 for the trailing '\0'.573char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);574sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);575Arguments::set_library_path(ld_library_path);576FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);577578// Extensions directories.579sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());580Arguments::set_ext_dirs(buf);581582// Endorsed standards default directory.583sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());584Arguments::set_endorsed_dirs(buf);585586FREE_C_HEAP_ARRAY(char, buf, mtInternal);587588#undef DEFAULT_LIBPATH589#undef EXTENSIONS_DIR590#undef ENDORSED_DIR591}592593////////////////////////////////////////////////////////////////////////////////594// breakpoint support595596void os::breakpoint() {597BREAKPOINT;598}599600extern "C" void breakpoint() {601// use debugger to set breakpoint here602}603604////////////////////////////////////////////////////////////////////////////////605// signal support606607debug_only(static bool signal_sets_initialized = false);608static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;609610bool os::Aix::is_sig_ignored(int sig) {611struct sigaction oact;612sigaction(sig, (struct sigaction*)NULL, &oact);613void* ohlr = oact.sa_sigaction ? 
CAST_FROM_FN_PTR(void*, oact.sa_sigaction)614: CAST_FROM_FN_PTR(void*, oact.sa_handler);615if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {616return true;617} else {618return false;619}620}621622void os::Aix::signal_sets_init() {623// Should also have an assertion stating we are still single-threaded.624assert(!signal_sets_initialized, "Already initialized");625// Fill in signals that are necessarily unblocked for all threads in626// the VM. Currently, we unblock the following signals:627// SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden628// by -Xrs (=ReduceSignalUsage));629// BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all630// other threads. The "ReduceSignalUsage" boolean tells us not to alter631// the dispositions or masks wrt these signals.632// Programs embedding the VM that want to use the above signals for their633// own purposes must, at this time, use the "-Xrs" option to prevent634// interference with shutdown hooks and BREAK_SIGNAL thread dumping.635// (See bug 4345157, and other related bugs).636// In reality, though, unblocking these signals is really a nop, since637// these signals are not blocked by default.638sigemptyset(&unblocked_sigs);639sigemptyset(&allowdebug_blocked_sigs);640sigaddset(&unblocked_sigs, SIGILL);641sigaddset(&unblocked_sigs, SIGSEGV);642sigaddset(&unblocked_sigs, SIGBUS);643sigaddset(&unblocked_sigs, SIGFPE);644sigaddset(&unblocked_sigs, SIGTRAP);645sigaddset(&unblocked_sigs, SIGDANGER);646sigaddset(&unblocked_sigs, SR_signum);647648if (!ReduceSignalUsage) {649if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {650sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);651sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);652}653if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {654sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);655sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);656}657if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {658sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);659sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);660}661}662// Fill in signals that are blocked by all but the VM thread.663sigemptyset(&vm_sigs);664if (!ReduceSignalUsage)665sigaddset(&vm_sigs, BREAK_SIGNAL);666debug_only(signal_sets_initialized = true);667}668669// These are signals that are unblocked while a thread is running Java.670// (For some reason, they get blocked by default.)671sigset_t* os::Aix::unblocked_signals() {672assert(signal_sets_initialized, "Not initialized");673return &unblocked_sigs;674}675676// These are the signals that are blocked while a (non-VM) thread is677// running Java. Only the VM thread handles these signals.678sigset_t* os::Aix::vm_signals() {679assert(signal_sets_initialized, "Not initialized");680return &vm_sigs;681}682683// These are signals that are blocked during cond_wait to allow debugger in684sigset_t* os::Aix::allowdebug_blocked_signals() {685assert(signal_sets_initialized, "Not initialized");686return &allowdebug_blocked_sigs;687}688689void os::Aix::hotspot_sigmask(Thread* thread) {690691//Save caller's signal mask before setting VM signal mask692sigset_t caller_sigmask;693pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);694695OSThread* osthread = thread->osthread();696osthread->set_caller_sigmask(caller_sigmask);697698pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);699700if (!ReduceSignalUsage) {701if (thread->is_VM_thread()) {702// Only the VM thread handles BREAK_SIGNAL ...703pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);704} else {705// ... 
all other threads block BREAK_SIGNAL706pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);707}708}709}710711// retrieve memory information.712// Returns false if something went wrong;713// content of pmi undefined in this case.714bool os::Aix::get_meminfo(meminfo_t* pmi) {715716assert(pmi, "get_meminfo: invalid parameter");717718memset(pmi, 0, sizeof(meminfo_t));719720if (os::Aix::on_pase()) {721722Unimplemented();723return false;724725} else {726727// On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics728// See:729// http://publib.boulder.ibm.com/infocenter/systems/index.jsp730// ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm731// http://publib.boulder.ibm.com/infocenter/systems/index.jsp732// ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm733734perfstat_memory_total_t psmt;735memset (&psmt, '\0', sizeof(psmt));736const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);737if (rc == -1) {738fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);739assert(0, "perfstat_memory_total() failed");740return false;741}742743assert(rc == 1, "perfstat_memory_total() - weird return code");744745// excerpt from746// http://publib.boulder.ibm.com/infocenter/systems/index.jsp747// ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm748// The fields of perfstat_memory_total_t:749// u_longlong_t virt_total Total virtual memory (in 4 KB pages).750// u_longlong_t real_total Total real memory (in 4 KB pages).751// u_longlong_t real_free Free real memory (in 4 KB pages).752// u_longlong_t pgsp_total Total paging space (in 4 KB pages).753// u_longlong_t pgsp_free Free paging space (in 4 KB pages).754755pmi->virt_total = psmt.virt_total * 4096;756pmi->real_total = psmt.real_total * 4096;757pmi->real_free = psmt.real_free * 4096;758pmi->pgsp_total = psmt.pgsp_total * 4096;759pmi->pgsp_free = psmt.pgsp_free * 4096;760761return true;762763}764} // end os::Aix::get_meminfo765766// Retrieve global cpu information.767// Returns false if something went wrong;768// the content of pci is undefined in this case.769bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {770assert(pci, "get_cpuinfo: invalid parameter");771memset(pci, 0, sizeof(cpuinfo_t));772773perfstat_cpu_total_t psct;774memset (&psct, '\0', sizeof(psct));775776if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {777fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);778assert(0, "perfstat_cpu_total() failed");779return false;780}781782// global cpu information783strcpy (pci->description, psct.description);784pci->processorHZ = psct.processorHZ;785pci->ncpus = psct.ncpus;786os::Aix::_logical_cpus = psct.ncpus;787for (int i = 0; i < 3; i++) {788pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);789}790791// get the processor version from _system_configuration792switch (_system_configuration.version) {793case PV_8:794strcpy(pci->version, "Power PC 8");795break;796case PV_7:797strcpy(pci->version, "Power PC 7");798break;799case PV_6_1:800strcpy(pci->version, "Power PC 6 DD1.x");801break;802case PV_6:803strcpy(pci->version, "Power PC 6");804break;805case PV_5:806strcpy(pci->version, "Power PC 5");807break;808case PV_5_2:809strcpy(pci->version, "Power PC 5_2");810break;811case PV_5_3:812strcpy(pci->version, "Power PC 5_3");813break;814case PV_5_Compat:815strcpy(pci->version, "PV_5_Compat");816break;817case PV_6_Compat:818strcpy(pci->version, "PV_6_Compat");819break;820case PV_7_Compat:821strcpy(pci->version, 
"PV_7_Compat");822break;823case PV_8_Compat:824strcpy(pci->version, "PV_8_Compat");825break;826default:827strcpy(pci->version, "unknown");828}829830return true;831832} //end os::Aix::get_cpuinfo833834//////////////////////////////////////////////////////////////////////////////835// detecting pthread library836837void os::Aix::libpthread_init() {838return;839}840841//////////////////////////////////////////////////////////////////////////////842// create new thread843844// Thread start routine for all newly created threads845static void *java_start(Thread *thread) {846847// find out my own stack dimensions848{849// actually, this should do exactly the same as thread->record_stack_base_and_size...850address base = 0;851size_t size = 0;852query_stack_dimensions(&base, &size);853thread->set_stack_base(base);854thread->set_stack_size(size);855}856857// Do some sanity checks.858CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());859860// Try to randomize the cache line index of hot stack frames.861// This helps when threads of the same stack traces evict each other's862// cache lines. The threads can be either from the same JVM instance, or863// from different JVM instances. The benefit is especially true for864// processors with hyperthreading technology.865866static int counter = 0;867int pid = os::current_process_id();868alloca(((pid ^ counter++) & 7) * 128);869870ThreadLocalStorage::set_thread(thread);871872OSThread* osthread = thread->osthread();873874// thread_id is kernel thread id (similar to Solaris LWP id)875osthread->set_thread_id(os::Aix::gettid());876877// initialize signal mask for this thread878os::Aix::hotspot_sigmask(thread);879880// initialize floating point control register881os::Aix::init_thread_fpu_state();882883assert(osthread->get_state() == RUNNABLE, "invalid os thread state");884885// call one more level start routine886thread->run();887888return 0;889}890891bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {892893// We want the whole function to be synchronized.894ThreadCritical cs;895896assert(thread->osthread() == NULL, "caller responsible");897898// Allocate the OSThread object899OSThread* osthread = new OSThread(NULL, NULL);900if (osthread == NULL) {901return false;902}903904// set the correct thread state905osthread->set_thread_type(thr_type);906907// Initial state is ALLOCATED but not INITIALIZED908osthread->set_state(ALLOCATED);909910thread->set_osthread(osthread);911912// init thread attributes913pthread_attr_t attr;914pthread_attr_init(&attr);915guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");916917// Make sure we run in 1:1 kernel-user-thread mode.918if (os::Aix::on_aix()) {919guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");920guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");921} // end: aix922923// Start in suspended state, and in os::thread_start, wake the thread up.924guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");925926// calculate stack size if it's not specified by caller927if (os::Aix::supports_variable_stack_size()) {928if (stack_size == 0) {929stack_size = os::Aix::default_stack_size(thr_type);930931switch (thr_type) {932case os::java_thread:933// Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.934assert(JavaThread::stack_size_at_create() > 0, "this should be set");935stack_size = JavaThread::stack_size_at_create();936break;937case 
os::compiler_thread:938if (CompilerThreadStackSize > 0) {939stack_size = (size_t)(CompilerThreadStackSize * K);940break;941} // else fall through:942// use VMThreadStackSize if CompilerThreadStackSize is not defined943case os::vm_thread:944case os::pgc_thread:945case os::cgc_thread:946case os::watcher_thread:947if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);948break;949}950}951952stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);953pthread_attr_setstacksize(&attr, stack_size);954} //else let thread_create() pick the default value (96 K on AIX)955956pthread_t tid;957int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);958959pthread_attr_destroy(&attr);960961if (ret == 0) {962// PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));963} else {964if (PrintMiscellaneous && (Verbose || WizardMode)) {965perror("pthread_create()");966}967// Need to clean up stuff we've allocated so far968thread->set_osthread(NULL);969delete osthread;970return false;971}972973// Store pthread info into the OSThread974osthread->set_pthread_id(tid);975976return true;977}978979/////////////////////////////////////////////////////////////////////////////980// attach existing thread981982// bootstrap the main thread983bool os::create_main_thread(JavaThread* thread) {984assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");985return create_attached_thread(thread);986}987988bool os::create_attached_thread(JavaThread* thread) {989#ifdef ASSERT990thread->verify_not_published();991#endif992993// Allocate the OSThread object994OSThread* osthread = new OSThread(NULL, NULL);995996if (osthread == NULL) {997return false;998}9991000// Store pthread info into the OSThread1001osthread->set_thread_id(os::Aix::gettid());1002osthread->set_pthread_id(::pthread_self());10031004// initialize floating point control register1005os::Aix::init_thread_fpu_state();10061007// some sanity checks1008CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());10091010// Initial thread state is RUNNABLE1011osthread->set_state(RUNNABLE);10121013thread->set_osthread(osthread);10141015if (UseNUMA) {1016int lgrp_id = os::numa_get_group_id();1017if (lgrp_id != -1) {1018thread->set_lgrp_id(lgrp_id);1019}1020}10211022// initialize signal mask for this thread1023// and save the caller's signal mask1024os::Aix::hotspot_sigmask(thread);10251026return true;1027}10281029void os::pd_start_thread(Thread* thread) {1030int status = pthread_continue_np(thread->osthread()->pthread_id());1031assert(status == 0, "thr_continue failed");1032}10331034// Free OS resources related to the OSThread1035void os::free_thread(OSThread* osthread) {1036assert(osthread != NULL, "osthread not set");10371038if (Thread::current()->osthread() == osthread) {1039// Restore caller's signal mask1040sigset_t sigmask = osthread->caller_sigmask();1041pthread_sigmask(SIG_SETMASK, &sigmask, NULL);1042}10431044delete osthread;1045}10461047//////////////////////////////////////////////////////////////////////////////1048// thread local storage10491050int os::allocate_thread_local_storage() {1051pthread_key_t key;1052int rslt = pthread_key_create(&key, NULL);1053assert(rslt == 0, "cannot allocate thread local storage");1054return (int)key;1055}10561057// Note: This is currently not used by VM, as we don't destroy TLS key1058// on VM exit.1059void os::free_thread_local_storage(int index) {1060int rslt = pthread_key_delete((pthread_key_t)index);1061assert(rslt == 0, "invalid 
index");1062}10631064void os::thread_local_storage_at_put(int index, void* value) {1065int rslt = pthread_setspecific((pthread_key_t)index, value);1066assert(rslt == 0, "pthread_setspecific failed");1067}10681069extern "C" Thread* get_thread() {1070return ThreadLocalStorage::thread();1071}10721073////////////////////////////////////////////////////////////////////////////////1074// time support10751076// Time since start-up in seconds to a fine granularity.1077// Used by VMSelfDestructTimer and the MemProfiler.1078double os::elapsedTime() {1079return (double)(os::elapsed_counter()) * 0.000001;1080}10811082jlong os::elapsed_counter() {1083timeval time;1084int status = gettimeofday(&time, NULL);1085return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;1086}10871088jlong os::elapsed_frequency() {1089return (1000 * 1000);1090}10911092// For now, we say that linux does not support vtime. I have no idea1093// whether it can actually be made to (DLD, 9/13/05).10941095bool os::supports_vtime() { return false; }1096bool os::enable_vtime() { return false; }1097bool os::vtime_enabled() { return false; }1098double os::elapsedVTime() {1099// better than nothing, but not much1100return elapsedTime();1101}11021103jlong os::javaTimeMillis() {1104timeval time;1105int status = gettimeofday(&time, NULL);1106assert(status != -1, "aix error at gettimeofday()");1107return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);1108}11091110// We need to manually declare mread_real_time,1111// because IBM didn't provide a prototype in time.h.1112// (they probably only ever tested in C, not C++)1113extern "C"1114int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);11151116jlong os::javaTimeNanos() {1117if (os::Aix::on_pase()) {1118Unimplemented();1119return 0;1120} else {1121// On AIX use the precision of processors real time clock1122// or time base registers.1123timebasestruct_t time;1124int rc;11251126// If the CPU has a time register, it will be used and1127// we have to convert to real time first. 
After convertion we have following data:1128// time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]1129// time.tb_low [nanoseconds after the last full second above]1130// We better use mread_real_time here instead of read_real_time1131// to ensure that we will get a monotonic increasing time.1132if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {1133rc = time_base_to_time(&time, TIMEBASE_SZ);1134assert(rc != -1, "aix error at time_base_to_time()");1135}1136return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);1137}1138}11391140void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {1141{1142// gettimeofday - based on time in seconds since the Epoch thus does not wrap1143info_ptr->max_value = ALL_64_BITS;11441145// gettimeofday is a real time clock so it skips1146info_ptr->may_skip_backward = true;1147info_ptr->may_skip_forward = true;1148}11491150info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time1151}11521153// Return the real, user, and system times in seconds from an1154// arbitrary fixed point in the past.1155bool os::getTimesSecs(double* process_real_time,1156double* process_user_time,1157double* process_system_time) {1158struct tms ticks;1159clock_t real_ticks = times(&ticks);11601161if (real_ticks == (clock_t) (-1)) {1162return false;1163} else {1164double ticks_per_second = (double) clock_tics_per_sec;1165*process_user_time = ((double) ticks.tms_utime) / ticks_per_second;1166*process_system_time = ((double) ticks.tms_stime) / ticks_per_second;1167*process_real_time = ((double) real_ticks) / ticks_per_second;11681169return true;1170}1171}11721173char * os::local_time_string(char *buf, size_t buflen) {1174struct tm t;1175time_t long_time;1176time(&long_time);1177localtime_r(&long_time, &t);1178jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",1179t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,1180t.tm_hour, t.tm_min, t.tm_sec);1181return buf;1182}11831184struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {1185return localtime_r(clock, res);1186}11871188////////////////////////////////////////////////////////////////////////////////1189// runtime exit support11901191// Note: os::shutdown() might be called very early during initialization, or1192// called from signal handler. Before adding something to os::shutdown(), make1193// sure it is async-safe and can handle partially initialized VM.1194void os::shutdown() {11951196// allow PerfMemory to attempt cleanup of any persistent resources1197perfMemory_exit();11981199// needs to remove object in file system1200AttachListener::abort();12011202// flush buffered output, finish log files1203ostream_abort();12041205// Check for abort hook1206abort_hook_t abort_hook = Arguments::abort_hook();1207if (abort_hook != NULL) {1208abort_hook();1209}1210}12111212// Note: os::abort() might be called very early during initialization, or1213// called from signal handler. 
Before adding something to os::abort(), make1214// sure it is async-safe and can handle partially initialized VM.1215void os::abort(bool dump_core) {1216os::shutdown();1217if (dump_core) {1218#ifndef PRODUCT1219fdStream out(defaultStream::output_fd());1220out.print_raw("Current thread is ");1221char buf[16];1222jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());1223out.print_raw_cr(buf);1224out.print_raw_cr("Dumping core ...");1225#endif1226::abort(); // dump core1227}12281229::exit(1);1230}12311232// Die immediately, no exit hook, no abort hook, no cleanup.1233void os::die() {1234::abort();1235}12361237// This method is a copy of JDK's sysGetLastErrorString1238// from src/solaris/hpi/src/system_md.c12391240size_t os::lasterror(char *buf, size_t len) {1241if (errno == 0) return 0;12421243const char *s = ::strerror(errno);1244size_t n = ::strlen(s);1245if (n >= len) {1246n = len - 1;1247}1248::strncpy(buf, s, n);1249buf[n] = '\0';1250return n;1251}12521253intx os::current_thread_id() { return (intx)pthread_self(); }12541255int os::current_process_id() {12561257// This implementation returns a unique pid, the pid of the1258// launcher thread that starts the vm 'process'.12591260// Under POSIX, getpid() returns the same pid as the1261// launcher thread rather than a unique pid per thread.1262// Use gettid() if you want the old pre NPTL behaviour.12631264// if you are looking for the result of a call to getpid() that1265// returns a unique pid for the calling thread, then look at the1266// OSThread::thread_id() method in osThread_linux.hpp file12671268return (int)(_initial_pid ? _initial_pid : getpid());1269}12701271// DLL functions12721273const char* os::dll_file_extension() { return ".so"; }12741275// This must be hard coded because it's the system's temporary1276// directory not the java application's temp directory, ala java.io.tmpdir.1277const char* os::get_temp_directory() { return "/tmp"; }12781279static bool file_exists(const char* filename) {1280struct stat statbuf;1281if (filename == NULL || strlen(filename) == 0) {1282return false;1283}1284return os::stat(filename, &statbuf) == 0;1285}12861287bool os::dll_build_name(char* buffer, size_t buflen,1288const char* pname, const char* fname) {1289bool retval = false;1290// Copied from libhpi1291const size_t pnamelen = pname ? strlen(pname) : 0;12921293// Return error on buffer overflow.1294if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {1295*buffer = '\0';1296return retval;1297}12981299if (pnamelen == 0) {1300snprintf(buffer, buflen, "lib%s.so", fname);1301retval = true;1302} else if (strchr(pname, *os::path_separator()) != NULL) {1303int n;1304char** pelements = split_path(pname, &n);1305for (int i = 0; i < n; i++) {1306// Really shouldn't be NULL, but check can't hurt1307if (pelements[i] == NULL || strlen(pelements[i]) == 0) {1308continue; // skip the empty path values1309}1310snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);1311if (file_exists(buffer)) {1312retval = true;1313break;1314}1315}1316// release the storage1317for (int i = 0; i < n; i++) {1318if (pelements[i] != NULL) {1319FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);1320}1321}1322if (pelements != NULL) {1323FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);1324}1325} else {1326snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);1327retval = true;1328}1329return retval;1330}13311332// Check if addr is inside libjvm.so.1333bool os::address_is_in_vm(address addr) {13341335// Input could be a real pc or a function pointer literal. 
The latter1336// would be a function descriptor residing in the data segment of a module.13371338const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);1339if (lib) {1340if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {1341return true;1342} else {1343return false;1344}1345} else {1346lib = LoadedLibraries::find_for_data_address(addr);1347if (lib) {1348if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {1349return true;1350} else {1351return false;1352}1353} else {1354return false;1355}1356}1357}13581359// Resolve an AIX function descriptor literal to a code pointer.1360// If the input is a valid code pointer to a text segment of a loaded module,1361// it is returned unchanged.1362// If the input is a valid AIX function descriptor, it is resolved to the1363// code entry point.1364// If the input is neither a valid function descriptor nor a valid code pointer,1365// NULL is returned.1366static address resolve_function_descriptor_to_code_pointer(address p) {13671368const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);1369if (lib) {1370// its a real code pointer1371return p;1372} else {1373lib = LoadedLibraries::find_for_data_address(p);1374if (lib) {1375// pointer to data segment, potential function descriptor1376address code_entry = (address)(((FunctionDescriptor*)p)->entry());1377if (LoadedLibraries::find_for_text_address(code_entry)) {1378// Its a function descriptor1379return code_entry;1380}1381}1382}1383return NULL;1384}13851386bool os::dll_address_to_function_name(address addr, char *buf,1387int buflen, int *offset) {1388if (offset) {1389*offset = -1;1390}1391// Buf is not optional, but offset is optional.1392assert(buf != NULL, "sanity check");1393buf[0] = '\0';13941395// Resolve function ptr literals first.1396addr = resolve_function_descriptor_to_code_pointer(addr);1397if (!addr) {1398return false;1399}14001401// Go through Decoder::decode to call getFuncName which reads the name from the traceback table.1402return Decoder::decode(addr, buf, buflen, offset);1403}14041405static int getModuleName(codeptr_t pc, // [in] program counter1406char* p_name, size_t namelen, // [out] optional: function name1407char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages1408) {14091410// initialize output parameters1411if (p_name && namelen > 0) {1412*p_name = '\0';1413}1414if (p_errmsg && errmsglen > 0) {1415*p_errmsg = '\0';1416}14171418const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);1419if (lib) {1420if (p_name && namelen > 0) {1421sprintf(p_name, "%.*s", namelen, lib->get_shortname());1422}1423return 0;1424}14251426trcVerbose("pc outside any module");14271428return -1;1429}14301431bool os::dll_address_to_library_name(address addr, char* buf,1432int buflen, int* offset) {1433if (offset) {1434*offset = -1;1435}1436// Buf is not optional, but offset is optional.1437assert(buf != NULL, "sanity check");1438buf[0] = '\0';14391440// Resolve function ptr literals first.1441addr = resolve_function_descriptor_to_code_pointer(addr);1442if (!addr) {1443return false;1444}14451446if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {1447return true;1448}1449return false;1450}14511452// Loads .dll/.so and in case of error it checks if .dll/.so was built1453// for the same architecture as Hotspot is running on.1454void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {14551456if (ebuf && ebuflen > 0) {1457ebuf[0] = '\0';1458ebuf[ebuflen - 1] = 
'\0';1459}14601461if (!filename || strlen(filename) == 0) {1462::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);1463return NULL;1464}14651466// RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.1467void * result= ::dlopen(filename, RTLD_LAZY);1468if (result != NULL) {1469// Reload dll cache. Don't do this in signal handling.1470LoadedLibraries::reload();1471return result;1472} else {1473// error analysis when dlopen fails1474const char* const error_report = ::dlerror();1475if (error_report && ebuf && ebuflen > 0) {1476snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",1477filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);1478}1479}1480return NULL;1481}14821483// Glibc-2.0 libdl is not MT safe. If you are building with any glibc,1484// chances are you might want to run the generated bits against glibc-2.01485// libdl.so, so always use locking for any version of glibc.1486void* os::dll_lookup(void* handle, const char* name) {1487pthread_mutex_lock(&dl_mutex);1488void* res = dlsym(handle, name);1489pthread_mutex_unlock(&dl_mutex);1490return res;1491}14921493void* os::get_default_process_handle() {1494return (void*)::dlopen(NULL, RTLD_LAZY);1495}14961497void os::print_dll_info(outputStream *st) {1498st->print_cr("Dynamic libraries:");1499LoadedLibraries::print(st);1500}15011502void os::print_os_info(outputStream* st) {1503st->print("OS:");15041505st->print("uname:");1506struct utsname name;1507uname(&name);1508st->print(name.sysname); st->print(" ");1509st->print(name.nodename); st->print(" ");1510st->print(name.release); st->print(" ");1511st->print(name.version); st->print(" ");1512st->print(name.machine);1513st->cr();15141515// rlimit1516st->print("rlimit:");1517struct rlimit rlim;15181519st->print(" STACK ");1520getrlimit(RLIMIT_STACK, &rlim);1521if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");1522else st->print("%uk", rlim.rlim_cur >> 10);15231524st->print(", CORE ");1525getrlimit(RLIMIT_CORE, &rlim);1526if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");1527else st->print("%uk", rlim.rlim_cur >> 10);15281529st->print(", NPROC ");1530st->print("%d", sysconf(_SC_CHILD_MAX));15311532st->print(", NOFILE ");1533getrlimit(RLIMIT_NOFILE, &rlim);1534if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");1535else st->print("%d", rlim.rlim_cur);15361537st->print(", AS ");1538getrlimit(RLIMIT_AS, &rlim);1539if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");1540else st->print("%uk", rlim.rlim_cur >> 10);15411542// Print limits on DATA, because it limits the C-heap.1543st->print(", DATA ");1544getrlimit(RLIMIT_DATA, &rlim);1545if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");1546else st->print("%uk", rlim.rlim_cur >> 10);1547st->cr();15481549// load average1550st->print("load average:");1551double loadavg[3] = {-1.L, -1.L, -1.L};1552os::loadavg(loadavg, 3);1553st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);1554st->cr();1555}15561557int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {1558// Not yet implemented.1559return 0;1560}15611562void os::print_memory_info(outputStream* st) {15631564st->print_cr("Memory:");15651566st->print_cr(" default page size: %s", describe_pagesize(os::vm_page_size()));1567st->print_cr(" default stack page size: %s", describe_pagesize(os::vm_page_size()));1568st->print_cr(" default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));1569st->print_cr(" can use 
64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" :"no"));1570st->print_cr(" can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" :"no"));1571if (g_multipage_error != 0) {1572st->print_cr(" multipage error: %d", g_multipage_error);1573}15741575// print out LDR_CNTRL because it affects the default page sizes1576const char* const ldr_cntrl = ::getenv("LDR_CNTRL");1577st->print_cr(" LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");15781579const char* const extshm = ::getenv("EXTSHM");1580st->print_cr(" EXTSHM=%s.", extshm ? extshm : "<unset>");15811582// Call os::Aix::get_meminfo() to retrieve memory statistics.1583os::Aix::meminfo_t mi;1584if (os::Aix::get_meminfo(&mi)) {1585char buffer[256];1586if (os::Aix::on_aix()) {1587jio_snprintf(buffer, sizeof(buffer),1588" physical total : %llu\n"1589" physical free : %llu\n"1590" swap total : %llu\n"1591" swap free : %llu\n",1592mi.real_total,1593mi.real_free,1594mi.pgsp_total,1595mi.pgsp_free);1596} else {1597Unimplemented();1598}1599st->print_raw(buffer);1600} else {1601st->print_cr(" (no more information available)");1602}1603}16041605void os::pd_print_cpu_info(outputStream* st) {1606// cpu1607st->print("CPU:");1608st->print("total %d", os::processor_count());1609// It's not safe to query number of active processors after crash1610// st->print("(active %d)", os::active_processor_count());1611st->print(" %s", VM_Version::cpu_features());1612st->cr();1613}16141615void os::print_siginfo(outputStream* st, void* siginfo) {1616// Use common posix version.1617os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);1618st->cr();1619}16201621static void print_signal_handler(outputStream* st, int sig,1622char* buf, size_t buflen);16231624void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {1625st->print_cr("Signal Handlers:");1626print_signal_handler(st, SIGSEGV, buf, buflen);1627print_signal_handler(st, SIGBUS , buf, buflen);1628print_signal_handler(st, SIGFPE , buf, buflen);1629print_signal_handler(st, SIGPIPE, buf, buflen);1630print_signal_handler(st, SIGXFSZ, buf, buflen);1631print_signal_handler(st, SIGILL , buf, buflen);1632print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);1633print_signal_handler(st, SR_signum, buf, buflen);1634print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);1635print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);1636print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);1637print_signal_handler(st, BREAK_SIGNAL, buf, buflen);1638print_signal_handler(st, SIGTRAP, buf, buflen);1639print_signal_handler(st, SIGDANGER, buf, buflen);1640}16411642static char saved_jvm_path[MAXPATHLEN] = {0};16431644// Find the full path to the current module, libjvm.so.1645void os::jvm_path(char *buf, jint buflen) {1646// Error checking.1647if (buflen < MAXPATHLEN) {1648assert(false, "must use a large-enough buffer");1649buf[0] = '\0';1650return;1651}1652// Lazy resolve the path to current module.1653if (saved_jvm_path[0] != 0) {1654strcpy(buf, saved_jvm_path);1655return;1656}16571658Dl_info dlinfo;1659int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);1660assert(ret != 0, "cannot locate libjvm");1661char* rp = realpath((char *)dlinfo.dli_fname, buf);1662assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");16631664strcpy(saved_jvm_path, buf);1665}16661667void os::print_jni_name_prefix_on(outputStream* st, int args_size) {1668// no prefix required, not even "_"1669}16701671void os::print_jni_name_suffix_on(outputStream* st, 
int args_size) {1672// no suffix required1673}16741675////////////////////////////////////////////////////////////////////////////////1676// sun.misc.Signal support16771678static volatile jint sigint_count = 0;16791680static void1681UserHandler(int sig, void *siginfo, void *context) {1682// 4511530 - sem_post is serialized and handled by the manager thread. When1683// the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We1684// don't want to flood the manager thread with sem_post requests.1685if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)1686return;16871688// Ctrl-C is pressed during error reporting, likely because the error1689// handler fails to abort. Let VM die immediately.1690if (sig == SIGINT && is_error_reported()) {1691os::die();1692}16931694os::signal_notify(sig);1695}16961697void* os::user_handler() {1698return CAST_FROM_FN_PTR(void*, UserHandler);1699}17001701extern "C" {1702typedef void (*sa_handler_t)(int);1703typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);1704}17051706void* os::signal(int signal_number, void* handler) {1707struct sigaction sigAct, oldSigAct;17081709sigfillset(&(sigAct.sa_mask));17101711// Do not block out synchronous signals in the signal handler.1712// Blocking synchronous signals only makes sense if you can really1713// be sure that those signals won't happen during signal handling,1714// when the blocking applies. Normal signal handlers are lean and1715// do not cause signals. But our signal handlers tend to be "risky"1716// - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.1717// On AIX, PASE there was a case where a SIGSEGV happened, followed1718// by a SIGILL, which was blocked due to the signal mask. The process1719// just hung forever. Better to crash from a secondary signal than to hang.1720sigdelset(&(sigAct.sa_mask), SIGSEGV);1721sigdelset(&(sigAct.sa_mask), SIGBUS);1722sigdelset(&(sigAct.sa_mask), SIGILL);1723sigdelset(&(sigAct.sa_mask), SIGFPE);1724sigdelset(&(sigAct.sa_mask), SIGTRAP);17251726sigAct.sa_flags = SA_RESTART|SA_SIGINFO;17271728sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);17291730if (sigaction(signal_number, &sigAct, &oldSigAct)) {1731// -1 means registration failed1732return (void *)-1;1733}17341735return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);1736}17371738void os::signal_raise(int signal_number) {1739::raise(signal_number);1740}17411742//1743// The following code is moved from os.cpp for making this1744// code platform specific, which it is by its very nature.1745//17461747// Will be modified when max signal is changed to be dynamic1748int os::sigexitnum_pd() {1749return NSIG;1750}17511752// a counter for each possible signal value1753static volatile jint pending_signals[NSIG+1] = { 0 };17541755// Linux(POSIX) specific hand shaking semaphore.1756static sem_t sig_sem;17571758void os::signal_init_pd() {1759// Initialize signal structures1760::memset((void*)pending_signals, 0, sizeof(pending_signals));17611762// Initialize signal semaphore1763int rc = ::sem_init(&sig_sem, 0, 0);1764guarantee(rc != -1, "sem_init failed");1765}17661767void os::signal_notify(int sig) {1768Atomic::inc(&pending_signals[sig]);1769::sem_post(&sig_sem);1770}17711772static int check_pending_signals(bool wait) {1773Atomic::store(0, &sigint_count);1774for (;;) {1775for (int i = 0; i < NSIG + 1; i++) {1776jint n = pending_signals[i];1777if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {1778return i;1779}1780}1781if (!wait) {1782return -1;1783}1784JavaThread *thread = 
JavaThread::current();1785ThreadBlockInVM tbivm(thread);17861787bool threadIsSuspended;1788do {1789thread->set_suspend_equivalent();1790// cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()17911792::sem_wait(&sig_sem);17931794// were we externally suspended while we were waiting?1795threadIsSuspended = thread->handle_special_suspend_equivalent_condition();1796if (threadIsSuspended) {1797//1798// The semaphore has been incremented, but while we were waiting1799// another thread suspended us. We don't want to continue running1800// while suspended because that would surprise the thread that1801// suspended us.1802//1803::sem_post(&sig_sem);18041805thread->java_suspend_self();1806}1807} while (threadIsSuspended);1808}1809}18101811int os::signal_lookup() {1812return check_pending_signals(false);1813}18141815int os::signal_wait() {1816return check_pending_signals(true);1817}18181819////////////////////////////////////////////////////////////////////////////////1820// Virtual Memory18211822// AddrRange describes an immutable address range1823//1824// This is a helper class for the 'shared memory bookkeeping' below.1825class AddrRange {1826friend class ShmBkBlock;18271828char* _start;1829size_t _size;18301831public:18321833AddrRange(char* start, size_t size)1834: _start(start), _size(size)1835{}18361837AddrRange(const AddrRange& r)1838: _start(r.start()), _size(r.size())1839{}18401841char* start() const { return _start; }1842size_t size() const { return _size; }1843char* end() const { return _start + _size; }1844bool is_empty() const { return _size == 0 ? true : false; }18451846static AddrRange empty_range() { return AddrRange(NULL, 0); }18471848bool contains(const char* p) const {1849return start() <= p && end() > p;1850}18511852bool contains(const AddrRange& range) const {1853return start() <= range.start() && end() >= range.end();1854}18551856bool intersects(const AddrRange& range) const {1857return (range.start() <= start() && range.end() > start()) ||1858(range.start() < end() && range.end() >= end()) ||1859contains(range);1860}18611862bool is_same_range(const AddrRange& range) const {1863return start() == range.start() && size() == range.size();1864}18651866// return the closest inside range consisting of whole pages1867AddrRange find_closest_aligned_range(size_t pagesize) const {1868if (pagesize == 0 || is_empty()) {1869return empty_range();1870}1871char* const from = (char*)align_size_up((intptr_t)_start, pagesize);1872char* const to = (char*)align_size_down((intptr_t)end(), pagesize);1873if (from > to) {1874return empty_range();1875}1876return AddrRange(from, to - from);1877}1878};18791880////////////////////////////////////////////////////////////////////////////1881// shared memory bookkeeping1882//1883// the os::reserve_memory() API and friends hand out different kind of memory, depending1884// on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.1885//1886// But these memory types have to be treated differently. 
For example, to uncommit1887// mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,1888// disclaim64() is needed.1889//1890// Therefore we need to keep track of the allocated memory segments and their1891// properties.18921893// ShmBkBlock: base class for all blocks in the shared memory bookkeeping1894class ShmBkBlock {18951896ShmBkBlock* _next;18971898protected:18991900AddrRange _range;1901const size_t _pagesize;1902const bool _pinned;19031904public:19051906ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)1907: _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {19081909assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");1910assert(!_range.is_empty(), "invalid range");1911}19121913virtual void print(outputStream* st) const {1914st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",1915_range.start(), _range.end(), _range.size(),1916_range.size() / _pagesize, describe_pagesize(_pagesize),1917_pinned ? "pinned" : "");1918}19191920enum Type { MMAP, SHMAT };1921virtual Type getType() = 0;19221923char* base() const { return _range.start(); }1924size_t size() const { return _range.size(); }19251926void setAddrRange(AddrRange range) {1927_range = range;1928}19291930bool containsAddress(const char* p) const {1931return _range.contains(p);1932}19331934bool containsRange(const char* p, size_t size) const {1935return _range.contains(AddrRange((char*)p, size));1936}19371938bool isSameRange(const char* p, size_t size) const {1939return _range.is_same_range(AddrRange((char*)p, size));1940}19411942virtual bool disclaim(char* p, size_t size) = 0;1943virtual bool release() = 0;19441945// blocks live in a list.1946ShmBkBlock* next() const { return _next; }1947void set_next(ShmBkBlock* blk) { _next = blk; }19481949}; // end: ShmBkBlock195019511952// ShmBkMappedBlock: describes an block allocated with mmap()1953class ShmBkMappedBlock : public ShmBkBlock {1954public:19551956ShmBkMappedBlock(AddrRange range)1957: ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned19581959void print(outputStream* st) const {1960ShmBkBlock::print(st);1961st->print_cr(" - mmap'ed");1962}19631964Type getType() {1965return MMAP;1966}19671968bool disclaim(char* p, size_t size) {19691970AddrRange r(p, size);19711972guarantee(_range.contains(r), "invalid disclaim");19731974// only disclaim whole ranges.1975const AddrRange r2 = r.find_closest_aligned_range(_pagesize);1976if (r2.is_empty()) {1977return true;1978}19791980const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);19811982if (rc != 0) {1983warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);1984}19851986return rc == 0 ? 
true : false;1987}19881989bool release() {1990// mmap'ed blocks are released using munmap1991if (::munmap(_range.start(), _range.size()) != 0) {1992warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);1993return false;1994}1995return true;1996}1997}; // end: ShmBkMappedBlock19981999// ShmBkShmatedBlock: describes an block allocated with shmget/shmat()2000class ShmBkShmatedBlock : public ShmBkBlock {2001public:20022003ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)2004: ShmBkBlock(range, pagesize, pinned) {}20052006void print(outputStream* st) const {2007ShmBkBlock::print(st);2008st->print_cr(" - shmat'ed");2009}20102011Type getType() {2012return SHMAT;2013}20142015bool disclaim(char* p, size_t size) {20162017AddrRange r(p, size);20182019if (_pinned) {2020return true;2021}20222023// shmat'ed blocks are disclaimed using disclaim642024guarantee(_range.contains(r), "invalid disclaim");20252026// only disclaim whole ranges.2027const AddrRange r2 = r.find_closest_aligned_range(_pagesize);2028if (r2.is_empty()) {2029return true;2030}20312032const bool rc = my_disclaim64(r2.start(), r2.size());20332034if (Verbose && !rc) {2035warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());2036}20372038return rc;2039}20402041bool release() {2042bool rc = false;2043if (::shmdt(_range.start()) != 0) {2044warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);2045} else {2046rc = true;2047}2048return rc;2049}20502051}; // end: ShmBkShmatedBlock20522053static ShmBkBlock* g_shmbk_list = NULL;2054static volatile jint g_shmbk_table_lock = 0;20552056// keep some usage statistics2057static struct {2058int nodes; // number of nodes in list2059size_t bytes; // reserved - not committed - bytes.2060int reserves; // how often reserve was called2061int lookups; // how often a lookup was made2062} g_shmbk_stats = { 0, 0, 0, 0 };20632064// add information about a shared memory segment to the bookkeeping2065static void shmbk_register(ShmBkBlock* p_block) {2066guarantee(p_block, "logic error");2067p_block->set_next(g_shmbk_list);2068g_shmbk_list = p_block;2069g_shmbk_stats.reserves ++;2070g_shmbk_stats.bytes += p_block->size();2071g_shmbk_stats.nodes ++;2072}20732074// remove information about a shared memory segment by its starting address2075static void shmbk_unregister(ShmBkBlock* p_block) {2076ShmBkBlock* p = g_shmbk_list;2077ShmBkBlock* prev = NULL;2078while (p) {2079if (p == p_block) {2080if (prev) {2081prev->set_next(p->next());2082} else {2083g_shmbk_list = p->next();2084}2085g_shmbk_stats.nodes --;2086g_shmbk_stats.bytes -= p->size();2087return;2088}2089prev = p;2090p = p->next();2091}2092assert(false, "should not happen");2093}20942095// given a pointer, return shared memory bookkeeping record for the segment it points into2096// using the returned block info must happen under lock protection2097static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {2098g_shmbk_stats.lookups ++;2099ShmBkBlock* p = g_shmbk_list;2100while (p) {2101if (p->containsAddress(addr)) {2102return p;2103}2104p = p->next();2105}2106return NULL;2107}21082109// dump all information about all memory segments allocated with os::reserve_memory()2110void shmbk_dump_info() {2111tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "2112"total reserves: %d total lookups: %d)",2113g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);2114const ShmBkBlock* p = g_shmbk_list;2115int i = 0;2116while (p) {2117p->print(tty);2118p 
= p->next();2119i ++;2120}2121}21222123#define LOCK_SHMBK { ThreadCritical _LOCK_SHMBK;2124#define UNLOCK_SHMBK }21252126// End: shared memory bookkeeping2127////////////////////////////////////////////////////////////////////////////////////////////////////21282129int os::vm_page_size() {2130// Seems redundant as all get out2131assert(os::Aix::page_size() != -1, "must call os::init");2132return os::Aix::page_size();2133}21342135// Aix allocates memory by pages.2136int os::vm_allocation_granularity() {2137assert(os::Aix::page_size() != -1, "must call os::init");2138return os::Aix::page_size();2139}21402141int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {21422143// Commit is a noop. There is no explicit commit2144// needed on AIX. Memory is committed when touched.2145//2146// Debug : check address range for validity2147#ifdef ASSERT2148LOCK_SHMBK2149ShmBkBlock* const block = shmbk_find_by_containing_address(addr);2150if (!block) {2151fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", addr);2152shmbk_dump_info();2153assert(false, "invalid pointer");2154return false;2155} else if (!block->containsRange(addr, size)) {2156fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n", addr, addr + size);2157shmbk_dump_info();2158assert(false, "invalid range");2159return false;2160}2161UNLOCK_SHMBK2162#endif // ASSERT21632164return 0;2165}21662167bool os::pd_commit_memory(char* addr, size_t size, bool exec) {2168return os::Aix::commit_memory_impl(addr, size, exec) == 0;2169}21702171void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,2172const char* mesg) {2173assert(mesg != NULL, "mesg must be specified");2174os::Aix::commit_memory_impl(addr, size, exec);2175}21762177int os::Aix::commit_memory_impl(char* addr, size_t size,2178size_t alignment_hint, bool exec) {2179return os::Aix::commit_memory_impl(addr, size, exec);2180}21812182bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,2183bool exec) {2184return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;2185}21862187void os::pd_commit_memory_or_exit(char* addr, size_t size,2188size_t alignment_hint, bool exec,2189const char* mesg) {2190os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);2191}21922193bool os::pd_uncommit_memory(char* addr, size_t size) {21942195// Delegate to ShmBkBlock class which knows how to uncommit its memory.21962197bool rc = false;2198LOCK_SHMBK2199ShmBkBlock* const block = shmbk_find_by_containing_address(addr);2200if (!block) {2201fprintf(stderr, "invalid pointer: 0x%p.\n", addr);2202shmbk_dump_info();2203assert(false, "invalid pointer");2204return false;2205} else if (!block->containsRange(addr, size)) {2206fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);2207shmbk_dump_info();2208assert(false, "invalid range");2209return false;2210}2211rc = block->disclaim(addr, size);2212UNLOCK_SHMBK22132214if (Verbose && !rc) {2215warning("failed to disclaim 0x%p .. 
0x%p (0x%llX bytes).", addr, addr + size, size);2216}2217return rc;2218}22192220bool os::pd_create_stack_guard_pages(char* addr, size_t size) {2221return os::guard_memory(addr, size);2222}22232224bool os::remove_stack_guard_pages(char* addr, size_t size) {2225return os::unguard_memory(addr, size);2226}22272228void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {2229}22302231void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {2232}22332234void os::numa_make_global(char *addr, size_t bytes) {2235}22362237void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {2238}22392240bool os::numa_topology_changed() {2241return false;2242}22432244size_t os::numa_get_groups_num() {2245return 1;2246}22472248int os::numa_get_group_id() {2249return 0;2250}22512252size_t os::numa_get_leaf_groups(int *ids, size_t size) {2253if (size > 0) {2254ids[0] = 0;2255return 1;2256}2257return 0;2258}22592260bool os::get_page_info(char *start, page_info* info) {2261return false;2262}22632264char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {2265return end;2266}22672268// Flags for reserve_shmatted_memory:2269#define RESSHM_WISHADDR_OR_FAIL 12270#define RESSHM_TRY_16M_PAGES 22271#define RESSHM_16M_PAGES_OR_FAIL 422722273// Result of reserve_shmatted_memory:2274struct shmatted_memory_info_t {2275char* addr;2276size_t pagesize;2277bool pinned;2278};22792280// Reserve a section of shmatted memory.2281// params:2282// bytes [in]: size of memory, in bytes2283// requested_addr [in]: wish address.2284// NULL = no wish.2285// If RESSHM_WISHADDR_OR_FAIL is set in flags and wish address cannot2286// be obtained, function will fail. Otherwise wish address is treated as hint and2287// another pointer is returned.2288// flags [in]: some flags. Valid flags are:2289// RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained.2290// RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool2291// (requires UseLargePages and Use16MPages)2292// RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail.2293// Otherwise any other page size will do.2294// p_info [out] : holds information about the created shared memory segment.2295static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {22962297assert(p_info, "parameter error");22982299// init output struct.2300p_info->addr = NULL;23012302// neither should we be here for EXTSHM=ON.2303if (os::Aix::extshm()) {2304ShouldNotReachHere();2305}23062307// extract flags. sanity checks.2308const bool wishaddr_or_fail =2309flags & RESSHM_WISHADDR_OR_FAIL;2310const bool try_16M_pages =2311flags & RESSHM_TRY_16M_PAGES;2312const bool f16M_pages_or_fail =2313flags & RESSHM_16M_PAGES_OR_FAIL;23142315// first check: if a wish address is given and it is mandatory, but not aligned to segment boundary,2316// shmat will fail anyway, so save some cycles by failing right away2317if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M == 0)) {2318if (wishaddr_or_fail) {2319return false;2320} else {2321requested_addr = NULL;2322}2323}23242325char* addr = NULL;23262327// Align size of shm up to the largest possible page size, to avoid errors later on when we try to change2328// pagesize dynamically.2329const size_t size = align_size_up(bytes, SIZE_16M);23302331// reserve the shared segment2332int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);2333if (shmid == -1) {2334warning("shmget(.., %lld, ..) 
failed (errno: %d).", size, errno);2335return false;2336}23372338// Important note:2339// It is very important that we, upon leaving this function, do not leave a shm segment alive.2340// We must right after attaching it remove it from the system. System V shm segments are global and2341// survive the process.2342// So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm".23432344// try forcing the page size2345size_t pagesize = -1; // unknown so far23462347if (UseLargePages) {23482349struct shmid_ds shmbuf;2350memset(&shmbuf, 0, sizeof(shmbuf));23512352// First, try to take from 16M page pool if...2353if (os::Aix::can_use_16M_pages() // we can ...2354&& Use16MPages // we are not explicitly forbidden to do so (-XX:-Use16MPages)..2355&& try_16M_pages) { // caller wants us to.2356shmbuf.shm_pagesize = SIZE_16M;2357if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {2358pagesize = SIZE_16M;2359} else {2360warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",2361size / SIZE_16M, errno);2362if (f16M_pages_or_fail) {2363goto cleanup_shm;2364}2365}2366}23672368// Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,2369// because the 64K page pool may also be exhausted.2370if (pagesize == -1) {2371shmbuf.shm_pagesize = SIZE_64K;2372if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {2373pagesize = SIZE_64K;2374} else {2375warning("Failed to allocate %d 64K pages. (shmctl failed with %d)",2376size / SIZE_64K, errno);2377// here I give up. leave page_size -1 - later, after attaching, we will query the2378// real page size of the attached memory. (in theory, it may be something different2379// from 4K if LDR_CNTRL SHM_PSIZE is set)2380}2381}2382}23832384// sanity point2385assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");23862387// Now attach the shared segment.2388addr = (char*) shmat(shmid, requested_addr, 0);2389if (addr == (char*)-1) {2390// How to handle attach failure:2391// If it failed for a specific wish address, tolerate this: in that case, if wish address was2392// mandatory, fail, if not, retry anywhere.2393// If it failed for any other reason, treat that as fatal error.2394addr = NULL;2395if (requested_addr) {2396if (wishaddr_or_fail) {2397goto cleanup_shm;2398} else {2399addr = (char*) shmat(shmid, NULL, 0);2400if (addr == (char*)-1) { // fatal2401addr = NULL;2402warning("shmat failed (errno: %d)", errno);2403goto cleanup_shm;2404}2405}2406} else { // fatal2407addr = NULL;2408warning("shmat failed (errno: %d)", errno);2409goto cleanup_shm;2410}2411}24122413// sanity point2414assert(addr && addr != (char*) -1, "wrong address");24152416// after successful Attach remove the segment - right away.2417if (::shmctl(shmid, IPC_RMID, NULL) == -1) {2418warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);2419guarantee(false, "failed to remove shared memory segment!");2420}2421shmid = -1;24222423// query the real page size. In case setting the page size did not work (see above), the system2424// may have given us something other then 4K (LDR_CNTRL)2425{2426const size_t real_pagesize = os::Aix::query_pagesize(addr);2427if (pagesize != -1) {2428assert(pagesize == real_pagesize, "unexpected pagesize after shmat");2429} else {2430pagesize = real_pagesize;2431}2432}24332434// Now register the reserved block with internal book keeping.2435LOCK_SHMBK2436const bool pinned = pagesize >= SIZE_16M ? 
true : false;2437ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);2438assert(p_block, "");2439shmbk_register(p_block);2440UNLOCK_SHMBK24412442cleanup_shm:24432444// if we have not done so yet, remove the shared memory segment. This is very important.2445if (shmid != -1) {2446if (::shmctl(shmid, IPC_RMID, NULL) == -1) {2447warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);2448guarantee(false, "failed to remove shared memory segment!");2449}2450shmid = -1;2451}24522453// trace2454if (Verbose && !addr) {2455if (requested_addr != NULL) {2456warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.", size, requested_addr);2457} else {2458warning("failed to shm-allocate 0x%llX bytes at any address.", size);2459}2460}24612462// hand info to caller2463if (addr) {2464p_info->addr = addr;2465p_info->pagesize = pagesize;2466p_info->pinned = pagesize == SIZE_16M ? true : false;2467}24682469// sanity test:2470if (requested_addr && addr && wishaddr_or_fail) {2471guarantee(addr == requested_addr, "shmat error");2472}24732474// just one more test to really make sure we have no dangling shm segments.2475guarantee(shmid == -1, "dangling shm segments");24762477return addr ? true : false;24782479} // end: reserve_shmatted_memory24802481// Reserve memory using mmap. Behaves the same as reserve_shmatted_memory():2482// will return NULL in case of an error.2483static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {24842485// if a wish address is given, but not aligned to 4K page boundary, mmap will fail.2486if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {2487warning("Wish address 0x%p not aligned to page boundary.", requested_addr);2488return NULL;2489}24902491const size_t size = align_size_up(bytes, SIZE_4K);24922493// Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to2494// msync(MS_INVALIDATE) (see os::uncommit_memory)2495int flags = MAP_ANONYMOUS | MAP_SHARED;24962497// MAP_FIXED is needed to enforce requested_addr - manpage is vague about what2498// it means if wishaddress is given but MAP_FIXED is not set.2499//2500// Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED2501// clobbers the address range, which is probably not what the caller wants. That's2502// why I assert here (again) that the SPEC1170 compat mode is off.2503// If we want to be able to run under SPEC1170, we have to do some porting and2504// testing.2505if (requested_addr != NULL) {2506assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");2507flags |= MAP_FIXED;2508}25092510char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);25112512if (addr == MAP_FAILED) {2513// attach failed: tolerate for specific wish addresses. Not being able to attach2514// anywhere is a fatal error.2515if (requested_addr == NULL) {2516// It's ok to fail here if the machine has not enough memory.2517warning("mmap(NULL, 0x%llX, ..) 
failed (%d)", size, errno);2518}2519addr = NULL;2520goto cleanup_mmap;2521}25222523// If we did request a specific address and that address was not available, fail.2524if (addr && requested_addr) {2525guarantee(addr == requested_addr, "unexpected");2526}25272528// register this mmap'ed segment with book keeping2529LOCK_SHMBK2530ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));2531assert(p_block, "");2532shmbk_register(p_block);2533UNLOCK_SHMBK25342535cleanup_mmap:25362537// trace2538if (Verbose) {2539if (addr) {2540fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);2541}2542else {2543if (requested_addr != NULL) {2544warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);2545} else {2546warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);2547}2548}2549}25502551return addr;25522553} // end: reserve_mmaped_memory25542555// Reserves and attaches a shared memory segment.2556// Will assert if a wish address is given and could not be obtained.2557char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {2558return os::attempt_reserve_memory_at(bytes, requested_addr);2559}25602561bool os::pd_release_memory(char* addr, size_t size) {25622563// delegate to ShmBkBlock class which knows how to uncommit its memory.25642565bool rc = false;2566LOCK_SHMBK2567ShmBkBlock* const block = shmbk_find_by_containing_address(addr);2568if (!block) {2569fprintf(stderr, "invalid pointer: 0x%p.\n", addr);2570shmbk_dump_info();2571assert(false, "invalid pointer");2572return false;2573}2574else if (!block->isSameRange(addr, size)) {2575if (block->getType() == ShmBkBlock::MMAP) {2576// Release only the same range or a the beginning or the end of a range.2577if (block->base() == addr && size < block->size()) {2578ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size));2579assert(b, "");2580shmbk_register(b);2581block->setAddrRange(AddrRange(addr, size));2582}2583else if (addr > block->base() && addr + size == block->base() + block->size()) {2584ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size));2585assert(b, "");2586shmbk_register(b);2587block->setAddrRange(AddrRange(addr, size));2588}2589else {2590fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size);2591shmbk_dump_info();2592assert(false, "invalid mmap range");2593return false;2594}2595}2596else {2597// Release only the same range. No partial release allowed.2598// Soften the requirement a bit, because the user may think he owns a smaller size2599// than the block is due to alignment etc.2600if (block->base() != addr || block->size() < size) {2601fprintf(stderr, "invalid shmget range: 0x%p .. 
0x%p.\n", addr, addr + size);2602shmbk_dump_info();2603assert(false, "invalid shmget range");2604return false;2605}2606}2607}2608rc = block->release();2609assert(rc, "release failed");2610// remove block from bookkeeping2611shmbk_unregister(block);2612delete block;2613UNLOCK_SHMBK26142615if (!rc) {2616warning("failed to released %lu bytes at 0x%p", size, addr);2617}26182619return rc;2620}26212622static bool checked_mprotect(char* addr, size_t size, int prot) {26232624// Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will2625// not tell me if protection failed when trying to protect an un-protectable range.2626//2627// This means if the memory was allocated using shmget/shmat, protection wont work2628// but mprotect will still return 0:2629//2630// See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm26312632bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;26332634if (!rc) {2635const char* const s_errno = strerror(errno);2636warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);2637return false;2638}26392640// mprotect success check2641//2642// Mprotect said it changed the protection but can I believe it?2643//2644// To be sure I need to check the protection afterwards. Try to2645// read from protected memory and check whether that causes a segfault.2646//2647if (!os::Aix::xpg_sus_mode()) {26482649if (StubRoutines::SafeFetch32_stub()) {26502651const bool read_protected =2652(SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&2653SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;26542655if (prot & PROT_READ) {2656rc = !read_protected;2657} else {2658rc = read_protected;2659}2660}2661}2662if (!rc) {2663assert(false, "mprotect failed.");2664}2665return rc;2666}26672668// Set protections specified2669bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {2670unsigned int p = 0;2671switch (prot) {2672case MEM_PROT_NONE: p = PROT_NONE; break;2673case MEM_PROT_READ: p = PROT_READ; break;2674case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break;2675case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;2676default:2677ShouldNotReachHere();2678}2679// is_committed is unused.2680return checked_mprotect(addr, size, p);2681}26822683bool os::guard_memory(char* addr, size_t size) {2684return checked_mprotect(addr, size, PROT_NONE);2685}26862687bool os::unguard_memory(char* addr, size_t size) {2688return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);2689}26902691// Large page support26922693static size_t _large_page_size = 0;26942695// Enable large page support if OS allows that.2696void os::large_page_init() {26972698// Note: os::Aix::query_multipage_support must run first.26992700if (!UseLargePages) {2701return;2702}27032704if (!Aix::can_use_64K_pages()) {2705assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");2706UseLargePages = false;2707return;2708}27092710if (!Aix::can_use_16M_pages() && Use16MPages) {2711fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool "2712" and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n");2713}27142715// Do not report 16M page alignment as part of os::_page_sizes if we are2716// explicitly forbidden from using 16M pages. Doing so would increase the2717// alignment the garbage collector calculates with, slightly increasing2718// heap usage. 
We should only pay for 16M alignment if we really want to2719// use 16M pages.2720if (Use16MPages && Aix::can_use_16M_pages()) {2721_large_page_size = SIZE_16M;2722_page_sizes[0] = SIZE_16M;2723_page_sizes[1] = SIZE_64K;2724_page_sizes[2] = SIZE_4K;2725_page_sizes[3] = 0;2726} else if (Aix::can_use_64K_pages()) {2727_large_page_size = SIZE_64K;2728_page_sizes[0] = SIZE_64K;2729_page_sizes[1] = SIZE_4K;2730_page_sizes[2] = 0;2731}27322733if (Verbose) {2734("Default large page size is 0x%llX.", _large_page_size);2735}2736} // end: os::large_page_init()27372738char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {2739// "exec" is passed in but not used. Creating the shared image for2740// the code cache doesn't have an SHM_X executable permission to check.2741Unimplemented();2742return 0;2743}27442745bool os::release_memory_special(char* base, size_t bytes) {2746// detaching the SHM segment will also delete it, see reserve_memory_special()2747Unimplemented();2748return false;2749}27502751size_t os::large_page_size() {2752return _large_page_size;2753}27542755bool os::can_commit_large_page_memory() {2756// Well, sadly we cannot commit anything at all (see comment in2757// os::commit_memory) but we claim to so we can make use of large pages2758return true;2759}27602761bool os::can_execute_large_page_memory() {2762// We can do that2763return true;2764}27652766// Reserve memory at an arbitrary address, only if that area is2767// available (and not reserved for something else).2768char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {27692770bool use_mmap = false;27712772// mmap: smaller graining, no large page support2773// shm: large graining (256M), large page support, limited number of shm segments2774//2775// Prefer mmap wherever we either do not need large page support or have OS limits27762777if (!UseLargePages || bytes < SIZE_16M) {2778use_mmap = true;2779}27802781char* addr = NULL;2782if (use_mmap) {2783addr = reserve_mmaped_memory(bytes, requested_addr);2784} else {2785// shmat: wish address is mandatory, and do not try 16M pages here.2786shmatted_memory_info_t info;2787const int flags = RESSHM_WISHADDR_OR_FAIL;2788if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {2789addr = info.addr;2790}2791}27922793return addr;2794}27952796size_t os::read(int fd, void *buf, unsigned int nBytes) {2797return ::read(fd, buf, nBytes);2798}27992800size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {2801return ::pread(fd, buf, nBytes, offset);2802}28032804#define NANOSECS_PER_MILLISEC 100000028052806int os::sleep(Thread* thread, jlong millis, bool interruptible) {2807assert(thread == Thread::current(), "thread consistency check");28082809// Prevent nasty overflow in deadline calculation2810// by handling long sleeps similar to solaris or windows.2811const jlong limit = INT_MAX;2812int result;2813while (millis > limit) {2814if ((result = os::sleep(thread, limit, interruptible)) != OS_OK) {2815return result;2816}2817millis -= limit;2818}28192820ParkEvent * const slp = thread->_SleepEvent;2821slp->reset();2822OrderAccess::fence();28232824if (interruptible) {2825jlong prevtime = javaTimeNanos();28262827// Prevent precision loss and too long sleeps2828jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;28292830for (;;) {2831if (os::is_interrupted(thread, true)) {2832return OS_INTRPT;2833}28342835jlong newtime = javaTimeNanos();28362837assert(newtime >= prevtime, "time moving backwards");2838// Doing 
prevtime and newtime in microseconds doesn't help precision,2839// and trying to round up to avoid lost milliseconds can result in a2840// too-short delay.2841millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;28422843if (millis <= 0) {2844return OS_OK;2845}28462847// Stop sleeping if we passed the deadline2848if (newtime >= deadline) {2849return OS_OK;2850}28512852prevtime = newtime;28532854{2855assert(thread->is_Java_thread(), "sanity check");2856JavaThread *jt = (JavaThread *) thread;2857ThreadBlockInVM tbivm(jt);2858OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);28592860jt->set_suspend_equivalent();28612862slp->park(millis);28632864// were we externally suspended while we were waiting?2865jt->check_and_wait_while_suspended();2866}2867}2868} else {2869OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);2870jlong prevtime = javaTimeNanos();28712872// Prevent precision loss and too long sleeps2873jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;28742875for (;;) {2876// It'd be nice to avoid the back-to-back javaTimeNanos() calls on2877// the 1st iteration ...2878jlong newtime = javaTimeNanos();28792880if (newtime - prevtime < 0) {2881// time moving backwards, should only happen if no monotonic clock2882// not a guarantee() because JVM should not abort on kernel/glibc bugs2883// - HS14 Commented out as not implemented.2884// - TODO Maybe we should implement it?2885//assert(!Aix::supports_monotonic_clock(), "time moving backwards");2886} else {2887millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;2888}28892890if (millis <= 0) break;28912892if (newtime >= deadline) {2893break;2894}28952896prevtime = newtime;2897slp->park(millis);2898}2899return OS_OK;2900}2901}29022903void os::naked_short_sleep(jlong ms) {2904struct timespec req;29052906assert(ms < 1000, "Un-interruptable sleep, short time use only");2907req.tv_sec = 0;2908if (ms > 0) {2909req.tv_nsec = (ms % 1000) * 1000000;2910}2911else {2912req.tv_nsec = 1;2913}29142915nanosleep(&req, NULL);29162917return;2918}29192920// Sleep forever; naked call to OS-specific sleep; use with CAUTION2921void os::infinite_sleep() {2922while (true) { // sleep forever ...2923::sleep(100); // ... 100 seconds at a time2924}2925}29262927// Used to convert frequent JVM_Yield() to nops2928bool os::dont_yield() {2929return DontYieldALot;2930}29312932void os::yield() {2933sched_yield();2934}29352936os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }29372938void os::yield_all(int attempts) {2939// Yields to all threads, including threads with lower priorities2940// Threads on Linux are all with same priority. 
The Solaris style2941// os::yield_all() with nanosleep(1ms) is not necessary.2942sched_yield();2943}29442945// Called from the tight loops to possibly influence time-sharing heuristics2946void os::loop_breaker(int attempts) {2947os::yield_all(attempts);2948}29492950////////////////////////////////////////////////////////////////////////////////2951// thread priority support29522953// From AIX manpage to pthread_setschedparam2954// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?2955// topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):2956//2957// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the2958// range from 40 to 80, where 40 is the least favored priority and 802959// is the most favored."2960//2961// (Actually, I doubt this even has an impact on AIX, as we do kernel2962// scheduling there; however, this still leaves iSeries.)2963//2964// We use the same values for AIX and PASE.2965int os::java_to_os_priority[CriticalPriority + 1] = {296654, // 0 Entry should never be used2967296855, // 1 MinPriority296955, // 2297056, // 32971297256, // 4297357, // 5 NormPriority297457, // 62975297658, // 7297758, // 8297859, // 9 NearMaxPriority2979298060, // 10 MaxPriority2981298260 // 11 CriticalPriority2983};29842985OSReturn os::set_native_priority(Thread* thread, int newpri) {2986if (!UseThreadPriorities) return OS_OK;2987pthread_t thr = thread->osthread()->pthread_id();2988int policy = SCHED_OTHER;2989struct sched_param param;2990param.sched_priority = newpri;2991int ret = pthread_setschedparam(thr, policy, ¶m);29922993if (ret != 0) {2994trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",2995(int)thr, newpri, ret, strerror(ret));2996}2997return (ret == 0) ? OS_OK : OS_ERR;2998}29993000OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {3001if (!UseThreadPriorities) {3002*priority_ptr = java_to_os_priority[NormPriority];3003return OS_OK;3004}3005pthread_t thr = thread->osthread()->pthread_id();3006int policy = SCHED_OTHER;3007struct sched_param param;3008int ret = pthread_getschedparam(thr, &policy, ¶m);3009*priority_ptr = param.sched_priority;30103011return (ret == 0) ? OS_OK : OS_ERR;3012}30133014// Hint to the underlying OS that a task switch would not be good.3015// Void return because it's a hint and can fail.3016void os::hint_no_preempt() {}30173018////////////////////////////////////////////////////////////////////////////////3019// suspend/resume support30203021// the low-level signal-based suspend/resume support is a remnant from the3022// old VM-suspension that used to be for java-suspension, safepoints etc,3023// within hotspot. 
Now there is a single use-case for this:
//  - calling get_thread_pc() on the VMThread by the flat-profiler task
//    that runs in the watcher thread.
// The remaining code is greatly simplified from the more general suspension
// code that used to be used.
//
// The protocol is quite simple:
//  - suspend:
//      - sends a signal to the target thread
//      - polls the suspend state of the osthread using a yield loop
//      - target thread signal handler (SR_handler) sets suspend state
//        and blocks in sigsuspend until continued
//  - resume:
//      - sets target osthread state to continue
//      - sends signal to end the sigsuspend loop in the SR_handler
//
// Note that the SR_lock plays no role in this suspend/resume protocol.
//

static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
  osthread->set_siginfo(NULL);
}

static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
  osthread->set_ucontext(context);
  osthread->set_siginfo(siginfo);
}

//
// Handler function invoked when a thread's execution is suspended or
// resumed. We have to be careful that only async-safe functions are
// called here (Note: most pthread functions are not async safe and
// should be avoided.)
//
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
//
// Currently only ever called on the VMThread and JavaThreads (PC sampling).
//
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current();
  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}
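
// ---------------------------------------------------------------------------
// Added commentary (illustrative summary, not part of the original source):
// the os::SuspendResume state machine driven by SR_handler() above and by
// do_suspend()/do_resume() below moves through these states:
//
//   SR_RUNNING         --request_suspend()-------------> SR_SUSPEND_REQUEST
//   SR_SUSPEND_REQUEST --suspended()  (in SR_handler)---> SR_SUSPENDED
//   SR_SUSPEND_REQUEST --cancel_suspend() (if the handler
//                        has not yet suspended)---------> SR_RUNNING
//   SR_SUSPENDED       --request_wakeup()---------------> SR_WAKEUP_REQUEST
//   SR_WAKEUP_REQUEST  --running()    (in SR_handler)---> SR_RUNNING
// ---------------------------------------------------------------------------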

static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > 0 && sig < NSIG) {
      SR_signum = sig;
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  // 4528190 - We also need to block pthread restart signal (32 on all
  // supported Linux platforms). Note that LinuxThreads need to block
  // this signal for all threads to work properly. So we don't have
  // to use hard-coded signal number when setting up the mask.
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}

static int SR_finalize() {
  return 0;
}

static int sr_notify(OSThread* osthread) {
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");
  return status;
}

// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;

// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::yield_all(i);
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
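
// Added commentary (not part of the original source): do_resume() below keeps
// re-sending SR_signum and spinning until the target's SuspendResume state is
// observed as SR_RUNNING; the final guarantee() asserts the wakeup took effect.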
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::yield_all(i);
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}

////////////////////////////////////////////////////////////////////////////////
// interrupt support

void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  if (!osthread->interrupted()) {
    osthread->set_interrupted(true);
    // More than one thread can get here with the same value of osthread,
    // resulting in multiple notifications. We do, however, want the store
    // to interrupted() to be visible to other threads before we execute unpark().
    OrderAccess::fence();
    ParkEvent * const slp = thread->_SleepEvent;
    if (slp != NULL) slp->unpark();
  }

  // For JSR166. Unpark even if interrupt status already was set
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();

}

bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  bool interrupted = osthread->interrupted();

  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    // consider thread->_SleepEvent->reset() ... optional optimization
  }

  return interrupted;
}

///////////////////////////////////////////////////////////////////////////////////
// signal handling (except suspend/resume)

// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
//
// If this routine returns false, it is OK to call it again. This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks. Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
// It should be consulted by handlers for any of those signals.
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction(). 
This routine assumes that3313// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.3314//3315// Note that the VM will print warnings if it detects conflicting signal3316// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".3317//3318extern "C" JNIEXPORT int3319JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);33203321// Set thread signal mask (for some reason on AIX sigthreadmask() seems3322// to be the thing to call; documentation is not terribly clear about whether3323// pthread_sigmask also works, and if it does, whether it does the same.3324bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {3325const int rc = ::pthread_sigmask(how, set, oset);3326// return value semantics differ slightly for error case:3327// pthread_sigmask returns error number, sigthreadmask -1 and sets global errno3328// (so, pthread_sigmask is more theadsafe for error handling)3329// But success is always 0.3330return rc == 0 ? true : false;3331}33323333// Function to unblock all signals which are, according3334// to POSIX, typical program error signals. If they happen while being blocked,3335// they typically will bring down the process immediately.3336bool unblock_program_error_signals() {3337sigset_t set;3338::sigemptyset(&set);3339::sigaddset(&set, SIGILL);3340::sigaddset(&set, SIGBUS);3341::sigaddset(&set, SIGFPE);3342::sigaddset(&set, SIGSEGV);3343return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);3344}33453346// Renamed from 'signalHandler' to avoid collision with other shared libs.3347void javaSignalHandler(int sig, siginfo_t* info, void* uc) {3348assert(info != NULL && uc != NULL, "it must be old kernel");33493350// Never leave program error signals blocked;3351// on all our platforms they would bring down the process immediately when3352// getting raised while being blocked.3353unblock_program_error_signals();33543355JVM_handle_aix_signal(sig, info, uc, true);3356}33573358// This boolean allows users to forward their own non-matching signals3359// to JVM_handle_aix_signal, harmlessly.3360bool os::Aix::signal_handlers_are_installed = false;33613362// For signal-chaining3363struct sigaction os::Aix::sigact[MAXSIGNUM];3364unsigned int os::Aix::sigs = 0;3365bool os::Aix::libjsig_is_loaded = false;3366typedef struct sigaction *(*get_signal_t)(int);3367get_signal_t os::Aix::get_signal_action = NULL;33683369struct sigaction* os::Aix::get_chained_signal_action(int sig) {3370struct sigaction *actp = NULL;33713372if (libjsig_is_loaded) {3373// Retrieve the old signal handler from libjsig3374actp = (*get_signal_action)(sig);3375}3376if (actp == NULL) {3377// Retrieve the preinstalled signal handler from jvm3378actp = get_preinstalled_handler(sig);3379}33803381return actp;3382}33833384static bool call_chained_handler(struct sigaction *actp, int sig,3385siginfo_t *siginfo, void *context) {3386// Call the old signal handler3387if (actp->sa_handler == SIG_DFL) {3388// It's more reasonable to let jvm treat it as an unexpected exception3389// instead of taking the default action.3390return false;3391} else if (actp->sa_handler != SIG_IGN) {3392if ((actp->sa_flags & SA_NODEFER) == 0) {3393// automaticlly block the signal3394sigaddset(&(actp->sa_mask), sig);3395}33963397sa_handler_t hand = NULL;3398sa_sigaction_t sa = NULL;3399bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;3400// retrieve the chained handler3401if (siginfo_flag_set) {3402sa = actp->sa_sigaction;3403} else {3404hand = 
actp->sa_handler;3405}34063407if ((actp->sa_flags & SA_RESETHAND) != 0) {3408actp->sa_handler = SIG_DFL;3409}34103411// try to honor the signal mask3412sigset_t oset;3413pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);34143415// call into the chained handler3416if (siginfo_flag_set) {3417(*sa)(sig, siginfo, context);3418} else {3419(*hand)(sig);3420}34213422// restore the signal mask3423pthread_sigmask(SIG_SETMASK, &oset, 0);3424}3425// Tell jvm's signal handler the signal is taken care of.3426return true;3427}34283429bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {3430bool chained = false;3431// signal-chaining3432if (UseSignalChaining) {3433struct sigaction *actp = get_chained_signal_action(sig);3434if (actp != NULL) {3435chained = call_chained_handler(actp, sig, siginfo, context);3436}3437}3438return chained;3439}34403441struct sigaction* os::Aix::get_preinstalled_handler(int sig) {3442if ((((unsigned int)1 << sig) & sigs) != 0) {3443return &sigact[sig];3444}3445return NULL;3446}34473448void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {3449assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");3450sigact[sig] = oldAct;3451sigs |= (unsigned int)1 << sig;3452}34533454// for diagnostic3455int os::Aix::sigflags[MAXSIGNUM];34563457int os::Aix::get_our_sigflags(int sig) {3458assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");3459return sigflags[sig];3460}34613462void os::Aix::set_our_sigflags(int sig, int flags) {3463assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");3464sigflags[sig] = flags;3465}34663467void os::Aix::set_signal_handler(int sig, bool set_installed) {3468// Check for overwrite.3469struct sigaction oldAct;3470sigaction(sig, (struct sigaction*)NULL, &oldAct);34713472void* oldhand = oldAct.sa_sigaction3473? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)3474: CAST_FROM_FN_PTR(void*, oldAct.sa_handler);3475// Renamed 'signalHandler' to avoid collision with other shared libs.3476if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&3477oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&3478oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {3479if (AllowUserSignalHandlers || !set_installed) {3480// Do not overwrite; user takes responsibility to forward to us.3481return;3482} else if (UseSignalChaining) {3483// save the old handler in jvm3484save_preinstalled_handler(sig, oldAct);3485// libjsig also interposes the sigaction() call below and saves the3486// old sigaction on it own.3487} else {3488fatal(err_msg("Encountered unexpected pre-existing sigaction handler "3489"%#lx for signal %d.", (long)oldhand, sig));3490}3491}34923493struct sigaction sigAct;3494sigfillset(&(sigAct.sa_mask));3495if (!set_installed) {3496sigAct.sa_handler = SIG_DFL;3497sigAct.sa_flags = SA_RESTART;3498} else {3499// Renamed 'signalHandler' to avoid collision with other shared libs.3500sigAct.sa_sigaction = javaSignalHandler;3501sigAct.sa_flags = SA_SIGINFO|SA_RESTART;3502}3503// Save flags, which are set by ours3504assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");3505sigflags[sig] = sigAct.sa_flags;35063507int ret = sigaction(sig, &sigAct, &oldAct);3508assert(ret == 0, "check");35093510void* oldhand2 = oldAct.sa_sigaction3511? 
CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)3512: CAST_FROM_FN_PTR(void*, oldAct.sa_handler);3513assert(oldhand2 == oldhand, "no concurrent signal handler installation");3514}35153516// install signal handlers for signals that HotSpot needs to3517// handle in order to support Java-level exception handling.3518void os::Aix::install_signal_handlers() {3519if (!signal_handlers_are_installed) {3520signal_handlers_are_installed = true;35213522// signal-chaining3523typedef void (*signal_setting_t)();3524signal_setting_t begin_signal_setting = NULL;3525signal_setting_t end_signal_setting = NULL;3526begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,3527dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));3528if (begin_signal_setting != NULL) {3529end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,3530dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));3531get_signal_action = CAST_TO_FN_PTR(get_signal_t,3532dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));3533libjsig_is_loaded = true;3534assert(UseSignalChaining, "should enable signal-chaining");3535}3536if (libjsig_is_loaded) {3537// Tell libjsig jvm is setting signal handlers3538(*begin_signal_setting)();3539}35403541set_signal_handler(SIGSEGV, true);3542set_signal_handler(SIGPIPE, true);3543set_signal_handler(SIGBUS, true);3544set_signal_handler(SIGILL, true);3545set_signal_handler(SIGFPE, true);3546set_signal_handler(SIGTRAP, true);3547set_signal_handler(SIGXFSZ, true);3548set_signal_handler(SIGDANGER, true);35493550if (libjsig_is_loaded) {3551// Tell libjsig jvm finishes setting signal handlers.3552(*end_signal_setting)();3553}35543555// We don't activate signal checker if libjsig is in place, we trust ourselves3556// and if UserSignalHandler is installed all bets are off.3557// Log that signal checking is off only if -verbose:jni is specified.3558if (CheckJNICalls) {3559if (libjsig_is_loaded) {3560tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");3561check_signals = false;3562}3563if (AllowUserSignalHandlers) {3564tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");3565check_signals = false;3566}3567// Need to initialize check_signal_done.3568::sigemptyset(&check_signal_done);3569}3570}3571}35723573static const char* get_signal_handler_name(address handler,3574char* buf, int buflen) {3575int offset;3576bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);3577if (found) {3578// skip directory names3579const char *p1, *p2;3580p1 = buf;3581size_t len = strlen(os::file_separator());3582while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;3583// The way os::dll_address_to_library_name is implemented on Aix3584// right now, it always returns -1 for the offset which is not3585// terribly informative.3586// Will fix that. For now, omit the offset.3587jio_snprintf(buf, buflen, "%s", p1);3588} else {3589jio_snprintf(buf, buflen, PTR_FORMAT, handler);3590}3591return buf;3592}35933594static void print_signal_handler(outputStream* st, int sig,3595char* buf, size_t buflen) {3596struct sigaction sa;3597sigaction(sig, NULL, &sa);35983599st->print("%s: ", os::exception_name(sig, buf, buflen));36003601address handler = (sa.sa_flags & SA_SIGINFO)3602? 
CAST_FROM_FN_PTR(address, sa.sa_sigaction)3603: CAST_FROM_FN_PTR(address, sa.sa_handler);36043605if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {3606st->print("SIG_DFL");3607} else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {3608st->print("SIG_IGN");3609} else {3610st->print("[%s]", get_signal_handler_name(handler, buf, buflen));3611}36123613// Print readable mask.3614st->print(", sa_mask[0]=");3615os::Posix::print_signal_set_short(st, &sa.sa_mask);36163617address rh = VMError::get_resetted_sighandler(sig);3618// May be, handler was resetted by VMError?3619if (rh != NULL) {3620handler = rh;3621sa.sa_flags = VMError::get_resetted_sigflags(sig);3622}36233624// Print textual representation of sa_flags.3625st->print(", sa_flags=");3626os::Posix::print_sa_flags(st, sa.sa_flags);36273628// Check: is it our handler?3629if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||3630handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {3631// It is our signal handler.3632// Check for flags, reset system-used one!3633if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {3634st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",3635os::Aix::get_our_sigflags(sig));3636}3637}3638st->cr();3639}36403641#define DO_SIGNAL_CHECK(sig) \3642if (!sigismember(&check_signal_done, sig)) \3643os::Aix::check_signal_handler(sig)36443645// This method is a periodic task to check for misbehaving JNI applications3646// under CheckJNI, we can add any periodic checks here36473648void os::run_periodic_checks() {36493650if (check_signals == false) return;36513652// SEGV and BUS if overridden could potentially prevent3653// generation of hs*.log in the event of a crash, debugging3654// such a case can be very challenging, so we absolutely3655// check the following for a good measure:3656DO_SIGNAL_CHECK(SIGSEGV);3657DO_SIGNAL_CHECK(SIGILL);3658DO_SIGNAL_CHECK(SIGFPE);3659DO_SIGNAL_CHECK(SIGBUS);3660DO_SIGNAL_CHECK(SIGPIPE);3661DO_SIGNAL_CHECK(SIGXFSZ);3662if (UseSIGTRAP) {3663DO_SIGNAL_CHECK(SIGTRAP);3664}3665DO_SIGNAL_CHECK(SIGDANGER);36663667// ReduceSignalUsage allows the user to override these handlers3668// see comments at the very top and jvm_solaris.h3669if (!ReduceSignalUsage) {3670DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);3671DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);3672DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);3673DO_SIGNAL_CHECK(BREAK_SIGNAL);3674}36753676DO_SIGNAL_CHECK(SR_signum);3677DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);3678}36793680typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);36813682static os_sigaction_t os_sigaction = NULL;36833684void os::Aix::check_signal_handler(int sig) {3685char buf[O_BUFLEN];3686address jvmHandler = NULL;36873688struct sigaction act;3689if (os_sigaction == NULL) {3690// only trust the default sigaction, in case it has been interposed3691os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");3692if (os_sigaction == NULL) return;3693}36943695os_sigaction(sig, (struct sigaction*)NULL, &act);36963697address thisHandler = (act.sa_flags & SA_SIGINFO)3698? 
CAST_FROM_FN_PTR(address, act.sa_sigaction)3699: CAST_FROM_FN_PTR(address, act.sa_handler);37003701switch(sig) {3702case SIGSEGV:3703case SIGBUS:3704case SIGFPE:3705case SIGPIPE:3706case SIGILL:3707case SIGXFSZ:3708// Renamed 'signalHandler' to avoid collision with other shared libs.3709jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);3710break;37113712case SHUTDOWN1_SIGNAL:3713case SHUTDOWN2_SIGNAL:3714case SHUTDOWN3_SIGNAL:3715case BREAK_SIGNAL:3716jvmHandler = (address)user_handler();3717break;37183719case INTERRUPT_SIGNAL:3720jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);3721break;37223723default:3724if (sig == SR_signum) {3725jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);3726} else {3727return;3728}3729break;3730}37313732if (thisHandler != jvmHandler) {3733tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));3734tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));3735tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));3736// No need to check this sig any longer3737sigaddset(&check_signal_done, sig);3738// Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN3739if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {3740tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",3741exception_name(sig, buf, O_BUFLEN));3742}3743} else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {3744tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));3745tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));3746tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);3747// No need to check this sig any longer3748sigaddset(&check_signal_done, sig);3749}37503751// Dump all the signal3752if (sigismember(&check_signal_done, sig)) {3753print_signal_handlers(tty, buf, O_BUFLEN);3754}3755}37563757extern bool signal_name(int signo, char* buf, size_t len);37583759const char* os::exception_name(int exception_code, char* buf, size_t size) {3760if (0 < exception_code && exception_code <= SIGRTMAX) {3761// signal3762if (!signal_name(exception_code, buf, size)) {3763jio_snprintf(buf, size, "SIG%d", exception_code);3764}3765return buf;3766} else {3767return NULL;3768}3769}37703771// To install functions for atexit system call3772extern "C" {3773static void perfMemory_exit_helper() {3774perfMemory_exit();3775}3776}37773778// This is called _before_ the most of global arguments have been parsed.3779void os::init(void) {3780// This is basic, we want to know if that ever changes.3781// (shared memory boundary is supposed to be a 256M aligned)3782assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");37833784// First off, we need to know whether we run on AIX or PASE, and3785// the OS level we run on.3786os::Aix::initialize_os_info();37873788// Scan environment (SPEC1170 behaviour, etc)3789os::Aix::scan_environment();37903791// Check which pages are supported by AIX.3792os::Aix::query_multipage_support();37933794// Next, we need to initialize libo4 and libperfstat libraries.3795if (os::Aix::on_pase()) {3796os::Aix::initialize_libo4();3797} else {3798os::Aix::initialize_libperfstat();3799}38003801// Reset the perfstat information provided by ODM.3802if (os::Aix::on_aix()) {3803libperfstat::perfstat_reset();3804}38053806// Now initialze basic system properties. 
Note that for some of the values we3807// need libperfstat etc.3808os::Aix::initialize_system_info();38093810// Initialize large page support.3811if (UseLargePages) {3812os::large_page_init();3813if (!UseLargePages) {3814// initialize os::_page_sizes3815_page_sizes[0] = Aix::page_size();3816_page_sizes[1] = 0;3817if (Verbose) {3818fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n");3819}3820}3821} else {3822// initialize os::_page_sizes3823_page_sizes[0] = Aix::page_size();3824_page_sizes[1] = 0;3825}38263827// debug trace3828if (Verbose) {3829fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());3830fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());3831fprintf(stderr, "os::_page_sizes = ( ");3832for (int i = 0; _page_sizes[i]; i ++) {3833fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));3834}3835fprintf(stderr, ")\n");3836}38373838_initial_pid = getpid();38393840clock_tics_per_sec = sysconf(_SC_CLK_TCK);38413842init_random(1234567);38433844ThreadCritical::initialize();38453846// _main_thread points to the thread that created/loaded the JVM.3847Aix::_main_thread = pthread_self();38483849initial_time_count = os::elapsed_counter();3850pthread_mutex_init(&dl_mutex, NULL);3851}38523853// This is called _after_ the global arguments have been parsed.3854jint os::init_2(void) {38553856trcVerbose("processor count: %d", os::_processor_count);3857trcVerbose("physical memory: %lu", Aix::_physical_memory);38583859// Initially build up the loaded dll map.3860LoadedLibraries::reload();38613862const int page_size = Aix::page_size();3863const int map_size = page_size;38643865address map_address = (address) MAP_FAILED;3866const int prot = PROT_READ;3867const int flags = MAP_PRIVATE|MAP_ANONYMOUS;38683869// use optimized addresses for the polling page,3870// e.g. map it to a special 32-bit address.3871if (OptimizePollingPageLocation) {3872// architecture-specific list of address wishes:3873address address_wishes[] = {3874// AIX: addresses lower than 0x30000000 don't seem to work on AIX.3875// PPC64: all address wishes are non-negative 32 bit values where3876// the lower 16 bits are all zero. 
we can load these addresses3877// with a single ppc_lis instruction.3878(address) 0x30000000, (address) 0x31000000,3879(address) 0x32000000, (address) 0x33000000,3880(address) 0x40000000, (address) 0x41000000,3881(address) 0x42000000, (address) 0x43000000,3882(address) 0x50000000, (address) 0x51000000,3883(address) 0x52000000, (address) 0x53000000,3884(address) 0x60000000, (address) 0x61000000,3885(address) 0x62000000, (address) 0x630000003886};3887int address_wishes_length = sizeof(address_wishes)/sizeof(address);38883889// iterate over the list of address wishes:3890for (int i=0; i<address_wishes_length; i++) {3891// try to map with current address wish.3892// AIX: AIX needs MAP_FIXED if we provide an address and mmap will3893// fail if the address is already mapped.3894map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,3895map_size, prot,3896flags | MAP_FIXED,3897-1, 0);3898if (Verbose) {3899fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",3900address_wishes[i], map_address + (ssize_t)page_size);3901}39023903if (map_address + (ssize_t)page_size == address_wishes[i]) {3904// map succeeded and map_address is at wished address, exit loop.3905break;3906}39073908if (map_address != (address) MAP_FAILED) {3909// Map succeeded, but polling_page is not at wished address, unmap and continue.3910::munmap(map_address, map_size);3911map_address = (address) MAP_FAILED;3912}3913// map failed, continue loop.3914}3915} // end OptimizePollingPageLocation39163917if (map_address == (address) MAP_FAILED) {3918map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);3919}3920guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");3921os::set_polling_page(map_address);39223923if (!UseMembar) {3924address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);3925guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");3926os::set_memory_serialize_page(mem_serialize_page);39273928#ifndef PRODUCT3929if (Verbose && PrintMiscellaneous)3930tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);3931#endif3932}39333934// initialize suspend/resume support - must do this before signal_sets_init()3935if (SR_initialize() != 0) {3936perror("SR_initialize failed");3937return JNI_ERR;3938}39393940Aix::signal_sets_init();3941Aix::install_signal_handlers();39423943// Check minimum allowable stack size for thread creation and to initialize3944// the java system classes, including StackOverflowError - depends on page3945// size. 
Add a page for compiler2 recursion in main thread.3946// Add in 2*BytesPerWord times page size to account for VM stack during3947// class initialization depending on 32 or 64 bit VM.3948os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,3949(size_t)(StackYellowPages+StackRedPages+StackShadowPages +39502*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());39513952size_t threadStackSizeInBytes = ThreadStackSize * K;3953if (threadStackSizeInBytes != 0 &&3954threadStackSizeInBytes < os::Aix::min_stack_allowed) {3955tty->print_cr("\nThe stack size specified is too small, "3956"Specify at least %dk",3957os::Aix::min_stack_allowed / K);3958return JNI_ERR;3959}39603961// Make the stack size a multiple of the page size so that3962// the yellow/red zones can be guarded.3963// Note that this can be 0, if no default stacksize was set.3964JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));39653966Aix::libpthread_init();39673968if (MaxFDLimit) {3969// set the number of file descriptors to max. print out error3970// if getrlimit/setrlimit fails but continue regardless.3971struct rlimit nbr_files;3972int status = getrlimit(RLIMIT_NOFILE, &nbr_files);3973if (status != 0) {3974if (PrintMiscellaneous && (Verbose || WizardMode))3975perror("os::init_2 getrlimit failed");3976} else {3977nbr_files.rlim_cur = nbr_files.rlim_max;3978status = setrlimit(RLIMIT_NOFILE, &nbr_files);3979if (status != 0) {3980if (PrintMiscellaneous && (Verbose || WizardMode))3981perror("os::init_2 setrlimit failed");3982}3983}3984}39853986if (PerfAllowAtExitRegistration) {3987// only register atexit functions if PerfAllowAtExitRegistration is set.3988// atexit functions can be delayed until process exit time, which3989// can be problematic for embedded VM situations. 
Embedded VMs should3990// call DestroyJavaVM() to assure that VM resources are released.39913992// note: perfMemory_exit_helper atexit function may be removed in3993// the future if the appropriate cleanup code can be added to the3994// VM_Exit VMOperation's doit method.3995if (atexit(perfMemory_exit_helper) != 0) {3996warning("os::init_2 atexit(perfMemory_exit_helper) failed");3997}3998}39994000return JNI_OK;4001}40024003// Mark the polling page as unreadable4004void os::make_polling_page_unreadable(void) {4005if (!guard_memory((char*)_polling_page, Aix::page_size())) {4006fatal("Could not disable polling page");4007}4008};40094010// Mark the polling page as readable4011void os::make_polling_page_readable(void) {4012// Changed according to os_linux.cpp.4013if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {4014fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));4015}4016};40174018int os::active_processor_count() {4019// User has overridden the number of active processors4020if (ActiveProcessorCount > 0) {4021if (PrintActiveCpus) {4022tty->print_cr("active_processor_count: "4023"active processor count set by user : %d",4024ActiveProcessorCount);4025}4026return ActiveProcessorCount;4027}40284029int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);4030assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");4031return online_cpus;4032}40334034void os::set_native_thread_name(const char *name) {4035// Not yet implemented.4036return;4037}40384039bool os::distribute_processes(uint length, uint* distribution) {4040// Not yet implemented.4041return false;4042}40434044bool os::bind_to_processor(uint processor_id) {4045// Not yet implemented.4046return false;4047}40484049void os::SuspendedThreadTask::internal_do_task() {4050if (do_suspend(_thread->osthread())) {4051SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());4052do_task(context);4053do_resume(_thread->osthread());4054}4055}40564057class PcFetcher : public os::SuspendedThreadTask {4058public:4059PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}4060ExtendedPC result();4061protected:4062void do_task(const os::SuspendedThreadTaskContext& context);4063private:4064ExtendedPC _epc;4065};40664067ExtendedPC PcFetcher::result() {4068guarantee(is_done(), "task is not done yet.");4069return _epc;4070}40714072void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {4073Thread* thread = context.thread();4074OSThread* osthread = thread->osthread();4075if (osthread->ucontext() != NULL) {4076_epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());4077} else {4078// NULL context is unexpected, double-check this is the VMThread.4079guarantee(thread->is_VM_thread(), "can only be called for VMThread");4080}4081}40824083// Suspends the target using the signal mechanism and then grabs the PC before4084// resuming the target. 
Used by the flat-profiler only4085ExtendedPC os::get_thread_pc(Thread* thread) {4086// Make sure that it is called by the watcher for the VMThread.4087assert(Thread::current()->is_Watcher_thread(), "Must be watcher");4088assert(thread->is_VM_thread(), "Can only be called for VMThread");40894090PcFetcher fetcher(thread);4091fetcher.run();4092return fetcher.result();4093}40944095// Not neede on Aix.4096// int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {4097// }40984099////////////////////////////////////////////////////////////////////////////////4100// debug support41014102static address same_page(address x, address y) {4103intptr_t page_bits = -os::vm_page_size();4104if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))4105return x;4106else if (x > y)4107return (address)(intptr_t(y) | ~page_bits) + 1;4108else4109return (address)(intptr_t(y) & page_bits);4110}41114112bool os::find(address addr, outputStream* st) {41134114st->print(PTR_FORMAT ": ", addr);41154116const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);4117if (lib) {4118lib->print(st);4119return true;4120} else {4121lib = LoadedLibraries::find_for_data_address(addr);4122if (lib) {4123lib->print(st);4124return true;4125} else {4126st->print_cr("(outside any module)");4127}4128}41294130return false;4131}41324133////////////////////////////////////////////////////////////////////////////////4134// misc41354136// This does not do anything on Aix. This is basically a hook for being4137// able to use structured exception handling (thread-local exception filters)4138// on, e.g., Win32.4139void4140os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,4141JavaCallArguments* args, Thread* thread) {4142f(value, method, args, thread);4143}41444145void os::print_statistics() {4146}41474148int os::message_box(const char* title, const char* message) {4149int i;4150fdStream err(defaultStream::error_fd());4151for (i = 0; i < 78; i++) err.print_raw("=");4152err.cr();4153err.print_raw_cr(title);4154for (i = 0; i < 78; i++) err.print_raw("-");4155err.cr();4156err.print_raw_cr(message);4157for (i = 0; i < 78; i++) err.print_raw("=");4158err.cr();41594160char buf[16];4161// Prevent process from exiting upon "read error" without consuming all CPU4162while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }41634164return buf[0] == 'y' || buf[0] == 'Y';4165}41664167int os::stat(const char *path, struct stat *sbuf) {4168char pathbuf[MAX_PATH];4169if (strlen(path) > MAX_PATH - 1) {4170errno = ENAMETOOLONG;4171return -1;4172}4173os::native_path(strcpy(pathbuf, path));4174return ::stat(pathbuf, sbuf);4175}41764177bool os::check_heap(bool force) {4178return true;4179}41804181// int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {4182// return ::vsnprintf(buf, count, format, args);4183// }41844185// Is a (classpath) directory empty?4186bool os::dir_is_empty(const char* path) {4187DIR *dir = NULL;4188struct dirent *ptr;41894190dir = opendir(path);4191if (dir == NULL) return true;41924193/* Scan the directory */4194bool result = true;4195while (result && (ptr = readdir(dir)) != NULL) {4196if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {4197result = false;4198}4199}4200closedir(dir);4201return result;4202}42034204// This code originates from JDK's sysOpen and open64_w4205// from src/solaris/hpi/src/system_md.c42064207#ifndef O_DELETE4208#define O_DELETE 0x100004209#endif42104211// Open a 
file. Unlink the file immediately after open returns4212// if the specified oflag has the O_DELETE flag set.4213// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c42144215int os::open(const char *path, int oflag, int mode) {42164217if (strlen(path) > MAX_PATH - 1) {4218errno = ENAMETOOLONG;4219return -1;4220}4221int fd;4222int o_delete = (oflag & O_DELETE);4223oflag = oflag & ~O_DELETE;42244225fd = ::open64(path, oflag, mode);4226if (fd == -1) return -1;42274228// If the open succeeded, the file might still be a directory.4229{4230struct stat64 buf64;4231int ret = ::fstat64(fd, &buf64);4232int st_mode = buf64.st_mode;42334234if (ret != -1) {4235if ((st_mode & S_IFMT) == S_IFDIR) {4236errno = EISDIR;4237::close(fd);4238return -1;4239}4240} else {4241::close(fd);4242return -1;4243}4244}42454246// All file descriptors that are opened in the JVM and not4247// specifically destined for a subprocess should have the4248// close-on-exec flag set. If we don't set it, then careless 3rd4249// party native code might fork and exec without closing all4250// appropriate file descriptors (e.g. as we do in closeDescriptors in4251// UNIXProcess.c), and this in turn might:4252//4253// - cause end-of-file to fail to be detected on some file4254// descriptors, resulting in mysterious hangs, or4255//4256// - might cause an fopen in the subprocess to fail on a system4257// suffering from bug 1085341.4258//4259// (Yes, the default setting of the close-on-exec flag is a Unix4260// design flaw.)4261//4262// See:4263// 1085341: 32-bit stdio routines should support file descriptors >2554264// 4843136: (process) pipe file descriptor from Runtime.exec not being closed4265// 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 94266#ifdef FD_CLOEXEC4267{4268int flags = ::fcntl(fd, F_GETFD);4269if (flags != -1)4270::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);4271}4272#endif42734274if (o_delete != 0) {4275::unlink(path);4276}4277return fd;4278}42794280// create binary file, rewriting existing file if required4281int os::create_binary_file(const char* path, bool rewrite_existing) {4282int oflags = O_WRONLY | O_CREAT;4283if (!rewrite_existing) {4284oflags |= O_EXCL;4285}4286return ::open64(path, oflags, S_IREAD | S_IWRITE);4287}42884289// return current position of file pointer4290jlong os::current_file_offset(int fd) {4291return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);4292}42934294// move file pointer to the specified offset4295jlong os::seek_to_file_offset(int fd, jlong offset) {4296return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);4297}42984299// This code originates from JDK's sysAvailable4300// from src/solaris/hpi/src/native_threads/src/sys_api_td.c43014302int os::available(int fd, jlong *bytes) {4303jlong cur, end;4304int mode;4305struct stat64 buf64;43064307if (::fstat64(fd, &buf64) >= 0) {4308mode = buf64.st_mode;4309if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {4310// XXX: is the following call interruptible? 
If so, this might4311// need to go through the INTERRUPT_IO() wrapper as for other4312// blocking, interruptible calls in this file.4313int n;4314if (::ioctl(fd, FIONREAD, &n) >= 0) {4315*bytes = n;4316return 1;4317}4318}4319}4320if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {4321return 0;4322} else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {4323return 0;4324} else if (::lseek64(fd, cur, SEEK_SET) == -1) {4325return 0;4326}4327*bytes = end - cur;4328return 1;4329}43304331int os::socket_available(int fd, jint *pbytes) {4332// Linux doc says EINTR not returned, unlike Solaris4333int ret = ::ioctl(fd, FIONREAD, pbytes);43344335//%% note ioctl can return 0 when successful, JVM_SocketAvailable4336// is expected to return 0 on failure and 1 on success to the jdk.4337return (ret < 0) ? 0 : 1;4338}43394340// Map a block of memory.4341char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,4342char *addr, size_t bytes, bool read_only,4343bool allow_exec) {4344Unimplemented();4345return NULL;4346}43474348// Remap a block of memory.4349char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,4350char *addr, size_t bytes, bool read_only,4351bool allow_exec) {4352// same as map_memory() on this OS4353return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,4354allow_exec);4355}43564357// Unmap a block of memory.4358bool os::pd_unmap_memory(char* addr, size_t bytes) {4359return munmap(addr, bytes) == 0;4360}43614362// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)4363// are used by JVM M&M and JVMTI to get user+sys or user CPU time4364// of a thread.4365//4366// current_thread_cpu_time() and thread_cpu_time(Thread*) returns4367// the fast estimate available on the platform.43684369jlong os::current_thread_cpu_time() {4370// return user + sys since the cost is the same4371const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);4372assert(n >= 0, "negative CPU time");4373return n;4374}43754376jlong os::thread_cpu_time(Thread* thread) {4377// consistent with what current_thread_cpu_time() returns4378const jlong n = os::thread_cpu_time(thread, true /* user + sys */);4379assert(n >= 0, "negative CPU time");4380return n;4381}43824383jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {4384const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);4385assert(n >= 0, "negative CPU time");4386return n;4387}43884389static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {4390bool error = false;43914392jlong sys_time = 0;4393jlong user_time = 0;43944395// Reimplemented using getthrds64().4396//4397// Works like this:4398// For the thread in question, get the kernel thread id. Then get the4399// kernel thread statistics using that id.4400//4401// This only works of course when no pthread scheduling is used,4402// i.e. 
// there is a 1:1 relationship to kernel threads.
  // On AIX, see AIXTHREAD_SCOPE variable.

  pthread_t pthtid = thread->osthread()->pthread_id();

  // retrieve kernel thread id for the pthread:
  tid64_t tid = 0;
  struct __pthrdsinfo pinfo;
  // I just love those otherworldly IBM APIs which force me to hand down
  // dummy buffers for stuff I don't care for...
  char dummy[1];
  int dummy_size = sizeof(dummy);
  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
                          dummy, &dummy_size) == 0) {
    tid = pinfo.__pi_tid;
  } else {
    tty->print_cr("pthread_getthrds_np failed.");
    error = true;
  }

  // retrieve kernel timing info for that kernel thread
  if (!error) {
    struct thrdentry64 thrdentry;
    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
    } else {
      tty->print_cr("getthrds64 failed.");
      error = true;
    }
  }

  if (p_sys_time) {
    *p_sys_time = sys_time;
  }

  if (p_user_time) {
    *p_user_time = user_time;
  }

  if (error) {
    return false;
  }

  return true;
}

jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  jlong sys_time;
  jlong user_time;

  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
    return -1;
  }

  return user_sys_cpu_time ? sys_time + user_time : user_time;
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

bool os::is_thread_cpu_time_supported() {
  return true;
}

// System loadavg support.
Returns -1 if load average cannot be obtained.4479// For now just return the system wide load average (no processor sets).4480int os::loadavg(double values[], int nelem) {44814482// Implemented using libperfstat on AIX.44834484guarantee(nelem >= 0 && nelem <= 3, "argument error");4485guarantee(values, "argument error");44864487if (os::Aix::on_pase()) {4488Unimplemented();4489return -1;4490} else {4491// AIX: use libperfstat4492//4493// See also:4494// http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm4495// /usr/include/libperfstat.h:44964497// Use the already AIX version independent get_cpuinfo.4498os::Aix::cpuinfo_t ci;4499if (os::Aix::get_cpuinfo(&ci)) {4500for (int i = 0; i < nelem; i++) {4501values[i] = ci.loadavg[i];4502}4503} else {4504return -1;4505}4506return nelem;4507}4508}45094510void os::pause() {4511char filename[MAX_PATH];4512if (PauseAtStartupFile && PauseAtStartupFile[0]) {4513jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);4514} else {4515jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());4516}45174518int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);4519if (fd != -1) {4520struct stat buf;4521::close(fd);4522while (::stat(filename, &buf) == 0) {4523(void)::poll(NULL, 0, 100);4524}4525} else {4526jio_fprintf(stderr,4527"Could not open pause file '%s', continuing immediately.\n", filename);4528}4529}45304531bool os::is_primordial_thread(void) {4532if (pthread_self() == (pthread_t)1) {4533return true;4534} else {4535return false;4536}4537}45384539// OS recognitions (PASE/AIX, OS level) call this before calling any4540// one of Aix::on_pase(), Aix::os_version() static4541void os::Aix::initialize_os_info() {45424543assert(_on_pase == -1 && _os_version == -1, "already called.");45444545struct utsname uts;4546memset(&uts, 0, sizeof(uts));4547strcpy(uts.sysname, "?");4548if (::uname(&uts) == -1) {4549trc("uname failed (%d)", errno);4550guarantee(0, "Could not determine whether we run on AIX or PASE");4551} else {4552trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "4553"node \"%s\" machine \"%s\"\n",4554uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);4555const int major = atoi(uts.version);4556assert(major > 0, "invalid OS version");4557const int minor = atoi(uts.release);4558assert(minor > 0, "invalid OS release");4559_os_version = (major << 8) | minor;4560if (strcmp(uts.sysname, "OS400") == 0) {4561Unimplemented();4562} else if (strcmp(uts.sysname, "AIX") == 0) {4563// We run on AIX. We do not support versions older than AIX 5.3.4564_on_pase = 0;4565if (_os_version < 0x0503) {4566trc("AIX release older than AIX 5.3 not supported.");4567assert(false, "AIX release too old.");4568} else {4569trcVerbose("We run on AIX %d.%d\n", major, minor);4570}4571} else {4572assert(false, "unknown OS");4573}4574}45754576guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");4577} // end: os::Aix::initialize_os_info()45784579// Scan environment for important settings which might effect the VM.4580// Trace out settings. Warn about invalid settings and/or correct them.4581//4582// Must run after os::Aix::initialue_os_info().4583void os::Aix::scan_environment() {45844585char* p;4586int rc;45874588// Warn explicity if EXTSHM=ON is used. That switch changes how4589// System V shared memory behaves. 
// One effect is that the page size of
  // shared memory cannot be changed dynamically, effectively preventing
  // large pages from working.
  // This switch was needed on AIX 32bit, but on AIX 64bit the general
  // recommendation is (in OSS notes) to switch it off.
  p = ::getenv("EXTSHM");
  if (Verbose) {
    fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
  }
  if (p && strcmp(p, "ON") == 0) {
    fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
    _extshm = 1;
  } else {
    _extshm = 0;
  }

  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
  // Not tested, not supported.
  //
  // Note that it might be worth the trouble to test and to require it, if only to
  // get useful return codes for mprotect.
  //
  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
  // exec() ? before loading the libjvm ? ....)
  p = ::getenv("XPG_SUS_ENV");
  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
  if (p && strcmp(p, "ON") == 0) {
    _xpg_sus_mode = 1;
    trc("Unsupported setting: XPG_SUS_ENV=ON");
    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
    // clobber address ranges. If we ever want to support that, we have to do some
    // testing first.
    guarantee(false, "XPG_SUS_ENV=ON not supported");
  } else {
    _xpg_sus_mode = 0;
  }

  // Switch off AIX internal (pthread) guard pages. This has
  // immediate effect for any pthread_create calls which follow.
  p = ::getenv("AIXTHREAD_GUARDPAGES");
  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
  rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
  guarantee(rc == 0, "");

} // end: os::Aix::scan_environment()

// PASE: initialize the libo4 library (AS400 PASE porting library).
void os::Aix::initialize_libo4() {
  Unimplemented();
}

// AIX: initialize the libperfstat library (we load this dynamically
// because it is only available on AIX).
void os::Aix::initialize_libperfstat() {

  assert(os::Aix::on_aix(), "AIX only");

  if (!libperfstat::init()) {
    trc("libperfstat initialization failed.");
    assert(false, "libperfstat initialization failed");
  } else {
    if (Verbose) {
      fprintf(stderr, "libperfstat initialized.\n");
    }
  }
} // end: os::Aix::initialize_libperfstat

/////////////////////////////////////////////////////////////////////////////
// thread stack

// function to query the current stack size using pthread_getthrds_np
//
// ! do not change anything here unless you know what you are doing !
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {

  // This only works when invoked on a pthread. As we agreed not to use
  // primordial threads anyway, I assert here
  guarantee(!os::is_primordial_thread(), "not allowed on the primordial thread");

  // information about this api can be found (a) in the pthread.h header and
  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
  //
  // The use of this API to find out the current stack is kind of undefined.
  // But after a lot of tries and asking IBM about it, I concluded that it is safe
  // enough for cases where I let the pthread library create its stacks.
For cases4674// where I create an own stack and pass this to pthread_create, it seems not to4675// work (the returned stack size in that case is 0).46764677pthread_t tid = pthread_self();4678struct __pthrdsinfo pinfo;4679char dummy[1]; // we only need this to satisfy the api and to not get E4680int dummy_size = sizeof(dummy);46814682memset(&pinfo, 0, sizeof(pinfo));46834684const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,4685sizeof(pinfo), dummy, &dummy_size);46864687if (rc != 0) {4688fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);4689guarantee(0, "pthread_getthrds_np failed");4690}46914692guarantee(pinfo.__pi_stackend, "returned stack base invalid");46934694// the following can happen when invoking pthread_getthrds_np on a pthread running on a user provided stack4695// (when handing down a stack to pthread create, see pthread_attr_setstackaddr).4696// Not sure what to do here - I feel inclined to forbid this use case completely.4697guarantee(pinfo.__pi_stacksize, "returned stack size invalid");46984699// On AIX, stacks are not necessarily page aligned so round the base and size accordingly4700if (p_stack_base) {4701(*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());4702}47034704if (p_stack_size) {4705(*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();4706}47074708#ifndef PRODUCT4709if (Verbose) {4710fprintf(stderr,4711"query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT4712", real stack_size=" INTPTR_FORMAT4713", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",4714(intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,4715(intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),4716pinfo.__pi_stacksize - os::Aix::stack_page_size());4717}4718#endif47194720} // end query_stack_dimensions47214722// get the current stack base from the OS (actually, the pthread library)4723address os::current_stack_base() {4724address p;4725query_stack_dimensions(&p, 0);4726return p;4727}47284729// get the current stack size from the OS (actually, the pthread library)4730size_t os::current_stack_size() {4731size_t s;4732query_stack_dimensions(0, &s);4733return s;4734}47354736// Refer to the comments in os_solaris.cpp park-unpark.4737//4738// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can4739// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.4740// For specifics regarding the bug see GLIBC BUGID 261237 :4741// http://www.mail-archive.com/[email protected]/msg10837.html.4742// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future4743// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar4744// is used. (The simple C test-case provided in the GLIBC bug report manifests the4745// hang). The JVM is vulernable via sleep(), Object.wait(timo), LockSupport.parkNanos()4746// and monitorenter when we're using 1-0 locking. All those operations may result in4747// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version4748// of libpthread avoids the problem, but isn't practical.4749//4750// Possible remedies:4751//4752// 1. Establish a minimum relative wait time. 50 to 100 msecs seems to work.4753// This is palliative and probabilistic, however. 
// If the thread is preempted
// between the call to compute_abstime() and pthread_cond_timedwait(), more
// than the minimum period may have passed, and the abstime may be stale (in the
// past) resulting in a hang. Using this technique reduces the odds of a hang
// but the JVM is still vulnerable, particularly on heavily loaded systems.
//
// 2. Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
// of the usual flag-condvar-mutex idiom. The write side of the pipe is set
// NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
// reduces to poll()+read(). This works well, but consumes 2 FDs per extant
// thread.
//
// 3. Embargo pthread_cond_timedwait() and implement a native "chron" thread
// that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
// a timeout request to the chron thread and then blocking via pthread_cond_wait().
// This also works well. In fact it avoids kernel-level scalability impediments
// on certain platforms that don't handle lots of active pthread_cond_timedwait()
// timers in a graceful fashion.
//
// 4. When the abstime value is in the past it appears that control returns
// correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
// Subsequent timedwait/wait calls may hang indefinitely. Given that, we
// can avoid the problem by reinitializing the condvar -- by cond_destroy()
// followed by cond_init() -- after all calls to pthread_cond_timedwait().
// It may be possible to avoid reinitialization by checking the return
// value from pthread_cond_timedwait(). In addition to reinitializing the
// condvar we must establish the invariant that cond_signal() is only called
// within critical sections protected by the adjunct mutex. This prevents
// cond_signal() from "seeing" a condvar that's in the midst of being
// reinitialized or that is corrupt. Sadly, this invariant obviates the
// desirable signal-after-unlock optimization that avoids futile context switching.
//
// I'm also concerned that some versions of NPTL might allocate an auxiliary
// structure when a condvar is used or initialized. cond_destroy() would
// release the helper structure. Our reinitialize-after-timedwait fix
// put excessive stress on malloc/free and locks protecting the c-heap.
//
// We currently use (4).
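//
// A minimal, illustrative sketch (not part of the VM) of what remedy (4) looks
// like in practice -- Parker::park() further below applies this pattern when
// WorkAroundNPTLTimedWaitHang is set. The helper name and its parameters are
// hypothetical and only serve to keep the example self-contained; the caller
// is assumed to hold 'mutex', as pthread_cond_timedwait() requires.
//
//   static int timedwait_with_reinit(pthread_cond_t* cond, pthread_mutex_t* mutex,
//                                    const struct timespec* abstime) {
//     int status = pthread_cond_timedwait(cond, mutex, abstime);
//     if (status != 0) {
//       // Remedy (4): the condvar may have been left corrupt (e.g. if abstime
//       // was already in the past); throw it away and start fresh.
//       pthread_cond_destroy(cond);
//       pthread_cond_init(cond, NULL);
//     }
//     return status;
//   }
//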
See the WorkAroundNTPLTimedWaitHang flag.4791// It may be possible to refine (4) by checking the kernel and NTPL verisons4792// and only enabling the work-around for vulnerable environments.47934794// utility to compute the abstime argument to timedwait:4795// millis is the relative timeout time4796// abstime will be the absolute timeout time4797// TODO: replace compute_abstime() with unpackTime()47984799static struct timespec* compute_abstime(timespec* abstime, jlong millis) {4800if (millis < 0) millis = 0;4801struct timeval now;4802int status = gettimeofday(&now, NULL);4803assert(status == 0, "gettimeofday");4804jlong seconds = millis / 1000;4805millis %= 1000;4806if (seconds > 50000000) { // see man cond_timedwait(3T)4807seconds = 50000000;4808}4809abstime->tv_sec = now.tv_sec + seconds;4810long usec = now.tv_usec + millis * 1000;4811if (usec >= 1000000) {4812abstime->tv_sec += 1;4813usec -= 1000000;4814}4815abstime->tv_nsec = usec * 1000;4816return abstime;4817}48184819// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.4820// Conceptually TryPark() should be equivalent to park(0).48214822int os::PlatformEvent::TryPark() {4823for (;;) {4824const int v = _Event;4825guarantee ((v == 0) || (v == 1), "invariant");4826if (Atomic::cmpxchg (0, &_Event, v) == v) return v;4827}4828}48294830void os::PlatformEvent::park() { // AKA "down()"4831// Invariant: Only the thread associated with the Event/PlatformEvent4832// may call park().4833// TODO: assert that _Assoc != NULL or _Assoc == Self4834int v;4835for (;;) {4836v = _Event;4837if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;4838}4839guarantee (v >= 0, "invariant");4840if (v == 0) {4841// Do this the hard way by blocking ...4842int status = pthread_mutex_lock(_mutex);4843assert_status(status == 0, status, "mutex_lock");4844guarantee (_nParked == 0, "invariant");4845++ _nParked;4846while (_Event < 0) {4847status = pthread_cond_wait(_cond, _mutex);4848assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");4849}4850-- _nParked;48514852// In theory we could move the ST of 0 into _Event past the unlock(),4853// but then we'd need a MEMBAR after the ST.4854_Event = 0;4855status = pthread_mutex_unlock(_mutex);4856assert_status(status == 0, status, "mutex_unlock");4857}4858guarantee (_Event >= 0, "invariant");4859}48604861int os::PlatformEvent::park(jlong millis) {4862guarantee (_nParked == 0, "invariant");48634864int v;4865for (;;) {4866v = _Event;4867if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;4868}4869guarantee (v >= 0, "invariant");4870if (v != 0) return OS_OK;48714872// We do this the hard way, by blocking the thread.4873// Consider enforcing a minimum timeout value.4874struct timespec abst;4875compute_abstime(&abst, millis);48764877int ret = OS_TIMEOUT;4878int status = pthread_mutex_lock(_mutex);4879assert_status(status == 0, status, "mutex_lock");4880guarantee (_nParked == 0, "invariant");4881++_nParked;48824883// Object.wait(timo) will return because of4884// (a) notification4885// (b) timeout4886// (c) thread.interrupt4887//4888// Thread.interrupt and object.notify{All} both call Event::set.4889// That is, we treat thread.interrupt as a special case of notification.4890// The underlying Solaris implementation, cond_timedwait, admits4891// spurious/premature wakeups, but the JLS/JVM spec prevents the4892// JVM from making those visible to Java code. As such, we must4893// filter out spurious wakeups. 
We assume all ETIME returns are valid.4894//4895// TODO: properly differentiate simultaneous notify+interrupt.4896// In that case, we should propagate the notify to another waiter.48974898while (_Event < 0) {4899status = pthread_cond_timedwait(_cond, _mutex, &abst);4900assert_status(status == 0 || status == ETIMEDOUT,4901status, "cond_timedwait");4902if (!FilterSpuriousWakeups) break; // previous semantics4903if (status == ETIMEDOUT) break;4904// We consume and ignore EINTR and spurious wakeups.4905}4906--_nParked;4907if (_Event >= 0) {4908ret = OS_OK;4909}4910_Event = 0;4911status = pthread_mutex_unlock(_mutex);4912assert_status(status == 0, status, "mutex_unlock");4913assert (_nParked == 0, "invariant");4914return ret;4915}49164917void os::PlatformEvent::unpark() {4918int v, AnyWaiters;4919for (;;) {4920v = _Event;4921if (v > 0) {4922// The LD of _Event could have reordered or be satisfied4923// by a read-aside from this processor's write buffer.4924// To avoid problems execute a barrier and then4925// ratify the value.4926OrderAccess::fence();4927if (_Event == v) return;4928continue;4929}4930if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;4931}4932if (v < 0) {4933// Wait for the thread associated with the event to vacate4934int status = pthread_mutex_lock(_mutex);4935assert_status(status == 0, status, "mutex_lock");4936AnyWaiters = _nParked;49374938if (AnyWaiters != 0) {4939// We intentional signal *after* dropping the lock4940// to avoid a common class of futile wakeups.4941status = pthread_cond_signal(_cond);4942assert_status(status == 0, status, "cond_signal");4943}4944// Mutex should be locked for pthread_cond_signal(_cond).4945status = pthread_mutex_unlock(_mutex);4946assert_status(status == 0, status, "mutex_unlock");4947}49484949// Note that we signal() _after dropping the lock for "immortal" Events.4950// This is safe and avoids a common class of futile wakeups. In rare4951// circumstances this can cause a thread to return prematurely from4952// cond_{timed}wait() but the spurious wakeup is benign and the victim will4953// simply re-test the condition and re-park itself.4954}495549564957// JSR1664958// -------------------------------------------------------49594960//4961// The solaris and linux implementations of park/unpark are fairly4962// conservative for now, but can be improved. They currently use a4963// mutex/condvar pair, plus a a count.4964// Park decrements count if > 0, else does a condvar wait. Unpark4965// sets count to 1 and signals condvar. Only one thread ever waits4966// on the condvar. Contention seen when trying to park implies that someone4967// is unparking you, so don't wait. And spurious returns are fine, so there4968// is no need to track notifications.4969//49704971#define MAX_SECS 1000000004972//4973// This code is common to linux and solaris and will be moved to a4974// common place in dolphin.4975//4976// The passed in time value is either a relative time in nanoseconds4977// or an absolute time in milliseconds. Either way it has to be unpacked4978// into suitable seconds and nanoseconds components and stored in the4979// given timespec structure.4980// Given time is a 64-bit value and the time_t used in the timespec is only4981// a signed-32-bit value (except on 64-bit Linux) we have to watch for4982// overflow if times way in the future are given. 
Further on Solaris versions4983// prior to 10 there is a restriction (see cond_timedwait) that the specified4984// number of seconds, in abstime, is less than current_time + 100,000,000.4985// As it will be 28 years before "now + 100000000" will overflow we can4986// ignore overflow and just impose a hard-limit on seconds using the value4987// of "now + 100,000,000". This places a limit on the timeout of about 3.174988// years from "now".4989//49904991static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {4992assert (time > 0, "convertTime");49934994struct timeval now;4995int status = gettimeofday(&now, NULL);4996assert(status == 0, "gettimeofday");49974998time_t max_secs = now.tv_sec + MAX_SECS;49995000if (isAbsolute) {5001jlong secs = time / 1000;5002if (secs > max_secs) {5003absTime->tv_sec = max_secs;5004}5005else {5006absTime->tv_sec = secs;5007}5008absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;5009}5010else {5011jlong secs = time / NANOSECS_PER_SEC;5012if (secs >= MAX_SECS) {5013absTime->tv_sec = max_secs;5014absTime->tv_nsec = 0;5015}5016else {5017absTime->tv_sec = now.tv_sec + secs;5018absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;5019if (absTime->tv_nsec >= NANOSECS_PER_SEC) {5020absTime->tv_nsec -= NANOSECS_PER_SEC;5021++absTime->tv_sec; // note: this must be <= max_secs5022}5023}5024}5025assert(absTime->tv_sec >= 0, "tv_sec < 0");5026assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");5027assert(absTime->tv_nsec >= 0, "tv_nsec < 0");5028assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");5029}50305031void Parker::park(bool isAbsolute, jlong time) {5032// Optional fast-path check:5033// Return immediately if a permit is available.5034if (_counter > 0) {5035_counter = 0;5036OrderAccess::fence();5037return;5038}50395040Thread* thread = Thread::current();5041assert(thread->is_Java_thread(), "Must be JavaThread");5042JavaThread *jt = (JavaThread *)thread;50435044// Optional optimization -- avoid state transitions if there's an interrupt pending.5045// Check interrupt before trying to wait5046if (Thread::is_interrupted(thread, false)) {5047return;5048}50495050// Next, demultiplex/decode time arguments5051timespec absTime;5052if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all5053return;5054}5055if (time > 0) {5056unpackTime(&absTime, isAbsolute, time);5057}50585059// Enter safepoint region5060// Beware of deadlocks such as 6317397.5061// The per-thread Parker:: mutex is a classic leaf-lock.5062// In particular a thread must never block on the Threads_lock while5063// holding the Parker:: mutex. If safepoints are pending both the5064// the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.5065ThreadBlockInVM tbivm(jt);50665067// Don't wait if cannot get lock since interference arises from5068// unblocking. Also. 
check interrupt before trying wait5069if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {5070return;5071}50725073int status;5074if (_counter > 0) { // no wait needed5075_counter = 0;5076status = pthread_mutex_unlock(_mutex);5077assert (status == 0, "invariant");5078OrderAccess::fence();5079return;5080}50815082#ifdef ASSERT5083// Don't catch signals while blocked; let the running threads have the signals.5084// (This allows a debugger to break into the running thread.)5085sigset_t oldsigs;5086sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();5087pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);5088#endif50895090OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);5091jt->set_suspend_equivalent();5092// cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()50935094if (time == 0) {5095status = pthread_cond_wait (_cond, _mutex);5096} else {5097status = pthread_cond_timedwait (_cond, _mutex, &absTime);5098if (status != 0 && WorkAroundNPTLTimedWaitHang) {5099pthread_cond_destroy (_cond);5100pthread_cond_init (_cond, NULL);5101}5102}5103assert_status(status == 0 || status == EINTR ||5104status == ETIME || status == ETIMEDOUT,5105status, "cond_timedwait");51065107#ifdef ASSERT5108pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);5109#endif51105111_counter = 0;5112status = pthread_mutex_unlock(_mutex);5113assert_status(status == 0, status, "invariant");5114// If externally suspended while waiting, re-suspend5115if (jt->handle_special_suspend_equivalent_condition()) {5116jt->java_suspend_self();5117}51185119OrderAccess::fence();5120}51215122void Parker::unpark() {5123int s, status;5124status = pthread_mutex_lock(_mutex);5125assert (status == 0, "invariant");5126s = _counter;5127_counter = 1;5128if (s < 1) {5129if (WorkAroundNPTLTimedWaitHang) {5130status = pthread_cond_signal (_cond);5131assert (status == 0, "invariant");5132status = pthread_mutex_unlock(_mutex);5133assert (status == 0, "invariant");5134} else {5135status = pthread_mutex_unlock(_mutex);5136assert (status == 0, "invariant");5137status = pthread_cond_signal (_cond);5138assert (status == 0, "invariant");5139}5140} else {5141pthread_mutex_unlock(_mutex);5142assert (status == 0, "invariant");5143}5144}51455146extern char** environ;51475148// Run the specified command in a separate process. Return its exit value,5149// or -1 on failure (e.g. can't fork a new process).5150// Unlike system(), this function can be called from signal handler. It5151// doesn't block SIGINT et al.5152int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {5153char * argv[4] = {"sh", "-c", cmd, NULL};51545155pid_t pid = fork();51565157if (pid < 0) {5158// fork failed5159return -1;51605161} else if (pid == 0) {5162// child process51635164// Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.5165execve("/usr/bin/sh", argv, environ);51665167// execve failed5168_exit(-1);51695170} else {5171// copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't5172// care about the actual exit code, for now.51735174int status;51755176// Wait for the child process to exit. This returns immediately if5177// the child has already exited. 
*/5178while (waitpid(pid, &status, 0) < 0) {5179switch (errno) {5180case ECHILD: return 0;5181case EINTR: break;5182default: return -1;5183}5184}51855186if (WIFEXITED(status)) {5187// The child exited normally; get its exit code.5188return WEXITSTATUS(status);5189} else if (WIFSIGNALED(status)) {5190// The child exited because of a signal.5191// The best value to return is 0x80 + signal number,5192// because that is what all Unix shells do, and because5193// it allows callers to distinguish between process exit and5194// process death by signal.5195return 0x80 + WTERMSIG(status);5196} else {5197// Unknown exit code; pass it through.5198return status;5199}5200}5201return -1;5202}52035204// is_headless_jre()5205//5206// Test for the existence of xawt/libmawt.so or libawt_xawt.so5207// in order to report if we are running in a headless jre.5208//5209// Since JDK8 xawt/libmawt.so is moved into the same directory5210// as libawt.so, and renamed libawt_xawt.so5211bool os::is_headless_jre() {5212struct stat statbuf;5213char buf[MAXPATHLEN];5214char libmawtpath[MAXPATHLEN];5215const char *xawtstr = "/xawt/libmawt.so";5216const char *new_xawtstr = "/libawt_xawt.so";52175218char *p;52195220// Get path to libjvm.so5221os::jvm_path(buf, sizeof(buf));52225223// Get rid of libjvm.so5224p = strrchr(buf, '/');5225if (p == NULL) return false;5226else *p = '\0';52275228// Get rid of client or server5229p = strrchr(buf, '/');5230if (p == NULL) return false;5231else *p = '\0';52325233// check xawt/libmawt.so5234strcpy(libmawtpath, buf);5235strcat(libmawtpath, xawtstr);5236if (::stat(libmawtpath, &statbuf) == 0) return false;52375238// check libawt_xawt.so5239strcpy(libmawtpath, buf);5240strcat(libmawtpath, new_xawtstr);5241if (::stat(libmawtpath, &statbuf) == 0) return false;52425243return true;5244}52455246// Get the default path to the core file5247// Returns the length of the string5248int os::get_core_path(char* buffer, size_t bufferSize) {5249const char* p = get_current_directory(buffer, bufferSize);52505251if (p == NULL) {5252assert(p != NULL, "failed to get current directory");5253return 0;5254}52555256return strlen(buffer);5257}52585259#ifndef PRODUCT5260void TestReserveMemorySpecial_test() {5261// No tests available for this platform5262}5263#endif526452655266
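
// Illustrative sketch (not part of the VM): one way a caller could interpret
// the value returned by os::fork_and_exec() above, given the convention that a
// child killed by a signal is reported as 0x80 + signal number (the same
// convention Unix shells use). The helper name is hypothetical. Note that the
// encoding cannot distinguish a normal exit status of 0x80 or higher from
// death by signal -- the same caveat applies to shell exit codes.
//
//   static void report_child_status(int rc) {
//     if (rc == -1) {
//       fprintf(stderr, "fork_and_exec failed\n");
//     } else if (rc >= 0x80) {
//       fprintf(stderr, "child terminated by signal %d\n", rc - 0x80);
//     } else {
//       fprintf(stderr, "child exited with status %d\n", rc);
//     }
//   }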