// GitHub repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
// Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/os/linux/vm/os_linux.cpp
/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// [TODO if need] Android SHM: use https://github.com/pelya/android-shmem

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_linux.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_linux.hpp"
#include "osContainer_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/elfFile.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/stat.h>
# include <sys/select.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <pthread.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/times.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
# include <pwd.h>
# include <poll.h>
# include <semaphore.h>
# include <fcntl.h>
# include <string.h>
# include <sys/sysinfo.h>
#ifndef __ANDROID__
# include <gnu/libc-version.h>
#endif
# include <sys/ipc.h>
#if !defined(__ANDROID__)
# include <syscall.h>
# include <sys/shm.h>
#else
# include <sys/syscall.h>
#endif
# include <link.h>
# include <stdint.h>
# include <inttypes.h>
# include <sys/ioctl.h>

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

#ifdef __ANDROID__
# define DISABLE_SHM
#endif

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#include <sched.h>
#undef _GNU_SOURCE
#else
#include <sched.h>
#endif

// if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
// getrusage() is prepared to handle the associated failure.
#ifndef RUSAGE_THREAD
#define RUSAGE_THREAD (1)               /* only the calling thread */
#endif

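// Illustrative sketch (not part of the build): RUSAGE_THREAD was only added
// in Linux 2.6.26, so on older kernels getrusage() rejects the constant with
// EINVAL and callers must check the return value, e.g.:
//
//   struct rusage ru;
//   if (getrusage(RUSAGE_THREAD, &ru) != 0) {
//     // old kernel: fall back to a process-wide or wall-clock estimate
//   }
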
#define MAX_PATH (2 * K)

#define MAX_SECS 100000000

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

#define LARGEPAGES_BIT (1 << 6)
////////////////////////////////////////////////////////////////////////////////
// global variables
julong os::Linux::_physical_memory = 0;

address   os::Linux::_initial_thread_stack_bottom = NULL;
uintptr_t os::Linux::_initial_thread_stack_size   = 0;

int (*os::Linux::_clock_gettime)(clockid_t, struct timespec *) = NULL;
int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
Mutex* os::Linux::_createThread_lock = NULL;
pthread_t os::Linux::_main_thread;
int os::Linux::_page_size = -1;
const int os::Linux::_vm_default_page_size = (8 * K);
bool os::Linux::_is_floating_stack = false;
bool os::Linux::_is_NPTL = false;
bool os::Linux::_supports_fast_thread_cpu_time = false;
const char * os::Linux::_glibc_version = NULL;
const char * os::Linux::_libpthread_version = NULL;
pthread_condattr_t os::Linux::_condattr[1];

static jlong initial_time_count = 0;

static int clock_tics_per_sec = 100;

// For diagnostics to print a message once. see run_periodic_checks
static sigset_t check_signal_done;
static bool check_signals = true;

static pid_t _initial_pid = 0;

/* Signal number used to suspend/resume a thread */

/* do not use any signal number less than SIGSEGV, see 4355769 */
static int SR_signum = SIGUSR2;
sigset_t SR_sigset;

/* Used to protect dlsym() calls */
static pthread_mutex_t dl_mutex;

// Declarations
static bool read_so_path_from_maps(const char* so_name, char* buf, int buflen);
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);

// utility functions

static int SR_initialize();

julong os::available_memory() {
  return Linux::available_memory();
}

julong os::Linux::available_memory() {
  // values in struct sysinfo are "unsigned long"
  struct sysinfo si;
  julong avail_mem;

  if (OSContainer::is_containerized()) {
    jlong mem_limit, mem_usage;
    if ((mem_limit = OSContainer::memory_limit_in_bytes()) < 1) {
      if (PrintContainerInfo) {
        tty->print_cr("container memory limit %s: " JLONG_FORMAT ", using host value",
                      mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);
      }
    }

    if (mem_limit > 0 && (mem_usage = OSContainer::memory_usage_in_bytes()) < 1) {
      if (PrintContainerInfo) {
        tty->print_cr("container memory usage failed: " JLONG_FORMAT ", using host value", mem_usage);
      }
    }

    if (mem_limit > 0 && mem_usage > 0) {
      avail_mem = mem_limit > mem_usage ? (julong)mem_limit - (julong)mem_usage : 0;
      if (PrintContainerInfo) {
        tty->print_cr("available container memory: " JULONG_FORMAT, avail_mem);
      }
      return avail_mem;
    }
  }

  sysinfo(&si);
  avail_mem = (julong)si.freeram * si.mem_unit;
  if (Verbose) {
    tty->print_cr("available memory: " JULONG_FORMAT, avail_mem);
  }
  return avail_mem;
}

julong os::physical_memory() {
  jlong phys_mem = 0;
  if (OSContainer::is_containerized()) {
    jlong mem_limit;
    if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
      if (PrintContainerInfo) {
        tty->print_cr("total container memory: " JLONG_FORMAT, mem_limit);
      }
      return mem_limit;
    }

    if (PrintContainerInfo) {
      tty->print_cr("container memory limit %s: " JLONG_FORMAT ", using host value",
                    mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);
    }
  }

  phys_mem = Linux::physical_memory();
  if (Verbose) {
    tty->print_cr("total system memory: " JLONG_FORMAT, phys_mem);
  }
  return phys_mem;
}

////////////////////////////////////////////////////////////////////////////////
// environment support

bool os::getenv(const char* name, char* buf, int len) {
  const char* val = ::getenv(name);
  if (val != NULL && strlen(val) < (size_t)len) {
    strcpy(buf, val);
    return true;
  }
  if (len > 0) buf[0] = 0; // return a null string
  return false;
}

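// Usage sketch (illustrative, hypothetical variable and fallback): the copy
// only happens when the value fits the caller's buffer, so callers can supply
// a default:
//
//   char tzdir[MAXPATHLEN];
//   if (!os::getenv("TZDIR", tzdir, sizeof(tzdir))) {
//     strcpy(tzdir, "/usr/share/zoneinfo");  // hypothetical fallback
//   }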

// Return true if the process is running with special privileges, i.e. its
// real and effective user or group ids differ (setuid/setgid execution).

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}


#ifndef SYS_gettid
// ref: https://chromium.googlesource.com/chromiumos/docs/+/master/constants/syscalls.md
// i386 & arm: 224, ia64: 1105, amd64: 186, sparc: 143, aarch64: 178
#ifdef __ia64__
#define SYS_gettid 1105
#else
#if defined(__i386__) || defined(__arm__)
#define SYS_gettid 224
#else
#ifdef __amd64__
#define SYS_gettid 186
#else
#ifdef __sparc__
#define SYS_gettid 143
#else
#if defined(__arm64__) || defined(__aarch64__)
#define SYS_gettid 178
#else
#error define gettid for the arch
#endif
#endif
#endif
#endif
#endif
#endif

// Cpu architecture string
static char cpu_arch[] = HOTSPOT_LIB_ARCH;

// pid_t gettid()
//
// Returns the kernel thread id of the currently running thread. Kernel
// thread id is used to access /proc.
//
// (Note that getpid() on LinuxThreads returns kernel thread id too; but
// on NPTL, it returns the same pid for all threads, as required by POSIX.)
//
pid_t os::Linux::gettid() {
  int rslt = syscall(SYS_gettid);
  if (rslt == -1) {
    // old kernel, no NPTL support
    return getpid();
  } else {
    return (pid_t)rslt;
  }
}

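// Illustrative sketch (not part of the build): the kernel thread id names
// per-thread entries under /proc, which is what the VM uses it for, e.g.:
//
//   char fname[64];
//   jio_snprintf(fname, sizeof(fname), "/proc/self/task/%d/stat",
//                (int)os::Linux::gettid());
//   FILE* fp = fopen(fname, "r");   // per-thread statistics
//   if (fp != NULL) { /* ... parse ... */ fclose(fp); }
//
// Note: glibc 2.30 and later also export a gettid() wrapper directly.
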
// Most versions of Linux have a bug where the number of processors is
// determined by looking at the /proc file system. In a chroot environment,
// the system call returns 1. This causes the VM to act as if it is
// a single processor and elide locking (see is_MP() call).
static bool unsafe_chroot_detected = false;
static const char *unstable_chroot_error = "/proc file system not found.\n"
              "Java may be unstable running multithreaded in a chroot "
              "environment on Linux when /proc filesystem is not mounted.";

void os::Linux::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  if (processor_count() == 1) {
    pid_t pid = os::Linux::gettid();
    char fname[32];
    jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
    FILE *fp = fopen(fname, "r");
    if (fp == NULL) {
      unsafe_chroot_detected = true;
    } else {
      fclose(fp);
    }
  }
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
  assert(processor_count() > 0, "linux error");
}

void os::init_system_properties_values() {
  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm.so.
  // This library should be located at:
  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm.so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path,
  // then instead of exiting we check for the $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // See ld(1):
  // The linker uses the following search paths to locate required
  // shared libraries:
  // 1: ...
  // ...
  // 7: The default directories, normally /lib and /usr/lib.
#if defined(AMD64) || defined(_LP64) && (defined(SPARC) || defined(PPC) || defined(S390))
#define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
#else
#define DEFAULT_LIBPATH "/lib:/usr/lib"
#endif

// Base path of extensions installed on the system.
#define SYS_EXT_DIR "/usr/java/packages"
#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"

  // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

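  // Worked example (illustrative, hypothetical install path): starting from
  //   buf = "/opt/jdk/jre/lib/amd64/server/libjvm.so"
  // the first cut drops "/libjvm.so" and the second drops "/server", giving
  //   dll_dir   = "/opt/jdk/jre/lib/amd64"
  // and the two further cuts drop "/amd64" and "/lib", giving
  //   java_home = "/opt/jdk/jre"
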
  // Where to look for native libraries.
  //
  // Note: Due to a legacy implementation, most of the library path
  // is set in the launcher. This was to accommodate linking restrictions
  // on legacy Linux implementations (which are no longer supported).
  // Eventually, all the library path setting will be done here.
  //
  // However, to prevent the proliferation of improperly built native
  // libraries, the new path component /usr/java/packages is added here.
  {
    // Get the user setting of LD_LIBRARY_PATH, and prepend it. It
    // should always exist (until the legacy problem cited above is
    // addressed).
    const char *v = ::getenv("LD_LIBRARY_PATH");
    const char *v_colon = ":";
    if (v == NULL) { v = ""; v_colon = ""; }
    // That's +1 for the colon and +1 for the trailing '\0'.
    char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char,
                                                     strlen(v) + 1 +
                                                     sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH) + 1,
                                                     mtInternal);
    sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib/%s:" DEFAULT_LIBPATH, v, v_colon, cpu_arch);
    Arguments::set_library_path(ld_library_path);
    FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
  }

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  // Endorsed standards default directory.
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);

#undef DEFAULT_LIBPATH
#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Linux::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
                                 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
    return true;
  else
    return false;
}

void os::Linux::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  // by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
#if defined(PPC64)
  sigaddset(&unblocked_sigs, SIGTRAP);
#endif
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
    if (!os::Linux::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Linux::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Linux::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Linux::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Linux::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow debugger in
sigset_t* os::Linux::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Linux::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Linux::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Linux::libpthread_init() {
  // Save glibc and pthread version strings. Note that _CS_GNU_LIBC_VERSION
  // and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a
  // generic name for earlier versions.
  // Define macros here so we can build HotSpot on old systems.
# ifndef _CS_GNU_LIBC_VERSION
# define _CS_GNU_LIBC_VERSION 2
# endif
# ifndef _CS_GNU_LIBPTHREAD_VERSION
# define _CS_GNU_LIBPTHREAD_VERSION 3
# endif

#ifdef __ANDROID__
  os::Linux::set_glibc_version("android bionic libc api-21");
  os::Linux::set_libpthread_version("android bionic libc api-21 NPTL");
#else
  size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
  if (n > 0) {
    char *str = (char *)malloc(n, mtInternal);
    confstr(_CS_GNU_LIBC_VERSION, str, n);
    os::Linux::set_glibc_version(str);
  } else {
    // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
    static char _gnu_libc_version[32];
    jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
                 "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
    os::Linux::set_glibc_version(_gnu_libc_version);
  }

  n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
  if (n > 0) {
    char *str = (char *)malloc(n, mtInternal);
    confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
    // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
    // us "NPTL-0.29" even when we are running with LinuxThreads. Check if
    // this is the case. LinuxThreads has a hard limit on max number of
    // threads, so sysconf(_SC_THREAD_THREADS_MAX) will return a positive
    // value. On the other hand, NPTL does not have such a limit, sysconf()
    // will return -1 and errno is not changed. Check if it is really NPTL.
    if (strcmp(os::Linux::glibc_version(), "glibc 2.3.2") == 0 &&
        strstr(str, "NPTL") &&
        sysconf(_SC_THREAD_THREADS_MAX) > 0) {
      free(str);
      os::Linux::set_libpthread_version("linuxthreads");
    } else {
      os::Linux::set_libpthread_version(str);
    }
  } else {
    // glibc before 2.3.2 only has LinuxThreads.
    os::Linux::set_libpthread_version("linuxthreads");
  }
#endif // __ANDROID__

  if (strstr(libpthread_version(), "NPTL")) {
    os::Linux::set_is_NPTL();
  } else {
    os::Linux::set_is_LinuxThreads();
  }

  // LinuxThreads has two flavors: floating-stack mode, which allows variable
  // stack size; and fixed-stack mode. NPTL is always floating-stack.
  if (os::Linux::is_NPTL() || os::Linux::supports_variable_stack_size()) {
    os::Linux::set_is_floating_stack();
  }
}

/////////////////////////////////////////////////////////////////////////////
// thread stack

// Force Linux kernel to expand current thread stack. If "bottom" is close
// to the stack guard, caller should block all signals.
//
// MAP_GROWSDOWN:
// A special mmap() flag that is used to implement thread stacks. It tells
// kernel that the memory region should extend downwards when needed. This
// allows early versions of LinuxThreads to only mmap the first few pages
// when creating a new thread. Linux kernel will automatically expand thread
// stack as needed (on page faults).
//
// However, because the memory region of a MAP_GROWSDOWN stack can grow on
// demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
// region, it's hard to tell if the fault is due to a legitimate stack
// access or because of reading/writing non-existent memory (e.g. buffer
// overrun). As a rule, if the fault happens below current stack pointer,
// Linux kernel does not expand stack, instead a SIGSEGV is sent to the
// application (see Linux kernel fault.c).
//
// This Linux feature can cause SIGSEGV when VM bangs thread stack for
// stack overflow detection.
//
// Newer versions of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do
// not use this flag. However, the stack of the initial thread is not created
// by pthread, it is still MAP_GROWSDOWN. Also it's possible (though
// unlikely) that user code can create a thread with MAP_GROWSDOWN stack
// and then attach the thread to JVM.
//
// To get around the problem and allow stack banging on Linux, we need to
// manually expand thread stack after receiving the SIGSEGV.
//
// There are two ways to expand thread stack to address "bottom", we used
// both of them in JVM before 1.5:
//   1. adjust stack pointer first so that it is below "bottom", and then
//      touch "bottom"
//   2. mmap() the page in question
//
// Now that the alternate signal stack is gone, it's harder to use 2. For
// instance, if current sp is already near the lower end of page 101, and we
// need to call mmap() to map page 100, it is possible that part of the mmap()
// frame will be placed in page 100. When page 100 is mapped, it is zero-filled.
// That will destroy the mmap() frame and cause VM to crash.
//
// The following code works by adjusting sp first, then accessing the "bottom"
// page to force a page fault. Linux kernel will then automatically expand the
// stack mapping.
//
// _expand_stack_to() assumes its frame size is less than page size, which
// should always be true if the function is not inlined.

#if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
#define NOINLINE
#else
#define NOINLINE __attribute__ ((noinline))
#endif

static void _expand_stack_to(address bottom) NOINLINE;

static void _expand_stack_to(address bottom) {
  address sp;
  size_t size;
  volatile char *p;

  // Adjust bottom to point to the largest address within the same page, it
  // gives us a one-page buffer if alloca() allocates slightly more memory.
  bottom = (address)align_size_down((uintptr_t)bottom, os::Linux::page_size());
  bottom += os::Linux::page_size() - 1;

  // sp might be slightly above current stack pointer; if that's the case, we
  // will alloca() a little more space than necessary, which is OK. Don't use
  // os::current_stack_pointer(), as its result can be slightly below current
  // stack pointer, causing us to not alloca enough to reach "bottom".
  sp = (address)&sp;

  if (sp > bottom) {
    size = sp - bottom;
    p = (volatile char *)alloca(size);
    assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");
    p[0] = '\0';
  }
}

void os::Linux::expand_stack_to(address bottom) {
  _expand_stack_to(bottom);
}

bool os::Linux::manually_expand_stack(JavaThread * t, address addr) {
  assert(t != NULL, "just checking");
  assert(t->osthread()->expanding_stack(), "expand should be set");
  assert(t->stack_base() != NULL, "stack_base was not initialized");

  if (addr < t->stack_base() && addr >= t->stack_yellow_zone_base()) {
    sigset_t mask_all, old_sigset;
    sigfillset(&mask_all);
    pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
    _expand_stack_to(addr);
    pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);
    return true;
  }
  return false;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

static address highest_vm_reserved_address();

// check if it's safe to start a new thread
static bool _thread_safety_check(Thread* thread) {
  if (os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack()) {
    // Fixed stack LinuxThreads (SuSE Linux/x86, and some versions of Redhat)
    // Heap is mmap'ed at lower end of memory space. Thread stacks are
    // allocated (MAP_FIXED) from high address space. Every thread stack
    // occupies a fixed size slot (usually 2Mbytes, but user can change
    // it to other values if they rebuild LinuxThreads).
    //
    // Problem with MAP_FIXED is that mmap() can still succeed even if part of
    // the memory region has already been mmap'ed. That means if we have too
    // many threads and/or a very large heap, eventually thread stacks will
    // collide with the heap.
    //
    // Here we try to prevent heap/stack collision by comparing current
    // stack bottom with the highest address that has been mmap'ed by JVM
    // plus a safety margin for memory maps created by native code.
    //
    // This feature can be disabled by setting ThreadSafetyMargin to 0
    //
    if (ThreadSafetyMargin > 0) {
      address stack_bottom = os::current_stack_base() - os::current_stack_size();

      // not safe if our stack extends below the safety margin
      return stack_bottom - ThreadSafetyMargin >= highest_vm_reserved_address();
    } else {
      return true;
    }
  } else {
    // Floating stack LinuxThreads or NPTL:
    //   Unlike fixed stack LinuxThreads, thread stacks are not MAP_FIXED. When
    //   there's not enough space left, pthread_create() will fail. If we come
    //   here, that means enough space has been reserved for stack.
    return true;
  }
}

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();
  Monitor* sync = osthread->startThread_lock();

  // non floating stack LinuxThreads needs extra check, see above
  if (!_thread_safety_check(thread)) {
    // notify parent thread
    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
    osthread->set_state(ZOMBIE);
    sync->notify_all();
    return NULL;
  }

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Linux::gettid());

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }
  // initialize signal mask for this thread
  os::Linux::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Linux::init_thread_fpu_state();

  // handshaking with parent thread
  {
    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);

    // notify parent thread
    osthread->set_state(INITIALIZED);
    sync->notify_all();

    // wait until os::start_thread()
    while (osthread->get_state() == INITIALIZED) {
      sync->wait(Mutex::_no_safepoint_check_flag);
    }
  }

  // call one more level start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  // stack size
  if (os::Linux::supports_variable_stack_size()) {
    // calculate stack size if it's not specified by caller
    if (stack_size == 0) {
      stack_size = os::Linux::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize, whose default value can be
        // changed with the flag -Xss
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Linux::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } else {
    // let pthread_create() pick the default value.
  }

  // glibc guard page
  pthread_attr_setguardsize(&attr, os::Linux::default_guard_size(thr_type));

  ThreadState state;

  {
    // Serialize thread creation if we are running with fixed stack LinuxThreads
    bool lock = os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack();
    if (lock) {
      os::Linux::createThread_lock()->lock_without_safepoint_check();
    }

    pthread_t tid;
    int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

    pthread_attr_destroy(&attr);

    if (ret != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode)) {
        perror("pthread_create()");
      }
      // Need to clean up stuff we've allocated so far
      thread->set_osthread(NULL);
      delete osthread;
      if (lock) os::Linux::createThread_lock()->unlock();
      return false;
    }

    // Store pthread info into the OSThread
    osthread->set_pthread_id(tid);

    // Wait until child thread is either initialized or aborted
    {
      Monitor* sync_with_child = osthread->startThread_lock();
      MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
      while ((state = osthread->get_state()) == ALLOCATED) {
        sync_with_child->wait(Mutex::_no_safepoint_check_flag);
      }
    }

    if (lock) {
      os::Linux::createThread_lock()->unlock();
    }
  }

  // Aborted due to thread limit being reached
  if (state == ZOMBIE) {
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // The thread is returned suspended (in state INITIALIZED),
  // and is started higher up in the call chain
  assert(state == INITIALIZED, "race condition");
  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Linux::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Linux::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Linux::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if (os::is_primordial_thread()) {
    // If current thread is primordial thread, its stack is mapped on demand,
    // see notes about MAP_GROWSDOWN. Here we try to force kernel to map
    // the entire stack region to avoid SEGV in stack banging.
    // It is also useful to get around the heap-stack-gap problem on SuSE
    // kernel (see 4821821 for details). We first expand stack to the top
    // of yellow zone, then enable stack yellow zone (order is significant,
    // enabling yellow zone first will crash JVM on SuSE Linux), so there
    // is no gap between the last two virtual memory regions.

    JavaThread *jt = (JavaThread *)thread;
    address addr = jt->stack_yellow_zone_base();
    assert(addr != NULL, "initialization problem?");
    assert(jt->stack_available(addr) > 0, "stack guard should not be enabled");

    osthread->set_expanding_stack();
    os::Linux::manually_expand_stack(jt, addr);
    osthread->clear_expanding_stack();
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Linux::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  OSThread * osthread = thread->osthread();
  assert(osthread->get_state() != INITIALIZED, "just checking");
  Monitor* sync_with_child = osthread->startThread_lock();
  MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
  sync_with_child->notify();
}

// Free Linux resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

// Restore the thread pointer if the destructor is called. This is in case
// someone from JNI code sets up a destructor with pthread_key_create to run
// detachCurrentThread on thread death. Unless we restore the thread pointer we
// will hang or crash. When detachCurrentThread is called the key will be set
// to null and we will not be called again. If detachCurrentThread is never
// called we could loop forever depending on the pthread implementation.
static void restore_thread_pointer(void* p) {
  Thread* thread = (Thread*) p;
  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
}

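// Illustrative sketch (hypothetical embedder code this guards against):
//
//   static JavaVM* jvm;               // saved from JNI_CreateJavaVM
//   static pthread_key_t detach_key;
//   static void detach_on_exit(void* /*thread*/) {
//     // TLS destructor: re-enters the VM, so the thread pointer put back by
//     // restore_thread_pointer() above must still be readable here.
//     jvm->DetachCurrentThread();     // also clears the key; no further calls
//   }
//   ... pthread_key_create(&detach_key, detach_on_exit); ...
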
int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, restore_thread_pointer);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}

//////////////////////////////////////////////////////////////////////////////
// primordial thread

// Check if current thread is the primordial thread, similar to Solaris thr_main.
bool os::is_primordial_thread(void) {
  char dummy;
  // If called before init complete, thread stack bottom will be null.
  // Can be called if fatal error occurs before initialization.
  if (os::Linux::initial_thread_stack_bottom() == NULL) return false;
  assert(os::Linux::initial_thread_stack_bottom() != NULL &&
         os::Linux::initial_thread_stack_size() != 0,
         "os::init did not locate primordial thread's stack region");
  if ((address)&dummy >= os::Linux::initial_thread_stack_bottom() &&
      (address)&dummy < os::Linux::initial_thread_stack_bottom() +
                        os::Linux::initial_thread_stack_size()) {
    return true;
  } else {
    return false;
  }
}

// Find the virtual memory area that contains addr
static bool find_vma(address addr, address* vma_low, address* vma_high) {
  FILE *fp = fopen("/proc/self/maps", "r");
  if (fp) {
    address low, high;
    while (!feof(fp)) {
      if (fscanf(fp, "%p-%p", &low, &high) == 2) {
        if (low <= addr && addr < high) {
          if (vma_low)  *vma_low  = low;
          if (vma_high) *vma_high = high;
          fclose(fp);
          return true;
        }
      }
      for (;;) {
        int ch = fgetc(fp);
        if (ch == EOF || ch == (int)'\n') break;
      }
    }
    fclose(fp);
  }
  return false;
}

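// Usage sketch (illustrative): find the mapping that holds a stack address;
// capture_initial_stack() below uses exactly this to locate the stack top:
//
//   address low, high;
//   char probe;
//   if (find_vma((address)&probe, &low, &high)) {
//     // [low, high) is the VMA containing the current stack page
//   }
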
// Locate primordial thread stack. This special handling of primordial thread stack
// is needed because pthread_getattr_np() on most (all?) Linux distros returns
// a bogus value for the primordial process thread. While the launcher has created
// the VM in a new thread since JDK 6, we still have to allow for the use of the
// JNI invocation API from a primordial thread.
void os::Linux::capture_initial_stack(size_t max_size) {

  // max_size is either 0 (which means accept OS default for thread stacks) or
  // a user-specified value known to be at least the minimum needed. If we
  // are actually on the primordial thread we can make it appear that we have a
  // smaller max_size stack by inserting the guard pages at that location. But we
  // cannot do anything to emulate a larger stack than what has been provided by
  // the OS or threading library. In fact if we try to use a stack greater than
  // what is set by rlimit then we will crash the hosting process.

  // Maximum stack size is the easy part, get it from RLIMIT_STACK.
  // If this is "unlimited" then it will be a huge value.
  struct rlimit rlim;
  getrlimit(RLIMIT_STACK, &rlim);
  size_t stack_size = rlim.rlim_cur;

  // 6308388: a bug in ld.so will relocate its own .data section to the
  // lower end of primordial stack; reduce ulimit -s value a little bit
  // so we won't install guard page on ld.so's data section.
  // But ensure we don't underflow the stack size - allow 1 page spare
  if (stack_size >= (size_t)(3 * page_size())) {
    stack_size -= 2 * page_size();
  }

  // Try to figure out where the stack base (top) is. This is harder.
  //
  // When an application is started, glibc saves the initial stack pointer in
  // a global variable "__libc_stack_end", which is then used by system
  // libraries. __libc_stack_end should be pretty close to stack top. The
  // variable is available since the very early days. However, because it is
  // a private interface, it could disappear in the future.
  //
  // Linux kernel saves start_stack information in /proc/<pid>/stat. Similar
  // to __libc_stack_end, it is very close to stack top, but isn't the real
  // stack top. Note that /proc may not exist if VM is running as a chroot
  // program, so reading /proc/<pid>/stat could fail. Also the contents of
  // /proc/<pid>/stat could change in the future (though unlikely).
  //
  // We try __libc_stack_end first. If that doesn't work, look for
  // /proc/<pid>/stat. If neither of them works, we use current stack pointer
  // as a hint, which should work well in most cases.

  uintptr_t stack_start;

  // try __libc_stack_end first
  uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");
  if (p && *p) {
    stack_start = *p;
  } else {
    // see if we can get the start_stack field from /proc/self/stat
    FILE *fp;
    int pid;
    char state;
    int ppid;
    int pgrp;
    int session;
    int nr;
    int tpgrp;
    unsigned long flags;
    unsigned long minflt;
    unsigned long cminflt;
    unsigned long majflt;
    unsigned long cmajflt;
    unsigned long utime;
    unsigned long stime;
    long cutime;
    long cstime;
    long prio;
    long nice;
    long junk;
    long it_real;
    uintptr_t start;
    uintptr_t vsize;
    intptr_t rss;
    uintptr_t rsslim;
    uintptr_t scodes;
    uintptr_t ecode;
    int i;

    // Figure what the primordial thread stack base is. Code is inspired
    // by email from Hans Boehm. /proc/self/stat begins with current pid,
    // followed by command name surrounded by parentheses, state, etc.
    char stat[2048];
    int statlen;

    fp = fopen("/proc/self/stat", "r");
    if (fp) {
      statlen = fread(stat, 1, 2047, fp);
      stat[statlen] = '\0';
      fclose(fp);

      // Skip pid and the command string. Note that we could be dealing with
      // weird command names, e.g. user could decide to rename java launcher
      // to "java 1.4.2 :)", then the stat file would look like
      //                1234 (java 1.4.2 :)) R ... ...
      // We don't really need to know the command string, just find the last
      // occurrence of ")" and then start parsing from there. See bug 4726580.
      char * s = strrchr(stat, ')');

      i = 0;
      if (s) {
        // Skip blank chars
        do s++; while (isspace(*s));

#define _UFM UINTX_FORMAT
#define _DFM INTX_FORMAT

        /*                                     1   1   1   1   1   1   1   1   1   1   2   2    2    2    2    2    2    2    2 */
        /*              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1    2    3    4    5    6    7    8 */
        i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
                   &state,        /* 3  %c  */
                   &ppid,         /* 4  %d  */
                   &pgrp,         /* 5  %d  */
                   &session,      /* 6  %d  */
                   &nr,           /* 7  %d  */
                   &tpgrp,        /* 8  %d  */
                   &flags,        /* 9  %lu */
                   &minflt,       /* 10 %lu */
                   &cminflt,      /* 11 %lu */
                   &majflt,       /* 12 %lu */
                   &cmajflt,      /* 13 %lu */
                   &utime,        /* 14 %lu */
                   &stime,        /* 15 %lu */
                   &cutime,       /* 16 %ld */
                   &cstime,       /* 17 %ld */
                   &prio,         /* 18 %ld */
                   &nice,         /* 19 %ld */
                   &junk,         /* 20 %ld */
                   &it_real,      /* 21 %ld */
                   &start,        /* 22 UINTX_FORMAT */
                   &vsize,        /* 23 UINTX_FORMAT */
                   &rss,          /* 24 INTX_FORMAT  */
                   &rsslim,       /* 25 UINTX_FORMAT */
                   &scodes,       /* 26 UINTX_FORMAT */
                   &ecode,        /* 27 UINTX_FORMAT */
                   &stack_start); /* 28 UINTX_FORMAT */
      }

#undef _UFM
#undef _DFM

      if (i != 28 - 2) {
        assert(false, "Bad conversion from /proc/self/stat");
        // product mode - assume we are the primordial thread, good luck in the
        // embedded case.
        warning("Can't detect primordial thread stack location - bad conversion");
        stack_start = (uintptr_t) &rlim;
      }
    } else {
      // For some reason we can't open /proc/self/stat (for example, running on
      // FreeBSD with a Linux emulator, or inside chroot); the fallback below
      // should work for most cases, so don't abort:
      warning("Can't detect primordial thread stack location - no /proc/self/stat");
      stack_start = (uintptr_t) &rlim;
    }
  }

  // Now we have a pointer (stack_start) very close to the stack top, the
  // next thing to do is to figure out the exact location of stack top. We
  // can find out the virtual memory area that contains stack_start by
  // reading /proc/self/maps, it should be the last vma in /proc/self/maps,
  // and its upper limit is the real stack top. (again, this would fail if
  // running inside chroot, because /proc may not exist.)

  uintptr_t stack_top;
  address low, high;
  if (find_vma((address)stack_start, &low, &high)) {
    // success, "high" is the true stack top. (ignore "low", because initial
    // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)
    stack_top = (uintptr_t)high;
  } else {
    // failed, likely because /proc/self/maps does not exist
    warning("Can't detect primordial thread stack location - find_vma failed");
    // best effort: stack_start is normally within a few pages below the real
    // stack top, use it as stack top, and reduce stack size so we won't put
    // guard page outside stack.
    stack_top = stack_start;
    stack_size -= 16 * page_size();
  }

  // stack_top could be partially down the page so align it
  stack_top = align_size_up(stack_top, page_size());

  // Allowed stack value is minimum of max_size and what we derived from rlimit
  if (max_size > 0) {
    _initial_thread_stack_size = MIN2(max_size, stack_size);
  } else {
    // Accept the rlimit max, but if stack is unlimited then it will be huge, so
    // clamp it at 8MB as we do on Solaris
    _initial_thread_stack_size = MIN2(stack_size, 8*M);
  }

  _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
  _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
  assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!");
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution
}

jlong os::elapsed_counter() {
  return javaTimeNanos() - initial_time_count;
}

jlong os::elapsed_frequency() {
  return NANOSECS_PER_SEC; // nanosecond resolution
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return (double) (usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) +
           (double) (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "linux error");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC (1)
#endif

void os::Linux::clock_init() {
  // we do dlopen's in this particular order due to a bug in the Linux
  // dynamic loader (see 6348968) leading to crash on exit
  void* handle = dlopen("librt.so.1", RTLD_LAZY);
  if (handle == NULL) {
    handle = dlopen("librt.so", RTLD_LAZY);
  }
#ifdef __ANDROID__
  if (handle == NULL) {
    // libc has clock_getres and clock_gettime
    handle = RTLD_DEFAULT;
  }
#endif

  if (handle) {
    int (*clock_getres_func)(clockid_t, struct timespec*) =
      (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
    int (*clock_gettime_func)(clockid_t, struct timespec*) =
      (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
    if (clock_getres_func && clock_gettime_func) {
      // See if monotonic clock is supported by the kernel. Note that some
      // early implementations simply return kernel jiffies (updated every
      // 1/100 or 1/1000 second). It would be bad to use such a low res clock
      // for nano time (though the monotonic property is still nice to have).
      // It's fixed in newer kernels, however clock_getres() still returns
      // 1/HZ. We check if clock_getres() works, but will ignore its reported
      // resolution for now. Hopefully as people move to new kernels, this
      // won't be a problem.
      struct timespec res;
      struct timespec tp;
      if (clock_getres_func (CLOCK_MONOTONIC, &res) == 0 &&
          clock_gettime_func(CLOCK_MONOTONIC, &tp)  == 0) {
        // yes, monotonic clock is supported
        _clock_gettime = clock_gettime_func;
        return;
      } else {
        // close librt if there is no monotonic clock
#ifndef __ANDROID__ // we should not close RTLD_DEFAULT :)
        dlclose(handle);
#endif
      }
    }
  }
  warning("No monotonic clock was available - timed services may " \
          "be adversely affected if the time-of-day clock changes");
}

#ifndef SYS_clock_getres

#if defined(IA32) || defined(AMD64) || defined(AARCH64)
#define SYS_clock_getres IA32_ONLY(266) AMD64_ONLY(229) AARCH64_ONLY(114)
#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
#else
#warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time"
#define sys_clock_getres(x,y) -1
#endif

#else
#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
#endif

void os::Linux::fast_thread_clock_init() {
  if (!UseLinuxPosixThreadCPUClocks) {
    return;
  }
  clockid_t clockid;
  struct timespec tp;
  int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
    (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");

  // Switch to using fast clocks for thread cpu time if
  // the sys_clock_getres() returns a 0 error code.
  // Note that some kernels may support the current thread
  // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks
  // returned by the pthread_getcpuclockid().
  // If the fast Posix clocks are supported then the sys_clock_getres()
  // must return at least tp.tv_sec == 0 which means a resolution
  // better than 1 sec. This is an extra check for reliability.

  if (pthread_getcpuclockid_func &&
      pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
      sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
    _supports_fast_thread_cpu_time = true;
    _pthread_getcpuclockid = pthread_getcpuclockid_func;
  }
}

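// Illustrative sketch (not part of the build): once the check above passes,
// a thread's CPU time can be read through its POSIX CPU-time clock, here via
// the plain POSIX API rather than the dlsym'd pointers the VM keeps:
//
//   clockid_t cid;
//   struct timespec ts;
//   if (pthread_getcpuclockid(pthread_self(), &cid) == 0 &&
//       clock_gettime(cid, &ts) == 0) {
//     jlong cpu_ns = jlong(ts.tv_sec) * (1000 * 1000 * 1000) + ts.tv_nsec;
//   }
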
1491
jlong os::javaTimeNanos() {
1492
if (Linux::supports_monotonic_clock()) {
1493
struct timespec tp;
1494
int status = Linux::clock_gettime(CLOCK_MONOTONIC, &tp);
1495
assert(status == 0, "gettime error");
1496
jlong result = jlong(tp.tv_sec) * (1000 * 1000 * 1000) + jlong(tp.tv_nsec);
1497
return result;
1498
} else {
1499
timeval time;
1500
int status = gettimeofday(&time, NULL);
1501
assert(status != -1, "linux error");
1502
jlong usecs = jlong(time.tv_sec) * (1000 * 1000) + jlong(time.tv_usec);
1503
return 1000 * usecs;
1504
}
1505
}
1506
1507
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1508
if (Linux::supports_monotonic_clock()) {
1509
info_ptr->max_value = ALL_64_BITS;
1510
1511
// CLOCK_MONOTONIC - amount of time since some arbitrary point in the past
1512
info_ptr->may_skip_backward = false; // not subject to resetting or drifting
1513
info_ptr->may_skip_forward = false; // not subject to resetting or drifting
1514
} else {
1515
// gettimeofday - based on time in seconds since the Epoch thus does not wrap
1516
info_ptr->max_value = ALL_64_BITS;
1517
1518
// gettimeofday is a real time clock so it skips
1519
info_ptr->may_skip_backward = true;
1520
info_ptr->may_skip_forward = true;
1521
}
1522
1523
info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
1524
}
1525
1526
// Return the real, user, and system times in seconds from an
1527
// arbitrary fixed point in the past.
1528
bool os::getTimesSecs(double* process_real_time,
1529
double* process_user_time,
1530
double* process_system_time) {
1531
struct tms ticks;
1532
clock_t real_ticks = times(&ticks);
1533
1534
if (real_ticks == (clock_t) (-1)) {
1535
return false;
1536
} else {
1537
double ticks_per_second = (double) clock_tics_per_sec;
1538
*process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1539
*process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1540
*process_real_time = ((double) real_ticks) / ticks_per_second;
1541
1542
return true;
1543
}
1544
}


char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from a signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle a partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }

}

// Note: os::abort() might be called very early during initialization, or
// called from a signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle a partially initialized VM.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  // _exit() on LinuxThreads only kills the current thread
  ::abort();
}


// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {

  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }
int os::current_process_id() {

  // Under the old linux thread library, linux gives each thread
  // its own process id. Because of this each thread would return
  // a different pid if this method were to return the result
  // of getpid(2). Linux provides no API that returns the pid
  // of the launcher thread for the vm. This implementation
  // returns a unique pid, the pid of the launcher thread
  // that starts the vm 'process'.

  // Under NPTL, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre-NPTL behaviour.

  // If you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in the osThread_linux.hpp file.

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0 ; i < n ; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
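
// Added usage sketch (hypothetical values, not from the original source):
//
//   char buf[MAXPATHLEN];
//   if (os::dll_build_name(buf, sizeof(buf), "/opt/lib:/usr/lib", "verify")) {
//     // With a ':'-separated pname as above, buf holds the first candidate
//     // such as "/opt/lib/libverify.so" that actually exists on disk.
//   }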

// check if addr is inside libjvm.so
bool os::address_is_in_vm(address addr) {
  static address libjvm_base_addr;
  Dl_info dlinfo;

  if (libjvm_base_addr == NULL) {
    if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
      libjvm_base_addr = (address)dlinfo.dli_fbase;
    }
    assert(libjvm_base_addr != NULL, "Cannot obtain base address for libjvm");
  }

  if (dladdr((void *)addr, &dlinfo) != 0) {
    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  }

  return false;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  Dl_info dlinfo;

  if (dladdr((void*)addr, &dlinfo) != 0) {
    // see if we have a matching symbol
    if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
      if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
        jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
      }
      if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
      return true;
    }
    // no matching symbol so try for just file info
    if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
      if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
                          buf, buflen, offset, dlinfo.dli_fname)) {
        return true;
      }
    }
  }

  buf[0] = '\0';
  if (offset != NULL) *offset = -1;
  return false;
}

struct _address_to_library_name {
  address addr;   // input : memory address
  size_t  buflen; //         size of fname
  char*   fname;  // output: library name
  address base;   //         library base addr
};

static int address_to_library_name_callback(struct dl_phdr_info *info,
                                            size_t size, void *data) {
  int i;
  bool found = false;
  address libbase = NULL;
  struct _address_to_library_name * d = (struct _address_to_library_name *)data;

  // iterate through all loadable segments
  for (i = 0; i < info->dlpi_phnum; i++) {
    address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
    if (info->dlpi_phdr[i].p_type == PT_LOAD) {
      // base address of a library is the lowest address of its loaded
      // segments.
      if (libbase == NULL || libbase > segbase) {
        libbase = segbase;
      }
      // see if 'addr' is within current segment
      if (segbase <= d->addr &&
          d->addr < segbase + info->dlpi_phdr[i].p_memsz) {
        found = true;
      }
    }
  }

  // dlpi_name is NULL or empty if the ELF file is the executable; return 0
  // so dll_address_to_library_name() can fall through to use dladdr(), which
  // can figure out the executable name from argv[0].
  if (found && info->dlpi_name && info->dlpi_name[0]) {
    d->base = libbase;
    if (d->fname) {
      jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name);
    }
    return 1;
  }
  return 0;
}
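
// Added note: dl_iterate_phdr() stops iterating as soon as the callback
// returns a non-zero value, so returning 1 above both reports a hit and
// ends the walk over the remaining shared objects.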

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  Dl_info dlinfo;

  // Android bionic libc does not have the bug below.
#ifndef __ANDROID__
  struct _address_to_library_name data;

  // There is a bug in old glibc dladdr() implementations: they could resolve
  // to the wrong library name if the .so file has a base address != NULL. Here
  // we iterate through the program headers of all loaded libraries to find
  // out which library 'addr' really belongs to. This workaround can be
  // removed once the minimum requirement for glibc is moved to 2.3.x.
  data.addr = addr;
  data.fname = buf;
  data.buflen = buflen;
  data.base = NULL;
  int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);

  if (rslt) {
    // buf already contains library name
    if (offset) *offset = addr - data.base;
    return true;
  }
#endif // !__ANDROID__
  if (dladdr((void*)addr, &dlinfo) != 0) {
    if (dlinfo.dli_fname != NULL) {
      jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL && offset != NULL) {
      *offset = addr - (address)dlinfo.dli_fbase;
    }
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

static bool read_so_path_from_maps(const char* so_name, char* buf, int buflen) {
  FILE *fp = fopen("/proc/self/maps", "r");
  assert(fp, "Failed to open /proc/self/maps");
  if (!fp) {
    return false;
  }

  char maps_buffer[2048];
  while (fgets(maps_buffer, 2048, fp) != NULL) {
    if (strstr(maps_buffer, so_name) == NULL) {
      continue;
    }

    char *so_path = strchr(maps_buffer, '/');
    if (so_path == NULL) {
      // Matching line without an absolute path (e.g. an anonymous mapping);
      // keep scanning.
      continue;
    }
    size_t so_path_len = strlen(so_path);
    if (so_path_len > 0 && so_path[so_path_len - 1] == '\n') {
      so_path[so_path_len - 1] = '\0'; // Cut trailing \n
    }
    jio_snprintf(buf, buflen, "%s", so_path);
    fclose(fp);
    return true;
  }

  fclose(fp);
  return false;
}
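
// Added note (hypothetical example line): a /proc/self/maps entry looks like
//
//   7f3c1a2b4000-7f3c1a4d2000 r-xp 00000000 fd:01 1573241  /usr/lib/libjvm.so
//
// which is why strchr(maps_buffer, '/') above finds the start of the path.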

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on


// Remember the stack's state. The Linux dynamic linker will change
// the stack to 'executable' at most once, so we must safepoint only once.
bool os::Linux::_stack_is_executable = false;

// VM operation that loads a library. This is necessary if stack protection
// of the Java stacks can be lost during loading the library. If we
// do not stop the Java threads, they can stack overflow before the stacks
// are protected again.
class VM_LinuxDllLoad: public VM_Operation {
 private:
  const char *_filename;
  char *_ebuf;
  int _ebuflen;
  void *_lib;
 public:
  VM_LinuxDllLoad(const char *fn, char *ebuf, int ebuflen) :
    _filename(fn), _ebuf(ebuf), _ebuflen(ebuflen), _lib(NULL) {}
  VMOp_Type type() const { return VMOp_LinuxDllLoad; }
  void doit() {
    _lib = os::Linux::dll_load_in_vmthread(_filename, _ebuf, _ebuflen);
    os::Linux::_stack_is_executable = true;
  }
  void* loaded_library() { return _lib; }
};

void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
{
  void * result = NULL;
  bool load_attempted = false;

  // Check whether the library to load might change execution rights
  // of the stack. If they are changed, the protection of the stack
  // guard pages will be lost. We need a safepoint to fix this.
  //
  // See Linux man page execstack(8) for more info.
  if (os::uses_stack_guard_pages() && !os::Linux::_stack_is_executable) {
    ElfFile ef(filename);
    if (!ef.specifies_noexecstack()) {
      if (!is_init_completed()) {
        os::Linux::_stack_is_executable = true;
        // This is OK - No Java threads have been created yet, and hence no
        // stack guard pages to fix.
        //
        // This should happen only when you are building JDK7 using a very
        // old version of JDK6 (e.g., with JPRT) and running test_gamma.
        //
        // Dynamic loader will make all stacks executable after
        // this function returns, and will not do that again.
        assert(Threads::first() == NULL, "no Java threads should exist yet.");
      } else {
        warning("You have loaded library %s which might have disabled stack guard. "
                "The VM will try to fix the stack guard now.\n"
                "It's highly recommended that you fix the library with "
                "'execstack -c <libfile>', or link it with '-z noexecstack'.",
                filename);

        assert(Thread::current()->is_Java_thread(), "must be Java thread");
        JavaThread *jt = JavaThread::current();
        if (jt->thread_state() != _thread_in_native) {
          // This happens when a compiler thread tries to load a hsdis-<arch>.so file
          // that requires ExecStack. Cannot enter safe point. Let's give up.
          warning("Unable to fix stack guard. Giving up.");
        } else {
          if (!LoadExecStackDllInVMThread) {
            // This is for the case where the DLL has a static
            // constructor function that executes JNI code. We cannot
            // load such DLLs in the VMThread.
            result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);
          }

          ThreadInVMfromNative tiv(jt);
          debug_only(VMNativeEntryWrapper vew;)

          VM_LinuxDllLoad op(filename, ebuf, ebuflen);
          VMThread::execute(&op);
          if (LoadExecStackDllInVMThread) {
            result = op.loaded_library();
          }
          load_attempted = true;
        }
      }
    }
  }

  if (!load_attempted) {
    result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);
  }

  if (result != NULL) {
    // Successful loading
    return result;
  }

  Elf32_Ehdr elf_head;
  int diag_msg_max_length=ebuflen-strlen(ebuf);
  char* diag_msg_buf=ebuf+strlen(ebuf);

  if (diag_msg_max_length==0) {
    // No more space in ebuf for additional diagnostics message
    return NULL;
  }


  int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);

  if (file_descriptor < 0) {
    // Can't open library, report dlerror() message
    return NULL;
  }

  bool failed_to_read_elf_head=
    (sizeof(elf_head)!=
       (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;

  ::close(file_descriptor);
  if (failed_to_read_elf_head) {
    // file i/o error - report dlerror() msg
    return NULL;
  }

  typedef struct {
    Elf32_Half  code;         // Actual value as defined in elf.h
    Elf32_Half  compat_class; // Compatibility of archs at VM's sense
    char        elf_class;    // 32 or 64 bit
    char        endianess;    // MSB or LSB
    char*       name;         // String representation
  } arch_t;

#ifndef EM_486
#define EM_486          6               /* Intel 80486 */
#endif
#ifndef EM_AARCH64
#define EM_AARCH64      183             /* ARM AARCH64 */
#endif

  static const arch_t arch_array[]={
    {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
    {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
    {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
    {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
#if defined(VM_LITTLE_ENDIAN)
    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64 LE"},
#else
    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
#endif
    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
    {EM_S390,        EM_S390,    ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
    {EM_ALPHA,       EM_ALPHA,   ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
    {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"},
    {EM_MIPS,        EM_MIPS,    ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"},
    {EM_PARISC,      EM_PARISC,  ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"},
    {EM_68K,         EM_68K,     ELFCLASS32, ELFDATA2MSB, (char*)"M68k"},
    {EM_AARCH64,     EM_AARCH64, ELFCLASS64, ELFDATA2LSB, (char*)"AARCH64"},
  };

#if (defined IA32)
  static Elf32_Half running_arch_code=EM_386;
#elif (defined AMD64)
  static Elf32_Half running_arch_code=EM_X86_64;
#elif (defined IA64)
  static Elf32_Half running_arch_code=EM_IA_64;
#elif (defined __sparc) && (defined _LP64)
  static Elf32_Half running_arch_code=EM_SPARCV9;
#elif (defined __sparc) && (!defined _LP64)
  static Elf32_Half running_arch_code=EM_SPARC;
#elif (defined __powerpc64__)
  static Elf32_Half running_arch_code=EM_PPC64;
#elif (defined __powerpc__)
  static Elf32_Half running_arch_code=EM_PPC;
#elif (defined ARM)
  static Elf32_Half running_arch_code=EM_ARM;
#elif (defined S390)
  static Elf32_Half running_arch_code=EM_S390;
#elif (defined ALPHA)
  static Elf32_Half running_arch_code=EM_ALPHA;
#elif (defined MIPSEL)
  static Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
#elif (defined PARISC)
  static Elf32_Half running_arch_code=EM_PARISC;
#elif (defined MIPS)
  static Elf32_Half running_arch_code=EM_MIPS;
#elif (defined M68K)
  static Elf32_Half running_arch_code=EM_68K;
#elif (defined AARCH64)
  static Elf32_Half running_arch_code=EM_AARCH64;
#else
  #error Method os::dll_load requires that one of following is defined:\
       IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K, AARCH64
#endif

  // Identify compatibility class for VM's architecture and library's architecture
  // Obtain string descriptions for architectures

  arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  int running_arch_index=-1;

  for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
    if (running_arch_code == arch_array[i].code) {
      running_arch_index = i;
    }
    if (lib_arch.code == arch_array[i].code) {
      lib_arch.compat_class = arch_array[i].compat_class;
      lib_arch.name = arch_array[i].name;
    }
  }

  assert(running_arch_index != -1,
    "Didn't find running architecture code (running_arch_code) in arch_array");
  if (running_arch_index == -1) {
    // Even though running architecture detection failed
    // we may still continue with reporting dlerror() message
    return NULL;
  }

  if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
    return NULL;
  }

#ifndef S390
  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
    return NULL;
  }
#endif // !S390

  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
    if ( lib_arch.name!=NULL ) {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
        lib_arch.name, arch_array[running_arch_index].name);
    } else {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
        " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
        lib_arch.code,
        arch_array[running_arch_index].name);
    }
  }

  return NULL;
}

void * os::Linux::dlopen_helper(const char *filename, char *ebuf, int ebuflen) {
  void * result = ::dlopen(filename, RTLD_LAZY);
  if (result == NULL) {
    ::strncpy(ebuf, ::dlerror(), ebuflen - 1);
    ebuf[ebuflen-1] = '\0';
  }
  return result;
}

void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf, int ebuflen) {
  void * result = NULL;
  if (LoadExecStackDllInVMThread) {
    result = dlopen_helper(filename, ebuf, ebuflen);
  }

  // Since 7019808, libjvm.so is linked with -noexecstack. If the VM loads a
  // library that requires an executable stack, or which does not have this
  // stack attribute set, dlopen changes the stack attribute to executable. The
  // read protection of the guard pages gets lost.
  //
  // Need to check _stack_is_executable again as multiple VM_LinuxDllLoad
  // may have been queued at the same time.

  if (!_stack_is_executable) {
    JavaThread *jt = Threads::first();

    while (jt) {
      if (!jt->stack_guard_zone_unused() &&     // Stack not yet fully initialized
          jt->stack_yellow_zone_enabled()) {    // No pending stack overflow exceptions
        if (!os::guard_memory((char *) jt->stack_red_zone_base() - jt->stack_red_zone_size(),
                              jt->stack_yellow_zone_size() + jt->stack_red_zone_size())) {
          warning("Attempt to reguard stack yellow zone failed.");
        }
      }
      jt = jt->next();
    }
  }

  return result;
}

/*
 * glibc-2.0 libdl is not MT safe. If you are building with any glibc,
 * chances are you might want to run the generated bits against glibc-2.0
 * libdl.so, so always use locking for any version of glibc.
 */
void* os::dll_lookup(void* handle, const char* name) {
  pthread_mutex_lock(&dl_mutex);
  void* res = dlsym(handle, name);
  pthread_mutex_unlock(&dl_mutex);
  return res;
}

void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

static bool _print_ascii_file(const char* filename, outputStream* st) {
  int fd = ::open(filename, O_RDONLY);
  if (fd == -1) {
    return false;
  }

  char buf[32];
  int bytes;
  while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
    st->print_raw(buf, bytes);
  }

  ::close(fd);

  return true;
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");

  char fname[32];
  pid_t pid = os::Linux::gettid();

  jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);

  if (!_print_ascii_file(fname, st)) {
    st->print("Can not get library information for pid = %d\n", pid);
  }
}

int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  FILE *procmapsFile = NULL;

  // Open the procfs maps file for the current process
  if ((procmapsFile = fopen("/proc/self/maps", "r")) != NULL) {
    // Allocate PATH_MAX for file name plus a reasonable size for other fields.
    char line[PATH_MAX + 100];

    // Read line by line from 'file'
    while (fgets(line, sizeof(line), procmapsFile) != NULL) {
      u8 base, top, offset, inode;
      char permissions[5];
      char device[6];
      char name[PATH_MAX + 1];

      // Parse fields from line. %5s keeps the device field within its
      // 6-byte buffer (the original %7s could overflow it).
      sscanf(line, UINT64_FORMAT_X "-" UINT64_FORMAT_X " %4s " UINT64_FORMAT_X " %5s " INT64_FORMAT " %s",
             &base, &top, permissions, &offset, device, &inode, name);

      // Filter by device id '00:00' so that we only get file system mapped files.
      if (strcmp(device, "00:00") != 0) {

        // Call callback with the fields of interest
        if (callback(name, (address)base, (address)top, param)) {
          // Callback requested an abort of the iteration
          fclose(procmapsFile);
          return 1;
        }
      }
    }
    fclose(procmapsFile);
  }
  return 0;
}
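
// Added sketch (hypothetical callback, not part of the original source):
// a function compatible with os::LoadedModulesCallbackFunc could look like
//
//   static int print_module(const char* name, address base, address top,
//                           void* param) {
//     tty->print_cr("module %s", name);
//     return 0; // 0 continues the iteration; non-zero aborts it
//   }
//
//   os::get_loaded_modules_info(print_module, NULL);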

void os::print_os_info_brief(outputStream* st) {
  os::Linux::print_distro_info(st);

  os::Posix::print_uname_info(st);

  os::Linux::print_libversion_info(st);

}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::Linux::print_distro_info(st);

  os::Posix::print_uname_info(st);

  // Print warning if unsafe chroot environment detected
  if (unsafe_chroot_detected) {
    st->print("WARNING!! ");
    st->print_cr("%s", unstable_chroot_error);
  }

  os::Linux::print_libversion_info(st);

  os::Posix::print_rlimit_info(st);

  os::Posix::print_load_average(st);

  os::Linux::print_full_memory_info(st);

  os::Linux::print_container_info(st);
}

// Try to identify popular distros.
// Most Linux distributions have a /etc/XXX-release file, which contains
// the OS version string. Newer Linux distributions have a /etc/lsb-release
// file that also contains the OS version string. Some have more than one
// /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and
// /etc/redhat-release.), so the order is important.
// Any Linux that is based on Redhat (i.e. Oracle, Mandrake, Sun JDS...) has
// its own specific XXX-release file as well as a redhat-release file.
// Because of this the XXX-release file needs to be searched for before the
// redhat-release file.
// Since Red Hat has a lsb-release file that is not very descriptive the
// search for redhat-release needs to be before lsb-release.
// Since the lsb-release file is the new standard it needs to be searched
// before the older style release files.
// Searching system-release (Red Hat) and os-release (other Linuxes) is the
// next-to-last resort. The os-release file is a new standard that contains
// distribution information and the system-release file seems to be an old
// standard that has been replaced by the lsb-release and os-release files.
// Searching for the debian_version file is the last resort. It contains
// an informative string like "6.0.6" or "wheezy/sid". Because of this
// "Debian " is printed before the contents of the debian_version file.
void os::Linux::print_distro_info(outputStream* st) {
  if (!_print_ascii_file("/etc/oracle-release", st) &&
      !_print_ascii_file("/etc/mandriva-release", st) &&
      !_print_ascii_file("/etc/mandrake-release", st) &&
      !_print_ascii_file("/etc/sun-release", st) &&
      !_print_ascii_file("/etc/redhat-release", st) &&
      !_print_ascii_file("/etc/lsb-release", st) &&
      !_print_ascii_file("/etc/SuSE-release", st) &&
      !_print_ascii_file("/etc/turbolinux-release", st) &&
      !_print_ascii_file("/etc/gentoo-release", st) &&
      !_print_ascii_file("/etc/ltib-release", st) &&
      !_print_ascii_file("/etc/angstrom-version", st) &&
      !_print_ascii_file("/etc/system-release", st) &&
      !_print_ascii_file("/etc/os-release", st)) {

    if (file_exists("/etc/debian_version")) {
      st->print("Debian ");
      _print_ascii_file("/etc/debian_version", st);
    } else {
      st->print("Linux");
    }
  }
  st->cr();
}

void os::Linux::print_libversion_info(outputStream* st) {
  // libc, pthread
  st->print("libc:");
  st->print("%s ", os::Linux::glibc_version());
  st->print("%s ", os::Linux::libpthread_version());
  if (os::Linux::is_LinuxThreads()) {
    st->print("(%s stack)", os::Linux::is_floating_stack() ? "floating" : "fixed");
  }
  st->cr();
}

void os::Linux::print_full_memory_info(outputStream* st) {
  st->print("\n/proc/meminfo:\n");
  _print_ascii_file("/proc/meminfo", st);
  st->cr();
}

void os::Linux::print_container_info(outputStream* st) {
  if (!OSContainer::is_containerized()) {
    return;
  }

  st->print("container (cgroup) information:\n");

  const char *p_ct = OSContainer::container_type();
  st->print("container_type: %s\n", p_ct != NULL ? p_ct : "failed");

  char *p = OSContainer::cpu_cpuset_cpus();
  st->print("cpu_cpuset_cpus: %s\n", p != NULL ? p : "failed");
  free(p);

  p = OSContainer::cpu_cpuset_memory_nodes();
  st->print("cpu_memory_nodes: %s\n", p != NULL ? p : "failed");
  free(p);

  int i = OSContainer::active_processor_count();
  if (i > 0) {
    st->print("active_processor_count: %d\n", i);
  } else {
    st->print("active_processor_count: failed\n");
  }

  i = OSContainer::cpu_quota();
  st->print("cpu_quota: %d\n", i);

  i = OSContainer::cpu_period();
  st->print("cpu_period: %d\n", i);

  i = OSContainer::cpu_shares();
  st->print("cpu_shares: %d\n", i);

  jlong j = OSContainer::memory_limit_in_bytes();
  st->print("memory_limit_in_bytes: " JLONG_FORMAT "\n", j);

  j = OSContainer::memory_and_swap_limit_in_bytes();
  st->print("memory_and_swap_limit_in_bytes: " JLONG_FORMAT "\n", j);

  j = OSContainer::memory_soft_limit_in_bytes();
  st->print("memory_soft_limit_in_bytes: " JLONG_FORMAT "\n", j);

  j = OSContainer::memory_usage_in_bytes();
  st->print("memory_usage_in_bytes: " JLONG_FORMAT "\n", j);

  j = OSContainer::memory_max_usage_in_bytes();
  st->print("memory_max_usage_in_bytes: " JLONG_FORMAT "\n", j);
  st->cr();
}

void os::print_memory_info(outputStream* st) {

  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // values in struct sysinfo are "unsigned long"
  struct sysinfo si;
  sysinfo(&si);

  st->print(", physical " UINT64_FORMAT "k",
            os::physical_memory() >> 10);
  st->print("(" UINT64_FORMAT "k free)",
            os::available_memory() >> 10);
  st->print(", swap " UINT64_FORMAT "k",
            ((jlong)si.totalswap * si.mem_unit) >> 10);
  st->print("(" UINT64_FORMAT "k free)",
            ((jlong)si.freeswap * si.mem_unit) >> 10);
  st->cr();
}

void os::pd_print_cpu_info(outputStream* st) {
  st->print("\n/proc/cpuinfo:\n");
  if (!_print_ascii_file("/proc/cpuinfo", st)) {
    st->print("  <Not Available>");
  }
  st->cr();
}

void os::print_siginfo(outputStream* st, void* siginfo) {
  const siginfo_t* si = (const siginfo_t*)siginfo;

  os::Posix::print_siginfo_brief(st, si);
#if INCLUDE_CDS
  if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
      UseSharedSpaces) {
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space(si->si_addr)) {
      st->print("\n\nError accessing class data sharing archive."
                " Mapped file inaccessible during execution, "
                " possible disk/network problem.");
    }
  }
#endif
  st->cr();
}


static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen);

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, SR_signum, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
#if defined(PPC64)
  print_signal_handler(st, SIGTRAP, buf, buflen);
#endif
}

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  char dli_fname[MAXPATHLEN];
  bool ret = dll_address_to_library_name(
                CAST_FROM_FN_PTR(address, os::jvm_path),
                dli_fname, sizeof(dli_fname), NULL);
  assert(ret, "cannot locate libjvm");
#ifdef __ANDROID__
  if (dli_fname[0] == '\0') {
    return;
  }

  if (strchr(dli_fname, '/') == NULL) {
    bool ok = read_so_path_from_maps(dli_fname, buf, buflen);
    assert(ok, "unable to turn relative libjvm.so path into absolute");
    return;
  }

  snprintf(buf, buflen, /* "%s/lib/%s/server/%s", java_home_var, cpu_arch, */ "%s", dli_fname);
#else // !__ANDROID__
  char *rp = NULL;
  if (ret && dli_fname[0] != '\0') {
    rp = realpath(dli_fname, buf);
  }
  if (rp == NULL)
    return;

  if (Arguments::created_by_gamma_launcher()) {
    // Support for the gamma launcher. Typical value for buf is
    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at
    // the right place in the string, then assume we are installed in a JDK and
    // we're done. Otherwise, check for a JAVA_HOME environment variable and fix
    // up the path so it looks like libjvm.so is installed there (append a
    // fake suffix hotspot/libjvm.so).
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        rp = realpath(java_home_var, buf);
        if (rp == NULL)
          return;

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = realpath(dli_fname, buf);
          if (rp == NULL)
            return;
        }
      }
    }
  }
#endif // __ANDROID__
  strncpy(saved_jvm_path, buf, MAXPATHLEN);
}

void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}

////////////////////////////////////////////////////////////////////////////////
// sun.misc.Signal support

static volatile jint sigint_count = 0;

static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let the VM die immediately.
  if (sig == SIGINT && is_error_reported()) {
    os::die();
  }

  os::signal_notify(sig);
}

void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}

class Semaphore : public StackObj {
 public:
  Semaphore();
  ~Semaphore();
  void signal();
  void wait();
  bool trywait();
  bool timedwait(unsigned int sec, int nsec);
 private:
  sem_t _semaphore;
};

Semaphore::Semaphore() {
  sem_init(&_semaphore, 0, 0);
}

Semaphore::~Semaphore() {
  sem_destroy(&_semaphore);
}

void Semaphore::signal() {
  sem_post(&_semaphore);
}

void Semaphore::wait() {
  sem_wait(&_semaphore);
}

bool Semaphore::trywait() {
  return sem_trywait(&_semaphore) == 0;
}

bool Semaphore::timedwait(unsigned int sec, int nsec) {

  struct timespec ts;
  // Semaphores are always associated with CLOCK_REALTIME
  os::Linux::clock_gettime(CLOCK_REALTIME, &ts);
  // see unpackTime for discussion on overflow checking
  if (sec >= MAX_SECS) {
    ts.tv_sec += MAX_SECS;
    ts.tv_nsec = 0;
  } else {
    ts.tv_sec += sec;
    ts.tv_nsec += nsec;
    if (ts.tv_nsec >= NANOSECS_PER_SEC) {
      ts.tv_nsec -= NANOSECS_PER_SEC;
      ++ts.tv_sec; // note: this must be <= max_secs
    }
  }

  while (1) {
    int result = sem_timedwait(&_semaphore, &ts);
    if (result == 0) {
      return true;
    } else if (errno == EINTR) {
      continue;
    } else if (errno == ETIMEDOUT) {
      return false;
    } else {
      return false;
    }
  }
}
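
// Added note: sem_timedwait() takes an *absolute* CLOCK_REALTIME deadline,
// which is why the code above adds the relative timeout to the value read
// from clock_gettime(CLOCK_REALTIME) instead of passing a plain delta.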

extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}

void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
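
// Added usage sketch (hypothetical, not from the original source):
//
//   void* old_handler = os::signal(SIGHUP, os::user_handler());
//   // ... the previous disposition can later be restored with:
//   os::signal(SIGHUP, old_handler);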

void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}

/*
 * The following code is moved from os.cpp to make this
 * code platform specific, which it is by its very nature.
 */

// Will be modified when max signal is changed to be dynamic
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };

// Linux (POSIX) specific handshaking semaphore.
static sem_t sig_sem;
static Semaphore sr_semaphore;

void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  ::sem_init(&sig_sem, 0, 0);
}

void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  ::sem_post(&sig_sem);
}

static int check_pending_signals(bool wait) {
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ::sem_wait(&sig_sem);

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        ::sem_post(&sig_sem);

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
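
// Added note: Atomic::cmpxchg(n - 1, &pending_signals[i], n) returns the old
// value, so the test "n == Atomic::cmpxchg(...)" succeeds only for the one
// thread that actually decremented the counter; a racing thread simply moves
// on and retries on the next pass of the outer loop.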

int os::signal_lookup() {
  return check_pending_signals(false);
}

int os::signal_wait() {
  return check_pending_signals(true);
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

int os::vm_page_size() {
  // Seems redundant as all get out
  assert(os::Linux::page_size() != -1, "must call os::init");
  return os::Linux::page_size();
}

// Solaris allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(os::Linux::page_size() != -1, "must call os::init");
  return os::Linux::page_size();
}

// Rationale behind this function:
//  current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
//  mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
//  samples for JITted code. Here we create private executable mapping over the code cache
//  and then we can use standard (well, almost, as mapping can change) way to provide
//  info for the reporting script by storing timestamp and location of symbol
void linux_wrap_code(char* base, size_t size) {
  static volatile jint cnt = 0;

  if (!UseOprofile) {
    return;
  }

  char buf[PATH_MAX+1];
  int num = Atomic::add(1, &cnt);

  snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
           os::get_temp_directory(), os::current_process_id(), num);
  unlink(buf);

  int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);

  if (fd != -1) {
    off_t rv = ::lseek(fd, size-2, SEEK_SET);
    if (rv != (off_t)-1) {
      if (::write(fd, "", 1) == 1) {
        mmap(base, size,
             PROT_READ|PROT_WRITE|PROT_EXEC,
             MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
      }
    }
    ::close(fd);
    unlink(buf);
  }
}
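
// Added note (assumption): the lseek()/write() pair above materializes the
// temporary file's length without writing the whole range, so the MAP_FIXED
// file mapping placed over the code cache is backed by the file and accesses
// to it will not fault with SIGBUS.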

static bool recoverable_mmap_error(int err) {
  // See if the error is one we can let the caller handle. This
  // list of errno values comes from JBS-6843484. I can't find a
  // Linux man page that documents this specific set of errno
  // values so while this list currently matches Solaris, it may
  // change as we gain experience with this failure mode.
  switch (err) {
  case EBADF:
  case EINVAL:
  case ENOTSUP:
    // let the caller deal with these errors
    return true;

  default:
    // Any remaining errors on this OS can cause our reserved mapping
    // to be lost. That can cause confusion where different data
    // structures think they have the same memory mapped. The worst
    // scenario is if both the VM and a library think they have the
    // same memory mapped.
    return false;
  }
}

static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          strerror(err), err);
}

static void warn_fail_commit_memory(char* addr, size_t size,
                                    size_t alignment_hint, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, size,
          alignment_hint, exec, strerror(err), err);
}

// NOTE: Linux kernel does not really reserve the pages for us.
//       All it does is to check if there are enough free pages
//       left at the time of mmap(). This could be a potential
//       problem.
int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                     MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
  if (res != (uintptr_t) MAP_FAILED) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, size);
    }
    return 0;
  }

  int err = errno;  // save errno from mmap() call above

  if (!recoverable_mmap_error(err)) {
    warn_fail_commit_memory(addr, size, exec, err);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory.");
  }

  return err;
}

bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
  return os::Linux::commit_memory_impl(addr, size, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Linux::commit_memory_impl(addr, size, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, size, exec, err);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

// Define MAP_HUGETLB here so we can build HotSpot on old systems.
#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000
#endif

// Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif

int os::Linux::commit_memory_impl(char* addr, size_t size,
                                  size_t alignment_hint, bool exec) {
  int err = os::Linux::commit_memory_impl(addr, size, exec);
  if (err == 0) {
    realign_memory(addr, size, alignment_hint);
  }
  return err;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
    // We don't check the return value: madvise(MADV_HUGEPAGE) may not
    // be supported or the memory may already be backed by huge pages.
    ::madvise(addr, bytes, MADV_HUGEPAGE);
  }
}

void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  // This method works by doing an mmap over an existing mmapping and effectively discarding
  // the existing pages. However it won't work for SHM-based large pages that cannot be
  // uncommitted at all. We don't do anything in this case to avoid creating a segment with
  // small pages on top of the SHM segment. This method always works for small pages, so we
  // allow that in any case.
  if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
    commit_memory(addr, bytes, alignment_hint, !ExecMem);
  }
}

void os::numa_make_global(char *addr, size_t bytes) {
  Linux::numa_interleave_memory(addr, bytes);
}

// Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
// bind policy to MPOL_PREFERRED for the current thread.
#define USE_MPOL_PREFERRED 0

void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  // To make NUMA and large pages more robust when both enabled, we need to ease
  // the requirements on where the memory should be allocated. MPOL_BIND is the
  // default policy and it will force memory to be allocated on the specified
  // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
  // the specified node, but will not force it. Using this policy will prevent
  // getting SIGBUS when trying to allocate large pages on NUMA nodes with no
  // free large pages.
  Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
  Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
}

bool os::numa_topology_changed() { return false; }

size_t os::numa_get_groups_num() {
  // Return just the number of nodes in which it's possible to allocate memory
  // (in numa terminology, configured nodes).
  return Linux::numa_num_configured_nodes();
}

int os::numa_get_group_id() {
  int cpu_id = Linux::sched_getcpu();
  if (cpu_id != -1) {
    int lgrp_id = Linux::get_node_by_cpu(cpu_id);
    if (lgrp_id != -1) {
      return lgrp_id;
    }
  }
  return 0;
}

int os::Linux::get_existing_num_nodes() {
  size_t node;
  size_t highest_node_number = Linux::numa_max_node();
  int num_nodes = 0;

  // Get the total number of nodes in the system including nodes without memory.
  for (node = 0; node <= highest_node_number; node++) {
    if (isnode_in_existing_nodes(node)) {
      num_nodes++;
    }
  }
  return num_nodes;
}

size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  size_t highest_node_number = Linux::numa_max_node();
  size_t i = 0;

  // Map all node ids in which it is possible to allocate memory. Nodes are
  // not always consecutively available, i.e. not necessarily available from
  // 0 to the highest node number.
  for (size_t node = 0; node <= highest_node_number; node++) {
    if (Linux::isnode_in_configured_nodes(node)) {
      ids[i++] = node;
    }
  }
  return i;
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}


int os::Linux::sched_getcpu_syscall(void) {
  unsigned int cpu = 0;
  int retval = -1;

#if defined(AMD64)
// Unfortunately we have to bring all these macros here from vsyscall.h
// to be able to compile on old Linuxes.
# define __NR_vgetcpu 2
# define VSYSCALL_START (-10UL << 20)
# define VSYSCALL_SIZE 1024
# define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
  typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
  vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
  retval = vgetcpu(&cpu, NULL, NULL);
#elif defined(IA32) || defined(AARCH64)
# ifndef SYS_getcpu
#  define SYS_getcpu AARCH64_ONLY(168) IA32_ONLY(318)
# endif
  retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
#endif

  return (retval == -1) ? retval : cpu;
}
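
// Added note (assumption): the AMD64 branch calls vgetcpu() through the
// legacy fixed-address vsyscall page; modern kernels only emulate that page,
// so libnuma_init() below prefers the libc sched_getcpu() and falls back to
// this helper only when that symbol is missing or does not work.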

// Something to do with the numa-aware allocator needs these symbols
extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
extern "C" JNIEXPORT void numa_error(char *where) { }
extern "C" JNIEXPORT int fork1() { return fork(); }

// Handle request to load libnuma symbol version 1.1 (API v1). If it fails
// load symbol from base version instead.
void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
#ifndef __ANDROID__
  void *f = dlvsym(handle, name, "libnuma_1.1");
#else
  void *f = NULL;
#endif
  if (f == NULL) {
    f = dlsym(handle, name);
  }
  return f;
}

// Handle request to load libnuma symbol version 1.2 (API v2) only.
// Return NULL if the symbol is not defined in this particular version.
void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) {
#ifndef __ANDROID__
  return dlvsym(handle, name, "libnuma_1.2");
#else // __ANDROID__
  return NULL;
#endif // !__ANDROID__
}

bool os::Linux::libnuma_init() {
  // sched_getcpu() should be in libc.
  set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
                                  dlsym(RTLD_DEFAULT, "sched_getcpu")));

  // If it's not, try a direct syscall.
  if (sched_getcpu() == -1)
    set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, (void*)&sched_getcpu_syscall));

  if (sched_getcpu() != -1) { // Does it work?
    void *handle = dlopen("libnuma.so.1", RTLD_LAZY);

    if (handle == NULL) {
      handle = dlopen("libnuma.so", RTLD_LAZY);
    }

    if (handle != NULL) {
      set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
                                           libnuma_dlsym(handle, "numa_node_to_cpus")));
      set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
                                       libnuma_dlsym(handle, "numa_max_node")));
      set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
                                                   libnuma_dlsym(handle, "numa_num_configured_nodes")));
      set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
                                        libnuma_dlsym(handle, "numa_available")));
      set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
                                            libnuma_dlsym(handle, "numa_tonode_memory")));
      set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
                                                libnuma_dlsym(handle, "numa_interleave_memory")));
      set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
                                                   libnuma_v2_dlsym(handle, "numa_interleave_memory")));
      set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
                                              libnuma_dlsym(handle, "numa_set_bind_policy")));
      set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
                                               libnuma_dlsym(handle, "numa_bitmask_isbitset")));
      set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
                                       libnuma_dlsym(handle, "numa_distance")));

      if (numa_available() != -1) {
        set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
        set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
        set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
        // Create an index -> node mapping, since nodes are not always consecutive
        _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
        rebuild_nindex_to_node_map();
        // Create a cpu -> node mapping
        _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
        rebuild_cpu_to_node_map();
        return true;
      }
    }
  }
  return false;
}

void os::Linux::rebuild_nindex_to_node_map() {
  int highest_node_number = Linux::numa_max_node();

  nindex_to_node()->clear();
  for (int node = 0; node <= highest_node_number; node++) {
    if (Linux::isnode_in_existing_nodes(node)) {
      nindex_to_node()->append(node);
    }
  }
}

// rebuild_cpu_to_node_map() constructs a table mapping cpu id to node id.
// The table is later used in get_node_by_cpu().
void os::Linux::rebuild_cpu_to_node_map() {
  const size_t NCPUS = 32768; // Since the buffer size computation is very obscure
                              // in libnuma (possible values are starting from 16,
                              // and continuing up with every other power of 2, but less
                              // than the maximum number of CPUs supported by kernel), and
                              // is subject to change (in libnuma version 2 the requirements
                              // are more reasonable) we'll just hardcode the number they use
                              // in the library.
  const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;

  size_t cpu_num = processor_count();
  size_t cpu_map_size = NCPUS / BitsPerCLong;
  size_t cpu_map_valid_size =
    MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);

  cpu_to_node()->clear();
  cpu_to_node()->at_grow(cpu_num - 1);

  size_t node_num = get_existing_num_nodes();

  int distance = 0;
  int closest_distance = INT_MAX;
  int closest_node = 0;
  unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal);
  for (size_t i = 0; i < node_num; i++) {
    // Check if node is configured (not a memory-less node). If it is not, find
    // the closest configured node.
    if (!isnode_in_configured_nodes(nindex_to_node()->at(i))) {
      closest_distance = INT_MAX;
      // Check distance from all remaining nodes in the system. Ignore distance
      // from itself and from another non-configured node.
      for (size_t m = 0; m < node_num; m++) {
        if (m != i && isnode_in_configured_nodes(nindex_to_node()->at(m))) {
          distance = numa_distance(nindex_to_node()->at(i), nindex_to_node()->at(m));
          // If a closest node is found, update. There is always at least one
          // configured node in the system so there is always at least one node
          // close.
          if (distance != 0 && distance < closest_distance) {
            closest_distance = distance;
            closest_node = nindex_to_node()->at(m);
          }
        }
      }
    } else {
      // Current node is already a configured node.
      closest_node = nindex_to_node()->at(i);
    }

    // Get cpus from the original node and map them to the closest node. If node
    // is a configured node (not a memory-less node), then original node and
    // closest node are the same.
    if (numa_node_to_cpus(nindex_to_node()->at(i), cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
      for (size_t j = 0; j < cpu_map_valid_size; j++) {
        if (cpu_map[j] != 0) {
          for (size_t k = 0; k < BitsPerCLong; k++) {
            if (cpu_map[j] & (1UL << k)) {
              cpu_to_node()->at_put(j * BitsPerCLong + k, closest_node);
            }
          }
        }
      }
    }
  }
  FREE_C_HEAP_ARRAY(unsigned long, cpu_map, mtInternal);
}
3206
3207
int os::Linux::get_node_by_cpu(int cpu_id) {
3208
if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
3209
return cpu_to_node()->at(cpu_id);
3210
}
3211
return -1;
3212
}
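
// A minimal standalone sketch of the cpu -> node lookup that the table above
// enables, using only public APIs: glibc's sched_getcpu(3) and libnuma's
// numa_node_of_cpu(3). It does not use the HotSpot mapping; illustration only.
#if 0 // illustrative sketch, not part of the build
#define _GNU_SOURCE
#include <sched.h>
#include <numa.h>    // link with -lnuma
#include <stdio.h>

int main() {
  if (numa_available() == -1) return 1;   // no NUMA support on this system
  int cpu  = sched_getcpu();              // CPU the caller is running on
  int node = numa_node_of_cpu(cpu);       // NUMA node owning that CPU
  printf("cpu %d is on node %d\n", cpu, node);
  return 0;
}
#endif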

GrowableArray<int>* os::Linux::_cpu_to_node;
GrowableArray<int>* os::Linux::_nindex_to_node;
os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
unsigned long* os::Linux::_numa_all_nodes;
struct bitmask* os::Linux::_numa_all_nodes_ptr;
struct bitmask* os::Linux::_numa_nodes_ptr;

bool os::pd_uncommit_memory(char* addr, size_t size) {
  uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
                                     MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
  return res != (uintptr_t) MAP_FAILED;
}
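
// pd_uncommit_memory() above "uncommits" by mapping fresh PROT_NONE anonymous
// pages over the range with MAP_FIXED, which discards the old pages and their
// backing store in a single call. A minimal sketch of the same overmap
// technique (hypothetical sizes; illustration only):
#if 0 // illustrative sketch, not part of the build
#include <sys/mman.h>
#include <assert.h>

int main() {
  const size_t len = 1 << 20;   // 1 MiB, arbitrary
  char* p = (char*) mmap(NULL, len, PROT_READ|PROT_WRITE,
                         MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  assert(p != (char*) MAP_FAILED);
  p[0] = 42;                    // commit a page by touching it
  // "Uncommit": replace the mapping in place. The old page is freed, and any
  // future touch faults instead of silently committing new memory.
  void* q = mmap(p, len, PROT_NONE,
                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
  assert(q == p);
  return 0;
}
#endif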

static
address get_stack_commited_bottom(address bottom, size_t size) {
  address nbot = bottom;
  address ntop = bottom + size;

  size_t page_sz = os::vm_page_size();
  unsigned pages = size / page_sz;

  unsigned char vec[1];
  unsigned imin = 1, imax = pages + 1, imid;
  int mincore_return_value = 0;

  assert(imin <= imax, "Unexpected page size");

  while (imin < imax) {
    imid = (imax + imin) / 2;
    nbot = ntop - (imid * page_sz);

    // Use a trick with mincore to check whether the page is mapped or not.
    // mincore sets vec to 1 if the page resides in memory and to 0 if it is
    // swapped out, but if the page we are asking about is unmapped it
    // fails with -1, ENOMEM.
    mincore_return_value = mincore(nbot, page_sz, vec);

    if (mincore_return_value == -1) {
      // Page is not mapped, go up
      // to find the first mapped page
      if (errno != EAGAIN) {
        assert(errno == ENOMEM, "Unexpected mincore errno");
        imax = imid;
      }
    } else {
      // Page is mapped, go down
      // to find the first unmapped page
      imin = imid + 1;
    }
  }

  nbot = nbot + page_sz;

  // Adjust stack bottom one page up if the last checked page is not mapped
  if (mincore_return_value == -1) {
    nbot = nbot + page_sz;
  }

  return nbot;
}
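
// The binary search above relies on one property of mincore(2): for an
// unmapped page it fails with ENOMEM, while for a mapped page it succeeds
// whether the page is resident or swapped out. A minimal sketch of that
// probe (illustration only):
#if 0 // illustrative sketch, not part of the build
#include <sys/mman.h>
#include <unistd.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>

static int page_is_mapped(void* addr) {
  unsigned char vec[1];
  uintptr_t pagesz = (uintptr_t) sysconf(_SC_PAGESIZE);
  // mincore() requires a page-aligned address; align down.
  void* page = (void*) ((uintptr_t)addr & ~(pagesz - 1));
  if (mincore(page, pagesz, vec) == 0) return 1;    // mapped
  return (errno == ENOMEM) ? 0 : -1;                // unmapped / other error
}

int main() {
  char on_stack;
  printf("stack page mapped: %d\n", page_is_mapped(&on_stack));
  return 0;
}
#endif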


// Linux uses a growable mapping for the stack, and if the mapping for
// the stack guard pages is not removed when we detach a thread the
// stack cannot grow beyond the pages where the stack guard was
// mapped. If at some point later in the process the stack expands to
// that point, the Linux kernel cannot expand the stack any further
// because the guard pages are in the way, and a segfault occurs.
//
// However, it's essential not to split the stack region by unmapping
// a region (leaving a hole) that's already part of the stack mapping,
// so if the stack mapping has already grown beyond the guard pages at
// the time we create them, we have to truncate the stack mapping.
// So, we need to know the extent of the stack mapping when
// create_stack_guard_pages() is called.

// We only need this for stacks that are growable: at the time of
// writing thread stacks don't use growable mappings (i.e. those
// created with MAP_GROWSDOWN), and aren't marked "[stack]", so this
// only applies to the main thread.

// If the (growable) stack mapping already extends beyond the point
// where we're going to put our guard pages, truncate the mapping at
// that point by munmap()ping it. This ensures that when we later
// munmap() the guard pages we don't leave a hole in the stack
// mapping. This only affects the main/primordial thread.

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  if (os::is_primordial_thread()) {
    // As we manually grow the stack up to bottom inside create_attached_thread(),
    // it's likely that os::Linux::initial_thread_stack_bottom is mapped and
    // we don't need to do anything special.
    // Check it first, before calling the heavy function.
    uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom();
    unsigned char vec[1];

    if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
      // Fall back to the slow path on all errors, including EAGAIN
      stack_extent = (uintptr_t) get_stack_commited_bottom(
        os::Linux::initial_thread_stack_bottom(),
        (size_t)addr - stack_extent);
    }

    if (stack_extent < (uintptr_t)addr) {
      ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent));
    }
  }

  return os::commit_memory(addr, size, !ExecMem);
}

// If this is a growable mapping, remove the guard pages entirely by
// munmap()ping them. If not, just call uncommit_memory(). This only
// affects the main/primordial thread, but guard against future OS changes.
// It's safe to always unmap guard pages for the primordial thread because we
// always place them right after the end of the mapped region.

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  uintptr_t stack_extent, stack_base;

  if (os::is_primordial_thread()) {
    return ::munmap(addr, size) == 0;
  }

  return os::uncommit_memory(addr, size);
}

static address _highest_vm_reserved_address = NULL;

// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
// at 'requested_addr'. If there are existing memory mappings at the same
// location, however, they will be overwritten. If 'fixed' is false,
// 'requested_addr' is only treated as a hint, and the return value may or
// may not start at the requested address. Unlike Linux mmap(), this
// function returns NULL to indicate failure.
static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
  char * addr;
  int flags;

  flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
  if (fixed) {
    assert((uintptr_t)requested_addr % os::Linux::page_size() == 0, "unaligned address");
    flags |= MAP_FIXED;
  }

  // Map reserved/uncommitted pages PROT_NONE so we fail early if we
  // touch an uncommitted page. Otherwise, the read/write might
  // succeed if we have enough swap space to back the physical page.
  addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
                       flags, -1, 0);

  if (addr != MAP_FAILED) {
    // anon_mmap() should only get called during VM initialization, so we
    // don't need a lock (actually we could skip locking even if it could be
    // called from multiple threads, because _highest_vm_reserved_address is
    // just a hint about the upper limit of non-stack memory regions.)
    if ((address)addr + bytes > _highest_vm_reserved_address) {
      _highest_vm_reserved_address = (address)addr + bytes;
    }
  }

  return addr == MAP_FAILED ? NULL : addr;
}

// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
// (req_addr != NULL) or with a given alignment.
// - bytes shall be a multiple of alignment.
// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
// - alignment sets the alignment at which memory shall be allocated.
//   It must be a multiple of allocation granularity.
// Returns address of memory or NULL. If req_addr was not NULL, will only return
// req_addr or NULL.
static char* anon_mmap_aligned(size_t bytes, size_t alignment, char* req_addr) {
  size_t extra_size = bytes;
  if (req_addr == NULL && alignment > 0) {
    extra_size += alignment;
  }

  char* start = (char*) ::mmap(req_addr, extra_size, PROT_NONE,
                               MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
                               -1, 0);
  if (start == MAP_FAILED) {
    start = NULL;
  } else {
    if (req_addr != NULL) {
      if (start != req_addr) {
        ::munmap(start, extra_size);
        start = NULL;
      }
    } else {
      char* const start_aligned = (char*) align_ptr_up(start, alignment);
      char* const end_aligned = start_aligned + bytes;
      char* const end = start + extra_size;
      if (start_aligned > start) {
        ::munmap(start, start_aligned - start);
      }
      if (end_aligned < end) {
        ::munmap(end_aligned, end - end_aligned);
      }
      start = start_aligned;
    }
  }
  return start;
}
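
// anon_mmap_aligned() over-reserves by 'alignment' bytes and then trims the
// unaligned head and tail with munmap(). A minimal standalone sketch of the
// same over-reserve-and-trim technique (hypothetical sizes; illustration only):
#if 0 // illustrative sketch, not part of the build
#include <sys/mman.h>
#include <stdint.h>
#include <assert.h>

int main() {
  const size_t bytes = 1 << 20;       // 1 MiB payload, arbitrary
  const size_t alignment = 1 << 21;   // want 2 MiB alignment, arbitrary
  char* raw = (char*) mmap(NULL, bytes + alignment, PROT_NONE,
                           MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
  assert(raw != (char*) MAP_FAILED);
  // Round the start up to the next alignment boundary.
  char* aligned = (char*) (((uintptr_t)raw + alignment - 1) & ~((uintptr_t)alignment - 1));
  // Trim the slack before and after the aligned window.
  if (aligned > raw) munmap(raw, aligned - raw);
  char* end = raw + bytes + alignment;
  if (aligned + bytes < end) munmap(aligned + bytes, end - (aligned + bytes));
  assert(((uintptr_t)aligned % alignment) == 0);
  return 0;
}
#endif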

// Don't update _highest_vm_reserved_address, because there might be memory
// regions above addr + size. If so, releasing a memory region only creates
// a hole in the address space, it doesn't help prevent heap-stack collision.
//
static int anon_munmap(char * addr, size_t size) {
  return ::munmap(addr, size) == 0;
}

char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
                            size_t alignment_hint) {
  return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
}

bool os::pd_release_memory(char* addr, size_t size) {
  return anon_munmap(addr, size);
}

static address highest_vm_reserved_address() {
  return _highest_vm_reserved_address;
}

static bool linux_mprotect(char* addr, size_t size, int prot) {
  // Linux wants the mprotect address argument to be page aligned.
  char* bottom = (char*)align_size_down((intptr_t)addr, os::Linux::page_size());

  // According to SUSv3, mprotect() should only be used with mappings
  // established by mmap(), and mmap() always maps whole pages. An unaligned
  // 'addr' likely indicates a problem in the VM (e.g. trying to change
  // protection of malloc'ed or statically allocated memory). Check the
  // caller if you hit this assert.
  assert(addr == bottom, "sanity check");

  size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
  return ::mprotect(bottom, size, prot) == 0;
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return linux_mprotect(addr, bytes, p);
}

bool os::guard_memory(char* addr, size_t size) {
  return linux_mprotect(addr, size, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t size) {
  return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
}
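
// guard_memory()/unguard_memory() above toggle a page between PROT_NONE and
// PROT_READ|PROT_WRITE; touching a guarded page faults, which is how guard
// zones are detected. A minimal sketch of the same mprotect round trip
// (illustration only):
#if 0 // illustrative sketch, not part of the build
#include <sys/mman.h>
#include <unistd.h>
#include <assert.h>

int main() {
  size_t pagesz = (size_t) sysconf(_SC_PAGESIZE);
  char* p = (char*) mmap(NULL, pagesz, PROT_READ|PROT_WRITE,
                         MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  assert(p != (char*) MAP_FAILED);
  assert(mprotect(p, pagesz, PROT_NONE) == 0);            // guard: touching p now faults
  assert(mprotect(p, pagesz, PROT_READ|PROT_WRITE) == 0); // unguard
  p[0] = 1;                                               // safe again
  return 0;
}
#endif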

bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) {
  bool result = false;
  void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
                 MAP_ANONYMOUS|MAP_PRIVATE,
                 -1, 0);
  if (p != MAP_FAILED) {
    void *aligned_p = align_ptr_up(p, page_size);

    result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;

    munmap(p, page_size * 2);
  }

  if (warn && !result) {
    warning("TransparentHugePages is not supported by the operating system.");
  }

  return result;
}

bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
  bool result = false;
  void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
                 MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
                 -1, 0);

  if (p != MAP_FAILED) {
    // We don't know if this really is a huge page or not.
    FILE *fp = fopen("/proc/self/maps", "r");
    if (fp) {
      while (!feof(fp)) {
        char chars[257];
        long x = 0;
        if (fgets(chars, sizeof(chars), fp)) {
          if (sscanf(chars, "%lx-%*x", &x) == 1
              && x == (long)p) {
            if (strstr(chars, "hugepage")) {
              result = true;
              break;
            }
          }
        }
      }
      fclose(fp);
    }
    munmap(p, page_size);
  }

  if (warn && !result) {
    warning("HugeTLBFS is not supported by the operating system.");
  }

  return result;
}
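
// Both sanity checks above probe kernel support by actually asking for it:
// MADV_HUGEPAGE for THP, and an anonymous MAP_HUGETLB mapping for HugeTLBFS.
// A minimal standalone probe in the same spirit, assuming a 2 MiB huge page
// size (illustration only):
#if 0 // illustrative sketch, not part of the build
#include <sys/mman.h>
#include <stdio.h>

int main() {
  const size_t huge_sz = 2 * 1024 * 1024;   // assumed huge page size
  void* p = mmap(NULL, huge_sz, PROT_READ|PROT_WRITE,
                 MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB, -1, 0);
  if (p == MAP_FAILED) {
    perror("MAP_HUGETLB probe failed");  // e.g. no pages in /proc/sys/vm/nr_hugepages
    return 1;
  }
  printf("MAP_HUGETLB mapping succeeded at %p\n", p);
  munmap(p, huge_sz);
  return 0;
}
#endif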

/*
 * Set the coredump_filter bits to include largepages in core dump (bit 6)
 *
 * From the coredump_filter documentation:
 *
 * - (bit 0) anonymous private memory
 * - (bit 1) anonymous shared memory
 * - (bit 2) file-backed private memory
 * - (bit 3) file-backed shared memory
 * - (bit 4) ELF header pages in file-backed private memory areas (it is
 *           effective only if the bit 2 is cleared)
 * - (bit 5) hugetlb private memory
 * - (bit 6) hugetlb shared memory
 */
static void set_coredump_filter(void) {
  FILE *f;
  long cdm;

  if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
    return;
  }

  if (fscanf(f, "%lx", &cdm) != 1) {
    fclose(f);
    return;
  }

  rewind(f);

  if ((cdm & LARGEPAGES_BIT) == 0) {
    cdm |= LARGEPAGES_BIT;
    fprintf(f, "%#lx", cdm);
  }

  fclose(f);
}
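
// A minimal sketch of the same read-modify-write against
// /proc/self/coredump_filter, assuming LARGEPAGES_BIT is (1 << 6), the
// "hugetlb shared memory" bit listed above (illustration only):
#if 0 // illustrative sketch, not part of the build
#include <stdio.h>

int main() {
  FILE* f = fopen("/proc/self/coredump_filter", "r+");
  if (f == NULL) return 1;            // procfs not mounted, or old kernel
  long filter = 0;
  if (fscanf(f, "%lx", &filter) == 1) {
    rewind(f);
    fprintf(f, "%#lx", filter | (1L << 6));  // include hugetlb shared memory
  }
  fclose(f);
  return 0;
}
#endif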

// Large page support

static size_t _large_page_size = 0;

size_t os::Linux::find_large_page_size() {
  size_t large_page_size = 0;

  // large_page_size on Linux is used to round up heap size. x86 uses either
  // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
  // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
  // page as large as 256M.
  //
  // Here we try to figure out page size by parsing /proc/meminfo and looking
  // for a line with the following format:
  //    Hugepagesize:     2048 kB
  //
  // If we can't determine the value (e.g. /proc is not mounted, or the text
  // format has been changed), we'll use the largest page size supported by
  // the processor.

#ifndef ZERO
  large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
                    ARM_ONLY(2 * M) PPC_ONLY(4 * M) AARCH64_ONLY(2 * M);
#endif // ZERO

  FILE *fp = fopen("/proc/meminfo", "r");
  if (fp) {
    while (!feof(fp)) {
      int x = 0;
      char buf[16];
      if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
        if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
          large_page_size = x * K;
          break;
        }
      } else {
        // skip to next line
        for (;;) {
          int ch = fgetc(fp);
          if (ch == EOF || ch == (int)'\n') break;
        }
      }
    }
    fclose(fp);
  }

  if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
    warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
            SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
            proper_unit_for_byte_size(large_page_size));
  }

  return large_page_size;
}

size_t os::Linux::setup_large_page_size() {
  _large_page_size = Linux::find_large_page_size();
  const size_t default_page_size = (size_t)Linux::page_size();
  if (_large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  return _large_page_size;
}

bool os::Linux::setup_large_page_type(size_t page_size) {
  if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
      FLAG_IS_DEFAULT(UseSHM) &&
      FLAG_IS_DEFAULT(UseTransparentHugePages)) {

    // The type of large pages has not been specified by the user.

    // Try UseHugeTLBFS and then UseSHM.
    UseHugeTLBFS = UseSHM = true;

    // Don't try UseTransparentHugePages since there are known
    // performance issues with it turned on. This might change in the future.
    UseTransparentHugePages = false;
  }

  if (UseTransparentHugePages) {
    bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
    if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
      UseHugeTLBFS = false;
      UseSHM = false;
      return true;
    }
    UseTransparentHugePages = false;
  }

  if (UseHugeTLBFS) {
    bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
    if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
      UseSHM = false;
      return true;
    }
    UseHugeTLBFS = false;
  }

#ifdef DISABLE_SHM
  if (UseSHM) {
    warning("UseSHM is disabled");
    UseSHM = false;
  }
#endif // DISABLE_SHM

  return UseSHM;
}

void os::large_page_init() {
  if (!UseLargePages &&
      !UseTransparentHugePages &&
      !UseHugeTLBFS &&
      !UseSHM) {
    // Not using large pages.
    return;
  }

  if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
    // The user explicitly turned off large pages.
    // Ignore the rest of the large pages flags.
    UseTransparentHugePages = false;
    UseHugeTLBFS = false;
    UseSHM = false;
    return;
  }

  size_t large_page_size = Linux::setup_large_page_size();
  UseLargePages = Linux::setup_large_page_type(large_page_size);

  set_coredump_filter();
}

#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000
#endif

#ifndef DISABLE_SHM
#define shm_warning_format(format, ...)              \
  do {                                               \
    if (UseLargePages &&                             \
        (!FLAG_IS_DEFAULT(UseLargePages) ||          \
         !FLAG_IS_DEFAULT(UseSHM) ||                 \
         !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {  \
      warning(format, __VA_ARGS__);                  \
    }                                                \
  } while (0)

#define shm_warning(str) shm_warning_format("%s", str)

#define shm_warning_with_errno(str)                \
  do {                                             \
    int err = errno;                               \
    shm_warning_format(str " (error = %d)", err);  \
  } while (0)

static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
  assert(is_size_aligned(bytes, alignment), "Must be divisible by the alignment");

  if (!is_size_aligned(alignment, SHMLBA)) {
    assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
    return NULL;
  }

  // To ensure that we get 'alignment' aligned memory from shmat,
  // we pre-reserve aligned virtual memory and then attach to that.

  char* pre_reserved_addr = anon_mmap_aligned(bytes, alignment, NULL);
  if (pre_reserved_addr == NULL) {
    // Couldn't pre-reserve aligned memory.
    shm_warning("Failed to pre-reserve aligned memory for shmat.");
    return NULL;
  }

  // SHM_REMAP is needed to allow shmat to map over an existing mapping.
  char* addr = (char*)shmat(shmid, pre_reserved_addr, SHM_REMAP);

  if ((intptr_t)addr == -1) {
    int err = errno;
    shm_warning_with_errno("Failed to attach shared memory.");

    assert(err != EACCES, "Unexpected error");
    assert(err != EIDRM, "Unexpected error");
    assert(err != EINVAL, "Unexpected error");

    // Since we don't know if the kernel unmapped the pre-reserved memory area
    // we can't unmap it, since that would potentially unmap memory that was
    // mapped from other threads.
    return NULL;
  }

  return addr;
}

static char* shmat_at_address(int shmid, char* req_addr) {
  if (!is_ptr_aligned(req_addr, SHMLBA)) {
    assert(false, "Requested address needs to be SHMLBA aligned");
    return NULL;
  }

  char* addr = (char*)shmat(shmid, req_addr, 0);

  if ((intptr_t)addr == -1) {
    shm_warning_with_errno("Failed to attach shared memory.");
    return NULL;
  }

  return addr;
}

static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
  // If a req_addr has been provided, we assume that the caller has already aligned the address.
  if (req_addr != NULL) {
    assert(is_ptr_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
    assert(is_ptr_aligned(req_addr, alignment), "Must be divisible by given alignment");
    return shmat_at_address(shmid, req_addr);
  }

  // Since shmid has been set up with SHM_HUGETLB, shmat will automatically
  // return large page size aligned memory addresses when req_addr == NULL.
  // However, if the alignment is larger than the large page size, we have
  // to manually ensure that the memory returned is 'alignment' aligned.
  if (alignment > os::large_page_size()) {
    assert(is_size_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
    return shmat_with_alignment(shmid, bytes, alignment);
  } else {
    return shmat_at_address(shmid, NULL);
  }
}
#endif // !DISABLE_SHM

char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
#ifndef DISABLE_SHM
  // "exec" is passed in but not used. Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  assert(UseLargePages && UseSHM, "only for SHM large pages");
  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
  assert(is_ptr_aligned(req_addr, alignment), "Unaligned address");

  if (!is_size_aligned(bytes, os::large_page_size())) {
    return NULL; // Fall back to small pages.
  }

  // Create a large shared memory region to attach to based on size.
  // Currently, size is the total size of the heap.
  int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
  if (shmid == -1) {
    // Possible reasons for shmget failure:
    // 1. shmmax is too small for the Java heap.
    //    > check shmmax value: cat /proc/sys/kernel/shmmax
    //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
    // 2. not enough large page memory.
    //    > check available large pages: cat /proc/meminfo
    //    > increase amount of large pages:
    //          echo new_value > /proc/sys/vm/nr_hugepages
    //      Note 1: different Linux distributions may use different names for
    //              this property, e.g. on Redhat AS-3 it is "hugetlb_pool".
    //      Note 2: it's possible there's enough physical memory available but
    //              it is so fragmented after a long run that it can't
    //              coalesce into large pages. Try to reserve large pages when
    //              the system is still "fresh".
    shm_warning_with_errno("Failed to reserve shared memory.");
    return NULL;
  }

  // Attach to the region.
  char* addr = shmat_large_pages(shmid, bytes, alignment, req_addr);

  // Remove shmid. If shmat() is successful, the actual shared memory segment
  // will be deleted when it's detached by shmdt() or when the process
  // terminates. If shmat() is not successful this will remove the shared
  // segment immediately.
  shmctl(shmid, IPC_RMID, NULL);

  return addr;
#else
  assert(0, "SHM was disabled at compile time");
  return NULL;
#endif
}
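
// The shmget/shmat/IPC_RMID/shmdt lifecycle above deserves a note: marking
// the segment IPC_RMID right after attach means it disappears automatically
// on the last detach or on process exit, so nothing leaks if the VM dies. A
// minimal sketch of that lifecycle with ordinary (non-huge) pages
// (illustration only):
#if 0 // illustrative sketch, not part of the build
#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdint.h>
#include <assert.h>

int main() {
  const size_t bytes = 1 << 20;   // arbitrary
  int shmid = shmget(IPC_PRIVATE, bytes, IPC_CREAT | SHM_R | SHM_W);
  assert(shmid != -1);
  char* p = (char*) shmat(shmid, NULL, 0);
  assert((intptr_t)p != -1);
  shmctl(shmid, IPC_RMID, NULL);  // segment auto-deletes on last detach
  p[0] = 42;                      // still usable while attached
  shmdt(p);                       // last detach: segment is destroyed here
  return 0;
}
#endif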

static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) {
  assert(error == ENOMEM, "Only expect to fail if no memory is available");

  bool warn_on_failure = UseLargePages &&
      (!FLAG_IS_DEFAULT(UseLargePages) ||
       !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
       !FLAG_IS_DEFAULT(LargePageSizeInBytes));

  if (warn_on_failure) {
    char msg[128];
    jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
                 PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
    warning("%s", msg);
  }
}

char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
  assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");

  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  char* addr = (char*)::mmap(req_addr, bytes, prot,
                             MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
                             -1, 0);

  if (addr == MAP_FAILED) {
    warn_on_large_pages_failure(req_addr, bytes, errno);
    return NULL;
  }

  assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");

  return addr;
}

// Reserve memory using mmap(MAP_HUGETLB).
// - bytes shall be a multiple of alignment.
// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
// - alignment sets the alignment at which memory shall be allocated.
//   It must be a multiple of allocation granularity.
// Returns address of memory or NULL. If req_addr was not NULL, will only return
// req_addr or NULL.
char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  size_t large_page_size = os::large_page_size();
  assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");

  assert(is_ptr_aligned(req_addr, alignment), "Must be");
  assert(is_size_aligned(bytes, alignment), "Must be");

  // First reserve - but not commit - the address range in small pages.
  char* const start = anon_mmap_aligned(bytes, alignment, req_addr);

  if (start == NULL) {
    return NULL;
  }

  assert(is_ptr_aligned(start, alignment), "Must be");

  char* end = start + bytes;

  // Find the regions of the allocated chunk that can be promoted to large pages.
  char* lp_start = (char*)align_ptr_up(start, large_page_size);
  char* lp_end = (char*)align_ptr_down(end, large_page_size);

  size_t lp_bytes = lp_end - lp_start;

  assert(is_size_aligned(lp_bytes, large_page_size), "Must be");

  if (lp_bytes == 0) {
    // The mapped region doesn't even span the start and the end of a large page.
    // Fall back to allocating a non-special area.
    ::munmap(start, end - start);
    return NULL;
  }

  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;

  void* result;

  // Commit small-paged leading area.
  if (start != lp_start) {
    result = ::mmap(start, lp_start - start, prot,
                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
                    -1, 0);
    if (result == MAP_FAILED) {
      ::munmap(lp_start, end - lp_start);
      return NULL;
    }
  }

  // Commit large-paged area.
  result = ::mmap(lp_start, lp_bytes, prot,
                  MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
                  -1, 0);
  if (result == MAP_FAILED) {
    warn_on_large_pages_failure(lp_start, lp_bytes, errno);
    // If the mmap above fails, the large pages region will be unmapped and we
    // have regions before and after with small pages. Release these regions.
    //
    // |  mapped  |  unmapped  |  mapped  |
    // ^          ^            ^          ^
    // start      lp_start     lp_end     end
    //
    ::munmap(start, lp_start - start);
    ::munmap(lp_end, end - lp_end);
    return NULL;
  }

  // Commit small-paged trailing area.
  if (lp_end != end) {
    result = ::mmap(lp_end, end - lp_end, prot,
                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
                    -1, 0);
    if (result == MAP_FAILED) {
      ::munmap(start, lp_end - start);
      return NULL;
    }
  }

  return start;
}

char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
  assert(is_ptr_aligned(req_addr, alignment), "Must be");
  assert(is_size_aligned(alignment, os::vm_allocation_granularity()), "Must be");
  assert(is_power_of_2(os::large_page_size()), "Must be");
  assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");

  if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
    return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
  } else {
    return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
  }
}

char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  assert(UseLargePages, "only for large pages");

  char* addr;
  if (UseSHM) {
    addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
  } else {
    assert(UseHugeTLBFS, "must be");
    addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec);
  }

  if (addr != NULL) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, bytes);
    }

    // The memory is committed
    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
  }

  return addr;
}

bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
#ifndef DISABLE_SHM
  // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
  return shmdt(base) == 0;
#else
  assert(0, "SHM was disabled at compile time");
  return false;
#endif
}

bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
  return pd_release_memory(base, bytes);
}

bool os::release_memory_special(char* base, size_t bytes) {
  bool res;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    res = os::Linux::release_memory_special_impl(base, bytes);
    if (res) {
      tkr.record((address)base, bytes);
    }
  } else {
    res = os::Linux::release_memory_special_impl(base, bytes);
  }
  return res;
}

bool os::Linux::release_memory_special_impl(char* base, size_t bytes) {
  assert(UseLargePages, "only for large pages");
  bool res;

  if (UseSHM) {
    res = os::Linux::release_memory_special_shm(base, bytes);
  } else {
    assert(UseHugeTLBFS, "must be");
    res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
  }
  return res;
}

size_t os::large_page_size() {
  return _large_page_size;
}

// With SysV SHM the entire memory region must be allocated as shared
// memory.
// HugeTLBFS allows applications to commit large page memory on demand.
// However, when committing memory with HugeTLBFS fails, the region
// that was supposed to be committed will lose the old reservation
// and allow other threads to steal that memory region. Because of this
// behavior we can't commit HugeTLBFS memory.
bool os::can_commit_large_page_memory() {
  return UseTransparentHugePages;
}

bool os::can_execute_large_page_memory() {
  return UseTransparentHugePages || UseHugeTLBFS;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  const int max_tries = 10;
  char* base[max_tries];
  size_t size[max_tries];
  const size_t gap = 0x000000;

  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level. If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain. See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

  // Repeatedly allocate blocks until the block is allocated at the
  // right spot. Give up after max_tries. Note that reserve_memory() will
  // automatically update _highest_vm_reserved_address if the call is
  // successful. The variable tracks the highest memory address ever reserved
  // by the JVM. It is used to detect heap-stack collision if running with
  // fixed-stack LinuxThreads. Because here we may attempt to reserve more
  // space than needed, it could confuse the collision detecting code. To
  // solve the problem, save the current _highest_vm_reserved_address and
  // calculate the correct value before return.
  address old_highest = _highest_vm_reserved_address;

  // Linux mmap allows the caller to pass an address as a hint; give it a try
  // first. If the kernel honors the hint then we can return immediately.
  char * addr = anon_mmap(requested_addr, bytes, false);
  if (addr == requested_addr) {
    return requested_addr;
  }

  if (addr != NULL) {
    // mmap() is successful but it failed to reserve at the requested address
    anon_munmap(addr, bytes);
  }

  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_memory(bytes);

    if (base[i] != NULL) {
      // Is this the block we wanted?
      if (base[i] == requested_addr) {
        size[i] = bytes;
        break;
      }

      // Does this overlap the block we wanted? Give back the overlapped
      // parts and try again.

      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
      if (top_overlap >= 0 && top_overlap < bytes) {
        unmap_memory(base[i], top_overlap);
        base[i] += top_overlap;
        size[i] = bytes - top_overlap;
      } else {
        size_t bottom_overlap = base[i] + bytes - requested_addr;
        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
          unmap_memory(requested_addr, bottom_overlap);
          size[i] = bytes - bottom_overlap;
        } else {
          size[i] = bytes;
        }
      }
    }
  }

  // Give back the unused reserved pieces.

  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      unmap_memory(base[j], size[j]);
    }
  }

  if (i < max_tries) {
    _highest_vm_reserved_address = MAX2(old_highest, (address)requested_addr + bytes);
    return requested_addr;
  } else {
    _highest_vm_reserved_address = old_highest;
    return NULL;
  }
}
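
// The fast path above leans on mmap's hint semantics: without MAP_FIXED the
// kernel treats the first argument as a suggestion and may place the mapping
// elsewhere, so the caller must compare the result against the request. A
// minimal sketch of try-hint-then-verify (hypothetical hint address;
// illustration only):
#if 0 // illustrative sketch, not part of the build
#include <sys/mman.h>
#include <stdio.h>

int main() {
  void* want = (void*) 0x600000000000;   // hypothetical, arbitrary hint
  const size_t bytes = 1 << 20;
  void* got = mmap(want, bytes, PROT_NONE,
                   MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
  if (got == MAP_FAILED) return 1;
  if (got == want) {
    printf("kernel honored the hint\n");
  } else {
    printf("hint ignored, got %p; give the mapping back\n", got);
    munmap(got, bytes);
  }
  return 0;
}
#endif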

size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}

size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}

// TODO-FIXME: reconcile Solaris' os::sleep with the linux variation.
// Solaris uses poll(), linux uses park().
// Poll() is likely a better choice, assuming that Thread.interrupt()
// generates a SIGUSRx signal. Note that SIGUSR1 can interfere with
// SIGSEGV, see 4355769.

int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(), "thread consistency check");

  ParkEvent * const slp = thread->_SleepEvent ;
  slp->reset() ;
  OrderAccess::fence() ;

  if (interruptible) {
    jlong prevtime = javaTimeNanos();

    for (;;) {
      if (os::is_interrupted(thread, true)) {
        return OS_INTRPT;
      }

      jlong newtime = javaTimeNanos();

      if (newtime - prevtime < 0) {
        // time moving backwards, should only happen if no monotonic clock
        // not a guarantee() because JVM should not abort on kernel/glibc bugs
        assert(!Linux::supports_monotonic_clock(), "time moving backwards");
      } else {
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
      }

      if (millis <= 0) {
        return OS_OK;
      }

      prevtime = newtime;

      {
        assert(thread->is_Java_thread(), "sanity check");
        JavaThread *jt = (JavaThread *) thread;
        ThreadBlockInVM tbivm(jt);
        OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

        jt->set_suspend_equivalent();
        // cleared by handle_special_suspend_equivalent_condition() or
        // java_suspend_self() via check_and_wait_while_suspended()

        slp->park(millis);

        // were we externally suspended while we were waiting?
        jt->check_and_wait_while_suspended();
      }
    }
  } else {
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jlong prevtime = javaTimeNanos();

    for (;;) {
      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
      // the 1st iteration ...
      jlong newtime = javaTimeNanos();

      if (newtime - prevtime < 0) {
        // time moving backwards, should only happen if no monotonic clock
        // not a guarantee() because JVM should not abort on kernel/glibc bugs
        assert(!Linux::supports_monotonic_clock(), "time moving backwards");
      } else {
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
      }

      if (millis <= 0) break ;

      prevtime = newtime;
      slp->park(millis);
    }
    return OS_OK ;
  }
}

//
// Short sleep, direct OS call.
//
// Note: certain versions of the Linux CFS scheduler (since 2.6.23) do not
// guarantee that sched_yield(2) will actually give up the CPU. For example:
//
//   * If the thread is alone on its particular CPU, it keeps running.
//   * Before the introduction of "skip_buddy" with "compat_yield" disabled
//     (pre 2.6.39).
//
// So calling naked_short_sleep() with 0 is an alternative to yielding.
//
void os::naked_short_sleep(jlong ms) {
  struct timespec req;

  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  req.tv_sec = 0;
  if (ms > 0) {
    req.tv_nsec = (ms % 1000) * 1000000;
  } else {
    req.tv_nsec = 1;
  }

  nanosleep(&req, NULL);

  return;
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    ::sleep(100);   // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  return DontYieldALot;
}

void os::yield() {
  sched_yield();
}

os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities.
  // Threads on Linux all have the same priority, so the Solaris style
  // os::yield_all() with nanosleep(1ms) is not necessary.
  sched_yield();
}

// Called from the tight loops to possibly influence time-sharing heuristics
void os::loop_breaker(int attempts) {
  os::yield_all(attempts);
}

////////////////////////////////////////////////////////////////////////////////
// thread priority support

// Note: Normal Linux applications are run with SCHED_OTHER policy. SCHED_OTHER
// only supports dynamic priority, static priority must be zero. For real-time
// applications, Linux supports SCHED_RR which allows static priority (1-99).
// However, for large multi-threaded applications, SCHED_RR is not only slower
// than SCHED_OTHER, but also very unstable (my volano tests hang hard 4 out
// of 5 runs - Sep 2005).
//
// The following code actually changes the niceness of the kernel-thread/LWP. It
// assumes that setpriority() only modifies one kernel-thread/LWP, not the
// entire user process, and that user level threads are 1:1 mapped to kernel
// threads. This has always been the case, but could change in the future. For
// this reason, the code should not be used as default (ThreadPriorityPolicy=0).
// It is only used when ThreadPriorityPolicy=1 and requires root privilege.

int os::java_to_os_priority[CriticalPriority + 1] = {
  19,              // 0 Entry should never be used

   4,              // 1 MinPriority
   3,              // 2
   2,              // 3

   1,              // 4
   0,              // 5 NormPriority
  -1,              // 6

  -2,              // 7
  -3,              // 8
  -4,              // 9 NearMaxPriority

  -5,              // 10 MaxPriority

  -5               // 11 CriticalPriority
};

static int prio_init() {
  if (ThreadPriorityPolicy == 1) {
    // Only root can raise thread priority. Don't allow ThreadPriorityPolicy=1
    // if the effective uid is not root. Perhaps a more elegant way of doing
    // this is to test the CAP_SYS_NICE capability, but that would require libcap.so
    if (geteuid() != 0) {
      if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy)) {
        warning("-XX:ThreadPriorityPolicy requires root privilege on Linux");
      }
      ThreadPriorityPolicy = 0;
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int newpri) {
  if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK;

  int ret = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri);
  return (ret == 0) ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }

  errno = 0;
  *priority_ptr = getpriority(PRIO_PROCESS, thread->osthread()->thread_id());
  return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);
}

// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

////////////////////////////////////////////////////////////////////////////////
// suspend/resume support

// The low-level signal-based suspend/resume support is a remnant from the
// old VM-suspension that used to be for java-suspension, safepoints etc,
// within hotspot. Now there is a single use-case for this:
//    - calling get_thread_pc() on the VMThread by the flat-profiler task
//      that runs in the watcher thread.
// The remaining code is greatly simplified from the more general suspension
// code that used to be used.
//
// The protocol is quite simple:
// - suspend:
//     - sends a signal to the target thread
//     - polls the suspend state of the osthread using a yield loop
//     - target thread signal handler (SR_handler) sets suspend state
//       and blocks in sigsuspend until continued
// - resume:
//     - sets target osthread state to continue
//     - sends signal to end the sigsuspend loop in the SR_handler
//
// Note that the SR_lock plays no role in this suspend/resume protocol.
//
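
// A minimal sketch of the same park-in-a-signal-handler idea: the handler
// unblocks only the resume signal and parks in sigsuspend(2) until it
// arrives. SIGUSR1/SIGUSR2 are hypothetical suspend/resume signals here
// (HotSpot picks its own, see SR_initialize below); illustration only.
#if 0 // illustrative sketch, not part of the build
#include <signal.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t resumed = 0;

static void resume_handler(int sig) { resumed = 1; }

static void suspend_handler(int sig) {
  // Park: atomically unblock the resume signal and wait for it.
  sigset_t wait_set;
  pthread_sigmask(SIG_BLOCK, NULL, &wait_set);
  sigdelset(&wait_set, SIGUSR2);
  resumed = 0;
  while (!resumed) {
    sigsuspend(&wait_set);   // returns after resume_handler has run
  }
}

static void* worker(void* arg) {
  for (;;) pause();
  return NULL;
}

int main() {
  // Block the resume signal first; threads inherit the mask, so sigsuspend
  // can unblock it atomically (avoids the classic lost-wakeup race).
  sigset_t block;
  sigemptyset(&block);
  sigaddset(&block, SIGUSR2);
  pthread_sigmask(SIG_BLOCK, &block, NULL);

  signal(SIGUSR1, suspend_handler);
  signal(SIGUSR2, resume_handler);

  pthread_t t;
  pthread_create(&t, NULL, worker, NULL);
  sleep(1);
  pthread_kill(t, SIGUSR1);   // suspend: worker parks in sigsuspend
  sleep(1);
  pthread_kill(t, SIGUSR2);   // resume: ends the sigsuspend loop
  puts("suspended and resumed the worker");
  return 0;
}
#endif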
4419
4420
static void resume_clear_context(OSThread *osthread) {
4421
osthread->set_ucontext(NULL);
4422
osthread->set_siginfo(NULL);
4423
}
4424
4425
static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
4426
osthread->set_ucontext(context);
4427
osthread->set_siginfo(siginfo);
4428
}
4429
4430
//
4431
// Handler function invoked when a thread's execution is suspended or
4432
// resumed. We have to be careful that only async-safe functions are
4433
// called here (Note: most pthread functions are not async safe and
4434
// should be avoided.)
4435
//
4436
// Note: sigwait() is a more natural fit than sigsuspend() from an
4437
// interface point of view, but sigwait() prevents the signal hander
4438
// from being run. libpthread would get very confused by not having
4439
// its signal handlers run and prevents sigwait()'s use with the
4440
// mutex granting granting signal.
4441
//
4442
// Currently only ever called on the VMThread and JavaThreads (PC sampling)
4443
//
4444
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
4445
// Save and restore errno to avoid confusing native code with EINTR
4446
// after sigsuspend.
4447
int old_errno = errno;
4448
4449
Thread* thread = Thread::current();
4450
OSThread* osthread = thread->osthread();
4451
assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
4452
4453
os::SuspendResume::State current = osthread->sr.state();
4454
if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
4455
suspend_save_context(osthread, siginfo, context);
4456
4457
// attempt to switch the state, we assume we had a SUSPEND_REQUEST
4458
os::SuspendResume::State state = osthread->sr.suspended();
4459
if (state == os::SuspendResume::SR_SUSPENDED) {
4460
sigset_t suspend_set; // signals for sigsuspend()
4461
4462
// get current set of blocked signals and unblock resume signal
4463
pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
4464
sigdelset(&suspend_set, SR_signum);
4465
4466
sr_semaphore.signal();
4467
// wait here until we are resumed
4468
while (1) {
4469
sigsuspend(&suspend_set);
4470
4471
os::SuspendResume::State result = osthread->sr.running();
4472
if (result == os::SuspendResume::SR_RUNNING) {
4473
sr_semaphore.signal();
4474
break;
4475
}
4476
}
4477
4478
} else if (state == os::SuspendResume::SR_RUNNING) {
4479
// request was cancelled, continue
4480
} else {
4481
ShouldNotReachHere();
4482
}
4483
4484
resume_clear_context(osthread);
4485
} else if (current == os::SuspendResume::SR_RUNNING) {
4486
// request was cancelled, continue
4487
} else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
4488
// ignore
4489
} else {
4490
// ignore
4491
}
4492
4493
errno = old_errno;
4494
}
4495
4496
4497
static int SR_initialize() {
4498
struct sigaction act;
4499
char *s;
4500
/* Get signal number to use for suspend/resume */
4501
if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
4502
int sig = ::strtol(s, 0, 10);
4503
if (sig > 0 || sig < _NSIG) {
4504
SR_signum = sig;
4505
}
4506
}
4507
4508
assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
4509
"SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
4510
4511
sigemptyset(&SR_sigset);
4512
sigaddset(&SR_sigset, SR_signum);
4513
4514
/* Set up signal handler for suspend/resume */
4515
act.sa_flags = SA_RESTART|SA_SIGINFO;
4516
act.sa_handler = (void (*)(int)) SR_handler;
4517
4518
// SR_signum is blocked by default.
4519
// 4528190 - We also need to block pthread restart signal (32 on all
4520
// supported Linux platforms). Note that LinuxThreads need to block
4521
// this signal for all threads to work properly. So we don't have
4522
// to use hard-coded signal number when setting up the mask.
4523
pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
4524
4525
if (sigaction(SR_signum, &act, 0) == -1) {
4526
return -1;
4527
}
4528
4529
// Save signal flag
4530
os::Linux::set_our_sigflags(SR_signum, act.sa_flags);
4531
return 0;
4532
}
4533
4534
static int sr_notify(OSThread* osthread) {
4535
int status = pthread_kill(osthread->pthread_id(), SR_signum);
4536
assert_status(status == 0, status, "pthread_kill");
4537
return status;
4538
}
4539
4540
// "Randomly" selected value for how long we want to spin
4541
// before bailing out on suspending a thread, also how often
4542
// we send a signal to a thread we want to resume
4543
static const int RANDOMLY_LARGE_INTEGER = 1000000;
4544
static const int RANDOMLY_LARGE_INTEGER2 = 100;
4545
4546
// returns true on success and false on error - really an error is fatal
4547
// but this seems the normal response to library errors
4548
static bool do_suspend(OSThread* osthread) {
4549
assert(osthread->sr.is_running(), "thread should be running");
4550
assert(!sr_semaphore.trywait(), "semaphore has invalid state");
4551
4552
// mark as suspended and send signal
4553
if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
4554
// failed to switch, state wasn't running?
4555
ShouldNotReachHere();
4556
return false;
4557
}
4558
4559
if (sr_notify(osthread) != 0) {
4560
ShouldNotReachHere();
4561
}
4562
4563
// managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
4564
while (true) {
4565
if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
4566
break;
4567
} else {
4568
// timeout
4569
os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
4570
if (cancelled == os::SuspendResume::SR_RUNNING) {
4571
return false;
4572
} else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
4573
// make sure that we consume the signal on the semaphore as well
4574
sr_semaphore.wait();
4575
break;
4576
} else {
4577
ShouldNotReachHere();
4578
return false;
4579
}
4580
}
4581
}
4582
4583
guarantee(osthread->sr.is_suspended(), "Must be suspended");
4584
return true;
4585
}
4586
4587
static void do_resume(OSThread* osthread) {
4588
assert(osthread->sr.is_suspended(), "thread should be suspended");
4589
assert(!sr_semaphore.trywait(), "invalid semaphore state");
4590
4591
if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
4592
// failed to switch to WAKEUP_REQUEST
4593
ShouldNotReachHere();
4594
return;
4595
}
4596
4597
while (true) {
4598
if (sr_notify(osthread) == 0) {
4599
if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
4600
if (osthread->sr.is_running()) {
4601
return;
4602
}
4603
}
4604
} else {
4605
ShouldNotReachHere();
4606
}
4607
}
4608
4609
guarantee(osthread->sr.is_running(), "Must be running!");
4610
}
4611
4612
////////////////////////////////////////////////////////////////////////////////
4613
// interrupt support
4614
4615
void os::interrupt(Thread* thread) {
4616
assert(Thread::current() == thread || Threads_lock->owned_by_self(),
4617
"possibility of dangling Thread pointer");
4618
4619
OSThread* osthread = thread->osthread();
4620
4621
if (!osthread->interrupted()) {
4622
osthread->set_interrupted(true);
4623
// More than one thread can get here with the same value of osthread,
4624
// resulting in multiple notifications. We do, however, want the store
4625
// to interrupted() to be visible to other threads before we execute unpark().
4626
OrderAccess::fence();
4627
ParkEvent * const slp = thread->_SleepEvent ;
4628
if (slp != NULL) slp->unpark() ;
4629
}
4630
4631
// For JSR166. Unpark even if interrupt status already was set
4632
if (thread->is_Java_thread())
4633
((JavaThread*)thread)->parker()->unpark();
4634
4635
ParkEvent * ev = thread->_ParkEvent ;
4636
if (ev != NULL) ev->unpark() ;
4637
4638
}
4639
4640
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
4641
assert(Thread::current() == thread || Threads_lock->owned_by_self(),
4642
"possibility of dangling Thread pointer");
4643
4644
OSThread* osthread = thread->osthread();
4645
4646
bool interrupted = osthread->interrupted();
4647
4648
if (interrupted && clear_interrupted) {
4649
osthread->set_interrupted(false);
4650
// consider thread->_SleepEvent->reset() ... optional optimization
4651
}
4652
4653
return interrupted;
4654
}
4655
4656
///////////////////////////////////////////////////////////////////////////////////
4657
// signal handling (except suspend/resume)
4658
4659
// This routine may be used by user applications as a "hook" to catch signals.
4660
// The user-defined signal handler must pass unrecognized signals to this
4661
// routine, and if it returns true (non-zero), then the signal handler must
4662
// return immediately. If the flag "abort_if_unrecognized" is true, then this
4663
// routine will never retun false (zero), but instead will execute a VM panic
4664
// routine kill the process.
4665
//
4666
// If this routine returns false, it is OK to call it again. This allows
4667
// the user-defined signal handler to perform checks either before or after
4668
// the VM performs its own checks. Naturally, the user code would be making
4669
// a serious error if it tried to handle an exception (such as a null check
4670
// or breakpoint) that the VM was generating for its own correct operation.
4671
//
4672
// This routine may recognize any of the following kinds of signals:
4673
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
4674
// It should be consulted by handlers for any of those signals.
4675
//
4676
// The caller of this routine must pass in the three arguments supplied
4677
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
4678
// field of the structure passed to sigaction(). This routine assumes that
4679
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
4680
//
4681
// Note that the VM will print warnings if it detects conflicting signal
4682
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
4683
//
4684
extern "C" JNIEXPORT int
4685
JVM_handle_linux_signal(int signo, siginfo_t* siginfo,
4686
void* ucontext, int abort_if_unrecognized);
4687
4688
void signalHandler(int sig, siginfo_t* info, void* uc) {
4689
assert(info != NULL && uc != NULL, "it must be old kernel");
4690
int orig_errno = errno; // Preserve errno value over signal handler.
4691
JVM_handle_linux_signal(sig, info, uc, true);
4692
errno = orig_errno;
4693
}

// This boolean allows users to forward their own non-matching signals
// to JVM_handle_linux_signal, harmlessly.
bool os::Linux::signal_handlers_are_installed = false;

// For signal-chaining
struct sigaction os::Linux::sigact[MAXSIGNUM];
unsigned int os::Linux::sigs = 0;
bool os::Linux::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Linux::get_signal_action = NULL;

struct sigaction* os::Linux::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if (libjsig_is_loaded) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

bool os::Linux::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

struct sigaction* os::Linux::get_preinstalled_handler(int sig) {
  if ((((unsigned int)1 << sig) & sigs) != 0) {
    return &sigact[sig];
  }
  return NULL;
}

void os::Linux::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigs |= (unsigned int)1 << sig;
}
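
// Worked example (illustrative): after save_preinstalled_handler(SIGSEGV, act)
// with SIGSEGV == 11, bit 11 of the 'sigs' mask is set (sigs |= 1U << 11), so
// a later get_preinstalled_handler(SIGSEGV) sees that bit and returns
// &sigact[11].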

// for diagnostic
int os::Linux::sigflags[MAXSIGNUM];

int os::Linux::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  return sigflags[sig];
}

void os::Linux::set_our_sigflags(int sig, int flags) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = flags;
}

void os::Linux::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
                ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;
  if (!set_installed) {
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  } else {
    sigAct.sa_sigaction = signalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save the flags we set, so they can be checked later for diagnostics.
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}

// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.

void os::Linux::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                          dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                          dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers
      (*begin_signal_setting)();
    }

    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
#if defined(PPC64)
    set_signal_handler(SIGTRAP, true);
#endif
    set_signal_handler(SIGXFSZ, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers
      (*end_signal_setting)();
    }

    // We don't activate the signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        if (PrintJNIResolving) {
          tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        }
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        if (PrintJNIResolving) {
          tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        }
        check_signals = false;
      }
    }
  }
}

// This is the fastest way to get thread cpu time on Linux.
// Returns cpu time (user+sys) for any thread, not only for the current one.
// POSIX-compliant clocks are implemented in kernels 2.6.16+.
// It might work on 2.6.10+ with a special kernel/glibc patch.
// For reference, please see IEEE Std 1003.1-2004:
// http://www.unix.org/single_unix_specification

jlong os::Linux::fast_thread_cpu_time(clockid_t clockid) {
  struct timespec tp;
  int rc = os::Linux::clock_gettime(clockid, &tp);
  assert(rc == 0, "clock_gettime is expected to return 0 code");

  return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
}
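
// Illustrative usage (sketch): for the calling thread this boils down to
// clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...):
//   jlong self_ns = os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
// For another thread, a per-thread clockid is first obtained via
// pthread_getcpuclockid() (see thread_cpu_clockid() further below).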

/////
// glibc on Linux uses an undocumented flag to indicate that some special
// sort of signal trampoline is used. We never set this flag ourselves,
// so we ignore it in our diagnostics.
#ifdef SIGNIFICANT_SIGNAL_MASK
#undef SIGNIFICANT_SIGNAL_MASK
#endif
#define SIGNIFICANT_SIGNAL_MASK (~0x04000000)

static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset = 0;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}

static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;

  sigaction(sig, NULL, &sa);

  // See comment for SIGNIFICANT_SIGNAL_MASK define
  sa.sa_flags &= SIGNIFICANT_SIGNAL_MASK;

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError?
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig) & SIGNIFICANT_SIGNAL_MASK;
  }

  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler;
    // check the flags against the ones we saved (system-used bits masked out).
    if ((int)sa.sa_flags != os::Linux::get_our_sigflags(sig)) {
      st->print(
        ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
        os::Linux::get_our_sigflags(sig));
    }
  }
  st->cr();
}

#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Linux::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.

void os::run_periodic_checks() {

  if (check_signals == false) return;

  // If the SEGV or BUS handlers are overridden, generation of the hs*.log
  // file in the event of a crash could be prevented; debugging such a case
  // can be very challenging, so we check the following for good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
#if defined(PPC64)
  DO_SIGNAL_CHECK(SIGTRAP);
#endif

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
  DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;

void os::Linux::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  act.sa_flags &= SIGNIFICANT_SIGNAL_MASK;

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  switch (sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  case INTERRUPT_SIGNAL:
    jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Linux::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Linux::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Linux::get_our_sigflags(sig));
    tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal handlers
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}

extern void report_error(char* file_name, int line_no, char* title, char* format, ...);

extern bool signal_name(int signo, char* buf, size_t len);

const char* os::exception_name(int exception_code, char* buf, size_t size) {
  if (0 < exception_code && exception_code <= SIGRTMAX) {
    // signal
    if (!signal_name(exception_code, buf, size)) {
      jio_snprintf(buf, size, "SIG%d", exception_code);
    }
    return buf;
  } else {
    return NULL;
  }
}

// this is called _before_ most of the global arguments have been parsed
void os::init(void) {
  char dummy;   /* used to get a guess on initial stack address */

  // With LinuxThreads the JavaMain thread pid (primordial thread)
  // is different than the pid of the java launcher thread.
  // So, on Linux, the launcher thread pid is passed to the VM
  // via the sun.java.launcher.pid property.
  // Use this property instead of getpid() if it was correctly passed.
  // See bug 6351349.
  pid_t java_launcher_pid = (pid_t) Arguments::sun_java_launcher_pid();

  _initial_pid = (java_launcher_pid > 0) ? java_launcher_pid : getpid();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  ThreadCritical::initialize();

  Linux::set_page_size(sysconf(_SC_PAGESIZE));
  if (Linux::page_size() == -1) {
    fatal(err_msg("os_linux.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  }
  init_page_sizes((size_t) Linux::page_size());

  Linux::initialize_system_info();

  // _main_thread points to the thread that created/loaded the JVM.
  Linux::_main_thread = pthread_self();

  Linux::clock_init();
  initial_time_count = javaTimeNanos();

  // pthread_condattr initialization for monotonic clock
  int status;
  pthread_condattr_t* _condattr = os::Linux::condAttr();
  if ((status = pthread_condattr_init(_condattr)) != 0) {
    fatal(err_msg("pthread_condattr_init: %s", strerror(status)));
  }
  // Only set the clock if CLOCK_MONOTONIC is available
  if (Linux::supports_monotonic_clock()) {
    if ((status = pthread_condattr_setclock(_condattr, CLOCK_MONOTONIC)) != 0) {
      if (status == EINVAL) {
        warning("Unable to use monotonic clock with relative timed-waits" \
                " - changes to the time-of-day clock may have adverse effects");
      } else {
        fatal(err_msg("pthread_condattr_setclock: %s", strerror(status)));
      }
    }
  }
  // else it defaults to CLOCK_REALTIME

  pthread_mutex_init(&dl_mutex, NULL);

  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > (int)Linux::vm_default_page_size()) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*Linux::vm_default_page_size()), vm_page_size()) / vm_page_size();
  }

  // retrieve entry point for pthread_setname_np
  Linux::_pthread_setname_np =
    (int(*)(pthread_t, const char*))dlsym(RTLD_DEFAULT, "pthread_setname_np");
}

// To install functions for atexit system call
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

void os::pd_init_container_support() {
  OSContainer::init();
}

// this is called _after_ the global arguments have been parsed
jint os::init_2(void)
{
  Linux::fast_thread_clock_init();

  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  guarantee(polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page");

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if (Verbose && PrintMiscellaneous)
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guarantee(mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page(mem_serialize_page);

#ifndef PRODUCT
    if (Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // initialize suspend/resume support - must do this before signal_sets_init()
  if (SR_initialize() != 0) {
    perror("SR_initialize failed");
    return JNI_ERR;
  }

  Linux::signal_sets_init();
  Linux::install_signal_handlers();

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
                    (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size());

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < os::Linux::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small; "
                  "specify at least %dk",
                  os::Linux::min_stack_allowed / K);
    return JNI_ERR;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
                                                vm_page_size()));

  Linux::capture_initial_stack(JavaThread::stack_size_at_create());

#if defined(IA32) && !defined(ZERO)
  workaround_expand_exec_shield_cs_limit();
#endif

  Linux::libpthread_init();
  if (PrintMiscellaneous && (Verbose || WizardMode)) {
    tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
                  Linux::glibc_version(), Linux::libpthread_version(),
                  Linux::is_floating_stack() ? "floating stack" : "fixed stack");
  }

  if (UseNUMA) {
    if (!Linux::libnuma_init()) {
      UseNUMA = false;
    } else {
      if ((Linux::numa_max_node() < 1)) {
        // There's only one node (they start from 0), disable NUMA.
        UseNUMA = false;
      }
    }
    // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
    // we can make the adaptive lgrp chunk resizing work. If the user specified
    // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
    // disable adaptive resizing.
    if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
      if (FLAG_IS_DEFAULT(UseNUMA)) {
        UseNUMA = false;
      } else {
        if (FLAG_IS_DEFAULT(UseLargePages) &&
            FLAG_IS_DEFAULT(UseSHM) &&
            FLAG_IS_DEFAULT(UseHugeTLBFS)) {
          UseLargePages = false;
        } else {
          warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing");
          UseAdaptiveSizePolicy = false;
          UseAdaptiveNUMAChunkSizing = false;
        }
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  if (MaxFDLimit) {
    // Set the number of file descriptors to the maximum; print an error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  // Initialize lock used to serialize thread creation (see os::create_thread)
  Linux::set_createThread_lock(new Mutex(Mutex::leaf, "createThread_lock", false));

  // at-exit methods are called in the reverse order of their registration.
  // atexit functions are called on return from main or as a result of a
  // call to exit(3C). There can be only 32 of these functions registered
  // and atexit() does not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // initialize thread priority policy
  prio_init();

  return JNI_OK;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  if (!guard_memory((char*)_polling_page, Linux::page_size()))
    fatal("Could not disable polling page");
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  if (!linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) {
    fatal("Could not enable polling page");
  }
};

static int os_cpu_count(const cpu_set_t* cpus) {
  int count = 0;
  // only look up to the number of configured processors
  for (int i = 0; i < os::processor_count(); i++) {
    if (CPU_ISSET(i, cpus)) {
      count++;
    }
  }
  return count;
}

// Get the current number of available processors for this process.
// This value can change at any time during a process's lifetime.
// sched_getaffinity gives an accurate answer as it accounts for cpusets.
// If anything goes wrong we fall back to returning the number of online
// processors - which can be greater than the number available to the process.
int os::Linux::active_processor_count() {
  cpu_set_t cpus;  // can represent at most 1024 (CPU_SETSIZE) processors
  int cpus_size = sizeof(cpu_set_t);
  int cpu_count = 0;

  // pid 0 means the current thread - which we have to assume represents the process
  if (sched_getaffinity(0, cpus_size, &cpus) == 0) {
    cpu_count = os_cpu_count(&cpus);
    if (PrintActiveCpus) {
      tty->print_cr("active_processor_count: sched_getaffinity processor count: %d", cpu_count);
    }
  } else {
    cpu_count = ::sysconf(_SC_NPROCESSORS_ONLN);
    warning("sched_getaffinity failed (%s) - using online processor count (%d) "
            "which may exceed available processors", strerror(errno), cpu_count);
  }

  assert(cpu_count > 0 && cpu_count <= os::processor_count(), "sanity check");
  return cpu_count;
}
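
// Illustrative example (not from the source): launched as
// `taskset -c 0-3 java ...` on a 16-CPU machine, sched_getaffinity() reports
// an affinity mask covering 4 CPUs, so this returns 4 rather than the 16 that
// sysconf(_SC_NPROCESSORS_ONLN) would report.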

// Determine the active processor count from one of
// three different sources:
//
// 1. User option -XX:ActiveProcessorCount
// 2. kernel os calls (sched_getaffinity or sysconf(_SC_NPROCESSORS_ONLN))
// 3. extracted from cgroup cpu subsystem (shares and quotas)
//
// Option 1, if specified, will always override.
// If the cgroup subsystem is active and configured, we
// will return the min of the cgroup and option 2 results.
// This is required since tools, such as numactl, that
// alter cpu affinity do not update cgroup subsystem
// cpuset configuration files.
int os::active_processor_count() {
  // User has overridden the number of active processors
  if (ActiveProcessorCount > 0) {
    if (PrintActiveCpus) {
      tty->print_cr("active_processor_count: "
                    "active processor count set by user : %d",
                    ActiveProcessorCount);
    }
    return ActiveProcessorCount;
  }

  int active_cpus;
  if (OSContainer::is_containerized()) {
    active_cpus = OSContainer::active_processor_count();
    if (PrintActiveCpus) {
      tty->print_cr("active_processor_count: determined by OSContainer: %d",
                    active_cpus);
    }
  } else {
    active_cpus = os::Linux::active_processor_count();
  }

  return active_cpus;
}
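
// Worked example (illustrative, not from the source): in a container whose
// cgroup cpu controller is configured with quota 200000 and period 100000
// (two CPUs' worth of bandwidth), OSContainer::active_processor_count()
// derives 2 from the cgroup settings (internally bounded by the
// affinity-based count, per the min() rule described above), and that is
// what this function returns even if the affinity mask would allow 8 CPUs.
// -XX:ActiveProcessorCount=<n> still overrides both sources.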

void os::set_native_thread_name(const char *name) {
  if (Linux::_pthread_setname_np) {
    char buf[16];  // according to the glibc manpage, 16 chars incl. '\0'
    snprintf(buf, sizeof(buf), "%s", name);
    buf[sizeof(buf) - 1] = '\0';
    const int rc = Linux::_pthread_setname_np(pthread_self(), buf);
    // ERANGE should not happen; all other errors should just be ignored.
    assert(rc != ERANGE, "pthread_setname_np failed");
  }
}
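
// Illustrative note: glibc limits thread names to 16 bytes including the
// terminating '\0', which is why the name is truncated into a 16-byte buffer
// above before being handed to pthread_setname_np(). For example,
// "VM Periodic Task Thread" would be passed on as "VM Periodic Tas".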

bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

///

void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}

class PcFetcher : public os::SuspendedThreadTask {
 public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  ExtendedPC result();
 protected:
  void do_task(const os::SuspendedThreadTaskContext& context);
 private:
  ExtendedPC _epc;
};

ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}

void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    _epc = os::Linux::ucontext_get_pc((ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}

// Suspends the target using the signal mechanism and then grabs the PC before
// resuming the target. Used by the flat-profiler only
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher for the VMThread
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
  assert(thread->is_VM_thread(), "Can only be called for VMThread");

  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}

int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
{
  if (is_NPTL()) {
    return pthread_cond_timedwait(_cond, _mutex, _abstime);
  } else {
    // 6292965: LinuxThreads pthread_cond_timedwait() resets FPU control
    // word back to default 64bit precision if condvar is signaled. Java
    // wants 53bit precision. Save and restore current value.
    int fpu = get_fpu_control_word();
    int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
    set_fpu_control_word(fpu);
    return status;
  }
}

////////////////////////////////////////////////////////////////////////////////
// debug support

bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo) != 0) {
    st->print(PTR_FORMAT ": ", addr);
    if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
      st->print("%s+%#x", dlinfo.dli_sname,
                addr - (intptr_t)dlinfo.dli_saddr);
    } else if (dlinfo.dli_fbase != NULL) {
      st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
    } else {
      st->print("<absolute address>");
    }
    if (dlinfo.dli_fname != NULL) {
      st->print(" in %s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL) {
      st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
    }
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC
      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest) lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest) begin = lowest;
      Dl_info dlinfo2;
      if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}

////////////////////////////////////////////////////////////////////////////////
// misc

// This does not do anything on Linux. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters)
// on, e.g., Win32.
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
                         JavaCallArguments* args, Thread* thread) {
  f(value, method, args, thread);
}

void os::print_statistics() {
}

int os::message_box(const char* title, const char* message) {
  int i;
  fdStream err(defaultStream::error_fd());
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (i = 0; i < 78; i++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}

int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  return ::stat(pathbuf, sbuf);
}

bool os::check_heap(bool force) {
  return true;
}

int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
  return ::vsnprintf(buf, count, format, args);
}

// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = opendir(path);
  if (dir == NULL) return true;

  /* Scan the directory */
  bool result = true;
  while (result && (ptr = readdir(dir)) != NULL) {
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      result = false;
    }
  }
  closedir(dir);
  return result;
}

// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

#ifndef O_DELETE
#define O_DELETE 0x10000
#endif

#ifdef __ANDROID__
int open64(const char* pathName, int flags, int mode) {
  return ::open(pathName, flags, mode);
}
#endif //__ANDROID__

// Open a file. Unlink the file immediately after open returns
// if the specified oflag has the O_DELETE flag set.
// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c

int os::open(const char *path, int oflag, int mode) {

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;
  int o_delete = (oflag & O_DELETE);
  oflag = oflag & ~O_DELETE;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  // If the open succeeded, the file might still be a directory
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }

  /*
   * All file descriptors that are opened in the JVM and not
   * specifically destined for a subprocess should have the
   * close-on-exec flag set. If we don't set it, then careless 3rd
   * party native code might fork and exec without closing all
   * appropriate file descriptors (e.g. as we do in closeDescriptors in
   * UNIXProcess.c), and this in turn might:
   *
   * - cause end-of-file to fail to be detected on some file
   *   descriptors, resulting in mysterious hangs, or
   *
   * - might cause an fopen in the subprocess to fail on a system
   *   suffering from bug 1085341.
   *
   * (Yes, the default setting of the close-on-exec flag is a Unix
   * design flaw)
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
   * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
   */
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  if (o_delete != 0) {
    ::unlink(path);
  }
  return fd;
}

#ifdef __ANDROID__
#define S_IREAD S_IRUSR
#define S_IWRITE S_IWUSR
#endif

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  if (!rewrite_existing) {
    oflags |= O_EXCL;
  }
  return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}

// This code originates from JDK's sysAvailable
// from src/solaris/hpi/src/native_threads/src/sys_api_td.c

int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      /*
       * XXX: is the following call interruptible? If so, this might
       * need to go through the INTERRUPT_IO() wrapper as for other
       * blocking, interruptible calls in this file.
       */
      int n;
      if (::ioctl(fd, FIONREAD, &n) >= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}
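
// Worked example (illustrative): for a regular file of 1124 bytes with the
// descriptor positioned at offset 100, the three lseek64 calls above yield
// cur == 100 and end == 1124 and then restore the original offset, so
// *bytes is set to 1024 and 1 is returned.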

int os::socket_available(int fd, jint *pbytes) {
  // Linux doc says EINTR not returned, unlike Solaris
  int ret = ::ioctl(fd, FIONREAD, pbytes);

  //%% note ioctl can return 0 when successful, JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the jdk.
  return (ret < 0) ? 0 : 1;
}

// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  int prot;
  int flags = MAP_PRIVATE;

  if (read_only) {
    prot = PROT_READ;
  } else {
    prot = PROT_READ | PROT_WRITE;
  }

  if (allow_exec) {
    prot |= PROT_EXEC;
  }

  if (addr != NULL) {
    flags |= MAP_FIXED;
  }

  char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
                                     fd, file_offset);
  if (mapped_address == MAP_FAILED) {
    return NULL;
  }
  return mapped_address;
}

// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}

// Unmap a block of memory.
bool os::pd_unmap_memory(char* addr, size_t bytes) {
  return munmap(addr, bytes) == 0;
}

static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);

static clockid_t thread_cpu_clockid(Thread* thread) {
  pthread_t tid = thread->osthread()->pthread_id();
  clockid_t clockid;

  // Get thread clockid
  int rc = os::Linux::pthread_getcpuclockid(tid, &clockid);
  assert(rc == 0, "pthread_getcpuclockid is expected to return 0 code");
  return clockid;
}

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) return
// the fast estimate available on the platform.

jlong os::current_thread_cpu_time() {
  if (os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
  } else {
    // return user + sys since the cost is the same
    return slow_thread_cpu_time(Thread::current(), true /* user + sys */);
  }
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns
  if (os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread));
  } else {
    return slow_thread_cpu_time(thread, true /* user + sys */);
  }
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
  } else {
    return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time);
  }
}

jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread));
  } else {
    return slow_thread_cpu_time(thread, user_sys_cpu_time);
  }
}

//
// -1 on error.
//

PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  pid_t tid = thread->osthread()->thread_id();
  char *s;
  char stat[2048];
  int statlen;
  char proc_name[64];
  int count;
  long sys_time, user_time;
  char cdummy;
  int idummy;
  long ldummy;
  FILE *fp;

  snprintf(proc_name, 64, "/proc/self/task/%d/stat", tid);
  fp = fopen(proc_name, "r");
  if (fp == NULL) return -1;
  statlen = fread(stat, 1, 2047, fp);
  stat[statlen] = '\0';
  fclose(fp);

  // Skip pid and the command string. Note that we could be dealing with
  // weird command names, e.g. user could decide to rename java launcher
  // to "java 1.4.2 :)", then the stat file would look like
  //                1234 (java 1.4.2 :)) R ... ...
  // We don't really need to know the command string, just find the last
  // occurrence of ")" and then start parsing from there. See bug 4726580.
  s = strrchr(stat, ')');
  if (s == NULL) return -1;

  // Skip blank chars
  do s++; while (isspace(*s));

  count = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
                 &cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
                 &ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
                 &user_time, &sys_time);
  if (count != 13) return -1;
  if (user_sys_cpu_time) {
    return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
  } else {
    return (jlong)user_time * (1000000000 / clock_tics_per_sec);
  }
}
PRAGMA_DIAG_POP

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

bool os::is_thread_cpu_time_supported() {
  return true;
}

// System loadavg support. Returns -1 if load average cannot be obtained.
// Linux doesn't yet have an (official) notion of processor sets,
// so just return the system-wide load average.
int os::loadavg(double loadavg[], int nelem) {
#ifdef __ANDROID__
  return -1;
#else
  return ::getloadavg(loadavg, nelem);
#endif // !__ANDROID__
}

void os::pause() {
  char filename[MAX_PATH];
  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
  } else {
    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  }

  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  if (fd != -1) {
    struct stat buf;
    ::close(fd);
    while (::stat(filename, &buf) == 0) {
      (void)::poll(NULL, 0, 100);
    }
  } else {
    jio_fprintf(stderr,
                "Could not open pause file '%s', continuing immediately.\n", filename);
  }
}

// Refer to the comments in os_solaris.cpp park-unpark.
//
// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
// For specifics regarding the bug see GLIBC BUGID 261237 :
//    http://www.mail-archive.com/[email protected]/msg10837.html.
// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
// is used. (The simple C test-case provided in the GLIBC bug report manifests the
// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
// and monitorenter when we're using 1-0 locking. All those operations may result in
// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
// of libpthread avoids the problem, but isn't practical.
//
// Possible remedies:
//
// 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
//      This is palliative and probabilistic, however. If the thread is preempted
//      between the call to compute_abstime() and pthread_cond_timedwait(), more
//      than the minimum period may have passed, and the abstime may be stale (in the
//      past), resulting in a hang. Using this technique reduces the odds of a hang
//      but the JVM is still vulnerable, particularly on heavily loaded systems.
//
// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
//      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
//      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
//      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
//      thread.
//
// 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
//      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
//      a timeout request to the chron thread and then blocking via pthread_cond_wait().
//      This also works well. In fact it avoids kernel-level scalability impediments
//      on certain platforms that don't handle lots of active pthread_cond_timedwait()
//      timers in a graceful fashion.
//
// 4.   When the abstime value is in the past it appears that control returns
//      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
//      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
//      can avoid the problem by reinitializing the condvar -- by cond_destroy()
//      followed by cond_init() -- after all calls to pthread_cond_timedwait().
//      It may be possible to avoid reinitialization by checking the return
//      value from pthread_cond_timedwait(). In addition to reinitializing the
//      condvar we must establish the invariant that cond_signal() is only called
//      within critical sections protected by the adjunct mutex. This prevents
//      cond_signal() from "seeing" a condvar that's in the midst of being
//      reinitialized or that is corrupt. Sadly, this invariant obviates the
//      desirable signal-after-unlock optimization that avoids futile context switching.
//
// I'm also concerned that some versions of NPTL might allocate an auxiliary
// structure when a condvar is used or initialized. cond_destroy() would
// release the helper structure. Our reinitialize-after-timedwait fix
// put excessive stress on malloc/free and locks protecting the c-heap.
//
// We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
// It may be possible to refine (4) by checking the kernel and NPTL versions
// and only enabling the work-around for vulnerable environments.
// utility to compute the abstime argument to timedwait:
// millis is the relative timeout time
// abstime will be the absolute timeout time
// TODO: replace compute_abstime() with unpackTime()

static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
  if (millis < 0) millis = 0;

  jlong seconds = millis / 1000;
  millis %= 1000;
  if (seconds > 50000000) { // see man cond_timedwait(3T)
    seconds = 50000000;
  }

  if (os::Linux::supports_monotonic_clock()) {
    struct timespec now;
    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
    assert_status(status == 0, status, "clock_gettime");
    abstime->tv_sec = now.tv_sec + seconds;
    long nanos = now.tv_nsec + millis * NANOSECS_PER_MILLISEC;
    if (nanos >= NANOSECS_PER_SEC) {
      abstime->tv_sec += 1;
      nanos -= NANOSECS_PER_SEC;
    }
    abstime->tv_nsec = nanos;
  } else {
    struct timeval now;
    int status = gettimeofday(&now, NULL);
    assert(status == 0, "gettimeofday");
    abstime->tv_sec = now.tv_sec + seconds;
    long usec = now.tv_usec + millis * 1000;
    if (usec >= 1000000) {
      abstime->tv_sec += 1;
      usec -= 1000000;
    }
    abstime->tv_nsec = usec * 1000;
  }
  return abstime;
}
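
// Worked example (illustrative): with millis == 2500 and the monotonic clock
// at {tv_sec = 100, tv_nsec = 900000000}: seconds == 2, millis becomes 500,
// so nanos = 900000000 + 500 * NANOSECS_PER_MILLISEC = 1400000000; that
// exceeds NANOSECS_PER_SEC, so the result normalizes to
// {tv_sec = 103, tv_nsec = 400000000}.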

// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event;
    guarantee((v == 0) || (v == 1), "invariant");
    if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
  }
}

void os::PlatformEvent::park() {       // AKA "down()"
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // TODO: assert that _Assoc != NULL or _Assoc == Self
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");
  if (v == 0) {
    // Do this the hard way by blocking ...
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee(_nParked == 0, "invariant");
    ++_nParked;
    while (_Event < 0) {
      status = pthread_cond_wait(_cond, _mutex);
      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
      // Treat this the same as if the wait was interrupted
      if (status == ETIME) { status = EINTR; }
      assert_status(status == 0 || status == EINTR, status, "cond_wait");
    }
    --_nParked;

    _Event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
  guarantee(_Event >= 0, "invariant");
}

int os::PlatformEvent::park(jlong millis) {
  guarantee(_nParked == 0, "invariant");

  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");
  if (v != 0) return OS_OK;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee(_nParked == 0, "invariant");
  ++_nParked;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // The underlying Solaris implementation, cond_timedwait, admits
  // spurious/premature wakeups, but the JLS/JVM spec prevents the
  // JVM from making those visible to Java code. As such, we must
  // filter out spurious wakeups. We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      pthread_cond_destroy(_cond);
      pthread_cond_init(_cond, os::Linux::condAttr());
    }
    assert_status(status == 0 || status == EINTR ||
                  status == ETIME || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break;                 // previous semantics
    if (status == ETIME || status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  if (_Event >= 0) {
    ret = OS_OK;
  }
  _Event = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert(_nParked == 0, "invariant");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other.
  OrderAccess::fence();
  return ret;
}

void os::PlatformEvent::unpark() {
  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  // That is, we can safely transition _Event from -1 to either
  // 0 or 1. Forcing 1 is slightly more efficient for back-to-back
  // unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;

  // Wait for the thread associated with the event to vacate
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int AnyWaiters = _nParked;
  assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
    AnyWaiters = 0;
    pthread_cond_signal(_cond);
  }
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  if (AnyWaiters != 0) {
    status = pthread_cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }

  // Note that we signal() _after_ dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups. In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim will
  // simply re-test the condition and re-park itself.
}
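
// Illustrative sequence (sketch): with _Event == 0, unpark() swaps in 1 and
// returns without touching the mutex; the next park() observes v == 1,
// decrements it to 0 and returns immediately. Only when a thread has already
// parked (_Event == -1) does unpark() take the slow path above and signal
// the condvar.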

// JSR166
// -------------------------------------------------------

/*
 * The solaris and linux implementations of park/unpark are fairly
 * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus a count.
 * Park decrements count if > 0, else does a condvar wait. Unpark
 * sets count to 1 and signals condvar. Only one thread ever waits
 * on the condvar. Contention seen when trying to park implies that someone
 * is unparking you, so don't wait. And spurious returns are fine, so there
 * is no need to track notifications.
 */

/*
 * This code is common to linux and solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given that time is a 64-bit value and the time_t used in the timespec is
 * only a signed-32-bit value (except on 64-bit Linux), we have to watch for
 * overflow if times way in the future are given. Further, on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow we can
 * ignore overflow and just impose a hard-limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
 */
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert(time > 0, "convertTime");
  time_t max_secs = 0;

  if (!os::Linux::supports_monotonic_clock() || isAbsolute) {
    struct timeval now;
    int status = gettimeofday(&now, NULL);
    assert(status == 0, "gettimeofday");

    max_secs = now.tv_sec + MAX_SECS;

    if (isAbsolute) {
      jlong secs = time / 1000;
      if (secs > max_secs) {
        absTime->tv_sec = max_secs;
      } else {
        absTime->tv_sec = secs;
      }
      absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
    } else {
      jlong secs = time / NANOSECS_PER_SEC;
      if (secs >= MAX_SECS) {
        absTime->tv_sec = max_secs;
        absTime->tv_nsec = 0;
      } else {
        absTime->tv_sec = now.tv_sec + secs;
        absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
        if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
          absTime->tv_nsec -= NANOSECS_PER_SEC;
          ++absTime->tv_sec; // note: this must be <= max_secs
        }
      }
    }
  } else {
    // must be relative using monotonic clock
    struct timespec now;
    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
    assert_status(status == 0, status, "clock_gettime");
    max_secs = now.tv_sec + MAX_SECS;
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    } else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_nsec;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

void Parker::park(bool isAbsolute, jlong time) {
  // Ideally we'd do something useful while spinning, such
  // as calling unpackTime().

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(0, &_counter) > 0) return;

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's an interrupt pending.
  // Check interrupt before trying to wait
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if we cannot get the lock, since interference arises from
  // unblocking. Also, check interrupt before trying to wait.
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert(status == 0, "invariant");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  assert(_cur_index == -1, "invariant");
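  // There are two condvars: _cond[REL_INDEX] is initialized with the
  // CLOCK_MONOTONIC attribute (when supported) for relative waits, while
  // _cond[ABS_INDEX] uses the default realtime clock so that absolute
  // deadlines derived from gettimeofday() are interpreted consistently
  // (see the re-init below, which picks the attribute by isAbsolute).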
  if (time == 0) {
    _cur_index = REL_INDEX; // arbitrary choice when not timed
    status = pthread_cond_wait(&_cond[_cur_index], _mutex);
  } else {
    _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
    status = os::Linux::safe_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      pthread_cond_destroy(&_cond[_cur_index]);
      pthread_cond_init(&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
    }
  }
  _cur_index = -1;
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
}

void Parker::unpark() {
  int s, status;
  status = pthread_mutex_lock(_mutex);
  assert(status == 0, "invariant");
  s = _counter;
  _counter = 1;
  if (s < 1) {
    // thread might be parked
    if (_cur_index != -1) {
      // thread is definitely parked
      if (WorkAroundNPTLTimedWaitHang) {
        status = pthread_cond_signal(&_cond[_cur_index]);
        assert(status == 0, "invariant");
        status = pthread_mutex_unlock(_mutex);
        assert(status == 0, "invariant");
      } else {
        // must capture correct index before unlocking
        int index = _cur_index;
        status = pthread_mutex_unlock(_mutex);
        assert(status == 0, "invariant");
        status = pthread_cond_signal(&_cond[index]);
        assert(status == 0, "invariant");
      }
    } else {
      status = pthread_mutex_unlock(_mutex);
      assert(status == 0, "invariant");
    }
  } else {
    status = pthread_mutex_unlock(_mutex);
    assert(status == 0, "invariant");
  }
}
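
// Note on the two signal orderings in unpark(): with
// WorkAroundNPTLTimedWaitHang we signal while still holding the mutex,
// which is safe everywhere but may wake the waiter straight into a held
// lock; otherwise we capture _cur_index, drop the mutex, and signal
// afterwards, avoiding that extra contention.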

extern char** environ;

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from a signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
  const char * argv[4] = {"sh", "-c", cmd, NULL};

  pid_t pid;

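  // vfork() shares the parent's address space and suspends the parent until
  // the child calls execve() or _exit(), so the child below must do nothing
  // except exec; this also keeps the path async-signal-safe.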
  if (use_vfork_if_available) {
    pid = vfork();
  } else {
    pid = fork();
  }

  if (pid < 0) {
    // fork failed
    return -1;

  } else if (pid == 0) {
    // child process

    execve("/bin/sh", (char* const*)argv, environ);

    // execve failed
    _exit(-1);

  } else {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited.
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
        case ECHILD: return 0;
        case EINTR: break;
        default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal.
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through.
      return status;
    }
  }
}
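
// Usage note (illustrative): the VM reaches this path for diagnostics such
// as -XX:OnError= / -XX:OnOutOfMemoryError= commands, where the command
// line has already been composed by the caller.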

// is_headless_jre()
//
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report if we are running in a headless jre.
//
// Since JDK 8, xawt/libmawt.so has been moved into the same directory
// as libawt.so and renamed libawt_xawt.so.
//
bool os::is_headless_jre() {
  struct stat statbuf;
  char buf[MAXPATHLEN];
  char libmawtpath[MAXPATHLEN];
  const char *xawtstr = "/xawt/libmawt.so";
  const char *new_xawtstr = "/libawt_xawt.so";
  char *p;

  // Get path to libjvm.so
  os::jvm_path(buf, sizeof(buf));

  // Get rid of libjvm.so
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // Get rid of client or server
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';
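
  // At this point buf holds e.g. <jre>/lib/<arch> (after stripping
  // "server/libjvm.so"), which is where both candidate libraries live.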

  // check xawt/libmawt.so (snprintf bounds the copy to MAXPATHLEN)
  snprintf(libmawtpath, MAXPATHLEN, "%s%s", buf, xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  // check libawt_xawt.so
  snprintf(libmawtpath, MAXPATHLEN, "%s%s", buf, new_xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  return true;
}

// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
  const char* p = get_current_directory(buffer, bufferSize);

  if (p == NULL) {
    assert(p != NULL, "failed to get current directory");
    return 0;
  }

  return strlen(buffer);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do { \
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    } \
  } while (false)
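
// The do { ... } while (false) wrapper makes test_log() expand to a single
// statement, so it composes safely with unbraced if/else in callers.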

class TestReserveMemorySpecial : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }
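
  // small_page_write() above touches one byte per small page to force the
  // kernel to actually back the reserved range; for hugetlbfs mappings an
  // unbacked page would surface here as SIGBUS rather than as a silently
  // passing test.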

  static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
    if (!UseHugeTLBFS) {
      return;
    }

    test_log("test_reserve_memory_special_huge_tlbfs_only(" SIZE_FORMAT ")", size);

    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);

    if (addr != NULL) {
      small_page_write(addr, size);

      os::Linux::release_memory_special_huge_tlbfs(addr, size);
    }
  }

  static void test_reserve_memory_special_huge_tlbfs_only() {
    if (!UseHugeTLBFS) {
      return;
    }

    size_t lp = os::large_page_size();

    for (size_t size = lp; size <= lp * 10; size += lp) {
      test_reserve_memory_special_huge_tlbfs_only(size);
    }
  }
6656
static void test_reserve_memory_special_huge_tlbfs_mixed() {
6657
size_t lp = os::large_page_size();
6658
size_t ag = os::vm_allocation_granularity();
6659
6660
// sizes to test
6661
const size_t sizes[] = {
6662
lp, lp + ag, lp + lp / 2, lp * 2,
6663
lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
6664
lp * 10, lp * 10 + lp / 2
6665
};
6666
const int num_sizes = sizeof(sizes) / sizeof(size_t);
6667
6668
// For each size/alignment combination, we test three scenarios:
6669
// 1) with req_addr == NULL
6670
// 2) with a non-null req_addr at which we expect to successfully allocate
6671
// 3) with a non-null req_addr which contains a pre-existing mapping, at which we
6672
// expect the allocation to either fail or to ignore req_addr
6673
6674
// Pre-allocate two areas; they shall be as large as the largest allocation
6675
// and aligned to the largest alignment we will be testing.
6676
const size_t mapping_size = sizes[num_sizes - 1] * 2;
6677
char* const mapping1 = (char*) ::mmap(NULL, mapping_size,
6678
PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
6679
-1, 0);
6680
assert(mapping1 != MAP_FAILED, "should work");
6681
6682
char* const mapping2 = (char*) ::mmap(NULL, mapping_size,
6683
PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
6684
-1, 0);
6685
assert(mapping2 != MAP_FAILED, "should work");
6686
6687
// Unmap the first mapping, but leave the second mapping intact: the first
6688
// mapping will serve as a value for a "good" req_addr (case 2). The second
6689
// mapping, still intact, as "bad" req_addr (case 3).
6690
::munmap(mapping1, mapping_size);
6691
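    // Unmapping mapping1 leaves an address-space hole of known location and
    // size; reserving at a req_addr inside that hole should succeed because
    // nothing else is mapped there (barring concurrent mappings by other
    // threads, which this test assumes away).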

    // Case 1
    test_log("%s, req_addr NULL:", __FUNCTION__);
    test_log("size align result");

    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " -> " PTR_FORMAT " %s",
                 size, alignment, p, (p != NULL ? "" : "(failed)"));
        if (p != NULL) {
          assert(is_ptr_aligned(p, alignment), "must be");
          small_page_write(p, size);
          os::Linux::release_memory_special_huge_tlbfs(p, size);
        }
      }
    }

    // Case 2
    test_log("%s, req_addr non-NULL:", __FUNCTION__);
    test_log("size align req_addr result");

    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
        char* const req_addr = (char*) align_ptr_up(mapping1, alignment);
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
                 size, alignment, req_addr, p,
                 ((p != NULL ? (p == req_addr ? "(exact match)" : "") : "(failed)")));
        if (p != NULL) {
          assert(p == req_addr, "must be");
          small_page_write(p, size);
          os::Linux::release_memory_special_huge_tlbfs(p, size);
        }
      }
    }

    // Case 3
    test_log("%s, req_addr non-NULL with preexisting mapping:", __FUNCTION__);
    test_log("size align req_addr result");

    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
        char* const req_addr = (char*) align_ptr_up(mapping2, alignment);
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
                 size, alignment, req_addr, p,
                 ((p != NULL ? "" : "(failed)")));
        // Since the area around req_addr already contains mappings, the API should
        // always return NULL (as per its contract, it cannot return another address).
        assert(p == NULL, "must be");
      }
    }

    ::munmap(mapping2, mapping_size);
  }

  static void test_reserve_memory_special_huge_tlbfs() {
    if (!UseHugeTLBFS) {
      return;
    }

    test_reserve_memory_special_huge_tlbfs_only();
    test_reserve_memory_special_huge_tlbfs_mixed();
  }

  static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
    if (!UseSHM) {
      return;
    }

    test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);

    char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);

    if (addr != NULL) {
      assert(is_ptr_aligned(addr, alignment), "Check");
      assert(is_ptr_aligned(addr, os::large_page_size()), "Check");

      small_page_write(addr, size);

      os::Linux::release_memory_special_shm(addr, size);
    }
  }

  static void test_reserve_memory_special_shm() {
    size_t lp = os::large_page_size();
    size_t ag = os::vm_allocation_granularity();

    for (size_t size = ag; size < lp * 3; size += ag) {
      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
        test_reserve_memory_special_shm(size, alignment);
      }
    }
  }

  static void test() {
    test_reserve_memory_special_huge_tlbfs();
    test_reserve_memory_special_shm();
  }
};

void TestReserveMemorySpecial_test() {
  TestReserveMemorySpecial::test();
}

#endif