Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-aarch32-jdk8u
Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/os/linux/vm/os_linux.cpp
48785 views
1
/*
2
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
// no precompiled headers
26
#include "classfile/classLoader.hpp"
27
#include "classfile/systemDictionary.hpp"
28
#include "classfile/vmSymbols.hpp"
29
#include "code/icBuffer.hpp"
30
#include "code/vtableStubs.hpp"
31
#include "compiler/compileBroker.hpp"
32
#include "compiler/disassembler.hpp"
33
#include "interpreter/interpreter.hpp"
34
#include "jvm_linux.h"
35
#include "memory/allocation.inline.hpp"
36
#include "memory/filemap.hpp"
37
#include "mutex_linux.inline.hpp"
38
#include "oops/oop.inline.hpp"
39
#include "os_share_linux.hpp"
40
#include "osContainer_linux.hpp"
41
#include "prims/jniFastGetField.hpp"
42
#include "prims/jvm.h"
43
#include "prims/jvm_misc.hpp"
44
#include "runtime/arguments.hpp"
45
#include "runtime/extendedPC.hpp"
46
#include "runtime/globals.hpp"
47
#include "runtime/interfaceSupport.hpp"
48
#include "runtime/init.hpp"
49
#include "runtime/java.hpp"
50
#include "runtime/javaCalls.hpp"
51
#include "runtime/mutexLocker.hpp"
52
#include "runtime/objectMonitor.hpp"
53
#include "runtime/orderAccess.inline.hpp"
54
#include "runtime/osThread.hpp"
55
#include "runtime/perfMemory.hpp"
56
#include "runtime/sharedRuntime.hpp"
57
#include "runtime/statSampler.hpp"
58
#include "runtime/stubRoutines.hpp"
59
#include "runtime/thread.inline.hpp"
60
#include "runtime/threadCritical.hpp"
61
#include "runtime/timer.hpp"
62
#include "services/attachListener.hpp"
63
#include "services/memTracker.hpp"
64
#include "services/runtimeService.hpp"
65
#include "utilities/decoder.hpp"
66
#include "utilities/defaultStream.hpp"
67
#include "utilities/events.hpp"
68
#include "utilities/elfFile.hpp"
69
#include "utilities/growableArray.hpp"
70
#include "utilities/vmError.hpp"
71
72
// put OS-includes here
73
# include <sys/types.h>
74
# include <sys/mman.h>
75
# include <sys/stat.h>
76
# include <sys/select.h>
77
# include <pthread.h>
78
# include <signal.h>
79
# include <errno.h>
80
# include <dlfcn.h>
81
# include <stdio.h>
82
# include <unistd.h>
83
# include <sys/resource.h>
84
# include <pthread.h>
85
# include <sys/stat.h>
86
# include <sys/time.h>
87
# include <sys/times.h>
88
# include <sys/utsname.h>
89
# include <sys/socket.h>
90
# include <sys/wait.h>
91
# include <pwd.h>
92
# include <poll.h>
93
# include <semaphore.h>
94
# include <fcntl.h>
95
# include <string.h>
96
# include <sys/sysinfo.h>
97
#if !defined(__UCLIBC__) && !defined(__ANDROID__)
98
# include <gnu/libc-version.h>
99
#endif
100
# include <sys/ipc.h>
101
#if !defined(__ANDROID__)
102
# include <syscall.h>
103
# include <sys/shm.h>
104
#else
105
# include <sys/syscall.h>
106
#include <libgen.h>
107
#endif
108
# include <link.h>
109
# include <stdint.h>
110
# include <inttypes.h>
111
# include <sys/ioctl.h>
112
113
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
114
115
#ifdef __ANDROID__
116
# define DISABLE_SHM
117
#endif
118
/*
119
#ifdef __ANDROID__
120
# define lseek lseek64
121
# define open open64
122
# define off_t off64_t
123
#endif
124
*/
125
#ifndef _GNU_SOURCE
126
#define _GNU_SOURCE
127
#include <sched.h>
128
#undef _GNU_SOURCE
129
#else
130
#include <sched.h>
131
#endif
132
133
// if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
134
// getrusage() is prepared to handle the associated failure.
135
#ifndef RUSAGE_THREAD
136
#define RUSAGE_THREAD (1) /* only the calling thread */
137
#endif
138
139
#define MAX_PATH (2 * K)
140
141
#define MAX_SECS 100000000
142
143
// for timer info max values which include all bits
144
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
145
146
#define LARGEPAGES_BIT (1 << 6)
147
////////////////////////////////////////////////////////////////////////////////
// global variables

// Total physical memory in bytes; filled in by initialize_system_info().
julong os::Linux::_physical_memory = 0;

// Bounds of the primordial (initial) thread's stack; 0/NULL until probed.
address   os::Linux::_initial_thread_stack_bottom = NULL;
uintptr_t os::Linux::_initial_thread_stack_size   = 0;

// Dynamically resolved libc/libpthread entry points; NULL until set elsewhere.
int (*os::Linux::_clock_gettime)(clockid_t, struct timespec *) = NULL;
int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
Mutex* os::Linux::_createThread_lock = NULL;
pthread_t os::Linux::_main_thread;
int os::Linux::_page_size = -1;   // -1 marks "not yet initialized"
const int os::Linux::_vm_default_page_size = (8 * K);
bool os::Linux::_is_floating_stack = false;
bool os::Linux::_is_NPTL = false;
bool os::Linux::_supports_fast_thread_cpu_time = false;
// Human-readable C library / pthread library version strings (set by
// libpthread_init()).
const char * os::Linux::_glibc_version = NULL;
const char * os::Linux::_libpthread_version = NULL;
pthread_condattr_t os::Linux::_condattr[1];

static jlong initial_time_count=0;

static int clock_tics_per_sec = 100;

// For diagnostics to print a message once. see run_periodic_checks
static sigset_t check_signal_done;
static bool check_signals = true;

static pid_t _initial_pid = 0;

/* Signal number used to suspend/resume a thread */

/* do not use any signal number less than SIGSEGV, see 4355769 */
static int SR_signum = SIGUSR2;
sigset_t SR_sigset;

/* Used to protect dlsym() calls */
static pthread_mutex_t dl_mutex;

// Declarations
static bool read_so_path_from_maps(const char* so_name, char* buf, int buflen);
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);

// utility functions

static int SR_initialize();
195
// Available memory in bytes; delegates to the Linux-specific implementation
// (which is container-aware, see os::Linux::available_memory below).
julong os::available_memory() {
  return Linux::available_memory();
}
198
199
// Available memory in bytes. Inside a container with both a memory limit and
// a current usage reading, report limit - usage; otherwise fall back to the
// host's sysinfo() free-RAM figure.
julong os::Linux::available_memory() {
  // values in struct sysinfo are "unsigned long"
  struct sysinfo si;
  julong avail_mem;

  if (OSContainer::is_containerized()) {
    jlong mem_limit, mem_usage;
    // A result < 1 means "unlimited" or OSCONTAINER_ERROR; in either case we
    // fall through to the host value below.
    if ((mem_limit = OSContainer::memory_limit_in_bytes()) < 1) {
      if (PrintContainerInfo) {
        tty->print_cr("container memory limit %s: " JLONG_FORMAT ", using host value",
                       mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);
      }
    }

    // NOTE: mem_usage is only assigned when mem_limit > 0; the short-circuit
    // in the condition below is what keeps the later read well-defined.
    if (mem_limit > 0 && (mem_usage = OSContainer::memory_usage_in_bytes()) < 1) {
      if (PrintContainerInfo) {
        tty->print_cr("container memory usage failed: " JLONG_FORMAT ", using host value", mem_usage);
      }
    }

    if (mem_limit > 0 && mem_usage > 0 ) {
      // Clamp at 0 in case usage momentarily exceeds the limit.
      avail_mem = mem_limit > mem_usage ? (julong)mem_limit - (julong)mem_usage : 0;
      if (PrintContainerInfo) {
        tty->print_cr("available container memory: " JULONG_FORMAT, avail_mem);
      }
      return avail_mem;
    }
  }

  // Host path: free RAM scaled by the kernel's memory unit.
  sysinfo(&si);
  avail_mem = (julong)si.freeram * si.mem_unit;
  if (Verbose) {
    tty->print_cr("available memory: " JULONG_FORMAT, avail_mem);
  }
  return avail_mem;
}
235
236
// Total physical memory in bytes. Inside a container with an effective
// memory limit, the limit is reported instead of the host RAM size.
julong os::physical_memory() {
  jlong phys_mem = 0;
  if (OSContainer::is_containerized()) {
    jlong mem_limit;
    if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
      if (PrintContainerInfo) {
        tty->print_cr("total container memory: " JLONG_FORMAT, mem_limit);
      }
      return mem_limit;
    }

    // Limit query failed or is unlimited: fall back to the host value.
    if (PrintContainerInfo) {
      tty->print_cr("container memory limit %s: " JLONG_FORMAT ", using host value",
                     mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);
    }
  }

  phys_mem = Linux::physical_memory();
  if (Verbose) {
    tty->print_cr("total system memory: " JLONG_FORMAT, phys_mem);
  }
  return phys_mem;
}
259
260
////////////////////////////////////////////////////////////////////////////////
261
// environment support
262
263
// Copy the value of environment variable 'name' into 'buf' (capacity 'len').
// Returns true on success; on failure (unset variable, or a value that does
// not fit) 'buf' is set to the empty string and false is returned.
bool os::getenv(const char* name, char* buf, int len) {
  const char* value = ::getenv(name);
  if (value == NULL || strlen(value) >= (size_t)len) {
    // Missing, or too long for the caller's buffer: hand back "".
    if (len > 0) {
      buf[0] = 0;
    }
    return false;
  }
  strcpy(buf, value);
  return true;
}
272
273
274
// Return true if user is running as root.
275
276
// Detect set-uid/set-gid execution: true when the real and effective user
// or group ids differ. Computed once and cached — the ids we compare do not
// change in a way this check cares about after startup.
bool os::have_special_privileges() {
  static bool checked = false;
  static bool special = false;
  if (!checked) {
    const bool uid_differs = (getuid() != geteuid());
    const bool gid_differs = (getgid() != getegid());
    special = uid_differs || gid_differs;
    checked = true;
  }
  return special;
}
285
286
287
#ifndef SYS_gettid
288
// i386: 224, ia64: 1105, amd64: 186, sparc 143
289
#ifdef __ia64__
290
#define SYS_gettid 1105
291
#else
292
#if defined(__i386__) || defined(__arm__)
293
#define SYS_gettid 224
294
#else
295
#ifdef __amd64__
296
#define SYS_gettid 186
297
#else
298
#ifdef __sparc__
299
#define SYS_gettid 143
300
#else
301
#error define gettid for the arch
302
#endif
303
#endif
304
#endif
305
#endif
306
#endif
307
308
// Cpu architecture string
// Used below when composing the default native library search path
// (see init_system_properties_values()).
static char cpu_arch[] = HOTSPOT_LIB_ARCH;
310
311
// pid_t gettid()
312
//
313
// Returns the kernel thread id of the currently running thread. Kernel
314
// thread id is used to access /proc.
315
//
316
// (Note that getpid() on LinuxThreads returns kernel thread id too; but
317
// on NPTL, it returns the same pid for all threads, as required by POSIX.)
318
//
319
// Kernel thread id of the calling thread, fetched via the raw syscall
// (glibc historically provided no gettid() wrapper).
pid_t os::Linux::gettid() {
  int tid = syscall(SYS_gettid);
  if (tid != -1) {
    return (pid_t)tid;
  }
  // Syscall unavailable: old pre-NPTL kernel, where getpid() is already
  // per-thread, so it serves as the thread id.
  return getpid();
}
328
329
// Most versions of linux have a bug where the number of processors are
// determined by looking at the /proc file system. In a chroot environment,
// the system call returns 1. This causes the VM to act as if it is
// a single processor and elide locking (see is_MP() call).
static bool unsafe_chroot_detected = false;
static const char *unstable_chroot_error = "/proc file system not found.\n"
                     "Java may be unstable running multithreaded in a chroot "
                     "environment on Linux when /proc filesystem is not mounted.";

// Record the processor count and physical memory size, and probe for the
// chroot-without-/proc condition described above.
void os::Linux::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  if (processor_count() == 1) {
    // Exactly one CPU reported: verify /proc/<tid> is actually visible,
    // otherwise we may be seeing the chroot bug rather than a real UP box.
    pid_t pid = os::Linux::gettid();
    char fname[32];
    jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
    FILE *fp = fopen(fname, "r");
    if (fp == NULL) {
      unsafe_chroot_detected = true;
    } else {
      fclose(fp);
    }
  }
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
  assert(processor_count() > 0, "linux error");
}
354
355
// Derive and publish JAVA_HOME, the dll directory, the native library search
// path, and the extensions/endorsed directories, all from the on-disk
// location of libjvm.so.
void os::init_system_properties_values() {
  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm.so.
  // This library should be located at:
  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm.so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // See ld(1):
  // The linker uses the following search paths to locate required
  // shared libraries:
  // 1: ...
  // ...
  // 7: The default directories, normally /lib and /usr/lib.
  // NOTE(review): '&&' binds tighter than '||' here, so this reads as
  // AMD64 || (_LP64 && (SPARC || PPC || S390)) — presumably intentional
  // (matches upstream), but worth confirming if new platforms are added.
#if defined(AMD64) || defined(_LP64) && (defined(SPARC) || defined(PPC) || defined(S390))
#define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
#else
#define DEFAULT_LIBPATH "/lib:/usr/lib"
#endif

// Base path of extensions installed on the system.
#define SYS_EXT_DIR "/usr/java/packages"
#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"

  // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // Strip two more components (/<arch> and /lib) to reach <java_home>.
    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.
  //
  // Note: Due to a legacy implementation, most of the library path
  // is set in the launcher. This was to accomodate linking restrictions
  // on legacy Linux implementations (which are no longer supported).
  // Eventually, all the library path setting will be done here.
  //
  // However, to prevent the proliferation of improperly built native
  // libraries, the new path component /usr/java/packages is added here.
  // Eventually, all the library path setting will be done here.
  {
    // Get the user setting of LD_LIBRARY_PATH, and prepended it. It
    // should always exist (until the legacy problem cited above is
    // addressed).
    const char *v = ::getenv("LD_LIBRARY_PATH");
    const char *v_colon = ":";
    if (v == NULL) { v = ""; v_colon = ""; }
    // That's +1 for the colon and +1 for the trailing '\0'.
    char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char,
                                                     strlen(v) + 1 +
                                                     sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH) + 1,
                                                     mtInternal);
    sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib/%s:" DEFAULT_LIBPATH, v, v_colon, cpu_arch);
    Arguments::set_library_path(ld_library_path);
    FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
  }

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  // Endorsed standards default directory.
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);

#undef DEFAULT_LIBPATH
#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}
478
479
////////////////////////////////////////////////////////////////////////////////
480
// breakpoint support
481
482
// Trap into an attached debugger via the platform BREAKPOINT macro.
void os::breakpoint() {
  BREAKPOINT;
}
485
486
// Empty C-linkage function: gives debugger users an unmangled symbol to
// set a breakpoint on ("break breakpoint").
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
489
490
////////////////////////////////////////////////////////////////////////////////
// signal support

// Guard flag (debug builds only) asserting the sets below were built before use.
debug_only(static bool signal_sets_initialized = false);
// Signal sets built by signal_sets_init() and served by the accessors below.
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
495
496
bool os::Linux::is_sig_ignored(int sig) {
497
struct sigaction oact;
498
sigaction(sig, (struct sigaction*)NULL, &oact);
499
void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
500
: CAST_FROM_FN_PTR(void*, oact.sa_handler);
501
if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
502
return true;
503
else
504
return false;
505
}
506
507
// Build the three process-wide signal sets (unblocked_sigs, vm_sigs,
// allowdebug_blocked_sigs) consulted by hotspot_sigmask() and the accessors
// below. Must run once, early, before other threads exist.
void os::Linux::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  // by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  // Fault signals the VM's own handlers must always be able to receive.
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
#if defined(PPC64)
  sigaddset(&unblocked_sigs, SIGTRAP);
#endif
  // The thread suspend/resume signal (SR_signum, default SIGUSR2).
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
    // Only claim the shutdown signals whose dispositions the user has not
    // already set to "ignore".
    if (!os::Linux::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Linux::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Linux::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);

}
555
556
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Accessor for the set built by signal_sets_init(); must not be called
// before that initialization has run.
sigset_t* os::Linux::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
562
563
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Accessor for the set built by signal_sets_init().
sigset_t* os::Linux::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
569
570
// These are signals that are blocked during cond_wait to allow debugger in
// Accessor for the set built by signal_sets_init().
sigset_t* os::Linux::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}
575
576
// Install the VM's per-thread signal mask on the current thread, after
// saving the caller's original mask in the OSThread (so an attached thread
// can have it restored on detach).
void os::Linux::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask before setting VM signal mask
  sigset_t caller_sigmask;
  // SIG_BLOCK with a NULL set only *reads* the current mask.
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  // Every thread must be able to receive the always-unblocked set.
  pthread_sigmask(SIG_UNBLOCK, os::Linux::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}
597
598
//////////////////////////////////////////////////////////////////////////////
599
// detecting pthread library
600
601
void os::Linux::libpthread_init() {
602
// Save glibc and pthread version strings. Note that _CS_GNU_LIBC_VERSION
603
// and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a
604
// generic name for earlier versions.
605
// Define macros here so we can build HotSpot on old systems.
606
# ifndef _CS_GNU_LIBC_VERSION
607
# define _CS_GNU_LIBC_VERSION 2
608
# endif
609
# ifndef _CS_GNU_LIBPTHREAD_VERSION
610
# define _CS_GNU_LIBPTHREAD_VERSION 3
611
# endif
612
613
#ifdef __ANDROID__
614
os::Linux::set_glibc_version("android bionic libc api-21");
615
os::Linux::set_libpthread_version("android bionic libc api-21 NPTL");
616
#else
617
size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
618
if (n > 0) {
619
char *str = (char *)malloc(n, mtInternal);
620
confstr(_CS_GNU_LIBC_VERSION, str, n);
621
os::Linux::set_glibc_version(str);
622
} else {
623
#ifndef __UCLIBC__
624
// _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
625
static char _gnu_libc_version[32];
626
jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
627
"glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
628
os::Linux::set_glibc_version(_gnu_libc_version);
629
#else
630
#define STRFY(s) #s
631
os::Linux::set_glibc_version("uclibc " STRFY(__UCLIB_MAJOR__) "." STRFY(__UCLIBC_MINOR__) " stable");
632
#undef STRFY
633
#endif
634
}
635
636
n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
637
if (n > 0) {
638
char *str = (char *)malloc(n, mtInternal);
639
confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
640
// Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
641
// us "NPTL-0.29" even we are running with LinuxThreads. Check if this
642
// is the case. LinuxThreads has a hard limit on max number of threads.
643
// So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
644
// On the other hand, NPTL does not have such a limit, sysconf()
645
// will return -1 and errno is not changed. Check if it is really NPTL.
646
if (strcmp(os::Linux::glibc_version(), "glibc 2.3.2") == 0 &&
647
strstr(str, "NPTL") &&
648
sysconf(_SC_THREAD_THREADS_MAX) > 0) {
649
free(str);
650
os::Linux::set_libpthread_version("linuxthreads");
651
} else {
652
os::Linux::set_libpthread_version(str);
653
}
654
} else {
655
// glibc before 2.3.2 only has LinuxThreads.
656
os::Linux::set_libpthread_version("linuxthreads");
657
}
658
#endif // !__ANDROID__
659
660
if (strstr(libpthread_version(), "NPTL")) {
661
os::Linux::set_is_NPTL();
662
} else {
663
os::Linux::set_is_LinuxThreads();
664
}
665
666
// LinuxThreads have two flavors: floating-stack mode, which allows variable
667
// stack size; and fixed-stack mode. NPTL is always floating-stack.
668
if (os::Linux::is_NPTL() || os::Linux::supports_variable_stack_size()) {
669
os::Linux::set_is_floating_stack();
670
}
671
}
672
673
/////////////////////////////////////////////////////////////////////////////
674
// thread stack
675
676
// Force Linux kernel to expand current thread stack. If "bottom" is close
677
// to the stack guard, caller should block all signals.
678
//
679
// MAP_GROWSDOWN:
680
// A special mmap() flag that is used to implement thread stacks. It tells
681
// kernel that the memory region should extend downwards when needed. This
682
// allows early versions of LinuxThreads to only mmap the first few pages
683
// when creating a new thread. Linux kernel will automatically expand thread
684
// stack as needed (on page faults).
685
//
686
// However, because the memory region of a MAP_GROWSDOWN stack can grow on
687
// demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
688
// region, it's hard to tell if the fault is due to a legitimate stack
689
// access or because of reading/writing non-exist memory (e.g. buffer
690
// overrun). As a rule, if the fault happens below current stack pointer,
691
// Linux kernel does not expand stack, instead a SIGSEGV is sent to the
692
// application (see Linux kernel fault.c).
693
//
694
// This Linux feature can cause SIGSEGV when VM bangs thread stack for
695
// stack overflow detection.
696
//
697
// Newer version of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do
698
// not use this flag. However, the stack of initial thread is not created
699
// by pthread, it is still MAP_GROWSDOWN. Also it's possible (though
700
// unlikely) that user code can create a thread with MAP_GROWSDOWN stack
701
// and then attach the thread to JVM.
702
//
703
// To get around the problem and allow stack banging on Linux, we need to
704
// manually expand thread stack after receiving the SIGSEGV.
705
//
706
// There are two ways to expand thread stack to address "bottom", we used
707
// both of them in JVM before 1.5:
708
// 1. adjust stack pointer first so that it is below "bottom", and then
709
// touch "bottom"
710
// 2. mmap() the page in question
711
//
712
// Now alternate signal stack is gone, it's harder to use 2. For instance,
713
// if current sp is already near the lower end of page 101, and we need to
714
// call mmap() to map page 100, it is possible that part of the mmap() frame
715
// will be placed in page 100. When page 100 is mapped, it is zero-filled.
716
// That will destroy the mmap() frame and cause VM to crash.
717
//
718
// The following code works by adjusting sp first, then accessing the "bottom"
719
// page to force a page fault. Linux kernel will then automatically expand the
720
// stack mapping.
721
//
722
// _expand_stack_to() assumes its frame size is less than page size, which
723
// should always be true if the function is not inlined.
724
725
#if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
#define NOINLINE
#else
#define NOINLINE __attribute__ ((noinline))
#endif

// Must NOT be inlined: the scheme below assumes this function's own frame
// fits within one page (see the long comment above).
static void _expand_stack_to(address bottom) NOINLINE;

static void _expand_stack_to(address bottom) {
  address sp;
  size_t size;
  volatile char *p;

  // Adjust bottom to point to the largest address within the same page, it
  // gives us a one-page buffer if alloca() allocates slightly more memory.
  bottom = (address)align_size_down((uintptr_t)bottom, os::Linux::page_size());
  bottom += os::Linux::page_size() - 1;

  // sp might be slightly above current stack pointer; if that's the case, we
  // will alloca() a little more space than necessary, which is OK. Don't use
  // os::current_stack_pointer(), as its result can be slightly below current
  // stack pointer, causing us to not alloca enough to reach "bottom".
  sp = (address)&sp;

  if (sp > bottom) {
    size = sp - bottom;
    p = (volatile char *)alloca(size);
    assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");
    // Touch the lowest byte: the resulting page fault makes the kernel grow
    // the stack mapping down to cover "bottom".
    p[0] = '\0';
  }
}
756
757
// Public wrapper over _expand_stack_to(); grows the current thread's stack
// mapping down to at least "bottom".
void os::Linux::expand_stack_to(address bottom) {
  _expand_stack_to(bottom);
}
760
761
// Try to grow thread t's stack so that 'addr' becomes a mapped stack
// address. Returns true if 'addr' lies in the expandable range
// [yellow_zone_base, stack_base) and the expansion was performed;
// false if 'addr' is outside that range.
bool os::Linux::manually_expand_stack(JavaThread * t, address addr) {
  assert(t!=NULL, "just checking");
  assert(t->osthread()->expanding_stack(), "expand should be set");
  assert(t->stack_base() != NULL, "stack_base was not initialized");

  if (addr < t->stack_base() && addr >= t->stack_yellow_zone_base()) {
    sigset_t mask_all, old_sigset;
    // Block all signals for the duration: _expand_stack_to() works close to
    // the stack guard, where taking a signal could itself fault (see the
    // "caller should block all signals" note above _expand_stack_to).
    sigfillset(&mask_all);
    pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
    _expand_stack_to(addr);
    pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);
    return true;
  }
  return false;
}
776
777
//////////////////////////////////////////////////////////////////////////////
778
// create new thread
779
780
// Highest address the VM has mmap'ed so far (defined later in this file).
static address highest_vm_reserved_address();

// check if it's safe to start a new thread
static bool _thread_safety_check(Thread* thread) {
  if (os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack()) {
    // Fixed stack LinuxThreads (SuSE Linux/x86, and some versions of Redhat)
    //   Heap is mmap'ed at lower end of memory space. Thread stacks are
    //   allocated (MAP_FIXED) from high address space. Every thread stack
    //   occupies a fixed size slot (usually 2Mbytes, but user can change
    //   it to other values if they rebuild LinuxThreads).
    //
    // Problem with MAP_FIXED is that mmap() can still succeed even part of
    // the memory region has already been mmap'ed. That means if we have too
    // many threads and/or very large heap, eventually thread stack will
    // collide with heap.
    //
    // Here we try to prevent heap/stack collision by comparing current
    // stack bottom with the highest address that has been mmap'ed by JVM
    // plus a safety margin for memory maps created by native code.
    //
    // This feature can be disabled by setting ThreadSafetyMargin to 0
    //
    if (ThreadSafetyMargin > 0) {
      address stack_bottom = os::current_stack_base() - os::current_stack_size();

      // not safe if our stack extends below the safety margin
      return stack_bottom - ThreadSafetyMargin >= highest_vm_reserved_address();
    } else {
      return true;
    }
  } else {
    // Floating stack LinuxThreads or NPTL:
    //   Unlike fixed stack LinuxThreads, thread stacks are not MAP_FIXED. When
    //   there's not enough space left, pthread_create() will fail. If we come
    //   here, that means enough space has been reserved for stack.
    return true;
  }
}
818
819
// Thread start routine for all newly created threads
// Runs on the child thread: performs per-thread setup, then handshakes with
// the parent (os::create_thread) via the OSThread's startThread_lock before
// entering Thread::run().
static void *java_start(Thread *thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();
  Monitor* sync = osthread->startThread_lock();

  // non floating stack LinuxThreads needs extra check, see above
  if (!_thread_safety_check(thread)) {
    // notify parent thread that startup failed
    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
    osthread->set_state(ZOMBIE);
    sync->notify_all();
    return NULL;
  }

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Linux::gettid());

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }
  // initialize signal mask for this thread
  os::Linux::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Linux::init_thread_fpu_state();

  // handshaking with parent thread
  {
    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);

    // notify parent thread
    osthread->set_state(INITIALIZED);
    sync->notify_all();

    // wait until os::start_thread() flips the state away from INITIALIZED
    while (osthread->get_state() == INITIALIZED) {
      sync->wait(Mutex::_no_safepoint_check_flag);
    }
  }

  // call one more level start routine
  thread->run();

  return 0;
}
878
879
// Create a native thread for 'thread' and perform the parent side of the
// ALLOCATED -> INITIALIZED/ZOMBIE handshake with java_start().
// On any failure the OSThread is deleted and thread->osthread() is reset to
// NULL before returning false. On success the new thread is left suspended
// in state INITIALIZED; it is released later via os::start_thread().
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  // stack size
  if (os::Linux::supports_variable_stack_size()) {
    // calculate stack size if it's not specified by caller
    if (stack_size == 0) {
      stack_size = os::Linux::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize which default value can be
        // changed with the flag -Xss
        assert (JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Linux::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } else {
    // let pthread_create() pick the default value.
  }

  // glibc guard page
  pthread_attr_setguardsize(&attr, os::Linux::default_guard_size(thr_type));

  ThreadState state;

  {
    // Serialize thread creation if we are running with fixed stack LinuxThreads
    bool lock = os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack();
    if (lock) {
      os::Linux::createThread_lock()->lock_without_safepoint_check();
    }

    pthread_t tid;
    int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

    pthread_attr_destroy(&attr);

    if (ret != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode)) {
        perror("pthread_create()");
      }
      // Need to clean up stuff we've allocated so far
      thread->set_osthread(NULL);
      delete osthread;
      if (lock) os::Linux::createThread_lock()->unlock();
      return false;
    }

    // Store pthread info into the OSThread
    osthread->set_pthread_id(tid);

    // Wait until child thread is either initialized or aborted
    {
      Monitor* sync_with_child = osthread->startThread_lock();
      MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
      while ((state = osthread->get_state()) == ALLOCATED) {
        sync_with_child->wait(Mutex::_no_safepoint_check_flag);
      }
    }

    if (lock) {
      os::Linux::createThread_lock()->unlock();
    }
  }

  // Aborted due to thread limit being reached
  if (state == ZOMBIE) {
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // The thread is returned suspended (in state INITIALIZED),
  // and is started higher up in the call chain
  assert(state == INITIALIZED, "race condition");
  return true;
}
992
993
/////////////////////////////////////////////////////////////////////////////
994
// attach existing thread
995
996
// bootstrap the main thread: must run on the thread that started the VM
// (os::Linux::_main_thread); simply registers it as an attached thread.
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Linux::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}
1001
1002
// Register an externally-created thread (e.g. JNI AttachCurrentThread) with
// the VM: allocates its OSThread, captures thread/pthread ids, initializes
// FPU state, NUMA group and signal mask, and pre-touches the primordial
// thread's stack when applicable. Returns false only if OSThread allocation
// fails.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Linux::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Linux::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if (os::is_primordial_thread()) {
    // If current thread is primordial thread, its stack is mapped on demand,
    // see notes about MAP_GROWSDOWN. Here we try to force kernel to map
    // the entire stack region to avoid SEGV in stack banging.
    // It is also useful to get around the heap-stack-gap problem on SuSE
    // kernel (see 4821821 for details). We first expand stack to the top
    // of yellow zone, then enable stack yellow zone (order is significant,
    // enabling yellow zone first will crash JVM on SuSE Linux), so there
    // is no gap between the last two virtual memory regions.

    JavaThread *jt = (JavaThread *)thread;
    address addr = jt->stack_yellow_zone_base();
    assert(addr != NULL, "initialization problem?");
    assert(jt->stack_available(addr) > 0, "stack guard should not be enabled");

    // expanding_stack flag tells the SEGV handler this fault is expected
    osthread->set_expanding_stack();
    os::Linux::manually_expand_stack(jt, addr);
    osthread->clear_expanding_stack();
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Linux::hotspot_sigmask(thread);

  return true;
}
1059
1060
// Wake a thread created by os::create_thread(): the caller has already
// advanced the state past INITIALIZED, so notifying the startThread lock
// releases java_start()'s wait loop and lets it call Thread::run().
void os::pd_start_thread(Thread* thread) {
  OSThread * osthread = thread->osthread();
  assert(osthread->get_state() != INITIALIZED, "just checking");
  Monitor* sync_with_child = osthread->startThread_lock();
  MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
  sync_with_child->notify();
}
1067
1068
// Free Linux resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // If this is the current thread, put back the signal mask that was in
  // effect before the VM installed its own (saved as caller_sigmask).
  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}
1080
1081
//////////////////////////////////////////////////////////////////////////////
1082
// thread local storage
1083
1084
// Restore the thread pointer if the destructor is called. This is in case
// someone from JNI code sets up a destructor with pthread_key_create to run
// detachCurrentThread on thread death. Unless we restore the thread pointer we
// will hang or crash. When detachCurrentThread is called the key will be set
// to null and we will not be called again. If detachCurrentThread is never
// called we could loop forever depending on the pthread implementation.
static void restore_thread_pointer(void* p) {
  // p is the value that was stored under the TLS key (the Thread*)
  Thread* thread = (Thread*) p;
  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
}
1094
1095
// Create the process-wide pthread TLS key used to hold the current Thread*.
// restore_thread_pointer is registered as the key destructor (see the
// comment above it) so the pointer stays visible during thread teardown.
// Returns the key cast to int for use as a TLS index.
int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, restore_thread_pointer);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}
1101
1102
// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  // index is the pthread key returned by allocate_thread_local_storage()
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}
1108
1109
// Store 'value' under the TLS key identified by 'index' for the calling
// thread (used to record the current Thread* pointer).
void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}
1113
1114
// Return the current Thread* from thread-local storage; exported with C
// linkage.
extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}
1117
1118
//////////////////////////////////////////////////////////////////////////////
1119
// primordial thread
1120
1121
// Check if current thread is the primordial thread, similar to Solaris thr_main.
1122
bool os::is_primordial_thread(void) {
1123
char dummy;
1124
// If called before init complete, thread stack bottom will be null.
1125
// Can be called if fatal error occurs before initialization.
1126
if (os::Linux::initial_thread_stack_bottom() == NULL) return false;
1127
assert(os::Linux::initial_thread_stack_bottom() != NULL &&
1128
os::Linux::initial_thread_stack_size() != 0,
1129
"os::init did not locate primordial thread's stack region");
1130
if ((address)&dummy >= os::Linux::initial_thread_stack_bottom() &&
1131
(address)&dummy < os::Linux::initial_thread_stack_bottom() +
1132
os::Linux::initial_thread_stack_size()) {
1133
return true;
1134
} else {
1135
return false;
1136
}
1137
}
1138
1139
// Find the virtual memory area that contains addr
1140
static bool find_vma(address addr, address* vma_low, address* vma_high) {
1141
FILE *fp = fopen("/proc/self/maps", "r");
1142
if (fp) {
1143
address low, high;
1144
while (!feof(fp)) {
1145
if (fscanf(fp, "%p-%p", &low, &high) == 2) {
1146
if (low <= addr && addr < high) {
1147
if (vma_low) *vma_low = low;
1148
if (vma_high) *vma_high = high;
1149
fclose (fp);
1150
return true;
1151
}
1152
}
1153
for (;;) {
1154
int ch = fgetc(fp);
1155
if (ch == EOF || ch == (int)'\n') break;
1156
}
1157
}
1158
fclose(fp);
1159
}
1160
return false;
1161
}
1162
1163
// Locate primordial thread stack. This special handling of primordial thread stack
// is needed because pthread_getattr_np() on most (all?) Linux distros returns
// bogus value for the primordial process thread. While the launcher has created
// the VM in a new thread since JDK 6, we still have to allow for the use of the
// JNI invocation API from a primordial thread.
// Sets _initial_thread_stack_size and _initial_thread_stack_bottom.
void os::Linux::capture_initial_stack(size_t max_size) {

  // max_size is either 0 (which means accept OS default for thread stacks) or
  // a user-specified value known to be at least the minimum needed. If we
  // are actually on the primordial thread we can make it appear that we have a
  // smaller max_size stack by inserting the guard pages at that location. But we
  // cannot do anything to emulate a larger stack than what has been provided by
  // the OS or threading library. In fact if we try to use a stack greater than
  // what is set by rlimit then we will crash the hosting process.

  // Maximum stack size is the easy part, get it from RLIMIT_STACK.
  // If this is "unlimited" then it will be a huge value.
  struct rlimit rlim;
  getrlimit(RLIMIT_STACK, &rlim);
  size_t stack_size = rlim.rlim_cur;

  // 6308388: a bug in ld.so will relocate its own .data section to the
  // lower end of primordial stack; reduce ulimit -s value a little bit
  // so we won't install guard page on ld.so's data section.
  // But ensure we don't underflow the stack size - allow 1 page spare
  if (stack_size >= (size_t)(3 * page_size())) {
    stack_size -= 2 * page_size();
  }

  // Try to figure out where the stack base (top) is. This is harder.
  //
  // When an application is started, glibc saves the initial stack pointer in
  // a global variable "__libc_stack_end", which is then used by system
  // libraries. __libc_stack_end should be pretty close to stack top. The
  // variable is available since the very early days. However, because it is
  // a private interface, it could disappear in the future.
  //
  // Linux kernel saves start_stack information in /proc/<pid>/stat. Similar
  // to __libc_stack_end, it is very close to stack top, but isn't the real
  // stack top. Note that /proc may not exist if VM is running as a chroot
  // program, so reading /proc/<pid>/stat could fail. Also the contents of
  // /proc/<pid>/stat could change in the future (though unlikely).
  //
  // We try __libc_stack_end first. If that doesn't work, look for
  // /proc/<pid>/stat. If neither of them works, we use current stack pointer
  // as a hint, which should work well in most cases.

  uintptr_t stack_start;

  // try __libc_stack_end first
  uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");
  if (p && *p) {
    stack_start = *p;
  } else {
    // see if we can get the start_stack field from /proc/self/stat
    FILE *fp;
    int pid;
    char state;
    int ppid;
    int pgrp;
    int session;
    int nr;
    int tpgrp;
    unsigned long flags;
    unsigned long minflt;
    unsigned long cminflt;
    unsigned long majflt;
    unsigned long cmajflt;
    unsigned long utime;
    unsigned long stime;
    long cutime;
    long cstime;
    long prio;
    long nice;
    long junk;
    long it_real;
    uintptr_t start;
    uintptr_t vsize;
    intptr_t rss;
    uintptr_t rsslim;
    uintptr_t scodes;
    uintptr_t ecode;
    int i;

    // Figure what the primordial thread stack base is. Code is inspired
    // by email from Hans Boehm. /proc/self/stat begins with current pid,
    // followed by command name surrounded by parentheses, state, etc.
    char stat[2048];
    int statlen;

    fp = fopen("/proc/self/stat", "r");
    if (fp) {
      statlen = fread(stat, 1, 2047, fp);
      stat[statlen] = '\0';
      fclose(fp);

      // Skip pid and the command string. Note that we could be dealing with
      // weird command names, e.g. user could decide to rename java launcher
      // to "java 1.4.2 :)", then the stat file would look like
      //                1234 (java 1.4.2 :)) R ... ...
      // We don't really need to know the command string, just find the last
      // occurrence of ")" and then start parsing from there. See bug 4726580.
      char * s = strrchr(stat, ')');

      i = 0;
      if (s) {
        // Skip blank chars
        do s++; while (isspace(*s));

#define _UFM UINTX_FORMAT
#define _DFM INTX_FORMAT

        // Fields 3..28 of /proc/self/stat; field 28 is start_stack.
        i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
                   &state,          /* 3  %c  */
                   &ppid,           /* 4  %d  */
                   &pgrp,           /* 5  %d  */
                   &session,        /* 6  %d  */
                   &nr,             /* 7  %d  */
                   &tpgrp,          /* 8  %d  */
                   &flags,          /* 9  %lu */
                   &minflt,         /* 10 %lu */
                   &cminflt,        /* 11 %lu */
                   &majflt,         /* 12 %lu */
                   &cmajflt,        /* 13 %lu */
                   &utime,          /* 14 %lu */
                   &stime,          /* 15 %lu */
                   &cutime,         /* 16 %ld */
                   &cstime,         /* 17 %ld */
                   &prio,           /* 18 %ld */
                   &nice,           /* 19 %ld */
                   &junk,           /* 20 %ld */
                   &it_real,        /* 21 %ld */
                   &start,          /* 22 UINTX_FORMAT */
                   &vsize,          /* 23 UINTX_FORMAT */
                   &rss,            /* 24 INTX_FORMAT  */
                   &rsslim,         /* 25 UINTX_FORMAT */
                   &scodes,         /* 26 UINTX_FORMAT */
                   &ecode,          /* 27 UINTX_FORMAT */
                   &stack_start);   /* 28 UINTX_FORMAT */
      }

#undef _UFM
#undef _DFM

      if (i != 28 - 2) {
        assert(false, "Bad conversion from /proc/self/stat");
        // product mode - assume we are the primordial thread, good luck in the
        // embedded case.
        warning("Can't detect primordial thread stack location - bad conversion");
        stack_start = (uintptr_t) &rlim;
      }
    } else {
      // For some reason we can't open /proc/self/stat (for example, running on
      // FreeBSD with a Linux emulator, or inside chroot), this should work for
      // most cases, so don't abort:
      warning("Can't detect primordial thread stack location - no /proc/self/stat");
      stack_start = (uintptr_t) &rlim;
    }
  }

  // Now we have a pointer (stack_start) very close to the stack top, the
  // next thing to do is to figure out the exact location of stack top. We
  // can find out the virtual memory area that contains stack_start by
  // reading /proc/self/maps, it should be the last vma in /proc/self/maps,
  // and its upper limit is the real stack top. (again, this would fail if
  // running inside chroot, because /proc may not exist.)

  uintptr_t stack_top;
  address low, high;
  if (find_vma((address)stack_start, &low, &high)) {
    // success, "high" is the true stack top. (ignore "low", because initial
    // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)
    stack_top = (uintptr_t)high;
  } else {
    // failed, likely because /proc/self/maps does not exist
    warning("Can't detect primordial thread stack location - find_vma failed");
    // best effort: stack_start is normally within a few pages below the real
    // stack top, use it as stack top, and reduce stack size so we won't put
    // guard page outside stack.
    stack_top = stack_start;
    stack_size -= 16 * page_size();
  }

  // stack_top could be partially down the page so align it
  stack_top = align_size_up(stack_top, page_size());

  // Allowed stack value is minimum of max_size and what we derived from rlimit
  if (max_size > 0) {
    _initial_thread_stack_size = MIN2(max_size, stack_size);
  } else {
    // Accept the rlimit max, but if stack is unlimited then it will be huge, so
    // clamp it at 8MB as we do on Solaris
    _initial_thread_stack_size = MIN2(stack_size, 8*M);
  }

  _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
  _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
  assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!");
}
1364
1365
////////////////////////////////////////////////////////////////////////////////
1366
// time support
1367
1368
// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution
}
1374
1375
// Ticks elapsed since initial_time_count was recorded; units are
// nanoseconds (see elapsed_frequency()).
jlong os::elapsed_counter() {
  return javaTimeNanos() - initial_time_count;
}
1378
1379
// Frequency of the elapsed counter: one tick per nanosecond.
jlong os::elapsed_frequency() {
  return NANOSECS_PER_SEC; // nanosecond resolution
}
1382
1383
// Thread-virtual (CPU) time is available via getrusage(RUSAGE_THREAD) in
// elapsedVTime(); there is no separate enable step, hence the constants.
bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
1387
double os::elapsedVTime() {
1388
struct rusage usage;
1389
int retval = getrusage(RUSAGE_THREAD, &usage);
1390
if (retval == 0) {
1391
return (double) (usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) + (double) (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000);
1392
} else {
1393
// better than nothing, but not much
1394
return elapsedTime();
1395
}
1396
}
1397
1398
// Wall-clock time in milliseconds since the Unix epoch, via gettimeofday().
jlong os::javaTimeMillis() {
  timeval tv;
  int status = gettimeofday(&tv, NULL);
  assert(status != -1, "linux error");
  jlong millis = jlong(tv.tv_sec) * 1000;
  millis += jlong(tv.tv_usec / 1000);
  return millis;
}
1404
1405
// Fallback for old libc headers that lack CLOCK_MONOTONIC; the value 1
// matches the Linux kernel's clockid for it.
#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC (1)
#endif
1408
1409
// Probe for a usable CLOCK_MONOTONIC via librt's clock_getres/clock_gettime.
// On success stores the resolved function pointer in _clock_gettime; on
// failure leaves it unset and warns that timed services fall back to the
// (adjustable) time-of-day clock.
void os::Linux::clock_init() {
  // we do dlopen's in this particular order due to bug in linux
  // dynamical loader (see 6348968) leading to crash on exit
  void* handle = dlopen("librt.so.1", RTLD_LAZY);
  if (handle == NULL) {
    handle = dlopen("librt.so", RTLD_LAZY);
  }
#ifdef __ANDROID__
  if (handle == NULL) {
    // libc has clock_getres and clock_gettime
    handle = RTLD_DEFAULT;
  }
#endif

  if (handle) {
    int (*clock_getres_func)(clockid_t, struct timespec*) =
        (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
    int (*clock_gettime_func)(clockid_t, struct timespec*) =
        (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
    if (clock_getres_func && clock_gettime_func) {
      // See if monotonic clock is supported by the kernel. Note that some
      // early implementations simply return kernel jiffies (updated every
      // 1/100 or 1/1000 second). It would be bad to use such a low res clock
      // for nano time (though the monotonic property is still nice to have).
      // It's fixed in newer kernels, however clock_getres() still returns
      // 1/HZ. We check if clock_getres() works, but will ignore its reported
      // resolution for now. Hopefully as people move to new kernels, this
      // won't be a problem.
      struct timespec res;
      struct timespec tp;
      if (clock_getres_func (CLOCK_MONOTONIC, &res) == 0 &&
          clock_gettime_func(CLOCK_MONOTONIC, &tp) == 0) {
        // yes, monotonic clock is supported
        _clock_gettime = clock_gettime_func;
        return;
      } else {
        // close librt if there is no monotonic clock
#ifndef __ANDROID__ // we should not close RTLD_DEFAULT :)
        dlclose(handle);
#endif
      }
    }
  }
  warning("No monotonic clock was available - timed services may " \
          "be adversely affected if the time-of-day clock changes");
}
1455
1456
// Map sys_clock_getres() onto the raw clock_getres syscall. If the libc
// headers do not define SYS_clock_getres, hard-code the per-architecture
// syscall numbers; on other architectures make the macro fail (-1), which
// disables the fast thread CPU-time path in fast_thread_clock_init().
#ifndef SYS_clock_getres

#if defined(IA32) || defined(AMD64) || defined(AARCH64)
#define SYS_clock_getres IA32_ONLY(266) AMD64_ONLY(229) AARCH64_ONLY(114)
#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
#else
#warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time"
#define sys_clock_getres(x,y) -1
#endif

#else
#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
#endif
1469
1470
// Probe for per-thread CPU-time clocks reachable via pthread_getcpuclockid();
// when available with sub-second resolution, enable the fast thread CPU
// time path (_supports_fast_thread_cpu_time / _pthread_getcpuclockid).
void os::Linux::fast_thread_clock_init() {
  if (!UseLinuxPosixThreadCPUClocks) {
    return;
  }
  clockid_t clockid;
  struct timespec tp;
  int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
      (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");

  // Switch to using fast clocks for thread cpu time if
  // the sys_clock_getres() returns 0 error code.
  // Note, that some kernels may support the current thread
  // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks
  // returned by the pthread_getcpuclockid().
  // If the fast Posix clocks are supported then the sys_clock_getres()
  // must return at least tp.tv_sec == 0 which means a resolution
  // better than 1 sec. This is extra check for reliability.

  if(pthread_getcpuclockid_func &&
     pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
     sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
    _supports_fast_thread_cpu_time = true;
    _pthread_getcpuclockid = pthread_getcpuclockid_func;
  }
}
1496
1497
// Nanosecond timer: CLOCK_MONOTONIC when clock_init() found one, otherwise
// gettimeofday() scaled to nanoseconds (which can jump if the wall clock
// is adjusted).
jlong os::javaTimeNanos() {
  if (Linux::supports_monotonic_clock()) {
    struct timespec tp;
    int status = Linux::clock_gettime(CLOCK_MONOTONIC, &tp);
    assert(status == 0, "gettime error");
    jlong result = jlong(tp.tv_sec) * (1000 * 1000 * 1000) + jlong(tp.tv_nsec);
    return result;
  } else {
    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "linux error");
    jlong usecs = jlong(time.tv_sec) * (1000 * 1000) + jlong(time.tv_usec);
    return 1000 * usecs;
  }
}
1512
1513
// Describe the javaTimeNanos() clock for JVMTI: 64-bit range in both
// cases; skip-forward/backward only when falling back to gettimeofday().
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  // Both clock sources cover the full 64-bit range.
  info_ptr->max_value = ALL_64_BITS;
  if (Linux::supports_monotonic_clock()) {
    // CLOCK_MONOTONIC - amount of time since some arbitrary point in the
    // past; not subject to resetting or drifting
    info_ptr->may_skip_backward = false;
    info_ptr->may_skip_forward  = false;
  } else {
    // gettimeofday is a real time clock so it skips
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward  = true;
  }
  info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
}
1531
1532
// Return the real, user, and system times in seconds from an
1533
// arbitrary fixed point in the past.
1534
bool os::getTimesSecs(double* process_real_time,
1535
double* process_user_time,
1536
double* process_system_time) {
1537
struct tms ticks;
1538
clock_t real_ticks = times(&ticks);
1539
1540
if (real_ticks == (clock_t) (-1)) {
1541
return false;
1542
} else {
1543
double ticks_per_second = (double) clock_tics_per_sec;
1544
*process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1545
*process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1546
*process_real_time = ((double) real_ticks) / ticks_per_second;
1547
1548
return true;
1549
}
1550
}
1551
1552
1553
char * os::local_time_string(char *buf, size_t buflen) {
1554
struct tm t;
1555
time_t long_time;
1556
time(&long_time);
1557
localtime_r(&long_time, &t);
1558
jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1559
t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1560
t.tm_hour, t.tm_min, t.tm_sec);
1561
return buf;
1562
}
1563
1564
// Thread-safe localtime: delegates to localtime_r with a caller-supplied
// result buffer.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}
1567
1568
////////////////////////////////////////////////////////////////////////////////
1569
// runtime exit support
1570
1571
// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }

}
1592
1593
// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// Runs os::shutdown() first, then either ::abort() (to dump core) or
// ::exit(1). Never returns.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}
1612
1613
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  // _exit() on LinuxThreads only kills current thread
  ::abort();
}
1618
1619
1620
// This method is a copy of JDK's sysGetLastErrorString
1621
// from src/solaris/hpi/src/system_md.c
1622
1623
size_t os::lasterror(char *buf, size_t len) {
1624
1625
if (errno == 0) return 0;
1626
1627
const char *s = ::strerror(errno);
1628
size_t n = ::strlen(s);
1629
if (n >= len) {
1630
n = len - 1;
1631
}
1632
::strncpy(buf, s, n);
1633
buf[n] = '\0';
1634
return n;
1635
}
1636
1637
// Identify the current thread by its pthread handle value.
intx os::current_thread_id() { return (intx)pthread_self(); }
1638
// Process id of the VM: prefers the pid recorded at startup
// (_initial_pid) and falls back to getpid().
int os::current_process_id() {

  // Under the old linux thread library, linux gives each thread
  // its own process id. Because of this each thread will return
  // a different pid if this method were to return the result
  // of getpid(2). Linux provides no api that returns the pid
  // of the launcher thread for the vm. This implementation
  // returns a unique pid, the pid of the launcher thread
  // that starts the vm 'process'.

  // Under the NPTL, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre NPTL behaviour.

  // if you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in osThread_linux.hpp file

  return (int)(_initial_pid ? _initial_pid : getpid());
}
1658
1659
// DLL functions

// Platform shared-library suffix.
const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
1666
1667
static bool file_exists(const char* filename) {
1668
struct stat statbuf;
1669
if (filename == NULL || strlen(filename) == 0) {
1670
return false;
1671
}
1672
return os::stat(filename, &statbuf) == 0;
1673
}
1674
1675
// Build a platform library file name ("lib<fname>.so") into 'buffer'.
// If 'pname' is empty, produce a bare name; if it is a path-separated
// search path, probe each element and keep the first existing file;
// otherwise prefix the single directory. Returns false on potential
// buffer overflow, split_path() failure, or when no path element matched.
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  // (+10 covers the added "lib", ".so", '/' and NUL characters.)
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0 ; i < n ; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage allocated by split_path()
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
1721
1722
// check if addr is inside libjvm.so
bool os::address_is_in_vm(address addr) {
  static address libjvm_base_addr;
  Dl_info dlinfo;

  // Lazily resolve libjvm's own base address by asking dladdr() about a
  // symbol known to live in this library (this very function).
  if (libjvm_base_addr == NULL) {
    if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
      libjvm_base_addr = (address)dlinfo.dli_fbase;
    }
    assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
  }

  // addr is inside the VM iff it resolves to the same shared-object base.
  if (dladdr((void *)addr, &dlinfo) != 0) {
    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  }

  return false;
}
1740
1741
// Resolve 'addr' to a function name (demangled when possible) via dladdr(),
// storing the symbol-relative offset in *offset when requested. Falls back
// to file-level decoding if no symbol matches. On failure buf is set to ""
// and *offset to -1.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  Dl_info dlinfo;

  if (dladdr((void*)addr, &dlinfo) != 0) {
    // see if we have a matching symbol
    if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
      if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
        // demangling failed - fall back to the raw symbol name
        jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
      }
      if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
      return true;
    }
    // no matching symbol so try for just file info
    if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
      if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
                          buf, buflen, offset, dlinfo.dli_fname)) {
        return true;
      }
    }
  }

  buf[0] = '\0';
  if (offset != NULL) *offset = -1;
  return false;
}
1770
1771
// In/out parameter bundle for address_to_library_name_callback().
struct _address_to_library_name {
  address addr;          // input : memory address
  size_t buflen;         //         size of fname
  char* fname;           // output: library name
  address base;          //         library base addr
};
1777
1778
// dl_iterate_phdr() callback: for each loaded object, scan its PT_LOAD
// segments to (a) compute the object's base address (lowest segment start)
// and (b) test whether d->addr falls inside any segment. Returns nonzero to
// stop iteration once the owning, named library is found.
static int address_to_library_name_callback(struct dl_phdr_info *info,
                                            size_t size, void *data) {
  int i;
  bool found = false;
  address libbase = NULL;
  struct _address_to_library_name * d = (struct _address_to_library_name *)data;

  // iterate through all loadable segments
  for (i = 0; i < info->dlpi_phnum; i++) {
    address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
    if (info->dlpi_phdr[i].p_type == PT_LOAD) {
      // base address of a library is the lowest address of its loaded
      // segments.
      if (libbase == NULL || libbase > segbase) {
        libbase = segbase;
      }
      // see if 'addr' is within current segment
      if (segbase <= d->addr &&
          d->addr < segbase + info->dlpi_phdr[i].p_memsz) {
        found = true;
      }
    }
  }

  // dlpi_name is NULL or empty if the ELF file is executable, return 0
  // so dll_address_to_library_name() can fall through to use dladdr() which
  // can figure out executable name from argv[0].
  if (found && info->dlpi_name && info->dlpi_name[0]) {
    d->base = libbase;
    if (d->fname) {
      jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name);
    }
    return 1;  // nonzero stops dl_iterate_phdr
  }
  return 0;
}
1814
// Maps 'addr' to the path of the shared object containing it. 'buf' is
// required; 'offset' (distance from the library base) is optional.
// On failure buf is set to "" and *offset to -1; returns false.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  Dl_info dlinfo;

  // Android bionic libc does not have the bug below (?)
#ifndef __ANDROID__
  struct _address_to_library_name data;

  // There is a bug in old glibc dladdr() implementation that it could resolve
  // to wrong library name if the .so file has a base address != NULL. Here
  // we iterate through the program headers of all loaded libraries to find
  // out which library 'addr' really belongs to. This workaround can be
  // removed once the minimum requirement for glibc is moved to 2.3.x.
  data.addr = addr;
  data.fname = buf;
  data.buflen = buflen;
  data.base = NULL;
  int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);

  if (rslt) {
    // buf already contains library name
    if (offset) *offset = addr - data.base;
    return true;
  }
#endif // !__ANDROID__
  // Fall back to dladdr(); for the main executable this can recover the
  // name from argv[0].
  if (dladdr((void*)addr, &dlinfo) != 0) {
    if (dlinfo.dli_fname != NULL) {
      jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL && offset != NULL) {
      *offset = addr - (address)dlinfo.dli_fbase;
    }
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}
1856
1857
static bool read_so_path_from_maps(const char* so_name, char* buf, int buflen) {
1858
FILE *fp = fopen("/proc/self/maps", "r");
1859
assert(fp, "Failed to open /proc/self/maps");
1860
if (!fp) {
1861
return false;
1862
}
1863
1864
char maps_buffer[2048];
1865
while (fgets(maps_buffer, 2048, fp) != NULL) {
1866
if (strstr(maps_buffer, so_name) == NULL) {
1867
continue;
1868
}
1869
1870
char *so_path = strchr(maps_buffer, '/');
1871
so_path[strlen(so_path) - 1] = '\0'; // Cut trailing \n
1872
jio_snprintf(buf, buflen, "%s", so_path);
1873
fclose(fp);
1874
return true;
1875
}
1876
1877
fclose(fp);
1878
return false;
1879
}
1880
1881
// Loads .dll/.so and
1882
// in case of error it checks if .dll/.so was built for the
1883
// same architecture as Hotspot is running on
1884
1885
1886
// Remember the stack's state. The Linux dynamic linker will change
// the stack to 'executable' at most once, so we must safepoint only once.
// Set to true once dll_load has (or no longer needs to) repair guard pages.
bool os::Linux::_stack_is_executable = false;
1889
1890
// VM operation that loads a library. This is necessary if stack protection
// of the Java stacks can be lost during loading the library. If we
// do not stop the Java threads, they can stack overflow before the stacks
// are protected again.
class VM_LinuxDllLoad: public VM_Operation {
 private:
  const char *_filename;   // path of the library to load
  char *_ebuf;             // caller-supplied error message buffer
  int _ebuflen;            // size of _ebuf
  void *_lib;              // dlopen() result, NULL on failure
 public:
  VM_LinuxDllLoad(const char *fn, char *ebuf, int ebuflen) :
    _filename(fn), _ebuf(ebuf), _ebuflen(ebuflen), _lib(NULL) {}
  VMOp_Type type() const { return VMOp_LinuxDllLoad; }
  // Runs in the VM thread at a safepoint: performs the load and records
  // that the dynamic linker may now have made stacks executable.
  void doit() {
    _lib = os::Linux::dll_load_in_vmthread(_filename, _ebuf, _ebuflen);
    os::Linux::_stack_is_executable = true;
  }
  void* loaded_library() { return _lib; }
};
1910
1911
// Loads a shared library. If the library lacks the noexecstack marking, the
// load is routed through a VM operation so that stack guard pages lost when
// the dynamic linker makes stacks executable can be re-protected safely.
// On failure, returns NULL and appends an architecture-mismatch diagnostic
// to 'ebuf' (which already holds the dlerror() text) when the ELF header of
// the file reveals an incompatible target.
void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
{
  void * result = NULL;
  bool load_attempted = false;

  // Check whether the library to load might change execution rights
  // of the stack. If they are changed, the protection of the stack
  // guard pages will be lost. We need a safepoint to fix this.
  //
  // See Linux man page execstack(8) for more info.
  if (os::uses_stack_guard_pages() && !os::Linux::_stack_is_executable) {
    ElfFile ef(filename);
    if (!ef.specifies_noexecstack()) {
      if (!is_init_completed()) {
        os::Linux::_stack_is_executable = true;
        // This is OK - No Java threads have been created yet, and hence no
        // stack guard pages to fix.
        //
        // This should happen only when you are building JDK7 using a very
        // old version of JDK6 (e.g., with JPRT) and running test_gamma.
        //
        // Dynamic loader will make all stacks executable after
        // this function returns, and will not do that again.
        assert(Threads::first() == NULL, "no Java threads should exist yet.");
      } else {
        warning("You have loaded library %s which might have disabled stack guard. "
                "The VM will try to fix the stack guard now.\n"
                "It's highly recommended that you fix the library with "
                "'execstack -c <libfile>', or link it with '-z noexecstack'.",
                filename);

        assert(Thread::current()->is_Java_thread(), "must be Java thread");
        JavaThread *jt = JavaThread::current();
        if (jt->thread_state() != _thread_in_native) {
          // This happens when a compiler thread tries to load a hsdis-<arch>.so file
          // that requires ExecStack. Cannot enter safe point. Let's give up.
          warning("Unable to fix stack guard. Giving up.");
        } else {
          if (!LoadExecStackDllInVMThread) {
            // This is for the case where the DLL has an static
            // constructor function that executes JNI code. We cannot
            // load such DLLs in the VMThread.
            result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);
          }

          ThreadInVMfromNative tiv(jt);
          debug_only(VMNativeEntryWrapper vew;)

          VM_LinuxDllLoad op(filename, ebuf, ebuflen);
          VMThread::execute(&op);
          if (LoadExecStackDllInVMThread) {
            result = op.loaded_library();
          }
          load_attempted = true;
        }
      }
    }
  }

  if (!load_attempted) {
    result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);
  }

  if (result != NULL) {
    // Successful loading
    return result;
  }

  // dlopen failed; ebuf already contains the dlerror() message. Try to read
  // the file's ELF header to produce a more specific diagnostic.
  Elf32_Ehdr elf_head;
  int diag_msg_max_length=ebuflen-strlen(ebuf);
  char* diag_msg_buf=ebuf+strlen(ebuf);

  if (diag_msg_max_length==0) {
    // No more space in ebuf for additional diagnostics message
    return NULL;
  }


  int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);

  if (file_descriptor < 0) {
    // Can't open library, report dlerror() message
    return NULL;
  }

  bool failed_to_read_elf_head=
    (sizeof(elf_head)!=
     (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;

  ::close(file_descriptor);
  if (failed_to_read_elf_head) {
    // file i/o error - report dlerror() msg
    return NULL;
  }

  // Table describing each known ELF machine type and its compatibility
  // class, word width, endianness and printable name.
  typedef struct {
    Elf32_Half  code;         // Actual value as defined in elf.h
    Elf32_Half  compat_class; // Compatibility of archs at VM's sense
    char        elf_class;    // 32 or 64 bit
    char        endianess;    // MSB or LSB
    char*       name;         // String representation
  } arch_t;

#ifndef EM_486
#define EM_486          6               /* Intel 80486 */
#endif
#ifndef EM_AARCH64
#define EM_AARCH64      183             /* ARM AARCH64 */
#endif

  static const arch_t arch_array[]={
    {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
    {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
    {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
    {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
#if defined(VM_LITTLE_ENDIAN)
    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64 LE"},
#else
    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
#endif
    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
    {EM_S390,        EM_S390,    ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
    {EM_ALPHA,       EM_ALPHA,   ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
    {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"},
    {EM_MIPS,        EM_MIPS,    ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"},
    {EM_PARISC,      EM_PARISC,  ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"},
    {EM_68K,         EM_68K,     ELFCLASS32, ELFDATA2MSB, (char*)"M68k"},
    {EM_AARCH64,     EM_AARCH64, ELFCLASS64, ELFDATA2LSB, (char*)"AARCH64"},
  };

  // Select the machine code the running VM was built for.
#if  (defined IA32)
  static  Elf32_Half running_arch_code=EM_386;
#elif   (defined AMD64)
  static  Elf32_Half running_arch_code=EM_X86_64;
#elif  (defined IA64)
  static  Elf32_Half running_arch_code=EM_IA_64;
#elif  (defined __sparc) && (defined _LP64)
  static  Elf32_Half running_arch_code=EM_SPARCV9;
#elif  (defined __sparc) && (!defined _LP64)
  static  Elf32_Half running_arch_code=EM_SPARC;
#elif  (defined __powerpc64__)
  static  Elf32_Half running_arch_code=EM_PPC64;
#elif  (defined __powerpc__)
  static  Elf32_Half running_arch_code=EM_PPC;
#elif  (defined ARM)
  static  Elf32_Half running_arch_code=EM_ARM;
#elif  (defined S390)
  static  Elf32_Half running_arch_code=EM_S390;
#elif  (defined ALPHA)
  static  Elf32_Half running_arch_code=EM_ALPHA;
#elif  (defined MIPSEL)
  static  Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
#elif  (defined PARISC)
  static  Elf32_Half running_arch_code=EM_PARISC;
#elif  (defined MIPS)
  static  Elf32_Half running_arch_code=EM_MIPS;
#elif  (defined M68K)
  static  Elf32_Half running_arch_code=EM_68K;
#elif  (defined AARCH64)
  static  Elf32_Half running_arch_code=EM_AARCH64;
#else
  #error Method os::dll_load requires that one of following is defined:\
       IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K, AARCH64
#endif

  // Identify compatability class for VM's architecture and library's architecture
  // Obtain string descriptions for architectures

  arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  int running_arch_index=-1;

  for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
    if (running_arch_code == arch_array[i].code) {
      running_arch_index    = i;
    }
    if (lib_arch.code == arch_array[i].code) {
      lib_arch.compat_class = arch_array[i].compat_class;
      lib_arch.name         = arch_array[i].name;
    }
  }

  assert(running_arch_index != -1,
    "Didn't find running architecture code (running_arch_code) in arch_array");
  if (running_arch_index == -1) {
    // Even though running architecture detection failed
    // we may still continue with reporting dlerror() message
    return NULL;
  }

  if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
    return NULL;
  }

#ifndef S390
  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
    return NULL;
  }
#endif // !S390

  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
    if ( lib_arch.name!=NULL ) {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
        lib_arch.name, arch_array[running_arch_index].name);
    } else {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
        lib_arch.code,
        arch_array[running_arch_index].name);
    }
  }

  return NULL;
}
2131
2132
// Thin wrapper around dlopen(). On failure, copies the dlerror() message
// into 'ebuf' (always NUL-terminated). Returns the library handle or NULL.
void * os::Linux::dlopen_helper(const char *filename, char *ebuf, int ebuflen) {
  void * result = ::dlopen(filename, RTLD_LAZY);
  if (result == NULL) {
    // dlerror() may return NULL (e.g. if another thread consumed the error
    // first); guard against passing NULL to strncpy, which is undefined.
    const char* error_report = ::dlerror();
    if (error_report == NULL) {
      error_report = "dlerror returned no error description";
    }
    ::strncpy(ebuf, error_report, ebuflen - 1);
    ebuf[ebuflen-1]='\0';
  }
  return result;
}
2140
2141
// Performs the actual dlopen inside the VM thread (at a safepoint, queued
// by VM_LinuxDllLoad) and then re-guards the yellow/red stack zones of all
// Java threads, since loading an execstack library strips page protections.
void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf, int ebuflen) {
  void * result = NULL;
  if (LoadExecStackDllInVMThread) {
    result = dlopen_helper(filename, ebuf, ebuflen);
  }

  // Since 7019808, libjvm.so is linked with -noexecstack. If the VM loads a
  // library that requires an executable stack, or which does not have this
  // stack attribute set, dlopen changes the stack attribute to executable. The
  // read protection of the guard pages gets lost.
  //
  // Need to check _stack_is_executable again as multiple VM_LinuxDllLoad
  // may have been queued at the same time.

  if (!_stack_is_executable) {
    JavaThread *jt = Threads::first();

    // Walk every Java thread and restore guard-page protection where the
    // zones were initialized and not already disabled by a pending overflow.
    while (jt) {
      if (!jt->stack_guard_zone_unused() &&        // Stack not yet fully initialized
          jt->stack_yellow_zone_enabled()) {       // No pending stack overflow exceptions
        if (!os::guard_memory((char *) jt->stack_red_zone_base() - jt->stack_red_zone_size(),
                              jt->stack_yellow_zone_size() + jt->stack_red_zone_size())) {
          warning("Attempt to reguard stack yellow zone failed.");
        }
      }
      jt = jt->next();
    }
  }

  return result;
}
2172
2173
/*
 * glibc-2.0 libdl is not MT safe.  If you are building with any glibc,
 * chances are you might want to run the generated bits against glibc-2.0
 * libdl.so, so always use locking for any version of glibc.
 */
// Thread-safe dlsym(): serializes lookups through dl_mutex (see comment
// above for why locking is unconditional).
void* os::dll_lookup(void* handle, const char* name) {
  pthread_mutex_lock(&dl_mutex);
  void* res = dlsym(handle, name);
  pthread_mutex_unlock(&dl_mutex);
  return res;
}
2184
2185
void* os::get_default_process_handle() {
2186
return (void*)::dlopen(NULL, RTLD_LAZY);
2187
}
2188
2189
static bool _print_ascii_file(const char* filename, outputStream* st) {
2190
int fd = ::open(filename, O_RDONLY);
2191
if (fd == -1) {
2192
return false;
2193
}
2194
2195
char buf[32];
2196
int bytes;
2197
while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
2198
st->print_raw(buf, bytes);
2199
}
2200
2201
::close(fd);
2202
2203
return true;
2204
}
2205
2206
// Dumps the process memory map (/proc/<tid>/maps) to 'st' as the list of
// loaded dynamic libraries.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");

  char fname[32];
  // NOTE(review): gettid() returns the thread id, not the process id;
  // /proc/<tid>/maps resolves to the same per-process maps file, so this
  // works, but getpid() would express the intent more directly - confirm.
  pid_t pid = os::Linux::gettid();

  jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);

  if (!_print_ascii_file(fname, st)) {
    st->print("Can not get library information for pid = %d\n", pid);
  }
}
2218
2219
// Iterates over /proc/self/maps and invokes 'callback' for every file-backed
// mapping (device id != "00:00"). Returns 1 if the callback aborted the
// iteration, 0 otherwise (including when the maps file cannot be opened).
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  FILE *procmapsFile = NULL;

  // Open the procfs maps file for the current process
  if ((procmapsFile = fopen("/proc/self/maps", "r")) != NULL) {
    // Allocate PATH_MAX for file name plus a reasonable size for other fields.
    char line[PATH_MAX + 100];

    // Read line by line from 'file'
    while (fgets(line, sizeof(line), procmapsFile) != NULL) {
      u8 base, top, offset, inode;
      char permissions[5];
      // The device field is "<major>:<minor>"; size the buffer for the %7s
      // width below (7 chars + NUL) - the previous char[6] could overflow.
      char device[8];
      char name[PATH_MAX + 1];

      // Parse fields from line; require all seven fields to match so that
      // 'name' is never read uninitialized (anonymous mappings have no path).
      int matches = sscanf(line, UINT64_FORMAT_X "-" UINT64_FORMAT_X " %4s " UINT64_FORMAT_X " %7s " INT64_FORMAT " %s",
             &base, &top, permissions, &offset, device, &inode, name);
      if (matches != 7) {
        continue;
      }

      // Filter by device id '00:00' so that we only get file system mapped files.
      if (strcmp(device, "00:00") != 0){

        // Call callback with the fields of interest
        if(callback(name, (address)base, (address)top, param)) {
          // Oops abort, callback aborted
          fclose(procmapsFile);
          return 1;
        }
      }
    }
    fclose(procmapsFile);
  }
  return 0;
}
2253
2254
// Prints a short OS summary: distro, uname, and libc/libpthread versions.
// Output order is part of the hs_err report format.
void os::print_os_info_brief(outputStream* st) {
  os::Linux::print_distro_info(st);

  os::Posix::print_uname_info(st);

  os::Linux::print_libversion_info(st);

}
2262
2263
// Prints the full OS section of an error/info report: distro, uname,
// chroot warning, library versions, rlimits, load average, memory and
// container details. Output order is part of the report format.
void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::Linux::print_distro_info(st);

  os::Posix::print_uname_info(st);

  // Print warning if unsafe chroot environment detected
  if (unsafe_chroot_detected) {
    st->print("WARNING!! ");
    st->print_cr("%s", unstable_chroot_error);
  }

  os::Linux::print_libversion_info(st);

  os::Posix::print_rlimit_info(st);

  os::Posix::print_load_average(st);

  os::Linux::print_full_memory_info(st);

  os::Linux::print_container_info(st);
}
2286
2287
// Try to identify popular distros.
2288
// Most Linux distributions have a /etc/XXX-release file, which contains
2289
// the OS version string. Newer Linux distributions have a /etc/lsb-release
2290
// file that also contains the OS version string. Some have more than one
2291
// /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and
2292
// /etc/redhat-release.), so the order is important.
2293
// Any Linux that is based on Redhat (i.e. Oracle, Mandrake, Sun JDS...) have
2294
// their own specific XXX-release file as well as a redhat-release file.
2295
// Because of this the XXX-release file needs to be searched for before the
2296
// redhat-release file.
2297
// Since Red Hat has a lsb-release file that is not very descriptive the
2298
// search for redhat-release needs to be before lsb-release.
2299
// Since the lsb-release file is the new standard it needs to be searched
2300
// before the older style release files.
2301
// Searching system-release (Red Hat) and os-release (other Linuxes) are a
2302
// next to last resort. The os-release file is a new standard that contains
2303
// distribution information and the system-release file seems to be an old
2304
// standard that has been replaced by the lsb-release and os-release files.
2305
// Searching for the debian_version file is the last resort. It contains
2306
// an informative string like "6.0.6" or "wheezy/sid". Because of this
2307
// "Debian " is printed before the contents of the debian_version file.
2308
void os::Linux::print_distro_info(outputStream* st) {
2309
if (!_print_ascii_file("/etc/oracle-release", st) &&
2310
!_print_ascii_file("/etc/mandriva-release", st) &&
2311
!_print_ascii_file("/etc/mandrake-release", st) &&
2312
!_print_ascii_file("/etc/sun-release", st) &&
2313
!_print_ascii_file("/etc/redhat-release", st) &&
2314
!_print_ascii_file("/etc/lsb-release", st) &&
2315
!_print_ascii_file("/etc/SuSE-release", st) &&
2316
!_print_ascii_file("/etc/turbolinux-release", st) &&
2317
!_print_ascii_file("/etc/gentoo-release", st) &&
2318
!_print_ascii_file("/etc/ltib-release", st) &&
2319
!_print_ascii_file("/etc/angstrom-version", st) &&
2320
!_print_ascii_file("/etc/system-release", st) &&
2321
!_print_ascii_file("/etc/os-release", st)) {
2322
2323
if (file_exists("/etc/debian_version")) {
2324
st->print("Debian ");
2325
_print_ascii_file("/etc/debian_version", st);
2326
} else {
2327
st->print("Linux");
2328
}
2329
}
2330
st->cr();
2331
}
2332
2333
// Prints the detected glibc and libpthread versions, plus the stack model
// when running on the old LinuxThreads library.
void os::Linux::print_libversion_info(outputStream* st) {
  // libc, pthread
  st->print("libc:");
  st->print("%s ", os::Linux::glibc_version());
  st->print("%s ", os::Linux::libpthread_version());
  if (os::Linux::is_LinuxThreads()) {
    st->print("(%s stack)", os::Linux::is_floating_stack() ? "floating" : "fixed");
  }
  st->cr();
}
2343
2344
// Dumps the kernel's /proc/meminfo verbatim to 'st'.
void os::Linux::print_full_memory_info(outputStream* st) {
  st->print("\n/proc/meminfo:\n");
  _print_ascii_file("/proc/meminfo", st);
  st->cr();
}
2349
2350
// Prints cgroup/container resource limits when the VM detects it is running
// in a container; a value of "failed" (or a negative number) means the
// corresponding cgroup file could not be read.
void os::Linux::print_container_info(outputStream* st) {
  if (!OSContainer::is_containerized()) {
    return;
  }

  st->print("container (cgroup) information:\n");

  const char *p_ct = OSContainer::container_type();
  st->print("container_type: %s\n", p_ct != NULL ? p_ct : "failed");

  // cpu_cpuset_cpus()/cpu_cpuset_memory_nodes() return malloc'ed strings
  // (or NULL on failure); free(NULL) is a no-op.
  char *p = OSContainer::cpu_cpuset_cpus();
  st->print("cpu_cpuset_cpus: %s\n", p != NULL ? p : "failed");
  free(p);

  p = OSContainer::cpu_cpuset_memory_nodes();
  st->print("cpu_memory_nodes: %s\n", p != NULL ? p : "failed");
  free(p);

  int i = OSContainer::active_processor_count();
  if (i > 0) {
    st->print("active_processor_count: %d\n", i);
  } else {
    st->print("active_processor_count: failed\n");
  }

  i = OSContainer::cpu_quota();
  st->print("cpu_quota: %d\n", i);

  i = OSContainer::cpu_period();
  st->print("cpu_period: %d\n", i);

  i = OSContainer::cpu_shares();
  st->print("cpu_shares: %d\n", i);

  jlong j = OSContainer::memory_limit_in_bytes();
  st->print("memory_limit_in_bytes: " JLONG_FORMAT "\n", j);

  j = OSContainer::memory_and_swap_limit_in_bytes();
  st->print("memory_and_swap_limit_in_bytes: " JLONG_FORMAT "\n", j);

  j = OSContainer::memory_soft_limit_in_bytes();
  st->print("memory_soft_limit_in_bytes: " JLONG_FORMAT "\n", j);

  // Note: was written "OSContainer::OSContainer::..." (legal via the
  // injected class name, but redundant); normalized for consistency with
  // the rest of this function.
  j = OSContainer::memory_usage_in_bytes();
  st->print("memory_usage_in_bytes: " JLONG_FORMAT "\n", j);

  j = OSContainer::memory_max_usage_in_bytes();
  st->print("memory_max_usage_in_bytes: " JLONG_FORMAT "\n", j);
  st->cr();
}
2400
2401
// Prints a one-line memory summary: page size, physical RAM (total/free)
// and swap (total/free), all in KiB.
void os::print_memory_info(outputStream* st) {

  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // values in struct sysinfo are "unsigned long"
  struct sysinfo si;
  sysinfo(&si);

  st->print(", physical " UINT64_FORMAT "k",
            os::physical_memory() >> 10);
  st->print("(" UINT64_FORMAT "k free)",
            os::available_memory() >> 10);
  // Swap figures must be scaled by mem_unit (bytes per sysinfo unit).
  st->print(", swap " UINT64_FORMAT "k",
            ((jlong)si.totalswap * si.mem_unit) >> 10);
  st->print("(" UINT64_FORMAT "k free)",
            ((jlong)si.freeswap * si.mem_unit) >> 10);
  st->cr();
}
2420
2421
// Dumps /proc/cpuinfo to 'st'; prints a placeholder when it is unreadable.
void os::pd_print_cpu_info(outputStream* st) {
  st->print("\n/proc/cpuinfo:\n");
  const bool shown = _print_ascii_file("/proc/cpuinfo", st);
  if (!shown) {
    st->print("  <Not Available>");
  }
  st->cr();
}
2428
2429
// Prints a brief description of 'siginfo' and, for SIGBUS/SIGSEGV with CDS
// enabled, checks whether the fault address lies in the shared archive
// (which indicates an archive-file I/O problem rather than a VM bug).
void os::print_siginfo(outputStream* st, void* siginfo) {
  const siginfo_t* si = (const siginfo_t*)siginfo;

  os::Posix::print_siginfo_brief(st, si);
#if INCLUDE_CDS
  if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
      UseSharedSpaces) {
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space(si->si_addr)) {
      st->print("\n\nError accessing class data sharing archive."   \
                " Mapped file inaccessible during execution, "      \
                " possible disk/network problem.");
    }
  }
#endif
  st->cr();
}
2446
2447
2448
// Defined later in this file; prints one signal's installed handler.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen);

// Prints the installed handler of every signal the VM cares about
// (crash signals, suspend/resume, shutdown and break signals).
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, SR_signum, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
#if defined(PPC64)
  print_signal_handler(st, SIGTRAP, buf, buflen);
#endif
}
2469
2470
// Cache for os::jvm_path(); filled on first successful resolution.
static char saved_jvm_path[MAXPATHLEN] = {0};
2471
2472
// Find the full path to the current module, libjvm.so
// 'buf' must be at least MAXPATHLEN bytes. The result is cached in
// saved_jvm_path. On Android, relative library names are resolved via
// /proc/self/maps; elsewhere realpath() is used, with extra fixup for the
// gamma test launcher (which loads libjvm from the build tree).
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  char dli_fname[MAXPATHLEN];
  // Resolve the library containing this very function (= libjvm).
  bool ret = dll_address_to_library_name(
                CAST_FROM_FN_PTR(address, os::jvm_path),
                dli_fname, sizeof(dli_fname), NULL);
  assert(ret, "cannot locate libjvm");
#ifdef __ANDROID__
  if (dli_fname[0] == '\0') {
    return;
  }
  // Bionic may report a bare file name; recover the absolute path from the
  // process memory map.
  if (strchr(dli_fname, '/') == NULL) {
    bool ok = read_so_path_from_maps(dli_fname, buf, buflen);
    assert(ok, "unable to turn relative libjvm.so path into absolute");
    return;
  }
  snprintf(buf, buflen, /* "%s/lib/%s/server/%s", java_home_var, cpu_arch, */ "%s", dli_fname);
#else // !__ANDROID__
  char *rp = NULL;
  if (ret && dli_fname[0] != '\0') {
    rp = realpath(dli_fname, buf);
  }
  if (rp == NULL)
    return;

  if (Arguments::created_by_gamma_launcher()) {
    // Support for the gamma launcher.  Typical value for buf is
    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
    // the right place in the string, then assume we are installed in a JDK and
    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
    // up the path so it looks like libjvm.so is installed there (append a
    // fake suffix hotspot/libjvm.so).
    const char *p = buf + strlen(buf) - 1;
    // Walk back over the last five '/'-separated path components.
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        rp = realpath(java_home_var, buf);
        if (rp == NULL)
          return;

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = realpath(dli_fname, buf);
          if (rp == NULL)
            return;
        }
      }
    }
  }
#endif // !__ANDROID__

  strncpy(saved_jvm_path, buf, MAXPATHLEN);
}
2564
2565
// JNI symbol lookup on Linux uses plain C names: no prefix is emitted.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}
2568
2569
// JNI symbol lookup on Linux uses plain C names: no suffix is emitted.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
2572
2573
////////////////////////////////////////////////////////////////////////////////
2574
// sun.misc.Signal support
2575
2576
// Count of SIGINTs seen since the last signal-dispatch cycle; used to
// collapse the burst of SIGINTs delivered to every thread on Ctrl-C.
static volatile jint sigint_count = 0;

// Native handler installed for signals claimed by sun.misc.Signal; forwards
// the signal number to the Java-level dispatcher via signal_notify().
static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && is_error_reported()) {
     os::die();
  }

  os::signal_notify(sig);
}
2594
2595
// Returns the address of UserHandler for registration by shared signal code.
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
2598
2599
// Thin RAII wrapper over a POSIX unnamed semaphore (process-private),
// used below for suspend/resume handshaking.
class Semaphore : public StackObj {
  public:
    Semaphore();
    ~Semaphore();
    void signal();                               // sem_post
    void wait();                                 // sem_wait (blocking)
    bool trywait();                              // non-blocking; true if acquired
    bool timedwait(unsigned int sec, int nsec);  // bounded wait; true if acquired
  private:
    sem_t _semaphore;
};
2610
2611
// Initializes a process-private semaphore with an initial count of zero.
Semaphore::Semaphore() {
  sem_init(&_semaphore, 0, 0);
}
2614
2615
// Releases the underlying POSIX semaphore.
Semaphore::~Semaphore() {
  sem_destroy(&_semaphore);
}
2618
2619
// Increments the semaphore, releasing one waiter if any.
void Semaphore::signal() {
  sem_post(&_semaphore);
}
2622
2623
// Blocks until the semaphore can be decremented.
// NOTE(review): sem_wait can fail with EINTR; callers here appear to
// tolerate a spurious return - confirm before relying on it elsewhere.
void Semaphore::wait() {
  sem_wait(&_semaphore);
}
2626
2627
// Non-blocking decrement; returns true iff the semaphore was acquired.
bool Semaphore::trywait() {
  return sem_trywait(&_semaphore) == 0;
}
2630
2631
bool Semaphore::timedwait(unsigned int sec, int nsec) {
2632
2633
struct timespec ts;
2634
// Semaphore's are always associated with CLOCK_REALTIME
2635
os::Linux::clock_gettime(CLOCK_REALTIME, &ts);
2636
// see unpackTime for discussion on overflow checking
2637
if (sec >= MAX_SECS) {
2638
ts.tv_sec += MAX_SECS;
2639
ts.tv_nsec = 0;
2640
} else {
2641
ts.tv_sec += sec;
2642
ts.tv_nsec += nsec;
2643
if (ts.tv_nsec >= NANOSECS_PER_SEC) {
2644
ts.tv_nsec -= NANOSECS_PER_SEC;
2645
++ts.tv_sec; // note: this must be <= max_secs
2646
}
2647
}
2648
2649
while (1) {
2650
int result = sem_timedwait(&_semaphore, &ts);
2651
if (result == 0) {
2652
return true;
2653
} else if (errno == EINTR) {
2654
continue;
2655
} else if (errno == ETIMEDOUT) {
2656
return false;
2657
} else {
2658
return false;
2659
}
2660
}
2661
}
2662
2663
extern "C" {
2664
typedef void (*sa_handler_t)(int);
2665
typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2666
}
2667
2668
// Installs 'handler' for 'signal_number' with SA_RESTART|SA_SIGINFO and a
// full signal mask. Returns the previously installed handler, or (void*)-1
// if registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
  // NOTE(review): SA_SIGINFO is set but the handler is stored through
  // sa_handler; on Linux sa_handler/sa_sigaction share storage, so the
  // three-argument handler is still invoked correctly.
  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
2682
2683
// Sends 'signal_number' to the current thread via raise(3).
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}
2686
2687
/*
2688
* The following code is moved from os.cpp for making this
2689
* code platform specific, which it is by its very nature.
2690
*/
2691
2692
// Will be modified when max signal is changed to be dynamic
// Returns the signal number used to tell the dispatcher thread to exit.
int os::sigexitnum_pd() {
  return NSIG;
}
2696
2697
// a counter for each possible signal value
// Incremented by signal_notify(), decremented (via CAS) by
// check_pending_signals().
static volatile jint pending_signals[NSIG+1] = { 0 };

// Linux(POSIX) specific hand shaking semaphore.
// sig_sem pairs signal_notify() with the waiting dispatcher thread;
// sr_semaphore is used by the suspend/resume machinery elsewhere in this file.
static sem_t sig_sem;
static Semaphore sr_semaphore;
2703
2704
// Platform-specific part of signal subsystem startup: clears the pending
// counters and creates the handshake semaphore.
void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  ::sem_init(&sig_sem, 0, 0);
}
2711
2712
// Record one occurrence of 'sig' and wake the dispatcher thread.
// The counter must be incremented before the semaphore is posted so that a
// woken waiter in check_pending_signals() always finds a nonzero count.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  ::sem_post(&sig_sem);
}
2716
2717
// Core of the signal dispatcher: claim one occurrence of the lowest-numbered
// pending signal and return its number. When 'wait' is false, returns -1 if
// nothing is pending; when true, blocks on sig_sem until os::signal_notify()
// posts it, cooperating with the external-suspend protocol while blocked.
static int check_pending_signals(bool wait) {
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS the counter down by one; success means this thread owns one
      // occurrence of signal i.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ::sem_wait(&sig_sem);

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Re-post so the wakeup is not lost, then park in the suspended state.
        ::sem_post(&sig_sem);

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
2754
2755
int os::signal_lookup() {
2756
return check_pending_signals(false);
2757
}
2758
2759
int os::signal_wait() {
2760
return check_pending_signals(true);
2761
}
2762
2763
////////////////////////////////////////////////////////////////////////////////
2764
// Virtual Memory
2765
2766
int os::vm_page_size() {
2767
// Seems redundant as all get out
2768
assert(os::Linux::page_size() != -1, "must call os::init");
2769
return os::Linux::page_size();
2770
}
2771
2772
// Solaris allocates memory by pages.
2773
int os::vm_allocation_granularity() {
2774
assert(os::Linux::page_size() != -1, "must call os::init");
2775
return os::Linux::page_size();
2776
}
2777
2778
// Rationale behind this function:
// current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
// mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
// samples for JITted code. Here we create private executable mapping over the code cache
// and then we can use standard (well, almost, as mapping can change) way to provide
// info for the reporting script by storing timestamp and location of symbol
void linux_wrap_code(char* base, size_t size) {
  // Monotonic counter to make each temp-file name unique within the process.
  static volatile jint cnt = 0;

  if (!UseOprofile) {
    return;
  }

  // Build <tmpdir>/hs-vm-<pid>-<counter> and remove any stale file first.
  char buf[PATH_MAX+1];
  int num = Atomic::add(1, &cnt);

  snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
           os::get_temp_directory(), os::current_process_id(), num);
  unlink(buf);

  int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);

  if (fd != -1) {
    // Extend the file by seeking near the requested size and writing one
    // byte, then map it executable over the code cache.
    off_t rv = ::lseek(fd, size-2, SEEK_SET);
    if (rv != (off_t)-1) {
      if (::write(fd, "", 1) == 1) {
        mmap(base, size,
             PROT_READ|PROT_WRITE|PROT_EXEC,
             MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
      }
    }
    ::close(fd);
    // Unlink immediately; the established mapping keeps the inode alive.
    unlink(buf);
  }
}
2813
2814
// See if the error is one we can let the caller handle. This
// list of errno values comes from JBS-6843484. I can't find a
// Linux man page that documents this specific set of errno
// values so while this list currently matches Solaris, it may
// change as we gain experience with this failure mode.
static bool recoverable_mmap_error(int err) {
  // EBADF, EINVAL and ENOTSUP are handed back to the caller. Any other
  // errno may mean our reserved mapping was lost, which could let two
  // different data structures (or the VM and a native library) believe
  // they own the same memory — treat that as unrecoverable.
  return err == EBADF || err == EINVAL || err == ENOTSUP;
}
2836
2837
// Emit a non-fatal warning describing a failed commit_memory() call
// (address/size/exec variant). 'err' is the errno saved from mmap().
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          strerror(err), err);
}
2843
2844
// Overload of the warning above for the alignment-hint variant of
// commit_memory(); 'err' is the errno saved from mmap().
static void warn_fail_commit_memory(char* addr, size_t size,
                                    size_t alignment_hint, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, size,
          alignment_hint, exec, strerror(err), err);
}
2851
2852
// NOTE: Linux kernel does not really reserve the pages for us.
// All it does is to check if there are enough free pages
// left at the time of mmap(). This could be a potential
// problem.
//
// Commit [addr, addr+size) by mmap()ing it read/write (plus execute when
// 'exec') with MAP_FIXED over the existing reservation. Returns 0 on
// success or the mmap() errno for recoverable failures; aborts the VM for
// any other failure.
int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                     MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
  if (res != (uintptr_t) MAP_FAILED) {
    // Preserve NUMA interleaving across the newly committed range.
    if (UseNUMAInterleaving) {
      numa_make_global(addr, size);
    }
    return 0;
  }

  int err = errno;  // save errno from mmap() call above

  if (!recoverable_mmap_error(err)) {
    warn_fail_commit_memory(addr, size, exec, err);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory.");
  }

  return err;
}
2876
2877
// Commit [addr, addr+size); returns true on success.
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
  const int err = os::Linux::commit_memory_impl(addr, size, exec);
  return err == 0;
}
2880
2881
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2882
const char* mesg) {
2883
assert(mesg != NULL, "mesg must be specified");
2884
int err = os::Linux::commit_memory_impl(addr, size, exec);
2885
if (err != 0) {
2886
// the caller wants all commit errors to exit with the specified mesg:
2887
warn_fail_commit_memory(addr, size, exec, err);
2888
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2889
}
2890
}
2891
2892
// Define MAP_HUGETLB here so we can build HotSpot on old systems.
// Value matches the Linux UAPI definition — TODO confirm on exotic arches.
#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000
#endif

// Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
// Value matches the Linux UAPI definition.
#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif
2901
2902
int os::Linux::commit_memory_impl(char* addr, size_t size,
2903
size_t alignment_hint, bool exec) {
2904
int err = os::Linux::commit_memory_impl(addr, size, exec);
2905
if (err == 0) {
2906
realign_memory(addr, size, alignment_hint);
2907
}
2908
return err;
2909
}
2910
2911
// Commit with an alignment hint; returns true on success.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  const int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
  return err == 0;
}
2915
2916
void os::pd_commit_memory_or_exit(char* addr, size_t size,
2917
size_t alignment_hint, bool exec,
2918
const char* mesg) {
2919
assert(mesg != NULL, "mesg must be specified");
2920
int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
2921
if (err != 0) {
2922
// the caller wants all commit errors to exit with the specified mesg:
2923
warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
2924
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2925
}
2926
}
2927
2928
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2929
if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
2930
// We don't check the return value: madvise(MADV_HUGEPAGE) may not
2931
// be supported or the memory may already be backed by huge pages.
2932
::madvise(addr, bytes, MADV_HUGEPAGE);
2933
}
2934
}
2935
2936
// Discard the physical backing of [addr, addr+bytes) while keeping the
// virtual range reserved (see commit_memory's mmap-over-mmap mechanism).
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  // This method works by doing an mmap over an existing mmaping and effectively discarding
  // the existing pages. However it won't work for SHM-based large pages that cannot be
  // uncommitted at all. We don't do anything in this case to avoid creating a segment with
  // small pages on top of the SHM segment. This method always works for small pages, so we
  // allow that in any case.
  if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
    commit_memory(addr, bytes, alignment_hint, !ExecMem);
  }
}
2946
2947
void os::numa_make_global(char *addr, size_t bytes) {
2948
Linux::numa_interleave_memory(addr, bytes);
2949
}
2950
2951
// Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
// bind policy to MPOL_PREFERRED for the current thread.
#define USE_MPOL_PREFERRED 0

// Place [addr, addr+bytes) preferentially on the node given by 'lgrp_hint'.
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  // To make NUMA and large pages more robust when both enabled, we need to ease
  // the requirements on where the memory should be allocated. MPOL_BIND is the
  // default policy and it will force memory to be allocated on the specified
  // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
  // the specified node, but will not force it. Using this policy will prevent
  // getting SIGBUS when trying to allocate large pages on NUMA nodes with no
  // free large pages.
  Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
  Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
}
2966
2967
// NUMA topology changes are not reported to the VM on Linux.
bool os::numa_topology_changed() {
  return false;
}
2968
2969
// Number of NUMA nodes in which it is possible to allocate memory
// ("configured" nodes, in libnuma terminology).
size_t os::numa_get_groups_num() {
  const size_t configured_nodes = Linux::numa_num_configured_nodes();
  return configured_nodes;
}
2974
2975
int os::numa_get_group_id() {
2976
int cpu_id = Linux::sched_getcpu();
2977
if (cpu_id != -1) {
2978
int lgrp_id = Linux::get_node_by_cpu(cpu_id);
2979
if (lgrp_id != -1) {
2980
return lgrp_id;
2981
}
2982
}
2983
return 0;
2984
}
2985
2986
int os::Linux::get_existing_num_nodes() {
2987
size_t node;
2988
size_t highest_node_number = Linux::numa_max_node();
2989
int num_nodes = 0;
2990
2991
// Get the total number of nodes in the system including nodes without memory.
2992
for (node = 0; node <= highest_node_number; node++) {
2993
if (isnode_in_existing_nodes(node)) {
2994
num_nodes++;
2995
}
2996
}
2997
return num_nodes;
2998
}
2999
3000
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3001
size_t highest_node_number = Linux::numa_max_node();
3002
size_t i = 0;
3003
3004
// Map all node ids in which is possible to allocate memory. Also nodes are
3005
// not always consecutively available, i.e. available from 0 to the highest
3006
// node number.
3007
for (size_t node = 0; node <= highest_node_number; node++) {
3008
if (Linux::isnode_in_configured_nodes(node)) {
3009
ids[i++] = node;
3010
}
3011
}
3012
return i;
3013
}
3014
3015
// Per-page information queries are not supported on Linux.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}
3018
3019
// Page scanning is a no-op on Linux: report the entire range as matching.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
3022
3023
3024
// Fallback used when libc lacks sched_getcpu(): issue the getcpu syscall
// directly (via the legacy vsyscall page on AMD64). Returns the current
// cpu id, or -1 when no per-arch path exists or the call fails.
int os::Linux::sched_getcpu_syscall(void) {
  unsigned int cpu = 0;
  int retval = -1;

#if defined(AMD64)
  // Unfortunately we have to bring all these macros here from vsyscall.h
  // to be able to compile on old linuxes.
  # define __NR_vgetcpu 2
  # define VSYSCALL_START (-10UL << 20)
  # define VSYSCALL_SIZE 1024
  # define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
  typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
  vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
  retval = vgetcpu(&cpu, NULL, NULL);
#elif defined(IA32) || defined(AARCH64)
  // Syscall numbers for architectures whose old libcs may not define them.
  # ifndef SYS_getcpu
  # define SYS_getcpu AARCH64_ONLY(168) IA32_ONLY(318)
  # endif
  retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
#endif

  return (retval == -1) ? retval : cpu;
}
3047
3048
// Something to do with the numa-aware allocator needs these symbols
// (exported no-op/shim definitions; presumably they override or satisfy
// references from libnuma — TODO confirm which consumer links them).
extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
extern "C" JNIEXPORT void numa_error(char *where) { }
extern "C" JNIEXPORT int fork1() { return fork(); }
3052
3053
// Handle request to load libnuma symbol version 1.1 (API v1). If it fails
// load symbol from base version instead.
void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
#if !defined(__UCLIBC__) && !defined(__ANDROID__)
  // dlvsym() is a GNU extension; uClibc and Android bionic do not provide it.
  void *f = dlvsym(handle, name, "libnuma_1.1");
  if (f == NULL) {
    // Fall back to the default (unversioned) symbol.
    f = dlsym(handle, name);
  }
  return f;
#else
  return dlsym(handle, name);
#endif
}
3066
3067
// Handle request to load libnuma symbol version 1.2 (API v2) only.
// Return NULL if the symbol is not defined in this particular version.
void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) {
#ifndef __ANDROID__
  return dlvsym(handle, name, "libnuma_1.2");
#else // __ANDROID__
  // Bionic has no dlvsym(); treat v2-only symbols as unavailable.
  return NULL;
#endif // !__ANDROID__
}
3076
3077
// Resolve sched_getcpu() and the libnuma entry points used by the VM, then
// build the node/cpu mapping tables. Returns true only when sched_getcpu()
// works, libnuma.so.1 loads, and numa_available() reports NUMA support.
bool os::Linux::libnuma_init() {
  // sched_getcpu() should be in libc.
  set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
                                  dlsym(RTLD_DEFAULT, "sched_getcpu")));

  // If it's not, try a direct syscall.
  if (sched_getcpu() == -1)
    set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, (void*)&sched_getcpu_syscall));

  if (sched_getcpu() != -1) { // Does it work?
    void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
    if (handle != NULL) {
      // Resolve the v1.1 API (with unversioned fallback) for everything we use.
      set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
                                           libnuma_dlsym(handle, "numa_node_to_cpus")));
      set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
                                       libnuma_dlsym(handle, "numa_max_node")));
      set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
                                                   libnuma_dlsym(handle, "numa_num_configured_nodes")));
      set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
                                        libnuma_dlsym(handle, "numa_available")));
      set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
                                            libnuma_dlsym(handle, "numa_tonode_memory")));
      set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
                                                libnuma_dlsym(handle, "numa_interleave_memory")));
      // The v2 variant is looked up with the versioned dlvsym and may be NULL.
      set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
                                                   libnuma_v2_dlsym(handle, "numa_interleave_memory")));
      set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
                                              libnuma_dlsym(handle, "numa_set_bind_policy")));
      set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
                                               libnuma_dlsym(handle, "numa_bitmask_isbitset")));
      set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
                                       libnuma_dlsym(handle, "numa_distance")));

      if (numa_available() != -1) {
        set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
        set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
        set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
        // Create an index -> node mapping, since nodes are not always consecutive
        _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
        rebuild_nindex_to_node_map();
        // Create a cpu -> node mapping
        _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
        rebuild_cpu_to_node_map();
        return true;
      }
    }
  }
  return false;
}
3126
3127
void os::Linux::rebuild_nindex_to_node_map() {
3128
int highest_node_number = Linux::numa_max_node();
3129
3130
nindex_to_node()->clear();
3131
for (int node = 0; node <= highest_node_number; node++) {
3132
if (Linux::isnode_in_existing_nodes(node)) {
3133
nindex_to_node()->append(node);
3134
}
3135
}
3136
}
3137
3138
// rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id.
// The table is later used in get_node_by_cpu().
// Memory-less nodes are mapped to their closest configured node (by
// numa_distance), so every cpu resolves to a node with allocatable memory.
void os::Linux::rebuild_cpu_to_node_map() {
  const size_t NCPUS = 32768; // Since the buffer size computation is very obscure
                              // in libnuma (possible values are starting from 16,
                              // and continuing up with every other power of 2, but less
                              // than the maximum number of CPUs supported by kernel), and
                              // is a subject to change (in libnuma version 2 the requirements
                              // are more reasonable) we'll just hardcode the number they use
                              // in the library.
  const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;

  size_t cpu_num = processor_count();
  size_t cpu_map_size = NCPUS / BitsPerCLong;
  // Only the words that can contain bits for the present cpus are scanned.
  size_t cpu_map_valid_size =
    MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);

  cpu_to_node()->clear();
  cpu_to_node()->at_grow(cpu_num - 1);

  size_t node_num = get_existing_num_nodes();

  int distance = 0;
  int closest_distance = INT_MAX;
  int closest_node = 0;
  // Scratch bitmap filled by numa_node_to_cpus() for each node.
  unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal);
  for (size_t i = 0; i < node_num; i++) {
    // Check if node is configured (not a memory-less node). If it is not, find
    // the closest configured node.
    if (!isnode_in_configured_nodes(nindex_to_node()->at(i))) {
      closest_distance = INT_MAX;
      // Check distance from all remaining nodes in the system. Ignore distance
      // from itself and from another non-configured node.
      for (size_t m = 0; m < node_num; m++) {
        if (m != i && isnode_in_configured_nodes(nindex_to_node()->at(m))) {
          distance = numa_distance(nindex_to_node()->at(i), nindex_to_node()->at(m));
          // If a closest node is found, update. There is always at least one
          // configured node in the system so there is always at least one node
          // close.
          if (distance != 0 && distance < closest_distance) {
            closest_distance = distance;
            closest_node = nindex_to_node()->at(m);
          }
        }
      }
    } else {
      // Current node is already a configured node.
      closest_node = nindex_to_node()->at(i);
    }

    // Get cpus from the original node and map them to the closest node. If node
    // is a configured node (not a memory-less node), then original node and
    // closest node are the same.
    if (numa_node_to_cpus(nindex_to_node()->at(i), cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
      for (size_t j = 0; j < cpu_map_valid_size; j++) {
        if (cpu_map[j] != 0) {
          for (size_t k = 0; k < BitsPerCLong; k++) {
            if (cpu_map[j] & (1UL << k)) {
              cpu_to_node()->at_put(j * BitsPerCLong + k, closest_node);
            }
          }
        }
      }
    }
  }
  FREE_C_HEAP_ARRAY(unsigned long, cpu_map, mtInternal);
}
3205
3206
int os::Linux::get_node_by_cpu(int cpu_id) {
3207
if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
3208
return cpu_to_node()->at(cpu_id);
3209
}
3210
return -1;
3211
}
3212
3213
// Definitions of the lazily-resolved libnuma function pointers and node
// tables declared in os::Linux; populated by libnuma_init().
GrowableArray<int>* os::Linux::_cpu_to_node;
GrowableArray<int>* os::Linux::_nindex_to_node;
os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
unsigned long* os::Linux::_numa_all_nodes;
struct bitmask* os::Linux::_numa_all_nodes_ptr;
struct bitmask* os::Linux::_numa_nodes_ptr;
3229
3230
// Return [addr, addr+size) to the "reserved but uncommitted" state by
// remapping it PROT_NONE with MAP_NORESERVE; true on success.
bool os::pd_uncommit_memory(char* addr, size_t size) {
  void* res = ::mmap(addr, size, PROT_NONE,
                     MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
  return res != MAP_FAILED;
}
3235
3236
// Binary-search for the lowest mapped page of the stack whose top is at
// 'bottom + size', using mincore() as the mapped/unmapped oracle.
// (The historical "commited" spelling is kept — callers use this name.)
static
address get_stack_commited_bottom(address bottom, size_t size) {
  address nbot = bottom;
  address ntop = bottom + size;

  size_t page_sz = os::vm_page_size();
  unsigned pages = size / page_sz;

  unsigned char vec[1];
  // Search over page counts measured downward from ntop.
  unsigned imin = 1, imax = pages + 1, imid;
  int mincore_return_value = 0;

  assert(imin <= imax, "Unexpected page size");

  while (imin < imax) {
    imid = (imax + imin) / 2;
    nbot = ntop - (imid * page_sz);

    // Use a trick with mincore to check whether the page is mapped or not.
    // mincore sets vec to 1 if page resides in memory and to 0 if page
    // is swapped output but if page we are asking for is unmapped
    // it returns -1,ENOMEM
    mincore_return_value = mincore(nbot, page_sz, vec);

    if (mincore_return_value == -1) {
      // Page is not mapped go up
      // to find first mapped page
      if (errno != EAGAIN) {
        assert(errno == ENOMEM, "Unexpected mincore errno");
        imax = imid;
      }
    } else {
      // Page is mapped go down
      // to find first not mapped page
    imin = imid + 1;
    }
  }

  nbot = nbot + page_sz;

  // Adjust stack bottom one page up if last checked page is not mapped
  if (mincore_return_value == -1) {
    nbot = nbot + page_sz;
  }

  return nbot;
}
3283
3284
3285
// Linux uses a growable mapping for the stack, and if the mapping for
3286
// the stack guard pages is not removed when we detach a thread the
3287
// stack cannot grow beyond the pages where the stack guard was
3288
// mapped. If at some point later in the process the stack expands to
3289
// that point, the Linux kernel cannot expand the stack any further
3290
// because the guard pages are in the way, and a segfault occurs.
3291
//
3292
// However, it's essential not to split the stack region by unmapping
3293
// a region (leaving a hole) that's already part of the stack mapping,
3294
// so if the stack mapping has already grown beyond the guard pages at
3295
// the time we create them, we have to truncate the stack mapping.
3296
// So, we need to know the extent of the stack mapping when
3297
// create_stack_guard_pages() is called.
3298
3299
// We only need this for stacks that are growable: at the time of
3300
// writing thread stacks don't use growable mappings (i.e. those
3301
// creeated with MAP_GROWSDOWN), and aren't marked "[stack]", so this
3302
// only applies to the main thread.
3303
3304
// If the (growable) stack mapping already extends beyond the point
3305
// where we're going to put our guard pages, truncate the mapping at
3306
// that point by munmap()ping it. This ensures that when we later
3307
// munmap() the guard pages we don't leave a hole in the stack
3308
// mapping. This only affects the main/primordial thread
3309
3310
// Create stack guard pages at [addr, addr+size). For the primordial thread
// the growable stack mapping may already extend below 'addr'; truncate it
// first so that a later munmap() of the guard pages cannot leave a hole in
// the stack mapping (see the discussion above this function).
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {

  if (os::is_primordial_thread()) {
    // As we manually grow stack up to bottom inside create_attached_thread(),
    // it's likely that os::Linux::initial_thread_stack_bottom is mapped and
    // we don't need to do anything special.
    // Check it first, before calling heavy function.
    uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom();
    unsigned char vec[1];

    if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
      // Fallback to slow path on all errors, including EAGAIN
      stack_extent = (uintptr_t) get_stack_commited_bottom(
                                   os::Linux::initial_thread_stack_bottom(),
                                   (size_t)addr - stack_extent);
    }

    // Truncate any part of the stack mapping that already grew past 'addr'.
    if (stack_extent < (uintptr_t)addr) {
      ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent));
    }
  }

  return os::commit_memory(addr, size, !ExecMem);
}
3334
3335
// If this is a growable mapping, remove the guard pages entirely by
3336
// munmap()ping them. If not, just call uncommit_memory(). This only
3337
// affects the main/primordial thread, but guard against future OS changes.
3338
// It's safe to always unmap guard pages for primordial thread because we
3339
// always place it right after end of the mapped region.
3340
3341
// Remove the stack guard pages at [addr, addr+size). For the primordial
// thread the guard pages always sit right past the end of the mapped
// region (see pd_create_stack_guard_pages), so they can be munmap()ed
// entirely; any other thread just uncommits them.
// Returns true on success.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Fix: dropped the never-used locals 'stack_extent, stack_base' that the
  // old code declared here (dead code, and a -Wunused-variable warning).
  if (os::is_primordial_thread()) {
    return ::munmap(addr, size) == 0;
  }

  return os::uncommit_memory(addr, size);
}
3350
3351
static address _highest_vm_reserved_address = NULL;
3352
3353
// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
// at 'requested_addr'. If there are existing memory mappings at the same
// location, however, they will be overwritten. If 'fixed' is false,
// 'requested_addr' is only treated as a hint, the return value may or
// may not start from the requested address. Unlike Linux mmap(), this
// function returns NULL to indicate failure.
static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
  char * addr;
  int flags;

  flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
  if (fixed) {
    // MAP_FIXED requires page alignment.
    assert((uintptr_t)requested_addr % os::Linux::page_size() == 0, "unaligned address");
    flags |= MAP_FIXED;
  }

  // Map reserved/uncommitted pages PROT_NONE so we fail early if we
  // touch an uncommitted page. Otherwise, the read/write might
  // succeed if we have enough swap space to back the physical page.
  addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
                       flags, -1, 0);

  if (addr != MAP_FAILED) {
    // Track the high-water mark of VM reservations.
    // anon_mmap() should only get called during VM initialization,
    // don't need lock (actually we can skip locking even it can be called
    // from multiple threads, because _highest_vm_reserved_address is just a
    // hint about the upper limit of non-stack memory regions.)
    if ((address)addr + bytes > _highest_vm_reserved_address) {
      _highest_vm_reserved_address = (address)addr + bytes;
    }
  }

  return addr == MAP_FAILED ? NULL : addr;
}
3387
3388
// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
// (req_addr != NULL) or with a given alignment.
// - bytes shall be a multiple of alignment.
// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
// - alignment sets the alignment at which memory shall be allocated.
//   It must be a multiple of allocation granularity.
// Returns address of memory or NULL. If req_addr was not NULL, will only return
// req_addr or NULL.
static char* anon_mmap_aligned(size_t bytes, size_t alignment, char* req_addr) {

  // Over-allocate by 'alignment' so an aligned sub-range of 'bytes' is
  // guaranteed to exist inside the mapping; the excess is trimmed below.
  size_t extra_size = bytes;
  if (req_addr == NULL && alignment > 0) {
    extra_size += alignment;
  }

  char* start = (char*) ::mmap(req_addr, extra_size, PROT_NONE,
                               MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
                               -1, 0);
  if (start == MAP_FAILED) {
    start = NULL;
  } else {
    if (req_addr != NULL) {
      // req_addr is only a hint to mmap without MAP_FIXED; give up unless
      // we got exactly the requested address.
      if (start != req_addr) {
        ::munmap(start, extra_size);
        start = NULL;
      }
    } else {
      // Trim the unaligned head and tail, keeping [start_aligned, end_aligned).
      char* const start_aligned = (char*) align_ptr_up(start, alignment);
      char* const end_aligned = start_aligned + bytes;
      char* const end = start + extra_size;
      if (start_aligned > start) {
        ::munmap(start, start_aligned - start);
      }
      if (end_aligned < end) {
        ::munmap(end_aligned, end - end_aligned);
      }
      start = start_aligned;
    }
  }
  return start;
}
3429
3430
// Don't update _highest_vm_reserved_address, because there might be memory
3431
// regions above addr + size. If so, releasing a memory region only creates
3432
// a hole in the address space, it doesn't help prevent heap-stack collision.
3433
//
3434
// Unmap [addr, addr+size); returns true on success.
// Fix: declared bool (was int) — the function computes a boolean and every
// caller (e.g. os::pd_release_memory) consumes it as one; this matches the
// cleanup applied in later JDK releases.
static bool anon_munmap(char * addr, size_t size) {
  return ::munmap(addr, size) == 0;
}
3437
3438
char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
3439
size_t alignment_hint) {
3440
return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
3441
}
3442
3443
bool os::pd_release_memory(char* addr, size_t size) {
3444
return anon_munmap(addr, size);
3445
}
3446
3447
// Accessor for the high-water mark of VM-reserved (non-stack) memory,
// maintained by anon_mmap().
static address highest_vm_reserved_address() {
  return _highest_vm_reserved_address;
}
3450
3451
// mprotect() the page-aligned range covering [addr, addr+size) with 'prot';
// returns true on success.
static bool linux_mprotect(char* addr, size_t size, int prot) {
  // Linux wants the mprotect address argument to be page aligned.
  char* bottom = (char*)align_size_down((intptr_t)addr, os::Linux::page_size());

  // According to SUSv3, mprotect() should only be used with mappings
  // established by mmap(), and mmap() always maps whole pages. Unaligned
  // 'addr' likely indicates problem in the VM (e.g. trying to change
  // protection of malloc'ed or statically allocated memory). Check the
  // caller if you hit this assert.
  assert(addr == bottom, "sanity check");

  // Round the size up so the whole of [addr, addr+size) is covered.
  size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
  return ::mprotect(bottom, size, prot) == 0;
}
3465
3466
// Set protections specified
3467
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3468
bool is_committed) {
3469
unsigned int p = 0;
3470
switch (prot) {
3471
case MEM_PROT_NONE: p = PROT_NONE; break;
3472
case MEM_PROT_READ: p = PROT_READ; break;
3473
case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break;
3474
case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3475
default:
3476
ShouldNotReachHere();
3477
}
3478
// is_committed is unused.
3479
return linux_mprotect(addr, bytes, p);
3480
}
3481
3482
bool os::guard_memory(char* addr, size_t size) {
3483
return linux_mprotect(addr, size, PROT_NONE);
3484
}
3485
3486
bool os::unguard_memory(char* addr, size_t size) {
3487
return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
3488
}
3489
3490
// Probe whether madvise(MADV_HUGEPAGE) is accepted by mapping a scratch
// region and advising a 'page_size'-aligned address within it. Optionally
// warns when THP turns out to be unsupported.
bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) {
  bool result = false;
  // Map twice the (large) page size so an aligned address presumably falls
  // inside the mapping — TODO confirm for page_size larger than the mapping's
  // natural alignment.
  void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
                 MAP_ANONYMOUS|MAP_PRIVATE,
                 -1, 0);
  if (p != MAP_FAILED) {
    void *aligned_p = align_ptr_up(p, page_size);

    result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;

    munmap(p, page_size * 2);
  }

  if (warn && !result) {
    warning("TransparentHugePages is not supported by the operating system.");
  }

  return result;
}
3509
3510
// Probe whether MAP_HUGETLB mappings actually work by creating one and then
// checking /proc/self/maps for a "hugepage" annotation on the entry whose
// start address matches. Optionally warns when HugeTLBFS is unsupported.
bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
  bool result = false;
  void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
                 MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
                 -1, 0);

  if (p != MAP_FAILED) {
    // We don't know if this really is a huge page or not.
    FILE *fp = fopen("/proc/self/maps", "r");
    if (fp) {
      while (!feof(fp)) {
        char chars[257];
        long x = 0;
        if (fgets(chars, sizeof(chars), fp)) {
          // Look for the maps line whose start address equals our mapping.
          if (sscanf(chars, "%lx-%*x", &x) == 1
              && x == (long)p) {
            if (strstr (chars, "hugepage")) {
              result = true;
              break;
            }
          }
        }
      }
      fclose(fp);
    }
    munmap(p, page_size);
  }

  if (warn && !result) {
    warning("HugeTLBFS is not supported by the operating system.");
  }

  return result;
}
3544
3545
/*
* Set the coredump_filter bits to include largepages in core dump (bit 6)
*
* From the coredump_filter documentation:
*
* - (bit 0) anonymous private memory
* - (bit 1) anonymous shared memory
* - (bit 2) file-backed private memory
* - (bit 3) file-backed shared memory
* - (bit 4) ELF header pages in file-backed private memory areas (it is
*           effective only if the bit 2 is cleared)
* - (bit 5) hugetlb private memory
* - (bit 6) hugetlb shared memory
*/
// Best-effort: any failure (no /proc, unreadable file, parse error) simply
// leaves the kernel's default filter in place; no error is reported.
static void set_coredump_filter(void) {
  FILE *f;
  long cdm;

  if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
    return;
  }

  if (fscanf(f, "%lx", &cdm) != 1) {
    fclose(f);
    return;
  }

  // Rewind so the write below replaces the value instead of appending.
  rewind(f);

  // Only write back if the bit is not already set.
  if ((cdm & LARGEPAGES_BIT) == 0) {
    cdm |= LARGEPAGES_BIT;
    fprintf(f, "%#lx", cdm);
  }

  fclose(f);
}
3581
3582
// Large page support
3583
3584
static size_t _large_page_size = 0;
3585
3586
// Determine the kernel's huge page size, preferring the value published in
// /proc/meminfo and falling back to a per-architecture compile-time default.
size_t os::Linux::find_large_page_size() {
  size_t large_page_size = 0;

  // large_page_size on Linux is used to round up heap size. x86 uses either
  // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
  // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
  // page as large as 256M.
  //
  // Here we try to figure out page size by parsing /proc/meminfo and looking
  // for a line with the following format:
  //    Hugepagesize:     2048 kB
  //
  // If we can't determine the value (e.g. /proc is not mounted, or the text
  // format has been changed), we'll use the largest page size supported by
  // the processor.

#ifndef ZERO
  large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
                    ARM_ONLY(2 * M) PPC_ONLY(4 * M) AARCH64_ONLY(2 * M);
#endif // ZERO

  FILE *fp = fopen("/proc/meminfo", "r");
  if (fp) {
    while (!feof(fp)) {
      int x = 0;
      char buf[16];
      if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
        // Matched the "Hugepagesize:" line; only trust the number if the
        // remainder of the line is exactly the " kB" unit suffix.
        if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
          large_page_size = x * K;
          break;
        }
      } else {
        // skip to next line
        for (;;) {
          int ch = fgetc(fp);
          if (ch == EOF || ch == (int)'\n') break;
        }
      }
    }
    fclose(fp);
  }

  // LargePageSizeInBytes cannot override the kernel-configured size on
  // Linux; warn if the user set it to something different.
  if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
    warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
        SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
        proper_unit_for_byte_size(large_page_size));
  }

  return large_page_size;
}
3636
3637
// Cache the large page size and, if it is bigger than the default page
// size, publish both sizes (largest first) in the _page_sizes table.
size_t os::Linux::setup_large_page_size() {
  _large_page_size = Linux::find_large_page_size();
  const size_t default_page_size = (size_t)Linux::page_size();
  if (_large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;  // 0-terminate the table
  }

  return _large_page_size;
}
3648
3649
// Select the large-page mechanism to use (THP, HugeTLBFS, or SysV SHM),
// sanity-checking each candidate in that order and clearing the flags of
// mechanisms that are unavailable. Returns true if any mechanism works.
bool os::Linux::setup_large_page_type(size_t page_size) {
  if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
      FLAG_IS_DEFAULT(UseSHM) &&
      FLAG_IS_DEFAULT(UseTransparentHugePages)) {

    // The type of large pages has not been specified by the user.

    // Try UseHugeTLBFS and then UseSHM.
    UseHugeTLBFS = UseSHM = true;

    // Don't try UseTransparentHugePages since there are known
    // performance issues with it turned on. This might change in the future.
    UseTransparentHugePages = false;
  }

  if (UseTransparentHugePages) {
    // Only warn if the user asked for THP explicitly.
    bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
    if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
      UseHugeTLBFS = false;
      UseSHM = false;
      return true;
    }
    UseTransparentHugePages = false;
  }

  if (UseHugeTLBFS) {
    bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
    if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
      UseSHM = false;
      return true;
    }
    UseHugeTLBFS = false;
  }

#ifdef DISABLE_SHM
  if (UseSHM) {
    warning("UseSHM is disabled");
    UseSHM = false;
  }
#endif //DISABLE_SHM

  // SHM is the last resort and is not sanity-checked here.
  return UseSHM;
}
3692
3693
void os::large_page_init() {
3694
if (!UseLargePages &&
3695
!UseTransparentHugePages &&
3696
!UseHugeTLBFS &&
3697
!UseSHM) {
3698
// Not using large pages.
3699
return;
3700
}
3701
3702
if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
3703
// The user explicitly turned off large pages.
3704
// Ignore the rest of the large pages flags.
3705
UseTransparentHugePages = false;
3706
UseHugeTLBFS = false;
3707
UseSHM = false;
3708
return;
3709
}
3710
3711
size_t large_page_size = Linux::setup_large_page_size();
3712
UseLargePages = Linux::setup_large_page_type(large_page_size);
3713
3714
set_coredump_filter();
3715
}
3716
3717
// Older headers may lack SHM_HUGETLB; the value is stable in the kernel ABI.
#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000
#endif

#ifndef DISABLE_SHM
// Warn about SHM large-page failures, but only when the user opted in to
// large pages explicitly (any related flag set on the command line).
#define shm_warning_format(format, ...)             \
  do {                                              \
    if (UseLargePages &&                            \
        (!FLAG_IS_DEFAULT(UseLargePages) ||         \
         !FLAG_IS_DEFAULT(UseSHM) ||                \
         !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { \
      warning(format, __VA_ARGS__);                 \
    }                                               \
  } while (0)

// Convenience wrapper for a plain string message.
#define shm_warning(str) shm_warning_format("%s", str)

// Capture errno immediately so the message reports the failing call's error.
#define shm_warning_with_errno(str)               \
  do {                                            \
    int err = errno;                              \
    shm_warning_format(str " (error = %d)", err); \
  } while (0)
3739
3740
// Attach the SysV segment 'shmid' at an address aligned to 'alignment'
// (which may exceed what shmat guarantees). 'bytes' must be a multiple of
// 'alignment'. Returns the attach address or NULL.
static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
  assert(is_size_aligned(bytes, alignment), "Must be divisible by the alignment");

  if (!is_size_aligned(alignment, SHMLBA)) {
    assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
    return NULL;
  }

  // To ensure that we get 'alignment' aligned memory from shmat,
  // we pre-reserve aligned virtual memory and then attach to that.

  char* pre_reserved_addr = anon_mmap_aligned(bytes, alignment, NULL);
  if (pre_reserved_addr == NULL) {
    // Couldn't pre-reserve aligned memory.
    shm_warning("Failed to pre-reserve aligned memory for shmat.");
    return NULL;
  }

  // SHM_REMAP is needed to allow shmat to map over an existing mapping.
  char* addr = (char*)shmat(shmid, pre_reserved_addr, SHM_REMAP);

  if ((intptr_t)addr == -1) {
    int err = errno;
    shm_warning_with_errno("Failed to attach shared memory.");

    assert(err != EACCES, "Unexpected error");
    assert(err != EIDRM,  "Unexpected error");
    assert(err != EINVAL, "Unexpected error");

    // Since we don't know if the kernel unmapped the pre-reserved memory area
    // we can't unmap it, since that would potentially unmap memory that was
    // mapped from other threads.
    return NULL;
  }

  return addr;
}
3777
3778
static char* shmat_at_address(int shmid, char* req_addr) {
3779
if (!is_ptr_aligned(req_addr, SHMLBA)) {
3780
assert(false, "Requested address needs to be SHMLBA aligned");
3781
return NULL;
3782
}
3783
3784
char* addr = (char*)shmat(shmid, req_addr, 0);
3785
3786
if ((intptr_t)addr == -1) {
3787
shm_warning_with_errno("Failed to attach shared memory.");
3788
return NULL;
3789
}
3790
3791
return addr;
3792
}
3793
3794
static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
3795
// If a req_addr has been provided, we assume that the caller has already aligned the address.
3796
if (req_addr != NULL) {
3797
assert(is_ptr_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
3798
assert(is_ptr_aligned(req_addr, alignment), "Must be divisible by given alignment");
3799
return shmat_at_address(shmid, req_addr);
3800
}
3801
3802
// Since shmid has been setup with SHM_HUGETLB, shmat will automatically
3803
// return large page size aligned memory addresses when req_addr == NULL.
3804
// However, if the alignment is larger than the large page size, we have
3805
// to manually ensure that the memory returned is 'alignment' aligned.
3806
if (alignment > os::large_page_size()) {
3807
assert(is_size_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
3808
return shmat_with_alignment(shmid, bytes, alignment);
3809
} else {
3810
return shmat_at_address(shmid, NULL);
3811
}
3812
}
3813
#endif // !DISABLE_SHM
3814
3815
// Reserve a large-page region via SysV shared memory (SHM_HUGETLB).
// 'bytes' must be a multiple of the large page size, else we return NULL so
// the caller can fall back to small pages. Returns the attach address or NULL.
char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
#ifndef DISABLE_SHM
  // "exec" is passed in but not used. Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  assert(UseLargePages && UseSHM, "only for SHM large pages");
  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
  assert(is_ptr_aligned(req_addr, alignment), "Unaligned address");

  if (!is_size_aligned(bytes, os::large_page_size())) {
    return NULL; // Fallback to small pages.
  }

  // Create a large shared memory region to attach to based on size.
  // Currently, size is the total size of the heap.
  int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
  if (shmid == -1) {
    // Possible reasons for shmget failure:
    // 1. shmmax is too small for Java heap.
    //    > check shmmax value: cat /proc/sys/kernel/shmmax
    //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
    // 2. not enough large page memory.
    //    > check available large pages: cat /proc/meminfo
    //    > increase amount of large pages:
    //          echo new_value > /proc/sys/vm/nr_hugepages
    //      Note 1: different Linux may use different name for this property,
    //            e.g. on Redhat AS-3 it is "hugetlb_pool".
    //      Note 2: it's possible there's enough physical memory available but
    //            they are so fragmented after a long run that they can't
    //            coalesce into large pages. Try to reserve large pages when
    //            the system is still "fresh".
    shm_warning_with_errno("Failed to reserve shared memory.");
    return NULL;
  }

  // Attach to the region.
  char* addr = shmat_large_pages(shmid, bytes, alignment, req_addr);

  // Remove shmid. If shmat() is successful, the actual shared memory segment
  // will be deleted when it's detached by shmdt() or when the process
  // terminates. If shmat() is not successful this will remove the shared
  // segment immediately.
  shmctl(shmid, IPC_RMID, NULL);

  return addr;
#else
  assert(0, "SHM was disabled on compile time");
  return NULL;
#endif
}
3864
3865
static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) {
3866
assert(error == ENOMEM, "Only expect to fail if no memory is available");
3867
3868
bool warn_on_failure = UseLargePages &&
3869
(!FLAG_IS_DEFAULT(UseLargePages) ||
3870
!FLAG_IS_DEFAULT(UseHugeTLBFS) ||
3871
!FLAG_IS_DEFAULT(LargePageSizeInBytes));
3872
3873
if (warn_on_failure) {
3874
char msg[128];
3875
jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
3876
PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
3877
warning("%s", msg);
3878
}
3879
}
3880
3881
char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
3882
assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
3883
assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
3884
assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
3885
3886
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
3887
char* addr = (char*)::mmap(req_addr, bytes, prot,
3888
MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
3889
-1, 0);
3890
3891
if (addr == MAP_FAILED) {
3892
warn_on_large_pages_failure(req_addr, bytes, errno);
3893
return NULL;
3894
}
3895
3896
assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");
3897
3898
return addr;
3899
}
3900
3901
// Reserve memory using mmap(MAP_HUGETLB).
// - bytes shall be a multiple of alignment.
// - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
// - alignment sets the alignment at which memory shall be allocated.
//   It must be a multiple of allocation granularity.
// Returns address of memory or NULL. If req_addr was not NULL, will only return
// req_addr or NULL.
char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  size_t large_page_size = os::large_page_size();
  assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");

  assert(is_ptr_aligned(req_addr, alignment), "Must be");
  assert(is_size_aligned(bytes, alignment), "Must be");

  // First reserve - but not commit - the address range in small pages.
  char* const start = anon_mmap_aligned(bytes, alignment, req_addr);

  if (start == NULL) {
    return NULL;
  }

  assert(is_ptr_aligned(start, alignment), "Must be");

  char* end = start + bytes;

  // Find the regions of the allocated chunk that can be promoted to large pages.
  char* lp_start = (char*)align_ptr_up(start, large_page_size);
  char* lp_end = (char*)align_ptr_down(end, large_page_size);

  size_t lp_bytes = lp_end - lp_start;

  assert(is_size_aligned(lp_bytes, large_page_size), "Must be");

  if (lp_bytes == 0) {
    // The mapped region doesn't even span the start and the end of a large page.
    // Fall back to allocate a non-special area.
    ::munmap(start, end - start);
    return NULL;
  }

  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;

  void* result;

  // The MAP_FIXED mmaps below commit sub-ranges in place over the reservation
  // made by anon_mmap_aligned() above.

  // Commit small-paged leading area.
  if (start != lp_start) {
    result = ::mmap(start, lp_start - start, prot,
                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
                    -1, 0);
    if (result == MAP_FAILED) {
      // A failed MAP_FIXED mmap leaves the target range unmapped; release the
      // rest of the reservation and bail out.
      ::munmap(lp_start, end - lp_start);
      return NULL;
    }
  }

  // Commit large-paged area.
  result = ::mmap(lp_start, lp_bytes, prot,
                  MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
                  -1, 0);
  if (result == MAP_FAILED) {
    warn_on_large_pages_failure(lp_start, lp_bytes, errno);
    // If the mmap above fails, the large pages region will be unmapped and we
    // have regions before and after with small pages. Release these regions.
    //
    // |  mapped  |  unmapped  |  mapped  |
    // ^          ^            ^          ^
    // start      lp_start     lp_end     end
    //
    ::munmap(start, lp_start - start);
    ::munmap(lp_end, end - lp_end);
    return NULL;
  }

  // Commit small-paged trailing area.
  if (lp_end != end) {
    result = ::mmap(lp_end, end - lp_end, prot,
                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
                    -1, 0);
    if (result == MAP_FAILED) {
      ::munmap(start, lp_end - start);
      return NULL;
    }
  }

  return start;
}
3987
3988
// Dispatch a HugeTLBFS reservation to the pure large-page path or the mixed
// small/large page path depending on size and alignment constraints.
char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
  assert(is_ptr_aligned(req_addr, alignment), "Must be");
  assert(is_size_aligned(alignment, os::vm_allocation_granularity()), "Must be");
  assert(is_power_of_2(os::large_page_size()), "Must be");
  assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");

  // The pure path requires a whole number of large pages and an alignment
  // that a MAP_HUGETLB mapping satisfies by construction.
  const bool pure_large_pages =
      is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size();

  return pure_large_pages
      ? reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec)
      : reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
}
4001
4002
// Reserve and commit a large-page region using the configured mechanism
// (SysV SHM or HugeTLBFS), then register it with NUMA interleaving and NMT.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  assert(UseLargePages, "only for large pages");

  char* addr = NULL;
  if (UseSHM) {
    addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
  } else {
    assert(UseHugeTLBFS, "must be");
    addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec);
  }

  if (addr == NULL) {
    return NULL;
  }

  if (UseNUMAInterleaving) {
    numa_make_global(addr, bytes);
  }

  // The memory is committed
  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);

  return addr;
}
4024
4025
// Release a SysV SHM large-page region. Returns true on success.
bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
#ifndef DISABLE_SHM
  // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
  return shmdt(base) == 0;
#else
  // Should be unreachable: setup_large_page_type() forces UseSHM off when
  // DISABLE_SHM is defined.
  assert(0, "SHM was disabled on compile time");
  // Fix: the function previously fell off the end of this non-void branch,
  // which is undefined behavior in product builds where assert() is a no-op.
  return false;
#endif
}
4033
4034
// Release a HugeTLBFS large-page region. A plain unmap suffices because the
// region was created with mmap().
bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
  return pd_release_memory(base, bytes);
}
4037
4038
bool os::release_memory_special(char* base, size_t bytes) {
4039
bool res;
4040
if (MemTracker::tracking_level() > NMT_minimal) {
4041
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
4042
res = os::Linux::release_memory_special_impl(base, bytes);
4043
if (res) {
4044
tkr.record((address)base, bytes);
4045
}
4046
4047
} else {
4048
res = os::Linux::release_memory_special_impl(base, bytes);
4049
}
4050
return res;
4051
}
4052
4053
bool os::Linux::release_memory_special_impl(char* base, size_t bytes) {
4054
assert(UseLargePages, "only for large pages");
4055
bool res;
4056
4057
if (UseSHM) {
4058
res = os::Linux::release_memory_special_shm(base, bytes);
4059
} else {
4060
assert(UseHugeTLBFS, "must be");
4061
res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
4062
}
4063
return res;
4064
}
4065
4066
// Accessor for the large page size cached by setup_large_page_size().
size_t os::large_page_size() {
  return _large_page_size;
}
4069
4070
// With SysV SHM the entire memory region must be allocated as shared
// memory.
// HugeTLBFS allows application to commit large page memory on demand.
// However, when committing memory with HugeTLBFS fails, the region
// that was supposed to be committed will lose the old reservation
// and allow other threads to steal that memory region. Because of this
// behavior we can't commit HugeTLBFS memory.
bool os::can_commit_large_page_memory() {
  // Only THP supports on-demand commit safely (see comment above).
  return UseTransparentHugePages;
}
4080
4081
// Executable mappings work for THP and HugeTLBFS; the SHM path ignores the
// exec flag (see reserve_memory_special_shm()).
bool os::can_execute_large_page_memory() {
  return UseTransparentHugePages || UseHugeTLBFS;
}
4084
4085
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  const int max_tries = 10;
  char* base[max_tries];
  size_t size[max_tries];
  const size_t gap = 0x000000;

  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level. If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain. See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

  // Repeatedly allocate blocks until the block is allocated at the
  // right spot. Give up after max_tries. Note that reserve_memory() will
  // automatically update _highest_vm_reserved_address if the call is
  // successful. The variable tracks the highest memory address every reserved
  // by JVM. It is used to detect heap-stack collision if running with
  // fixed-stack LinuxThreads. Because here we may attempt to reserve more
  // space than needed, it could confuse the collision detecting code. To
  // solve the problem, save current _highest_vm_reserved_address and
  // calculate the correct value before return.
  address old_highest = _highest_vm_reserved_address;

  // Linux mmap allows caller to pass an address as hint; give it a try first,
  // if kernel honors the hint then we can return immediately.
  char * addr = anon_mmap(requested_addr, bytes, false);
  if (addr == requested_addr) {
    return requested_addr;
  }

  if (addr != NULL) {
    // mmap() is successful but it fails to reserve at the requested address
    anon_munmap(addr, bytes);
  }

  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_memory(bytes);

    if (base[i] != NULL) {
      // Is this the block we wanted?
      if (base[i] == requested_addr) {
        size[i] = bytes;
        break;
      }

      // Does this overlap the block we wanted? Give back the overlapped
      // parts and try again.

      // NOTE(review): top_overlap and bottom_overlap are size_t (unsigned),
      // so the '>= 0' halves of the tests below are tautologies. The
      // effective filter is '< bytes': a "negative" overlap wraps around to
      // a huge unsigned value and fails that test instead. Behavior is as
      // intended, but the '>= 0' terms are dead.
      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
      if (top_overlap >= 0 && top_overlap < bytes) {
        unmap_memory(base[i], top_overlap);
        base[i] += top_overlap;
        size[i] = bytes - top_overlap;
      } else {
        size_t bottom_overlap = base[i] + bytes - requested_addr;
        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
          unmap_memory(requested_addr, bottom_overlap);
          size[i] = bytes - bottom_overlap;
        } else {
          size[i] = bytes;
        }
      }
    }
  }

  // Give back the unused reserved pieces.

  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      unmap_memory(base[j], size[j]);
    }
  }

  if (i < max_tries) {
    _highest_vm_reserved_address = MAX2(old_highest, (address)requested_addr + bytes);
    return requested_addr;
  } else {
    _highest_vm_reserved_address = old_highest;
    return NULL;
  }
}
4171
4172
// Thin wrapper over the POSIX read(2) system call.
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}
4175
4176
// Positioned read via pread(2); does not move the file offset.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}
4179
4180
// TODO-FIXME: reconcile Solaris' os::sleep with the linux variation.
// Solaris uses poll(), linux uses park().
// Poll() is likely a better choice, assuming that Thread.interrupt()
// generates a SIGUSRx signal. Note that SIGUSR1 can interfere with
// SIGSEGV, see 4355769.

// Sleep for 'millis' ms on the thread's own ParkEvent. Interruptible sleeps
// return OS_INTRPT when interrupted and cooperate with external suspension;
// non-interruptible sleeps always run to completion and return OS_OK.
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(),  "thread consistency check");

  // Reset the event before the fence so a wakeup posted after this point is
  // not lost.
  ParkEvent * const slp = thread->_SleepEvent ;
  slp->reset() ;
  OrderAccess::fence() ;

  if (interruptible) {
    jlong prevtime = javaTimeNanos();

    for (;;) {
      // Clears the interrupted flag when it returns true.
      if (os::is_interrupted(thread, true)) {
        return OS_INTRPT;
      }

      jlong newtime = javaTimeNanos();

      if (newtime - prevtime < 0) {
        // time moving backwards, should only happen if no monotonic clock
        // not a guarantee() because JVM should not abort on kernel/glibc bugs
        assert(!Linux::supports_monotonic_clock(), "time moving backwards");
      } else {
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
      }

      if(millis <= 0) {
        return OS_OK;
      }

      prevtime = newtime;

      {
        assert(thread->is_Java_thread(), "sanity check");
        JavaThread *jt = (JavaThread *) thread;
        ThreadBlockInVM tbivm(jt);
        OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

        jt->set_suspend_equivalent();
        // cleared by handle_special_suspend_equivalent_condition() or
        // java_suspend_self() via check_and_wait_while_suspended()

        slp->park(millis);

        // were we externally suspended while we were waiting?
        jt->check_and_wait_while_suspended();
      }
    }
  } else {
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jlong prevtime = javaTimeNanos();

    for (;;) {
      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
      // the 1st iteration ...
      jlong newtime = javaTimeNanos();

      if (newtime - prevtime < 0) {
        // time moving backwards, should only happen if no monotonic clock
        // not a guarantee() because JVM should not abort on kernel/glibc bugs
        assert(!Linux::supports_monotonic_clock(), "time moving backwards");
      } else {
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
      }

      if(millis <= 0) break ;

      prevtime = newtime;
      slp->park(millis);
    }
    return OS_OK ;
  }
}
4258
4259
//
4260
// Short sleep, direct OS call.
4261
//
4262
// Note: certain versions of Linux CFS scheduler (since 2.6.23) do not guarantee
4263
// sched_yield(2) will actually give up the CPU:
4264
//
4265
// * Alone on this pariticular CPU, keeps running.
4266
// * Before the introduction of "skip_buddy" with "compat_yield" disabled
4267
// (pre 2.6.39).
4268
//
4269
// So calling this with 0 is an alternative.
4270
//
4271
void os::naked_short_sleep(jlong ms) {
4272
struct timespec req;
4273
4274
assert(ms < 1000, "Un-interruptable sleep, short time use only");
4275
req.tv_sec = 0;
4276
if (ms > 0) {
4277
req.tv_nsec = (ms % 1000) * 1000000;
4278
}
4279
else {
4280
req.tv_nsec = 1;
4281
}
4282
4283
nanosleep(&req, NULL);
4284
4285
return;
4286
}
4287
4288
// Sleep forever; naked call to OS-specific sleep; use with CAUTION
4289
void os::infinite_sleep() {
4290
while (true) { // sleep forever ...
4291
::sleep(100); // ... 100 seconds at a time
4292
}
4293
}
4294
4295
// Used to convert frequent JVM_Yield() to nops.
// Controlled solely by the DontYieldALot flag.
bool os::dont_yield() {
  return DontYieldALot;
}
4299
4300
// Give up the CPU to other runnable threads. See the CFS caveats in the
// comment above naked_short_sleep(): sched_yield() may be a no-op.
void os::yield() {
  sched_yield();
}
4303
4304
// Bare yield; Linux gives no feedback on whether another thread actually ran.
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN ;}
4305
4306
// 'attempts' is unused on Linux.
void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  // Threads on Linux are all with same priority. The Solaris style
  // os::yield_all() with nanosleep(1ms) is not necessary.
  sched_yield();
}
4312
4313
// Called from the tight loops to possibly influence time-sharing heuristics;
// simply delegates to yield_all() on Linux.
void os::loop_breaker(int attempts) {
  os::yield_all(attempts);
}
4317
4318
////////////////////////////////////////////////////////////////////////////////
4319
// thread priority support
4320
4321
// Note: Normal Linux applications are run with SCHED_OTHER policy. SCHED_OTHER
4322
// only supports dynamic priority, static priority must be zero. For real-time
4323
// applications, Linux supports SCHED_RR which allows static priority (1-99).
4324
// However, for large multi-threaded applications, SCHED_RR is not only slower
4325
// than SCHED_OTHER, but also very unstable (my volano tests hang hard 4 out
4326
// of 5 runs - Sep 2005).
4327
//
4328
// The following code actually changes the niceness of kernel-thread/LWP. It
4329
// has an assumption that setpriority() only modifies one kernel-thread/LWP,
4330
// not the entire user process, and user level threads are 1:1 mapped to kernel
4331
// threads. It has always been the case, but could change in the future. For
4332
// this reason, the code should not be used as default (ThreadPriorityPolicy=0).
4333
// It is only used when ThreadPriorityPolicy=1 and requires root privilege.
4334
4335
// Map from Java thread priority (array index) to a Linux nice value.
// Lower nice means higher scheduling priority; index 0 is unused.
int os::java_to_os_priority[CriticalPriority + 1] = {
  19,              // 0 Entry should never be used

   4,              // 1 MinPriority
   3,              // 2
   2,              // 3

   1,              // 4
   0,              // 5 NormPriority
  -1,              // 6

  -2,              // 7
  -3,              // 8
  -4,              // 9 NearMaxPriority

  -5,              // 10 MaxPriority

  -5               // 11 CriticalPriority
};
4354
4355
static int prio_init() {
4356
if (ThreadPriorityPolicy == 1) {
4357
// Only root can raise thread priority. Don't allow ThreadPriorityPolicy=1
4358
// if effective uid is not root. Perhaps, a more elegant way of doing
4359
// this is to test CAP_SYS_NICE capability, but that will require libcap.so
4360
if (geteuid() != 0) {
4361
if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy)) {
4362
warning("-XX:ThreadPriorityPolicy requires root privilege on Linux");
4363
}
4364
ThreadPriorityPolicy = 0;
4365
}
4366
}
4367
if (UseCriticalJavaThreadPriority) {
4368
os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
4369
}
4370
return 0;
4371
}
4372
4373
// Set the native (nice) priority of 'thread'. A no-op returning OS_OK when
// thread priorities are disabled or ThreadPriorityPolicy == 0.
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  if ( !UseThreadPriorities || ThreadPriorityPolicy == 0 ) return OS_OK;

  // Adjusts only this kernel thread/LWP, not the whole process; see the
  // comment block above java_to_os_priority.
  int ret = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri);
  return (ret == 0) ? OS_OK : OS_ERR;
}
4379
4380
// Read the native (nice) priority of 'thread' into *priority_ptr. Reports
// the NormPriority mapping when thread priorities are disabled.
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  if ( !UseThreadPriorities || ThreadPriorityPolicy == 0 ) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }

  // getpriority() can legitimately return -1, so errno (cleared first) is
  // the only reliable failure indicator.
  errno = 0;
  *priority_ptr = getpriority(PRIO_PROCESS, thread->osthread()->thread_id());
  return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);
}
4390
4391
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
// Not implemented on Linux: intentionally a no-op.
void os::hint_no_preempt() {}
4394
4395
////////////////////////////////////////////////////////////////////////////////
4396
// suspend/resume support
4397
4398
// the low-level signal-based suspend/resume support is a remnant from the
4399
// old VM-suspension that used to be for java-suspension, safepoints etc,
4400
// within hotspot. Now there is a single use-case for this:
4401
// - calling get_thread_pc() on the VMThread by the flat-profiler task
4402
// that runs in the watcher thread.
4403
// The remaining code is greatly simplified from the more general suspension
4404
// code that used to be used.
4405
//
4406
// The protocol is quite simple:
4407
// - suspend:
4408
// - sends a signal to the target thread
4409
// - polls the suspend state of the osthread using a yield loop
4410
// - target thread signal handler (SR_handler) sets suspend state
4411
// and blocks in sigsuspend until continued
4412
// - resume:
4413
// - sets target osthread state to continue
4414
// - sends signal to end the sigsuspend loop in the SR_handler
4415
//
4416
// Note that the SR_lock plays no role in this suspend/resume protocol.
4417
//
4418
4419
// Drop the signal context saved at suspension time once the thread resumes.
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
  osthread->set_siginfo(NULL);
}
4423
4424
// Stash the signal context so the suspended thread's state (e.g. its PC)
// can be inspected while it waits in SR_handler().
static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
  osthread->set_ucontext(context);
  osthread->set_siginfo(siginfo);
}
4428
4429
//
// Handler function invoked when a thread's execution is suspended or
// resumed. We have to be careful that only async-safe functions are
// called here (Note: most pthread functions are not async safe and
// should be avoided.)
//
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal hander
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting granting signal.
//
// Currently only ever called on the VMThread and JavaThreads (PC sampling)
//
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current();
  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // Tell the suspender we are parked; it polls sr_semaphore.
      sr_semaphore.signal();
      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          // Acknowledge the resume before leaving the handler.
          sr_semaphore.signal();
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    // ignore
  }

  errno = old_errno;
}
4494
4495
4496
static int SR_initialize() {
4497
struct sigaction act;
4498
char *s;
4499
/* Get signal number to use for suspend/resume */
4500
if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
4501
int sig = ::strtol(s, 0, 10);
4502
if (sig > 0 || sig < _NSIG) {
4503
SR_signum = sig;
4504
}
4505
}
4506
4507
assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
4508
"SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
4509
4510
sigemptyset(&SR_sigset);
4511
sigaddset(&SR_sigset, SR_signum);
4512
4513
/* Set up signal handler for suspend/resume */
4514
act.sa_flags = SA_RESTART|SA_SIGINFO;
4515
act.sa_handler = (void (*)(int)) SR_handler;
4516
4517
// SR_signum is blocked by default.
4518
// 4528190 - We also need to block pthread restart signal (32 on all
4519
// supported Linux platforms). Note that LinuxThreads need to block
4520
// this signal for all threads to work properly. So we don't have
4521
// to use hard-coded signal number when setting up the mask.
4522
pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
4523
4524
if (sigaction(SR_signum, &act, 0) == -1) {
4525
return -1;
4526
}
4527
4528
// Save signal flag
4529
os::Linux::set_our_sigflags(SR_signum, act.sa_flags);
4530
return 0;
4531
}
4532
4533
// Deliver the suspend/resume signal to the target thread.
// Returns the pthread_kill() status (0 on success).
static int sr_notify(OSThread* osthread) {
  const int rc = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(rc == 0, rc, "pthread_kill");
  return rc;
}
4538
4539
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;

// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  assert(!sr_semaphore.trywait(), "semaphore has invalid state");

  // mark as suspended and send signal
  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    ShouldNotReachHere();
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  while (true) {
    if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
      break;
    } else {
      // timeout
      // The target has not acknowledged yet; try to cancel the request.
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        // Cancellation won the race: the suspension is abandoned.
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // The target reached SR_SUSPENDED concurrently with the timeout;
        // make sure that we consume the signal on the semaphore as well
        sr_semaphore.wait();
        break;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
4585
4586
// Wake a thread previously stopped by do_suspend().  Re-sends the SR
// signal until the target acknowledges by switching back to SR_RUNNING
// and posting the semaphore (the signal can be consumed while the target
// is still inside its handler, hence the retry loop).
static void do_resume(OSThread* osthread) {
  assert(osthread->sr.is_suspended(), "thread should be suspended");
  assert(!sr_semaphore.trywait(), "invalid semaphore state");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (true) {
    if (sr_notify(osthread) == 0) {
      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
        if (osthread->sr.is_running()) {
          return;
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
4610
4611
////////////////////////////////////////////////////////////////////////////////
4612
// interrupt support
4613
4614
// Post an interrupt to 'thread': set its interrupted flag and unpark it
// from every blocking mechanism it might be waiting on (Thread.sleep via
// _SleepEvent, java.util.concurrent parking via parker(), Object.wait via
// _ParkEvent).  Caller must be the thread itself or hold the Threads_lock.
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
    "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  if (!osthread->interrupted()) {
    osthread->set_interrupted(true);
    // More than one thread can get here with the same value of osthread,
    // resulting in multiple notifications.  We do, however, want the store
    // to interrupted() to be visible to other threads before we execute unpark().
    OrderAccess::fence();
    ParkEvent * const slp = thread->_SleepEvent ;
    if (slp != NULL) slp->unpark() ;
  }

  // For JSR166. Unpark even if interrupt status already was set
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

}
4638
4639
// Query (and optionally clear) the interrupted flag of 'thread'.
// Caller must be the thread itself or hold the Threads_lock.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
    "possibility of dangling Thread pointer");

  OSThread* const osthread = thread->osthread();
  const bool was_interrupted = osthread->interrupted();

  if (clear_interrupted && was_interrupted) {
    osthread->set_interrupted(false);
    // consider thread->_SleepEvent->reset() ... optional optimization
  }

  return was_interrupted;
}
4654
4655
///////////////////////////////////////////////////////////////////////////////////
4656
// signal handling (except suspend/resume)
4657
4658
// This routine may be used by user applications as a "hook" to catch signals.
4659
// The user-defined signal handler must pass unrecognized signals to this
4660
// routine, and if it returns true (non-zero), then the signal handler must
4661
// return immediately. If the flag "abort_if_unrecognized" is true, then this
4662
// routine will never return false (zero), but instead will execute a VM panic
4663
// routine to kill the process.
4664
//
4665
// If this routine returns false, it is OK to call it again. This allows
4666
// the user-defined signal handler to perform checks either before or after
4667
// the VM performs its own checks. Naturally, the user code would be making
4668
// a serious error if it tried to handle an exception (such as a null check
4669
// or breakpoint) that the VM was generating for its own correct operation.
4670
//
4671
// This routine may recognize any of the following kinds of signals:
4672
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
4673
// It should be consulted by handlers for any of those signals.
4674
//
4675
// The caller of this routine must pass in the three arguments supplied
4676
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
4677
// field of the structure passed to sigaction(). This routine assumes that
4678
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
4679
//
4680
// Note that the VM will print warnings if it detects conflicting signal
4681
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
4682
//
4683
extern "C" JNIEXPORT int
4684
JVM_handle_linux_signal(int signo, siginfo_t* siginfo,
4685
void* ucontext, int abort_if_unrecognized);
4686
4687
void signalHandler(int sig, siginfo_t* info, void* uc) {
4688
assert(info != NULL && uc != NULL, "it must be old kernel");
4689
int orig_errno = errno; // Preserve errno value over signal handler.
4690
JVM_handle_linux_signal(sig, info, uc, true);
4691
errno = orig_errno;
4692
}
4693
4694
4695
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_linux_signal, harmlessly.
bool os::Linux::signal_handlers_are_installed = false;

// For signal-chaining
// Saved pre-existing handlers, one slot per signal number.
struct sigaction os::Linux::sigact[MAXSIGNUM];
// Bit mask: bit (1 << sig) is set iff sigact[sig] holds a saved handler.
unsigned int os::Linux::sigs = 0;
// True when libjsig is preloaded and interposes sigaction().
bool os::Linux::libjsig_is_loaded = false;
// libjsig entry point used to query the application's chained handler.
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Linux::get_signal_action = NULL;
4705
4706
struct sigaction* os::Linux::get_chained_signal_action(int sig) {
4707
struct sigaction *actp = NULL;
4708
4709
if (libjsig_is_loaded) {
4710
// Retrieve the old signal handler from libjsig
4711
actp = (*get_signal_action)(sig);
4712
}
4713
if (actp == NULL) {
4714
// Retrieve the preinstalled signal handler from jvm
4715
actp = get_preinstalled_handler(sig);
4716
}
4717
4718
return actp;
4719
}
4720
4721
// Invoke a previously-installed (chained) handler for 'sig'.  Returns true
// when the signal was handed off (including the SIG_IGN case), false when
// the saved disposition was SIG_DFL, in which case the VM treats the
// signal itself.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // Honor one-shot (SA_RESETHAND) semantics: reset before the call.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
4765
4766
bool os::Linux::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4767
bool chained = false;
4768
// signal-chaining
4769
if (UseSignalChaining) {
4770
struct sigaction *actp = get_chained_signal_action(sig);
4771
if (actp != NULL) {
4772
chained = call_chained_handler(actp, sig, siginfo, context);
4773
}
4774
}
4775
return chained;
4776
}
4777
4778
struct sigaction* os::Linux::get_preinstalled_handler(int sig) {
4779
if ((( (unsigned int)1 << sig ) & sigs) != 0) {
4780
return &sigact[sig];
4781
}
4782
return NULL;
4783
}
4784
4785
void os::Linux::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4786
assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
4787
sigact[sig] = oldAct;
4788
sigs |= (unsigned int)1 << sig;
4789
}
4790
4791
// for diagnostic
4792
int os::Linux::sigflags[MAXSIGNUM];
4793
4794
int os::Linux::get_our_sigflags(int sig) {
4795
assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
4796
return sigflags[sig];
4797
}
4798
4799
void os::Linux::set_our_sigflags(int sig, int flags) {
4800
assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
4801
sigflags[sig] = flags;
4802
}
4803
4804
// Install the VM's handler for 'sig' (set_installed == true) or restore
// the default disposition (false).  Respects AllowUserSignalHandlers and,
// under UseSignalChaining, saves any pre-existing third-party handler so
// it can be invoked later via chained_handler().
void os::Linux::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
                ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
                : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)signalHandler)) {
    // Somebody other than us already installed a handler for this signal.
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;
  if (!set_installed) {
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  } else {
    sigAct.sa_sigaction = signalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Paranoia: the handler we read at the top must still have been in place
  // when we replaced it — otherwise someone raced with us.
  void* oldhand2  = oldAct.sa_sigaction
                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
4850
4851
// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.

void os::Linux::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      // libjsig is preloaded: resolve the rest of its interposition hooks.
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers
      (*begin_signal_setting)();
    }

    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
#if defined(PPC64)
    set_signal_handler(SIGTRAP, true);
#endif
    set_signal_handler(SIGXFSZ, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        if (PrintJNIResolving) {
          tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        }
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        if (PrintJNIResolving) {
          tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        }
        check_signals = false;
      }
    }
  }
}
4911
4912
// This is the fastest way to get thread cpu time on Linux.
4913
// Returns cpu time (user+sys) for any thread, not only for current.
4914
// POSIX compliant clocks are implemented in the kernels 2.6.16+.
4915
// It might work on 2.6.10+ with a special kernel/glibc patch.
4916
// For reference, please, see IEEE Std 1003.1-2004:
4917
// http://www.unix.org/single_unix_specification
4918
4919
jlong os::Linux::fast_thread_cpu_time(clockid_t clockid) {
4920
struct timespec tp;
4921
int rc = os::Linux::clock_gettime(clockid, &tp);
4922
assert(rc == 0, "clock_gettime is expected to return 0 code");
4923
4924
return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
4925
}
4926
4927
/////
// glibc on Linux platform uses non-documented flag
// to indicate, that some special sort of signal
// trampoline is used.
// We will never set this flag, and we should
// ignore this flag in our diagnostic
#ifdef SIGNIFICANT_SIGNAL_MASK
#undef SIGNIFICANT_SIGNAL_MASK
#endif
#define SIGNIFICANT_SIGNAL_MASK (~0x04000000)

// Format 'handler' into 'buf' as "library+0xoffset" when the address can
// be resolved to a loaded shared object, otherwise as a raw pointer.
// Returns 'buf'.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset = 0;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}
4954
4955
// Print a one-line diagnostic for the current disposition of 'sig':
// handler name (or SIG_DFL/SIG_IGN), blocked mask, and sa_flags.  Also
// warns when the flags of a VM-owned handler were changed by third-party
// code.  Used by error reporting and -XX:+PrintSignalHandlers.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;

  sigaction(sig, NULL, &sa);

  // See comment for SIGNIFICANT_SIGNAL_MASK define
  sa.sa_flags &= SIGNIFICANT_SIGNAL_MASK;

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError?
  if(rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig) & SIGNIFICANT_SIGNAL_MASK;
  }

  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if(handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
     handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler
    // check for flags, reset system-used one!
    if((int)sa.sa_flags != os::Linux::get_our_sigflags(sig)) {
      st->print(
        ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
        os::Linux::get_our_sigflags(sig));
    }
  }
  st->cr();
}
5004
5005
5006
// Check a signal's disposition at most once; check_signal_handler() adds
// the signal to check_signal_done when a mismatch is reported.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Linux::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
#if defined(PPC64)
  DO_SIGNAL_CHECK(SIGTRAP);
#endif

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
  DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
}
5043
5044
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// The libc sigaction() resolved via dlsym, bypassing any libjsig
// interposition so we see the kernel's real disposition.
static os_sigaction_t os_sigaction = NULL;

// Verify that the currently installed handler for 'sig' is still the one
// the VM expects; print a warning (and dump all handlers) when a JNI
// application has replaced it or altered its flags.
void os::Linux::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;


  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);


  // See comment for SIGNIFICANT_SIGNAL_MASK define
  act.sa_flags &= SIGNIFICANT_SIGNAL_MASK;

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;


  // Determine which handler the VM expects to own this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  case INTERRUPT_SIGNAL:
    jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if(os::Linux::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Linux::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Linux::get_our_sigflags(sig));
    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
5124
5125
extern void report_error(char* file_name, int line_no, char* title, char* format, ...);
5126
5127
extern bool signal_name(int signo, char* buf, size_t len);
5128
5129
const char* os::exception_name(int exception_code, char* buf, size_t size) {
5130
if (0 < exception_code && exception_code <= SIGRTMAX) {
5131
// signal
5132
if (!signal_name(exception_code, buf, size)) {
5133
jio_snprintf(buf, size, "SIG%d", exception_code);
5134
}
5135
return buf;
5136
} else {
5137
return NULL;
5138
}
5139
}
5140
5141
// this is called _before_ most of the global arguments have been parsed
// Early, flag-independent platform initialization: pid, page size, clocks,
// condvar attributes, and guard-page counts for large-page systems.
void os::init(void) {
  char dummy;   /* used to get a guess on initial stack address */

  // With LinuxThreads the JavaMain thread pid (primordial thread)
  // is different than the pid of the java launcher thread.
  // So, on Linux, the launcher thread pid is passed to the VM
  // via the sun.java.launcher.pid property.
  // Use this property instead of getpid() if it was correctly passed.
  // See bug 6351349.
  pid_t java_launcher_pid = (pid_t) Arguments::sun_java_launcher_pid();

  _initial_pid = (java_launcher_pid > 0) ? java_launcher_pid : getpid();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  ThreadCritical::initialize();

  Linux::set_page_size(sysconf(_SC_PAGESIZE));
  if (Linux::page_size() == -1) {
    fatal(err_msg("os_linux.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  }
  init_page_sizes((size_t) Linux::page_size());

  Linux::initialize_system_info();

  // _main_thread points to the thread that created/loaded the JVM.
  Linux::_main_thread = pthread_self();

  Linux::clock_init();
  initial_time_count = javaTimeNanos();

  // pthread_condattr initialization for monotonic clock
  int status;
  pthread_condattr_t* _condattr = os::Linux::condAttr();
  if ((status = pthread_condattr_init(_condattr)) != 0) {
    fatal(err_msg("pthread_condattr_init: %s", strerror(status)));
  }
  // Only set the clock if CLOCK_MONOTONIC is available
  if (Linux::supports_monotonic_clock()) {
    if ((status = pthread_condattr_setclock(_condattr, CLOCK_MONOTONIC)) != 0) {
      if (status == EINVAL) {
        warning("Unable to use monotonic clock with relative timed-waits" \
                " - changes to the time-of-day clock may have adverse affects");
      } else {
        fatal(err_msg("pthread_condattr_setclock: %s", strerror(status)));
      }
    }
  }
  // else it defaults to CLOCK_REALTIME

  pthread_mutex_init(&dl_mutex, NULL);

  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages.  The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > (int)Linux::vm_default_page_size()) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*Linux::vm_default_page_size()), vm_page_size()) / vm_page_size();
  }

  // retrieve entry point for pthread_setname_np
  Linux::_pthread_setname_np =
    (int(*)(pthread_t, const char*))dlsym(RTLD_DEFAULT, "pthread_setname_np");

}
5211
5212
// To install functions for atexit system call
extern "C" {
  // atexit hook: tear down the perfdata memory region at process exit.
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
5218
5219
// Platform hook: detect and initialize Linux container (cgroup) support.
void os::pd_init_container_support() {
  OSContainer::init();
}
5222
5223
// this is called _after_ the global arguments have been parsed
// Flag-dependent initialization: polling/serialize pages, suspend-resume
// and signal handlers, stack sizing, NUMA, fd limits, and atexit hooks.
// Returns JNI_OK on success, JNI_ERR on a fatal setup failure.
jint os::init_2(void)
{
  Linux::fast_thread_clock_init();

  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  guarantee( polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page" );

  os::set_polling_page( polling_page );

#ifndef PRODUCT
  if(Verbose && PrintMiscellaneous)
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    // Writable page used by serialize_thread_states() instead of fences.
    address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // initialize suspend/resume support - must do this before signal_sets_init()
  if (SR_initialize() != 0) {
    perror("SR_initialize failed");
    return JNI_ERR;
  }

  Linux::signal_sets_init();
  Linux::install_signal_handlers();

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
                    (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size());

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < os::Linux::min_stack_allowed) {
        tty->print_cr("\nThe stack size specified is too small, "
                      "Specify at least %dk",
                      os::Linux::min_stack_allowed/ K);
        return JNI_ERR;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
        vm_page_size()));

  Linux::capture_initial_stack(JavaThread::stack_size_at_create());

#if defined(IA32) && !defined(ZERO)
  workaround_expand_exec_shield_cs_limit();
#endif

  Linux::libpthread_init();
  if (PrintMiscellaneous && (Verbose || WizardMode)) {
     tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
          Linux::glibc_version(), Linux::libpthread_version(),
          Linux::is_floating_stack() ? "floating stack" : "fixed stack");
  }

  if (UseNUMA) {
    if (!Linux::libnuma_init()) {
      UseNUMA = false;
    } else {
      if ((Linux::numa_max_node() < 1)) {
        // There's only one node(they start from 0), disable NUMA.
        UseNUMA = false;
      }
    }
    // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
    // we can make the adaptive lgrp chunk resizing work. If the user specified
    // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
    // disable adaptive resizing.
    if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
      if (FLAG_IS_DEFAULT(UseNUMA)) {
        UseNUMA = false;
      } else {
        if (FLAG_IS_DEFAULT(UseLargePages) &&
            FLAG_IS_DEFAULT(UseSHM) &&
            FLAG_IS_DEFAULT(UseHugeTLBFS)) {
          UseLargePages = false;
        } else {
          warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing");
          UseAdaptiveSizePolicy = false;
          UseAdaptiveNUMAChunkSizing = false;
        }
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  // Initialize lock used to serialize thread creation (see os::create_thread)
  Linux::set_createThread_lock(new Mutex(Mutex::leaf, "createThread_lock", false));

  // at-exit methods are called in the reverse order of their registration.
  // atexit functions are called on return from main or as a result of a
  // call to exit(3C). There can be only 32 of these functions registered
  // and atexit() does not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // initialize thread priority policy
  prio_init();

  return JNI_OK;
}
5373
5374
// Mark the polling page as unreadable
5375
void os::make_polling_page_unreadable(void) {
5376
if( !guard_memory((char*)_polling_page, Linux::page_size()) )
5377
fatal("Could not disable polling page");
5378
};
5379
5380
// Mark the polling page as readable
5381
void os::make_polling_page_readable(void) {
5382
if( !linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) {
5383
fatal("Could not enable polling page");
5384
}
5385
};
5386
5387
// Count the processors set in |cpus|, considering only ids below the
// number of configured processors.
static int os_cpu_count(const cpu_set_t* cpus) {
  int count = 0;
  const int limit = os::processor_count();  // loop-invariant upper bound
  for (int i = 0; i < limit; i++) {
    if (CPU_ISSET(i, cpus)) {
      count++;
    }
  }
  return count;
}
5397
5398
// Get the current number of available processors for this process.
5399
// This value can change at any time during a process's lifetime.
5400
// sched_getaffinity gives an accurate answer as it accounts for cpusets.
5401
// If anything goes wrong we fallback to returning the number of online
5402
// processors - which can be greater than the number available to the process.
5403
int os::Linux::active_processor_count() {
5404
cpu_set_t cpus; // can represent at most 1024 (CPU_SETSIZE) processors
5405
int cpus_size = sizeof(cpu_set_t);
5406
int cpu_count = 0;
5407
5408
// pid 0 means the current thread - which we have to assume represents the process
5409
if (sched_getaffinity(0, cpus_size, &cpus) == 0) {
5410
cpu_count = os_cpu_count(&cpus);
5411
if (PrintActiveCpus) {
5412
tty->print_cr("active_processor_count: sched_getaffinity processor count: %d", cpu_count);
5413
}
5414
}
5415
else {
5416
cpu_count = ::sysconf(_SC_NPROCESSORS_ONLN);
5417
warning("sched_getaffinity failed (%s)- using online processor count (%d) "
5418
"which may exceed available processors", strerror(errno), cpu_count);
5419
}
5420
5421
assert(cpu_count > 0 && cpu_count <= os::processor_count(), "sanity check");
5422
return cpu_count;
5423
}
5424
5425
// Determine the active processor count from one of
5426
// three different sources:
5427
//
5428
// 1. User option -XX:ActiveProcessorCount
5429
// 2. kernel os calls (sched_getaffinity or sysconf(_SC_NPROCESSORS_ONLN)
5430
// 3. extracted from cgroup cpu subsystem (shares and quotas)
5431
//
5432
// Option 1, if specified, will always override.
5433
// If the cgroup subsystem is active and configured, we
5434
// will return the min of the cgroup and option 2 results.
5435
// This is required since tools, such as numactl, that
5436
// alter cpu affinity do not update cgroup subsystem
5437
// cpuset configuration files.
5438
int os::active_processor_count() {
5439
// User has overridden the number of active processors
5440
if (ActiveProcessorCount > 0) {
5441
if (PrintActiveCpus) {
5442
tty->print_cr("active_processor_count: "
5443
"active processor count set by user : %d",
5444
ActiveProcessorCount);
5445
}
5446
return ActiveProcessorCount;
5447
}
5448
5449
int active_cpus;
5450
if (OSContainer::is_containerized()) {
5451
active_cpus = OSContainer::active_processor_count();
5452
if (PrintActiveCpus) {
5453
tty->print_cr("active_processor_count: determined by OSContainer: %d",
5454
active_cpus);
5455
}
5456
} else {
5457
active_cpus = os::Linux::active_processor_count();
5458
}
5459
5460
return active_cpus;
5461
}
5462
5463
void os::set_native_thread_name(const char *name) {
5464
if (Linux::_pthread_setname_np) {
5465
char buf [16]; // according to glibc manpage, 16 chars incl. '/0'
5466
snprintf(buf, sizeof(buf), "%s", name);
5467
buf[sizeof(buf) - 1] = '\0';
5468
const int rc = Linux::_pthread_setname_np(pthread_self(), buf);
5469
// ERANGE should not happen; all other errors should just be ignored.
5470
assert(rc != ERANGE, "pthread_setname_np failed");
5471
}
5472
}
5473
5474
bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented on Linux.
  return false;
}
5478
5479
bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented on Linux.
  return false;
}
5483
5484
///
5485
5486
void os::SuspendedThreadTask::internal_do_task() {
5487
if (do_suspend(_thread->osthread())) {
5488
SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
5489
do_task(context);
5490
do_resume(_thread->osthread());
5491
}
5492
}
5493
5494
class PcFetcher : public os::SuspendedThreadTask {
5495
public:
5496
PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
5497
ExtendedPC result();
5498
protected:
5499
void do_task(const os::SuspendedThreadTaskContext& context);
5500
private:
5501
ExtendedPC _epc;
5502
};
5503
5504
// Returns the captured PC; only valid once the task has completed.
ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}
5508
5509
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    // Extract the PC from the saved user context.
    _epc = os::Linux::ucontext_get_pc((ucontext_t *) context.ucontext());
  } else {
    // A NULL context is unexpected; double-check this is the VMThread.
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}
5519
5520
// Suspends the target using the signal mechanism and then grabs the PC before
5521
// resuming the target. Used by the flat-profiler only
5522
ExtendedPC os::get_thread_pc(Thread* thread) {
5523
// Make sure that it is called by the watcher for the VMThread
5524
assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
5525
assert(thread->is_VM_thread(), "Can only be called for VMThread");
5526
5527
PcFetcher fetcher(thread);
5528
fetcher.run();
5529
return fetcher.result();
5530
}
5531
5532
int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
5533
{
5534
if (is_NPTL()) {
5535
return pthread_cond_timedwait(_cond, _mutex, _abstime);
5536
} else {
5537
// 6292965: LinuxThreads pthread_cond_timedwait() resets FPU control
5538
// word back to default 64bit precision if condvar is signaled. Java
5539
// wants 53bit precision. Save and restore current value.
5540
int fpu = get_fpu_control_word();
5541
int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
5542
set_fpu_control_word(fpu);
5543
return status;
5544
}
5545
}
5546
5547
////////////////////////////////////////////////////////////////////////////////
5548
// debug support
5549
5550
// Print the symbol (or load-module offset) for |addr| on |st|; with
// -XX:+Verbose also disassemble a few bytes around the address.
// Returns false if dladdr() cannot resolve the address at all.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo) == 0) {
    return false;
  }

  st->print(PTR_FORMAT ": ", addr);
  if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
    // Symbol name plus offset from the symbol's start address.
    st->print("%s+%#x", dlinfo.dli_sname,
              addr - (intptr_t)dlinfo.dli_saddr);
  } else if (dlinfo.dli_fbase != NULL) {
    // No symbol; report the offset from the load module's base.
    st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
  } else {
    st->print("<absolute address>");
  }
  if (dlinfo.dli_fname != NULL) {
    st->print(" in %s", dlinfo.dli_fname);
  }
  if (dlinfo.dli_fbase != NULL) {
    st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
  }
  st->cr();

  if (Verbose) {
    // Decode some bytes around the PC, clamped to the containing page and
    // to the enclosing symbol's boundaries where they can be determined.
    address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
    address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
    address lowest = (address) dlinfo.dli_sname;
    if (!lowest)  lowest = (address) dlinfo.dli_fbase;
    if (begin < lowest)  begin = lowest;
    Dl_info dlinfo2;
    if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
        && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
      end = (address) dlinfo2.dli_saddr;
    }
    Disassembler::decode(begin, end, st);
  }
  return true;
}
5588
5589
////////////////////////////////////////////////////////////////////////////////
5590
// misc
5591
5592
// This does not do anything on Linux. This is basically a hook for being
5593
// able to use structured exception handling (thread-local exception filters)
5594
// on, e.g., Win32.
5595
void
5596
os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
5597
JavaCallArguments* args, Thread* thread) {
5598
f(value, method, args, thread);
5599
}
5600
5601
// No OS-level statistics to print on Linux.
void os::print_statistics() {
}
5603
5604
int os::message_box(const char* title, const char* message) {
5605
int i;
5606
fdStream err(defaultStream::error_fd());
5607
for (i = 0; i < 78; i++) err.print_raw("=");
5608
err.cr();
5609
err.print_raw_cr(title);
5610
for (i = 0; i < 78; i++) err.print_raw("-");
5611
err.cr();
5612
err.print_raw_cr(message);
5613
for (i = 0; i < 78; i++) err.print_raw("=");
5614
err.cr();
5615
5616
char buf[16];
5617
// Prevent process from exiting upon "read error" without consuming all CPU
5618
while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
5619
5620
return buf[0] == 'y' || buf[0] == 'Y';
5621
}
5622
5623
int os::stat(const char *path, struct stat *sbuf) {
5624
char pathbuf[MAX_PATH];
5625
if (strlen(path) > MAX_PATH - 1) {
5626
errno = ENAMETOOLONG;
5627
return -1;
5628
}
5629
os::native_path(strcpy(pathbuf, path));
5630
return ::stat(pathbuf, sbuf);
5631
}
5632
5633
// C-heap verification is not implemented on Linux; always reports OK.
bool os::check_heap(bool force) {
  return true;
}
5636
5637
// Thin wrapper over the C library vsnprintf.
int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
  return ::vsnprintf(buf, count, format, args);
}
5640
5641
// Is a (classpath) directory empty?
5642
bool os::dir_is_empty(const char* path) {
5643
DIR *dir = NULL;
5644
struct dirent *ptr;
5645
5646
dir = opendir(path);
5647
if (dir == NULL) return true;
5648
5649
/* Scan the directory */
5650
bool result = true;
5651
while (result && (ptr = readdir(dir)) != NULL) {
5652
if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
5653
result = false;
5654
}
5655
}
5656
closedir(dir);
5657
return result;
5658
}
5659
5660
// This code originates from JDK's sysOpen and open64_w
5661
// from src/solaris/hpi/src/system_md.c
5662
5663
#ifndef O_DELETE
5664
#define O_DELETE 0x10000
5665
#endif
5666
5667
#ifdef __ANDROID__
// Bionic has no separate open64; file offsets are already 64-bit.
int open64(const char* pathName, int flags, int mode) {
  return ::open(pathName, flags, mode);
}
#endif //__ANDROID__
5672
5673
// Open a file. Unlink the file immediately after open returns
5674
// if the specified oflag has the O_DELETE flag set.
5675
// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
5676
5677
int os::open(const char *path, int oflag, int mode) {
5678
5679
if (strlen(path) > MAX_PATH - 1) {
5680
errno = ENAMETOOLONG;
5681
return -1;
5682
}
5683
int fd;
5684
int o_delete = (oflag & O_DELETE);
5685
oflag = oflag & ~O_DELETE;
5686
5687
fd = ::open64(path, oflag, mode);
5688
if (fd == -1) return -1;
5689
5690
//If the open succeeded, the file might still be a directory
5691
{
5692
struct stat64 buf64;
5693
int ret = ::fstat64(fd, &buf64);
5694
int st_mode = buf64.st_mode;
5695
5696
if (ret != -1) {
5697
if ((st_mode & S_IFMT) == S_IFDIR) {
5698
errno = EISDIR;
5699
::close(fd);
5700
return -1;
5701
}
5702
} else {
5703
::close(fd);
5704
return -1;
5705
}
5706
}
5707
5708
/*
5709
* All file descriptors that are opened in the JVM and not
5710
* specifically destined for a subprocess should have the
5711
* close-on-exec flag set. If we don't set it, then careless 3rd
5712
* party native code might fork and exec without closing all
5713
* appropriate file descriptors (e.g. as we do in closeDescriptors in
5714
* UNIXProcess.c), and this in turn might:
5715
*
5716
* - cause end-of-file to fail to be detected on some file
5717
* descriptors, resulting in mysterious hangs, or
5718
*
5719
* - might cause an fopen in the subprocess to fail on a system
5720
* suffering from bug 1085341.
5721
*
5722
* (Yes, the default setting of the close-on-exec flag is a Unix
5723
* design flaw)
5724
*
5725
* See:
5726
* 1085341: 32-bit stdio routines should support file descriptors >255
5727
* 4843136: (process) pipe file descriptor from Runtime.exec not being closed
5728
* 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
5729
*/
5730
#ifdef FD_CLOEXEC
5731
{
5732
int flags = ::fcntl(fd, F_GETFD);
5733
if (flags != -1)
5734
::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
5735
}
5736
#endif
5737
5738
if (o_delete != 0) {
5739
::unlink(path);
5740
}
5741
return fd;
5742
}
5743
5744
#ifdef __ANDROID__
5745
#define S_IREAD S_IRUSR
5746
#define S_IWRITE S_IWUSR
5747
#endif
5748
// create binary file, rewriting existing file if required
5749
int os::create_binary_file(const char* path, bool rewrite_existing) {
5750
int oflags = O_WRONLY | O_CREAT;
5751
if (!rewrite_existing) {
5752
oflags |= O_EXCL;
5753
}
5754
return ::open64(path, oflags, S_IREAD | S_IWRITE);
5755
}
5756
5757
// return current position of file pointer
5758
jlong os::current_file_offset(int fd) {
5759
return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
5760
}
5761
5762
// move file pointer to the specified offset
5763
jlong os::seek_to_file_offset(int fd, jlong offset) {
5764
return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
5765
}
5766
5767
// This code originates from JDK's sysAvailable
5768
// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
5769
5770
int os::available(int fd, jlong *bytes) {
5771
jlong cur, end;
5772
int mode;
5773
struct stat64 buf64;
5774
5775
if (::fstat64(fd, &buf64) >= 0) {
5776
mode = buf64.st_mode;
5777
if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
5778
/*
5779
* XXX: is the following call interruptible? If so, this might
5780
* need to go through the INTERRUPT_IO() wrapper as for other
5781
* blocking, interruptible calls in this file.
5782
*/
5783
int n;
5784
if (::ioctl(fd, FIONREAD, &n) >= 0) {
5785
*bytes = n;
5786
return 1;
5787
}
5788
}
5789
}
5790
if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
5791
return 0;
5792
} else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
5793
return 0;
5794
} else if (::lseek64(fd, cur, SEEK_SET) == -1) {
5795
return 0;
5796
}
5797
*bytes = end - cur;
5798
return 1;
5799
}
5800
5801
int os::socket_available(int fd, jint *pbytes) {
  // Linux doc says EINTR not returned, unlike Solaris.
  int ret = ::ioctl(fd, FIONREAD, pbytes);

  // Note: ioctl can return 0 when successful, but JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the jdk.
  return (ret < 0) ? 0 : 1;
}
5809
5810
// Map a block of memory.
5811
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
5812
char *addr, size_t bytes, bool read_only,
5813
bool allow_exec) {
5814
int prot;
5815
int flags = MAP_PRIVATE;
5816
5817
if (read_only) {
5818
prot = PROT_READ;
5819
} else {
5820
prot = PROT_READ | PROT_WRITE;
5821
}
5822
5823
if (allow_exec) {
5824
prot |= PROT_EXEC;
5825
}
5826
5827
if (addr != NULL) {
5828
flags |= MAP_FIXED;
5829
}
5830
5831
char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5832
fd, file_offset);
5833
if (mapped_address == MAP_FAILED) {
5834
return NULL;
5835
}
5836
return mapped_address;
5837
}
5838
5839
5840
// Remap a block of memory.
5841
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5842
char *addr, size_t bytes, bool read_only,
5843
bool allow_exec) {
5844
// same as map_memory() on this OS
5845
return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5846
allow_exec);
5847
}
5848
5849
5850
// Unmap a block of memory.
5851
bool os::pd_unmap_memory(char* addr, size_t bytes) {
5852
return munmap(addr, bytes) == 0;
5853
}
5854
5855
static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);
5856
5857
// Return the CPU-time clock id of the given thread.
static clockid_t thread_cpu_clockid(Thread* thread) {
  pthread_t tid = thread->osthread()->pthread_id();
  clockid_t clockid;

  int rc = os::Linux::pthread_getcpuclockid(tid, &clockid);
  assert(rc == 0, "pthread_getcpuclockid is expected to return 0 code");
  return clockid;
}
5866
5867
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5868
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
5869
// of a thread.
5870
//
5871
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
5872
// the fast estimate available on the platform.
5873
5874
// Fast-path CPU time for the current thread; falls back to parsing
// /proc when clock_gettime(CLOCK_THREAD_CPUTIME_ID) is unavailable.
jlong os::current_thread_cpu_time() {
  if (os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
  } else {
    // return user + sys since the cost is the same
    return slow_thread_cpu_time(Thread::current(), true /* user + sys */);
  }
}
5882
5883
jlong os::thread_cpu_time(Thread* thread) {
  // Consistent with what current_thread_cpu_time() returns.
  if (os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread));
  } else {
    return slow_thread_cpu_time(thread, true /* user + sys */);
  }
}
5891
5892
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  // The fast path reports user+sys only, so it is usable only when the
  // caller asked for combined time.
  if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
  } else {
    return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time);
  }
}
5899
5900
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  // Fast path only reports user+sys; otherwise parse /proc.
  if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread));
  } else {
    return slow_thread_cpu_time(thread, user_sys_cpu_time);
  }
}
5907
5908
//
5909
// -1 on error.
5910
//
5911
5912
PRAGMA_DIAG_PUSH
5913
PRAGMA_FORMAT_NONLITERAL_IGNORED
5914
static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5915
pid_t tid = thread->osthread()->thread_id();
5916
char *s;
5917
char stat[2048];
5918
int statlen;
5919
char proc_name[64];
5920
int count;
5921
long sys_time, user_time;
5922
char cdummy;
5923
int idummy;
5924
long ldummy;
5925
FILE *fp;
5926
5927
snprintf(proc_name, 64, "/proc/self/task/%d/stat", tid);
5928
fp = fopen(proc_name, "r");
5929
if ( fp == NULL ) return -1;
5930
statlen = fread(stat, 1, 2047, fp);
5931
stat[statlen] = '\0';
5932
fclose(fp);
5933
5934
// Skip pid and the command string. Note that we could be dealing with
5935
// weird command names, e.g. user could decide to rename java launcher
5936
// to "java 1.4.2 :)", then the stat file would look like
5937
// 1234 (java 1.4.2 :)) R ... ...
5938
// We don't really need to know the command string, just find the last
5939
// occurrence of ")" and then start parsing from there. See bug 4726580.
5940
s = strrchr(stat, ')');
5941
if (s == NULL ) return -1;
5942
5943
// Skip blank chars
5944
do s++; while (isspace(*s));
5945
5946
count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
5947
&cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
5948
&ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
5949
&user_time, &sys_time);
5950
if ( count != 13 ) return -1;
5951
if (user_sys_cpu_time) {
5952
return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
5953
} else {
5954
return (jlong)user_time * (1000000000 / clock_tics_per_sec);
5955
}
5956
}
5957
PRAGMA_DIAG_POP
5958
5959
// Describe the properties of the current-thread CPU timer to JVMTI.
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
5965
5966
// Describe the properties of the per-thread CPU timer to JVMTI.
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
5972
5973
// Per-thread CPU time is always available on Linux.
bool os::is_thread_cpu_time_supported() {
  return true;
}
5976
5977
// System loadavg support. Returns -1 if load average cannot be obtained.
5978
// Linux doesn't yet have a (official) notion of processor sets,
5979
// so just return the system wide load average.
5980
int os::loadavg(double loadavg[], int nelem) {
5981
#if !defined(__UCLIBC__) && !defined(__ANDROID__)
5982
return ::getloadavg(loadavg, nelem);
5983
#else
5984
return -1;
5985
#endif
5986
}
5987
5988
void os::pause() {
5989
char filename[MAX_PATH];
5990
if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5991
jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5992
} else {
5993
jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5994
}
5995
5996
int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5997
if (fd != -1) {
5998
struct stat buf;
5999
::close(fd);
6000
while (::stat(filename, &buf) == 0) {
6001
(void)::poll(NULL, 0, 100);
6002
}
6003
} else {
6004
jio_fprintf(stderr,
6005
"Could not open pause file '%s', continuing immediately.\n", filename);
6006
}
6007
}
6008
6009
6010
// Refer to the comments in os_solaris.cpp park-unpark.
6011
//
6012
// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
6013
// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
6014
// For specifics regarding the bug see GLIBC BUGID 261237 :
6015
// http://www.mail-archive.com/[email protected]/msg10837.html.
6016
// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
6017
// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
6018
// is used. (The simple C test-case provided in the GLIBC bug report manifests the
6019
// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
6020
// and monitorenter when we're using 1-0 locking. All those operations may result in
6021
// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
6022
// of libpthread avoids the problem, but isn't practical.
6023
//
6024
// Possible remedies:
6025
//
6026
// 1. Establish a minimum relative wait time. 50 to 100 msecs seems to work.
6027
// This is palliative and probabilistic, however. If the thread is preempted
6028
// between the call to compute_abstime() and pthread_cond_timedwait(), more
6029
// than the minimum period may have passed, and the abstime may be stale (in the
6030
// past) resulting in a hang. Using this technique reduces the odds of a hang
6031
// but the JVM is still vulnerable, particularly on heavily loaded systems.
6032
//
6033
// 2. Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
6034
// of the usual flag-condvar-mutex idiom. The write side of the pipe is set
6035
// NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
6036
// reduces to poll()+read(). This works well, but consumes 2 FDs per extant
6037
// thread.
6038
//
6039
// 3. Embargo pthread_cond_timedwait() and implement a native "chron" thread
6040
// that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
6041
// a timeout request to the chron thread and then blocking via pthread_cond_wait().
6042
// This also works well. In fact it avoids kernel-level scalability impediments
6043
// on certain platforms that don't handle lots of active pthread_cond_timedwait()
6044
// timers in a graceful fashion.
6045
//
6046
// 4. When the abstime value is in the past it appears that control returns
6047
// correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
6048
// Subsequent timedwait/wait calls may hang indefinitely. Given that, we
6049
// can avoid the problem by reinitializing the condvar -- by cond_destroy()
6050
// followed by cond_init() -- after all calls to pthread_cond_timedwait().
6051
// It may be possible to avoid reinitialization by checking the return
6052
// value from pthread_cond_timedwait(). In addition to reinitializing the
6053
// condvar we must establish the invariant that cond_signal() is only called
6054
// within critical sections protected by the adjunct mutex. This prevents
6055
// cond_signal() from "seeing" a condvar that's in the midst of being
6056
// reinitialized or that is corrupt. Sadly, this invariant obviates the
6057
// desirable signal-after-unlock optimization that avoids futile context switching.
6058
//
6059
// I'm also concerned that some versions of NPTL might allocate an auxiliary
6060
// structure when a condvar is used or initialized. cond_destroy() would
6061
// release the helper structure. Our reinitialize-after-timedwait fix
6062
// put excessive stress on malloc/free and locks protecting the c-heap.
6063
//
6064
// We currently use (4). See the WorkAroundNTPLTimedWaitHang flag.
6065
// It may be possible to refine (4) by checking the kernel and NPTL versions
6066
// and only enabling the work-around for vulnerable environments.
6067
6068
// utility to compute the abstime argument to timedwait:
6069
// millis is the relative timeout time
6070
// abstime will be the absolute timeout time
6071
// TODO: replace compute_abstime() with unpackTime()
6072
6073
static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
6074
if (millis < 0) millis = 0;
6075
6076
jlong seconds = millis / 1000;
6077
millis %= 1000;
6078
if (seconds > 50000000) { // see man cond_timedwait(3T)
6079
seconds = 50000000;
6080
}
6081
6082
if (os::Linux::supports_monotonic_clock()) {
6083
struct timespec now;
6084
int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
6085
assert_status(status == 0, status, "clock_gettime");
6086
abstime->tv_sec = now.tv_sec + seconds;
6087
long nanos = now.tv_nsec + millis * NANOSECS_PER_MILLISEC;
6088
if (nanos >= NANOSECS_PER_SEC) {
6089
abstime->tv_sec += 1;
6090
nanos -= NANOSECS_PER_SEC;
6091
}
6092
abstime->tv_nsec = nanos;
6093
} else {
6094
struct timeval now;
6095
int status = gettimeofday(&now, NULL);
6096
assert(status == 0, "gettimeofday");
6097
abstime->tv_sec = now.tv_sec + seconds;
6098
long usec = now.tv_usec + millis * 1000;
6099
if (usec >= 1000000) {
6100
abstime->tv_sec += 1;
6101
usec -= 1000000;
6102
}
6103
abstime->tv_nsec = usec * 1000;
6104
}
6105
return abstime;
6106
}
6107
6108
6109
// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
6110
// Conceptually TryPark() should be equivalent to park(0).
6111
6112
int os::PlatformEvent::TryPark() {
6113
for (;;) {
6114
const int v = _Event ;
6115
guarantee ((v == 0) || (v == 1), "invariant") ;
6116
if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
6117
}
6118
}
6119
6120
void os::PlatformEvent::park() { // AKA "down()"
6121
// Invariant: Only the thread associated with the Event/PlatformEvent
6122
// may call park().
6123
// TODO: assert that _Assoc != NULL or _Assoc == Self
6124
int v ;
6125
for (;;) {
6126
v = _Event ;
6127
if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
6128
}
6129
guarantee (v >= 0, "invariant") ;
6130
if (v == 0) {
6131
// Do this the hard way by blocking ...
6132
int status = pthread_mutex_lock(_mutex);
6133
assert_status(status == 0, status, "mutex_lock");
6134
guarantee (_nParked == 0, "invariant") ;
6135
++ _nParked ;
6136
while (_Event < 0) {
6137
status = pthread_cond_wait(_cond, _mutex);
6138
// for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
6139
// Treat this the same as if the wait was interrupted
6140
if (status == ETIME) { status = EINTR; }
6141
assert_status(status == 0 || status == EINTR, status, "cond_wait");
6142
}
6143
-- _nParked ;
6144
6145
_Event = 0 ;
6146
status = pthread_mutex_unlock(_mutex);
6147
assert_status(status == 0, status, "mutex_unlock");
6148
// Paranoia to ensure our locked and lock-free paths interact
6149
// correctly with each other.
6150
OrderAccess::fence();
6151
}
6152
guarantee (_Event >= 0, "invariant") ;
6153
}
6154
6155
// Timed park. Returns OS_OK if the event was set (or already pending),
// OS_TIMEOUT if the wait timed out.
int os::PlatformEvent::park(jlong millis) {
  guarantee(_nParked == 0, "invariant");

  // Atomically decrement _Event; a prior unpark satisfies the park
  // without blocking.
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");
  if (v != 0) return OS_OK;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee(_nParked == 0, "invariant");
  ++_nParked;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // The underlying Solaris implementation, cond_timedwait, admits
  // spurious/premature wakeups, but the JLS/JVM spec prevents the
  // JVM from making those visible to Java code. As such, we must
  // filter out spurious wakeups. We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      // Reinitialize the condvar to dodge the NPTL timedwait hang
      // (see the commentary above the park-unpark section).
      pthread_cond_destroy(_cond);
      pthread_cond_init(_cond, os::Linux::condAttr());
    }
    assert_status(status == 0 || status == EINTR ||
                  status == ETIME || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break;                  // previous semantics
    if (status == ETIME || status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  if (_Event >= 0) {
    ret = OS_OK;
  }
  _Event = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert(_nParked == 0, "invariant");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other.
  OrderAccess::fence();
  return ret;
}
6218
6219
void os::PlatformEvent::unpark() {
  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  // That is, we can safely transition _Event from -1 to either
  // 0 or 1. Forcing 1 is slightly more efficient for back-to-back
  // unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Fast path: xchg makes the permit available; if the previous value was
  // 0 or 1 no thread was blocked, so there is nothing to signal.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  // Previous value was -1: a thread is blocked (or committing to block).
  // Wait for the thread associated with the event to vacate
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int AnyWaiters = _nParked;
  assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
    // NPTL-hang workaround: signal while still holding the mutex, and clear
    // AnyWaiters so the post-unlock signal below is skipped.
    AnyWaiters = 0;
    pthread_cond_signal(_cond);
  }
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  if (AnyWaiters != 0) {
    status = pthread_cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }

  // Note that we signal() _after dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups. In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim will
  // simply re-test the condition and re-park itself.
}
6259
6260
6261
// JSR166
6262
// -------------------------------------------------------
6263
6264
/*
6265
* The solaris and linux implementations of park/unpark are fairly
6266
* conservative for now, but can be improved. They currently use a
6267
* mutex/condvar pair, plus a count.
6268
* Park decrements count if > 0, else does a condvar wait. Unpark
6269
* sets count to 1 and signals condvar. Only one thread ever waits
6270
* on the condvar. Contention seen when trying to park implies that someone
6271
* is unparking you, so don't wait. And spurious returns are fine, so there
6272
* is no need to track notifications.
6273
*/
6274
6275
/*
6276
* This code is common to linux and solaris and will be moved to a
6277
* common place in dolphin.
6278
*
6279
* The passed in time value is either a relative time in nanoseconds
6280
* or an absolute time in milliseconds. Either way it has to be unpacked
6281
* into suitable seconds and nanoseconds components and stored in the
6282
* given timespec structure.
6283
* Given time is a 64-bit value and the time_t used in the timespec is only
6284
* a signed-32-bit value (except on 64-bit Linux) we have to watch for
6285
* overflow if times way in the future are given. Further on Solaris versions
6286
* prior to 10 there is a restriction (see cond_timedwait) that the specified
6287
* number of seconds, in abstime, is less than current_time + 100,000,000.
6288
* As it will be 28 years before "now + 100000000" will overflow we can
6289
* ignore overflow and just impose a hard-limit on seconds using the value
6290
* of "now + 100,000,000". This places a limit on the timeout of about 3.17
6291
* years from "now".
6292
*/
6293
6294
// Unpack the jlong 'time' argument of Parker::park() into *absTime.
// 'time' is either a relative duration in nanoseconds (isAbsolute == false)
// or an absolute deadline in milliseconds (isAbsolute == true); see the
// block comment above. The result is clamped to now + MAX_SECS seconds so
// a far-future timeout cannot overflow a 32-bit time_t.
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");
  time_t max_secs = 0;

  if (!os::Linux::supports_monotonic_clock() || isAbsolute) {
    // Absolute deadlines -- and relative waits when no monotonic clock is
    // available -- are expressed against the wall clock.
    struct timeval now;
    int status = gettimeofday(&now, NULL);
    assert(status == 0, "gettimeofday");

    max_secs = now.tv_sec + MAX_SECS;

    if (isAbsolute) {
      jlong secs = time / 1000;          // millis -> whole seconds
      if (secs > max_secs) {
        absTime->tv_sec = max_secs;      // clamp far-future deadlines
      } else {
        absTime->tv_sec = secs;
      }
      absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
    } else {
      jlong secs = time / NANOSECS_PER_SEC;
      if (secs >= MAX_SECS) {
        absTime->tv_sec = max_secs;
        absTime->tv_nsec = 0;
      } else {
        absTime->tv_sec = now.tv_sec + secs;
        absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
        // Normalize: the nsec field may have crossed one full second.
        if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
          absTime->tv_nsec -= NANOSECS_PER_SEC;
          ++absTime->tv_sec; // note: this must be <= max_secs
        }
      }
    }
  } else {
    // must be relative using monotonic clock
    struct timespec now;
    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
    assert_status(status == 0, status, "clock_gettime");
    max_secs = now.tv_sec + MAX_SECS;
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    } else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_nsec;
      // Normalize the nsec overflow, as above.
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  // Post-conditions: a normalized timespec no later than now + MAX_SECS.
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}
6351
6352
// Block the current JavaThread until a permit is available (set by unpark()),
// the thread is interrupted, the timeout expires (time > 0), or a spurious
// wakeup occurs. 'time' is a relative nanosecond duration or, when isAbsolute,
// an absolute millisecond deadline (decoded by unpackTime above).
void Parker::park(bool isAbsolute, jlong time) {
  // Ideally we'd do something useful while spinning, such
  // as calling unpackTime().

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(0, &_counter) > 0) return;

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's an interrupt pending.
  // Check interrupt before trying to wait
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }


  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking. Also check interrupt before trying wait.
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  // From here on we hold _mutex and must unlock it on every exit path.
  int status ;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // _cur_index records which condvar (relative vs absolute clock) we block
  // on, so unpark() can signal the right one; -1 means "not parked".
  assert(_cur_index == -1, "invariant");
  if (time == 0) {
    _cur_index = REL_INDEX; // arbitrary choice when not timed
    status = pthread_cond_wait (&_cond[_cur_index], _mutex) ;
  } else {
    _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
    status = os::Linux::safe_cond_timedwait (&_cond[_cur_index], _mutex, &absTime) ;
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      // NPTL-hang workaround: recreate the condvar after a failed timed wait.
      pthread_cond_destroy (&_cond[_cur_index]) ;
      pthread_cond_init (&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
    }
  }
  _cur_index = -1;
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  // Restore the signal mask blocked above.
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  // Consume the permit (if any) and release the leaf lock.
  _counter = 0 ;
  status = pthread_mutex_unlock(_mutex) ;
  assert_status(status == 0, status, "invariant") ;
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
}
6452
6453
void Parker::unpark() {
6454
int s, status ;
6455
status = pthread_mutex_lock(_mutex);
6456
assert (status == 0, "invariant") ;
6457
s = _counter;
6458
_counter = 1;
6459
if (s < 1) {
6460
// thread might be parked
6461
if (_cur_index != -1) {
6462
// thread is definitely parked
6463
if (WorkAroundNPTLTimedWaitHang) {
6464
status = pthread_cond_signal (&_cond[_cur_index]);
6465
assert (status == 0, "invariant");
6466
status = pthread_mutex_unlock(_mutex);
6467
assert (status == 0, "invariant");
6468
} else {
6469
// must capture correct index before unlocking
6470
int index = _cur_index;
6471
status = pthread_mutex_unlock(_mutex);
6472
assert (status == 0, "invariant");
6473
status = pthread_cond_signal (&_cond[index]);
6474
assert (status == 0, "invariant");
6475
}
6476
} else {
6477
pthread_mutex_unlock(_mutex);
6478
assert (status == 0, "invariant") ;
6479
}
6480
} else {
6481
pthread_mutex_unlock(_mutex);
6482
assert (status == 0, "invariant") ;
6483
}
6484
}
6485
6486
6487
extern char** environ;
6488
6489
// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
  // Argument vector for "/bin/sh -c <cmd>".
  const char * argv[4] = {"sh", "-c", cmd, NULL};

  pid_t pid ;

  // vfork() suspends the parent and shares its address space with the child,
  // so the child branch below must confine itself to execve()/_exit().
  if (use_vfork_if_available) {
    pid = vfork();
  } else {
    pid = fork();
  }

  if (pid < 0) {
    // fork failed
    return -1;

  } else if (pid == 0) {
    // child process

    execve("/bin/sh", (char* const*)argv, environ);

    // execve failed
    _exit(-1);

  } else {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited. The loop restarts waitpid() on EINTR
    // so an unrelated signal does not abort the wait.
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
      case ECHILD: return 0;
      case EINTR: break;
      default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through
      return status;
    }
  }
}
6548
6549
// is_headless_jre()
6550
//
6551
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
6552
// in order to report if we are running in a headless jre
6553
//
6554
// Since JDK8 xawt/libmawt.so was moved into the same directory
6555
// as libawt.so, and renamed libawt_xawt.so
6556
//
6557
bool os::is_headless_jre() {
6558
struct stat statbuf;
6559
char buf[MAXPATHLEN];
6560
char libmawtpath[MAXPATHLEN];
6561
const char *xawtstr = "/xawt/libmawt.so";
6562
const char *new_xawtstr = "/libawt_xawt.so";
6563
char *p;
6564
6565
// Get path to libjvm.so
6566
os::jvm_path(buf, sizeof(buf));
6567
6568
// Get rid of libjvm.so
6569
p = strrchr(buf, '/');
6570
if (p == NULL) return false;
6571
else *p = '\0';
6572
6573
// Get rid of client or server
6574
p = strrchr(buf, '/');
6575
if (p == NULL) return false;
6576
else *p = '\0';
6577
6578
// check xawt/libmawt.so
6579
strcpy(libmawtpath, buf);
6580
strcat(libmawtpath, xawtstr);
6581
if (::stat(libmawtpath, &statbuf) == 0) return false;
6582
6583
// check libawt_xawt.so
6584
strcpy(libmawtpath, buf);
6585
strcat(libmawtpath, new_xawtstr);
6586
if (::stat(libmawtpath, &statbuf) == 0) return false;
6587
6588
return true;
6589
}
6590
6591
// Get the default path to the core file
6592
// Returns the length of the string
6593
int os::get_core_path(char* buffer, size_t bufferSize) {
6594
const char* p = get_current_directory(buffer, bufferSize);
6595
6596
if (p == NULL) {
6597
assert(p != NULL, "failed to get current directory");
6598
return 0;
6599
}
6600
6601
return strlen(buffer);
6602
}
6603
6604
/////////////// Unit tests ///////////////
6605
6606
#ifndef PRODUCT
6607
6608
// Print a tty log line only when -XX:+VerboseInternalVMTests is enabled.
// The do { } while (false) wrapper makes the macro usable as one statement.
#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)
6615
6616
// Internal VM tests for the special (large-page) memory reservation paths:
// hugetlbfs-only, mixed small/huge page, and SysV SHM reservations.
class TestReserveMemorySpecial : AllStatic {
 public:
  // Touch one byte on every small page of [addr, addr + size) so the
  // reservation is actually backed by memory.
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  // Reserve 'size' bytes via the hugetlbfs-only path, write through the
  // area, then release it. No-op unless -XX:+UseHugeTLBFS.
  static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
    if (!UseHugeTLBFS) {
      return;
    }

    test_log("test_reserve_memory_special_huge_tlbfs_only(" SIZE_FORMAT ")", size);

    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);

    // Reservation may legitimately fail (e.g. no huge pages configured).
    if (addr != NULL) {
      small_page_write(addr, size);

      os::Linux::release_memory_special_huge_tlbfs(addr, size);
    }
  }

  // Drive the single-size test above for 1..10 multiples of the large page size.
  static void test_reserve_memory_special_huge_tlbfs_only() {
    if (!UseHugeTLBFS) {
      return;
    }

    size_t lp = os::large_page_size();

    for (size_t size = lp; size <= lp * 10; size += lp) {
      test_reserve_memory_special_huge_tlbfs_only(size);
    }
  }

  // Exercise the mixed small/huge page reservation path across a matrix of
  // sizes, power-of-two alignments, and req_addr scenarios.
  static void test_reserve_memory_special_huge_tlbfs_mixed() {
    size_t lp = os::large_page_size();
    size_t ag = os::vm_allocation_granularity();

    // sizes to test
    const size_t sizes[] = {
      lp, lp + ag, lp + lp / 2, lp * 2,
      lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
      lp * 10, lp * 10 + lp / 2
    };
    const int num_sizes = sizeof(sizes) / sizeof(size_t);

    // For each size/alignment combination, we test three scenarios:
    // 1) with req_addr == NULL
    // 2) with a non-null req_addr at which we expect to successfully allocate
    // 3) with a non-null req_addr which contains a pre-existing mapping, at which we
    //    expect the allocation to either fail or to ignore req_addr

    // Pre-allocate two areas; they shall be as large as the largest allocation
    // and aligned to the largest alignment we will be testing.
    const size_t mapping_size = sizes[num_sizes - 1] * 2;
    char* const mapping1 = (char*) ::mmap(NULL, mapping_size,
      PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
      -1, 0);
    assert(mapping1 != MAP_FAILED, "should work");

    char* const mapping2 = (char*) ::mmap(NULL, mapping_size,
      PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
      -1, 0);
    assert(mapping2 != MAP_FAILED, "should work");

    // Unmap the first mapping, but leave the second mapping intact: the first
    // mapping will serve as a value for a "good" req_addr (case 2). The second
    // mapping, still intact, as "bad" req_addr (case 3).
    ::munmap(mapping1, mapping_size);

    // Case 1
    test_log("%s, req_addr NULL:", __FUNCTION__);
    test_log("size align result");

    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " -> " PTR_FORMAT " %s",
                 size, alignment, p, (p != NULL ? "" : "(failed)"));
        if (p != NULL) {
          assert(is_ptr_aligned(p, alignment), "must be");
          small_page_write(p, size);
          os::Linux::release_memory_special_huge_tlbfs(p, size);
        }
      }
    }

    // Case 2
    test_log("%s, req_addr non-NULL:", __FUNCTION__);
    test_log("size align req_addr result");

    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
        // req_addr falls inside the now-unmapped mapping1, so the
        // reservation is expected to land exactly there.
        char* const req_addr = (char*) align_ptr_up(mapping1, alignment);
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
                 size, alignment, req_addr, p,
                 ((p != NULL ? (p == req_addr ? "(exact match)" : "") : "(failed)")));
        if (p != NULL) {
          assert(p == req_addr, "must be");
          small_page_write(p, size);
          os::Linux::release_memory_special_huge_tlbfs(p, size);
        }
      }
    }

    // Case 3
    test_log("%s, req_addr non-NULL with preexisting mapping:", __FUNCTION__);
    test_log("size align req_addr result");

    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
        // req_addr falls inside the still-live mapping2.
        char* const req_addr = (char*) align_ptr_up(mapping2, alignment);
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
                 size, alignment, req_addr, p,
                 ((p != NULL ? "" : "(failed)")));
        // as the area around req_addr contains already existing mappings, the API should always
        // return NULL (as per contract, it cannot return another address)
        assert(p == NULL, "must be");
      }
    }

    ::munmap(mapping2, mapping_size);

  }

  // Run both hugetlbfs test groups. No-op unless -XX:+UseHugeTLBFS.
  static void test_reserve_memory_special_huge_tlbfs() {
    if (!UseHugeTLBFS) {
      return;
    }

    test_reserve_memory_special_huge_tlbfs_only();
    test_reserve_memory_special_huge_tlbfs_mixed();
  }

  // Reserve 'size' bytes via SysV SHM at the given alignment, verify the
  // alignment guarantees, write through the area, then release it.
  // No-op unless -XX:+UseSHM.
  static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
    if (!UseSHM) {
      return;
    }

    test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);

    char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);

    if (addr != NULL) {
      assert(is_ptr_aligned(addr, alignment), "Check");
      assert(is_ptr_aligned(addr, os::large_page_size()), "Check");

      small_page_write(addr, size);

      os::Linux::release_memory_special_shm(addr, size);
    }
  }

  // Drive the SHM test over sizes up to 3 large pages and all alignments
  // that evenly divide each size.
  static void test_reserve_memory_special_shm() {
    size_t lp = os::large_page_size();
    size_t ag = os::vm_allocation_granularity();

    for (size_t size = ag; size < lp * 3; size += ag) {
      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
        test_reserve_memory_special_shm(size, alignment);
      }
    }
  }

  // Run every reserve_memory_special test group.
  static void test() {
    test_reserve_memory_special_huge_tlbfs();
    test_reserve_memory_special_shm();
  }
};
6795
6796
// Exported entry point that runs all TestReserveMemorySpecial tests
// (non-PRODUCT builds only).
void TestReserveMemorySpecial_test() {
  TestReserveMemorySpecial::test();
}
6799
6800
#endif
6801
6802