GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/os/linux/os_linux.cpp
/*
 * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2022 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "jvm.h"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvmtifiles/jvmti.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_linux.inline.hpp"
#include "os_posix.inline.hpp"
#include "os_share_linux.hpp"
#include "osContainer_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "signals_posix.hpp"
#include "semaphore_posix.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/align.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/elfFile.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/stat.h>
# include <sys/select.h>
# include <pthread.h>
# include <signal.h>
# include <endian.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <sys/time.h>
# include <sys/times.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <pwd.h>
# include <poll.h>
# include <fcntl.h>
# include <string.h>
# include <syscall.h>
# include <sys/sysinfo.h>
# include <sys/ipc.h>
# include <sys/shm.h>
# include <link.h>
# include <stdint.h>
# include <inttypes.h>
# include <sys/ioctl.h>
# include <linux/elf-em.h>
#ifdef __GLIBC__
# include <malloc.h>
#endif

#ifndef _GNU_SOURCE
  #define _GNU_SOURCE
  #include <sched.h>
  #undef _GNU_SOURCE
#else
  #include <sched.h>
#endif

// if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
// getrusage() is prepared to handle the associated failure.
#ifndef RUSAGE_THREAD
  #define RUSAGE_THREAD (1)               /* only the calling thread */
#endif

#define MAX_PATH (2 * K)
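// (K is HotSpot's 1024-byte size constant from globalDefinitions.hpp, so
//  MAX_PATH here is 2048 bytes.)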
#define MAX_SECS 100000000

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

#ifdef MUSL_LIBC
// dlvsym is not a part of POSIX
// and musl libc doesn't implement it.
static void *dlvsym(void *handle,
                    const char *symbol,
                    const char *version) {
  // load the latest version of symbol
  return dlsym(handle, symbol);
}
#endif
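// Note: the fallback above binds whatever definition musl resolves for the
// symbol; callers that would ask glibc's dlvsym() for a specific version
// simply get musl's single implementation instead.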
enum CoredumpFilterBit {
  FILE_BACKED_PVT_BIT = 1 << 2,
  FILE_BACKED_SHARED_BIT = 1 << 3,
  LARGEPAGES_BIT = 1 << 6,
  DAX_SHARED_BIT = 1 << 8
};
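// These bit positions follow the kernel's per-process core dump filter:
// writing the corresponding bits to /proc/<pid>/coredump_filter selects which
// memory mapping types are included in a core dump (see core(5)).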
////////////////////////////////////////////////////////////////////////////////
// global variables
julong os::Linux::_physical_memory = 0;

address   os::Linux::_initial_thread_stack_bottom = NULL;
uintptr_t os::Linux::_initial_thread_stack_size   = 0;

int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
pthread_t os::Linux::_main_thread;
int os::Linux::_page_size = -1;
bool os::Linux::_supports_fast_thread_cpu_time = false;
const char * os::Linux::_libc_version = NULL;
const char * os::Linux::_libpthread_version = NULL;
size_t os::Linux::_default_large_page_size = 0;

#ifdef __GLIBC__
os::Linux::mallinfo_func_t os::Linux::_mallinfo = NULL;
os::Linux::mallinfo2_func_t os::Linux::_mallinfo2 = NULL;
#endif // __GLIBC__

static jlong initial_time_count = 0;

static int clock_tics_per_sec = 100;
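// (100 ticks/sec is only a conservative default; the VM normally refreshes
//  this from sysconf(_SC_CLK_TCK) during initialization.)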

// If the VM might have been created on the primordial thread, we need to resolve the
// primordial thread stack bounds and check if the current thread might be the
// primordial thread in places. If we know that the primordial thread is never used,
// such as when the VM was created by one of the standard java launchers, we can
// avoid this.
static bool suppress_primordial_thread_resolution = false;

// utility functions

julong os::available_memory() {
  return Linux::available_memory();
}

julong os::Linux::available_memory() {
  // values in struct sysinfo are "unsigned long"
  struct sysinfo si;
  julong avail_mem;

  if (OSContainer::is_containerized()) {
    jlong mem_limit, mem_usage;
    if ((mem_limit = OSContainer::memory_limit_in_bytes()) < 1) {
      log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value",
                               mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);
    }
    if (mem_limit > 0 && (mem_usage = OSContainer::memory_usage_in_bytes()) < 1) {
      log_debug(os, container)("container memory usage failed: " JLONG_FORMAT ", using host value", mem_usage);
    }
    if (mem_limit > 0 && mem_usage > 0) {
      avail_mem = mem_limit > mem_usage ? (julong)mem_limit - (julong)mem_usage : 0;
      log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
      return avail_mem;
    }
  }

  sysinfo(&si);
  avail_mem = (julong)si.freeram * si.mem_unit;
  log_trace(os)("available memory: " JULONG_FORMAT, avail_mem);
  return avail_mem;
}

julong os::physical_memory() {
  jlong phys_mem = 0;
  if (OSContainer::is_containerized()) {
    jlong mem_limit;
    if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
      log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit);
      return mem_limit;
    }
    log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value",
                             mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);
  }

  phys_mem = Linux::physical_memory();
  log_trace(os)("total system memory: " JLONG_FORMAT, phys_mem);
  return phys_mem;
}

static uint64_t initial_total_ticks = 0;
static uint64_t initial_steal_ticks = 0;
static bool     has_initial_tick_info = false;

// Skip the rest of the current line.
static void next_line(FILE *f) {
  int c;
  do {
    c = fgetc(f);
  } while (c != '\n' && c != EOF);
}

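// Reads cumulative (or per-cpu) tick counters from /proc/stat. A typical line
// has the form (user nice system idle iowait irq softirq steal guest guest_nice):
//   cpu  74608 2520 24433 1117073 6176 4054 0 0 0 0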
bool os::Linux::get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu) {
  FILE* fh;
  uint64_t userTicks, niceTicks, systemTicks, idleTicks;
  // since at least kernel 2.6 : iowait: time waiting for I/O to complete
  // irq: time servicing interrupts; softirq: time servicing softirqs
  uint64_t iowTicks = 0, irqTicks = 0, sirqTicks = 0;
  // steal (since kernel 2.6.11): time spent in other OS when running in a virtualized environment
  uint64_t stealTicks = 0;
  // guest (since kernel 2.6.24): time spent running a virtual CPU for guest OS under the
  // control of the Linux kernel
  uint64_t guestNiceTicks = 0;
  int logical_cpu = -1;
  const int required_tickinfo_count = (which_logical_cpu == -1) ? 4 : 5;
  int n;

  memset(pticks, 0, sizeof(CPUPerfTicks));

  if ((fh = fopen("/proc/stat", "r")) == NULL) {
    return false;
  }

  if (which_logical_cpu == -1) {
    n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
               UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
               UINT64_FORMAT " " UINT64_FORMAT " ",
               &userTicks, &niceTicks, &systemTicks, &idleTicks,
               &iowTicks, &irqTicks, &sirqTicks,
               &stealTicks, &guestNiceTicks);
  } else {
    // Move to next line
    next_line(fh);

    // Find the line for the requested cpu. (Would it be faster to just iterate linefeeds?)
    for (int i = 0; i < which_logical_cpu; i++) {
      next_line(fh);
    }

    n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
               UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
               UINT64_FORMAT " " UINT64_FORMAT " ",
               &logical_cpu, &userTicks, &niceTicks,
               &systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks,
               &stealTicks, &guestNiceTicks);
  }

  fclose(fh);
  if (n < required_tickinfo_count || logical_cpu != which_logical_cpu) {
    return false;
  }
  pticks->used       = userTicks + niceTicks;
  pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
  pticks->total      = userTicks + niceTicks + systemTicks + idleTicks +
                       iowTicks + irqTicks + sirqTicks + stealTicks + guestNiceTicks;

  if (n > required_tickinfo_count + 3) {
    pticks->steal = stealTicks;
    pticks->has_steal_ticks = true;
  } else {
    pticks->steal = 0;
    pticks->has_steal_ticks = false;
  }

  return true;
}

// Return true if the process runs with elevated privileges, i.e. its real
// and effective uid or gid differ (setuid/setgid binaries).
bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}


#ifndef SYS_gettid
// i386: 224, ia64: 1105, amd64: 186, sparc: 143
  #ifdef __ia64__
    #define SYS_gettid 1105
  #else
    #ifdef __i386__
      #define SYS_gettid 224
    #else
      #ifdef __amd64__
        #define SYS_gettid 186
      #else
        #ifdef __sparc__
          #define SYS_gettid 143
        #else
          #error define gettid for the arch
        #endif
      #endif
    #endif
  #endif
#endif


// pid_t gettid()
//
// Returns the kernel thread id of the currently running thread. Kernel
// thread id is used to access /proc.
pid_t os::Linux::gettid() {
  int rslt = syscall(SYS_gettid);
  assert(rslt != -1, "must be."); // old linuxthreads implementation?
  return (pid_t)rslt;
}
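// Note: glibc 2.30 and later also expose a gettid(3) wrapper; the raw syscall
// above is kept for compatibility with older libc versions.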

// Most versions of Linux have a bug where the number of processors is
// determined by looking at the /proc file system. In a chroot environment,
// the system call returns 1.
static bool unsafe_chroot_detected = false;
static const char *unstable_chroot_error = "/proc file system not found.\n"
              "Java may be unstable running multithreaded in a chroot "
              "environment on Linux when /proc filesystem is not mounted.";

void os::Linux::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  if (processor_count() == 1) {
    pid_t pid = os::Linux::gettid();
    char fname[32];
    jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
    FILE *fp = fopen(fname, "r");
    if (fp == NULL) {
      unsafe_chroot_detected = true;
    } else {
      fclose(fp);
    }
  }
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
  assert(processor_count() > 0, "linux error");
}

void os::init_system_properties_values() {
  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm.so.
  // This library should be located at:
  // <JAVA_HOME>/lib/{client|server}/libjvm.so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm.so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // See ld(1):
  //      The linker uses the following search paths to locate required
  //      shared libraries:
  //        1: ...
  //        ...
  //        7: The default directories, normally /lib and /usr/lib.
#ifndef OVERRIDE_LIBPATH
  #if defined(_LP64)
    #define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
  #else
    #define DEFAULT_LIBPATH "/lib:/usr/lib"
  #endif
#else
  #define DEFAULT_LIBPATH OVERRIDE_LIBPATH
#endif

// Base path of extensions installed on the system.
#define SYS_EXT_DIR     "/usr/java/packages"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    if (!set_boot_path('/', ':')) {
      vm_exit_during_initialization("Failed setting boot class path.", NULL);
    }
  }

  // Where to look for native libraries.
  //
  // Note: Due to a legacy implementation, most of the library path
  // is set in the launcher. This was to accommodate linking restrictions
  // on legacy Linux implementations (which are no longer supported).
  // Eventually, all the library path setting will be done here.
  //
  // However, to prevent the proliferation of improperly built native
  // libraries, the new path component /usr/java/packages is added here.
  {
    // Get the user setting of LD_LIBRARY_PATH, and prepend it. It
    // should always exist (until the legacy problem cited above is
    // addressed).
    const char *v = ::getenv("LD_LIBRARY_PATH");
    const char *v_colon = ":";
    if (v == NULL) { v = ""; v_colon = ""; }
    // That's +1 for the colon and +1 for the trailing '\0'.
    char *ld_library_path = NEW_C_HEAP_ARRAY(char,
                                             strlen(v) + 1 +
                                             sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(DEFAULT_LIBPATH) + 1,
                                             mtInternal);
    sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib:" DEFAULT_LIBPATH, v, v_colon);
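    // Resulting search order: $LD_LIBRARY_PATH first, then
    // /usr/java/packages/lib, then the default system library path.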
    Arguments::set_library_path(ld_library_path);
    FREE_C_HEAP_ARRAY(char, ld_library_path);
  }

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Linux::libpthread_init() {
  // Save glibc and pthread version strings.
#if !defined(_CS_GNU_LIBC_VERSION) || \
    !defined(_CS_GNU_LIBPTHREAD_VERSION)
  #error "glibc too old (< 2.3.2)"
#endif

#ifdef MUSL_LIBC
  // confstr() from musl libc returns EINVAL for
  // _CS_GNU_LIBC_VERSION and _CS_GNU_LIBPTHREAD_VERSION
  os::Linux::set_libc_version("musl - unknown");
  os::Linux::set_libpthread_version("musl - unknown");
#else
  size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
  assert(n > 0, "cannot retrieve glibc version");
  char *str = (char *)malloc(n, mtInternal);
  confstr(_CS_GNU_LIBC_VERSION, str, n);
  os::Linux::set_libc_version(str);

  n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
  assert(n > 0, "cannot retrieve pthread version");
  str = (char *)malloc(n, mtInternal);
  confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
  os::Linux::set_libpthread_version(str);
#endif
}

/////////////////////////////////////////////////////////////////////////////
// thread stack expansion

// os::Linux::manually_expand_stack() takes care of expanding the thread
// stack. Note that this is normally not needed: pthread stacks allocate
// thread stack using mmap() without MAP_NORESERVE, so the stack is already
// committed. Therefore it is not necessary to expand the stack manually.
//
// Manually expanding the stack was historically needed on LinuxThreads
// thread stacks, which were allocated with mmap(MAP_GROWSDOWN). Nowadays
// it is kept to deal with very rare corner cases:
//
// For one, the user may run the VM on their own implementation of threads
// whose stacks are - like the old LinuxThreads - implemented using
// mmap(MAP_GROWSDOWN).
//
// Also, this coding may be needed if the VM is running on the primordial
// thread. Normally we avoid running on the primordial thread; however,
// the user may still invoke the VM on the primordial thread.
//
// The following historical comment describes the details about running
// on a thread stack allocated with mmap(MAP_GROWSDOWN):


// Force Linux kernel to expand current thread stack. If "bottom" is close
// to the stack guard, caller should block all signals.
//
// MAP_GROWSDOWN:
//   A special mmap() flag that is used to implement thread stacks. It tells
//   kernel that the memory region should extend downwards when needed. This
//   allows early versions of LinuxThreads to only mmap the first few pages
//   when creating a new thread. Linux kernel will automatically expand thread
//   stack as needed (on page faults).
//
//   However, because the memory region of a MAP_GROWSDOWN stack can grow on
//   demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
//   region, it's hard to tell if the fault is due to a legitimate stack
//   access or because of reading/writing nonexistent memory (e.g. buffer
//   overrun). As a rule, if the fault happens below current stack pointer,
//   Linux kernel does not expand stack, instead a SIGSEGV is sent to the
//   application (see Linux kernel fault.c).
//
//   This Linux feature can cause SIGSEGV when VM bangs thread stack for
//   stack overflow detection.
//
//   Newer version of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do
//   not use MAP_GROWSDOWN.
//
// To get around the problem and allow stack banging on Linux, we need to
// manually expand thread stack after receiving the SIGSEGV.
//
// There are two ways to expand thread stack to address "bottom", we used
// both of them in JVM before 1.5:
//   1. adjust stack pointer first so that it is below "bottom", and then
//      touch "bottom"
//   2. mmap() the page in question
//
// Now that the alternate signal stack is gone, it's harder to use 2. For
// instance, if current sp is already near the lower end of page 101, and we
// need to call mmap() to map page 100, it is possible that part of the mmap()
// frame will be placed in page 100. When page 100 is mapped, it is zero-filled.
// That will destroy the mmap() frame and cause the VM to crash.
//
// The following code works by adjusting sp first, then accessing the "bottom"
// page to force a page fault. Linux kernel will then automatically expand the
// stack mapping.
//
// _expand_stack_to() assumes its frame size is less than page size, which
// should always be true if the function is not inlined.

static void NOINLINE _expand_stack_to(address bottom) {
  address sp;
  size_t size;
  volatile char *p;

  // Adjust bottom to point to the largest address within the same page, it
  // gives us a one-page buffer if alloca() allocates slightly more memory.
  bottom = (address)align_down((uintptr_t)bottom, os::Linux::page_size());
  bottom += os::Linux::page_size() - 1;

  // sp might be slightly above current stack pointer; if that's the case, we
  // will alloca() a little more space than necessary, which is OK. Don't use
  // os::current_stack_pointer(), as its result can be slightly below current
  // stack pointer, causing us to not alloca enough to reach "bottom".
  sp = (address)&sp;

  if (sp > bottom) {
    size = sp - bottom;
    p = (volatile char *)alloca(size);
    assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");
    p[0] = '\0';
  }
}

void os::Linux::expand_stack_to(address bottom) {
  _expand_stack_to(bottom);
}

bool os::Linux::manually_expand_stack(JavaThread * t, address addr) {
  assert(t != NULL, "just checking");
  assert(t->osthread()->expanding_stack(), "expand should be set");

  if (t->is_in_usable_stack(addr)) {
    sigset_t mask_all, old_sigset;
    sigfillset(&mask_all);
    pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
    _expand_stack_to(addr);
    pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);
    return true;
  }
  return false;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *thread_native_entry(Thread *thread) {

  thread->record_stack_base_and_size();

#ifndef __GLIBC__
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  // This code is not needed anymore in glibc because it has MULTI_PAGE_ALIASING
  // and we did not see any degradation in performance without `alloca()`.
  static int counter = 0;
  int pid = os::current_process_id();
  int random = ((pid ^ counter++) & 7) * 128;
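  // random is a multiple of 128 bytes in [0, 896], giving each thread a
  // slightly different starting offset within its stack.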
  void *stackmem = alloca(random != 0 ? random : 1); // ensure we allocate > 0
  // Ensure the alloca result is used in a way that prevents the compiler from eliding it.
  *(char *)stackmem = 1;
#endif

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();
  Monitor* sync = osthread->startThread_lock();

  osthread->set_thread_id(os::current_thread_id());

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }
  // initialize signal mask for this thread
  PosixSignals::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Linux::init_thread_fpu_state();

  // handshaking with parent thread
  {
    MutexLocker ml(sync, Mutex::_no_safepoint_check_flag);

    // notify parent thread
    osthread->set_state(INITIALIZED);
    sync->notify_all();

    // wait until os::start_thread()
    while (osthread->get_state() == INITIALIZED) {
      sync->wait_without_safepoint_check();
    }
  }

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
                       os::current_thread_id(), (uintx) pthread_self());

  assert(osthread->pthread_id() != 0, "pthread_id was not set as expected");

  // call one more level start routine
  thread->call_run();

  // Note: at this point the thread object may already have deleted itself.
  // Prevent dereferencing it from here on out.
  thread = NULL;

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
                       os::current_thread_id(), (uintx) pthread_self());

  return 0;
}

// On Linux, glibc places static TLS blocks (for __thread variables) on
// the thread stack. This decreases the stack size actually available
// to threads.
//
// For large static TLS sizes, this may cause threads to malfunction due
// to insufficient stack space. This is a well-known issue in glibc:
// http://sourceware.org/bugzilla/show_bug.cgi?id=11787.
//
// As a workaround, we call a private but assumed-stable glibc function,
// __pthread_get_minstack() to obtain the minstack size and derive the
// static TLS size from it. We then increase the user requested stack
// size by this TLS size.
//
// Due to compatibility concerns, this size adjustment is opt-in and
// controlled via AdjustStackSizeForTLS.
typedef size_t (*GetMinStack)(const pthread_attr_t *attr);

GetMinStack _get_minstack_func = NULL;

static void get_minstack_init() {
  _get_minstack_func =
        (GetMinStack)dlsym(RTLD_DEFAULT, "__pthread_get_minstack");
  log_info(os, thread)("Lookup of __pthread_get_minstack %s",
                       _get_minstack_func == NULL ? "failed" : "succeeded");
}

// Returns the size of the static TLS area glibc puts on thread stacks.
// The value is cached on first use, which occurs when the first thread
// is created during VM initialization.
static size_t get_static_tls_area_size(const pthread_attr_t *attr) {
  size_t tls_size = 0;
  if (_get_minstack_func != NULL) {
    // Obtain the pthread minstack size by calling __pthread_get_minstack.
    size_t minstack_size = _get_minstack_func(attr);

    // Remove non-TLS area size included in minstack size returned
    // by __pthread_get_minstack() to get the static TLS size.
    // In glibc before 2.27, minstack size includes guard_size.
    // In glibc 2.27 and later, guard_size is automatically added
    // to the stack size by pthread_create and is no longer included
    // in minstack size. In both cases, the guard_size is taken into
    // account, so there is no need to adjust the result for that.
    //
    // Although __pthread_get_minstack() is a private glibc function,
    // it is expected to have a stable behavior across future glibc
    // versions while glibc still allocates the static TLS blocks off
    // the stack. Following is glibc 2.28 __pthread_get_minstack():
    //
    // size_t
    // __pthread_get_minstack (const pthread_attr_t *attr)
    // {
    //   return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
    // }
    //
    //
    // The following 'minstack_size > os::vm_page_size() + PTHREAD_STACK_MIN'
    // if check is done for precaution.
    if (minstack_size > (size_t)os::vm_page_size() + PTHREAD_STACK_MIN) {
      tls_size = minstack_size - os::vm_page_size() - PTHREAD_STACK_MIN;
    }
  }

  log_info(os, thread)("Stack size adjustment for TLS is " SIZE_FORMAT,
                       tls_size);
  return tls_size;
}

bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {
  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  // Calculate stack size if it's not specified by caller.
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
  // In glibc versions prior to 2.7 the guard size mechanism
  // is not implemented properly. The POSIX standard requires adding
  // the size of the guard pages to the stack size, instead Linux
  // takes the space out of 'stacksize'. Thus we adapt the requested
  // stack_size by the size of the guard pages to mimic proper
  // behaviour. However, be careful not to end up with a size
  // of zero due to overflow. Don't add the guard page in that case.
  size_t guard_size = os::Linux::default_guard_size(thr_type);
  // Configure glibc guard page. Must happen before calling
  // get_static_tls_area_size(), which uses the guard_size.
  pthread_attr_setguardsize(&attr, guard_size);

  size_t stack_adjust_size = 0;
  if (AdjustStackSizeForTLS) {
    // Adjust the stack_size for on-stack TLS - see get_static_tls_area_size().
    stack_adjust_size += get_static_tls_area_size(&attr);
  } else {
    stack_adjust_size += guard_size;
  }

  stack_adjust_size = align_up(stack_adjust_size, os::vm_page_size());
  if (stack_size <= SIZE_MAX - stack_adjust_size) {
    stack_size += stack_adjust_size;
  }
  assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");

  int status = pthread_attr_setstacksize(&attr, stack_size);
  if (status != 0) {
    // pthread_attr_setstacksize() function can fail
    // if the stack size exceeds a system-imposed limit.
    assert_status(status == EINVAL, status, "pthread_attr_setstacksize");
    log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k",
                            (thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? "" : "VM "),
                            stack_size / K);
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  ThreadState state;

  {
    ResourceMark rm;
    pthread_t tid;
    int ret = 0;
    int limit = 3;
    do {
      ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
    } while (ret == EAGAIN && limit-- > 0);

    char buf[64];
    if (ret == 0) {
      log_info(os, thread)("Thread \"%s\" started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
                           thread->name(), (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
    } else {
      log_warning(os, thread)("Failed to start thread \"%s\" - pthread_create failed (%s) for attributes: %s.",
                              thread->name(), os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
      // Log some OS information which might explain why creating the thread failed.
      log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
      LogStream st(Log(os, thread)::info());
      os::Posix::print_rlimit_info(&st);
      os::print_memory_info(&st);
      os::Linux::print_proc_sys_info(&st);
      os::Linux::print_container_info(&st);
    }

    pthread_attr_destroy(&attr);

    if (ret != 0) {
      // Need to clean up stuff we've allocated so far
      thread->set_osthread(NULL);
      delete osthread;
      return false;
    }

    // Store pthread info into the OSThread
    osthread->set_pthread_id(tid);

    // Wait until child thread is either initialized or aborted
    {
      Monitor* sync_with_child = osthread->startThread_lock();
      MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);
      while ((state = osthread->get_state()) == ALLOCATED) {
        sync_with_child->wait_without_safepoint_check();
      }
    }
  }

  // The thread is returned suspended (in state INITIALIZED),
  // and is started higher up in the call chain
  assert(state == INITIALIZED, "race condition");
  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Linux::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Linux::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Linux::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if (os::is_primordial_thread()) {
    // If current thread is primordial thread, its stack is mapped on demand,
    // see notes about MAP_GROWSDOWN. Here we try to force kernel to map
    // the entire stack region to avoid SEGV in stack banging.
    // It is also useful to get around the heap-stack-gap problem on SuSE
    // kernel (see 4821821 for details). We first expand stack to the top
    // of yellow zone, then enable stack yellow zone (order is significant,
    // enabling yellow zone first will crash JVM on SuSE Linux), so there
    // is no gap between the last two virtual memory regions.

    StackOverflow* overflow_state = thread->stack_overflow_state();
    address addr = overflow_state->stack_reserved_zone_base();
    assert(addr != NULL, "initialization problem?");
    assert(overflow_state->stack_available(addr) > 0, "stack guard should not be enabled");

    osthread->set_expanding_stack();
    os::Linux::manually_expand_stack(thread, addr);
    osthread->clear_expanding_stack();
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  PosixSignals::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
                       os::current_thread_id(), (uintx) pthread_self());

  return true;
}

void os::pd_start_thread(Thread* thread) {
  OSThread * osthread = thread->osthread();
  assert(osthread->get_state() != INITIALIZED, "just checking");
  Monitor* sync_with_child = osthread->startThread_lock();
  MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);
  sync_with_child->notify();
}

// Free Linux resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

#ifdef ASSERT
  sigset_t current;
  sigemptyset(&current);
  pthread_sigmask(SIG_SETMASK, NULL, &current);
  assert(!sigismember(&current, PosixSignals::SR_signum), "SR signal should not be blocked!");
#endif

  // Restore caller's signal mask
  sigset_t sigmask = osthread->caller_sigmask();
  pthread_sigmask(SIG_SETMASK, &sigmask, NULL);

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// primordial thread

// Check if current thread is the primordial thread, similar to Solaris thr_main.
bool os::is_primordial_thread(void) {
  if (suppress_primordial_thread_resolution) {
    return false;
  }
  char dummy;
  // If called before init complete, thread stack bottom will be null.
  // Can be called if fatal error occurs before initialization.
  if (os::Linux::initial_thread_stack_bottom() == NULL) return false;
  assert(os::Linux::initial_thread_stack_bottom() != NULL &&
         os::Linux::initial_thread_stack_size() != 0,
         "os::init did not locate primordial thread's stack region");
  if ((address)&dummy >= os::Linux::initial_thread_stack_bottom() &&
      (address)&dummy < os::Linux::initial_thread_stack_bottom() +
                        os::Linux::initial_thread_stack_size()) {
    return true;
  } else {
    return false;
  }
}

// Find the virtual memory area that contains addr
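// (Each /proc/self/maps line begins with the mapping's address range, e.g.
//  "7f0e2c021000-7f0e2c042000 r-xp ..."; only those two addresses are parsed.)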
static bool find_vma(address addr, address* vma_low, address* vma_high) {
  FILE *fp = fopen("/proc/self/maps", "r");
  if (fp) {
    address low, high;
    while (!feof(fp)) {
      if (fscanf(fp, "%p-%p", &low, &high) == 2) {
        if (low <= addr && addr < high) {
          if (vma_low)  *vma_low  = low;
          if (vma_high) *vma_high = high;
          fclose(fp);
          return true;
        }
      }
      for (;;) {
        int ch = fgetc(fp);
        if (ch == EOF || ch == (int)'\n') break;
      }
    }
    fclose(fp);
  }
  return false;
}

// Locate primordial thread stack. This special handling of primordial thread stack
// is needed because pthread_getattr_np() on most (all?) Linux distros returns
// a bogus value for the primordial process thread. While the launcher has created
// the VM in a new thread since JDK 6, we still have to allow for the use of the
// JNI invocation API from a primordial thread.
void os::Linux::capture_initial_stack(size_t max_size) {

  // max_size is either 0 (which means accept OS default for thread stacks) or
  // a user-specified value known to be at least the minimum needed. If we
  // are actually on the primordial thread we can make it appear that we have a
  // smaller max_size stack by inserting the guard pages at that location. But we
  // cannot do anything to emulate a larger stack than what has been provided by
  // the OS or threading library. In fact if we try to use a stack greater than
  // what is set by rlimit then we will crash the hosting process.

  // Maximum stack size is the easy part, get it from RLIMIT_STACK.
  // If this is "unlimited" then it will be a huge value.
  struct rlimit rlim;
  getrlimit(RLIMIT_STACK, &rlim);
  size_t stack_size = rlim.rlim_cur;

  // 6308388: a bug in ld.so will relocate its own .data section to the
  //   lower end of primordial stack; reduce ulimit -s value a little bit
  //   so we won't install guard page on ld.so's data section.
  //   But ensure we don't underflow the stack size - allow 1 page spare
  if (stack_size >= (size_t)(3 * page_size())) {
    stack_size -= 2 * page_size();
  }

  // Try to figure out where the stack base (top) is. This is harder.
  //
  // When an application is started, glibc saves the initial stack pointer in
  // a global variable "__libc_stack_end", which is then used by system
  // libraries. __libc_stack_end should be pretty close to stack top. The
  // variable is available since the very early days. However, because it is
  // a private interface, it could disappear in the future.
  //
  // Linux kernel saves start_stack information in /proc/<pid>/stat. Similar
  // to __libc_stack_end, it is very close to stack top, but isn't the real
  // stack top. Note that /proc may not exist if VM is running as a chroot
  // program, so reading /proc/<pid>/stat could fail. Also the contents of
  // /proc/<pid>/stat could change in the future (though unlikely).
  //
  // We try __libc_stack_end first. If that doesn't work, look for
  // /proc/<pid>/stat. If neither of them works, we use current stack pointer
  // as a hint, which should work well in most cases.

  uintptr_t stack_start;

  // try __libc_stack_end first
  uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");
  if (p && *p) {
    stack_start = *p;
  } else {
    // see if we can get the start_stack field from /proc/self/stat
    FILE *fp;
    int pid;
    char state;
    int ppid;
    int pgrp;
    int session;
    int nr;
    int tpgrp;
    unsigned long flags;
    unsigned long minflt;
    unsigned long cminflt;
    unsigned long majflt;
    unsigned long cmajflt;
    unsigned long utime;
    unsigned long stime;
    long cutime;
    long cstime;
    long prio;
    long nice;
    long junk;
    long it_real;
    uintptr_t start;
    uintptr_t vsize;
    intptr_t rss;
    uintptr_t rsslim;
    uintptr_t scodes;
    uintptr_t ecode;
    int i;

    // Figure what the primordial thread stack base is. Code is inspired
    // by email from Hans Boehm. /proc/self/stat begins with current pid,
    // followed by command name surrounded by parentheses, state, etc.
    char stat[2048];
    int statlen;

    fp = fopen("/proc/self/stat", "r");
    if (fp) {
      statlen = fread(stat, 1, 2047, fp);
      stat[statlen] = '\0';
      fclose(fp);

      // Skip pid and the command string. Note that we could be dealing with
      // weird command names, e.g. user could decide to rename java launcher
      // to "java 1.4.2 :)", then the stat file would look like
      //                1234 (java 1.4.2 :)) R ... ...
      // We don't really need to know the command string, just find the last
      // occurrence of ")" and then start parsing from there. See bug 4726580.
      char * s = strrchr(stat, ')');

      i = 0;
      if (s) {
        // Skip blank chars
        do { s++; } while (s && isspace(*s));

#define _UFM UINTX_FORMAT
#define _DFM INTX_FORMAT

        //                                     1   1   1   1   1   1   1   1   1   1   2   2    2    2    2    2    2    2    2
        //              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1    2    3    4    5    6    7    8
        i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
                   &state,          // 3  %c
                   &ppid,           // 4  %d
                   &pgrp,           // 5  %d
                   &session,        // 6  %d
                   &nr,             // 7  %d
                   &tpgrp,          // 8  %d
                   &flags,          // 9  %lu
                   &minflt,         // 10 %lu
                   &cminflt,        // 11 %lu
                   &majflt,         // 12 %lu
                   &cmajflt,        // 13 %lu
                   &utime,          // 14 %lu
                   &stime,          // 15 %lu
                   &cutime,         // 16 %ld
                   &cstime,         // 17 %ld
                   &prio,           // 18 %ld
                   &nice,           // 19 %ld
                   &junk,           // 20 %ld
                   &it_real,        // 21 %ld
                   &start,          // 22 UINTX_FORMAT
                   &vsize,          // 23 UINTX_FORMAT
                   &rss,            // 24 INTX_FORMAT
                   &rsslim,         // 25 UINTX_FORMAT
                   &scodes,         // 26 UINTX_FORMAT
                   &ecode,          // 27 UINTX_FORMAT
                   &stack_start);   // 28 UINTX_FORMAT
      }

#undef _UFM
#undef _DFM

      if (i != 28 - 2) {
        assert(false, "Bad conversion from /proc/self/stat");
        // product mode - assume we are the primordial thread, good luck in the
        // embedded case.
        warning("Can't detect primordial thread stack location - bad conversion");
        stack_start = (uintptr_t) &rlim;
      }
    } else {
      // For some reason we can't open /proc/self/stat (for example, running on
      // FreeBSD with a Linux emulator, or inside chroot), this should work for
      // most cases, so don't abort:
      warning("Can't detect primordial thread stack location - no /proc/self/stat");
      stack_start = (uintptr_t) &rlim;
    }
  }

  // Now we have a pointer (stack_start) very close to the stack top, the
  // next thing to do is to figure out the exact location of stack top. We
  // can find out the virtual memory area that contains stack_start by
  // reading /proc/self/maps, it should be the last vma in /proc/self/maps,
  // and its upper limit is the real stack top. (again, this would fail if
  // running inside chroot, because /proc may not exist.)

  uintptr_t stack_top;
  address low, high;
  if (find_vma((address)stack_start, &low, &high)) {
    // success, "high" is the true stack top. (ignore "low", because initial
    // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)
    stack_top = (uintptr_t)high;
  } else {
    // failed, likely because /proc/self/maps does not exist
    warning("Can't detect primordial thread stack location - find_vma failed");
    // best effort: stack_start is normally within a few pages below the real
    // stack top, use it as stack top, and reduce stack size so we won't put
    // guard page outside stack.
    stack_top = stack_start;
    stack_size -= 16 * page_size();
  }

  // stack_top could be partially down the page so align it
  stack_top = align_up(stack_top, page_size());

  // Allowed stack value is minimum of max_size and what we derived from rlimit
  if (max_size > 0) {
    _initial_thread_stack_size = MIN2(max_size, stack_size);
  } else {
    // Accept the rlimit max, but if stack is unlimited then it will be huge, so
    // clamp it at 8MB as we do on Solaris
    _initial_thread_stack_size = MIN2(stack_size, 8*M);
  }
  _initial_thread_stack_size = align_down(_initial_thread_stack_size, page_size());
  _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;

  assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!");

  if (log_is_enabled(Info, os, thread)) {
    // See if we seem to be on primordial process thread
    bool primordial = uintptr_t(&rlim) > uintptr_t(_initial_thread_stack_bottom) &&
                      uintptr_t(&rlim) < stack_top;

    log_info(os, thread)("Capturing initial stack in %s thread: req. size: " SIZE_FORMAT "K, actual size: "
                         SIZE_FORMAT "K, top=" INTPTR_FORMAT ", bottom=" INTPTR_FORMAT,
                         primordial ? "primordial" : "user", max_size / K, _initial_thread_stack_size / K,
                         stack_top, intptr_t(_initial_thread_stack_bottom));
  }
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
double os::elapsedTime() {
  return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution
}

jlong os::elapsed_counter() {
  return javaTimeNanos() - initial_time_count;
}

jlong os::elapsed_frequency() {
  return NANOSECS_PER_SEC; // nanosecond resolution
}

bool os::supports_vtime() { return true; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return (double) (usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) + (double) (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

void os::Linux::fast_thread_clock_init() {
  if (!UseLinuxPosixThreadCPUClocks) {
    return;
  }
  clockid_t clockid;
  struct timespec tp;
  int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
      (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");

  // Switch to using fast clocks for thread cpu time if
  // clock_getres() returns a 0 error code.
  // Note that some kernels may support the current thread
  // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks
  // returned by the pthread_getcpuclockid().
  // If the fast POSIX clocks are supported then clock_getres()
  // must return at least tp.tv_sec == 0 which means a resolution
  // better than 1 sec. This is an extra check for reliability.

  if (pthread_getcpuclockid_func &&
      pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
      clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
    _supports_fast_thread_cpu_time = true;
    _pthread_getcpuclockid = pthread_getcpuclockid_func;
  }
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}


char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

// thread_id is kernel thread id (similar to Solaris LWP id)
intx os::current_thread_id() { return os::Linux::gettid(); }
int os::current_process_id() {
  return ::getpid();
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

// check if addr is inside libjvm.so
bool os::address_is_in_vm(address addr) {
  static address libjvm_base_addr;
  Dl_info dlinfo;

  if (libjvm_base_addr == NULL) {
    if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
      libjvm_base_addr = (address)dlinfo.dli_fbase;
    }
    assert(libjvm_base_addr != NULL, "Cannot obtain base address for libjvm");
  }

  if (dladdr((void *)addr, &dlinfo) != 0) {
    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  }

  return false;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  Dl_info dlinfo;

  if (dladdr((void*)addr, &dlinfo) != 0) {
    // see if we have a matching symbol
    if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
      if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
        jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
      }
      if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
      return true;
    }
    // no matching symbol so try for just file info
    if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
      if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
                          buf, buflen, offset, dlinfo.dli_fname, demangle)) {
        return true;
      }
    }
  }

  buf[0] = '\0';
  if (offset != NULL) *offset = -1;
  return false;
}

struct _address_to_library_name {
  address addr;          // input : memory address
  size_t  buflen;        //         size of fname
  char*   fname;         // output: library name
  address base;          //         library base addr
};

static int address_to_library_name_callback(struct dl_phdr_info *info,
                                            size_t size, void *data) {
  int i;
  bool found = false;
  address libbase = NULL;
  struct _address_to_library_name * d = (struct _address_to_library_name *)data;

  // iterate through all loadable segments
  for (i = 0; i < info->dlpi_phnum; i++) {
    address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
    if (info->dlpi_phdr[i].p_type == PT_LOAD) {
      // base address of a library is the lowest address of its loaded
      // segments.
      if (libbase == NULL || libbase > segbase) {
        libbase = segbase;
      }
      // see if 'addr' is within current segment
      if (segbase <= d->addr &&
          d->addr < segbase + info->dlpi_phdr[i].p_memsz) {
        found = true;
      }
    }
  }

  // dlpi_name is NULL or empty if the ELF file is executable, return 0
  // so dll_address_to_library_name() can fall through to use dladdr() which
  // can figure out executable name from argv[0].
  if (found && info->dlpi_name && info->dlpi_name[0]) {
    d->base = libbase;
    if (d->fname) {
      jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name);
    }
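    // Returning non-zero stops the dl_iterate_phdr() walk early (see
    // dl_iterate_phdr(3)).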
return 1;
1484
}
1485
return 0;
1486
}
1487
1488
bool os::dll_address_to_library_name(address addr, char* buf,
1489
int buflen, int* offset) {
1490
// buf is not optional, but offset is optional
1491
assert(buf != NULL, "sanity check");
1492
1493
Dl_info dlinfo;
1494
struct _address_to_library_name data;
1495
1496
// There is a bug in old glibc dladdr() implementation that it could resolve
1497
// to wrong library name if the .so file has a base address != NULL. Here
1498
// we iterate through the program headers of all loaded libraries to find
1499
// out which library 'addr' really belongs to. This workaround can be
1500
// removed once the minimum requirement for glibc is moved to 2.3.x.
1501
data.addr = addr;
1502
data.fname = buf;
1503
data.buflen = buflen;
1504
data.base = NULL;
1505
int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);
1506
1507
if (rslt) {
1508
// buf already contains library name
1509
if (offset) *offset = addr - data.base;
1510
return true;
1511
}
1512
if (dladdr((void*)addr, &dlinfo) != 0) {
1513
if (dlinfo.dli_fname != NULL) {
1514
jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1515
}
1516
if (dlinfo.dli_fbase != NULL && offset != NULL) {
1517
*offset = addr - (address)dlinfo.dli_fbase;
1518
}
1519
return true;
1520
}
1521
1522
buf[0] = '\0';
1523
if (offset) *offset = -1;
1524
return false;
1525
}

// Loads a .dll/.so and, in case of error, checks whether the .dll/.so was
// built for the same architecture as HotSpot is running on.

// Remember the stack's state. The Linux dynamic linker will change
// the stack to 'executable' at most once, so we must safepoint only once.
bool os::Linux::_stack_is_executable = false;

// VM operation that loads a library. This is necessary if stack protection
// of the Java stacks can be lost during loading the library. If we
// do not stop the Java threads, they can stack overflow before the stacks
// are protected again.
class VM_LinuxDllLoad: public VM_Operation {
 private:
  const char *_filename;
  char *_ebuf;
  int _ebuflen;
  void *_lib;
 public:
  VM_LinuxDllLoad(const char *fn, char *ebuf, int ebuflen) :
      _filename(fn), _ebuf(ebuf), _ebuflen(ebuflen), _lib(NULL) {}
  VMOp_Type type() const { return VMOp_LinuxDllLoad; }
  void doit() {
    _lib = os::Linux::dll_load_in_vmthread(_filename, _ebuf, _ebuflen);
    os::Linux::_stack_is_executable = true;
  }
  void* loaded_library() { return _lib; }
};

void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
  void * result = NULL;
  bool load_attempted = false;

  log_info(os)("attempting shared library load of %s", filename);

  // Check whether the library to load might change execution rights
  // of the stack. If they are changed, the protection of the stack
  // guard pages will be lost. We need a safepoint to fix this.
  //
  // See Linux man page execstack(8) for more info.
  if (os::uses_stack_guard_pages() && !os::Linux::_stack_is_executable) {
    if (!ElfFile::specifies_noexecstack(filename)) {
      if (!is_init_completed()) {
        os::Linux::_stack_is_executable = true;
        // This is OK - No Java threads have been created yet, and hence no
        // stack guard pages to fix.
        //
        // Dynamic loader will make all stacks executable after
        // this function returns, and will not do that again.
        assert(Threads::number_of_threads() == 0, "no Java threads should exist yet.");
      } else {
        warning("You have loaded library %s which might have disabled stack guard. "
                "The VM will try to fix the stack guard now.\n"
                "It's highly recommended that you fix the library with "
                "'execstack -c <libfile>', or link it with '-z noexecstack'.",
                filename);

        JavaThread *jt = JavaThread::current();
        if (jt->thread_state() != _thread_in_native) {
          // This happens when a compiler thread tries to load a hsdis-<arch>.so file
          // that requires ExecStack. Cannot enter a safepoint. Let's give up.
          warning("Unable to fix stack guard. Giving up.");
        } else {
          if (!LoadExecStackDllInVMThread) {
            // This is for the case where the DLL has a static
            // constructor function that executes JNI code. We cannot
            // load such DLLs in the VMThread.
            result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);
          }

          ThreadInVMfromNative tiv(jt);
          debug_only(VMNativeEntryWrapper vew;)

          VM_LinuxDllLoad op(filename, ebuf, ebuflen);
          VMThread::execute(&op);
          if (LoadExecStackDllInVMThread) {
            result = op.loaded_library();
          }
          load_attempted = true;
        }
      }
    }
  }

  if (!load_attempted) {
    result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);
  }

  if (result != NULL) {
    // Successful loading
    return result;
  }

  Elf32_Ehdr elf_head;
  int diag_msg_max_length = ebuflen - strlen(ebuf);
  char* diag_msg_buf = ebuf + strlen(ebuf);

  if (diag_msg_max_length == 0) {
    // No more space in ebuf for additional diagnostics message
    return NULL;
  }

  int file_descriptor = ::open(filename, O_RDONLY | O_NONBLOCK);

  if (file_descriptor < 0) {
    // Can't open library, report dlerror() message
    return NULL;
  }

  bool failed_to_read_elf_head =
    (sizeof(elf_head) !=
     (::read(file_descriptor, &elf_head, sizeof(elf_head))));

  ::close(file_descriptor);
  if (failed_to_read_elf_head) {
    // file i/o error - report dlerror() msg
    return NULL;
  }

  if (elf_head.e_ident[EI_DATA] != LITTLE_ENDIAN_ONLY(ELFDATA2LSB) BIG_ENDIAN_ONLY(ELFDATA2MSB)) {
    // handle invalid/out of range endianness values
    if (elf_head.e_ident[EI_DATA] == 0 || elf_head.e_ident[EI_DATA] > 2) {
      return NULL;
    }

#if defined(VM_LITTLE_ENDIAN)
    // VM is LE, shared object BE
    elf_head.e_machine = be16toh(elf_head.e_machine);
#else
    // VM is BE, shared object LE
    elf_head.e_machine = le16toh(elf_head.e_machine);
#endif
  }

  typedef struct {
    Elf32_Half    code;         // Actual value as defined in elf.h
    Elf32_Half    compat_class; // Compatibility class of the arch from the VM's point of view
    unsigned char elf_class;    // 32 or 64 bit
    unsigned char endianness;   // MSB or LSB
    char*         name;         // String representation
  } arch_t;

#ifndef EM_AARCH64
  #define EM_AARCH64    183     /* ARM AARCH64 */
#endif
#ifndef EM_RISCV
  #define EM_RISCV      243     /* RISC-V */
#endif
#ifndef EM_LOONGARCH
  #define EM_LOONGARCH  258     /* LoongArch */
#endif

  static const arch_t arch_array[]={
    {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
    {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
    {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
    {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
#if defined(VM_LITTLE_ENDIAN)
    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64 LE"},
    {EM_SH,          EM_SH,      ELFCLASS32, ELFDATA2LSB, (char*)"SuperH"},
#else
    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
    {EM_SH,          EM_SH,      ELFCLASS32, ELFDATA2MSB, (char*)"SuperH BE"},
#endif
    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
    // we only support 64 bit z architecture
    {EM_S390,        EM_S390,    ELFCLASS64, ELFDATA2MSB, (char*)"IBM System/390"},
    {EM_ALPHA,       EM_ALPHA,   ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
    {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"},
    {EM_MIPS,        EM_MIPS,    ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"},
    {EM_PARISC,      EM_PARISC,  ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"},
    {EM_68K,         EM_68K,     ELFCLASS32, ELFDATA2MSB, (char*)"M68k"},
    {EM_AARCH64,     EM_AARCH64, ELFCLASS64, ELFDATA2LSB, (char*)"AARCH64"},
    {EM_RISCV,       EM_RISCV,   ELFCLASS64, ELFDATA2LSB, (char*)"RISC-V"},
    {EM_LOONGARCH,   EM_LOONGARCH, ELFCLASS64, ELFDATA2LSB, (char*)"LoongArch"},
  };

#if (defined IA32)
  static Elf32_Half running_arch_code=EM_386;
#elif (defined AMD64) || (defined X32)
  static Elf32_Half running_arch_code=EM_X86_64;
#elif (defined IA64)
  static Elf32_Half running_arch_code=EM_IA_64;
#elif (defined __sparc) && (defined _LP64)
  static Elf32_Half running_arch_code=EM_SPARCV9;
#elif (defined __sparc) && (!defined _LP64)
  static Elf32_Half running_arch_code=EM_SPARC;
#elif (defined __powerpc64__)
  static Elf32_Half running_arch_code=EM_PPC64;
#elif (defined __powerpc__)
  static Elf32_Half running_arch_code=EM_PPC;
#elif (defined AARCH64)
  static Elf32_Half running_arch_code=EM_AARCH64;
#elif (defined ARM)
  static Elf32_Half running_arch_code=EM_ARM;
#elif (defined S390)
  static Elf32_Half running_arch_code=EM_S390;
#elif (defined ALPHA)
  static Elf32_Half running_arch_code=EM_ALPHA;
#elif (defined MIPSEL)
  static Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
#elif (defined PARISC)
  static Elf32_Half running_arch_code=EM_PARISC;
#elif (defined MIPS)
  static Elf32_Half running_arch_code=EM_MIPS;
#elif (defined M68K)
  static Elf32_Half running_arch_code=EM_68K;
#elif (defined SH)
  static Elf32_Half running_arch_code=EM_SH;
#elif (defined RISCV)
  static Elf32_Half running_arch_code=EM_RISCV;
#elif (defined LOONGARCH)
  static Elf32_Half running_arch_code=EM_LOONGARCH;
#else
  #error Method os::dll_load requires that one of following is defined:\
      AARCH64, ALPHA, ARM, AMD64, IA32, IA64, LOONGARCH, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc
#endif

  // Identify compatibility class for VM's architecture and library's architecture
  // Obtain string descriptions for architectures

  arch_t lib_arch = {elf_head.e_machine, 0, elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  int running_arch_index = -1;

  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (running_arch_code == arch_array[i].code) {
      running_arch_index = i;
    }
    if (lib_arch.code == arch_array[i].code) {
      lib_arch.compat_class = arch_array[i].compat_class;
      lib_arch.name         = arch_array[i].name;
    }
  }

  assert(running_arch_index != -1,
         "Didn't find running architecture code (running_arch_code) in arch_array");
  if (running_arch_index == -1) {
    // Even though running architecture detection failed, we may still
    // continue and report the dlerror() message.
    return NULL;
  }

  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
    if (lib_arch.name != NULL) {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
                 " (Possible cause: can't load %s .so on a %s platform)",
                 lib_arch.name, arch_array[running_arch_index].name);
    } else {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s platform)",
                 lib_arch.code, arch_array[running_arch_index].name);
    }
    return NULL;
  }

  if (lib_arch.endianness != arch_array[running_arch_index].endianness) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: endianness mismatch)");
    return NULL;
  }

  // ELF file class/capacity : 0 - invalid, 1 - 32bit, 2 - 64bit
  if (lib_arch.elf_class > 2 || lib_arch.elf_class < 1) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: invalid ELF file class)");
    return NULL;
  }

  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1,
               " (Possible cause: architecture word width mismatch, can't load %d-bit .so on a %d-bit platform)",
               (int) lib_arch.elf_class * 32, arch_array[running_arch_index].elf_class * 32);
    return NULL;
  }

  return NULL;
}
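
// On an architecture mismatch the caller's ebuf ends up holding the dlerror()
// text plus one of the diagnostics composed above, e.g. (illustrative):
//
//   ".../libfoo.so: wrong ELF class: ELFCLASS32 (Possible cause: architecture
//    word width mismatch, can't load 32-bit .so on a 64-bit platform)"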

void * os::Linux::dlopen_helper(const char *filename, char *ebuf,
                                int ebuflen) {
  void * result = ::dlopen(filename, RTLD_LAZY);
  if (result == NULL) {
    const char* error_report = ::dlerror();
    if (error_report == NULL) {
      error_report = "dlerror returned no error description";
    }
    if (ebuf != NULL && ebuflen > 0) {
      ::strncpy(ebuf, error_report, ebuflen-1);
      ebuf[ebuflen-1] = '\0';
    }
    Events::log_dll_message(NULL, "Loading shared library %s failed, %s", filename, error_report);
    log_info(os)("shared library load of %s failed, %s", filename, error_report);
  } else {
    Events::log_dll_message(NULL, "Loaded shared library %s", filename);
    log_info(os)("shared library load of %s was successful", filename);
  }
  return result;
}

void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,
                                       int ebuflen) {
  void * result = NULL;
  if (LoadExecStackDllInVMThread) {
    result = dlopen_helper(filename, ebuf, ebuflen);
  }

  // Since 7019808, libjvm.so is linked with -noexecstack. If the VM loads a
  // library that requires an executable stack, or which does not have this
  // stack attribute set, dlopen changes the stack attribute to executable. The
  // read protection of the guard pages gets lost.
  //
  // Need to check _stack_is_executable again as multiple VM_LinuxDllLoad
  // may have been queued at the same time.

  if (!_stack_is_executable) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
      StackOverflow* overflow_state = jt->stack_overflow_state();
      if (!overflow_state->stack_guard_zone_unused() &&  // Stack not yet fully initialized
          overflow_state->stack_guards_enabled()) {      // No pending stack overflow exceptions
        if (!os::guard_memory((char *)jt->stack_end(), StackOverflow::stack_guard_zone_size())) {
          warning("Attempt to reguard stack yellow zone failed.");
        }
      }
    }
  }

  return result;
}

const char* os::Linux::dll_path(void* lib) {
  struct link_map *lmap;
  const char* l_path = NULL;
  assert(lib != NULL, "dll_path parameter must not be NULL");

  int res_dli = ::dlinfo(lib, RTLD_DI_LINKMAP, &lmap);
  if (res_dli == 0) {
    l_path = lmap->l_name;
  }
  return l_path;
}

static bool _print_ascii_file(const char* filename, outputStream* st, const char* hdr = NULL) {
  int fd = ::open(filename, O_RDONLY);
  if (fd == -1) {
    return false;
  }

  if (hdr != NULL) {
    st->print_cr("%s", hdr);
  }

  char buf[33];
  int bytes;
  buf[32] = '\0';
  while ((bytes = ::read(fd, buf, sizeof(buf)-1)) > 0) {
    st->print_raw(buf, bytes);
  }

  ::close(fd);

  return true;
}

static void _print_ascii_file_h(const char* header, const char* filename, outputStream* st, bool same_line = true) {
  st->print("%s:%c", header, same_line ? ' ' : '\n');
  if (!_print_ascii_file(filename, st)) {
    st->print_cr("<Not Available>");
  }
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");

  char fname[32];
  pid_t pid = os::Linux::gettid();

  jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);

  if (!_print_ascii_file(fname, st)) {
    st->print_cr("Can not get library information for pid = %d", pid);
  }
}

struct loaded_modules_info_param {
  os::LoadedModulesCallbackFunc callback;
  void *param;
};

static int dl_iterate_callback(struct dl_phdr_info *info, size_t size, void *data) {
  if ((info->dlpi_name == NULL) || (*info->dlpi_name == '\0')) {
    return 0;
  }

  struct loaded_modules_info_param *callback_param = reinterpret_cast<struct loaded_modules_info_param *>(data);
  address base = NULL;
  address top = NULL;
  for (int idx = 0; idx < info->dlpi_phnum; idx++) {
    const ElfW(Phdr) *phdr = info->dlpi_phdr + idx;
    if (phdr->p_type == PT_LOAD) {
      address raw_phdr_base = reinterpret_cast<address>(info->dlpi_addr + phdr->p_vaddr);

      address phdr_base = align_down(raw_phdr_base, phdr->p_align);
      if ((base == NULL) || (base > phdr_base)) {
        base = phdr_base;
      }

      address phdr_top = align_up(raw_phdr_base + phdr->p_memsz, phdr->p_align);
      if ((top == NULL) || (top < phdr_top)) {
        top = phdr_top;
      }
    }
  }

  return callback_param->callback(info->dlpi_name, base, top, callback_param->param);
}

int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  struct loaded_modules_info_param callback_param = {callback, param};
  return dl_iterate_phdr(&dl_iterate_callback, &callback_param);
}
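
// A minimal sketch of a callback for os::get_loaded_modules_info() (the
// callback name and output format are hypothetical):
//
//   static int print_module(const char* name, address base, address top, void* param) {
//     tty->print_cr("%s [" PTR_FORMAT ", " PTR_FORMAT ")", name, p2i(base), p2i(top));
//     return 0;  // returning non-zero stops the iteration
//   }
//   ...
//   os::get_loaded_modules_info(print_module, NULL);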

void os::print_os_info_brief(outputStream* st) {
  os::Linux::print_distro_info(st);

  os::Posix::print_uname_info(st);

  os::Linux::print_libversion_info(st);
}

void os::print_os_info(outputStream* st) {
  st->print_cr("OS:");

  os::Linux::print_distro_info(st);

  os::Posix::print_uname_info(st);

  os::Linux::print_uptime_info(st);

  // Print warning if unsafe chroot environment detected
  if (unsafe_chroot_detected) {
    st->print_cr("WARNING!! %s", unstable_chroot_error);
  }

  os::Linux::print_libversion_info(st);

  os::Posix::print_rlimit_info(st);

  os::Posix::print_load_average(st);
  st->cr();

  os::Linux::print_system_memory_info(st);
  st->cr();

  os::Linux::print_process_memory_info(st);
  st->cr();

  os::Linux::print_proc_sys_info(st);
  st->cr();

  if (os::Linux::print_ld_preload_file(st)) {
    st->cr();
  }

  if (os::Linux::print_container_info(st)) {
    st->cr();
  }

  VM_Version::print_platform_virtualization_info(st);

  os::Linux::print_steal_info(st);
}

// Try to identify popular distros.
// Most Linux distributions have a /etc/XXX-release file, which contains
// the OS version string. Newer Linux distributions have a /etc/lsb-release
// file that also contains the OS version string. Some have more than one
// /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and
// /etc/redhat-release), so the order is important.
// Any Linux that is based on Red Hat (e.g. Oracle, Mandrake, Sun JDS...) has
// its own specific XXX-release file as well as a redhat-release file.
// Because of this the XXX-release file needs to be searched for before the
// redhat-release file.
// Since Red Hat and SuSE have an lsb-release file that is not very descriptive,
// the search for redhat-release / SuSE-release needs to be before lsb-release.
// Since the lsb-release file is the new standard it needs to be searched
// before the older style release files.
// Searching system-release (Red Hat) and os-release (other Linuxes) is the
// next-to-last resort. The os-release file is a new standard that contains
// distribution information, while the system-release file seems to be an old
// standard that has been replaced by the lsb-release and os-release files.
// Searching for the debian_version file is the last resort. It contains
// an informative string like "6.0.6" or "wheezy/sid". Because of this
// "Debian " is printed before the contents of the debian_version file.

const char* distro_files[] = {
  "/etc/oracle-release",
  "/etc/mandriva-release",
  "/etc/mandrake-release",
  "/etc/sun-release",
  "/etc/redhat-release",
  "/etc/SuSE-release",
  "/etc/lsb-release",
  "/etc/turbolinux-release",
  "/etc/gentoo-release",
  "/etc/ltib-release",
  "/etc/angstrom-version",
  "/etc/system-release",
  "/etc/os-release",
  NULL };

void os::Linux::print_distro_info(outputStream* st) {
  for (int i = 0;; i++) {
    const char* file = distro_files[i];
    if (file == NULL) {
      break;  // done
    }
    // If file prints, we found it.
    if (_print_ascii_file(file, st)) {
      return;
    }
  }

  if (file_exists("/etc/debian_version")) {
    st->print("Debian ");
    _print_ascii_file("/etc/debian_version", st);
  } else {
    st->print_cr("Linux");
  }
}
2061
2062
static void parse_os_info_helper(FILE* fp, char* distro, size_t length, bool get_first_line) {
2063
char buf[256];
2064
while (fgets(buf, sizeof(buf), fp)) {
2065
// Edit out extra stuff in expected format
2066
if (strstr(buf, "DISTRIB_DESCRIPTION=") != NULL || strstr(buf, "PRETTY_NAME=") != NULL) {
2067
char* ptr = strstr(buf, "\""); // the name is in quotes
2068
if (ptr != NULL) {
2069
ptr++; // go beyond first quote
2070
char* nl = strchr(ptr, '\"');
2071
if (nl != NULL) *nl = '\0';
2072
strncpy(distro, ptr, length);
2073
} else {
2074
ptr = strstr(buf, "=");
2075
ptr++; // go beyond equals then
2076
char* nl = strchr(ptr, '\n');
2077
if (nl != NULL) *nl = '\0';
2078
strncpy(distro, ptr, length);
2079
}
2080
return;
2081
} else if (get_first_line) {
2082
char* nl = strchr(buf, '\n');
2083
if (nl != NULL) *nl = '\0';
2084
strncpy(distro, buf, length);
2085
return;
2086
}
2087
}
2088
// print last line and close
2089
char* nl = strchr(buf, '\n');
2090
if (nl != NULL) *nl = '\0';
2091
strncpy(distro, buf, length);
2092
}
2093
2094
static void parse_os_info(char* distro, size_t length, const char* file) {
2095
FILE* fp = fopen(file, "r");
2096
if (fp != NULL) {
2097
// if suse format, print out first line
2098
bool get_first_line = (strcmp(file, "/etc/SuSE-release") == 0);
2099
parse_os_info_helper(fp, distro, length, get_first_line);
2100
fclose(fp);
2101
}
2102
}
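
// Example input handled by the helpers above (illustrative): a line such as
//   PRETTY_NAME="Ubuntu 20.04.5 LTS"
// in /etc/os-release yields the quoted value, so 'distro' receives
// "Ubuntu 20.04.5 LTS".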
2103
2104
void os::get_summary_os_info(char* buf, size_t buflen) {
2105
for (int i = 0;; i++) {
2106
const char* file = distro_files[i];
2107
if (file == NULL) {
2108
break; // ran out of distro_files
2109
}
2110
if (file_exists(file)) {
2111
parse_os_info(buf, buflen, file);
2112
return;
2113
}
2114
}
2115
// special case for debian
2116
if (file_exists("/etc/debian_version")) {
2117
strncpy(buf, "Debian ", buflen);
2118
if (buflen > 7) {
2119
parse_os_info(&buf[7], buflen-7, "/etc/debian_version");
2120
}
2121
} else {
2122
strncpy(buf, "Linux", buflen);
2123
}
2124
}
2125
2126
void os::Linux::print_libversion_info(outputStream* st) {
2127
// libc, pthread
2128
st->print("libc: ");
2129
st->print("%s ", os::Linux::libc_version());
2130
st->print("%s ", os::Linux::libpthread_version());
2131
st->cr();
2132
}

void os::Linux::print_proc_sys_info(outputStream* st) {
  _print_ascii_file_h("/proc/sys/kernel/threads-max (system-wide limit on the number of threads)",
                      "/proc/sys/kernel/threads-max", st);
  _print_ascii_file_h("/proc/sys/vm/max_map_count (maximum number of memory map areas a process may have)",
                      "/proc/sys/vm/max_map_count", st);
  _print_ascii_file_h("/proc/sys/kernel/pid_max (system-wide limit on number of process identifiers)",
                      "/proc/sys/kernel/pid_max", st);
}

void os::Linux::print_system_memory_info(outputStream* st) {
  _print_ascii_file_h("/proc/meminfo", "/proc/meminfo", st, false);
  st->cr();

  // some information regarding THPs; for details see
  // https://www.kernel.org/doc/Documentation/vm/transhuge.txt
  _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/enabled",
                      "/sys/kernel/mm/transparent_hugepage/enabled", st);
  _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/defrag (defrag/compaction efforts parameter)",
                      "/sys/kernel/mm/transparent_hugepage/defrag", st);
}

bool os::Linux::query_process_memory_info(os::Linux::meminfo_t* info) {
  FILE* f = ::fopen("/proc/self/status", "r");
  const int num_values = sizeof(os::Linux::meminfo_t) / sizeof(size_t);
  int num_found = 0;
  char buf[256];
  info->vmsize = info->vmpeak = info->vmrss = info->vmhwm = info->vmswap =
      info->rssanon = info->rssfile = info->rssshmem = -1;
  if (f != NULL) {
    while (::fgets(buf, sizeof(buf), f) != NULL && num_found < num_values) {
      if ( (info->vmsize == -1   && sscanf(buf, "VmSize: " SSIZE_FORMAT " kB", &info->vmsize) == 1) ||
           (info->vmpeak == -1   && sscanf(buf, "VmPeak: " SSIZE_FORMAT " kB", &info->vmpeak) == 1) ||
           (info->vmswap == -1   && sscanf(buf, "VmSwap: " SSIZE_FORMAT " kB", &info->vmswap) == 1) ||
           (info->vmhwm == -1    && sscanf(buf, "VmHWM: " SSIZE_FORMAT " kB", &info->vmhwm) == 1) ||
           (info->vmrss == -1    && sscanf(buf, "VmRSS: " SSIZE_FORMAT " kB", &info->vmrss) == 1) ||
           (info->rssanon == -1  && sscanf(buf, "RssAnon: " SSIZE_FORMAT " kB", &info->rssanon) == 1) ||  // Needs Linux 4.5
           (info->rssfile == -1  && sscanf(buf, "RssFile: " SSIZE_FORMAT " kB", &info->rssfile) == 1) ||  // Needs Linux 4.5
           (info->rssshmem == -1 && sscanf(buf, "RssShmem: " SSIZE_FORMAT " kB", &info->rssshmem) == 1)   // Needs Linux 4.5
           )
      {
        num_found ++;
      }
    }
    fclose(f);
    return true;
  }
  return false;
}
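
// The /proc/self/status lines matched above look like this (values in kB;
// the numbers are illustrative):
//
//   VmPeak:   2468480 kB
//   VmSize:   2468480 kB
//   VmHWM:      58220 kB
//   VmRSS:      58220 kB
//   RssAnon:    33204 kB   (Linux >= 4.5)
//   VmSwap:         0 kB   (Linux >= 2.6.34)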

#ifdef __GLIBC__
// For Glibc, print a one-liner with the malloc tunables.
// Most important and popular is MALLOC_ARENA_MAX, but we are
// thorough and print them all.
static void print_glibc_malloc_tunables(outputStream* st) {
  static const char* var[] = {
      // the new variant
      "GLIBC_TUNABLES",
      // legacy variants
      "MALLOC_CHECK_", "MALLOC_TOP_PAD_", "MALLOC_PERTURB_",
      "MALLOC_MMAP_THRESHOLD_", "MALLOC_TRIM_THRESHOLD_",
      "MALLOC_MMAP_MAX_", "MALLOC_ARENA_TEST", "MALLOC_ARENA_MAX",
      NULL};
  st->print("glibc malloc tunables: ");
  bool printed = false;
  for (int i = 0; var[i] != NULL; i ++) {
    const char* const val = ::getenv(var[i]);
    if (val != NULL) {
      st->print("%s%s=%s", (printed ? ", " : ""), var[i], val);
      printed = true;
    }
  }
  if (!printed) {
    st->print("(default)");
  }
}
#endif // __GLIBC__
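
// Example of the resulting one-liner when a tunable is set in the
// environment (illustrative):
//
//   glibc malloc tunables: GLIBC_TUNABLES=glibc.malloc.arena_max=2, MALLOC_ARENA_MAX=2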

void os::Linux::print_process_memory_info(outputStream* st) {

  st->print_cr("Process Memory:");

  // Print virtual and resident set size; peak values; swap; and for
  // rss its components if the kernel is recent enough.
  meminfo_t info;
  if (query_process_memory_info(&info)) {
    st->print_cr("Virtual Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", info.vmsize, info.vmpeak);
    st->print("Resident Set Size: " SSIZE_FORMAT "K (peak: " SSIZE_FORMAT "K)", info.vmrss, info.vmhwm);
    if (info.rssanon != -1) {  // requires kernel >= 4.5
      st->print(" (anon: " SSIZE_FORMAT "K, file: " SSIZE_FORMAT "K, shmem: " SSIZE_FORMAT "K)",
                info.rssanon, info.rssfile, info.rssshmem);
    }
    st->cr();
    if (info.vmswap != -1) {  // requires kernel >= 2.6.34
      st->print_cr("Swapped out: " SSIZE_FORMAT "K", info.vmswap);
    }
  } else {
    st->print_cr("Could not open /proc/self/status to get process memory related information");
  }

  // glibc only:
  // - Print outstanding allocations using mallinfo
  // - Print glibc tunables
#ifdef __GLIBC__
  size_t total_allocated = 0;
  size_t free_retained = 0;
  bool might_have_wrapped = false;
  if (_mallinfo2 != NULL) {
    struct glibc_mallinfo2 mi = _mallinfo2();
    total_allocated = mi.uordblks + mi.hblkhd;
    free_retained = mi.fordblks;
  } else if (_mallinfo != NULL) {
    // mallinfo is an old API. Member names mean next to nothing and, beyond that, are 32-bit signed.
    // So for larger footprints the values may have wrapped around. We try to detect this here: if the
    // whole process resident set size is smaller than 4G, the malloc footprint has to be less than that
    // and the numbers are reliable.
    struct glibc_mallinfo mi = _mallinfo();
    total_allocated = (size_t)(unsigned)mi.uordblks + (size_t)(unsigned)mi.hblkhd;
    free_retained = (size_t)(unsigned)mi.fordblks;
    // Since mallinfo members are int, glibc values may have wrapped. Warn about this.
    might_have_wrapped = (info.vmrss * K) > UINT_MAX && (info.vmrss * K) > (total_allocated + UINT_MAX);
  }
  if (_mallinfo2 != NULL || _mallinfo != NULL) {
    st->print_cr("C-Heap outstanding allocations: " SIZE_FORMAT "K, retained: " SIZE_FORMAT "K%s",
                 total_allocated / K, free_retained / K,
                 might_have_wrapped ? " (may have wrapped)" : "");
  }
  // Tunables
  print_glibc_malloc_tunables(st);
  st->cr();
#endif
}

bool os::Linux::print_ld_preload_file(outputStream* st) {
  return _print_ascii_file("/etc/ld.so.preload", st, "/etc/ld.so.preload:");
}

void os::Linux::print_uptime_info(outputStream* st) {
  struct sysinfo sinfo;
  int ret = sysinfo(&sinfo);
  if (ret == 0) {
    os::print_dhm(st, "OS uptime:", (long) sinfo.uptime);
  }
}

static void print_container_helper(outputStream* st, jlong j, const char* metrics) {
  st->print("%s: ", metrics);
  if (j > 0) {
    if (j >= 1024) {
      st->print_cr(UINT64_FORMAT " k", uint64_t(j) / 1024);
    } else {
      st->print_cr(UINT64_FORMAT, uint64_t(j));
    }
  } else {
    st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
  }
}
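
// For example, a memory limit of 2 GiB (j == 2147483648) prints as
// "memory_limit_in_bytes: 2097152 k", while a typical j == -1 prints
// "unlimited" and j == OSCONTAINER_ERROR prints "not supported".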

bool os::Linux::print_container_info(outputStream* st) {
  if (!OSContainer::is_containerized()) {
    st->print_cr("container information not found.");
    return false;
  }

  st->print_cr("container (cgroup) information:");

  const char *p_ct = OSContainer::container_type();
  st->print_cr("container_type: %s", p_ct != NULL ? p_ct : "not supported");

  char *p = OSContainer::cpu_cpuset_cpus();
  st->print_cr("cpu_cpuset_cpus: %s", p != NULL ? p : "not supported");
  free(p);

  p = OSContainer::cpu_cpuset_memory_nodes();
  st->print_cr("cpu_memory_nodes: %s", p != NULL ? p : "not supported");
  free(p);

  int i = OSContainer::active_processor_count();
  st->print("active_processor_count: ");
  if (i > 0) {
    if (ActiveProcessorCount > 0) {
      st->print_cr("%d, but overridden by -XX:ActiveProcessorCount %d", i, ActiveProcessorCount);
    } else {
      st->print_cr("%d", i);
    }
  } else {
    st->print_cr("not supported");
  }

  i = OSContainer::cpu_quota();
  st->print("cpu_quota: ");
  if (i > 0) {
    st->print_cr("%d", i);
  } else {
    st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no quota");
  }

  i = OSContainer::cpu_period();
  st->print("cpu_period: ");
  if (i > 0) {
    st->print_cr("%d", i);
  } else {
    st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no period");
  }

  i = OSContainer::cpu_shares();
  st->print("cpu_shares: ");
  if (i > 0) {
    st->print_cr("%d", i);
  } else {
    st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no shares");
  }

  print_container_helper(st, OSContainer::memory_limit_in_bytes(), "memory_limit_in_bytes");
  print_container_helper(st, OSContainer::memory_and_swap_limit_in_bytes(), "memory_and_swap_limit_in_bytes");
  print_container_helper(st, OSContainer::memory_soft_limit_in_bytes(), "memory_soft_limit_in_bytes");
  print_container_helper(st, OSContainer::memory_usage_in_bytes(), "memory_usage_in_bytes");
  print_container_helper(st, OSContainer::memory_max_usage_in_bytes(), "memory_max_usage_in_bytes");

  jlong j = OSContainer::pids_max();
  st->print("maximum number of tasks: ");
  if (j > 0) {
    st->print_cr(JLONG_FORMAT, j);
  } else {
    st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
  }

  j = OSContainer::pids_current();
  st->print("current number of tasks: ");
  if (j > 0) {
    st->print_cr(JLONG_FORMAT, j);
  } else {
    if (j == OSCONTAINER_ERROR) {
      st->print_cr("not supported");
    }
  }

  return true;
}

void os::Linux::print_steal_info(outputStream* st) {
  if (has_initial_tick_info) {
    CPUPerfTicks pticks;
    bool res = os::Linux::get_tick_information(&pticks, -1);

    if (res && pticks.has_steal_ticks) {
      uint64_t steal_ticks_difference = pticks.steal - initial_steal_ticks;
      uint64_t total_ticks_difference = pticks.total - initial_total_ticks;
      double steal_ticks_perc = 0.0;
      if (total_ticks_difference != 0) {
        steal_ticks_perc = (double) steal_ticks_difference / total_ticks_difference;
      }
      st->print_cr("Steal ticks since vm start: " UINT64_FORMAT, steal_ticks_difference);
      st->print_cr("Steal ticks percentage since vm start:%7.3f", steal_ticks_perc);
    }
  }
}

void os::print_memory_info(outputStream* st) {

  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // values in struct sysinfo are "unsigned long"
  struct sysinfo si;
  sysinfo(&si);

  st->print(", physical " UINT64_FORMAT "k",
            os::physical_memory() >> 10);
  st->print("(" UINT64_FORMAT "k free)",
            os::available_memory() >> 10);
  st->print(", swap " UINT64_FORMAT "k",
            ((jlong)si.totalswap * si.mem_unit) >> 10);
  st->print("(" UINT64_FORMAT "k free)",
            ((jlong)si.freeswap * si.mem_unit) >> 10);
  st->cr();
  st->print("Page Sizes: ");
  _page_sizes.print_on(st);
  st->cr();
}
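
// Example of the resulting line in an hs_err file (numbers illustrative):
//
//   Memory: 4k page, physical 16303780k(10771304k free), swap 2097148k(2097148k free)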

// Print the first "model name" line and the first "flags" line
// that we find and nothing more. We assume "model name" comes
// before "flags" so if we find a second "model name", then the
// "flags" field is considered missing.
static bool print_model_name_and_flags(outputStream* st, char* buf, size_t buflen) {
#if defined(IA32) || defined(AMD64)
  // Other platforms have less repetitive cpuinfo files
  FILE *fp = fopen("/proc/cpuinfo", "r");
  if (fp) {
    bool model_name_printed = false;
    while (!feof(fp)) {
      if (fgets(buf, buflen, fp)) {
        // Assume model name comes before flags
        if (strstr(buf, "model name") != NULL) {
          if (!model_name_printed) {
            st->print_raw("CPU Model and flags from /proc/cpuinfo:\n");
            st->print_raw(buf);
            model_name_printed = true;
          } else {
            // model name printed but not flags? Odd, just return
            fclose(fp);
            return true;
          }
        }
        // print the flags line too
        if (strstr(buf, "flags") != NULL) {
          st->print_raw(buf);
          fclose(fp);
          return true;
        }
      }
    }
    fclose(fp);
  }
#endif // x86 platforms
  return false;
}
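
// The two /proc/cpuinfo lines picked up above look like this on x86
// (content illustrative):
//
//   model name : Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz
//   flags      : fpu vme de pse tsc msr pae ...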

// additional information about CPU e.g. available frequency ranges
static void print_sys_devices_cpu_info(outputStream* st, char* buf, size_t buflen) {
  _print_ascii_file_h("Online cpus", "/sys/devices/system/cpu/online", st);
  _print_ascii_file_h("Offline cpus", "/sys/devices/system/cpu/offline", st);

  if (ExtensiveErrorReports) {
    // cache related info (cpu 0, should be similar for other CPUs)
    for (unsigned int i = 0; i < 10; i++) {  // handle max. 10 cache entries
      char hbuf_level[60];
      char hbuf_type[60];
      char hbuf_size[60];
      char hbuf_coherency_line_size[80];
      snprintf(hbuf_level, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/level", i);
      snprintf(hbuf_type, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/type", i);
      snprintf(hbuf_size, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/size", i);
      snprintf(hbuf_coherency_line_size, 80, "/sys/devices/system/cpu/cpu0/cache/index%u/coherency_line_size", i);
      if (file_exists(hbuf_level)) {
        _print_ascii_file_h("cache level", hbuf_level, st);
        _print_ascii_file_h("cache type", hbuf_type, st);
        _print_ascii_file_h("cache size", hbuf_size, st);
        _print_ascii_file_h("cache coherency line size", hbuf_coherency_line_size, st);
      }
    }
  }

  // we miss the cpufreq entries on Power and s390x
#if defined(IA32) || defined(AMD64)
  _print_ascii_file_h("BIOS frequency limitation", "/sys/devices/system/cpu/cpu0/cpufreq/bios_limit", st);
  _print_ascii_file_h("Frequency switch latency (ns)", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_transition_latency", st);
  _print_ascii_file_h("Available cpu frequencies", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", st);
  // min and max should be in the Available range but still print them (not all info might be available for all kernels)
  if (ExtensiveErrorReports) {
    _print_ascii_file_h("Maximum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", st);
    _print_ascii_file_h("Minimum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq", st);
    _print_ascii_file_h("Current cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", st);
  }
  // governors are power schemes, see https://wiki.archlinux.org/index.php/CPU_frequency_scaling
  if (ExtensiveErrorReports) {
    _print_ascii_file_h("Available governors", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors", st);
  }
  _print_ascii_file_h("Current governor", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", st);
  // Core performance boost, see https://www.kernel.org/doc/Documentation/cpu-freq/boost.txt
  // Raise operating frequency of some cores in a multi-core package if certain conditions apply, e.g.
  // whole chip is not fully utilized
  _print_ascii_file_h("Core performance/turbo boost", "/sys/devices/system/cpu/cpufreq/boost", st);
#endif
}

void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Only print the model name if the platform provides this as a summary
  if (!print_model_name_and_flags(st, buf, buflen)) {
    _print_ascii_file_h("/proc/cpuinfo", "/proc/cpuinfo", st, false);
  }
  st->cr();
  print_sys_devices_cpu_info(st, buf, buflen);
}

#if defined(AMD64) || defined(IA32) || defined(X32)
const char* search_string = "model name";
#elif defined(M68K)
const char* search_string = "CPU";
#elif defined(PPC64)
const char* search_string = "cpu";
#elif defined(S390)
const char* search_string = "machine =";
#elif defined(SPARC)
const char* search_string = "cpu";
#else
const char* search_string = "Processor";
#endif

// Parses the cpuinfo file for a string representing the model name.
void os::get_summary_cpu_info(char* cpuinfo, size_t length) {
  FILE* fp = fopen("/proc/cpuinfo", "r");
  if (fp != NULL) {
    while (!feof(fp)) {
      char buf[256];
      if (fgets(buf, sizeof(buf), fp)) {
        char* start = strstr(buf, search_string);
        if (start != NULL) {
          char *ptr = start + strlen(search_string);
          char *end = buf + strlen(buf);
          while (ptr != end) {
            // skip whitespace and colon for the rest of the name.
            if (*ptr != ' ' && *ptr != '\t' && *ptr != ':') {
              break;
            }
            ptr++;
          }
          if (ptr != end) {
            // reasonable string, get rid of newline and keep the rest
            char* nl = strchr(buf, '\n');
            if (nl != NULL) *nl = '\0';
            strncpy(cpuinfo, ptr, length);
            fclose(fp);
            return;
          }
        }
      }
    }
    fclose(fp);
  }
  // cpuinfo not found or parsing failed, just print generic string. The entire
  // /proc/cpuinfo file will be printed later in the file (or enough of it for x86)
#if defined(AARCH64)
  strncpy(cpuinfo, "AArch64", length);
#elif defined(AMD64)
  strncpy(cpuinfo, "x86_64", length);
#elif defined(ARM)  // Order wrt. AARCH64 is relevant!
  strncpy(cpuinfo, "ARM", length);
#elif defined(IA32)
  strncpy(cpuinfo, "x86_32", length);
#elif defined(IA64)
  strncpy(cpuinfo, "IA64", length);
#elif defined(PPC)
  strncpy(cpuinfo, "PPC64", length);
#elif defined(S390)
  strncpy(cpuinfo, "S390", length);
#elif defined(SPARC)
  strncpy(cpuinfo, "sparcv9", length);
#elif defined(ZERO_LIBARCH)
  strncpy(cpuinfo, ZERO_LIBARCH, length);
#else
  strncpy(cpuinfo, "unknown", length);
#endif
}

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  char dli_fname[MAXPATHLEN];
  dli_fname[0] = '\0';
  bool ret = dll_address_to_library_name(
                                         CAST_FROM_FN_PTR(address, os::jvm_path),
                                         dli_fname, sizeof(dli_fname), NULL);
  assert(ret, "cannot locate libjvm");
  char *rp = NULL;
  if (ret && dli_fname[0] != '\0') {
    rp = os::Posix::realpath(dli_fname, buf, buflen);
  }
  if (rp == NULL) {
    return;
  }

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        if (p == NULL) {
          return;
        }
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        rp = os::Posix::realpath(java_home_var, buf, buflen);
        if (rp == NULL) {
          return;
        }

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib");
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib");
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = os::Posix::realpath(dli_fname, buf, buflen);
          if (rp == NULL) {
            return;
          }
        }
      }
    }
  }

  strncpy(saved_jvm_path, buf, MAXPATHLEN);
  saved_jvm_path[MAXPATHLEN - 1] = '\0';
}

void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

int os::vm_page_size() {
  // Seems redundant as all get out
  assert(os::Linux::page_size() != -1, "must call os::init");
  return os::Linux::page_size();
}

// Linux allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(os::Linux::page_size() != -1, "must call os::init");
  return os::Linux::page_size();
}

// Rationale behind this function:
// current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
// mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
// samples for JITted code. Here we create private executable mapping over the code cache
// and then we can use standard (well, almost, as mapping can change) way to provide
// info for the reporting script by storing timestamp and location of symbol
void linux_wrap_code(char* base, size_t size) {
  static volatile jint cnt = 0;

  if (!UseOprofile) {
    return;
  }

  char buf[PATH_MAX+1];
  int num = Atomic::add(&cnt, 1);

  snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
           os::get_temp_directory(), os::current_process_id(), num);
  unlink(buf);

  int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);

  if (fd != -1) {
    off_t rv = ::lseek(fd, size-2, SEEK_SET);
    if (rv != (off_t)-1) {
      if (::write(fd, "", 1) == 1) {
        mmap(base, size,
             PROT_READ|PROT_WRITE|PROT_EXEC,
             MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
      }
    }
    ::close(fd);
    unlink(buf);
  }
}

static bool recoverable_mmap_error(int err) {
  // See if the error is one we can let the caller handle. This
  // list of errno values comes from JBS-6843484. I can't find a
  // Linux man page that documents this specific set of errno
  // values so while this list currently matches Solaris, it may
  // change as we gain experience with this failure mode.
  switch (err) {
  case EBADF:
  case EINVAL:
  case ENOTSUP:
    // let the caller deal with these errors
    return true;

  default:
    // Any remaining errors on this OS can cause our reserved mapping
    // to be lost. That can cause confusion where different data
    // structures think they have the same memory mapped. The worst
    // scenario is if both the VM and a library think they have the
    // same memory mapped.
    return false;
  }
}

static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", p2i(addr), size, exec,
          os::strerror(err), err);
}

static void warn_fail_commit_memory(char* addr, size_t size,
                                    size_t alignment_hint, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", p2i(addr), size,
          alignment_hint, exec, os::strerror(err), err);
}

// NOTE: The Linux kernel does not really reserve the pages for us.
// All it does is check whether there are enough free pages
// left at the time of mmap(). This could be a potential
// problem.
int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                     MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
  if (res != (uintptr_t) MAP_FAILED) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, size);
    }
    return 0;
  }

  int err = errno;  // save errno from mmap() call above

  if (!recoverable_mmap_error(err)) {
    warn_fail_commit_memory(addr, size, exec, err);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory.");
  }

  return err;
}
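
// Background note (general Linux behavior, not specific to this VM): whether
// the mmap() above can over-commit is governed by the kernel's
// /proc/sys/vm/overcommit_memory setting (0 = heuristic, 1 = always allow,
// 2 = strict accounting), so a successful commit does not guarantee that the
// pages can actually be backed when first touched.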

bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
  return os::Linux::commit_memory_impl(addr, size, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Linux::commit_memory_impl(addr, size, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, size, exec, err);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}

// Define MAP_HUGETLB here so we can build HotSpot on old systems.
#ifndef MAP_HUGETLB
  #define MAP_HUGETLB 0x40000
#endif

// If mmap flags are set with MAP_HUGETLB and the system supports multiple
// huge page sizes, flag bits [26:31] can be used to encode the log2 of the
// desired huge page size. Otherwise, the system's default huge page size will be used.
// See mmap(2) man page for more info (since Linux 3.8).
// https://lwn.net/Articles/533499/
#ifndef MAP_HUGE_SHIFT
  #define MAP_HUGE_SHIFT 26
#endif
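
// For example, to request 2M explicit huge pages where the kernel supports
// them, the log2 of the page size (21) would be encoded into the flags
// (a sketch, not code used verbatim below):
//
//   int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | (21 << MAP_HUGE_SHIFT);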

// Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
#ifndef MADV_HUGEPAGE
  #define MADV_HUGEPAGE 14
#endif

int os::Linux::commit_memory_impl(char* addr, size_t size,
                                  size_t alignment_hint, bool exec) {
  int err = os::Linux::commit_memory_impl(addr, size, exec);
  if (err == 0) {
    realign_memory(addr, size, alignment_hint);
  }
  return err;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}

void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
    // We don't check the return value: madvise(MADV_HUGEPAGE) may not
    // be supported or the memory may already be backed by huge pages.
    ::madvise(addr, bytes, MADV_HUGEPAGE);
  }
}

void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  // This method works by doing an mmap over an existing mmapping and effectively discarding
  // the existing pages. However it won't work for SHM-based large pages that cannot be
  // uncommitted at all. We don't do anything in this case to avoid creating a segment with
  // small pages on top of the SHM segment. This method always works for small pages, so we
  // allow that in any case.
  if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
    commit_memory(addr, bytes, alignment_hint, !ExecMem);
  }
}

void os::numa_make_global(char *addr, size_t bytes) {
  Linux::numa_interleave_memory(addr, bytes);
}

// Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
// bind policy to MPOL_PREFERRED for the current thread.
#define USE_MPOL_PREFERRED 0

void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  // To make NUMA and large pages more robust when both enabled, we need to ease
  // the requirements on where the memory should be allocated. MPOL_BIND is the
  // default policy and it will force memory to be allocated on the specified
  // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
  // the specified node, but will not force it. Using this policy will prevent
  // getting SIGBUS when trying to allocate large pages on NUMA nodes with no
  // free large pages.
  Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
  Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
}

bool os::numa_topology_changed() { return false; }

size_t os::numa_get_groups_num() {
  // Return just the number of nodes in which it's possible to allocate memory
  // (in numa terminology, configured nodes).
  return Linux::numa_num_configured_nodes();
}

int os::numa_get_group_id() {
  int cpu_id = Linux::sched_getcpu();
  if (cpu_id != -1) {
    int lgrp_id = Linux::get_node_by_cpu(cpu_id);
    if (lgrp_id != -1) {
      return lgrp_id;
    }
  }
  return 0;
}
2910
2911
int os::numa_get_group_id_for_address(const void* address) {
2912
void** pages = const_cast<void**>(&address);
2913
int id = -1;
2914
2915
if (os::Linux::numa_move_pages(0, 1, pages, NULL, &id, 0) == -1) {
2916
return -1;
2917
}
2918
if (id < 0) {
2919
return -1;
2920
}
2921
return id;
2922
}
2923
2924
int os::Linux::get_existing_num_nodes() {
2925
int node;
2926
int highest_node_number = Linux::numa_max_node();
2927
int num_nodes = 0;
2928
2929
// Get the total number of nodes in the system including nodes without memory.
2930
for (node = 0; node <= highest_node_number; node++) {
2931
if (is_node_in_existing_nodes(node)) {
2932
num_nodes++;
2933
}
2934
}
2935
return num_nodes;
2936
}

size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  int highest_node_number = Linux::numa_max_node();
  size_t i = 0;

  // Map all node ids in which it is possible to allocate memory. Also nodes are
  // not always consecutively available, i.e. available from 0 to the highest
  // node number. If the nodes have been bound explicitly using numactl membind,
  // then allocate memory from those nodes only.
  for (int node = 0; node <= highest_node_number; node++) {
    if (Linux::is_node_in_bound_nodes((unsigned int)node)) {
      ids[i++] = node;
    }
  }
  return i;
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  return end;
}

int os::Linux::sched_getcpu_syscall(void) {
  unsigned int cpu = 0;
  int retval = -1;

#if defined(IA32)
  #ifndef SYS_getcpu
    #define SYS_getcpu 318
  #endif
  retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
#elif defined(AMD64)
// Unfortunately we have to bring all these macros here from vsyscall.h
// to be able to compile on old Linux versions.
  #define __NR_vgetcpu 2
  #define VSYSCALL_START (-10UL << 20)
  #define VSYSCALL_SIZE 1024
  #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
  typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
  vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
  retval = vgetcpu(&cpu, NULL, NULL);
#endif

  return (retval == -1) ? retval : cpu;
}

void os::Linux::sched_getcpu_init() {
  // sched_getcpu() should be in libc.
  set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
                                  dlsym(RTLD_DEFAULT, "sched_getcpu")));

  // If it's not, try a direct syscall.
  if (sched_getcpu() == -1) {
    set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
                                    (void*)&sched_getcpu_syscall));
  }

  if (sched_getcpu() == -1) {
    vm_exit_during_initialization("getcpu(2) system call not supported by kernel");
  }
}

// Something to do with the numa-aware allocator needs these symbols
extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
extern "C" JNIEXPORT void numa_error(char *where) { }

// Handle request to load libnuma symbol version 1.1 (API v1). If it fails,
// load the symbol from the base version instead.
void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
  void *f = dlvsym(handle, name, "libnuma_1.1");
  if (f == NULL) {
    f = dlsym(handle, name);
  }
  return f;
}

// Handle request to load libnuma symbol version 1.2 (API v2) only.
// Return NULL if the symbol is not defined in this particular version.
void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) {
  return dlvsym(handle, name, "libnuma_1.2");
}

// Check numa dependent syscalls
static bool numa_syscall_check() {
  // NUMA APIs depend on several syscalls. E.g., get_mempolicy is required for numa_get_membind and
  // numa_get_interleave_mask. But these dependent syscalls can be unsupported for various reasons.
  // Especially in Docker containers, get_mempolicy is not allowed with the default configuration,
  // so it's necessary to check whether the syscalls are available. Currently, only get_mempolicy
  // is checked, since checking others like mbind would cause unexpected side effects.
#ifdef SYS_get_mempolicy
  int dummy = 0;
  if (syscall(SYS_get_mempolicy, &dummy, NULL, 0, (void*)&dummy, 3) == -1) {
    return false;
  }
#endif

  return true;
}

bool os::Linux::libnuma_init() {
  // Requires sched_getcpu() and numa dependent syscalls support
  if ((sched_getcpu() != -1) && numa_syscall_check()) {
    void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
    if (handle != NULL) {
      set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
                                           libnuma_dlsym(handle, "numa_node_to_cpus")));
      set_numa_node_to_cpus_v2(CAST_TO_FN_PTR(numa_node_to_cpus_v2_func_t,
                                              libnuma_v2_dlsym(handle, "numa_node_to_cpus")));
      set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
                                       libnuma_dlsym(handle, "numa_max_node")));
      set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
                                                   libnuma_dlsym(handle, "numa_num_configured_nodes")));
      set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
                                        libnuma_dlsym(handle, "numa_available")));
      set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
                                            libnuma_dlsym(handle, "numa_tonode_memory")));
      set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
                                                libnuma_dlsym(handle, "numa_interleave_memory")));
      set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
                                                   libnuma_v2_dlsym(handle, "numa_interleave_memory")));
      set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
                                              libnuma_dlsym(handle, "numa_set_bind_policy")));
      set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
                                               libnuma_dlsym(handle, "numa_bitmask_isbitset")));
      set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
                                       libnuma_dlsym(handle, "numa_distance")));
      set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t,
                                          libnuma_v2_dlsym(handle, "numa_get_membind")));
      set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
                                                  libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));
      set_numa_move_pages(CAST_TO_FN_PTR(numa_move_pages_func_t,
                                         libnuma_dlsym(handle, "numa_move_pages")));
      set_numa_set_preferred(CAST_TO_FN_PTR(numa_set_preferred_func_t,
                                            libnuma_dlsym(handle, "numa_set_preferred")));

      if (numa_available() != -1) {
        set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
        set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
        set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
        set_numa_interleave_bitmask(_numa_get_interleave_mask());
        set_numa_membind_bitmask(_numa_get_membind());
        // Create an index -> node mapping, since nodes are not always consecutive
        _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, mtInternal);
        rebuild_nindex_to_node_map();
        // Create a cpu -> node mapping
        _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, mtInternal);
        rebuild_cpu_to_node_map();
        return true;
      }
    }
  }
  return false;
}

size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
  // Creating a guard page is very expensive. Java threads have HotSpot
  // guard pages; only enable the glibc guard page for non-Java threads.
  // (Remember: a compiler thread is a Java thread, too!)
  return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : page_size());
}

void os::Linux::rebuild_nindex_to_node_map() {
  int highest_node_number = Linux::numa_max_node();

  nindex_to_node()->clear();
  for (int node = 0; node <= highest_node_number; node++) {
    if (Linux::is_node_in_existing_nodes(node)) {
      nindex_to_node()->append(node);
    }
  }
}

// rebuild_cpu_to_node_map() constructs a table mapping cpu id to node id.
// The table is later used in get_node_by_cpu().
void os::Linux::rebuild_cpu_to_node_map() {
  const size_t NCPUS = 32768; // Since the buffer size computation is very obscure
                              // in libnuma (possible values are starting from 16,
                              // and continuing up with every other power of 2, but less
                              // than the maximum number of CPUs supported by kernel), and
                              // is subject to change (in libnuma version 2 the requirements
                              // are more reasonable) we'll just hardcode the number they use
                              // in the library.
  const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;

  size_t cpu_num = processor_count();
  size_t cpu_map_size = NCPUS / BitsPerCLong;
  size_t cpu_map_valid_size =
    MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);

  cpu_to_node()->clear();
  cpu_to_node()->at_grow(cpu_num - 1);

  size_t node_num = get_existing_num_nodes();

  int distance = 0;
  int closest_distance = INT_MAX;
  int closest_node = 0;
  unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal);
  for (size_t i = 0; i < node_num; i++) {
    // Check if node is configured (not a memory-less node). If it is not, find
    // the closest configured node. Check also if node is bound, i.e. it's allowed
    // to allocate memory from the node. If it's not allowed, map cpus in that node
    // to the closest node from which memory allocation is allowed.
    if (!is_node_in_configured_nodes(nindex_to_node()->at(i)) ||
        !is_node_in_bound_nodes(nindex_to_node()->at(i))) {
      closest_distance = INT_MAX;
      // Check distance from all remaining nodes in the system. Ignore distance
      // from itself, from another non-configured node, and from another non-bound
      // node.
      for (size_t m = 0; m < node_num; m++) {
        if (m != i &&
            is_node_in_configured_nodes(nindex_to_node()->at(m)) &&
            is_node_in_bound_nodes(nindex_to_node()->at(m))) {
          distance = numa_distance(nindex_to_node()->at(i), nindex_to_node()->at(m));
          // If a closest node is found, update. There is always at least one
          // configured and bound node in the system so there is always at least
          // one node close.
          if (distance != 0 && distance < closest_distance) {
            closest_distance = distance;
            closest_node = nindex_to_node()->at(m);
          }
        }
      }
    } else {
      // Current node is already a configured node.
      closest_node = nindex_to_node()->at(i);
    }

    // Get cpus from the original node and map them to the closest node. If node
    // is a configured node (not a memory-less node), then original node and
    // closest node are the same.
    if (numa_node_to_cpus(nindex_to_node()->at(i), cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
      for (size_t j = 0; j < cpu_map_valid_size; j++) {
        if (cpu_map[j] != 0) {
          for (size_t k = 0; k < BitsPerCLong; k++) {
            if (cpu_map[j] & (1UL << k)) {
              int cpu_index = j * BitsPerCLong + k;

#ifndef PRODUCT
              if (UseDebuggerErgo1 && cpu_index >= (int)cpu_num) {
                // Some debuggers limit the processor count without
                // intercepting the NUMA APIs. Just fake the values.
                cpu_index = 0;
              }
#endif

              cpu_to_node()->at_put(cpu_index, closest_node);
            }
          }
        }
      }
    }
  }
  FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
}
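
// Worked example (illustrative only): with 64-bit longs BitsPerCLong == 64.
// If a node's cpu_map[1] == 0x9 (binary 1001), bits k == 0 and k == 3 are
// set, so cpu_index 64 (1*64+0) and cpu_index 67 (1*64+3) are mapped to that
// node's closest_node entry.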

int os::Linux::numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
  // use the latest version of numa_node_to_cpus if available
  if (_numa_node_to_cpus_v2 != NULL) {

    // libnuma bitmask struct
    struct bitmask {
      unsigned long size; /* number of bits in the map */
      unsigned long *maskp;
    };

    struct bitmask mask;
    mask.maskp = (unsigned long *)buffer;
    mask.size = bufferlen * 8;
    return _numa_node_to_cpus_v2(node, &mask);
  } else if (_numa_node_to_cpus != NULL) {
    return _numa_node_to_cpus(node, buffer, bufferlen);
  }
  return -1;
}

int os::Linux::get_node_by_cpu(int cpu_id) {
  if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
    return cpu_to_node()->at(cpu_id);
  }
  return -1;
}

GrowableArray<int>* os::Linux::_cpu_to_node;
GrowableArray<int>* os::Linux::_nindex_to_node;
os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
os::Linux::numa_node_to_cpus_v2_func_t os::Linux::_numa_node_to_cpus_v2;
os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages;
os::Linux::numa_set_preferred_func_t os::Linux::_numa_set_preferred;
os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;
unsigned long* os::Linux::_numa_all_nodes;
struct bitmask* os::Linux::_numa_all_nodes_ptr;
struct bitmask* os::Linux::_numa_nodes_ptr;
struct bitmask* os::Linux::_numa_interleave_bitmask;
struct bitmask* os::Linux::_numa_membind_bitmask;

bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
  uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
                                     MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
  return res != (uintptr_t) MAP_FAILED;
}

static address get_stack_commited_bottom(address bottom, size_t size) {
  address nbot = bottom;
  address ntop = bottom + size;

  size_t page_sz = os::vm_page_size();
  unsigned pages = size / page_sz;

  unsigned char vec[1];
  unsigned imin = 1, imax = pages + 1, imid;
  int mincore_return_value = 0;

  assert(imin <= imax, "Unexpected page size");

  while (imin < imax) {
    imid = (imax + imin) / 2;
    nbot = ntop - (imid * page_sz);

    // Use a trick with mincore to check whether the page is mapped or not.
    // mincore sets vec to 1 if the page resides in memory and to 0 if it is
    // swapped out, but if the page we are asking for is unmapped it fails
    // with -1 and errno == ENOMEM.
    mincore_return_value = mincore(nbot, page_sz, vec);

    if (mincore_return_value == -1) {
      // Page is not mapped; go up
      // to find the first mapped page
      if (errno != EAGAIN) {
        assert(errno == ENOMEM, "Unexpected mincore errno");
        imax = imid;
      }
    } else {
      // Page is mapped; go down
      // to find the first unmapped page
      imin = imid + 1;
    }
  }

  nbot = nbot + page_sz;

  // Adjust stack bottom one page up if last checked page is not mapped
  if (mincore_return_value == -1) {
    nbot = nbot + page_sz;
  }

  return nbot;
}
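
// Illustrative sketch (not part of the VM): the mincore(2) probe the binary
// search above relies on, applied to a single page. Assumes <sys/mman.h> and
// <unistd.h>.
//
//   unsigned char vec[1];
//   size_t pg = (size_t)sysconf(_SC_PAGESIZE);
//   int rc = mincore(page_aligned_addr, pg, vec);
//   // rc == 0                       -> page is mapped (vec[0] & 1: resident)
//   // rc == -1 with errno == ENOMEM -> page is not mapped at all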

bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
  int mincore_return_value;
  const size_t stripe = 1024;  // query this many pages each time
  unsigned char vec[stripe + 1];
  // set a guard
  vec[stripe] = 'X';

  const size_t page_sz = os::vm_page_size();
  size_t pages = size / page_sz;

  assert(is_aligned(start, page_sz), "Start address must be page aligned");
  assert(is_aligned(size, page_sz), "Size must be page aligned");

  committed_start = NULL;

  int loops = (pages + stripe - 1) / stripe;
  int committed_pages = 0;
  address loop_base = start;
  bool found_range = false;

  for (int index = 0; index < loops && !found_range; index ++) {
    assert(pages > 0, "Nothing to do");
    int pages_to_query = (pages >= stripe) ? stripe : pages;
    pages -= pages_to_query;

    // Get stable read
    while ((mincore_return_value = mincore(loop_base, pages_to_query * page_sz, vec)) == -1 && errno == EAGAIN);

    // During shutdown, some memory goes away without properly notifying NMT,
    // e.g. a ConcurrentGCThread/WatcherThread can exit without deleting its thread object.
    // Bail out and report the range as not committed for now.
    if (mincore_return_value == -1 && errno == ENOMEM) {
      return false;
    }

    assert(vec[stripe] == 'X', "overflow guard");
    assert(mincore_return_value == 0, "Range must be valid");
    // Process this stripe
    for (int vecIdx = 0; vecIdx < pages_to_query; vecIdx ++) {
      if ((vec[vecIdx] & 0x01) == 0) { // not committed
        // End of current contiguous region
        if (committed_start != NULL) {
          found_range = true;
          break;
        }
      } else { // committed
        // Start of region
        if (committed_start == NULL) {
          committed_start = loop_base + page_sz * vecIdx;
        }
        committed_pages ++;
      }
    }

    loop_base += pages_to_query * page_sz;
  }

  if (committed_start != NULL) {
    assert(committed_pages > 0, "Must have committed region");
    assert(committed_pages <= int(size / page_sz), "Can not commit more than it has");
    assert(committed_start >= start && committed_start < start + size, "Out of range");
    committed_size = page_sz * committed_pages;
    return true;
  } else {
    assert(committed_pages == 0, "Should not have committed region");
    return false;
  }
}


// Linux uses a growable mapping for the stack, and if the mapping for
// the stack guard pages is not removed when we detach a thread the
// stack cannot grow beyond the pages where the stack guard was
// mapped. If at some point later in the process the stack expands to
// that point, the Linux kernel cannot expand the stack any further
// because the guard pages are in the way, and a segfault occurs.
//
// However, it's essential not to split the stack region by unmapping
// a region (leaving a hole) that's already part of the stack mapping,
// so if the stack mapping has already grown beyond the guard pages at
// the time we create them, we have to truncate the stack mapping.
// So, we need to know the extent of the stack mapping when
// create_stack_guard_pages() is called.

// We only need this for stacks that are growable: at the time of
// writing thread stacks don't use growable mappings (i.e. those
// created with MAP_GROWSDOWN), and aren't marked "[stack]", so this
// only applies to the main thread.

// If the (growable) stack mapping already extends beyond the point
// where we're going to put our guard pages, truncate the mapping at
// that point by munmap()ping it. This ensures that when we later
// munmap() the guard pages we don't leave a hole in the stack
// mapping. This only affects the main/primordial thread.

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  if (os::is_primordial_thread()) {
    // As we manually grow stack up to bottom inside create_attached_thread(),
    // it's likely that os::Linux::initial_thread_stack_bottom is mapped and
    // we don't need to do anything special.
    // Check it first, before calling the heavy function.
    uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom();
    unsigned char vec[1];

    if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
      // Fall back to the slow path on all errors, including EAGAIN
      assert((uintptr_t)addr >= stack_extent,
             "Sanity: addr should be larger than extent, " PTR_FORMAT " >= " PTR_FORMAT,
             p2i(addr), stack_extent);
      stack_extent = (uintptr_t) get_stack_commited_bottom(
                                   os::Linux::initial_thread_stack_bottom(),
                                   (size_t)addr - stack_extent);
    }

    if (stack_extent < (uintptr_t)addr) {
      ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent));
    }
  }

  return os::commit_memory(addr, size, !ExecMem);
}

// If this is a growable mapping, remove the guard pages entirely by
// munmap()ping them. If not, just call uncommit_memory(). This only
// affects the main/primordial thread, but guard against future OS changes.
// It's safe to always unmap guard pages for the primordial thread because we
// always place them right after the end of the mapped region.

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  uintptr_t stack_extent, stack_base;

  if (os::is_primordial_thread()) {
    return ::munmap(addr, size) == 0;
  }

  return os::uncommit_memory(addr, size);
}

// 'requested_addr' is only treated as a hint, the return value may or
// may not start from the requested address. Unlike Linux mmap(), this
// function returns NULL to indicate failure.
static char* anon_mmap(char* requested_addr, size_t bytes) {
  // MAP_FIXED is intentionally left out, to leave existing mappings intact.
  const int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;

  // Map reserved/uncommitted pages PROT_NONE so we fail early if we
  // touch an uncommitted page. Otherwise, the read/write might
  // succeed if we have enough swap space to back the physical page.
  char* addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0);

  return addr == MAP_FAILED ? NULL : addr;
}

// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
// (req_addr != NULL) or with a given alignment.
//  - bytes shall be a multiple of alignment.
//  - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
//  - alignment sets the alignment at which memory shall be allocated.
//     It must be a multiple of allocation granularity.
// Returns address of memory or NULL. If req_addr was not NULL, will only return
//  req_addr or NULL.
static char* anon_mmap_aligned(char* req_addr, size_t bytes, size_t alignment) {
  size_t extra_size = bytes;
  if (req_addr == NULL && alignment > 0) {
    extra_size += alignment;
  }

  char* start = anon_mmap(req_addr, extra_size);
  if (start != NULL) {
    if (req_addr != NULL) {
      if (start != req_addr) {
        ::munmap(start, extra_size);
        start = NULL;
      }
    } else {
      char* const start_aligned = align_up(start, alignment);
      char* const end_aligned = start_aligned + bytes;
      char* const end = start + extra_size;
      if (start_aligned > start) {
        ::munmap(start, start_aligned - start);
      }
      if (end_aligned < end) {
        ::munmap(end_aligned, end - end_aligned);
      }
      start = start_aligned;
    }
  }
  return start;
}
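
// Worked example (illustrative only): req_addr == NULL, bytes == 4M,
// alignment == 1M, so extra_size == 5M. If anon_mmap() returns
// 0x7f0000380000, start_aligned becomes 0x7f0000400000; the 512K head
// [start, start_aligned) and the 512K tail [end_aligned, end) are
// munmap()ed, leaving exactly an aligned 4M range.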

static int anon_munmap(char * addr, size_t size) {
  return ::munmap(addr, size) == 0;
}

char* os::pd_reserve_memory(size_t bytes, bool exec) {
  return anon_mmap(NULL, bytes);
}

bool os::pd_release_memory(char* addr, size_t size) {
  return anon_munmap(addr, size);
}

#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
extern char* g_assert_poison; // assertion poison page address
#endif

static bool linux_mprotect(char* addr, size_t size, int prot) {
  // Linux wants the mprotect address argument to be page aligned.
  char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size());

  // According to SUSv3, mprotect() should only be used with mappings
  // established by mmap(), and mmap() always maps whole pages. Unaligned
  // 'addr' likely indicates a problem in the VM (e.g. trying to change
  // protection of malloc'ed or statically allocated memory). Check the
  // caller if you hit this assert.
  assert(addr == bottom, "sanity check");

  size = align_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
  // Don't log anything if we're executing in the poison page signal handling
  // context. It can lead to reentrant use of other parts of the VM code.
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
  if (addr != g_assert_poison)
#endif
  Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
  return ::mprotect(bottom, size, prot) == 0;
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return linux_mprotect(addr, bytes, p);
}

bool os::guard_memory(char* addr, size_t size) {
  return linux_mprotect(addr, size, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t size) {
  return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
}

bool os::Linux::transparent_huge_pages_sanity_check(bool warn,
                                                    size_t page_size) {
  bool result = false;
  void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
                 MAP_ANONYMOUS|MAP_PRIVATE,
                 -1, 0);
  if (p != MAP_FAILED) {
    void *aligned_p = align_up(p, page_size);

    result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;

    munmap(p, page_size * 2);
  }

  if (warn && !result) {
    warning("TransparentHugePages is not supported by the operating system.");
  }

  return result;
}

int os::Linux::hugetlbfs_page_size_flag(size_t page_size) {
  if (page_size != default_large_page_size()) {
    return (exact_log2(page_size) << MAP_HUGE_SHIFT);
  }
  return 0;
}

bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
  // Include the page size flag to ensure we sanity check the correct page size.
  int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
  void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE, flags, -1, 0);

  if (p != MAP_FAILED) {
    // Mapping succeeded, sanity check passed.
    munmap(p, page_size);
    return true;
  } else {
    log_info(pagesize)("Large page size (" SIZE_FORMAT "%s) failed sanity check, "
                       "checking if smaller large page sizes are usable",
                       byte_size_in_exact_unit(page_size),
                       exact_unit_for_byte_size(page_size));
    for (size_t page_size_ = _page_sizes.next_smaller(page_size);
         page_size_ != (size_t)os::vm_page_size();
         page_size_ = _page_sizes.next_smaller(page_size_)) {
      flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size_);
      p = mmap(NULL, page_size_, PROT_READ|PROT_WRITE, flags, -1, 0);
      if (p != MAP_FAILED) {
        // Mapping succeeded, sanity check passed.
        munmap(p, page_size_);
        log_info(pagesize)("Large page size (" SIZE_FORMAT "%s) passed sanity check",
                           byte_size_in_exact_unit(page_size_),
                           exact_unit_for_byte_size(page_size_));
        return true;
      }
    }
  }

  if (warn) {
    warning("HugeTLBFS is not configured or not supported by the operating system.");
  }

  return false;
}

bool os::Linux::shm_hugetlbfs_sanity_check(bool warn, size_t page_size) {
  // Try to create a large shared memory segment.
  int shmid = shmget(IPC_PRIVATE, page_size, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
  if (shmid == -1) {
    // Possible reasons for shmget failure:
    // 1. shmmax is too small for the request.
    //    > check shmmax value: cat /proc/sys/kernel/shmmax
    //    > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax
    // 2. not enough large page memory.
    //    > check available large pages: cat /proc/meminfo
    //    > increase amount of large pages:
    //      sysctl -w vm.nr_hugepages=new_value
    //    > For more information regarding large pages please refer to:
    //      https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
    if (warn) {
      warning("Large pages using UseSHM are not configured on this system.");
    }
    return false;
  }
  // Managed to create a segment, now delete it.
  shmctl(shmid, IPC_RMID, NULL);
  return true;
}

// From the coredump_filter documentation:
//
// - (bit 0) anonymous private memory
// - (bit 1) anonymous shared memory
// - (bit 2) file-backed private memory
// - (bit 3) file-backed shared memory
// - (bit 4) ELF header pages in file-backed private memory areas (it is
//           effective only if the bit 2 is cleared)
// - (bit 5) hugetlb private memory
// - (bit 6) hugetlb shared memory
// - (bit 7) dax private memory
// - (bit 8) dax shared memory
//
static void set_coredump_filter(CoredumpFilterBit bit) {
  FILE *f;
  long cdm;

  if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
    return;
  }

  if (fscanf(f, "%lx", &cdm) != 1) {
    fclose(f);
    return;
  }

  long saved_cdm = cdm;
  rewind(f);
  cdm |= bit;

  if (cdm != saved_cdm) {
    fprintf(f, "%#lx", cdm);
  }

  fclose(f);
}
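
// Worked example (illustrative only): if /proc/self/coredump_filter reads
// 0x23 (bits 0, 1 and 5 set), enabling hugetlb shared memory (bit 6, value
// 0x40) rewrites the file as 0x63. The shell equivalent of this helper is
// roughly: echo 0x63 > /proc/self/coredump_filter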

// Large page support

static size_t _large_page_size = 0;

static size_t scan_default_large_page_size() {
  size_t default_large_page_size = 0;

  // large_page_size on Linux is used to round up heap size. x86 uses either
  // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
  // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
  // page as large as 1G.
  //
  // Here we try to figure out page size by parsing /proc/meminfo and looking
  // for a line with the following format:
  //    Hugepagesize:     2048 kB
  //
  // If we can't determine the value (e.g. /proc is not mounted, or the text
  // format has been changed), we'll set the large page size to 0

  FILE *fp = fopen("/proc/meminfo", "r");
  if (fp) {
    while (!feof(fp)) {
      int x = 0;
      char buf[16];
      if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
        if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
          default_large_page_size = x * K;
          break;
        }
      } else {
        // skip to next line
        for (;;) {
          int ch = fgetc(fp);
          if (ch == EOF || ch == (int)'\n') break;
        }
      }
    }
    fclose(fp);
  }

  return default_large_page_size;
}
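
// Illustrative: on a host configured with 2M huge pages, /proc/meminfo
// contains a line such as
//    Hugepagesize:       2048 kB
// which the parser above turns into 2048 * K, i.e. 2M.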

static os::PageSizes scan_multiple_page_support() {
  // Scan /sys/kernel/mm/hugepages
  // to discover the available page sizes
  const char* sys_hugepages = "/sys/kernel/mm/hugepages";
  os::PageSizes page_sizes;

  DIR *dir = opendir(sys_hugepages);

  struct dirent *entry;
  size_t page_size;
  while ((entry = readdir(dir)) != NULL) {
    if (entry->d_type == DT_DIR &&
        sscanf(entry->d_name, "hugepages-%zukB", &page_size) == 1) {
      // The kernel is using kB, hotspot uses bytes
      // Add each found Large Page Size to page_sizes
      page_sizes.add(page_size * K);
    }
  }
  closedir(dir);

  LogTarget(Debug, pagesize) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("Large Page sizes: ");
    page_sizes.print_on(&ls);
  }

  return page_sizes;
}
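
// Illustrative: on an x86_64 host with both 2M and 1G pages enabled,
// /sys/kernel/mm/hugepages typically contains
//    hugepages-2048kB      -> page_sizes gains 2M
//    hugepages-1048576kB   -> page_sizes gains 1G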

size_t os::Linux::default_large_page_size() {
  return _default_large_page_size;
}

void warn_no_large_pages_configured() {
  if (!FLAG_IS_DEFAULT(UseLargePages)) {
    log_warning(pagesize)("UseLargePages disabled, no large pages configured and available on the system.");
  }
}

bool os::Linux::setup_large_page_type(size_t page_size) {
  if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
      FLAG_IS_DEFAULT(UseSHM) &&
      FLAG_IS_DEFAULT(UseTransparentHugePages)) {

    // The type of large pages has not been specified by the user.

    // Try UseHugeTLBFS and then UseSHM.
    UseHugeTLBFS = UseSHM = true;

    // Don't try UseTransparentHugePages since there are known
    // performance issues with it turned on. This might change in the future.
    UseTransparentHugePages = false;
  }

  if (UseTransparentHugePages) {
    bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
    if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
      UseHugeTLBFS = false;
      UseSHM = false;
      return true;
    }
    UseTransparentHugePages = false;
  }

  if (UseHugeTLBFS) {
    bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
    if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
      UseSHM = false;
      return true;
    }
    UseHugeTLBFS = false;
  }

  if (UseSHM) {
    bool warn_on_failure = !FLAG_IS_DEFAULT(UseSHM);
    if (shm_hugetlbfs_sanity_check(warn_on_failure, page_size)) {
      return true;
    }
    UseSHM = false;
  }

  warn_no_large_pages_configured();
  return false;
}

void os::large_page_init() {
  // 1) Handle the case where we do not want to use huge pages and hence
  //    there is no need to scan the OS for related info
  if (!UseLargePages &&
      !UseTransparentHugePages &&
      !UseHugeTLBFS &&
      !UseSHM) {
    // Not using large pages.
    return;
  }

  if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
    // The user explicitly turned off large pages.
    // Ignore the rest of the large pages flags.
    UseTransparentHugePages = false;
    UseHugeTLBFS = false;
    UseSHM = false;
    return;
  }

  // 2) Scan OS info
  size_t default_large_page_size = scan_default_large_page_size();
  os::Linux::_default_large_page_size = default_large_page_size;
  if (default_large_page_size == 0) {
    // No large pages configured, return.
    warn_no_large_pages_configured();
    UseLargePages = false;
    UseTransparentHugePages = false;
    UseHugeTLBFS = false;
    UseSHM = false;
    return;
  }
  os::PageSizes all_large_pages = scan_multiple_page_support();

  // 3) Consistency check and post-processing

  // It is unclear if /sys/kernel/mm/hugepages/ and /proc/meminfo could disagree. Manually
  // re-add the default page size to the list of page sizes to be sure.
  all_large_pages.add(default_large_page_size);

  // Check LargePageSizeInBytes matches an available page size and if so set _large_page_size
  // using LargePageSizeInBytes as the maximum allowed large page size. If LargePageSizeInBytes
  // doesn't match an available page size set _large_page_size to default_large_page_size
  // and use it as the maximum.
  if (FLAG_IS_DEFAULT(LargePageSizeInBytes) ||
      LargePageSizeInBytes == 0 ||
      LargePageSizeInBytes == default_large_page_size) {
    _large_page_size = default_large_page_size;
    log_info(pagesize)("Using the default large page size: " SIZE_FORMAT "%s",
                       byte_size_in_exact_unit(_large_page_size),
                       exact_unit_for_byte_size(_large_page_size));
  } else {
    if (all_large_pages.contains(LargePageSizeInBytes)) {
      _large_page_size = LargePageSizeInBytes;
      log_info(pagesize)("Overriding default large page size (" SIZE_FORMAT "%s) "
                         "using LargePageSizeInBytes: " SIZE_FORMAT "%s",
                         byte_size_in_exact_unit(default_large_page_size),
                         exact_unit_for_byte_size(default_large_page_size),
                         byte_size_in_exact_unit(_large_page_size),
                         exact_unit_for_byte_size(_large_page_size));
    } else {
      _large_page_size = default_large_page_size;
      log_info(pagesize)("LargePageSizeInBytes is not a valid large page size (" SIZE_FORMAT "%s) "
                         "using the default large page size: " SIZE_FORMAT "%s",
                         byte_size_in_exact_unit(LargePageSizeInBytes),
                         exact_unit_for_byte_size(LargePageSizeInBytes),
                         byte_size_in_exact_unit(_large_page_size),
                         exact_unit_for_byte_size(_large_page_size));
    }
  }

  // Populate _page_sizes with large page sizes less than or equal to
  // _large_page_size.
  for (size_t page_size = _large_page_size; page_size != 0;
       page_size = all_large_pages.next_smaller(page_size)) {
    _page_sizes.add(page_size);
  }

  LogTarget(Info, pagesize) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("Usable page sizes: ");
    _page_sizes.print_on(&ls);
  }

  // Now determine the type of large pages to use:
  UseLargePages = os::Linux::setup_large_page_type(_large_page_size);

  set_coredump_filter(LARGEPAGES_BIT);
}

#ifndef SHM_HUGETLB
  #define SHM_HUGETLB 04000
#endif

#define shm_warning_format(format, ...)              \
  do {                                               \
    if (UseLargePages &&                             \
        (!FLAG_IS_DEFAULT(UseLargePages) ||          \
         !FLAG_IS_DEFAULT(UseSHM) ||                 \
         !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {  \
      warning(format, __VA_ARGS__);                  \
    }                                                \
  } while (0)

#define shm_warning(str) shm_warning_format("%s", str)

#define shm_warning_with_errno(str)                \
  do {                                             \
    int err = errno;                               \
    shm_warning_format(str " (error = %d)", err);  \
  } while (0)

static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
  assert(is_aligned(bytes, alignment), "Must be divisible by the alignment");

  if (!is_aligned(alignment, SHMLBA)) {
    assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
    return NULL;
  }

  // To ensure that we get 'alignment' aligned memory from shmat,
  // we pre-reserve aligned virtual memory and then attach to that.

  char* pre_reserved_addr = anon_mmap_aligned(NULL /* req_addr */, bytes, alignment);
  if (pre_reserved_addr == NULL) {
    // Couldn't pre-reserve aligned memory.
    shm_warning("Failed to pre-reserve aligned memory for shmat.");
    return NULL;
  }

  // SHM_REMAP is needed to allow shmat to map over an existing mapping.
  char* addr = (char*)shmat(shmid, pre_reserved_addr, SHM_REMAP);

  if ((intptr_t)addr == -1) {
    int err = errno;
    shm_warning_with_errno("Failed to attach shared memory.");

    assert(err != EACCES, "Unexpected error");
    assert(err != EIDRM, "Unexpected error");
    assert(err != EINVAL, "Unexpected error");

    // Since we don't know if the kernel unmapped the pre-reserved memory area
    // we can't unmap it, since that would potentially unmap memory that was
    // mapped from other threads.
    return NULL;
  }

  return addr;
}

static char* shmat_at_address(int shmid, char* req_addr) {
  if (!is_aligned(req_addr, SHMLBA)) {
    assert(false, "Requested address needs to be SHMLBA aligned");
    return NULL;
  }

  char* addr = (char*)shmat(shmid, req_addr, 0);

  if ((intptr_t)addr == -1) {
    shm_warning_with_errno("Failed to attach shared memory.");
    return NULL;
  }

  return addr;
}

static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
  // If a req_addr has been provided, we assume that the caller has already aligned the address.
  if (req_addr != NULL) {
    assert(is_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
    assert(is_aligned(req_addr, alignment), "Must be divisible by given alignment");
    return shmat_at_address(shmid, req_addr);
  }

  // Since shmid has been setup with SHM_HUGETLB, shmat will automatically
  // return large page size aligned memory addresses when req_addr == NULL.
  // However, if the alignment is larger than the large page size, we have
  // to manually ensure that the memory returned is 'alignment' aligned.
  if (alignment > os::large_page_size()) {
    assert(is_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
    return shmat_with_alignment(shmid, bytes, alignment);
  } else {
    return shmat_at_address(shmid, NULL);
  }
}

char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
                                            char* req_addr, bool exec) {
  // "exec" is passed in but not used. Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  assert(UseLargePages && UseSHM, "only for SHM large pages");
  assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
  assert(is_aligned(req_addr, alignment), "Unaligned address");

  if (!is_aligned(bytes, os::large_page_size())) {
    return NULL; // Fallback to small pages.
  }

  // Create a large shared memory region to attach to based on size.
  // Currently, size is the total size of the heap.
  int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
  if (shmid == -1) {
    // Possible reasons for shmget failure:
    // 1. shmmax is too small for the request.
    //    > check shmmax value: cat /proc/sys/kernel/shmmax
    //    > increase shmmax value: echo "new_value" > /proc/sys/kernel/shmmax
    // 2. not enough large page memory.
    //    > check available large pages: cat /proc/meminfo
    //    > increase amount of large pages:
    //      sysctl -w vm.nr_hugepages=new_value
    //    > For more information regarding large pages please refer to:
    //      https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
    // Note 1: different Linux distributions may use different names for this
    //         property, e.g. on Redhat AS-3 it is "hugetlb_pool".
    // Note 2: it's possible there's enough physical memory available but
    //         it is so fragmented after a long run that it can't
    //         coalesce into large pages. Try to reserve large pages when
    //         the system is still "fresh".
    shm_warning_with_errno("Failed to reserve shared memory.");
    return NULL;
  }

  // Attach to the region.
  char* addr = shmat_large_pages(shmid, bytes, alignment, req_addr);

  // Remove shmid. If shmat() is successful, the actual shared memory segment
  // will be deleted when it's detached by shmdt() or when the process
  // terminates. If shmat() is not successful this will remove the shared
  // segment immediately.
  shmctl(shmid, IPC_RMID, NULL);

  return addr;
}

static void warn_on_commit_special_failure(char* req_addr, size_t bytes,
                                           size_t page_size, int error) {
  assert(error == ENOMEM, "Only expect to fail if no memory is available");

  bool warn_on_failure = UseLargePages &&
      (!FLAG_IS_DEFAULT(UseLargePages) ||
       !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
       !FLAG_IS_DEFAULT(LargePageSizeInBytes));

  if (warn_on_failure) {
    char msg[128];
    jio_snprintf(msg, sizeof(msg), "Failed to reserve and commit memory. req_addr: "
                                   PTR_FORMAT " bytes: " SIZE_FORMAT " page size: "
                                   SIZE_FORMAT " (errno = %d).",
                 req_addr, bytes, page_size, error);
    warning("%s", msg);
  }
}

bool os::Linux::commit_memory_special(size_t bytes,
                                      size_t page_size,
                                      char* req_addr,
                                      bool exec) {
  assert(UseLargePages && UseHugeTLBFS, "Should only get here when HugeTLBFS large pages are used");
  assert(is_aligned(bytes, page_size), "Unaligned size");
  assert(is_aligned(req_addr, page_size), "Unaligned address");
  assert(req_addr != NULL, "Must have a requested address for special mappings");

  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  int flags = MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED;

  // For large pages additional flags are required.
  if (page_size > (size_t) os::vm_page_size()) {
    flags |= MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
  }
  char* addr = (char*)::mmap(req_addr, bytes, prot, flags, -1, 0);

  if (addr == MAP_FAILED) {
    warn_on_commit_special_failure(req_addr, bytes, page_size, errno);
    return false;
  }

  log_debug(pagesize)("Commit special mapping: " PTR_FORMAT ", size=" SIZE_FORMAT "%s, page size="
                      SIZE_FORMAT "%s",
                      p2i(addr), byte_size_in_exact_unit(bytes),
                      exact_unit_for_byte_size(bytes),
                      byte_size_in_exact_unit(page_size),
                      exact_unit_for_byte_size(page_size));
  assert(is_aligned(addr, page_size), "Must be");
  return true;
}

char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
                                                   size_t alignment,
                                                   size_t page_size,
                                                   char* req_addr,
                                                   bool exec) {
  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
  assert(is_aligned(req_addr, alignment), "Must be");
  assert(is_aligned(req_addr, page_size), "Must be");
  assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
  assert(_page_sizes.contains(page_size), "Must be a valid page size");
  assert(page_size > (size_t)os::vm_page_size(), "Must be a large page size");
  assert(bytes >= page_size, "Shouldn't allocate large pages for small sizes");

  // We only end up here when at least 1 large page can be used.
  // If the size is not a multiple of the large page size, we
  // will mix the type of pages used, but in a descending order.
  // Start off by reserving a range of the given size that is
  // properly aligned. At this point no pages are committed. If
  // a requested address is given it will be used and it must be
  // aligned to both the large page size and the given alignment.
  // The larger of the two will be used.
  size_t required_alignment = MAX2(page_size, alignment);
  char* const aligned_start = anon_mmap_aligned(req_addr, bytes, required_alignment);
  if (aligned_start == NULL) {
    return NULL;
  }

  // First commit using large pages.
  size_t large_bytes = align_down(bytes, page_size);
  bool large_committed = commit_memory_special(large_bytes, page_size, aligned_start, exec);

  if (large_committed && bytes == large_bytes) {
    // The size was large page aligned so no additional work is
    // needed even if the commit failed.
    return aligned_start;
  }

  // The requested size requires some small pages as well.
  char* small_start = aligned_start + large_bytes;
  size_t small_size = bytes - large_bytes;
  if (!large_committed) {
    // Failed to commit large pages, so we need to unmap the
    // remainder of the original reservation.
    ::munmap(small_start, small_size);
    return NULL;
  }

  // Commit the remaining bytes using small pages.
  bool small_committed = commit_memory_special(small_size, os::vm_page_size(), small_start, exec);
  if (!small_committed) {
    // Failed to commit the remaining size, need to unmap
    // the large pages part of the reservation.
    ::munmap(aligned_start, large_bytes);
    return NULL;
  }
  return aligned_start;
}

char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size,
                                    char* req_addr, bool exec) {
  assert(UseLargePages, "only for large pages");

  char* addr;
  if (UseSHM) {
    // No support for using specific page sizes with SHM.
    addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
  } else {
    assert(UseHugeTLBFS, "must be");
    addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, page_size, req_addr, exec);
  }

  if (addr != NULL) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, bytes);
    }
  }

  return addr;
}

bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
  // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
  return shmdt(base) == 0;
}

bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
  return pd_release_memory(base, bytes);
}

bool os::pd_release_memory_special(char* base, size_t bytes) {
  assert(UseLargePages, "only for large pages");
  bool res;

  if (UseSHM) {
    res = os::Linux::release_memory_special_shm(base, bytes);
  } else {
    assert(UseHugeTLBFS, "must be");
    res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
  }
  return res;
}

size_t os::large_page_size() {
  return _large_page_size;
}

// With SysV SHM the entire memory region must be allocated as shared
// memory.
// HugeTLBFS allows the application to commit large page memory on demand.
// However, when committing memory with HugeTLBFS fails, the region
// that was supposed to be committed will lose the old reservation
// and allow other threads to steal that memory region. Because of this
// behavior we can't commit HugeTLBFS memory.
bool os::can_commit_large_page_memory() {
  return UseTransparentHugePages;
}

bool os::can_execute_large_page_memory() {
  return UseTransparentHugePages || UseHugeTLBFS;
}

char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
  assert(file_desc >= 0, "file_desc is not valid");
  char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem);
  if (result != NULL) {
    if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
      vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
    }
  }
  return result;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level. If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain. See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

  // Linux mmap allows a caller to pass an address as a hint; give it a try first.
  // If the kernel honors the hint then we can return immediately.
  char * addr = anon_mmap(requested_addr, bytes);
  if (addr == requested_addr) {
    return requested_addr;
  }

  if (addr != NULL) {
    // mmap() succeeded, but not at the requested address
    anon_munmap(addr, bytes);
  }

  return NULL;
}

// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  return DontYieldALot;
}

// The Linux CFS scheduler (since 2.6.23) does not guarantee that sched_yield(2)
// actually gives up the CPU. Since skip buddy (v2.6.28):
//
// * Sets the yielding task as skip buddy for current CPU's run queue.
// * Picks next from run queue, if empty, picks a skip buddy (can be the yielding task).
// * Clears skip buddies for this run queue (yielding task no longer a skip buddy).
//
// An alternative is calling os::naked_short_nanosleep with a small number to avoid
// getting re-scheduled immediately.
//
void os::naked_yield() {
  sched_yield();
}
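
// Illustrative only: the alternative mentioned above would be written as
// something like
//   os::naked_short_nanosleep(10 /* ns */);
// accepting a tiny sleep in exchange for a real chance of being descheduled.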

////////////////////////////////////////////////////////////////////////////////
// thread priority support

// Note: Normal Linux applications are run with SCHED_OTHER policy. SCHED_OTHER
// only supports dynamic priority, static priority must be zero. For real-time
// applications, Linux supports SCHED_RR which allows static priority (1-99).
// However, for large multi-threaded applications, SCHED_RR is not only slower
// than SCHED_OTHER, but also very unstable (my volano tests hang hard 4 out
// of 5 runs - Sep 2005).
//
// The following code actually changes the niceness of kernel-thread/LWP. It
// has an assumption that setpriority() only modifies one kernel-thread/LWP,
// not the entire user process, and user level threads are 1:1 mapped to kernel
// threads. It has always been the case, but could change in the future. For
// this reason, the code should not be used as default (ThreadPriorityPolicy=0).
// It is only used when ThreadPriorityPolicy=1 and may require system level permission
// (e.g., root privilege or CAP_SYS_NICE capability).

int os::java_to_os_priority[CriticalPriority + 1] = {
  19,              // 0 Entry should never be used

   4,              // 1 MinPriority
   3,              // 2
   2,              // 3

   1,              // 4
   0,              // 5 NormPriority
  -1,              // 6

  -2,              // 7
  -3,              // 8
  -4,              // 9 NearMaxPriority

  -5,              // 10 MaxPriority

  -5               // 11 CriticalPriority
};
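
// Worked example (illustrative only): with the default table above, a Java
// thread at NormPriority (5) runs at nice 0 and one at MaxPriority (10) at
// nice -5. Raising priority means a negative nice value, which is what may
// require root or CAP_SYS_NICE (see prio_init() below).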

static int prio_init() {
  if (ThreadPriorityPolicy == 1) {
    if (geteuid() != 0) {
      if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy) && !FLAG_IS_JIMAGE_RESOURCE(ThreadPriorityPolicy)) {
        warning("-XX:ThreadPriorityPolicy=1 may require system level permission, " \
                "e.g., being the root user. If the necessary permission is not " \
                "possessed, changes to priority will be silently ignored.");
      }
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int newpri) {
  if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK;

  int ret = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri);
  return (ret == 0) ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread,
                                 int *priority_ptr) {
  if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }

  errno = 0;
  *priority_ptr = getpriority(PRIO_PROCESS, thread->osthread()->thread_id());
  return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);
}

// This is the fastest way to get thread cpu time on Linux.
// Returns cpu time (user+sys) for any thread, not only for current.
// POSIX compliant clocks are implemented in kernels 2.6.16+.
// It might work on 2.6.10+ with a special kernel/glibc patch.
// For reference, please see IEEE Std 1003.1-2004:
// http://www.unix.org/single_unix_specification

jlong os::Linux::fast_thread_cpu_time(clockid_t clockid) {
  struct timespec tp;
  int status = clock_gettime(clockid, &tp);
  assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
  return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
}
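
// Illustrative sketch (not part of the VM): obtaining a per-thread CPU-time
// clock id for this helper with the POSIX pthread_getcpuclockid(3) call.
//
//   clockid_t cid;
//   if (pthread_getcpuclockid(pthread_self(), &cid) == 0) {
//     jlong cpu_ns = os::Linux::fast_thread_cpu_time(cid);  // user+sys, in ns
//   }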

// Determine if the vmid is the parent pid for a child in a PID namespace.
// Return the namespace pid if so, otherwise -1.
int os::Linux::get_namespace_pid(int vmid) {
  char fname[24];
  int retpid = -1;

  snprintf(fname, sizeof(fname), "/proc/%d/status", vmid);
  FILE *fp = fopen(fname, "r");

  if (fp) {
    int pid, nspid;
    int ret;
    while (!feof(fp) && !ferror(fp)) {
      ret = fscanf(fp, "NSpid: %d %d", &pid, &nspid);
      if (ret == 1) {
        break;
      }
      if (ret == 2) {
        retpid = nspid;
        break;
      }
      for (;;) {
        int ch = fgetc(fp);
        if (ch == EOF || ch == (int)'\n') break;
      }
    }
    fclose(fp);
  }
  return retpid;
}
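
// Illustrative: for a process that is pid 12345 on the host and pid 1 inside
// a container's PID namespace, /proc/12345/status contains a line like
//   NSpid:  12345   1
// so fscanf matches two fields (ret == 2) and retpid becomes 1. Without a
// nested namespace the line is just "NSpid:  12345", only one field matches,
// and the function returns -1.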

extern void report_error(char* file_name, int line_no, char* title,
                         char* format, ...);

// Some Linux distributions (notably: Alpine Linux) include grsecurity in
// the kernel. Of particular interest from a JVM perspective is PaX
// (https://pax.grsecurity.net/), which adds some security features
// related to page attributes. Specifically, the MPROTECT PaX functionality
// (https://pax.grsecurity.net/docs/mprotect.txt) prevents dynamic
// code generation by disallowing a (previously) writable page to be
// marked as executable. This is, of course, exactly what HotSpot does
// for JIT compiled methods, as well as for stubs, adapters, etc.
//
// Instead of crashing "lazily" when trying to make a page executable,
// this code probes for the presence of PaX and reports the failure
// eagerly.
static void check_pax(void) {
  // Zero doesn't generate code dynamically, so no need to perform the PaX check
#ifndef ZERO
  size_t size = os::Linux::page_size();

  void* p = ::mmap(NULL, size, PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    log_debug(os)("os_linux.cpp: check_pax: mmap failed (%s)" , os::strerror(errno));
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "failed to allocate memory for PaX check.");
  }

  int res = ::mprotect(p, size, PROT_WRITE|PROT_EXEC);
  if (res == -1) {
    log_debug(os)("os_linux.cpp: check_pax: mprotect failed (%s)" , os::strerror(errno));
    vm_exit_during_initialization(
      "Failed to mark memory page as executable - check if grsecurity/PaX is enabled");
  }

  ::munmap(p, size);
#endif
}

// this is called _before_ most of the global arguments have been parsed
void os::init(void) {
  char dummy;   // used to get a guess on initial stack address

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  Linux::set_page_size(sysconf(_SC_PAGESIZE));
  if (Linux::page_size() == -1) {
    fatal("os_linux.cpp: os::init: sysconf failed (%s)",
          os::strerror(errno));
  }
  _page_sizes.add(Linux::page_size());

  Linux::initialize_system_info();

#ifdef __GLIBC__
  Linux::_mallinfo = CAST_TO_FN_PTR(Linux::mallinfo_func_t, dlsym(RTLD_DEFAULT, "mallinfo"));
  Linux::_mallinfo2 = CAST_TO_FN_PTR(Linux::mallinfo2_func_t, dlsym(RTLD_DEFAULT, "mallinfo2"));
#endif // __GLIBC__

  os::Linux::CPUPerfTicks pticks;
  bool res = os::Linux::get_tick_information(&pticks, -1);

  if (res && pticks.has_steal_ticks) {
    has_initial_tick_info = true;
    initial_total_ticks = pticks.total;
    initial_steal_ticks = pticks.steal;
  }

  // _main_thread points to the thread that created/loaded the JVM.
  Linux::_main_thread = pthread_self();

  // retrieve entry point for pthread_setname_np
  Linux::_pthread_setname_np =
    (int(*)(pthread_t, const char*))dlsym(RTLD_DEFAULT, "pthread_setname_np");

  check_pax();

  os::Posix::init();

  initial_time_count = javaTimeNanos();
}

// To install functions for atexit system call
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

void os::pd_init_container_support() {
  OSContainer::init();
}

void os::Linux::numa_init() {

  // Java can be invoked as
  // 1. Without numactl, in which case the heap will be allocated/configured
  //    on all nodes as per the system policy.
  // 2. With numactl --interleave:
  //    Use the numa_get_interleave_mask(v2) API to get the nodes bitmask. In
  //    the membind case the same API returns a reset (empty) bitmask.
  //    Interleave is only a hint; the kernel can fall back to other nodes if
  //    no memory is available on the target nodes.
  // 3. With numactl --membind:
  //    Use the numa_get_membind(v2) API to get the nodes bitmask. In the
  //    interleave case the same API returns a bitmask of all nodes.
  // numa_all_nodes_ptr holds a bitmask of all nodes.
  // The numa_get_interleave_mask(v2) and numa_get_membind(v2) APIs return the
  // correct bitmask when externally configured to run on all or fewer nodes.

  if (!Linux::libnuma_init()) {
    FLAG_SET_ERGO(UseNUMA, false);
    FLAG_SET_ERGO(UseNUMAInterleaving, false); // Also depends on libnuma.
  } else {
    if ((Linux::numa_max_node() < 1) || Linux::is_bound_to_single_node()) {
      // If there's only one node (they start from 0) or if the process
      // is bound explicitly to a single node using membind, disable NUMA
      UseNUMA = false;
    } else {
      LogTarget(Info,os) log;
      LogStream ls(log);

      Linux::set_configured_numa_policy(Linux::identify_numa_policy());

      struct bitmask* bmp = Linux::_numa_membind_bitmask;
      const char* numa_mode = "membind";

      if (Linux::is_running_in_interleave_mode()) {
        bmp = Linux::_numa_interleave_bitmask;
        numa_mode = "interleave";
      }

      ls.print("UseNUMA is enabled and invoked in '%s' mode."
               " Heap will be configured using NUMA memory nodes:", numa_mode);

      for (int node = 0; node <= Linux::numa_max_node(); node++) {
        if (Linux::_numa_bitmask_isbitset(bmp, node)) {
          ls.print(" %d", node);
        }
      }
    }
  }

  // When NUMA requested, not-NUMA-aware allocations default to interleaving.
  if (UseNUMA && !UseNUMAInterleaving) {
    FLAG_SET_ERGO_IF_DEFAULT(UseNUMAInterleaving, true);
  }

  if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
    // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
    // we can make the adaptive lgrp chunk resizing work. If the user specified both
    // UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn
    // and disable adaptive resizing.
    if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {
      warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, "
              "disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");
      UseAdaptiveSizePolicy = false;
      UseAdaptiveNUMAChunkSizing = false;
    }
  }
}
4553
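
// Example (illustrative sketch only, not VM code): the NUMA policy probing
// described above can be done directly against the libnuma v2 API. All calls
// shown are standard libnuma functions (link with -lnuma):
//
//   #include <numa.h>
//
//   if (numa_available() != -1) {
//     struct bitmask* membind    = numa_get_membind();
//     struct bitmask* interleave = numa_get_interleave_mask();
//     for (int node = 0; node <= numa_max_node(); node++) {
//       if (numa_bitmask_isbitset(membind, node)) {
//         // node is in the membind set
//       }
//     }
//   }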

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {

  // This could be set after os::Posix::init() but all platforms
  // have to set it the same so we have to mirror Solaris.
  DEBUG_ONLY(os::set_mutex_init_done();)

  os::Posix::init_2();

  Linux::fast_thread_clock_init();

  if (PosixSignals::init() == JNI_ERR) {
    return JNI_ERR;
  }

  if (AdjustStackSizeForTLS) {
    get_minstack_init();
  }

  // Check and set minimum stack sizes against command line options
  if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
    return JNI_ERR;
  }

#if defined(IA32) && !defined(ZERO)
  // Need to ensure we've determined the process's initial stack to
  // perform the workaround
  Linux::capture_initial_stack(JavaThread::stack_size_at_create());
  workaround_expand_exec_shield_cs_limit();
#else
  suppress_primordial_thread_resolution = Arguments::created_by_java_launcher();
  if (!suppress_primordial_thread_resolution) {
    Linux::capture_initial_stack(JavaThread::stack_size_at_create());
  }
#endif

  Linux::libpthread_init();
  Linux::sched_getcpu_init();
  log_info(os)("HotSpot is running with %s, %s",
               Linux::libc_version(), Linux::libpthread_version());

  if (UseNUMA || UseNUMAInterleaving) {
    Linux::numa_init();
  }

  if (MaxFDLimit) {
    // Set the number of file descriptors to the maximum. Print an error
    // if getrlimit/setrlimit fails, but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
      }
    }
  }

  // at-exit methods are called in the reverse order of their registration.
  // atexit functions are called on return from main or as a result of a
  // call to exit(3C). There can be only 32 of these functions registered
  // and atexit() does not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // initialize thread priority policy
  prio_init();

  if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
    set_coredump_filter(DAX_SHARED_BIT);
  }

  if (DumpPrivateMappingsInCore) {
    set_coredump_filter(FILE_BACKED_PVT_BIT);
  }

  if (DumpSharedMappingsInCore) {
    set_coredump_filter(FILE_BACKED_SHARED_BIT);
  }

  if (DumpPerfMapAtExit && FLAG_IS_DEFAULT(UseCodeCacheFlushing)) {
    // Disable code cache flushing to ensure the map file written at
    // exit contains all nmethods generated during execution.
    FLAG_SET_DEFAULT(UseCodeCacheFlushing, false);
  }

  return JNI_OK;
}

// older glibc versions don't have this macro (which expands to
// an optimized bit-counting function) so we have to roll our own
#ifndef CPU_COUNT

static int _cpu_count(const cpu_set_t* cpus) {
  int count = 0;
  // only look up to the number of configured processors
  for (int i = 0; i < os::processor_count(); i++) {
    if (CPU_ISSET(i, cpus)) {
      count++;
    }
  }
  return count;
}

#define CPU_COUNT(cpus) _cpu_count(cpus)

#endif // CPU_COUNT

// Get the current number of available processors for this process.
// This value can change at any time during a process's lifetime.
// sched_getaffinity gives an accurate answer as it accounts for cpusets.
// If it appears there may be more than 1024 processors then we do a
// dynamic check - see 6515172 for details.
// If anything goes wrong we fall back to returning the number of online
// processors - which can be greater than the number available to the process.
static int get_active_processor_count() {
  // Note: keep this function, with its CPU_xx macros, *outside* the os namespace (see JDK-8289477).
  cpu_set_t cpus;  // can represent at most 1024 (CPU_SETSIZE) processors
  cpu_set_t* cpus_p = &cpus;
  int cpus_size = sizeof(cpu_set_t);

  int configured_cpus = os::processor_count();  // upper bound on available cpus
  int cpu_count = 0;

// old build platforms may not support dynamic cpu sets
#ifdef CPU_ALLOC

  // To enable easy testing of the dynamic path on different platforms we
  // introduce a diagnostic flag: UseCpuAllocPath
  if (configured_cpus >= CPU_SETSIZE || UseCpuAllocPath) {
    // kernel may use a mask bigger than cpu_set_t
    log_trace(os)("active_processor_count: using dynamic path %s"
                  "- configured processors: %d",
                  UseCpuAllocPath ? "(forced) " : "",
                  configured_cpus);
    cpus_p = CPU_ALLOC(configured_cpus);
    if (cpus_p != NULL) {
      cpus_size = CPU_ALLOC_SIZE(configured_cpus);
      // zero it just to be safe
      CPU_ZERO_S(cpus_size, cpus_p);
    }
    else {
      // failed to allocate so fall back to online cpus
      int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
      log_trace(os)("active_processor_count: "
                    "CPU_ALLOC failed (%s) - using "
                    "online processor count: %d",
                    os::strerror(errno), online_cpus);
      return online_cpus;
    }
  }
  else {
    log_trace(os)("active_processor_count: using static path - configured processors: %d",
                  configured_cpus);
  }
#else // CPU_ALLOC
// these stubs won't be executed
#define CPU_COUNT_S(size, cpus) -1
#define CPU_FREE(cpus)

  log_trace(os)("active_processor_count: only static path available - configured processors: %d",
                configured_cpus);
#endif // CPU_ALLOC

  // pid 0 means the current thread - which we have to assume represents the process
  if (sched_getaffinity(0, cpus_size, cpus_p) == 0) {
    if (cpus_p != &cpus) { // can only be true when CPU_ALLOC used
      cpu_count = CPU_COUNT_S(cpus_size, cpus_p);
    }
    else {
      cpu_count = CPU_COUNT(cpus_p);
    }
    log_trace(os)("active_processor_count: sched_getaffinity processor count: %d", cpu_count);
  }
  else {
    cpu_count = ::sysconf(_SC_NPROCESSORS_ONLN);
    warning("sched_getaffinity failed (%s) - using online processor count (%d) "
            "which may exceed available processors", os::strerror(errno), cpu_count);
  }

  if (cpus_p != &cpus) { // can only be true when CPU_ALLOC used
    CPU_FREE(cpus_p);
  }

  assert(cpu_count > 0 && cpu_count <= os::processor_count(), "sanity check");
  return cpu_count;
}
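
// Example (illustrative sketch only, not VM code): the dynamic cpu-set
// pattern used above, in isolation. All calls are standard glibc CPU_*
// macros plus sched_getaffinity():
//
//   cpu_set_t* set = CPU_ALLOC(2048);          // room for 2048 processors
//   if (set != NULL) {
//     size_t sz = CPU_ALLOC_SIZE(2048);
//     CPU_ZERO_S(sz, set);
//     if (sched_getaffinity(0, sz, set) == 0) {
//       int n = CPU_COUNT_S(sz, set);          // cpus in our affinity mask
//     }
//     CPU_FREE(set);
//   }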

int os::Linux::active_processor_count() {
  return get_active_processor_count();
}

// Determine the active processor count from one of
// three different sources:
//
// 1. User option -XX:ActiveProcessorCount
// 2. kernel os calls (sched_getaffinity or sysconf(_SC_NPROCESSORS_ONLN))
// 3. extracted from the cgroup cpu subsystem (shares and quotas)
//
// Option 1, if specified, will always override.
// If the cgroup subsystem is active and configured, we
// will return the min of the cgroup and option 2 results.
// This is required since tools, such as numactl, that
// alter cpu affinity do not update the cgroup subsystem's
// cpuset configuration files.
int os::active_processor_count() {
  // User has overridden the number of active processors
  if (ActiveProcessorCount > 0) {
    log_trace(os)("active_processor_count: "
                  "active processor count set by user : %d",
                  ActiveProcessorCount);
    return ActiveProcessorCount;
  }

  int active_cpus;
  if (OSContainer::is_containerized()) {
    active_cpus = OSContainer::active_processor_count();
    log_trace(os)("active_processor_count: determined by OSContainer: %d",
                  active_cpus);
  } else {
    active_cpus = os::Linux::active_processor_count();
  }

  return active_cpus;
}
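
// Example: running with -XX:ActiveProcessorCount=4 makes the function above
// return 4 unconditionally (source 1), ignoring both the cgroup limits and
// the sched_getaffinity() mask.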

static bool should_warn_invalid_processor_id() {
  if (os::processor_count() == 1) {
    // Don't warn if we only have one processor
    return false;
  }

  static volatile int warn_once = 1;

  if (Atomic::load(&warn_once) == 0 ||
      Atomic::xchg(&warn_once, 0) == 0) {
    // Don't warn more than once
    return false;
  }

  return true;
}

uint os::processor_id() {
  const int id = Linux::sched_getcpu();

  if (id < processor_count()) {
    return (uint)id;
  }

  // Some environments (e.g. openvz containers and the rr debugger) incorrectly
  // report a processor id that is higher than the number of processors available.
  // This is problematic, for example, when implementing CPU-local data structures,
  // where the processor id is used to index into an array of length processor_count().
  // If this happens we return 0 here. This is safe since we always have at least
  // one processor, but it's not optimal for performance if we're actually executing
  // in an environment with more than one processor.
  if (should_warn_invalid_processor_id()) {
    log_warning(os)("Invalid processor id reported by the operating system "
                    "(got processor id %d, valid processor id range is 0-%d)",
                    id, processor_count() - 1);
    log_warning(os)("Falling back to assuming processor id is 0. "
                    "This could have a negative impact on performance.");
  }

  return 0;
}
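
// Example: under the rr debugger, sched_getcpu() can report an id (say, 128)
// that exceeds processor_count(); the fallback above then yields 0 instead
// of indexing out of bounds into a CPU-local array.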

void os::set_native_thread_name(const char *name) {
  if (Linux::_pthread_setname_np) {
    char buf[16];  // according to glibc manpage, 16 chars incl. '\0'
    snprintf(buf, sizeof(buf), "%s", name);
    buf[sizeof(buf) - 1] = '\0';
    const int rc = Linux::_pthread_setname_np(pthread_self(), buf);
    // ERANGE should not happen; all other errors should just be ignored.
    assert(rc != ERANGE, "pthread_setname_np failed");
  }
}
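
// Example (sketch): glibc's pthread_setname_np() rejects names longer than
// 15 characters plus the terminating '\0' with ERANGE, which is why the
// name is truncated into a 16-byte buffer first:
//
//   pthread_setname_np(pthread_self(), "VM Periodic Tas");        // 15 chars - ok
//   pthread_setname_np(pthread_self(), "VM Periodic Task Thread"); // ERANGE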

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

////////////////////////////////////////////////////////////////////////////////
// debug support

bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo) != 0) {
    st->print(PTR_FORMAT ": ", p2i(addr));
    if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
      st->print("%s+" PTR_FORMAT, dlinfo.dli_sname,
                p2i(addr) - p2i(dlinfo.dli_saddr));
    } else if (dlinfo.dli_fbase != NULL) {
      st->print("<offset " PTR_FORMAT ">", p2i(addr) - p2i(dlinfo.dli_fbase));
    } else {
      st->print("<absolute address>");
    }
    if (dlinfo.dli_fname != NULL) {
      st->print(" in %s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL) {
      st->print(" at " PTR_FORMAT, p2i(dlinfo.dli_fbase));
    }
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC
      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest)  lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest)  begin = lowest;
      Dl_info dlinfo2;
      if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
        end = (address) dlinfo2.dli_saddr;
      }
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}

////////////////////////////////////////////////////////////////////////////////
// misc

// This does not do anything on Linux. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters)
// on, e.g., Win32.
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
                         JavaCallArguments* args, JavaThread* thread) {
  f(value, method, args, thread);
}

void os::print_statistics() {
}

bool os::message_box(const char* title, const char* message) {
  int i;
  fdStream err(defaultStream::error_fd());
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (i = 0; i < 78; i++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}

// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

int os::open(const char *path, int oflag, int mode) {
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }

  // All file descriptors that are opened in the Java process and not
  // specifically destined for a subprocess should have the close-on-exec
  // flag set. If we don't set it, then careless 3rd party native code
  // might fork and exec without closing all appropriate file descriptors
  // (e.g. as we do in closeDescriptors in UNIXProcess.c), and this in
  // turn might:
  //
  // - cause end-of-file to fail to be detected on some file
  //   descriptors, resulting in mysterious hangs, or
  //
  // - might cause an fopen in the subprocess to fail on a system
  //   suffering from bug 1085341.
  //
  // (Yes, the default setting of the close-on-exec flag is a Unix
  // design flaw)
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
  //
  // Modern Linux kernels (after 2.6.23, 2007) support O_CLOEXEC with open().
  // O_CLOEXEC is preferable to using FD_CLOEXEC on an open file descriptor
  // because it saves a system call and removes a small window where the flag
  // is unset. On ancient Linux kernels the O_CLOEXEC flag will be ignored
  // and we fall back to using FD_CLOEXEC (see below).
#ifdef O_CLOEXEC
  oflag |= O_CLOEXEC;
#endif

  int fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  // If the open succeeded, the file might still be a directory
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }

#ifdef FD_CLOEXEC
  // Validate that the use of the O_CLOEXEC flag on open above worked.
  // With recent kernels, we will perform this check exactly once.
  static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
  if (!O_CLOEXEC_is_known_to_work) {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1) {
      if ((flags & FD_CLOEXEC) != 0)
        O_CLOEXEC_is_known_to_work = 1;
      else
        ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }
  }
#endif

  return fd;
}
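
// Example (sketch): the atomic pattern preferred above,
//   int fd = ::open64(path, O_RDONLY | O_CLOEXEC, 0);
// versus the racy two-step fallback, where another thread could fork+exec
// between the open and the fcntl:
//   int fd = ::open64(path, O_RDONLY, 0);
//   ::fcntl(fd, F_SETFD, ::fcntl(fd, F_GETFD) | FD_CLOEXEC);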

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
  return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}

// This code originates from JDK's sysAvailable
// from src/solaris/hpi/src/native_threads/src/sys_api_td.c

int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      int n;
      if (::ioctl(fd, FIONREAD, &n) >= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}
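
// Note: for character devices, FIFOs and sockets the FIONREAD ioctl above
// reports the bytes currently buffered in the kernel; for regular files the
// lseek64 path computes the remaining bytes as (end - cur) instead.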

// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  int prot;
  int flags = MAP_PRIVATE;

  if (read_only) {
    prot = PROT_READ;
  } else {
    prot = PROT_READ | PROT_WRITE;
  }

  if (allow_exec) {
    prot |= PROT_EXEC;
  }

  if (addr != NULL) {
    flags |= MAP_FIXED;
  }

  char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
                                     fd, file_offset);
  if (mapped_address == MAP_FAILED) {
    return NULL;
  }
  return mapped_address;
}
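
// Example (sketch): the read-only, no-fixed-address path above reduces to
//   char* p = (char*)mmap(NULL, bytes, PROT_READ, MAP_PRIVATE, fd, offset);
//   if (p == MAP_FAILED) { /* mapping failed, errno describes why */ }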

// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}

// Unmap a block of memory.
bool os::pd_unmap_memory(char* addr, size_t bytes) {
  return munmap(addr, bytes) == 0;
}

static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);

static jlong fast_cpu_time(Thread *thread) {
  clockid_t clockid;
  int rc = os::Linux::pthread_getcpuclockid(thread->osthread()->pthread_id(),
                                            &clockid);
  if (rc == 0) {
    return os::Linux::fast_thread_cpu_time(clockid);
  } else {
    // It's possible to encounter a terminated native thread that failed
    // to detach itself from the VM - which should result in ESRCH.
    assert_status(rc == ESRCH, rc, "pthread_getcpuclockid failed");
    return -1;
  }
}
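
// Example (sketch, standard POSIX calls only): reading a thread's CPU-time
// clock directly, which is what the fast path above boils down to:
//
//   clockid_t cid;
//   if (pthread_getcpuclockid(pthread_self(), &cid) == 0) {
//     struct timespec ts;
//     if (clock_gettime(cid, &ts) == 0) {
//       jlong ns = (jlong)ts.tv_sec * 1000000000 + ts.tv_nsec;
//     }
//   }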

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) return
// the fast estimate available on the platform.

jlong os::current_thread_cpu_time() {
  if (os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
  } else {
    // return user + sys since the cost is the same
    return slow_thread_cpu_time(Thread::current(), true /* user + sys */);
  }
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns
  if (os::Linux::supports_fast_thread_cpu_time()) {
    return fast_cpu_time(thread);
  } else {
    return slow_thread_cpu_time(thread, true /* user + sys */);
  }
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
  } else {
    return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time);
  }
}

jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
    return fast_cpu_time(thread);
  } else {
    return slow_thread_cpu_time(thread, user_sys_cpu_time);
  }
}

// -1 on error.
static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  pid_t tid = thread->osthread()->thread_id();
  char *s;
  char stat[2048];
  int statlen;
  char proc_name[64];
  int count;
  long sys_time, user_time;
  char cdummy;
  int idummy;
  long ldummy;
  FILE *fp;

  snprintf(proc_name, 64, "/proc/self/task/%d/stat", tid);
  fp = fopen(proc_name, "r");
  if (fp == NULL) return -1;
  statlen = fread(stat, 1, 2047, fp);
  stat[statlen] = '\0';
  fclose(fp);

  // Skip pid and the command string. Note that we could be dealing with
  // weird command names, e.g. user could decide to rename java launcher
  // to "java 1.4.2 :)", then the stat file would look like
  //                1234 (java 1.4.2 :)) R ... ...
  // We don't really need to know the command string, just find the last
  // occurrence of ")" and then start parsing from there. See bug 4726580.
  s = strrchr(stat, ')');
  if (s == NULL) return -1;

  // Skip blank chars
  do { s++; } while (s && isspace(*s));

  count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
                 &cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
                 &ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
                 &user_time, &sys_time);
  if (count != 13) return -1;
  if (user_sys_cpu_time) {
    return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
  } else {
    return (jlong)user_time * (1000000000 / clock_tics_per_sec);
  }
}
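
// For reference: in /proc/<pid>/task/<tid>/stat the utime and stime fields
// parsed above are reported in clock ticks. clock_tics_per_sec comes from
// sysconf(_SC_CLK_TCK) (typically 100), so each tick is converted to
// nanoseconds by multiplying with 1000000000 / clock_tics_per_sec.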

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

bool os::is_thread_cpu_time_supported() {
  return true;
}

// System loadavg support. Returns -1 if load average cannot be obtained.
// Linux doesn't yet have an (official) notion of processor sets,
// so just return the system wide load average.
int os::loadavg(double loadavg[], int nelem) {
  return ::getloadavg(loadavg, nelem);
}

void os::pause() {
  char filename[MAX_PATH];
  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
  } else {
    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  }

  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  if (fd != -1) {
    struct stat buf;
    ::close(fd);
    while (::stat(filename, &buf) == 0) {
      (void)::poll(NULL, 0, 100);
    }
  } else {
    jio_fprintf(stderr,
                "Could not open pause file '%s', continuing immediately.\n", filename);
  }
}

// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
  /*
   * Max length of /proc/sys/kernel/core_pattern is 128 characters.
   * See https://www.kernel.org/doc/Documentation/sysctl/kernel.txt
   */
  const int core_pattern_len = 129;
  char core_pattern[core_pattern_len] = {0};

  int core_pattern_file = ::open("/proc/sys/kernel/core_pattern", O_RDONLY);
  if (core_pattern_file == -1) {
    return -1;
  }

  ssize_t ret = ::read(core_pattern_file, core_pattern, core_pattern_len);
  ::close(core_pattern_file);
  if (ret <= 0 || ret >= core_pattern_len || core_pattern[0] == '\n') {
    return -1;
  }
  if (core_pattern[ret-1] == '\n') {
    core_pattern[ret-1] = '\0';
  } else {
    core_pattern[ret] = '\0';
  }

  // Replace the %p in the core pattern with the process id. NOTE: we do this
  // only if the pattern doesn't start with "|", and we support only one %p in
  // the pattern.
  char *pid_pos = strstr(core_pattern, "%p");
  const char* tail = (pid_pos != NULL) ? (pid_pos + 2) : "";  // skip over the "%p"
  int written;

  if (core_pattern[0] == '/') {
    if (pid_pos != NULL) {
      *pid_pos = '\0';
      written = jio_snprintf(buffer, bufferSize, "%s%d%s", core_pattern,
                             current_process_id(), tail);
    } else {
      written = jio_snprintf(buffer, bufferSize, "%s", core_pattern);
    }
  } else {
    char cwd[PATH_MAX];

    const char* p = get_current_directory(cwd, PATH_MAX);
    if (p == NULL) {
      return -1;
    }

    if (core_pattern[0] == '|') {
      written = jio_snprintf(buffer, bufferSize,
                             "\"%s\" (or dumping to %s/core.%d)",
                             &core_pattern[1], p, current_process_id());
    } else if (pid_pos != NULL) {
      *pid_pos = '\0';
      written = jio_snprintf(buffer, bufferSize, "%s/%s%d%s", p, core_pattern,
                             current_process_id(), tail);
    } else {
      written = jio_snprintf(buffer, bufferSize, "%s/%s", p, core_pattern);
    }
  }

  if (written < 0) {
    return -1;
  }

  if (((size_t)written < bufferSize) && (pid_pos == NULL) && (core_pattern[0] != '|')) {
    int core_uses_pid_file = ::open("/proc/sys/kernel/core_uses_pid", O_RDONLY);

    if (core_uses_pid_file != -1) {
      char core_uses_pid = 0;
      ssize_t ret = ::read(core_uses_pid_file, &core_uses_pid, 1);
      ::close(core_uses_pid_file);

      if (core_uses_pid == '1') {
        jio_snprintf(buffer + written, bufferSize - written,
                     ".%d", current_process_id());
      }
    }
  }

  return strlen(buffer);
}
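
// Example core_pattern values and the strings the parser above produces
// (illustrative, <pid> and <cwd> stand for the actual values):
//   /var/cores/core.%p     ->  "/var/cores/core.<pid>"
//   core                   ->  "<cwd>/core" (plus ".<pid>" if core_uses_pid is 1)
//   |/path/to/dump-handler ->  "\"/path/to/dump-handler\" (or dumping to <cwd>/core.<pid>)"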

bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen-len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, run 'gdb /proc/%d/exe %d'; then switch to thread " UINTX_FORMAT " (" INTPTR_FORMAT ")\n"
               "Enter 'yes' to launch gdb automatically (PATH must include gdb)\n"
               "Otherwise, press RETURN to abort...",
               os::current_process_id(), os::current_process_id(),
               os::current_thread_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // yes, user asked VM to launch debugger
    jio_snprintf(buf, sizeof(char)*buflen, "gdb /proc/%d/exe %d",
                 os::current_process_id(), os::current_process_id());

    os::fork_and_exec(buf);
    yes = false;
  }
  return yes;
}

// Java/Compiler thread:
//
//   Low memory addresses
// P0 +------------------------+
//    |                        |\  Java thread created by VM does not have glibc
//    |    glibc guard page    | - guard page, attached Java thread usually has
//    |                        |/  1 glibc guard page.
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |  HotSpot Guard Pages   | - red, yellow and reserved pages
//    |                        |/
//    +------------------------+ StackOverflow::stack_reserved_zone_base()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// Non-Java thread:
//
//   Low memory addresses
// P0 +------------------------+
//    |                        |\
//    |    glibc guard page    | - usually 1 page
//    |                        |/
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// ** P1 (aka bottom) and size (P2 = P1 + size) are the address and stack size
//    returned from pthread_attr_getstack().
// ** Due to an NPTL implementation error, Linux takes the glibc guard page out
//    of the stack size given in pthread_attr. We work around this for
//    threads created by the VM. (We adapt bottom to be P1 and size accordingly.)
//
#ifndef ZERO
static void current_stack_region(address * bottom, size_t * size) {
  if (os::is_primordial_thread()) {
    // primordial thread needs special handling because pthread_getattr_np()
    // may return bogus value.
    *bottom = os::Linux::initial_thread_stack_bottom();
    *size   = os::Linux::initial_thread_stack_size();
  } else {
    pthread_attr_t attr;

    int rslt = pthread_getattr_np(pthread_self(), &attr);

    // JVM needs to know exact stack location, abort if it fails
    if (rslt != 0) {
      if (rslt == ENOMEM) {
        vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
      } else {
        fatal("pthread_getattr_np failed with error = %d", rslt);
      }
    }

    if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
      fatal("Cannot locate current stack attributes!");
    }

    // Work around NPTL stack guard error.
    size_t guard_size = 0;
    rslt = pthread_attr_getguardsize(&attr, &guard_size);
    if (rslt != 0) {
      fatal("pthread_attr_getguardsize failed with error = %d", rslt);
    }
    *bottom += guard_size;
    *size   -= guard_size;

    pthread_attr_destroy(&attr);
  }
  assert(os::current_stack_pointer() >= *bottom &&
         os::current_stack_pointer() < *bottom + *size, "just checking");
}

address os::current_stack_base() {
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return (bottom + size);
}

size_t os::current_stack_size() {
  // This stack size includes the usable stack and HotSpot guard pages
  // (for the threads that have Hotspot guard pages).
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return size;
}
#endif

static inline struct timespec get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
  return st.st_mtim;
}

int os::compare_file_modified_times(const char* file1, const char* file2) {
  struct timespec filetime1 = get_mtime(file1);
  struct timespec filetime2 = get_mtime(file2);
  int diff = filetime1.tv_sec - filetime2.tv_sec;
  if (diff == 0) {
    return filetime1.tv_nsec - filetime2.tv_nsec;
  }
  return diff;
}

bool os::supports_map_sync() {
  return true;
}

void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
  // Note: all ranges are "[..)"
  unsigned long long start = (unsigned long long)addr;
  unsigned long long end = start + bytes;
  FILE* f = ::fopen("/proc/self/maps", "r");
  int num_found = 0;
  if (f != NULL) {
    st->print_cr("Range [%llx-%llx) contains: ", start, end);
    char line[512];
    while (fgets(line, sizeof(line), f) == line) {
      unsigned long long segment_start = 0;
      unsigned long long segment_end = 0;
      if (::sscanf(line, "%llx-%llx", &segment_start, &segment_end) == 2) {
        // Let's print out every range which touches ours.
        if (segment_start < end && segment_end > start) {
          num_found++;
          st->print("%s", line);  // line includes \n
        }
      }
    }
    ::fclose(f);
    if (num_found == 0) {
      st->print_cr("nothing.");
    }
    st->cr();
  }
}
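
// Example: a typical /proc/self/maps line matched by the sscanf above
// (illustrative values):
//   7f3c80000000-7f3c80021000 rw-p 00000000 00:00 0    [heap]
// The two leading hex fields are the [start, end) addresses compared
// against the requested range.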