GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/os/aix/vm/os_aix.cpp
/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS doc, #pragma alloca must be used
// with the C++ compiler before referencing the function alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD 100
#define ERROR_MP_EXTSHM_ACTIVE 101
#define ERROR_MP_VMGETINFO_FAILED 102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// The semantics in this file are thus that codeptr_t is a *real code ptr*.
// This means that any function taking codeptr_t as arguments will assume
// a real codeptr and won't handle function descriptors (e.g. getFuncName),
// whereas functions taking address as args will deal with function
// descriptors (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;

// Typedefs for stackslots, stack pointers, pointers to op codes.
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

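// Illustrative sketch (not part of the original file) of the distinction
// drawn above. On AIX/PPC64, taking the address of a function yields a
// function *descriptor* in the data segment, not a code address:
//
//   address a = CAST_FROM_FN_PTR(address, os::jvm_path);
//   // ok: address-taking APIs such as os::dll_address_to_library_name()
//   // resolve the descriptor internally.
//   codeptr_t cp = (codeptr_t) a;
//   // wrong: codeptr_t-taking helpers expect the already-resolved entry
//   // point, not the descriptor.
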
// Excerpts from systemcfg.h definitions newer than AIX 5.3.
#ifndef PV_7
#define PV_7 0x200000        /* Power PC 7 */
#define PV_7_Compat 0x208000 /* Power PC 7 */
#endif
#ifndef PV_8
#define PV_8 0x300000        /* Power PC 8 */
#define PV_8_Compat 0x308000 /* Power PC 8 */
#endif

#define trcVerbose(fmt, ...) { /* PPC port */ \
  if (Verbose) { \
    fprintf(stderr, fmt, ##__VA_ARGS__); \
    fputc('\n', stderr); fflush(stderr); \
  } \
}
#define trc(fmt, ...) /* PPC port */

#define ERRBYE(s) { \
  trcVerbose(s); \
  return -1; \
}

// query dimensions of the stack of the calling thread
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// function to check a given stack pointer against given stack limits
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}

// returns true if function is a valid codepointer
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
    return false;
  }
  return true;
}

// Macro to check a given stack pointer against given stack limits and to die if the test fails.
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
  guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// Macro to check the current stack pointer against given stack limits.
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}
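
// Illustrative note (not part of the original file): is_valid_stackpointer()
// accepts only 8-byte-aligned pointers inside [stack_base - stack_size, stack_base].
// For example, with stack_base == 0x1000000 and stack_size == 0x10000:
//
//   CHECK_STACK_PTR(0xFF8000, 0x1000000, 0x10000);    // inside the range - passes
//   // CHECK_STACK_PTR(0xFE0000, 0x1000000, 0x10000); // below the range - would die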

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
size_t    os::Aix::_shm_default_page_size = -1;
int       os::Aix::_can_use_64K_pages = -1;
int       os::Aix::_can_use_16M_pages = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error = -1;  // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;       // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals = true;
static pid_t    _initial_pid = 0;
static int      SR_signum = SIGUSR2;     // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;
static pthread_mutex_t dl_mutex;         // Used to protect dlsym() calls.

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return 0xFFFFFFFFFFFFFFFFLL;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

////////////////////////////////////////////////////////////////////////////////
// environment support

bool os::getenv(const char* name, char* buf, int len) {
  const char* val = ::getenv(name);
  if (val != NULL && strlen(val) < (size_t)len) {
    strcpy(buf, val);
    return true;
  }
  if (len > 0) buf[0] = 0; // return a null string
  return false;
}

// Return true if the process is running with special privileges,
// i.e. the effective uid/gid differs from the real uid/gid (setuid/setgid).

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x80000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}
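
// Worked example (illustrative, not from the original source): disclaiming
// 5 GB (0x140000000 bytes) with my_disclaim64() issues two full 2 GB
// disclaims plus one trailing 1 GB disclaim:
//   numFullDisclaimsNeeded = 0x140000000 / 0x80000000 = 2
//   lastDisclaimSize       = 0x140000000 % 0x80000000 = 0x40000000 (1 GB)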

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif


// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }

}

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // Get the number of online (logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Retrieve information about multipage size support. Will initialize
// Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,
// Aix::_can_use_16M_pages.
// Must be called before calling os::large_page_init().
void os::Aix::query_multipage_support() {

  guarantee(_page_size == -1 &&
            _stack_page_size == -1 &&
            _can_use_64K_pages == -1 &&
            _can_use_16M_pages == -1 &&
            g_multipage_error == -1,
            "do not call twice");

  _page_size = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(_page_size == SIZE_4K, "surprise!");


  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is influenced either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // default should be 4K.
  size_t data_page_size = SIZE_4K;
  {
    void* p = ::malloc(SIZE_16M);
    guarantee(p != NULL, "malloc failed");
    data_page_size = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // query default shm page size (LDR_CNTRL SHMPSIZE)
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    _shm_default_page_size = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as primordial
  // thread (because the primordial thread's stack may have a different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons, so we may just as well guarantee it here.
  guarantee(!os::is_primordial_thread(), "Must not be called for primordial thread");

  // query stack page size
  {
    int dummy = 0;
    _stack_page_size = os::Aix::query_pagesize(&dummy);
    // everything else would surprise me and should be looked into
    guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");
    // also, just for completeness: pthread stacks are allocated from C heap, so
    // stack page size should be the same as data page size
    guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");
  }

  // EXTSHM is bad: among other things, it prevents setting pagesize dynamically
  // for system V shm.
  if (Aix::extshm()) {
    if (Verbose) {
      fprintf(stderr, "EXTSHM is active - will disable large page support.\n"
                      "Please make sure EXTSHM is OFF for large page support.\n");
    }
    g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;
    _can_use_64K_pages = _can_use_16M_pages = 0;
    goto query_multipage_support_end;
  }

  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      if (Verbose) {
        fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
        fprintf(stderr, "disabling multipage support.\n");
      }
      g_multipage_error = ERROR_MP_VMGETINFO_FAILED;
      _can_use_64K_pages = _can_use_16M_pages = 0;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    if (Verbose) {
      fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
      for (int i = 0; i < num_psizes; i++) {
        fprintf(stderr, " %s ", describe_pagesize(sizes[i]));
      }
      fprintf(stderr, " .\n");
    }

    // Can we use 64K, 16M pages?
    _can_use_64K_pages = 0;
    _can_use_16M_pages = 0;
    for (int i = 0; i < num_psizes; i++) {
      if (sizes[i] == SIZE_64K) {
        _can_use_64K_pages = 1;
      } else if (sizes[i] == SIZE_16M) {
        _can_use_16M_pages = 1;
      }
    }

    if (!_can_use_64K_pages) {
      g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;
    }

    // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,
    // there must be an actual 16M page pool, and we must run with enough rights.
    if (_can_use_16M_pages) {
      const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee(shmid != -1, "shmget failed");
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = SIZE_16M;
      const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0 ? true : false;
      const int en = errno;
      ::shmctl(shmid, IPC_RMID, NULL);
      if (!can_set_pagesize) {
        if (Verbose) {
          fprintf(stderr, "Failed to allocate even one measly 16M page. shmctl failed with %d (%s).\n"
                          "Will deactivate 16M support.\n", en, strerror(en));
        }
        _can_use_16M_pages = 0;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  guarantee(_page_size != -1 &&
            _stack_page_size != -1 &&
            _can_use_64K_pages != -1 &&
            _can_use_16M_pages != -1, "Page sizes not properly initialized");

  if (_can_use_64K_pages) {
    g_multipage_error = 0;
  }

  if (Verbose) {
    fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));
    fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));
    fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));
    fprintf(stderr, "Can use 64K pages dynamically with shared memory: %s\n", (_can_use_64K_pages ? "yes" : "no"));
    fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" : "no"));
    fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);
  }

} // end os::Aix::query_multipage_support()
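
// Illustrative note (not from the original source): the defaults probed above
// can be changed per process through the LDR_CNTRL environment variable
// mentioned in the comments; the suboption syntax below is an assumption
// based on AIX documentation:
//
//   LDR_CNTRL=DATAPSIZE=64K@SHMPSIZE=64K java ...
//
// With such a setting, the malloc() and shmat() probes above would report
// 64K instead of 4K pages.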

void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"
#define ENDORSED_DIR    "/lib/endorsed"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX3((size_t)MAXPATHLEN,                          // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR), // extensions dir
         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR));  // endorsed dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0'; // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0'; // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0'; // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  // Endorsed standards default directory.
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
                                 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
    return true;
  } else {
    return false;
  }
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  // by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow debugger in
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// the content of pmi is undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics.
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total Total real memory (in 4 KB pages).
    // u_longlong_t real_free  Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free  Free paging space (in 4 KB pages).

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
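
// Illustrative usage sketch (not part of the original file): converting the
// byte counts that get_meminfo() fills in into megabytes for a trace message.
//
//   os::Aix::meminfo_t mi;
//   if (os::Aix::get_meminfo(&mi)) {
//     trcVerbose("real memory: %llu MB total, %llu MB free",
//                mi.real_total / (1024 * 1024), mi.real_free / (1024 * 1024));
//   }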

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset (&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy (pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_8:
    strcpy(pci->version, "Power PC 8");
    break;
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  case PV_8_Compat:
    strcpy(pci->version, "PV_8_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} // end os::Aix::get_cpuinfo
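
// Illustrative note (not from the original source): perfstat delivers the
// load average as a fixed-point value with SBITS fractional bits (SBITS
// comes from <sys/sysinfo.h>). Assuming SBITS == 16 for the sake of the
// example, a raw reading of 98304 decodes to 98304 / (1 << 16) = 1.5
// runnable threads on average, which is what the division above computes.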

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}
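
// Illustrative note (not part of the original file): the alloca() call above
// shifts each new thread's stack pointer by ((pid ^ counter) & 7) * 128, i.e.
// by 0, 128, ..., up to 896 bytes. Identical hot frames in different threads
// therefore start at different cache line offsets instead of competing for
// the same cache lines.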

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } // else let thread_create() pick the default value (96 K on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret == 0) {
    // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
  } else {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "thr_continue failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}
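
// Illustrative usage sketch (not part of the original file): the wrappers
// above map directly onto the pthread TLS API, roughly:
//
//   int idx = os::allocate_thread_local_storage();  // pthread_key_create()
//   os::thread_local_storage_at_put(idx, my_ptr);   // pthread_setspecific()
//   void* p = ::pthread_getspecific((pthread_key_t)idx);
//
// where my_ptr is any thread-private pointer the caller wants to stash.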

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

// For now, we say that AIX does not support vtime. I have no idea
// whether it can actually be made to (DLD, 9/13/05).

bool os::supports_vtime() { return false; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
double os::elapsedVTime() {
  // better than nothing, but not much
  return elapsedTime();
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

// We need to manually declare mread_real_time,
// because IBM didn't provide a prototype in time.h.
// (they probably only ever tested in C, not C++)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  } else {
    // On AIX use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
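
// Worked example (illustrative, not from the original source): after
// conversion, tb_high holds whole seconds and tb_low the nanosecond
// remainder, so tb_high == 2 and tb_low == 500000000 yields
// 2 * 1000000000 + 500000000 = 2500000000 ns, i.e. 2.5 seconds.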

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  {
    // gettimeofday - based on time in seconds since the Epoch thus does not wrap
    info_ptr->max_value = ALL_64_BITS;

    // gettimeofday is a real time clock so it skips
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward = true;
  }

  info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }

int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre-NPTL behaviour.

  // if you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in osThread_linux.hpp file

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
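
// Illustrative examples (not part of the original file) of the naming scheme
// implemented above, using a hypothetical library "jsig":
//
//   dll_build_name(buf, sizeof(buf), NULL, "jsig");       // -> "libjsig.so"
//   dll_build_name(buf, sizeof(buf), "/opt/lib", "jsig"); // -> "/opt/lib/libjsig.so"
//
// A search-path pname such as "/a:/b" yields the first candidate
// ("/a/libjsig.so", then "/b/libjsig.so") that actually exists on disk.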

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
  if (lib) {
    if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
      return true;
    } else {
      return false;
    }
  } else {
    lib = LoadedLibraries::find_for_data_address(addr);
    if (lib) {
      if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
        return true;
      } else {
        return false;
      }
    } else {
      return false;
    }
  }
}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
// it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
// code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
// NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
  if (lib) {
    // it's a real code pointer
    return p;
  } else {
    lib = LoadedLibraries::find_for_data_address(p);
    if (lib) {
      // pointer to data segment, potential function descriptor
      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
      if (LoadedLibraries::find_for_text_address(code_entry)) {
        // it's a function descriptor
        return code_entry;
      }
    }
  }
  return NULL;
}
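
// Illustrative note (not part of the original file): an AIX/PPC64 function
// descriptor is conceptually a three-word record in the data segment,
// something like (hypothetical mirror of the FunctionDescriptor class from
// porting_aix.hpp):
//
//   struct function_descriptor_sketch {
//     void* entry; // first instruction of the function (text segment)
//     void* toc;   // TOC base register value for the callee
//     void* env;   // environment pointer (unused by C/C++)
//   };
//
// The helper above follows ->entry() and then re-checks that the result
// lies in a text segment before trusting it.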

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset);
}

static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: module name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  // initialize output parameters
  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
  if (lib) {
    if (p_name && namelen > 0) {
      sprintf(p_name, "%.*s", (int) namelen, lib->get_shortname());
    }
    return 0;
  }

  trcVerbose("pc outside any module");

  return -1;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
    return true;
  }
  return false;
}

// Loads .dll/.so and in case of error it checks if .dll/.so was built
// for the same architecture as Hotspot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

  if (ebuf && ebuflen > 0) {
    ebuf[0] = '\0';
    ebuf[ebuflen - 1] = '\0';
  }

  if (!filename || strlen(filename) == 0) {
    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    return NULL;
  }

  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload dll cache. Don't do this in signal handling.
    LoadedLibraries::reload();
    return result;
  } else {
    // error analysis when dlopen fails
    const char* const error_report = ::dlerror();
    if (error_report && ebuf && ebuflen > 0) {
      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
    }
  }
  return NULL;
}

// Glibc-2.0 libdl is not MT safe. If you are building with any glibc,
// chances are you might want to run the generated bits against glibc-2.0
// libdl.so, so always use locking for any version of glibc.
void* os::dll_lookup(void* handle, const char* name) {
  pthread_mutex_lock(&dl_mutex);
  void* res = dlsym(handle, name);
  pthread_mutex_unlock(&dl_mutex);
  return res;
}

void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}
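
// Illustrative usage sketch (not part of the original file), with a
// hypothetical library and symbol name:
//
//   char ebuf[1024];
//   void* handle = os::dll_load("libfoo.so", ebuf, sizeof(ebuf));
//   if (handle != NULL) {
//     void* sym = os::dll_lookup(handle, "foo_init");
//   } else {
//     trcVerbose("dll_load failed: %s", ebuf);
//   }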

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  st->print("uname:");
  struct utsname name;
  uname(&name);
  st->print(name.sysname); st->print(" ");
  st->print(name.nodename); st->print(" ");
  st->print(name.release); st->print(" ");
  st->print(name.version); st->print(" ");
  st->print(name.machine);
  st->cr();

  // rlimit
  st->print("rlimit:");
  struct rlimit rlim;

  st->print(" STACK ");
  getrlimit(RLIMIT_STACK, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", CORE ");
  getrlimit(RLIMIT_CORE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", NPROC ");
  st->print("%d", sysconf(_SC_CHILD_MAX));

  st->print(", NOFILE ");
  getrlimit(RLIMIT_NOFILE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%d", rlim.rlim_cur);

  st->print(", AS ");
  getrlimit(RLIMIT_AS, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  // Print limits on DATA, because it limits the C-heap.
  st->print(", DATA ");
  getrlimit(RLIMIT_DATA, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);
  st->cr();

  // load average
  st->print("load average:");
  double loadavg[3] = {-1.L, -1.L, -1.L};
  os::loadavg(loadavg, 3);
  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  st->cr();
}

int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  // Not yet implemented.
  return 0;
}

void os::print_memory_info(outputStream* st) {

  st->print_cr("Memory:");

  st->print_cr(" default page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr(" default stack page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr(" default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));
  st->print_cr(" can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" : "no"));
  st->print_cr(" can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" : "no"));
  if (g_multipage_error != 0) {
    st->print_cr(" multipage error: %d", g_multipage_error);
  }

  // print out LDR_CNTRL because it affects the default page sizes
  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
  st->print_cr(" LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");

  const char* const extshm = ::getenv("EXTSHM");
  st->print_cr(" EXTSHM=%s.", extshm ? extshm : "<unset>");

  // Call os::Aix::get_meminfo() to retrieve memory statistics.
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    char buffer[256];
    if (os::Aix::on_aix()) {
      jio_snprintf(buffer, sizeof(buffer),
                   " physical total : %llu\n"
                   " physical free  : %llu\n"
                   " swap total     : %llu\n"
                   " swap free      : %llu\n",
                   mi.real_total,
                   mi.real_free,
                   mi.pgsp_total,
                   mi.pgsp_free);
    } else {
      Unimplemented();
    }
    st->print_raw(buffer);
  } else {
    st->print_cr(" (no more information available)");
  }
}

void os::pd_print_cpu_info(outputStream* st) {
  // cpu
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query number of active processors after crash
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::cpu_features());
  st->cr();
}

void os::print_siginfo(outputStream* st, void* siginfo) {
  // Use common posix version.
  os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
  st->cr();
}

static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen);

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, SR_signum, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SIGTRAP, buf, buflen);
  print_signal_handler(st, SIGDANGER, buf, buflen);
}

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = realpath((char *)dlinfo.dli_fname, buf);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  strcpy(saved_jvm_path, buf);
}

void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}

////////////////////////////////////////////////////////////////////////////////
// sun.misc.Signal support

static volatile jint sigint_count = 0;

static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && is_error_reported()) {
    os::die();
  }

  os::signal_notify(sig);
}

void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}

extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}

void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}

void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}

//
// The following code was moved from os.cpp to make it
// platform specific, which it is by its very nature.
//

// Will be modified when max signal is changed to be dynamic
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };

// Linux (POSIX) specific handshaking semaphore.
static sem_t sig_sem;

void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  int rc = ::sem_init(&sig_sem, 0, 0);
  guarantee(rc != -1, "sem_init failed");
}

void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  ::sem_post(&sig_sem);
}

static int check_pending_signals(bool wait) {
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      ::sem_wait(&sig_sem);

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        ::sem_post(&sig_sem);

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
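
// Note on the claim loop in check_pending_signals() above: pending_signals[i]
// may be incremented concurrently by signal_notify(), so a plain decrement
// could lose a racing update. The cmpxchg only succeeds if the slot still
// holds the value we sampled; e.g. with n == 2, Atomic::cmpxchg(1, &slot, 2)
// claims exactly one pending signal and leaves any concurrent increment intact.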

int os::signal_lookup() {
  return check_pending_signals(false);
}

int os::signal_wait() {
  return check_pending_signals(true);
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

// AddrRange describes an immutable address range
//
// This is a helper class for the 'shared memory bookkeeping' below.
class AddrRange {
  friend class ShmBkBlock;

  char* _start;
  size_t _size;

public:

  AddrRange(char* start, size_t size)
    : _start(start), _size(size)
  {}

  AddrRange(const AddrRange& r)
    : _start(r.start()), _size(r.size())
  {}

  char* start() const { return _start; }
  size_t size() const { return _size; }
  char* end() const { return _start + _size; }
  bool is_empty() const { return _size == 0 ? true : false; }

  static AddrRange empty_range() { return AddrRange(NULL, 0); }

  bool contains(const char* p) const {
    return start() <= p && end() > p;
  }

  bool contains(const AddrRange& range) const {
    return start() <= range.start() && end() >= range.end();
  }

  bool intersects(const AddrRange& range) const {
    return (range.start() <= start() && range.end() > start()) ||
           (range.start() < end() && range.end() >= end()) ||
           contains(range);
  }

  bool is_same_range(const AddrRange& range) const {
    return start() == range.start() && size() == range.size();
  }

  // return the closest inside range consisting of whole pages
  AddrRange find_closest_aligned_range(size_t pagesize) const {
    if (pagesize == 0 || is_empty()) {
      return empty_range();
    }
    char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
    char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
    if (from > to) {
      return empty_range();
    }
    return AddrRange(from, to - from);
  }
};
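
// Worked example for find_closest_aligned_range() (illustrative values only):
// with pagesize 4K, the range [0x10000100, 0x10003100) shrinks inward to
// [0x10001000, 0x10003000) - only pages lying wholly inside the original
// range survive. A range smaller than one page collapses to empty_range().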

////////////////////////////////////////////////////////////////////////////
// shared memory bookkeeping
//
// the os::reserve_memory() API and friends hand out different kinds of memory, depending
// on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
//
// But these memory types have to be treated differently. For example, to uncommit
// mmap-based memory, msync(MS_INVALIDATE) is needed; to uncommit shmat-based memory,
// disclaim64() is needed.
//
// Therefore we need to keep track of the allocated memory segments and their
// properties.

// ShmBkBlock: base class for all blocks in the shared memory bookkeeping
class ShmBkBlock {

  ShmBkBlock* _next;

protected:

  AddrRange _range;
  const size_t _pagesize;
  const bool _pinned;

public:

  ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
    : _next(NULL), _range(range), _pagesize(pagesize), _pinned(pinned) {

    assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
    assert(!_range.is_empty(), "invalid range");
  }

  virtual void print(outputStream* st) const {
    st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",
              _range.start(), _range.end(), _range.size(),
              _range.size() / _pagesize, describe_pagesize(_pagesize),
              _pinned ? "pinned" : "");
  }

  enum Type { MMAP, SHMAT };
  virtual Type getType() = 0;

  char* base() const { return _range.start(); }
  size_t size() const { return _range.size(); }

  void setAddrRange(AddrRange range) {
    _range = range;
  }

  bool containsAddress(const char* p) const {
    return _range.contains(p);
  }

  bool containsRange(const char* p, size_t size) const {
    return _range.contains(AddrRange((char*)p, size));
  }

  bool isSameRange(const char* p, size_t size) const {
    return _range.is_same_range(AddrRange((char*)p, size));
  }

  virtual bool disclaim(char* p, size_t size) = 0;
  virtual bool release() = 0;

  // blocks live in a list.
  ShmBkBlock* next() const { return _next; }
  void set_next(ShmBkBlock* blk) { _next = blk; }

}; // end: ShmBkBlock


// ShmBkMappedBlock: describes a block allocated with mmap()
class ShmBkMappedBlock : public ShmBkBlock {
public:

  ShmBkMappedBlock(AddrRange range)
    : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned

  void print(outputStream* st) const {
    ShmBkBlock::print(st);
    st->print_cr(" - mmap'ed");
  }

  Type getType() {
    return MMAP;
  }

  bool disclaim(char* p, size_t size) {

    AddrRange r(p, size);

    guarantee(_range.contains(r), "invalid disclaim");

    // only disclaim whole ranges.
    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
    if (r2.is_empty()) {
      return true;
    }

    const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);

    if (rc != 0) {
      warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
    }

    return rc == 0 ? true : false;
  }

  bool release() {
    // mmap'ed blocks are released using munmap
    if (::munmap(_range.start(), _range.size()) != 0) {
      warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
      return false;
    }
    return true;
  }
}; // end: ShmBkMappedBlock

// ShmBkShmatedBlock: describes a block allocated with shmget/shmat()
class ShmBkShmatedBlock : public ShmBkBlock {
public:

  ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
    : ShmBkBlock(range, pagesize, pinned) {}

  void print(outputStream* st) const {
    ShmBkBlock::print(st);
    st->print_cr(" - shmat'ed");
  }

  Type getType() {
    return SHMAT;
  }

  bool disclaim(char* p, size_t size) {

    AddrRange r(p, size);

    if (_pinned) {
      return true;
    }

    // shmat'ed blocks are disclaimed using disclaim64
    guarantee(_range.contains(r), "invalid disclaim");

    // only disclaim whole ranges.
    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
    if (r2.is_empty()) {
      return true;
    }

    const bool rc = my_disclaim64(r2.start(), r2.size());

    if (Verbose && !rc) {
      warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
    }

    return rc;
  }

  bool release() {
    bool rc = false;
    if (::shmdt(_range.start()) != 0) {
      warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
    } else {
      rc = true;
    }
    return rc;
  }

}; // end: ShmBkShmatedBlock

static ShmBkBlock* g_shmbk_list = NULL;
static volatile jint g_shmbk_table_lock = 0;

// keep some usage statistics
static struct {
  int    nodes;    // number of nodes in list
  size_t bytes;    // reserved - not committed - bytes.
  int    reserves; // how often reserve was called
  int    lookups;  // how often a lookup was made
} g_shmbk_stats = { 0, 0, 0, 0 };

// add information about a shared memory segment to the bookkeeping
static void shmbk_register(ShmBkBlock* p_block) {
  guarantee(p_block, "logic error");
  p_block->set_next(g_shmbk_list);
  g_shmbk_list = p_block;
  g_shmbk_stats.reserves++;
  g_shmbk_stats.bytes += p_block->size();
  g_shmbk_stats.nodes++;
}

// remove a shared memory segment from the bookkeeping
static void shmbk_unregister(ShmBkBlock* p_block) {
  ShmBkBlock* p = g_shmbk_list;
  ShmBkBlock* prev = NULL;
  while (p) {
    if (p == p_block) {
      if (prev) {
        prev->set_next(p->next());
      } else {
        g_shmbk_list = p->next();
      }
      g_shmbk_stats.nodes--;
      g_shmbk_stats.bytes -= p->size();
      return;
    }
    prev = p;
    p = p->next();
  }
  assert(false, "should not happen");
}

// given a pointer, return shared memory bookkeeping record for the segment it points into.
// using the returned block info must happen under lock protection.
static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
  g_shmbk_stats.lookups++;
  ShmBkBlock* p = g_shmbk_list;
  while (p) {
    if (p->containsAddress(addr)) {
      return p;
    }
    p = p->next();
  }
  return NULL;
}

// dump all information about all memory segments allocated with os::reserve_memory()
void shmbk_dump_info() {
  tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
                "total reserves: %d total lookups: %d)",
                g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);
  const ShmBkBlock* p = g_shmbk_list;
  int i = 0;
  while (p) {
    p->print(tty);
    p = p->next();
    i++;
  }
}

#define LOCK_SHMBK   { ThreadCritical _LOCK_SHMBK;
#define UNLOCK_SHMBK }
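
// The two macros above open and close a C++ scope holding a ThreadCritical
// lock object, so bookkeeping accesses are bracketed like this (sketch):
//
//   LOCK_SHMBK
//     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
//     ...
//   UNLOCK_SHMBK
//
// An early return inside the scope is safe - the ThreadCritical destructor
// releases the lock - but every LOCK_SHMBK needs its matching UNLOCK_SHMBK
// for the braces to balance.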

// End: shared memory bookkeeping
////////////////////////////////////////////////////////////////////////////////////////////////////

int os::vm_page_size() {
  // Seems redundant as all get out
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}

// Aix allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}

int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {

  // Commit is a noop. There is no explicit commit
  // needed on AIX. Memory is committed when touched.
  //
  // Debug : check address range for validity
#ifdef ASSERT
  LOCK_SHMBK
    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
    if (!block) {
      fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", addr);
      shmbk_dump_info();
      assert(false, "invalid pointer");
      return false;
    } else if (!block->containsRange(addr, size)) {
      fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n", addr, addr + size);
      shmbk_dump_info();
      assert(false, "invalid range");
      return false;
    }
  UNLOCK_SHMBK
#endif // ASSERT

  return 0;
}

bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
  return os::Aix::commit_memory_impl(addr, size, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  os::Aix::commit_memory_impl(addr, size, exec);
}

int os::Aix::commit_memory_impl(char* addr, size_t size,
                                size_t alignment_hint, bool exec) {
  return os::Aix::commit_memory_impl(addr, size, exec);
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);
}

bool os::pd_uncommit_memory(char* addr, size_t size) {

  // Delegate to ShmBkBlock class which knows how to uncommit its memory.

  bool rc = false;
  LOCK_SHMBK
    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
    if (!block) {
      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
      shmbk_dump_info();
      assert(false, "invalid pointer");
      return false;
    } else if (!block->containsRange(addr, size)) {
      fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
      shmbk_dump_info();
      assert(false, "invalid range");
      return false;
    }
    rc = block->disclaim(addr, size);
  UNLOCK_SHMBK

  if (Verbose && !rc) {
    warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
  }
  return rc;
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::guard_memory(addr, size);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::unguard_memory(addr, size);
}

void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

void os::numa_make_global(char *addr, size_t bytes) {
}

void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}

bool os::numa_topology_changed() {
  return false;
}

size_t os::numa_get_groups_num() {
  return 1;
}

int os::numa_get_group_id() {
  return 0;
}

size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (size > 0) {
    ids[0] = 0;
    return 1;
  }
  return 0;
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

// Flags for reserve_shmatted_memory:
#define RESSHM_WISHADDR_OR_FAIL     1
#define RESSHM_TRY_16M_PAGES        2
#define RESSHM_16M_PAGES_OR_FAIL    4

// Result of reserve_shmatted_memory:
struct shmatted_memory_info_t {
  char* addr;
  size_t pagesize;
  bool pinned;
};

// Reserve a section of shmatted memory.
// params:
// bytes [in]:          size of memory, in bytes
// requested_addr [in]: wish address.
//                      NULL = no wish.
//                      If RESSHM_WISHADDR_OR_FAIL is set in flags and the wish address cannot
//                      be obtained, the function will fail. Otherwise the wish address is treated
//                      as a hint and another pointer is returned.
// flags [in]:          some flags. Valid flags are:
//                      RESSHM_WISHADDR_OR_FAIL - fail if a wish address is given and cannot be obtained.
//                      RESSHM_TRY_16M_PAGES - try to allocate from the 16M page pool
//                          (requires UseLargePages and Use16MPages)
//                      RESSHM_16M_PAGES_OR_FAIL - if we cannot allocate from the 16M page pool, fail.
//                          Otherwise any other page size will do.
// p_info [out]:        holds information about the created shared memory segment.
static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {

  assert(p_info, "parameter error");

  // init output struct.
  p_info->addr = NULL;

  // We should not be here for EXTSHM=ON.
  if (os::Aix::extshm()) {
    ShouldNotReachHere();
  }

  // extract flags. sanity checks.
  const bool wishaddr_or_fail =
    flags & RESSHM_WISHADDR_OR_FAIL;
  const bool try_16M_pages =
    flags & RESSHM_TRY_16M_PAGES;
  const bool f16M_pages_or_fail =
    flags & RESSHM_16M_PAGES_OR_FAIL;

  // first check: if a wish address is given and it is mandatory, but not aligned to segment boundary,
  // shmat will fail anyway, so save some cycles by failing right away
  if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M != 0)) {
    if (wishaddr_or_fail) {
      return false;
    } else {
      requested_addr = NULL;
    }
  }

  char* addr = NULL;

  // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change
  // the page size dynamically.
  const size_t size = align_size_up(bytes, SIZE_16M);

  // reserve the shared segment
  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
  if (shmid == -1) {
    warning("shmget(.., %lld, ..) failed (errno: %d).", size, errno);
    return false;
  }

  // Important note:
  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
  // We must right after attaching it remove it from the system. System V shm segments are global and
  // survive the process.
  // So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm".

  // try forcing the page size
  size_t pagesize = -1; // unknown so far

  if (UseLargePages) {

    struct shmid_ds shmbuf;
    memset(&shmbuf, 0, sizeof(shmbuf));

    // First, try to take from 16M page pool if...
    if (os::Aix::can_use_16M_pages()  // we can ...
        && Use16MPages                // we are not explicitly forbidden to do so (-XX:-Use16MPages)..
        && try_16M_pages) {           // caller wants us to.
      shmbuf.shm_pagesize = SIZE_16M;
      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
        pagesize = SIZE_16M;
      } else {
        warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",
                size / SIZE_16M, errno);
        if (f16M_pages_or_fail) {
          goto cleanup_shm;
        }
      }
    }

    // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,
    // because the 64K page pool may also be exhausted.
    if (pagesize == -1) {
      shmbuf.shm_pagesize = SIZE_64K;
      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
        pagesize = SIZE_64K;
      } else {
        warning("Failed to allocate %d 64K pages. (shmctl failed with %d)",
                size / SIZE_64K, errno);
        // here I give up. leave page_size -1 - later, after attaching, we will query the
        // real page size of the attached memory. (in theory, it may be something different
        // from 4K if LDR_CNTRL SHM_PSIZE is set)
      }
    }
  }

  // sanity point
  assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");

  // Now attach the shared segment.
  addr = (char*) shmat(shmid, requested_addr, 0);
  if (addr == (char*)-1) {
    // How to handle attach failure:
    // If it failed for a specific wish address, tolerate this: in that case, if wish address was
    // mandatory, fail, if not, retry anywhere.
    // If it failed for any other reason, treat that as fatal error.
    addr = NULL;
    if (requested_addr) {
      if (wishaddr_or_fail) {
        goto cleanup_shm;
      } else {
        addr = (char*) shmat(shmid, NULL, 0);
        if (addr == (char*)-1) { // fatal
          addr = NULL;
          warning("shmat failed (errno: %d)", errno);
          goto cleanup_shm;
        }
      }
    } else { // fatal
      addr = NULL;
      warning("shmat failed (errno: %d)", errno);
      goto cleanup_shm;
    }
  }

  // sanity point
  assert(addr && addr != (char*) -1, "wrong address");

  // after successful attach remove the segment - right away.
  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
    warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
    guarantee(false, "failed to remove shared memory segment!");
  }
  shmid = -1;

  // query the real page size. In case setting the page size did not work (see above), the system
  // may have given us something other than 4K (LDR_CNTRL).
  {
    const size_t real_pagesize = os::Aix::query_pagesize(addr);
    if (pagesize != -1) {
      assert(pagesize == real_pagesize, "unexpected pagesize after shmat");
    } else {
      pagesize = real_pagesize;
    }
  }

  // Now register the reserved block with internal book keeping.
  LOCK_SHMBK
    const bool pinned = pagesize >= SIZE_16M ? true : false;
    ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);
    assert(p_block, "");
    shmbk_register(p_block);
  UNLOCK_SHMBK

cleanup_shm:

  // if we have not done so yet, remove the shared memory segment. This is very important.
  if (shmid != -1) {
    if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
      warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
      guarantee(false, "failed to remove shared memory segment!");
    }
    shmid = -1;
  }

  // trace
  if (Verbose && !addr) {
    if (requested_addr != NULL) {
      warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.", size, requested_addr);
    } else {
      warning("failed to shm-allocate 0x%llX bytes at any address.", size);
    }
  }

  // hand info to caller
  if (addr) {
    p_info->addr = addr;
    p_info->pagesize = pagesize;
    p_info->pinned = pagesize == SIZE_16M ? true : false;
  }

  // sanity test:
  if (requested_addr && addr && wishaddr_or_fail) {
    guarantee(addr == requested_addr, "shmat error");
  }

  // just one more test to really make sure we have no dangling shm segments.
  guarantee(shmid == -1, "dangling shm segments");

  return addr ? true : false;

} // end: reserve_shmatted_memory
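
// Illustrative call (not part of the build; the address is a made-up,
// 256M-aligned example): reserving 64M at a fixed wish address, preferring
// but not requiring 16M pages:
//
//   shmatted_memory_info_t info;
//   const int flags = RESSHM_WISHADDR_OR_FAIL | RESSHM_TRY_16M_PAGES;
//   if (reserve_shmatted_memory(64*M, (char*)0x30000000, flags, &info)) {
//     // info.addr == 0x30000000; info.pagesize is 16M, 64K or the
//     // LDR_CNTRL-determined default; info.pinned is true for 16M pages.
//   }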

// Reserve memory using mmap. Behaves the same as reserve_shmatted_memory():
// will return NULL in case of an error.
static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {

  // if a wish address is given, but not aligned to 4K page boundary, mmap will fail.
  if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {
    warning("Wish address 0x%p not aligned to page boundary.", requested_addr);
    return NULL;
  }

  const size_t size = align_size_up(bytes, SIZE_4K);

  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
  // msync(MS_INVALIDATE) (see os::uncommit_memory)
  int flags = MAP_ANONYMOUS | MAP_SHARED;

  // MAP_FIXED is needed to enforce requested_addr - the manpage is vague about what
  // it means if a wish address is given but MAP_FIXED is not set.
  //
  // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED
  // clobbers the address range, which is probably not what the caller wants. That's
  // why I assert here (again) that the SPEC1170 compat mode is off.
  // If we want to be able to run under SPEC1170, we have to do some porting and
  // testing.
  if (requested_addr != NULL) {
    assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");
    flags |= MAP_FIXED;
  }

  char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);

  if (addr == MAP_FAILED) {
    // attach failed: tolerate for specific wish addresses. Not being able to attach
    // anywhere is a fatal error.
    if (requested_addr == NULL) {
      // It's ok to fail here if the machine does not have enough memory.
      warning("mmap(NULL, 0x%llX, ..) failed (%d)", size, errno);
    }
    addr = NULL;
    goto cleanup_mmap;
  }

  // If we did request a specific address and that address was not available, fail.
  if (addr && requested_addr) {
    guarantee(addr == requested_addr, "unexpected");
  }

  // register this mmap'ed segment with book keeping
  LOCK_SHMBK
    ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));
    assert(p_block, "");
    shmbk_register(p_block);
  UNLOCK_SHMBK

cleanup_mmap:

  // trace
  if (Verbose) {
    if (addr) {
      fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);
    } else {
      if (requested_addr != NULL) {
        warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
      } else {
        warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
      }
    }
  }

  return addr;

} // end: reserve_mmaped_memory

// Reserves and attaches a shared memory segment.
// Will assert if a wish address is given and could not be obtained.
char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  return os::attempt_reserve_memory_at(bytes, requested_addr);
}

bool os::pd_release_memory(char* addr, size_t size) {

  // delegate to ShmBkBlock class which knows how to uncommit its memory.

  bool rc = false;
  LOCK_SHMBK
    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
    if (!block) {
      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
      shmbk_dump_info();
      assert(false, "invalid pointer");
      return false;
    } else if (!block->isSameRange(addr, size)) {
      if (block->getType() == ShmBkBlock::MMAP) {
        // Release only the same range, or the beginning or the end of a range.
        if (block->base() == addr && size < block->size()) {
          ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size));
          assert(b, "");
          shmbk_register(b);
          block->setAddrRange(AddrRange(addr, size));
        } else if (addr > block->base() && addr + size == block->base() + block->size()) {
          ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size));
          assert(b, "");
          shmbk_register(b);
          block->setAddrRange(AddrRange(addr, size));
        } else {
          fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size);
          shmbk_dump_info();
          assert(false, "invalid mmap range");
          return false;
        }
      } else {
        // Release only the same range. No partial release allowed.
        // Soften the requirement a bit, because the user may think he owns a smaller size
        // than the block has, due to alignment etc.
        if (block->base() != addr || block->size() < size) {
          fprintf(stderr, "invalid shmget range: 0x%p .. 0x%p.\n", addr, addr + size);
          shmbk_dump_info();
          assert(false, "invalid shmget range");
          return false;
        }
      }
    }
    rc = block->release();
    assert(rc, "release failed");
    // remove block from bookkeeping
    shmbk_unregister(block);
    delete block;
  UNLOCK_SHMBK

  if (!rc) {
    warning("failed to release %lu bytes at 0x%p", size, addr);
  }

  return rc;
}

static bool checked_mprotect(char* addr, size_t size, int prot) {

  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
  // not tell me if protection failed when trying to protect an un-protectable range.
  //
  // This means if the memory was allocated using shmget/shmat, protection won't work
  // but mprotect will still return 0:
  //
  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm

  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;

  if (!rc) {
    const char* const s_errno = strerror(errno);
    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
    return false;
  }

  // mprotect success check
  //
  // Mprotect said it changed the protection but can I believe it?
  //
  // To be sure I need to check the protection afterwards. Try to
  // read from protected memory and check whether that causes a segfault.
  //
  if (!os::Aix::xpg_sus_mode()) {

    if (StubRoutines::SafeFetch32_stub()) {

      const bool read_protected =
        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;

      if (prot & PROT_READ) {
        rc = !read_protected;
      } else {
        rc = read_protected;
      }
    }
  }
  if (!rc) {
    assert(false, "mprotect failed.");
  }
  return rc;
}
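
// Why two SafeFetch32 probes with different defaults: SafeFetch32(p, d)
// returns d if reading *p faults. A single probe could be fooled if the
// memory happened to contain exactly the default value; requiring both
// 0x12345678 and 0x76543210 to come back makes a false "read_protected"
// verdict practically impossible.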

// Set protections specified
bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return checked_mprotect(addr, size, p);
}

bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}

// Large page support

static size_t _large_page_size = 0;

// Enable large page support if OS allows that.
void os::large_page_init() {

  // Note: os::Aix::query_multipage_support must run first.

  if (!UseLargePages) {
    return;
  }

  if (!Aix::can_use_64K_pages()) {
    assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");
    UseLargePages = false;
    return;
  }

  if (!Aix::can_use_16M_pages() && Use16MPages) {
    fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool "
            "and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n");
  }

  // Do not report 16M page alignment as part of os::_page_sizes if we are
  // explicitly forbidden from using 16M pages. Doing so would increase the
  // alignment the garbage collector calculates with, slightly increasing
  // heap usage. We should only pay for 16M alignment if we really want to
  // use 16M pages.
  if (Use16MPages && Aix::can_use_16M_pages()) {
    _large_page_size = SIZE_16M;
    _page_sizes[0] = SIZE_16M;
    _page_sizes[1] = SIZE_64K;
    _page_sizes[2] = SIZE_4K;
    _page_sizes[3] = 0;
  } else if (Aix::can_use_64K_pages()) {
    _large_page_size = SIZE_64K;
    _page_sizes[0] = SIZE_64K;
    _page_sizes[1] = SIZE_4K;
    _page_sizes[2] = 0;
  }

  if (Verbose) {
    fprintf(stderr, "Default large page size is 0x%llX.\n", _large_page_size);
  }
} // end: os::large_page_init()

char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // "exec" is passed in but not used. Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  Unimplemented();
  return 0;
}

bool os::release_memory_special(char* base, size_t bytes) {
  // detaching the SHM segment will also delete it, see reserve_memory_special()
  Unimplemented();
  return false;
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Well, sadly we cannot commit anything at all (see comment in
  // os::commit_memory), but we claim to, so we can make use of large pages.
  return true;
}

bool os::can_execute_large_page_memory() {
  // We can do that.
  return true;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {

  bool use_mmap = false;

  // mmap: smaller graining, no large page support
  // shm: large graining (256M), large page support, limited number of shm segments
  //
  // Prefer mmap wherever we either do not need large page support or have OS limits

  if (!UseLargePages || bytes < SIZE_16M) {
    use_mmap = true;
  }

  char* addr = NULL;
  if (use_mmap) {
    addr = reserve_mmaped_memory(bytes, requested_addr);
  } else {
    // shmat: wish address is mandatory, and do not try 16M pages here.
    shmatted_memory_info_t info;
    const int flags = RESSHM_WISHADDR_OR_FAIL;
    if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
      addr = info.addr;
    }
  }

  return addr;
}

size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}

size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}

#define NANOSECS_PER_MILLISEC 1000000

int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(), "thread consistency check");

  // Prevent nasty overflow in deadline calculation
  // by handling long sleeps similar to solaris or windows.
  const jlong limit = INT_MAX;
  int result;
  while (millis > limit) {
    if ((result = os::sleep(thread, limit, interruptible)) != OS_OK) {
      return result;
    }
    millis -= limit;
  }

  ParkEvent * const slp = thread->_SleepEvent;
  slp->reset();
  OrderAccess::fence();

  if (interruptible) {
    jlong prevtime = javaTimeNanos();

    // Prevent precision loss and too long sleeps
    jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;

    for (;;) {
      if (os::is_interrupted(thread, true)) {
        return OS_INTRPT;
      }

      jlong newtime = javaTimeNanos();

      assert(newtime >= prevtime, "time moving backwards");
      // Doing prevtime and newtime in microseconds doesn't help precision,
      // and trying to round up to avoid lost milliseconds can result in a
      // too-short delay.
      millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;

      if (millis <= 0) {
        return OS_OK;
      }

      // Stop sleeping if we passed the deadline
      if (newtime >= deadline) {
        return OS_OK;
      }

      prevtime = newtime;

      {
        assert(thread->is_Java_thread(), "sanity check");
        JavaThread *jt = (JavaThread *) thread;
        ThreadBlockInVM tbivm(jt);
        OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

        jt->set_suspend_equivalent();

        slp->park(millis);

        // were we externally suspended while we were waiting?
        jt->check_and_wait_while_suspended();
      }
    }
  } else {
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jlong prevtime = javaTimeNanos();

    // Prevent precision loss and too long sleeps
    jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;

    for (;;) {
      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
      // the 1st iteration ...
      jlong newtime = javaTimeNanos();

      if (newtime - prevtime < 0) {
        // time moving backwards, should only happen if no monotonic clock
        // not a guarantee() because JVM should not abort on kernel/glibc bugs
        // - HS14 Commented out as not implemented.
        // - TODO Maybe we should implement it?
        //assert(!Aix::supports_monotonic_clock(), "time moving backwards");
      } else {
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
      }

      if (millis <= 0) break;

      if (newtime >= deadline) {
        break;
      }

      prevtime = newtime;
      slp->park(millis);
    }
    return OS_OK;
  }
}
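
// Deadline arithmetic in os::sleep(), worked through (illustrative): a
// request to sleep 2500 ms at prevtime t0 yields deadline = t0 + 2500 *
// NANOSECS_PER_MILLISEC. After each park(), whole elapsed milliseconds are
// subtracted from 'millis'; the separate 'newtime >= deadline' check catches
// the case where integer division keeps rounding the elapsed time down to
// 0 ms, which would otherwise extend the sleep past the requested time.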

void os::naked_short_sleep(jlong ms) {
  struct timespec req;

  assert(ms < 1000, "Un-interruptible sleep, short time use only");
  req.tv_sec = 0;
  if (ms > 0) {
    req.tv_nsec = (ms % 1000) * 1000000;
  } else {
    req.tv_nsec = 1;
  }

  nanosleep(&req, NULL);

  return;
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) { // sleep forever ...
    ::sleep(100); // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  return DontYieldALot;
}

void os::yield() {
  sched_yield();
}

os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities.
  // Threads on Linux all have the same priority, so the Solaris style
  // os::yield_all() with nanosleep(1ms) is not necessary.
  sched_yield();
}

// Called from the tight loops to possibly influence time-sharing heuristics
void os::loop_breaker(int attempts) {
  os::yield_all(attempts);
}

////////////////////////////////////////////////////////////////////////////////
// thread priority support

// From AIX manpage to pthread_setschedparam
// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
//
// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
// range from 40 to 80, where 40 is the least favored priority and 80
// is the most favored."
//
// (Actually, I doubt this even has an impact on AIX, as we do kernel
// scheduling there; however, this still leaves iSeries.)
//
// We use the same values for AIX and PASE.
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
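
// Example mapping (read off the table above): a Java thread at NormPriority
// (5) runs at SCHED_OTHER priority 57, MinPriority (1) at 55, MaxPriority
// (10) at 60. The whole Java range is deliberately squeezed into the narrow
// 54..60 band, well inside the documented 40..80 SCHED_OTHER range.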

OSReturn os::set_native_priority(Thread* thread, int newpri) {
  if (!UseThreadPriorities) return OS_OK;
  pthread_t thr = thread->osthread()->pthread_id();
  int policy = SCHED_OTHER;
  struct sched_param param;
  param.sched_priority = newpri;
  int ret = pthread_setschedparam(thr, policy, &param);

  if (ret != 0) {
    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
        (int)thr, newpri, ret, strerror(ret));
  }
  return (ret == 0) ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  pthread_t thr = thread->osthread()->pthread_id();
  int policy = SCHED_OTHER;
  struct sched_param param;
  int ret = pthread_getschedparam(thr, &policy, &param);
  *priority_ptr = param.sched_priority;

  return (ret == 0) ? OS_OK : OS_ERR;
}

// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

////////////////////////////////////////////////////////////////////////////////
// suspend/resume support

//  the low-level signal-based suspend/resume support is a remnant from the
//  old VM-suspension that used to be for java-suspension, safepoints etc,
//  within hotspot. Now there is a single use-case for this:
//    - calling get_thread_pc() on the VMThread by the flat-profiler task
//      that runs in the watcher thread.
//  The remaining code is greatly simplified from the more general suspension
//  code that used to be used.
//
//  The protocol is quite simple:
//  - suspend:
//      - sends a signal to the target thread
//      - polls the suspend state of the osthread using a yield loop
//      - target thread signal handler (SR_handler) sets suspend state
//        and blocks in sigsuspend until continued
//  - resume:
//      - sets target osthread state to continue
//      - sends signal to end the sigsuspend loop in the SR_handler
//
//  Note that the SR_lock plays no role in this suspend/resume protocol.
//

static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
  osthread->set_siginfo(NULL);
}

static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
  osthread->set_ucontext(context);
  osthread->set_siginfo(siginfo);
}

//
// Handler function invoked when a thread's execution is suspended or
// resumed. We have to be careful that only async-safe functions are
// called here (Note: most pthread functions are not async safe and
// should be avoided.)
//
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
//
// Currently only ever called on the VMThread and JavaThreads (PC sampling).
//
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current();
  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}

static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > 0 && sig < NSIG) {
      SR_signum = sig;
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  // 4528190 - We also need to block pthread restart signal (32 on all
  // supported Linux platforms). Note that LinuxThreads need to block
  // this signal for all threads to work properly. So we don't have
  // to use hard-coded signal number when setting up the mask.
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}

static int SR_finalize() {
  return 0;
}

static int sr_notify(OSThread* osthread) {
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");
  return status;
}

// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;

// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::yield_all(i);
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}

static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::yield_all(i);
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
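
// Summary of the state transitions driven by do_suspend()/do_resume() and
// SR_handler() above (not an exhaustive list):
//
//   RUNNING  --request_suspend()-->  SR_SUSPEND_REQUEST
//   SR_SUSPEND_REQUEST  --(SR_handler: sr.suspended())-->  SR_SUSPENDED
//   SR_SUSPEND_REQUEST  --cancel_suspend()-->  RUNNING     (request raced)
//   SR_SUSPENDED  --request_wakeup()-->  SR_WAKEUP_REQUEST
//   SR_WAKEUP_REQUEST  --(SR_handler: sr.running())-->  RUNNING
//
// The initiating thread polls with yield loops rather than blocking, because
// the target acknowledges only from within its signal handler.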

////////////////////////////////////////////////////////////////////////////////
// interrupt support

void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  if (!osthread->interrupted()) {
    osthread->set_interrupted(true);
    // More than one thread can get here with the same value of osthread,
    // resulting in multiple notifications. We do, however, want the store
    // to interrupted() to be visible to other threads before we execute unpark().
    OrderAccess::fence();
    ParkEvent * const slp = thread->_SleepEvent;
    if (slp != NULL) slp->unpark();
  }

  // For JSR166. Unpark even if interrupt status already was set
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();
}

bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  bool interrupted = osthread->interrupted();

  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    // consider thread->_SleepEvent->reset() ... optional optimization
  }

  return interrupted;
}

///////////////////////////////////////////////////////////////////////////////////
// signal handling (except suspend/resume)

// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
//
// If this routine returns false, it is OK to call it again. This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks. Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
// It should be consulted by handlers for any of those signals.
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction(). This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
extern "C" JNIEXPORT int
JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
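
// Sketch of a user-defined hook as described above (illustrative only;
// 'my_handler' is a hypothetical application function, installed via
// sigaction() with SA_SIGINFO|SA_RESTART set):
//
//   static void my_handler(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_aix_signal(sig, info, uc, 0 /* !abort_if_unrecognized */)) {
//       return; // the VM recognized and handled the signal
//     }
//     // ... application-specific handling ...
//   }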

// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same.
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  const int rc = ::pthread_sigmask(how, set, oset);
  // return value semantics differ slightly for the error case:
  // pthread_sigmask returns an error number, sigthreadmask -1 and sets the global errno
  // (so, pthread_sigmask is more threadsafe for error handling).
  // But success is always 0.
  return rc == 0 ? true : false;
}

// Function to unblock all signals which are, according
// to POSIX, typical program error signals. If they happen while being blocked,
// they typically will bring down the process immediately.
bool unblock_program_error_signals() {
  sigset_t set;
  ::sigemptyset(&set);
  ::sigaddset(&set, SIGILL);
  ::sigaddset(&set, SIGBUS);
  ::sigaddset(&set, SIGFPE);
  ::sigaddset(&set, SIGSEGV);
  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
}

// Renamed from 'signalHandler' to avoid collision with other shared libs.
void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");

  // Never leave program error signals blocked;
  // on all our platforms they would bring down the process immediately when
  // getting raised while being blocked.
  unblock_program_error_signals();

  JVM_handle_aix_signal(sig, info, uc, true);
}
3358
3359
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
struct sigaction os::Aix::sigact[MAXSIGNUM];
unsigned int os::Aix::sigs = 0;
bool os::Aix::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;

struct sigaction* os::Aix::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if (libjsig_is_loaded) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
  if ((((unsigned int)1 << sig) & sigs) != 0) {
    return &sigact[sig];
  }
  return NULL;
}

void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigs |= (unsigned int)1 << sig;
}

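// Worked example of the bookkeeping above: saving the pre-installed handler
// for SIGSEGV (signal number 11 on AIX, as on most Unixes) stores the old
// sigaction in sigact[11] and sets bit 11 of 'sigs' (sigs |= 1u << 11), so a
// later get_preinstalled_handler(SIGSEGV) finds the bit set and returns
// &sigact[11].
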
// for diagnostics
int os::Aix::sigflags[MAXSIGNUM];

int os::Aix::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  return sigflags[sig];
}

void os::Aix::set_our_sigflags(int sig, int flags) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = flags;
}

void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  // Renamed 'signalHandler' to avoid collision with other shared libs.
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    // Renamed 'signalHandler' to avoid collision with other shared libs.
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save the flags we set, for later diagnostics.
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  void* oldhand2 = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}

// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                          dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                          dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers
      (*begin_signal_setting)();
    }

    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);
    set_signal_handler(SIGDANGER, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate the signal checker if libjsig is in place; we trust
    // ourselves, and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}

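// Usage note (an assumption about deployment, not enforced here): the
// JVM_begin_signal_setting/JVM_end_signal_setting/JVM_get_signal_action
// symbols looked up above are exported by the jsig library, which is
// typically interposed by preloading it into the process (e.g. via
// LDR_PRELOAD/LDR_PRELOAD64 on AIX, LD_PRELOAD on Linux) so that it can
// record handlers installed by user code before and after this point.
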
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // The way os::dll_address_to_library_name is implemented on Aix
    // right now, it always returns -1 for the offset which is not
    // terribly informative.
    // Will fix that. For now, omit the offset.
    jio_snprintf(buf, buflen, "%s", p1);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}

static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError?
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check the flags; warn if they differ from what we installed.
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags were changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}

#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.

void os::run_periodic_checks() {

  if (!check_signals) return;

  // SEGV and BUS, if overridden, could potentially prevent
  // generation of hs*.log in the event of a crash; debugging
  // such a case can be very challenging, so we absolutely
  // check the following for good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
  if (UseSIGTRAP) {
    DO_SIGNAL_CHECK(SIGTRAP);
  }
  DO_SIGNAL_CHECK(SIGDANGER);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
  DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
}

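// For reference, DO_SIGNAL_CHECK(SIGSEGV) above expands to:
//
//   if (!sigismember(&check_signal_done, SIGSEGV))
//     os::Aix::check_signal_handler(SIGSEGV);
//
// so each signal is re-checked periodically until a mismatch has been
// reported once, after which it is marked done and skipped.
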
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;

void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    // Renamed 'signalHandler' to avoid collision with other shared libs.
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  case INTERRUPT_SIGNAL:
    jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
    tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal handlers
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}

extern bool signal_name(int signo, char* buf, size_t len);

const char* os::exception_name(int exception_code, char* buf, size_t size) {
  if (0 < exception_code && exception_code <= SIGRTMAX) {
    // signal
    if (!signal_name(exception_code, buf, size)) {
      jio_snprintf(buf, size, "SIG%d", exception_code);
    }
    return buf;
  } else {
    return NULL;
  }
}

// To install functions for the atexit() system call
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// This is called _before_ most of the global arguments have been parsed.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (the shared memory boundary is supposed to be 256M aligned)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc)
  os::Aix::scan_environment();

  // Check which pages are supported by AIX.
  os::Aix::query_multipage_support();

  // Next, we need to initialize the libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  // Initialize large page support.
  if (UseLargePages) {
    os::large_page_init();
    if (!UseLargePages) {
      // initialize os::_page_sizes
      _page_sizes[0] = Aix::page_size();
      _page_sizes[1] = 0;
      if (Verbose) {
        fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n");
      }
    }
  } else {
    // initialize os::_page_sizes
    _page_sizes[0] = Aix::page_size();
    _page_sizes[1] = 0;
  }

  // debug trace
  if (Verbose) {
    fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());
    fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());
    fprintf(stderr, "os::_page_sizes = ( ");
    for (int i = 0; _page_sizes[i]; i++) {
      fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));
    }
    fprintf(stderr, ")\n");
  }

  _initial_pid = getpid();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  ThreadCritical::initialize();

  // _main_thread points to the thread that created/loaded the JVM.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();
  pthread_mutex_init(&dl_mutex, NULL);
}

// This is called _after_ the global arguments have been parsed.
jint os::init_2(void) {

  trcVerbose("processor count: %d", os::_processor_count);
  trcVerbose("physical memory: %lu", Aix::_physical_memory);

  // Initially build up the loaded dll map.
  LoadedLibraries::reload();

  const int page_size = Aix::page_size();
  const int map_size = page_size;

  address map_address = (address) MAP_FAILED;
  const int prot  = PROT_READ;
  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;

  // Use optimized addresses for the polling page,
  // e.g. map it to a special 32-bit address.
  if (OptimizePollingPageLocation) {
    // architecture-specific list of address wishes:
    address address_wishes[] = {
      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
      // PPC64: all address wishes are non-negative 32 bit values where
      // the lower 16 bits are all zero. we can load these addresses
      // with a single ppc_lis instruction.
      (address) 0x30000000, (address) 0x31000000,
      (address) 0x32000000, (address) 0x33000000,
      (address) 0x40000000, (address) 0x41000000,
      (address) 0x42000000, (address) 0x43000000,
      (address) 0x50000000, (address) 0x51000000,
      (address) 0x52000000, (address) 0x53000000,
      (address) 0x60000000, (address) 0x61000000,
      (address) 0x62000000, (address) 0x63000000
    };
    int address_wishes_length = sizeof(address_wishes)/sizeof(address);

    // iterate over the list of address wishes:
    for (int i = 0; i < address_wishes_length; i++) {
      // Try to map with the current address wish.
      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
      // fail if the address is already mapped.
      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
                                     map_size, prot,
                                     flags | MAP_FIXED,
                                     -1, 0);
      if (Verbose) {
        fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
                address_wishes[i], map_address + (ssize_t)page_size);
      }

      if (map_address + (ssize_t)page_size == address_wishes[i]) {
        // map succeeded and map_address is at wished address, exit loop.
        break;
      }

      if (map_address != (address) MAP_FAILED) {
        // Map succeeded, but polling_page is not at wished address, unmap and continue.
        ::munmap(map_address, map_size);
        map_address = (address) MAP_FAILED;
      }
      // map failed, continue loop.
    }
  } // end OptimizePollingPageLocation

  if (map_address == (address) MAP_FAILED) {
    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
  }
  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
  os::set_polling_page(map_address);

  if (!UseMembar) {
    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page(mem_serialize_page);

#ifndef PRODUCT
    if (Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // initialize suspend/resume support - must do this before signal_sets_init()
  if (SR_initialize() != 0) {
    perror("SR_initialize failed");
    return JNI_ERR;
  }

  Aix::signal_sets_init();
  Aix::install_signal_handlers();

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
                                    (size_t)(StackYellowPages+StackRedPages+StackShadowPages +
                                             2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small. "
                  "Specify at least %dk",
                  os::Aix::min_stack_allowed / K);
    return JNI_ERR;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  // Note that this can be 0, if no default stacksize was set.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));

  Aix::libpthread_init();

  if (MaxFDLimit) {
    // Set the number of file descriptors to max. Print out an error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  if (PerfAllowAtExitRegistration) {
    // Only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // Note: the perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

  return JNI_OK;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
    fatal("Could not disable polling page");
  }
}

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  // Changed according to os_linux.cpp.
  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
    fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
  }
}

int os::active_processor_count() {
  // User has overridden the number of active processors
  if (ActiveProcessorCount > 0) {
    if (PrintActiveCpus) {
      tty->print_cr("active_processor_count: "
                    "active processor count set by user : %d",
                    ActiveProcessorCount);
    }
    return ActiveProcessorCount;
  }

  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
  return online_cpus;
}

void os::set_native_thread_name(const char *name) {
  // Not yet implemented.
  return;
}

bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}

class PcFetcher : public os::SuspendedThreadTask {
public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  ExtendedPC result();
protected:
  void do_task(const os::SuspendedThreadTaskContext& context);
private:
  ExtendedPC _epc;
};

ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}

void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread.
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}

// Suspends the target using the signal mechanism and then grabs the PC before
// resuming the target. Used by the flat-profiler only.
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher for the VMThread.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
  assert(thread->is_VM_thread(), "Can only be called for VMThread");

  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}

// Not needed on AIX.
// int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
// }

////////////////////////////////////////////////////////////////////////////////
// debug support

static address same_page(address x, address y) {
  intptr_t page_bits = -os::vm_page_size();
  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
    return x;
  else if (x > y)
    return (address)(intptr_t(y) | ~page_bits) + 1;
  else
    return (address)(intptr_t(y) & page_bits);
}

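// Worked example for same_page(), assuming a 4K page size (page_bits == ~0xFFF):
// - x = 0x20010, y = 0x20FF0: same page, so x (0x20010) is returned.
// - x = 0x30010, y = 0x20FF0: x > y, so (0x20FF0 | 0xFFF) + 1 == 0x21000 is
//   returned, i.e. y clamped up to the end of its page.
// - x = 0x10010, y = 0x20FF0: x < y, so 0x20FF0 & ~0xFFF == 0x20000 is
//   returned, i.e. y clamped down to the start of its page.
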
bool os::find(address addr, outputStream* st) {

  st->print(PTR_FORMAT ": ", addr);

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
  if (lib) {
    lib->print(st);
    return true;
  } else {
    lib = LoadedLibraries::find_for_data_address(addr);
    if (lib) {
      lib->print(st);
      return true;
    } else {
      st->print_cr("(outside any module)");
    }
  }

  return false;
}

////////////////////////////////////////////////////////////////////////////////
// misc

// This does not do anything on Aix. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters)
// on, e.g., Win32.
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
                         JavaCallArguments* args, Thread* thread) {
  f(value, method, args, thread);
}

void os::print_statistics() {
}

int os::message_box(const char* title, const char* message) {
  int i;
  fdStream err(defaultStream::error_fd());
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (i = 0; i < 78; i++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}

int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  return ::stat(pathbuf, sbuf);
}

bool os::check_heap(bool force) {
  return true;
}

// int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
//   return ::vsnprintf(buf, count, format, args);
// }

// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = opendir(path);
  if (dir == NULL) return true;

  /* Scan the directory */
  bool result = true;
  while (result && (ptr = readdir(dir)) != NULL) {
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      result = false;
    }
  }
  closedir(dir);
  return result;
}

// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

#ifndef O_DELETE
#define O_DELETE 0x10000
#endif

// Open a file. Unlink the file immediately after open returns
// if the specified oflag has the O_DELETE flag set.
// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c

int os::open(const char *path, int oflag, int mode) {

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;
  int o_delete = (oflag & O_DELETE);
  oflag = oflag & ~O_DELETE;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  // If the open succeeded, the file might still be a directory.
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }

  // All file descriptors that are opened in the JVM and not
  // specifically destined for a subprocess should have the
  // close-on-exec flag set. If we don't set it, then careless 3rd
  // party native code might fork and exec without closing all
  // appropriate file descriptors (e.g. as we do in closeDescriptors in
  // UNIXProcess.c), and this in turn might:
  //
  // - cause end-of-file to fail to be detected on some file
  //   descriptors, resulting in mysterious hangs, or
  //
  // - might cause an fopen in the subprocess to fail on a system
  //   suffering from bug 1085341.
  //
  // (Yes, the default setting of the close-on-exec flag is a Unix
  // design flaw.)
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  if (o_delete != 0) {
    ::unlink(path);
  }
  return fd;
}

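// Illustrative use of the O_DELETE convention described above (the path is
// hypothetical):
//
//   int fd = os::open("/tmp/scratch.tmp", O_RDWR | O_CREAT | O_DELETE, 0666);
//   // The file is already unlinked at this point; the data remains
//   // reachable through fd, and the disk space is reclaimed once fd is
//   // closed -- the pattern ZipFile.c relies on for temporary files.
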
// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  if (!rewrite_existing) {
    oflags |= O_EXCL;
  }
  return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}

// This code originates from JDK's sysAvailable
// from src/solaris/hpi/src/native_threads/src/sys_api_td.c

int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      // XXX: is the following call interruptible? If so, this might
      // need to go through the INTERRUPT_IO() wrapper as for other
      // blocking, interruptible calls in this file.
      int n;
      if (::ioctl(fd, FIONREAD, &n) >= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}

int os::socket_available(int fd, jint *pbytes) {
  // Linux doc says EINTR not returned, unlike Solaris
  int ret = ::ioctl(fd, FIONREAD, pbytes);

  //%% note ioctl can return 0 when successful, JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the jdk.
  return (ret < 0) ? 0 : 1;
}

// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  Unimplemented();
  return NULL;
}

// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}

// Unmap a block of memory.
bool os::pd_unmap_memory(char* addr, size_t bytes) {
  return munmap(addr, bytes) == 0;
}

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) return
// the fast estimate available on the platform.

jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
  assert(n >= 0, "negative CPU time");
  return n;
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns
  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
  assert(n >= 0, "negative CPU time");
  return n;
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  assert(n >= 0, "negative CPU time");
  return n;
}

static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
  bool error = false;

  jlong sys_time = 0;
  jlong user_time = 0;

  // Reimplemented using getthrds64().
  //
  // Works like this:
  // For the thread in question, get the kernel thread id. Then get the
  // kernel thread statistics using that id.
  //
  // This only works, of course, when no pthread scheduling is used,
  // i.e. there is a 1:1 relationship to kernel threads.
  // On AIX, see the AIXTHREAD_SCOPE variable.

  pthread_t pthtid = thread->osthread()->pthread_id();

  // retrieve kernel thread id for the pthread:
  tid64_t tid = 0;
  struct __pthrdsinfo pinfo;
  // I just love those otherworldly IBM APIs which force me to hand down
  // dummy buffers for stuff I don't care about...
  char dummy[1];
  int dummy_size = sizeof(dummy);
  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
                          dummy, &dummy_size) == 0) {
    tid = pinfo.__pi_tid;
  } else {
    tty->print_cr("pthread_getthrds_np failed.");
    error = true;
  }

  // retrieve kernel timing info for that kernel thread
  if (!error) {
    struct thrdentry64 thrdentry;
    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
      sys_time  = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
    } else {
      // was "pthread_getthrds_np failed." -- a copy-paste slip; the call
      // that failed here is getthrds64().
      tty->print_cr("getthrds64 failed.");
      error = true;
    }
  }

  if (p_sys_time) {
    *p_sys_time = sys_time;
  }

  if (p_user_time) {
    *p_user_time = user_time;
  }

  if (error) {
    return false;
  }

  return true;
}

jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  jlong sys_time;
  jlong user_time;

  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
    return -1;
  }

  return user_sys_cpu_time ? sys_time + user_time : user_time;
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

bool os::is_thread_cpu_time_supported() {
  return true;
}

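// Worked example of the nanosecond conversion above: a kernel-reported user
// time of { tv_sec = 1, tv_usec = 250000 } yields
// 1 * 1000000000 + 250000 * 1000 == 1250000000 ns, i.e. 1.25 s.
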
// System loadavg support. Returns -1 if load average cannot be obtained.
// For now just return the system wide load average (no processor sets).
int os::loadavg(double values[], int nelem) {

  // Implemented using libperfstat on AIX.

  guarantee(nelem >= 0 && nelem <= 3, "argument error");
  guarantee(values, "argument error");

  if (os::Aix::on_pase()) {
    Unimplemented();
    return -1;
  } else {
    // AIX: use libperfstat
    //
    // See also:
    // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
    // /usr/include/libperfstat.h:

    // Use the already AIX version independent get_cpuinfo.
    os::Aix::cpuinfo_t ci;
    if (os::Aix::get_cpuinfo(&ci)) {
      for (int i = 0; i < nelem; i++) {
        values[i] = ci.loadavg[i];
      }
    } else {
      return -1;
    }
    return nelem;
  }
}

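// Illustrative call (the 1/5/15 minute interpretation follows the usual
// loadavg convention and is an assumption about libperfstat's loadavg[]):
//
//   double avg[3];
//   if (os::loadavg(avg, 3) == 3) {
//     // avg[0], avg[1], avg[2]: 1, 5 and 15 minute system load averages
//   }
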
void os::pause() {
  char filename[MAX_PATH];
  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    // Use "%s" so a user-supplied file name is not interpreted as a format string.
    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
  } else {
    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  }

  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  if (fd != -1) {
    struct stat buf;
    ::close(fd);
    while (::stat(filename, &buf) == 0) {
      (void)::poll(NULL, 0, 100);
    }
  } else {
    jio_fprintf(stderr,
                "Could not open pause file '%s', continuing immediately.\n", filename);
  }
}

bool os::is_primordial_thread(void) {
  return pthread_self() == (pthread_t)1;
}

// OS recognition (PASE/AIX, OS level). Call this before calling any of the
// Aix::on_pase(), Aix::os_version() statics.
void os::Aix::initialize_os_info() {

  assert(_on_pase == -1 && _os_version == -1, "already called.");

  struct utsname uts;
  memset(&uts, 0, sizeof(uts));
  strcpy(uts.sysname, "?");
  if (::uname(&uts) == -1) {
    trc("uname failed (%d)", errno);
    guarantee(0, "Could not determine whether we run on AIX or PASE");
  } else {
    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
               "node \"%s\" machine \"%s\"\n",
               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
    const int major = atoi(uts.version);
    assert(major > 0, "invalid OS version");
    const int minor = atoi(uts.release);
    assert(minor > 0, "invalid OS release");
    _os_version = (major << 8) | minor;
    if (strcmp(uts.sysname, "OS400") == 0) {
      Unimplemented();
    } else if (strcmp(uts.sysname, "AIX") == 0) {
      // We run on AIX. We do not support versions older than AIX 5.3.
      _on_pase = 0;
      if (_os_version < 0x0503) {
        trc("AIX release older than AIX 5.3 not supported.");
        assert(false, "AIX release too old.");
      } else {
        trcVerbose("We run on AIX %d.%d\n", major, minor);
      }
    } else {
      assert(false, "unknown OS");
    }
  }

  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
} // end: os::Aix::initialize_os_info()

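// Example of the encoding chosen above: on AIX 7.1, uname() reports
// version "7" and release "1", so _os_version == (7 << 8) | 1 == 0x0701,
// which compares greater than the 0x0503 (AIX 5.3) minimum checked above.
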
// Scan environment for important settings which might affect the VM.
// Trace out settings. Warn about invalid settings and/or correct them.
//
// Must run after os::Aix::initialize_os_info().
void os::Aix::scan_environment() {

  char* p;
  int rc;

  // Warn explicitly if EXTSHM=ON is used. That switch changes how
  // System V shared memory behaves. One effect is that the page size of
  // shared memory cannot be changed dynamically, effectively preventing
  // large pages from working.
  // This switch was needed on AIX 32bit, but on AIX 64bit the general
  // recommendation is (in OSS notes) to switch it off.
  p = ::getenv("EXTSHM");
  if (Verbose) {
    fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
  }
  if (p && strcmp(p, "ON") == 0) {
    fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
    _extshm = 1;
  } else {
    _extshm = 0;
  }

  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
  // Not tested, not supported.
  //
  // Note that it might be worth the trouble to test and to require it, if only to
  // get useful return codes for mprotect.
  //
  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
  // exec() ? before loading the libjvm ? ....)
  p = ::getenv("XPG_SUS_ENV");
  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
  if (p && strcmp(p, "ON") == 0) {
    _xpg_sus_mode = 1;
    trc("Unsupported setting: XPG_SUS_ENV=ON");
    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
    // clobber address ranges. If we ever want to support that, we have to do some
    // testing first.
    guarantee(false, "XPG_SUS_ENV=ON not supported");
  } else {
    _xpg_sus_mode = 0;
  }

  // Switch off AIX internal (pthread) guard pages. This has
  // immediate effect for any pthread_create calls which follow.
  p = ::getenv("AIXTHREAD_GUARDPAGES");
  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
  rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
  guarantee(rc == 0, "");

} // end: os::Aix::scan_environment()

// PASE: initialize the libo4 library (AS400 PASE porting library).
void os::Aix::initialize_libo4() {
  Unimplemented();
}

// AIX: initialize the libperfstat library (we load this dynamically
// because it is only available on AIX).
void os::Aix::initialize_libperfstat() {

  assert(os::Aix::on_aix(), "AIX only");

  if (!libperfstat::init()) {
    trc("libperfstat initialization failed.");
    assert(false, "libperfstat initialization failed");
  } else {
    if (Verbose) {
      fprintf(stderr, "libperfstat initialized.\n");
    }
  }
} // end: os::Aix::initialize_libperfstat

/////////////////////////////////////////////////////////////////////////////
// thread stack

// function to query the current stack size using pthread_getthrds_np
//
// ! do not change anything here unless you know what you are doing !
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {

  // This only works when invoked on a pthread. As we agreed not to use
  // primordial threads anyway, I assert here.
  guarantee(!os::is_primordial_thread(), "not allowed on the primordial thread");

  // Information about this API can be found (a) in the pthread.h header and
  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
  //
  // The use of this API to find out the current stack is kind of undefined.
  // But after a lot of tries and asking IBM about it, I concluded that it is safe
  // enough for cases where I let the pthread library create its stacks. For cases
  // where I create my own stack and pass this to pthread_create, it seems not to
  // work (the returned stack size in that case is 0).

  pthread_t tid = pthread_self();
  struct __pthrdsinfo pinfo;
  char dummy[1]; // we only need this to satisfy the api and to not get E
  int dummy_size = sizeof(dummy);

  memset(&pinfo, 0, sizeof(pinfo));

  const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
                                     sizeof(pinfo), dummy, &dummy_size);

  if (rc != 0) {
    fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);
    guarantee(0, "pthread_getthrds_np failed");
  }

  guarantee(pinfo.__pi_stackend, "returned stack base invalid");

  // The following can happen when invoking pthread_getthrds_np on a pthread
  // running on a user provided stack (when handing down a stack to pthread
  // create, see pthread_attr_setstackaddr).
  // Not sure what to do here - I feel inclined to forbid this use case completely.
  guarantee(pinfo.__pi_stacksize, "returned stack size invalid");

  // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
  if (p_stack_base) {
    (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
  }

  if (p_stack_size) {
    (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
  }

#ifndef PRODUCT
  if (Verbose) {
    fprintf(stderr,
            "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT
            ", real stack_size=" INTPTR_FORMAT
            ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",
            (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
            (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),
            pinfo.__pi_stacksize - os::Aix::stack_page_size());
  }
#endif

} // end query_stack_dimensions

// get the current stack base from the OS (actually, the pthread library)
address os::current_stack_base() {
  address p;
  query_stack_dimensions(&p, 0);
  return p;
}

// get the current stack size from the OS (actually, the pthread library)
size_t os::current_stack_size() {
  size_t s;
  query_stack_dimensions(0, &s);
  return s;
}

// Refer to the comments in os_solaris.cpp park-unpark.
//
// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
// For specifics regarding the bug see GLIBC BUGID 261237 :
// http://www.mail-archive.com/[email protected]/msg10837.html.
// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
// is used. (The simple C test-case provided in the GLIBC bug report manifests the
// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
// and monitorenter when we're using 1-0 locking. All those operations may result in
// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
// of libpthread avoids the problem, but isn't practical.
//
// Possible remedies:
//
// 1. Establish a minimum relative wait time. 50 to 100 msecs seems to work.
//    This is palliative and probabilistic, however. If the thread is preempted
//    between the call to compute_abstime() and pthread_cond_timedwait(), more
//    than the minimum period may have passed, and the abstime may be stale (in the
//    past) resulting in a hang. Using this technique reduces the odds of a hang
//    but the JVM is still vulnerable, particularly on heavily loaded systems.
//
// 2. Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
//    of the usual flag-condvar-mutex idiom. The write side of the pipe is set
//    NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
//    reduces to poll()+read(). This works well, but consumes 2 FDs per extant
//    thread.
//
// 3. Embargo pthread_cond_timedwait() and implement a native "chron" thread
//    that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
//    a timeout request to the chron thread and then blocking via pthread_cond_wait().
//    This also works well. In fact it avoids kernel-level scalability impediments
//    on certain platforms that don't handle lots of active pthread_cond_timedwait()
//    timers in a graceful fashion.
//
// 4. When the abstime value is in the past it appears that control returns
//    correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
//    Subsequent timedwait/wait calls may hang indefinitely. Given that, we
//    can avoid the problem by reinitializing the condvar -- by cond_destroy()
//    followed by cond_init() -- after all calls to pthread_cond_timedwait().
//    It may be possible to avoid reinitialization by checking the return
//    value from pthread_cond_timedwait(). In addition to reinitializing the
//    condvar we must establish the invariant that cond_signal() is only called
//    within critical sections protected by the adjunct mutex. This prevents
//    cond_signal() from "seeing" a condvar that's in the midst of being
//    reinitialized or that is corrupt. Sadly, this invariant obviates the
//    desirable signal-after-unlock optimization that avoids futile context switching.
//
//    I'm also concerned that some versions of NPTL might allocate an auxiliary
//    structure when a condvar is used or initialized. cond_destroy() would
//    release the helper structure. Our reinitialize-after-timedwait fix
//    put excessive stress on malloc/free and locks protecting the c-heap.
//
// We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
// It may be possible to refine (4) by checking the kernel and NPTL versions
// and only enabling the work-around for vulnerable environments.

// utility to compute the abstime argument to timedwait:
// millis is the relative timeout time
// abstime will be the absolute timeout time
// TODO: replace compute_abstime() with unpackTime()

static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
  if (millis < 0) millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  millis %= 1000;
  if (seconds > 50000000) { // see man cond_timedwait(3T)
    seconds = 50000000;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}

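// Worked example for compute_abstime(): with now = { tv_sec = S,
// tv_usec = 400000 } and millis = 1700, we get seconds = 1 and a 700 ms
// remainder, so usec = 400000 + 700000 = 1100000 >= 1000000; after the
// carry, abstime = { tv_sec = S + 2, tv_nsec = 100000 * 1000 }.
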
// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event;
    guarantee((v == 0) || (v == 1), "invariant");
    if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
  }
}

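// A note on the _Event encoding used by TryPark/park/unpark (inferred from
// the invariants asserted in this file, not a normative statement): 1 means
// the event is signaled (a permit is available), 0 means neutral, and a
// negative value means the associated thread is blocked in park().
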
void os::PlatformEvent::park() {       // AKA "down()"
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // TODO: assert that _Assoc != NULL or _Assoc == Self
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");
  if (v == 0) {
    // Do this the hard way by blocking ...
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee(_nParked == 0, "invariant");
    ++_nParked;
    while (_Event < 0) {
      status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
    }
    --_nParked;

    // In theory we could move the ST of 0 into _Event past the unlock(),
    // but then we'd need a MEMBAR after the ST.
    _Event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }
  guarantee(_Event >= 0, "invariant");
}

int os::PlatformEvent::park(jlong millis) {
  guarantee(_nParked == 0, "invariant");

  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");
  if (v != 0) return OS_OK;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee(_nParked == 0, "invariant");
  ++_nParked;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // The underlying Solaris implementation, cond_timedwait, admits
  // spurious/premature wakeups, but the JLS/JVM spec prevents the
  // JVM from making those visible to Java code. As such, we must
  // filter out spurious wakeups. We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = pthread_cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break;   // previous semantics
    if (status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  if (_Event >= 0) {
    ret = OS_OK;
  }
  _Event = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert(_nParked == 0, "invariant");
  return ret;
}

void os::PlatformEvent::unpark() {
  int v, AnyWaiters;
  for (;;) {
    v = _Event;
    if (v > 0) {
      // The LD of _Event could have been reordered or satisfied by a
      // read-aside from this processor's write buffer. To avoid problems
      // execute a barrier and then ratify the value.
      OrderAccess::fence();
      if (_Event == v) return;
      continue;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
  }
  if (v < 0) {
    // Wait for the thread associated with the event to vacate.
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked;

    if (AnyWaiters != 0) {
      // Note: unlike the "immortal" Event case described below, here we
      // signal while still holding the mutex.
      status = pthread_cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
    // Mutex should be locked for pthread_cond_signal(_cond).
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }

  // Note that some ports signal() *after* dropping the lock for "immortal"
  // Events. This is safe and avoids a common class of futile wakeups. In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim will
  // simply re-test the condition and re-park itself.
}


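// Editor's note: a minimal sketch (inside "#if 0", never compiled) of the
// two signalling orders discussed above, assuming plain POSIX pthreads;
// both helper names are hypothetical. Both orders are correct: signalling
// after the unlock risks only a benign spurious wakeup, while signalling
// under the lock may wake a waiter that immediately blocks on the
// still-held mutex (a futile wakeup).
#if 0
static void wake_holding_lock(pthread_mutex_t* m, pthread_cond_t* c, int* flag) {
  pthread_mutex_lock(m);
  *flag = 1;
  pthread_cond_signal(c);     // waiter cannot proceed until we unlock
  pthread_mutex_unlock(m);
}

static void wake_after_unlock(pthread_mutex_t* m, pthread_cond_t* c, int* flag) {
  pthread_mutex_lock(m);
  *flag = 1;
  pthread_mutex_unlock(m);
  pthread_cond_signal(c);     // avoids the futile wakeup; waiter re-tests *flag
}
#endif
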
// JSR166
// -------------------------------------------------------

//
// The Solaris and Linux implementations of park/unpark are fairly
// conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus a count.
// Park decrements the count if > 0, else does a condvar wait. Unpark
// sets the count to 1 and signals the condvar. Only one thread ever waits
// on the condvar. Contention seen when trying to park implies that someone
// is unparking you, so don't wait. And spurious returns are fine, so there
// is no need to track notifications.
//

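// Editor's note: an illustrative stand-alone rendition (inside "#if 0",
// never compiled) of the counter protocol described above, using only
// POSIX pthreads. The demo_parker/demo_park/demo_unpark names are
// hypothetical; initialization of the mutex and condvar is omitted.
#if 0
struct demo_parker {
  pthread_mutex_t mu;        // initialize with pthread_mutex_init()
  pthread_cond_t  cv;        // initialize with pthread_cond_init()
  int             counter;   // 0 = no permit, 1 = permit available
};

static void demo_park(struct demo_parker* p) {
  // Contention on the lock implies someone is unparking us, so don't wait;
  // spurious returns are permitted by the protocol anyway.
  if (pthread_mutex_trylock(&p->mu) != 0) return;
  if (p->counter > 0) {
    p->counter = 0;                   // consume the permit without waiting
  } else {
    pthread_cond_wait(&p->cv, &p->mu);
    p->counter = 0;
  }
  pthread_mutex_unlock(&p->mu);
}

static void demo_unpark(struct demo_parker* p) {
  pthread_mutex_lock(&p->mu);
  int had_permit = p->counter;
  p->counter = 1;                     // saturate at a single permit
  pthread_mutex_unlock(&p->mu);
  if (had_permit < 1) pthread_cond_signal(&p->cv);
}
#endif
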
#define MAX_SECS 100000000
//
// This code is common to linux and solaris and will be moved to a
// common place in dolphin.
//
// The passed in time value is either a relative time in nanoseconds
// or an absolute time in milliseconds. Either way it has to be unpacked
// into suitable seconds and nanoseconds components and stored in the
// given timespec structure.
// Since the given time is a 64-bit value and the time_t used in the
// timespec is only a signed 32-bit value (except on 64-bit Linux), we
// have to watch for overflow if times far in the future are given.
// Further, on Solaris versions prior to 10 there is a restriction (see
// cond_timedwait) that the specified number of seconds, in abstime, is
// less than current_time + 100,000,000. As it will be 28 years before
// "now + 100000000" overflows we can ignore overflow and just impose a
// hard-limit on seconds using the value of "now + 100,000,000". This
// places a limit on the timeout of about 3.17 years from "now".
//

static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

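// Editor's note: a worked example of the relative branch above, with
// assumed inputs now = {tv_sec = 1000, tv_usec = 900000} and
// time = 1,500,000,000 ns (1.5 seconds):
//   secs             = 1500000000 / NANOSECS_PER_SEC = 1
//   absTime->tv_sec  = 1000 + 1 = 1001
//   absTime->tv_nsec = 500000000 + 900000*1000 = 1400000000
//   carry: tv_nsec >= NANOSECS_PER_SEC, so tv_nsec = 400000000 and
//   tv_sec = 1002 -- a deadline exactly 1.5 s past "now".
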
void Parker::park(bool isAbsolute, jlong time) {
  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
    _counter = 0;
    OrderAccess::fence();
    return;
  }

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's an interrupt pending.
  // Check interrupt before trying to wait.
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments.
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if we cannot get the lock, since interference arises from
  // unblocking. Also, check interrupt before trying to wait.
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant");
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  if (time == 0) {
    status = pthread_cond_wait (_cond, _mutex);
  } else {
    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      pthread_cond_destroy (_cond);
      pthread_cond_init (_cond, NULL);
    }
  }
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }

  OrderAccess::fence();
}

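// Editor's note: for orientation, the assumed mappings from
// java.util.concurrent.locks.LockSupport down to the entry points above:
//   LockSupport.park()               -> Parker::park(false, 0)   (wait indefinitely)
//   LockSupport.parkNanos(n)         -> Parker::park(false, n)   (relative, nanoseconds)
//   LockSupport.parkUntil(deadline)  -> Parker::park(true, deadline)  (absolute, milliseconds)
//   LockSupport.unpark(thread)       -> Parker::unpark()
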
void Parker::unpark() {
  int s, status;
  status = pthread_mutex_lock(_mutex);
  assert (status == 0, "invariant");
  s = _counter;
  _counter = 1;
  if (s < 1) {
    if (WorkAroundNPTLTimedWaitHang) {
      status = pthread_cond_signal (_cond);
      assert (status == 0, "invariant");
      status = pthread_mutex_unlock(_mutex);
      assert (status == 0, "invariant");
    } else {
      status = pthread_mutex_unlock(_mutex);
      assert (status == 0, "invariant");
      status = pthread_cond_signal (_cond);
      assert (status == 0, "invariant");
    }
  } else {
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant");
  }
}

extern char** environ;

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from a signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
  char * argv[4] = {"sh", "-c", cmd, NULL};

  pid_t pid = fork();

  if (pid < 0) {
    // fork failed
    return -1;

  } else if (pid == 0) {
    // child process

    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
    execve("/usr/bin/sh", argv, environ);

    // execve failed
    _exit(-1);

  } else {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited.
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
      case ECHILD: return 0;
      case EINTR: break;
      default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal.
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through.
      return status;
    }
  }
  return -1;
}

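// Editor's note: a worked example of the status decoding above, assuming
// POSIX wait semantics:
//   child runs "exit 3"          -> WIFEXITED, WEXITSTATUS(status) = 3; we return 3
//   child killed by SIGKILL (9)  -> WIFSIGNALED, WTERMSIG(status) = 9;
//                                   we return 0x80 + 9 = 137
// 137 is exactly what sh reports in $? for a child killed by SIGKILL, so
// callers can tell normal exits (< 128) from deaths by signal (>= 128).
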
// is_headless_jre()
//
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report if we are running in a headless jre.
//
// Since JDK8, xawt/libmawt.so has been moved into the same directory
// as libawt.so and renamed libawt_xawt.so.
bool os::is_headless_jre() {
  struct stat statbuf;
  char buf[MAXPATHLEN];
  char libmawtpath[MAXPATHLEN];
  const char *xawtstr = "/xawt/libmawt.so";
  const char *new_xawtstr = "/libawt_xawt.so";

  char *p;

  // Get path to libjvm.so
  os::jvm_path(buf, sizeof(buf));

  // Get rid of libjvm.so
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // Get rid of client or server
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // check xawt/libmawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  // check libawt_xawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, new_xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  return true;
}

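// Editor's note: a worked example of the path trimming above, with the
// assumed (hypothetical) libjvm path "/opt/jdk/jre/lib/ppc64/server/libjvm.so":
//   after the first strrchr/truncate:  "/opt/jdk/jre/lib/ppc64/server"
//   after the second strrchr/truncate: "/opt/jdk/jre/lib/ppc64"
//   probe 1: "/opt/jdk/jre/lib/ppc64/xawt/libmawt.so"
//   probe 2: "/opt/jdk/jre/lib/ppc64/libawt_xawt.so"
// If neither probe exists, the JRE is reported as headless.
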
// Get the default path to the core file.
// Returns the length of the string.
int os::get_core_path(char* buffer, size_t bufferSize) {
  const char* p = get_current_directory(buffer, bufferSize);

  if (p == NULL) {
    assert(p != NULL, "failed to get current directory");
    return 0;
  }

  return strlen(buffer);
}

#ifndef PRODUCT
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif