GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os/posix/os_posix.cpp

/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "jvm.h"
#ifdef LINUX
#include "classfile/classLoader.hpp"
#endif
#include "jvmtifiles/jvmti.h"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "os_posix.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/osThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "services/attachListener.hpp"
#include "services/memTracker.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/perfMemory.hpp"
#include "utilities/align.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

#include <dirent.h>
#include <dlfcn.h>
#include <grp.h>
#include <netdb.h>
#include <pwd.h>
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#ifndef __ANDROID__
#include <utmpx.h>
#endif

#ifdef __APPLE__
#include <crt_externs.h>
#endif

#define ROOT_UID 0

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#define check_with_errno(check_type, cond, msg)                           \
  do {                                                                    \
    int err = errno;                                                      \
    check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err), \
               os::errno_name(err));                                      \
  } while (false)

#define assert_with_errno(cond, msg)    check_with_errno(assert, cond, msg)
#define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg)
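
// Illustrative usage sketch (not in the original source): how the
// errno-aware check macros above are typically used. 'fd' is a
// hypothetical descriptor from an earlier ::open() call.
//
//   int ret = ::close(fd);
//   assert_with_errno(ret == 0, "close failed");          // debug builds only
//   guarantee_with_errno(ret == 0, "close must succeed"); // checked in all builds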

// Check core dump limit and report possible place where core can be found
void os::check_dump_limit(char* buffer, size_t bufferSize) {
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line");
    VMError::record_coredump_status(buffer, false);
    return;
  }

  int n;
  struct rlimit rlim;
  bool success;

  char core_path[PATH_MAX];
  n = get_core_path(core_path, PATH_MAX);

  if (n <= 0) {
    jio_snprintf(buffer, bufferSize, "core.%d (may not exist)", current_process_id());
    success = true;
#ifdef LINUX
  } else if (core_path[0] == '"') { // redirect to user process
    jio_snprintf(buffer, bufferSize, "Core dumps may be processed with %s", core_path);
    success = true;
#endif
  } else if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
    jio_snprintf(buffer, bufferSize, "%s (may not exist)", core_path);
    success = true;
  } else {
    switch(rlim.rlim_cur) {
      case RLIM_INFINITY:
        jio_snprintf(buffer, bufferSize, "%s", core_path);
        success = true;
        break;
      case 0:
        jio_snprintf(buffer, bufferSize, "Core dumps have been disabled. To enable core dumping, try \"ulimit -c unlimited\" before starting Java again");
        success = false;
        break;
      default:
        jio_snprintf(buffer, bufferSize, "%s (max size " UINT64_FORMAT " kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", core_path, uint64_t(rlim.rlim_cur) / 1024);
        success = true;
        break;
    }
  }

  VMError::record_coredump_status(buffer, success);
}

int os::get_native_stack(address* stack, int frames, int toSkip) {
  int frame_idx = 0;
  int num_of_frames;  // number of frames captured
  frame fr = os::current_frame();
  while (fr.pc() && frame_idx < frames) {
    if (toSkip > 0) {
      toSkip--;
    } else {
      stack[frame_idx++] = fr.pc();
    }
    if (fr.fp() == NULL || fr.cb() != NULL ||
        fr.sender_pc() == NULL || os::is_first_C_frame(&fr)) break;

    if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
      fr = os::get_sender_for_C_frame(&fr);
    } else {
      break;
    }
  }
  num_of_frames = frame_idx;
  for (; frame_idx < frames; frame_idx++) {
    stack[frame_idx] = NULL;
  }

  return num_of_frames;
}

bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  return (::unsetenv(name) == 0);
}

int os::get_last_error() {
  return errno;
}

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = os::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

void os::wait_for_keypress_at_exit(void) {
  // don't do anything on posix platforms
  return;
}

int os::create_file_for_heap(const char* dir) {
  int fd;

#if defined(LINUX) && defined(O_TMPFILE)
  char* native_dir = os::strdup(dir);
  if (native_dir == NULL) {
    vm_exit_during_initialization(err_msg("strdup failed during creation of backing file for heap (%s)", os::strerror(errno)));
    return -1;
  }
  os::native_path(native_dir);
  fd = os::open(dir, O_TMPFILE | O_RDWR, S_IRUSR | S_IWUSR);
  os::free(native_dir);

  if (fd == -1)
#endif
  {
    const char name_template[] = "/jvmheap.XXXXXX";

    size_t fullname_len = strlen(dir) + strlen(name_template);
    char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
    if (fullname == NULL) {
      vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
      return -1;
    }
    int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
    assert((size_t)n == fullname_len, "Unexpected number of characters in string");

    os::native_path(fullname);

    // create a new file.
    fd = mkstemp(fullname);

    if (fd < 0) {
      warning("Could not create file for heap with template %s", fullname);
      os::free(fullname);
      return -1;
    } else {
      // delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted.
      int ret = unlink(fullname);
      assert_with_errno(ret == 0, "unlink returned error");
    }

    os::free(fullname);
  }

  return fd;
}

static char* reserve_mmapped_memory(size_t bytes, char* requested_addr) {
  char * addr;
  int flags = MAP_PRIVATE NOT_AIX( | MAP_NORESERVE ) | MAP_ANONYMOUS;
  if (requested_addr != NULL) {
    assert((uintptr_t)requested_addr % os::vm_page_size() == 0, "Requested address should be aligned to OS page size");
    flags |= MAP_FIXED;
  }

  // Map reserved/uncommitted pages PROT_NONE so we fail early if we
  // touch an uncommitted page. Otherwise, the read/write might
  // succeed if we have enough swap space to back the physical page.
  addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
                       flags, -1, 0);

  if (addr != MAP_FAILED) {
    MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC);
    return addr;
  }
  return NULL;
}

static int util_posix_fallocate(int fd, off_t offset, off_t len) {
#ifdef __APPLE__
  fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len };
  // First we try to get a contiguous chunk of disk space
  int ret = fcntl(fd, F_PREALLOCATE, &store);
  if (ret == -1) {
    // Maybe we are too fragmented, try to allocate a non-contiguous range
    store.fst_flags = F_ALLOCATEALL;
    ret = fcntl(fd, F_PREALLOCATE, &store);
  }
  if (ret != -1) {
    return ftruncate(fd, len);
  }
  return -1;
#else
  return posix_fallocate(fd, offset, len);
#endif
}

// Map the given address range to the provided file descriptor.
char* os::map_memory_to_file(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");

  // allocate space for the file
  int ret = util_posix_fallocate(fd, 0, (off_t)size);
  if (ret != 0) {
    vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory. error(%d)", ret));
    return NULL;
  }

  int prot = PROT_READ | PROT_WRITE;
  int flags = MAP_SHARED;
  if (base != NULL) {
    flags |= MAP_FIXED;
  }
  char* addr = (char*)mmap(base, size, prot, flags, fd, 0);

  if (addr == MAP_FAILED) {
    warning("Failed mmap to file. (%s)", os::strerror(errno));
    return NULL;
  }
  if (base != NULL && addr != base) {
    if (!os::release_memory(addr, size)) {
      warning("Could not release memory on unsuccessful file mapping");
    }
    return NULL;
  }
  return addr;
}

char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");
  assert(base != NULL, "Base cannot be NULL");

  return map_memory_to_file(base, size, fd);
}

static size_t calculate_aligned_extra_size(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment - 1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");
  return extra_size;
}

// After a bigger chunk was mapped, unmaps start and end parts to get the requested alignment.
static char* chop_extra_memory(size_t size, size_t alignment, char* extra_base, size_t extra_size) {
  // Do manual alignment
  char* aligned_base = align_up(extra_base, alignment);

  // [    |                                       |    ]
  // ^ extra_base
  //      ^ extra_base + begin_offset == aligned_base
  //        extra_base + begin_offset + size      ^
  //                       extra_base + extra_size     ^
  // |<-->| == begin_offset
  //                                  end_offset == |<-->|
  size_t begin_offset = aligned_base - extra_base;
  size_t end_offset = (extra_base + extra_size) - (aligned_base + size);

  if (begin_offset > 0) {
    os::release_memory(extra_base, begin_offset);
  }

  if (end_offset > 0) {
    os::release_memory(extra_base + begin_offset + size, end_offset);
  }

  return aligned_base;
}
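
// Worked example (illustration only, not in the original source): with
// extra_base = 0x7f0000001000, size = 0x10000 (64K), alignment = 0x10000
// and extra_size = size + alignment = 0x20000:
//   aligned_base = align_up(0x7f0000001000, 0x10000) = 0x7f0000010000
//   begin_offset = 0xf000 -> released at [extra_base, extra_base + 0xf000)
//   end_offset   = 0x1000 -> released just past aligned_base + size
// leaving exactly [aligned_base, aligned_base + size) mapped.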

char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_desc) {
  size_t extra_size = calculate_aligned_extra_size(size, alignment);
  // For file mapping, we do not call os::map_memory_to_file(size,fd) since:
  // - we later chop away parts of the mapping using os::release_memory and that could fail if the
  //   original mmap call had been tied to an fd.
  // - The memory API os::reserve_memory uses is an implementation detail. It may be (and usually is)
  //   mmap, but it may also be System V shared memory, which cannot be uncommitted as a whole, so
  //   chopping off and unmapping excess bits back and front (see below) would not work.
  char* extra_base = reserve_mmapped_memory(extra_size, NULL);
  if (extra_base == NULL) {
    return NULL;
  }
  char* aligned_base = chop_extra_memory(size, alignment, extra_base, extra_size);
  // After we have an aligned address, we can replace the anonymous mapping with a file mapping
  if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == NULL) {
    vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
  }
  MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
  return aligned_base;
}

int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
  // All supported POSIX platforms provide C99 semantics.
  int result = ::vsnprintf(buf, len, fmt, args);
  // If an encoding error occurred (result < 0) then it's not clear
  // whether the buffer is NUL terminated, so ensure it is.
  if ((result < 0) && (len > 0)) {
    buf[len - 1] = '\0';
  }
  return result;
}

int os::get_fileno(FILE* fp) {
  return NOT_AIX(::)fileno(fp);
}

struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
  return gmtime_r(clock, res);
}

void os::Posix::print_load_average(outputStream* st) {
  st->print("load average: ");
  double loadavg[3];
  int res = os::loadavg(loadavg, 3);
  if (res != -1) {
    st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  } else {
    st->print(" Unavailable");
  }
  st->cr();
}

// boot/uptime information;
// unfortunately it does not work on macOS and Linux because the utx chain has no entry
// for reboot, at least on my test machines
void os::Posix::print_uptime_info(outputStream* st) {
#ifndef __ANDROID__
  int bootsec = -1;
  int currsec = time(NULL);
  struct utmpx* ent;
  setutxent();
  while ((ent = getutxent())) {
    if (!strcmp("system boot", ent->ut_line)) {
      bootsec = ent->ut_tv.tv_sec;
      break;
    }
  }

  if (bootsec != -1) {
    os::print_dhm(st, "OS uptime:", (long) (currsec-bootsec));
  }
#endif
}

static void print_rlimit(outputStream* st, const char* msg,
                         int resource, bool output_k = false) {
  struct rlimit rlim;

  st->print(" %s ", msg);
  int res = getrlimit(resource, &rlim);
  if (res == -1) {
    st->print("could not obtain value");
  } else {
    // soft limit
    if (rlim.rlim_cur == RLIM_INFINITY) { st->print("infinity"); }
    else {
      if (output_k) { st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024); }
      else { st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur)); }
    }
    // hard limit
    st->print("/");
    if (rlim.rlim_max == RLIM_INFINITY) { st->print("infinity"); }
    else {
      if (output_k) { st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_max) / 1024); }
      else { st->print(UINT64_FORMAT, uint64_t(rlim.rlim_max)); }
    }
  }
}

void os::Posix::print_rlimit_info(outputStream* st) {
  st->print("rlimit (soft/hard):");
  print_rlimit(st, "STACK", RLIMIT_STACK, true);
  print_rlimit(st, ", CORE", RLIMIT_CORE, true);

#if defined(AIX)
  st->print(", NPROC ");
  st->print("%d", sysconf(_SC_CHILD_MAX));

  print_rlimit(st, ", THREADS", RLIMIT_THREADS);
#else
  print_rlimit(st, ", NPROC", RLIMIT_NPROC);
#endif

  print_rlimit(st, ", NOFILE", RLIMIT_NOFILE);
  print_rlimit(st, ", AS", RLIMIT_AS, true);
  print_rlimit(st, ", CPU", RLIMIT_CPU);
  print_rlimit(st, ", DATA", RLIMIT_DATA, true);

  // maximum size of files that the process may create
  print_rlimit(st, ", FSIZE", RLIMIT_FSIZE, true);

#if defined(LINUX) || defined(__APPLE__)
  // maximum number of bytes of memory that may be locked into RAM
  // (rounded down to the nearest multiple of system pagesize)
  print_rlimit(st, ", MEMLOCK", RLIMIT_MEMLOCK, true);
#endif

  // macOS: the maximum size (in bytes) to which a process's resident set size may grow.
#if defined(__APPLE__)
  print_rlimit(st, ", RSS", RLIMIT_RSS, true);
#endif

  st->cr();
}

void os::Posix::print_uname_info(outputStream* st) {
  // kernel
  st->print("uname: ");
  struct utsname name;
  uname(&name);
  st->print("%s ", name.sysname);
#ifdef ASSERT
  st->print("%s ", name.nodename);
#endif
  st->print("%s ", name.release);
  st->print("%s ", name.version);
  st->print("%s", name.machine);
  st->cr();
}

void os::Posix::print_umask(outputStream* st, mode_t umsk) {
  st->print((umsk & S_IRUSR) ? "r" : "-");
  st->print((umsk & S_IWUSR) ? "w" : "-");
  st->print((umsk & S_IXUSR) ? "x" : "-");
  st->print((umsk & S_IRGRP) ? "r" : "-");
  st->print((umsk & S_IWGRP) ? "w" : "-");
  st->print((umsk & S_IXGRP) ? "x" : "-");
  st->print((umsk & S_IROTH) ? "r" : "-");
  st->print((umsk & S_IWOTH) ? "w" : "-");
  st->print((umsk & S_IXOTH) ? "x" : "-");
}
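
// Illustrative example (not in the original source): print_umask() shows the
// bits that are *set* in the mask, i.e. the permissions that umask removes.
// For a typical umask of 0022 (S_IWGRP | S_IWOTH), print_user_info() below
// would therefore report: umask: 0022 (----w--w-)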

void os::Posix::print_user_info(outputStream* st) {
  unsigned id = (unsigned) ::getuid();
  st->print("uid : %u ", id);
  id = (unsigned) ::geteuid();
  st->print("euid : %u ", id);
  id = (unsigned) ::getgid();
  st->print("gid : %u ", id);
  id = (unsigned) ::getegid();
  st->print_cr("egid : %u", id);
  st->cr();

  mode_t umsk = ::umask(0);
  ::umask(umsk);
  st->print("umask: %04o (", (unsigned) umsk);
  print_umask(st, umsk);
  st->print_cr(")");
  st->cr();
}

bool os::get_host_name(char* buf, size_t buflen) {
  struct utsname name;
  uname(&name);
  jio_snprintf(buf, buflen, "%s", name.nodename);
  return true;
}

#ifndef _LP64
// Helper, on 32bit, for os::has_allocatable_memory_limit
static bool is_allocatable(size_t s) {
  if (s < 2 * G) {
    return true;
  }
  // Use raw anonymous mmap here; no need to go through any
  // of our reservation layers. We will unmap right away.
  void* p = ::mmap(NULL, s, PROT_NONE,
                   MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    return false;
  } else {
    ::munmap(p, s);
    return true;
  }
}
#endif // !_LP64

bool os::has_allocatable_memory_limit(size_t* limit) {
  struct rlimit rlim;
  int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
  // if there was an error when calling getrlimit, assume that there is no limitation
  // on virtual memory.
  bool result;
  if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) {
    result = false;
  } else {
    *limit = (size_t)rlim.rlim_cur;
    result = true;
  }
#ifdef _LP64
  return result;
#else
  // arbitrary virtual space limit for 32 bit Unices found by testing. If
  // getrlimit above returned a limit, bound it with this limit. Otherwise
  // directly use it.
  const size_t max_virtual_limit = 3800*M;
  if (result) {
    *limit = MIN2(*limit, max_virtual_limit);
  } else {
    *limit = max_virtual_limit;
  }

  // bound by actually allocatable memory. The algorithm uses two bounds, an
  // upper and a lower limit. The upper limit is the current highest amount of
  // memory that could not be allocated, the lower limit is the current highest
  // amount of memory that could be allocated.
  // The algorithm iteratively refines the result by halving the difference
  // between these limits, updating either the upper limit (if that value could
  // not be allocated) or the lower limit (if that value could be allocated)
  // until the difference between these limits is "small".

  // the minimum amount of memory we care about allocating.
  const size_t min_allocation_size = M;

  size_t upper_limit = *limit;

  // first check a few trivial cases
  if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) {
    *limit = upper_limit;
  } else if (!is_allocatable(min_allocation_size)) {
    // we found that not even min_allocation_size is allocatable. Return it
    // anyway. There is no point to search for a better value any more.
    *limit = min_allocation_size;
  } else {
    // perform the binary search.
    size_t lower_limit = min_allocation_size;
    while ((upper_limit - lower_limit) > min_allocation_size) {
      size_t temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
      temp_limit = align_down(temp_limit, min_allocation_size);
      if (is_allocatable(temp_limit)) {
        lower_limit = temp_limit;
      } else {
        upper_limit = temp_limit;
      }
    }
    *limit = lower_limit;
  }
  return true;
#endif
}
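
// Worked example (illustration only, not in the original source): on a
// hypothetical 32-bit process whose true ceiling is ~2500M, the search above
// starts with bounds [1M, 3800M] and halves the gap each round:
//   probe ~1900M: allocatable -> lower = 1900M
//   probe ~2850M: fails       -> upper = 2850M
//   probe ~2375M: allocatable -> lower = 2375M
//   ... continuing until upper - lower <= 1M (about 12 iterations),
// after which *limit is set to the last allocatable lower bound.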

void os::dll_unload(void *lib) {
  ::dlclose(lib);
}

jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) BSD_ONLY(::lseek) NOT_BSD(::lseek64)(fd, offset, whence);
}

int os::fsync(int fd) {
  return ::fsync(fd);
}

int os::ftruncate(int fd, jlong length) {
  return BSD_ONLY(::ftruncate) NOT_BSD(::ftruncate64)(fd, length);
}

const char* os::get_current_directory(char *buf, size_t buflen) {
  return getcwd(buf, buflen);
}

FILE* os::open(int fd, const char* mode) {
  return ::fdopen(fd, mode);
}

size_t os::write(int fd, const void *buf, unsigned int nBytes) {
  size_t res;
  RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
  return res;
}

ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}

int os::close(int fd) {
  return ::close(fd);
}

void os::flockfile(FILE* fp) {
  ::flockfile(fp);
}

void os::funlockfile(FILE* fp) {
  ::funlockfile(fp);
}

DIR* os::opendir(const char* dirname) {
  assert(dirname != NULL, "just checking");
  return ::opendir(dirname);
}

struct dirent* os::readdir(DIR* dirp) {
  assert(dirp != NULL, "just checking");
  return ::readdir(dirp);
}

int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");
  return ::closedir(dirp);
}

int os::socket_close(int fd) {
  return ::close(fd);
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  RESTARTABLE_RETURN_INT(::recv(fd, buf, nBytes, flags));
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags));
}

int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return os::send(fd, buf, nBytes, flags);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  RESTARTABLE_RETURN_INT(::connect(fd, him, len));
}

struct hostent* os::get_host_by_name(char* name) {
  return ::gethostbyname(name);
}

void os::exit(int num) {
  ::exit(num);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                such as "/a/b/libL.so"
//                             == false if only the base name of the library is passed in
//                                such as "L"
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      }
      if (strlen(lib_name) <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  strcpy(agent_entry_name, sym_name);
  if (lib_name != NULL) {
    strcat(agent_entry_name, "_");
    strncat(agent_entry_name, lib_name, name_len);
  }
  return agent_entry_name;
}
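
// Illustrative example (not in the original source), assuming
// JNI_LIB_PREFIX is "lib" and JNI_LIB_SUFFIX is ".so":
//
//   build_agent_function_name("Agent_OnLoad", "/a/b/libL.so", true)
//   // strips the path to "libL.so", skips the "lib" prefix, trims the
//   // ".so" suffix, and returns the C-heap string "Agent_OnLoad_L"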

void os::naked_short_nanosleep(jlong ns) {
  struct timespec req;
  assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
  req.tv_sec = 0;
  req.tv_nsec = ns;
  ::nanosleep(&req, NULL);
  return;
}

void os::naked_short_sleep(jlong ms) {
  assert(ms < MILLIUNITS, "Un-interruptible sleep, short time use only");
  os::naked_short_nanosleep(millis_to_nanos(ms));
  return;
}

char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr) {
  size_t stack_size = 0;
  size_t guard_size = 0;
  int detachstate = 0;
  pthread_attr_getstacksize(attr, &stack_size);
  pthread_attr_getguardsize(attr, &guard_size);
  // Work around linux NPTL implementation error, see also os::create_thread() in os_linux.cpp.
  LINUX_ONLY(stack_size -= guard_size);
  pthread_attr_getdetachstate(attr, &detachstate);
  jio_snprintf(buf, buflen, "stacksize: " SIZE_FORMAT "k, guardsize: " SIZE_FORMAT "k, %s",
               stack_size / 1024, guard_size / 1024,
               (detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable"));
  return buf;
}

char* os::Posix::realpath(const char* filename, char* outbuf, size_t outbuflen) {

  if (filename == NULL || outbuf == NULL || outbuflen < 1) {
    assert(false, "os::Posix::realpath: invalid arguments.");
    errno = EINVAL;
    return NULL;
  }

  char* result = NULL;

  // This assumes platform realpath() is implemented according to POSIX.1-2008.
  // POSIX.1-2008 allows specifying NULL for the output buffer, in which case the
  // output buffer is dynamically allocated and must be ::free()'d by the caller.
  char* p = ::realpath(filename, NULL);
  if (p != NULL) {
    if (strlen(p) < outbuflen) {
      strcpy(outbuf, p);
      result = outbuf;
    } else {
      errno = ENAMETOOLONG;
    }
    ::free(p); // *not* os::free
  } else {
    // Fallback for platforms struggling with modern POSIX standards (AIX 5.3, 6.1). If realpath
    // returns EINVAL, this may indicate that realpath is not POSIX.1-2008 compatible and
    // that it complains about the NULL we handed down as user buffer.
    // In this case, use the user provided buffer but at least check whether realpath caused
    // a memory overwrite.
    if (errno == EINVAL) {
      outbuf[outbuflen - 1] = '\0';
      p = ::realpath(filename, outbuf);
      if (p != NULL) {
        guarantee(outbuf[outbuflen - 1] == '\0', "realpath buffer overwrite detected.");
        result = p;
      }
    }
  }
  return result;

}
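
// Illustrative sketch (not in the original source; "/tmp/../etc" is just an
// example input): resolving a path into a caller-supplied buffer.
//
//   char resolved[PATH_MAX];
//   if (os::Posix::realpath("/tmp/../etc", resolved, sizeof(resolved)) != NULL) {
//     // resolved now holds "/etc"
//   } else {
//     // errno is EINVAL, ENAMETOOLONG, or whatever ::realpath() set
//   }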

int os::stat(const char *path, struct stat *sbuf) {
  return ::stat(path, sbuf);
}

char * os::native_path(char *path) {
  return path;
}

bool os::same_files(const char* file1, const char* file2) {
  if (file1 == nullptr && file2 == nullptr) {
    return true;
  }

  if (file1 == nullptr || file2 == nullptr) {
    return false;
  }

  if (strcmp(file1, file2) == 0) {
    return true;
  }

  bool is_same = false;
  struct stat st1;
  struct stat st2;

  if (os::stat(file1, &st1) < 0) {
    return false;
  }

  if (os::stat(file2, &st2) < 0) {
    return false;
  }

  if (st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino) {
    // same files
    is_same = true;
  }
  return is_same;
}

// Check minimum allowable stack sizes for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size.
// The space needed for frames during startup is platform dependent. It
// depends on word size, platform calling conventions, C frame layout and
// interpreter/C1/C2 design decisions. Therefore this is given in a
// platform (os/cpu) dependent constant.
// To this, space for guard mechanisms is added, which depends on the
// page size which again depends on the concrete system the VM is running
// on. Space for libc guard pages is not included in this size.
jint os::Posix::set_minimum_stack_sizes() {
  size_t os_min_stack_allowed = PTHREAD_STACK_MIN;

  _java_thread_min_stack_allowed = _java_thread_min_stack_allowed +
                                   StackOverflow::stack_guard_zone_size() +
                                   StackOverflow::stack_shadow_zone_size();

  _java_thread_min_stack_allowed = align_up(_java_thread_min_stack_allowed, vm_page_size());
  _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, os_min_stack_allowed);

  size_t stack_size_in_bytes = ThreadStackSize * K;
  if (stack_size_in_bytes != 0 &&
      stack_size_in_bytes < _java_thread_min_stack_allowed) {
    // The '-Xss' and '-XX:ThreadStackSize=N' options both set
    // ThreadStackSize so we go with "Java thread stack size" instead
    // of "ThreadStackSize" to be more friendly.
    tty->print_cr("\nThe Java thread stack size specified is too small. "
                  "Specify at least " SIZE_FORMAT "k",
                  _java_thread_min_stack_allowed / K);
    return JNI_ERR;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(align_up(stack_size_in_bytes, vm_page_size()));

  // Reminder: a compiler thread is a Java thread.
  _compiler_thread_min_stack_allowed = _compiler_thread_min_stack_allowed +
                                       StackOverflow::stack_guard_zone_size() +
                                       StackOverflow::stack_shadow_zone_size();

  _compiler_thread_min_stack_allowed = align_up(_compiler_thread_min_stack_allowed, vm_page_size());
  _compiler_thread_min_stack_allowed = MAX2(_compiler_thread_min_stack_allowed, os_min_stack_allowed);

  stack_size_in_bytes = CompilerThreadStackSize * K;
  if (stack_size_in_bytes != 0 &&
      stack_size_in_bytes < _compiler_thread_min_stack_allowed) {
    tty->print_cr("\nThe CompilerThreadStackSize specified is too small. "
                  "Specify at least " SIZE_FORMAT "k",
                  _compiler_thread_min_stack_allowed / K);
    return JNI_ERR;
  }

  _vm_internal_thread_min_stack_allowed = align_up(_vm_internal_thread_min_stack_allowed, vm_page_size());
  _vm_internal_thread_min_stack_allowed = MAX2(_vm_internal_thread_min_stack_allowed, os_min_stack_allowed);

  stack_size_in_bytes = VMThreadStackSize * K;
  if (stack_size_in_bytes != 0 &&
      stack_size_in_bytes < _vm_internal_thread_min_stack_allowed) {
    tty->print_cr("\nThe VMThreadStackSize specified is too small. "
                  "Specify at least " SIZE_FORMAT "k",
                  _vm_internal_thread_min_stack_allowed / K);
    return JNI_ERR;
  }
  return JNI_OK;
}

// Called when creating the thread. The minimum stack sizes have already been calculated
size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
  size_t stack_size;
  if (req_stack_size == 0) {
    stack_size = default_stack_size(thr_type);
  } else {
    stack_size = req_stack_size;
  }

  switch (thr_type) {
  case os::java_thread:
    // Java threads use ThreadStackSize which default value can be
    // changed with the flag -Xss
    if (req_stack_size == 0 && JavaThread::stack_size_at_create() > 0) {
      // no requested size and we have a more specific default value
      stack_size = JavaThread::stack_size_at_create();
    }
    stack_size = MAX2(stack_size,
                      _java_thread_min_stack_allowed);
    break;
  case os::compiler_thread:
    if (req_stack_size == 0 && CompilerThreadStackSize > 0) {
      // no requested size and we have a more specific default value
      stack_size = (size_t)(CompilerThreadStackSize * K);
    }
    stack_size = MAX2(stack_size,
                      _compiler_thread_min_stack_allowed);
    break;
  case os::vm_thread:
  case os::pgc_thread:
  case os::cgc_thread:
  case os::watcher_thread:
  default:  // presume the unknown thr_type is a VM internal
    if (req_stack_size == 0 && VMThreadStackSize > 0) {
      // no requested size and we have a more specific default value
      stack_size = (size_t)(VMThreadStackSize * K);
    }

    stack_size = MAX2(stack_size,
                      _vm_internal_thread_min_stack_allowed);
    break;
  }

  // pthread_attr_setstacksize() may require that the size be rounded up to the OS page size.
  // Be careful not to round up to 0. Align down in that case.
  if (stack_size <= SIZE_MAX - vm_page_size()) {
    stack_size = align_up(stack_size, vm_page_size());
  } else {
    stack_size = align_down(stack_size, vm_page_size());
  }

  return stack_size;
}

#ifndef ZERO
#ifndef ARM
static bool get_frame_at_stack_banging_point(JavaThread* thread, address pc, const void* ucVoid, frame* fr) {
  if (Interpreter::contains(pc)) {
    // interpreter performs stack banging after the fixed frame header has
    // been generated while the compilers perform it before. To maintain
    // semantic consistency between interpreted and compiled frames, the
    // method returns the Java sender of the current frame.
    *fr = os::fetch_frame_from_context(ucVoid);
    if (!fr->is_first_java_frame()) {
      // get_frame_at_stack_banging_point() is only called when we
      // have well defined stacks so java_sender() calls do not need
      // to assert safe_for_sender() first.
      *fr = fr->java_sender();
    }
  } else {
    // more complex code with compiled code
    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
    CodeBlob* cb = CodeCache::find_blob(pc);
    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
      // Not sure where the pc points to, fallback to default
      // stack overflow handling
      return false;
    } else {
      // in compiled code, the stack banging is performed just after the return pc
      // has been pushed on the stack
      *fr = os::fetch_compiled_frame_from_context(ucVoid);
      if (!fr->is_java_frame()) {
        assert(!fr->is_first_frame(), "Safety check");
        // See java_sender() comment above.
        *fr = fr->java_sender();
      }
    }
  }
  assert(fr->is_java_frame(), "Safety check");
  return true;
}
#endif // ARM

// This returns true if the signal handler should just continue, i.e. return after calling this
bool os::Posix::handle_stack_overflow(JavaThread* thread, address addr, address pc,
                                      const void* ucVoid, address* stub) {
  // stack overflow
  StackOverflow* overflow_state = thread->stack_overflow_state();
  if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
    if (thread->thread_state() == _thread_in_Java) {
#ifndef ARM
      // arm32 doesn't have this
      if (overflow_state->in_stack_reserved_zone(addr)) {
        frame fr;
        if (get_frame_at_stack_banging_point(thread, pc, ucVoid, &fr)) {
          assert(fr.is_java_frame(), "Must be a Java frame");
          frame activation =
            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
          if (activation.sp() != NULL) {
            overflow_state->disable_stack_reserved_zone();
            if (activation.is_interpreted_frame()) {
              overflow_state->set_reserved_stack_activation((address)(activation.fp()
                // Some platforms use frame pointers for interpreter frames, others use initial sp.
#if !defined(PPC64) && !defined(S390)
                + frame::interpreter_frame_initial_sp_offset
#endif
                ));
            } else {
              overflow_state->set_reserved_stack_activation((address)activation.unextended_sp());
            }
            return true; // just continue
          }
        }
      }
#endif // ARM
      // Throw a stack overflow exception. Guard pages will be re-enabled
      // while unwinding the stack.
      overflow_state->disable_stack_yellow_reserved_zone();
      *stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
    } else {
      // Thread was in the vm or native code. Return and try to finish.
      overflow_state->disable_stack_yellow_reserved_zone();
      return true; // just continue
    }
  } else if (overflow_state->in_stack_red_zone(addr)) {
    // Fatal red zone violation. Disable the guard pages and fall through
    // to handle_unexpected_exception way down below.
    overflow_state->disable_stack_red_zone();
    tty->print_raw_cr("An irrecoverable stack overflow has occurred.");

    // This is a likely cause, but hard to verify. Let's just print
    // it as a hint.
    tty->print_raw_cr("Please check if any of your loaded .so files has "
                      "enabled executable stack (see man page execstack(8))");

  } else {
#if !defined(AIX) && !defined(__APPLE__)
    // bsd and aix don't have this

    // Accessing stack address below sp may cause SEGV if current
    // thread has MAP_GROWSDOWN stack. This should only happen when
    // current thread was created by user code with MAP_GROWSDOWN flag
    // and then attached to VM. See notes in os_linux.cpp.
    if (thread->osthread()->expanding_stack() == 0) {
      thread->osthread()->set_expanding_stack();
      if (os::Linux::manually_expand_stack(thread, addr)) {
        thread->osthread()->clear_expanding_stack();
        return true; // just continue
      }
      thread->osthread()->clear_expanding_stack();
    } else {
      fatal("recursive segv. expanding stack.");
    }
#else
    tty->print_raw_cr("SIGSEGV happened inside stack but outside yellow and red zone.");
#endif // AIX or BSD
  }
  return false;
}
#endif // ZERO

bool os::Posix::is_root(uid_t uid) {
  return ROOT_UID == uid;
}

bool os::Posix::matches_effective_uid_or_root(uid_t uid) {
  return is_root(uid) || geteuid() == uid;
}

bool os::Posix::matches_effective_uid_and_gid_or_root(uid_t uid, gid_t gid) {
  return is_root(uid) || (geteuid() == uid && getegid() == gid);
}

Thread* os::ThreadCrashProtection::_protected_thread = NULL;
os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;

os::ThreadCrashProtection::ThreadCrashProtection() {
  _protected_thread = Thread::current();
  assert(_protected_thread->is_JfrSampler_thread(), "should be JFRSampler");
}

/*
 * See the caveats for this class in os_posix.hpp
 * Protects the callback call so that SIGSEGV / SIGBUS jumps back into this
 * method and returns false. If none of the signals are raised, returns true.
 * The callback is supposed to provide the method that should be protected.
 */
bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
  sigset_t saved_sig_mask;

  // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask
  // since on at least some systems (OS X) siglongjmp will restore the mask
  // for the process, not the thread
  pthread_sigmask(0, NULL, &saved_sig_mask);
  if (sigsetjmp(_jmpbuf, 0) == 0) {
    // make sure we can see in the signal handler that we have crash protection
    // installed
    _crash_protection = this;
    cb.call();
    // and clear the crash protection
    _crash_protection = NULL;
    _protected_thread = NULL;
    return true;
  }
  // this happens when we siglongjmp() back
  pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
  _crash_protection = NULL;
  _protected_thread = NULL;
  return false;
}
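
// Illustrative sketch (not in the original source; 'TouchMemory' is a
// hypothetical callback subclass): running a crash-sensitive operation
// under protection, in the style of the JFR sampler thread.
//
//   class TouchMemory : public os::CrashProtectionCallback {
//    public:
//     virtual void call() { /* dereference memory that may fault */ }
//   };
//   TouchMemory cb;
//   os::ThreadCrashProtection crash_protection;
//   if (!crash_protection.call(cb)) {
//     // a SIGSEGV/SIGBUS was intercepted and we siglongjmp'ed back here
//   }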

void os::ThreadCrashProtection::restore() {
  assert(_crash_protection != NULL, "must have crash protection");
  siglongjmp(_jmpbuf, 1);
}

void os::ThreadCrashProtection::check_crash_protection(int sig,
                                                       Thread* thread) {

  if (thread != NULL &&
      thread == _protected_thread &&
      _crash_protection != NULL) {

    if (sig == SIGSEGV || sig == SIGBUS) {
      _crash_protection->restore();
    }
  }
}

// Shared clock/time and other supporting routines for pthread_mutex/cond
// initialization. This is enabled on Solaris but only some of the clock/time
// functionality is actually used there.

// Shared condattr object for use with relative timed-waits. Will be associated
// with CLOCK_MONOTONIC if available to avoid issues with time-of-day changes,
// but otherwise whatever default is used by the platform - generally the
// time-of-day clock.
static pthread_condattr_t _condAttr[1];

// Shared mutexattr to explicitly set the type to PTHREAD_MUTEX_NORMAL as not
// all systems (e.g. FreeBSD) map the default to "normal".
static pthread_mutexattr_t _mutexAttr[1];

// common basic initialization that is always supported
static void pthread_init_common(void) {
  int status;
  if ((status = pthread_condattr_init(_condAttr)) != 0) {
    fatal("pthread_condattr_init: %s", os::strerror(status));
  }
  if ((status = pthread_mutexattr_init(_mutexAttr)) != 0) {
    fatal("pthread_mutexattr_init: %s", os::strerror(status));
  }
  if ((status = pthread_mutexattr_settype(_mutexAttr, PTHREAD_MUTEX_NORMAL)) != 0) {
    fatal("pthread_mutexattr_settype: %s", os::strerror(status));
  }
  os::PlatformMutex::init();
}

static int (*_pthread_condattr_setclock)(pthread_condattr_t *, clockid_t) = NULL;

static bool _use_clock_monotonic_condattr = false;

// Determine what POSIX API's are present and do appropriate
// configuration.
void os::Posix::init(void) {

  // NOTE: no logging available when this is called. Put logging
  // statements in init_2().

  // Check for pthread_condattr_setclock support.

  // libpthread is already loaded.
  int (*condattr_setclock_func)(pthread_condattr_t*, clockid_t) =
    (int (*)(pthread_condattr_t*, clockid_t))dlsym(RTLD_DEFAULT,
                                                   "pthread_condattr_setclock");
  if (condattr_setclock_func != NULL) {
    _pthread_condattr_setclock = condattr_setclock_func;
  }

  // Now do general initialization.

  pthread_init_common();

  int status;
  if (_pthread_condattr_setclock != NULL) {
    if ((status = _pthread_condattr_setclock(_condAttr, CLOCK_MONOTONIC)) != 0) {
      if (status == EINVAL) {
        _use_clock_monotonic_condattr = false;
        warning("Unable to use monotonic clock with relative timed-waits" \
                " - changes to the time-of-day clock may have adverse effects");
      } else {
        fatal("pthread_condattr_setclock: %s", os::strerror(status));
      }
    } else {
      _use_clock_monotonic_condattr = true;
    }
  }
}

void os::Posix::init_2(void) {
  log_info(os)("Use of CLOCK_MONOTONIC is supported");
  log_info(os)("Use of pthread_condattr_setclock is%s supported",
               (_pthread_condattr_setclock != NULL ? "" : " not"));
  log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with %s",
               _use_clock_monotonic_condattr ? "CLOCK_MONOTONIC" : "the default clock");
}

// Utility to convert the given timeout to an absolute timespec
// (based on the appropriate clock) to use with pthread_cond_timewait,
// and sem_timedwait().
// The clock queried here must be the clock used to manage the
// timeout of the condition variable or semaphore.
//
// The passed in timeout value is either a relative time in nanoseconds
// or an absolute time in milliseconds. A relative timeout will be
// associated with CLOCK_MONOTONIC if available, unless the real-time clock
// is explicitly requested; otherwise, or if absolute,
// the default time-of-day clock will be used.

// Given time is a 64-bit value and the time_t used in the timespec is
// sometimes a signed-32-bit value we have to watch for overflow if times
// way in the future are given. Further, on Solaris versions
// prior to 10 there is a restriction (see cond_timedwait) that the specified
// number of seconds, in abstime, is less than current_time + 100000000.
// As it will be over 20 years before "now + 100000000" will overflow we can
// ignore overflow and just impose a hard-limit on seconds using the value
// of "now + 100000000". This places a limit on the timeout of about 3.17
// years from "now".
//
#define MAX_SECS 100000000

// Calculate a new absolute time that is "timeout" nanoseconds from "now".
// "unit" indicates the unit of "now_part_sec" (may be nanos or micros depending
// on which clock API is being used).
static void calc_rel_time(timespec* abstime, jlong timeout, jlong now_sec,
                          jlong now_part_sec, jlong unit) {
  time_t max_secs = now_sec + MAX_SECS;

  jlong seconds = timeout / NANOUNITS;
  timeout %= NANOUNITS; // remaining nanos

  if (seconds >= MAX_SECS) {
    // More seconds than we can add, so pin to max_secs.
    abstime->tv_sec = max_secs;
    abstime->tv_nsec = 0;
  } else {
    abstime->tv_sec = now_sec + seconds;
    long nanos = (now_part_sec * (NANOUNITS / unit)) + timeout;
    if (nanos >= NANOUNITS) { // overflow
      abstime->tv_sec += 1;
      nanos -= NANOUNITS;
    }
    abstime->tv_nsec = nanos;
  }
}

// Unpack the given deadline in milliseconds since the epoch, into the given timespec.
// The current time in seconds is also passed in to enforce an upper bound as discussed above.
static void unpack_abs_time(timespec* abstime, jlong deadline, jlong now_sec) {
  time_t max_secs = now_sec + MAX_SECS;

  jlong seconds = deadline / MILLIUNITS;
  jlong millis = deadline % MILLIUNITS;

  if (seconds >= max_secs) {
    // Absolute seconds exceeds allowed max, so pin to max_secs.
    abstime->tv_sec = max_secs;
    abstime->tv_nsec = 0;
  } else {
    abstime->tv_sec = seconds;
    abstime->tv_nsec = millis_to_nanos(millis);
  }
}

static jlong millis_to_nanos_bounded(jlong millis) {
  // We have to watch for overflow when converting millis to nanos,
  // but if millis is that large then we will end up limiting to
  // MAX_SECS anyway, so just do that here.
  if (millis / MILLIUNITS > MAX_SECS) {
    millis = jlong(MAX_SECS) * MILLIUNITS;
  }
  return millis_to_nanos(millis);
}

static void to_abstime(timespec* abstime, jlong timeout,
                       bool isAbsolute, bool isRealtime) {
  DEBUG_ONLY(int max_secs = MAX_SECS;)

  if (timeout < 0) {
    timeout = 0;
  }

  clockid_t clock = CLOCK_MONOTONIC;
  if (isAbsolute || (!_use_clock_monotonic_condattr || isRealtime)) {
    clock = CLOCK_REALTIME;
  }

  struct timespec now;
  int status = clock_gettime(clock, &now);
  assert(status == 0, "clock_gettime error: %s", os::strerror(errno));

  if (!isAbsolute) {
    calc_rel_time(abstime, timeout, now.tv_sec, now.tv_nsec, NANOUNITS);
  } else {
    unpack_abs_time(abstime, timeout, now.tv_sec);
  }
  DEBUG_ONLY(max_secs += now.tv_sec;)

  assert(abstime->tv_sec >= 0, "tv_sec < 0");
  assert(abstime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(abstime->tv_nsec >= 0, "tv_nsec < 0");
  assert(abstime->tv_nsec < NANOUNITS, "tv_nsec >= NANOUNITS");
}
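
// Worked example (illustration only, not in the original source): a relative
// timeout of 500ms arrives here as timeout = 500000000ns. If CLOCK_MONOTONIC
// now reads {tv_sec = 1000, tv_nsec = 800000000}, calc_rel_time() computes
// nanos = 800000000 + 500000000 = 1300000000 >= NANOUNITS, so it carries one
// second and yields abstime = {tv_sec = 1001, tv_nsec = 300000000}.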

// Create an absolute time 'millis' milliseconds in the future, using the
// real-time (time-of-day) clock. Used by PosixSemaphore.
void os::Posix::to_RTC_abstime(timespec* abstime, int64_t millis) {
  to_abstime(abstime, millis_to_nanos_bounded(millis),
             false /* not absolute */,
             true  /* use real-time clock */);
}

// Common (partly) shared time functions

jlong os::javaTimeMillis() {
  struct timespec ts;
  int status = clock_gettime(CLOCK_REALTIME, &ts);
  assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
  return jlong(ts.tv_sec) * MILLIUNITS +
         jlong(ts.tv_nsec) / NANOUNITS_PER_MILLIUNIT;
}

void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  struct timespec ts;
  int status = clock_gettime(CLOCK_REALTIME, &ts);
  assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
  seconds = jlong(ts.tv_sec);
  nanos = jlong(ts.tv_nsec);
}

// macOS and AIX have platform specific implementations for javaTimeNanos()
// using native clock/timer access APIs. These have historically worked well
// for those platforms, but it may be possible for them to switch to the
// generic clock_gettime mechanism in the future.
#if !defined(__APPLE__) && !defined(AIX)

jlong os::javaTimeNanos() {
  struct timespec tp;
  int status = clock_gettime(CLOCK_MONOTONIC, &tp);
  assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
  jlong result = jlong(tp.tv_sec) * NANOSECS_PER_SEC + jlong(tp.tv_nsec);
  return result;
}

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  // CLOCK_MONOTONIC - amount of time since some arbitrary point in the past
  info_ptr->max_value = ALL_64_BITS;
  info_ptr->may_skip_backward = false;  // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;   // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
}

#endif // ! APPLE && !AIX

// Shared pthread_mutex/cond based PlatformEvent implementation.
// Not currently usable by Solaris.

// PlatformEvent
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread. Multiple unparkers can coexist.
//
// _event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
// Having three states allows for some detection of bad usage - see
// comments on unpark().

os::PlatformEvent::PlatformEvent() {
  int status = pthread_cond_init(_cond, _condAttr);
  assert_status(status == 0, status, "cond_init");
  status = pthread_mutex_init(_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
  _event   = 0;
  _nParked = 0;
}
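
// Illustrative usage sketch (not in the original source): the owning thread
// blocks in park() while any other thread releases it with unpark(); the
// three-state _event makes a redundant second unpark() harmless.
//
//   os::PlatformEvent ev;     // owned by exactly one parking thread
//   // waiter:  ev.park();    // blocks while _event is -1
//   // waker:   ev.unpark();  // forces _event to 1, signals if a waiter parked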

void os::PlatformEvent::park() {       // AKA "down()"
  // Transitions for _event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _event to 0 before returning

  // Invariant: Only the thread associated with the PlatformEvent
  // may call park().
  assert(_nParked == 0, "invariant");

  int v;

  // atomically decrement _event
  for (;;) {
    v = _event;
    if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
  }
  guarantee(v >= 0, "invariant");

  if (v == 0) { // Do this the hard way by blocking ...
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee(_nParked == 0, "invariant");
    ++_nParked;
    while (_event < 0) {
      // OS-level "spurious wakeups" are ignored
      status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
                    status, "cond_wait");
    }
    --_nParked;

    _event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
  guarantee(_event >= 0, "invariant");
}

int os::PlatformEvent::park(jlong millis) {
  // Transitions for _event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _event to 0 before returning

  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  assert(_nParked == 0, "invariant");

  int v;
  // atomically decrement _event
  for (;;) {
    v = _event;
    if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
  }
  guarantee(v >= 0, "invariant");

  if (v == 0) { // Do this the hard way by blocking ...
    struct timespec abst;
    to_abstime(&abst, millis_to_nanos_bounded(millis), false, false);

    int ret = OS_TIMEOUT;
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee(_nParked == 0, "invariant");
    ++_nParked;

    while (_event < 0) {
      status = pthread_cond_timedwait(_cond, _mutex, &abst);
      assert_status(status == 0 || status == ETIMEDOUT,
                    status, "cond_timedwait");
      // OS-level "spurious wakeups" are ignored unless the archaic
      // FilterSpuriousWakeups is set false. That flag should be obsoleted.
      if (!FilterSpuriousWakeups) break;
      if (status == ETIMEDOUT) break;
    }
    --_nParked;

    if (_event >= 0) {
      ret = OS_OK;
    }

    _event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
    return ret;
  }
  return OS_OK;
}

void os::PlatformEvent::unpark() {
  // Transitions for _event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without checking state conditions
  // properly. This spurious return doesn't manifest itself in any user code
  // but only in the correctly written condition checking loops of ObjectMonitor,
  // Mutex/Monitor, and JavaThread::sleep

  if (Atomic::xchg(&_event, 1) >= 0) return;

  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int anyWaiters = _nParked;
  assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");

  // Note that we signal() *after* dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups. In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim
  // will simply re-test the condition and re-park itself.
  // This provides particular benefit if the underlying platform does not
  // provide wait morphing.

  if (anyWaiters != 0) {
    status = pthread_cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }
}

// JSR166 support

os::PlatformParker::PlatformParker() : _counter(0), _cur_index(-1) {
  int status = pthread_cond_init(&_cond[REL_INDEX], _condAttr);
  assert_status(status == 0, status, "cond_init rel");
  status = pthread_cond_init(&_cond[ABS_INDEX], NULL);
  assert_status(status == 0, status, "cond_init abs");
  status = pthread_mutex_init(_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
}
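
// Note: two condition variables are kept because relative waits go through
// _condAttr (which selects a monotonic clock where the platform supports
// pthread_condattr_setclock()), while the absolute-wait condvar is created
// with default attributes so its deadlines are measured against
// CLOCK_REALTIME, matching the epoch-based absolute timeouts used by
// java.util.concurrent.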

os::PlatformParker::~PlatformParker() {
  int status = pthread_cond_destroy(&_cond[REL_INDEX]);
  assert_status(status == 0, status, "cond_destroy rel");
  status = pthread_cond_destroy(&_cond[ABS_INDEX]);
  assert_status(status == 0, status, "cond_destroy abs");
  status = pthread_mutex_destroy(_mutex);
  assert_status(status == 0, status, "mutex_destroy");
}

// Parker::park decrements count if > 0, else does a condvar wait. Unpark
// sets count to 1 and signals condvar. Only one thread ever waits
// on the condvar. Contention seen when trying to park implies that someone
// is unparking you, so don't wait. And spurious returns are fine, so there
// is no need to track notifications.

void Parker::park(bool isAbsolute, jlong time) {

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(&_counter, 0) > 0) return;

  JavaThread *jt = JavaThread::current();

  // Optional optimization -- avoid state transitions if there's
  // an interrupt pending.
  if (jt->is_interrupted(false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  struct timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    to_abstime(&absTime, time, isAbsolute, false);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending, both the
  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Can't access interrupt state now that we are _thread_blocked. If we've
  // been interrupted since we checked above then _counter will be > 0.

  // Don't wait if we cannot get the lock, since interference arises from
  // unparking.
  if (pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "invariant");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

  OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

  assert(_cur_index == -1, "invariant");
  if (time == 0) {
    _cur_index = REL_INDEX; // arbitrary choice when not timed
    status = pthread_cond_wait(&_cond[_cur_index], _mutex);
    assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
                  status, "cond_wait");
  }
  else {
    _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
    status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
  }
  _cur_index = -1;

  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();
}

void Parker::unpark() {
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "invariant");
  const int s = _counter;
  _counter = 1;
  // must capture correct index before unlocking
  int index = _cur_index;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");

  // Note that we signal() *after* dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups. In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim
  // will simply re-test the condition and re-park itself.
  // This provides particular benefit if the underlying platform does not
  // provide wait morphing.

  if (s < 1 && index != -1) {
    // thread is definitely parked
    status = pthread_cond_signal(&_cond[index]);
    assert_status(status == 0, status, "invariant");
  }
}
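
// Illustrative sketch (not part of the surrounding implementation): Parker
// carries the permit semantics behind java.util.concurrent.locks.LockSupport.
// unpark() grants at most one permit; park() consumes a pending permit or
// blocks, e.g.
//
//   Parker* p = ...;     // the target thread's parker (accessor elided here)
//   p->unpark();         // post a permit; at most one is remembered
//   p->park(false, 0);   // relative, untimed: returns immediately
//                        // because a permit is pending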

// Platform Mutex/Monitor implementation

#if PLATFORM_MONITOR_IMPL_INDIRECT

os::PlatformMutex::Mutex::Mutex() : _next(NULL) {
  int status = pthread_mutex_init(&_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
}

os::PlatformMutex::Mutex::~Mutex() {
  int status = pthread_mutex_destroy(&_mutex);
  assert_status(status == 0, status, "mutex_destroy");
}

pthread_mutex_t os::PlatformMutex::_freelist_lock;
os::PlatformMutex::Mutex* os::PlatformMutex::_mutex_freelist = NULL;

void os::PlatformMutex::init() {
  int status = pthread_mutex_init(&_freelist_lock, _mutexAttr);
  assert_status(status == 0, status, "freelist lock init");
}

struct os::PlatformMutex::WithFreeListLocked : public StackObj {
  WithFreeListLocked() {
    int status = pthread_mutex_lock(&_freelist_lock);
    assert_status(status == 0, status, "freelist lock");
  }

  ~WithFreeListLocked() {
    int status = pthread_mutex_unlock(&_freelist_lock);
    assert_status(status == 0, status, "freelist unlock");
  }
};

os::PlatformMutex::PlatformMutex() {
  {
    WithFreeListLocked wfl;
    _impl = _mutex_freelist;
    if (_impl != NULL) {
      _mutex_freelist = _impl->_next;
      _impl->_next = NULL;
      return;
    }
  }
  _impl = new Mutex();
}

os::PlatformMutex::~PlatformMutex() {
  WithFreeListLocked wfl;
  assert(_impl->_next == NULL, "invariant");
  _impl->_next = _mutex_freelist;
  _mutex_freelist = _impl;
}

os::PlatformMonitor::Cond::Cond() : _next(NULL) {
  int status = pthread_cond_init(&_cond, _condAttr);
  assert_status(status == 0, status, "cond_init");
}

os::PlatformMonitor::Cond::~Cond() {
  int status = pthread_cond_destroy(&_cond);
  assert_status(status == 0, status, "cond_destroy");
}

os::PlatformMonitor::Cond* os::PlatformMonitor::_cond_freelist = NULL;

os::PlatformMonitor::PlatformMonitor() {
  {
    WithFreeListLocked wfl;
    _impl = _cond_freelist;
    if (_impl != NULL) {
      _cond_freelist = _impl->_next;
      _impl->_next = NULL;
      return;
    }
  }
  _impl = new Cond();
}

os::PlatformMonitor::~PlatformMonitor() {
  WithFreeListLocked wfl;
  assert(_impl->_next == NULL, "invariant");
  _impl->_next = _cond_freelist;
  _cond_freelist = _impl;
}
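
// Note: in this indirect implementation the pthread objects are effectively
// immortal -- the PlatformMutex/PlatformMonitor destructors push the backing
// Mutex/Cond onto a freelist guarded by _freelist_lock instead of calling
// pthread_*_destroy(), and the constructors pop from that freelist before
// allocating anew. Recycling rather than destroying avoids ever handing a
// destroyed pthread primitive to a thread that may still be inside a pthread
// call on it, which is the motivation for the indirection on some platforms.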

#else

os::PlatformMutex::PlatformMutex() {
  int status = pthread_mutex_init(&_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
}

os::PlatformMutex::~PlatformMutex() {
  int status = pthread_mutex_destroy(&_mutex);
  assert_status(status == 0, status, "mutex_destroy");
}

os::PlatformMonitor::PlatformMonitor() {
  int status = pthread_cond_init(&_cond, _condAttr);
  assert_status(status == 0, status, "cond_init");
}

os::PlatformMonitor::~PlatformMonitor() {
  int status = pthread_cond_destroy(&_cond);
  assert_status(status == 0, status, "cond_destroy");
}

#endif // PLATFORM_MONITOR_IMPL_INDIRECT

// Must already be locked
int os::PlatformMonitor::wait(jlong millis) {
  assert(millis >= 0, "negative timeout");
  if (millis > 0) {
    struct timespec abst;
    // We have to watch for overflow when converting millis to nanos,
    // but if millis is that large then we will end up limiting to
    // MAX_SECS anyway, so just do that here.
    if (millis / MILLIUNITS > MAX_SECS) {
      millis = jlong(MAX_SECS) * MILLIUNITS;
    }
    to_abstime(&abst, millis_to_nanos(millis), false, false);

    int ret = OS_TIMEOUT;
    int status = pthread_cond_timedwait(cond(), mutex(), &abst);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (status == 0) {
      ret = OS_OK;
    }
    return ret;
  } else {
    int status = pthread_cond_wait(cond(), mutex());
    assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
                  status, "cond_wait");
    return OS_OK;
  }
}
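
// Illustrative sketch (not part of the surrounding implementation, and
// assuming the lock()/unlock() operations inherited from PlatformMutex):
// the monitor must be held on entry, and the predicate is re-tested in a
// loop because wait() may return without a notify, e.g.
//
//   monitor->lock();
//   while (!ready) {      // 'ready' is a hypothetical shared condition
//     monitor->wait(0);   // 0 => untimed wait
//   }
//   monitor->unlock();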

// Darwin has no "environ" in a dynamic library.
#ifdef __APPLE__
  #define environ (*_NSGetEnviron())
#else
extern char** environ;
#endif

char** os::get_environ() { return environ; }

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Notes: -Unlike system(), this function can be called from a signal handler. It
//         doesn't block SIGINT et al.
//        -This function is unsafe to use in non-error situations, mainly
//         because the child process will inherit all parent descriptors.
int os::fork_and_exec(const char* cmd, bool prefer_vfork) {
  const char * argv[4] = {"sh", "-c", cmd, NULL};

  pid_t pid;

  char** env = os::get_environ();

  // Always use vfork on AIX, since it's safe and helps with analyzing OOM situations.
  // Otherwise leave it up to the caller.
  AIX_ONLY(prefer_vfork = true;)
  pid = prefer_vfork ? ::vfork() : ::fork();

  if (pid < 0) {
    // fork failed
    return -1;

  } else if (pid == 0) {
    // child process

    ::execve("/bin/sh", (char* const*)argv, env);

    // execve failed
    ::_exit(-1);

  } else {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited.
    while (::waitpid(pid, &status, 0) < 0) {
      switch (errno) {
        case ECHILD: return 0;
        case EINTR: break;
        default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal.
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through.
      return status;
    }
  }
}
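
// Illustrative sketch (not part of the surrounding implementation): decoding
// the return value according to the shell convention described above, e.g.
//
//   int rc = os::fork_and_exec("ls /tmp", false);
//   if (rc < 0) {
//     // the fork/exec machinery itself failed
//   } else if (rc >= 0x80) {
//     // the child was killed by a signal (signal number: rc - 0x80)
//   } else {
//     // the child's ordinary exit status
//   }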

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }

}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// Also note we can abort while other threads continue to run, so we can
// easily trigger secondary faults in those threads. To reduce the likelihood
// of that we use _exit rather than exit, so that no atexit hooks get run.
// But note that os::shutdown() could also trigger secondary faults.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
    LINUX_ONLY(if (DumpPrivateMappingsInCore) ClassLoader::close_jrt_image();)
    ::abort(); // dump core
  }
  ::_exit(1);
}
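
// Note: ::_exit(1) terminates without running atexit()/on_exit handlers or
// C++ static destructors, so nothing registered via std::atexit executes on
// this path; only the dump_core branch, via ::abort(), raises SIGABRT and
// can produce a core file.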

// Die immediately, no exit hook, no abort hook, no cleanup.
// Dump a core file, if possible, for debugging.
void os::die() {
  if (TestUnresponsiveErrorHandler && !CreateCoredumpOnCrash) {
    // For TimeoutInErrorHandlingTest.java, we just kill the VM
    // and don't take the time to generate a core file.
    os::signal_raise(SIGKILL);
  } else {
    ::abort();
  }
}