GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/os/posix/os_posix.cpp
/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "jvm.h"
#ifdef LINUX
#include "classfile/classLoader.hpp"
#endif
#include "jvmtifiles/jvmti.h"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "os_posix.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/osThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "services/attachListener.hpp"
#include "services/memTracker.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/perfMemory.hpp"
#include "utilities/align.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

#include <dirent.h>
#include <dlfcn.h>
#include <grp.h>
#include <netdb.h>
#include <pwd.h>
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include <utmpx.h>

#ifdef __APPLE__
  #include <crt_externs.h>
#endif

#define ROOT_UID 0

#ifndef MAP_ANONYMOUS
  #define MAP_ANONYMOUS MAP_ANON
#endif

#define check_with_errno(check_type, cond, msg)                             \
  do {                                                                      \
    int err = errno;                                                        \
    check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err),   \
               os::errno_name(err));                                        \
} while (false)

#define assert_with_errno(cond, msg)    check_with_errno(assert, cond, msg)
#define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg)
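
// Usage sketch (illustrative, mirroring the unlink() check later in this
// file): wrap a POSIX call that sets errno on failure, e.g.
//   int ret = unlink(fullname);
//   assert_with_errno(ret == 0, "unlink returned error");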

// Check core dump limit and report possible place where core can be found
void os::check_dump_limit(char* buffer, size_t bufferSize) {
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line");
    VMError::record_coredump_status(buffer, false);
    return;
  }

  int n;
  struct rlimit rlim;
  bool success;

  char core_path[PATH_MAX];
  n = get_core_path(core_path, PATH_MAX);

  if (n <= 0) {
    jio_snprintf(buffer, bufferSize, "core.%d (may not exist)", current_process_id());
    success = true;
#ifdef LINUX
  } else if (core_path[0] == '"') { // redirect to user process
    jio_snprintf(buffer, bufferSize, "Core dumps may be processed with %s", core_path);
    success = true;
#endif
  } else if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
    jio_snprintf(buffer, bufferSize, "%s (may not exist)", core_path);
    success = true;
  } else {
    switch (rlim.rlim_cur) {
      case RLIM_INFINITY:
        jio_snprintf(buffer, bufferSize, "%s", core_path);
        success = true;
        break;
      case 0:
        jio_snprintf(buffer, bufferSize, "Core dumps have been disabled. To enable core dumping, try \"ulimit -c unlimited\" before starting Java again");
        success = false;
        break;
      default:
        jio_snprintf(buffer, bufferSize, "%s (max size " UINT64_FORMAT " kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", core_path, uint64_t(rlim.rlim_cur) / 1024);
        success = true;
        break;
    }
  }

  VMError::record_coredump_status(buffer, success);
}

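// Capture up to 'frames' return addresses from the current native call chain,
// skipping the innermost 'toSkip' frames. Unused slots in 'stack' are
// NULL-filled and the number of frames actually captured is returned.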
int os::get_native_stack(address* stack, int frames, int toSkip) {
  int frame_idx = 0;
  int num_of_frames;  // number of frames captured
  frame fr = os::current_frame();
  while (fr.pc() && frame_idx < frames) {
    if (toSkip > 0) {
      toSkip--;
    } else {
      stack[frame_idx++] = fr.pc();
    }
    if (fr.fp() == NULL || fr.cb() != NULL ||
        fr.sender_pc() == NULL || os::is_first_C_frame(&fr)) break;

    if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
      fr = os::get_sender_for_C_frame(&fr);
    } else {
      break;
    }
  }
  num_of_frames = frame_idx;
  for (; frame_idx < frames; frame_idx++) {
    stack[frame_idx] = NULL;
  }

  return num_of_frames;
}


bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  return (::unsetenv(name) == 0);
}

int os::get_last_error() {
  return errno;
}

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = os::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

void os::wait_for_keypress_at_exit(void) {
  // don't do anything on posix platforms
  return;
}

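// Create a backing file for the Java heap in the given directory (used when a
// heap backing file is requested, e.g. via -XX:AllocateHeapAt). Prefers
// O_TMPFILE on Linux; otherwise falls back to mkstemp() + unlink() so the
// file is reclaimed automatically once 'fd' is closed.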
int os::create_file_for_heap(const char* dir) {
  int fd;

#if defined(LINUX) && defined(O_TMPFILE)
  char* native_dir = os::strdup(dir);
  if (native_dir == NULL) {
    vm_exit_during_initialization(err_msg("strdup failed during creation of backing file for heap (%s)", os::strerror(errno)));
    return -1;
  }
  os::native_path(native_dir);
  fd = os::open(dir, O_TMPFILE | O_RDWR, S_IRUSR | S_IWUSR);
  os::free(native_dir);

  if (fd == -1)
#endif
  {
    const char name_template[] = "/jvmheap.XXXXXX";

    size_t fullname_len = strlen(dir) + strlen(name_template);
    char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
    if (fullname == NULL) {
      vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
      return -1;
    }
    int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
    assert((size_t)n == fullname_len, "Unexpected number of characters in string");

    os::native_path(fullname);

    // create a new file.
    fd = mkstemp(fullname);

    if (fd < 0) {
      warning("Could not create file for heap with template %s", fullname);
      os::free(fullname);
      return -1;
    } else {
      // delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted.
      int ret = unlink(fullname);
      assert_with_errno(ret == 0, "unlink returned error");
    }

    os::free(fullname);
  }

  return fd;
}

// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = ::opendir(path);
  if (dir == NULL) return true;

  // Scan the directory
  bool result = true;
  while (result && (ptr = ::readdir(dir)) != NULL) {
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      result = false;
    }
  }
  ::closedir(dir);
  return result;
}

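// Reserve 'bytes' of anonymous memory, optionally at 'requested_addr'
// (MAP_FIXED). Pages are mapped PROT_NONE, i.e. reserved but not committed.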
static char* reserve_mmapped_memory(size_t bytes, char* requested_addr) {
  char * addr;
  int flags = MAP_PRIVATE NOT_AIX( | MAP_NORESERVE ) | MAP_ANONYMOUS;
  if (requested_addr != NULL) {
    assert((uintptr_t)requested_addr % os::vm_page_size() == 0, "Requested address should be aligned to OS page size");
    flags |= MAP_FIXED;
  }

  // Map reserved/uncommitted pages PROT_NONE so we fail early if we
  // touch an uncommitted page. Otherwise, the read/write might
  // succeed if we have enough swap space to back the physical page.
  addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
                       flags, -1, 0);

  if (addr != MAP_FAILED) {
    MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC);
    return addr;
  }
  return NULL;
}

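// posix_fallocate(3) is not available on macOS, so emulate it there with
// fcntl(F_PREALLOCATE) (contiguous space first, then any blocks) followed by
// ftruncate() to extend the file to the requested length.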
static int util_posix_fallocate(int fd, off_t offset, off_t len) {
#ifdef __APPLE__
  fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len };
  // First we try to get a continuous chunk of disk space
  int ret = fcntl(fd, F_PREALLOCATE, &store);
  if (ret == -1) {
    // Maybe we are too fragmented, try to allocate non-continuous range
    store.fst_flags = F_ALLOCATEALL;
    ret = fcntl(fd, F_PREALLOCATE, &store);
  }
  if (ret != -1) {
    return ftruncate(fd, len);
  }
  return -1;
#else
  return posix_fallocate(fd, offset, len);
#endif
}

// Map the given address range to the provided file descriptor.
char* os::map_memory_to_file(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");

  // allocate space for the file
  int ret = util_posix_fallocate(fd, 0, (off_t)size);
  if (ret != 0) {
    vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory. error(%d)", ret));
    return NULL;
  }

  int prot = PROT_READ | PROT_WRITE;
  int flags = MAP_SHARED;
  if (base != NULL) {
    flags |= MAP_FIXED;
  }
  char* addr = (char*)mmap(base, size, prot, flags, fd, 0);

  if (addr == MAP_FAILED) {
    warning("Failed mmap to file. (%s)", os::strerror(errno));
    return NULL;
  }
  if (base != NULL && addr != base) {
    if (!os::release_memory(addr, size)) {
      warning("Could not release memory on unsuccessful file mapping");
    }
    return NULL;
  }
  return addr;
}

char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");
  assert(base != NULL, "Base cannot be NULL");

  return map_memory_to_file(base, size, fd);
}

static size_t calculate_aligned_extra_size(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment - 1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");
  return extra_size;
}

// After a bigger chunk was mapped, unmaps start and end parts to get the requested alignment.
static char* chop_extra_memory(size_t size, size_t alignment, char* extra_base, size_t extra_size) {
  // Do manual alignment
  char* aligned_base = align_up(extra_base, alignment);

  // [  |                                       |  ]
  // ^ extra_base
  //    ^ extra_base + begin_offset == aligned_base
  //     extra_base + begin_offset + size       ^
  //                       extra_base + extra_size ^
  // |<>| == begin_offset
  //                              end_offset == |<>|
  size_t begin_offset = aligned_base - extra_base;
  size_t end_offset = (extra_base + extra_size) - (aligned_base + size);

  if (begin_offset > 0) {
    os::release_memory(extra_base, begin_offset);
  }

  if (end_offset > 0) {
    os::release_memory(extra_base + begin_offset + size, end_offset);
  }

  return aligned_base;
}

// Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
// so on posix, unmap the section at the start and at the end of the chunk that we mapped
// rather than unmapping and remapping the whole chunk to get requested alignment.
char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
  size_t extra_size = calculate_aligned_extra_size(size, alignment);
  char* extra_base = os::reserve_memory(extra_size, exec);
  if (extra_base == NULL) {
    return NULL;
  }
  return chop_extra_memory(size, alignment, extra_base, extra_size);
}

char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_desc) {
  size_t extra_size = calculate_aligned_extra_size(size, alignment);
  // For file mapping, we do not call os::map_memory_to_file(size, fd) since:
  // - we later chop away parts of the mapping using os::release_memory, and that could fail if the
  //   original mmap call had been tied to an fd.
  // - The memory API os::reserve_memory uses is an implementation detail. It may be (and usually is)
  //   mmap, but it may also be System V shared memory, which cannot be uncommitted as a whole, so
  //   chopping off and unmapping excess bits back and front (see below) would not work.
  char* extra_base = reserve_mmapped_memory(extra_size, NULL);
  if (extra_base == NULL) {
    return NULL;
  }
  char* aligned_base = chop_extra_memory(size, alignment, extra_base, extra_size);
  // After we have an aligned address, we can replace anonymous mapping with file mapping
  if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == NULL) {
    vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
  }
  MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
  return aligned_base;
}

int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
  // All supported POSIX platforms provide C99 semantics.
  int result = ::vsnprintf(buf, len, fmt, args);
  // If an encoding error occurred (result < 0) then it's not clear
  // whether the buffer is NUL terminated, so ensure it is.
  if ((result < 0) && (len > 0)) {
    buf[len - 1] = '\0';
  }
  return result;
}

int os::get_fileno(FILE* fp) {
  return NOT_AIX(::)fileno(fp);
}

struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
  return gmtime_r(clock, res);
}

void os::Posix::print_load_average(outputStream* st) {
  st->print("load average: ");
  double loadavg[3];
  int res = os::loadavg(loadavg, 3);
  if (res != -1) {
    st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  } else {
    st->print(" Unavailable");
  }
  st->cr();
}

// boot/uptime information;
// unfortunately it does not work on macOS or Linux because the utx chain has
// no entry for reboot, at least on my test machines
void os::Posix::print_uptime_info(outputStream* st) {
  int bootsec = -1;
  int currsec = time(NULL);
  struct utmpx* ent;
  setutxent();
  while ((ent = getutxent())) {
    if (!strcmp("system boot", ent->ut_line)) {
      bootsec = ent->ut_tv.tv_sec;
      break;
    }
  }

  if (bootsec != -1) {
    os::print_dhm(st, "OS uptime:", (long) (currsec - bootsec));
  }
}

static void print_rlimit(outputStream* st, const char* msg,
                         int resource, bool output_k = false) {
  struct rlimit rlim;

  st->print(" %s ", msg);
  int res = getrlimit(resource, &rlim);
  if (res == -1) {
    st->print("could not obtain value");
  } else {
    // soft limit
    if (rlim.rlim_cur == RLIM_INFINITY) { st->print("infinity"); }
    else {
      if (output_k) { st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024); }
      else { st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur)); }
    }
    // hard limit
    st->print("/");
    if (rlim.rlim_max == RLIM_INFINITY) { st->print("infinity"); }
    else {
      if (output_k) { st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_max) / 1024); }
      else { st->print(UINT64_FORMAT, uint64_t(rlim.rlim_max)); }
    }
  }
}

void os::Posix::print_rlimit_info(outputStream* st) {
  st->print("rlimit (soft/hard):");
  print_rlimit(st, "STACK", RLIMIT_STACK, true);
  print_rlimit(st, ", CORE", RLIMIT_CORE, true);

#if defined(AIX)
  st->print(", NPROC ");
  st->print("%d", sysconf(_SC_CHILD_MAX));

  print_rlimit(st, ", THREADS", RLIMIT_THREADS);
#else
  print_rlimit(st, ", NPROC", RLIMIT_NPROC);
#endif

  print_rlimit(st, ", NOFILE", RLIMIT_NOFILE);
  print_rlimit(st, ", AS", RLIMIT_AS, true);
  print_rlimit(st, ", CPU", RLIMIT_CPU);
  print_rlimit(st, ", DATA", RLIMIT_DATA, true);

  // maximum size of files that the process may create
  print_rlimit(st, ", FSIZE", RLIMIT_FSIZE, true);

#if defined(LINUX) || defined(__APPLE__)
  // maximum number of bytes of memory that may be locked into RAM
  // (rounded down to the nearest multiple of system pagesize)
  print_rlimit(st, ", MEMLOCK", RLIMIT_MEMLOCK, true);
#endif

  // MacOS; The maximum size (in bytes) to which a process's resident set size may grow.
#if defined(__APPLE__)
  print_rlimit(st, ", RSS", RLIMIT_RSS, true);
#endif

  st->cr();
}

void os::Posix::print_uname_info(outputStream* st) {
  // kernel
  st->print("uname: ");
  struct utsname name;
  uname(&name);
  st->print("%s ", name.sysname);
#ifdef ASSERT
  st->print("%s ", name.nodename);
#endif
  st->print("%s ", name.release);
  st->print("%s ", name.version);
  st->print("%s", name.machine);
  st->cr();
}

void os::Posix::print_umask(outputStream* st, mode_t umsk) {
  st->print((umsk & S_IRUSR) ? "r" : "-");
  st->print((umsk & S_IWUSR) ? "w" : "-");
  st->print((umsk & S_IXUSR) ? "x" : "-");
  st->print((umsk & S_IRGRP) ? "r" : "-");
  st->print((umsk & S_IWGRP) ? "w" : "-");
  st->print((umsk & S_IXGRP) ? "x" : "-");
  st->print((umsk & S_IROTH) ? "r" : "-");
  st->print((umsk & S_IWOTH) ? "w" : "-");
  st->print((umsk & S_IXOTH) ? "x" : "-");
}

void os::Posix::print_user_info(outputStream* st) {
  unsigned id = (unsigned) ::getuid();
  st->print("uid : %u ", id);
  id = (unsigned) ::geteuid();
  st->print("euid : %u ", id);
  id = (unsigned) ::getgid();
  st->print("gid : %u ", id);
  id = (unsigned) ::getegid();
  st->print_cr("egid : %u", id);
  st->cr();

  mode_t umsk = ::umask(0);
  ::umask(umsk);
  st->print("umask: %04o (", (unsigned) umsk);
  print_umask(st, umsk);
  st->print_cr(")");
  st->cr();
}


bool os::get_host_name(char* buf, size_t buflen) {
  struct utsname name;
  uname(&name);
  jio_snprintf(buf, buflen, "%s", name.nodename);
  return true;
}

#ifndef _LP64
// Helper, on 32bit, for os::has_allocatable_memory_limit
static bool is_allocatable(size_t s) {
  if (s < 2 * G) {
    return true;
  }
  // Use raw anonymous mmap here; no need to go through any
  // of our reservation layers. We will unmap right away.
  void* p = ::mmap(NULL, s, PROT_NONE,
                   MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    return false;
  } else {
    ::munmap(p, s);
    return true;
  }
}
#endif // !_LP64


bool os::has_allocatable_memory_limit(size_t* limit) {
  struct rlimit rlim;
  int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
  // if there was an error when calling getrlimit, assume that there is no limitation
  // on virtual memory.
  bool result;
  if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) {
    result = false;
  } else {
    *limit = (size_t)rlim.rlim_cur;
    result = true;
  }
#ifdef _LP64
  return result;
#else
  // arbitrary virtual space limit for 32 bit Unices found by testing. If
  // getrlimit above returned a limit, bound it with this limit. Otherwise
  // directly use it.
  const size_t max_virtual_limit = 3800*M;
  if (result) {
    *limit = MIN2(*limit, max_virtual_limit);
  } else {
    *limit = max_virtual_limit;
  }

  // bound by actually allocatable memory. The algorithm uses two bounds, an
  // upper and a lower limit. The upper limit is the current highest amount of
  // memory that could not be allocated, the lower limit is the current highest
  // amount of memory that could be allocated.
  // The algorithm iteratively refines the result by halving the difference
  // between these limits, updating either the upper limit (if that value could
  // not be allocated) or the lower limit (if that value could be allocated)
  // until the difference between these limits is "small".
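  // For example, with an upper limit of 3800M and 1M granularity, the binary
  // search below converges in at most about log2(3800) ~ 12 halving steps.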

  // the minimum amount of memory we care about allocating.
  const size_t min_allocation_size = M;

  size_t upper_limit = *limit;

  // first check a few trivial cases
  if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) {
    *limit = upper_limit;
  } else if (!is_allocatable(min_allocation_size)) {
    // we found that not even min_allocation_size is allocatable. Return it
    // anyway. There is no point to search for a better value any more.
    *limit = min_allocation_size;
  } else {
    // perform the binary search.
    size_t lower_limit = min_allocation_size;
    while ((upper_limit - lower_limit) > min_allocation_size) {
      size_t temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
      temp_limit = align_down(temp_limit, min_allocation_size);
      if (is_allocatable(temp_limit)) {
        lower_limit = temp_limit;
      } else {
        upper_limit = temp_limit;
      }
    }
    *limit = lower_limit;
  }
  return true;
#endif
}

void* os::get_default_process_handle() {
#ifdef __APPLE__
  // MacOS X needs to use RTLD_FIRST instead of RTLD_LAZY
  // to avoid finding unexpected symbols on second (or later)
  // loads of a library.
  return (void*)::dlopen(NULL, RTLD_FIRST);
#else
  return (void*)::dlopen(NULL, RTLD_LAZY);
#endif
}

void* os::dll_lookup(void* handle, const char* name) {
  return dlsym(handle, name);
}

void os::dll_unload(void *lib) {
  const char* l_path = LINUX_ONLY(os::Linux::dll_path(lib))
                       NOT_LINUX("<not available>");
  if (l_path == NULL) l_path = "<not available>";
  int res = ::dlclose(lib);

  if (res == 0) {
    Events::log_dll_message(NULL, "Unloaded shared library \"%s\" [" INTPTR_FORMAT "]",
                            l_path, p2i(lib));
    log_info(os)("Unloaded shared library \"%s\" [" INTPTR_FORMAT "]", l_path, p2i(lib));
  } else {
    const char* error_report = ::dlerror();
    if (error_report == NULL) {
      error_report = "dlerror returned no error description";
    }

    Events::log_dll_message(NULL, "Attempt to unload shared library \"%s\" [" INTPTR_FORMAT "] failed, %s",
                            l_path, p2i(lib), error_report);
    log_info(os)("Attempt to unload shared library \"%s\" [" INTPTR_FORMAT "] failed, %s",
                 l_path, p2i(lib), error_report);
  }
}

jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) BSD_ONLY(::lseek) NOT_BSD(::lseek64)(fd, offset, whence);
}

int os::fsync(int fd) {
  return ::fsync(fd);
}

int os::ftruncate(int fd, jlong length) {
  return BSD_ONLY(::ftruncate) NOT_BSD(::ftruncate64)(fd, length);
}

const char* os::get_current_directory(char *buf, size_t buflen) {
  return getcwd(buf, buflen);
}

FILE* os::open(int fd, const char* mode) {
  return ::fdopen(fd, mode);
}

size_t os::write(int fd, const void *buf, unsigned int nBytes) {
  size_t res;
  RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
  return res;
}

ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}

int os::close(int fd) {
  return ::close(fd);
}

void os::flockfile(FILE* fp) {
  ::flockfile(fp);
}

void os::funlockfile(FILE* fp) {
  ::funlockfile(fp);
}

DIR* os::opendir(const char* dirname) {
  assert(dirname != NULL, "just checking");
  return ::opendir(dirname);
}

struct dirent* os::readdir(DIR* dirp) {
  assert(dirp != NULL, "just checking");
  return ::readdir(dirp);
}

int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");
  return ::closedir(dirp);
}

int os::socket_close(int fd) {
  return ::close(fd);
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  RESTARTABLE_RETURN_INT(::recv(fd, buf, nBytes, flags));
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags));
}

int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return os::send(fd, buf, nBytes, flags);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  RESTARTABLE_RETURN_INT(::connect(fd, him, len));
}

struct hostent* os::get_host_by_name(char* name) {
  return ::gethostbyname(name);
}

void os::exit(int num) {
  ::exit(num);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Parameters:
//   sym_name: Symbol in library we are looking for
//   lib_name: Name of library to look in, NULL for shared libs.
//   is_absolute_path == true if lib_name is absolute path to agent
//                            such as "/a/b/libL.so"
//                    == false if only the base name of the library is passed in
//                             such as "L"
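// For example, sym_name "Agent_OnLoad" with lib_name "/a/b/libL.so" and
// is_absolute_path == true yields "Agent_OnLoad_L".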
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      }
      if (strlen(lib_name) <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  strcpy(agent_entry_name, sym_name);
  if (lib_name != NULL) {
    strcat(agent_entry_name, "_");
    strncat(agent_entry_name, lib_name, name_len);
  }
  return agent_entry_name;
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    ::sleep(100);   // ... 100 seconds at a time
  }
}

void os::naked_short_nanosleep(jlong ns) {
  struct timespec req;
  assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");
  req.tv_sec = 0;
  req.tv_nsec = ns;
  ::nanosleep(&req, NULL);
  return;
}

void os::naked_short_sleep(jlong ms) {
  assert(ms < MILLIUNITS, "Un-interruptable sleep, short time use only");
  os::naked_short_nanosleep(millis_to_nanos(ms));
  return;
}

char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr) {
  size_t stack_size = 0;
  size_t guard_size = 0;
  int detachstate = 0;
  pthread_attr_getstacksize(attr, &stack_size);
  pthread_attr_getguardsize(attr, &guard_size);
  // Work around linux NPTL implementation error, see also os::create_thread() in os_linux.cpp.
  LINUX_ONLY(stack_size -= guard_size);
  pthread_attr_getdetachstate(attr, &detachstate);
  jio_snprintf(buf, buflen, "stacksize: " SIZE_FORMAT "k, guardsize: " SIZE_FORMAT "k, %s",
               stack_size / 1024, guard_size / 1024,
               (detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable"));
  return buf;
}

char* os::Posix::realpath(const char* filename, char* outbuf, size_t outbuflen) {

  if (filename == NULL || outbuf == NULL || outbuflen < 1) {
    assert(false, "os::Posix::realpath: invalid arguments.");
    errno = EINVAL;
    return NULL;
  }

  char* result = NULL;

  // This assumes platform realpath() is implemented according to POSIX.1-2008.
  // POSIX.1-2008 allows to specify NULL for the output buffer, in which case
  // output buffer is dynamically allocated and must be ::free()'d by the caller.
  char* p = ::realpath(filename, NULL);
  if (p != NULL) {
    if (strlen(p) < outbuflen) {
      strcpy(outbuf, p);
      result = outbuf;
    } else {
      errno = ENAMETOOLONG;
    }
    ::free(p); // *not* os::free
  } else {
    // Fallback for platforms struggling with modern Posix standards (AIX 5.3, 6.1). If realpath
    // returns EINVAL, this may indicate that realpath is not POSIX.1-2008 compatible and
    // that it complains about the NULL we handed down as user buffer.
    // In this case, use the user provided buffer but at least check whether realpath caused
    // a memory overwrite.
    if (errno == EINVAL) {
      outbuf[outbuflen - 1] = '\0';
      p = ::realpath(filename, outbuf);
      if (p != NULL) {
        guarantee(outbuf[outbuflen - 1] == '\0', "realpath buffer overwrite detected.");
        result = p;
      }
    }
  }
  return result;

}

int os::stat(const char *path, struct stat *sbuf) {
  return ::stat(path, sbuf);
}

char * os::native_path(char *path) {
  return path;
}

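// Returns true if the two paths denote the same file: both NULL, textually
// identical, or resolving to the same device/inode pair.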
bool os::same_files(const char* file1, const char* file2) {
  if (file1 == nullptr && file2 == nullptr) {
    return true;
  }

  if (file1 == nullptr || file2 == nullptr) {
    return false;
  }

  if (strcmp(file1, file2) == 0) {
    return true;
  }

  bool is_same = false;
  struct stat st1;
  struct stat st2;

  if (os::stat(file1, &st1) < 0) {
    return false;
  }

  if (os::stat(file2, &st2) < 0) {
    return false;
  }

  if (st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino) {
    // same files
    is_same = true;
  }
  return is_same;
}

// Check minimum allowable stack sizes for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size.
// The space needed for frames during startup is platform dependent. It
// depends on word size, platform calling conventions, C frame layout and
// interpreter/C1/C2 design decisions. Therefore this is given in a
// platform (os/cpu) dependent constant.
// To this, space for guard mechanisms is added, which depends on the
// page size which again depends on the concrete system the VM is running
// on. Space for libc guard pages is not included in this size.
jint os::Posix::set_minimum_stack_sizes() {
  size_t os_min_stack_allowed = PTHREAD_STACK_MIN;

  _java_thread_min_stack_allowed = _java_thread_min_stack_allowed +
                                   StackOverflow::stack_guard_zone_size() +
                                   StackOverflow::stack_shadow_zone_size();

  _java_thread_min_stack_allowed = align_up(_java_thread_min_stack_allowed, vm_page_size());
  _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, os_min_stack_allowed);

  size_t stack_size_in_bytes = ThreadStackSize * K;
  if (stack_size_in_bytes != 0 &&
      stack_size_in_bytes < _java_thread_min_stack_allowed) {
    // The '-Xss' and '-XX:ThreadStackSize=N' options both set
    // ThreadStackSize so we go with "Java thread stack size" instead
    // of "ThreadStackSize" to be more friendly.
    tty->print_cr("\nThe Java thread stack size specified is too small. "
                  "Specify at least " SIZE_FORMAT "k",
                  _java_thread_min_stack_allowed / K);
    return JNI_ERR;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(align_up(stack_size_in_bytes, vm_page_size()));

  // Reminder: a compiler thread is a Java thread.
  _compiler_thread_min_stack_allowed = _compiler_thread_min_stack_allowed +
                                       StackOverflow::stack_guard_zone_size() +
                                       StackOverflow::stack_shadow_zone_size();

  _compiler_thread_min_stack_allowed = align_up(_compiler_thread_min_stack_allowed, vm_page_size());
  _compiler_thread_min_stack_allowed = MAX2(_compiler_thread_min_stack_allowed, os_min_stack_allowed);

  stack_size_in_bytes = CompilerThreadStackSize * K;
  if (stack_size_in_bytes != 0 &&
      stack_size_in_bytes < _compiler_thread_min_stack_allowed) {
    tty->print_cr("\nThe CompilerThreadStackSize specified is too small. "
                  "Specify at least " SIZE_FORMAT "k",
                  _compiler_thread_min_stack_allowed / K);
    return JNI_ERR;
  }

  _vm_internal_thread_min_stack_allowed = align_up(_vm_internal_thread_min_stack_allowed, vm_page_size());
  _vm_internal_thread_min_stack_allowed = MAX2(_vm_internal_thread_min_stack_allowed, os_min_stack_allowed);

  stack_size_in_bytes = VMThreadStackSize * K;
  if (stack_size_in_bytes != 0 &&
      stack_size_in_bytes < _vm_internal_thread_min_stack_allowed) {
    tty->print_cr("\nThe VMThreadStackSize specified is too small. "
                  "Specify at least " SIZE_FORMAT "k",
                  _vm_internal_thread_min_stack_allowed / K);
    return JNI_ERR;
  }
  return JNI_OK;
}

// Called when creating the thread. The minimum stack sizes have already been calculated
size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
  size_t stack_size;
  if (req_stack_size == 0) {
    stack_size = default_stack_size(thr_type);
  } else {
    stack_size = req_stack_size;
  }

  switch (thr_type) {
  case os::java_thread:
    // Java threads use ThreadStackSize, whose default value can be
    // changed with the flag -Xss
    if (req_stack_size == 0 && JavaThread::stack_size_at_create() > 0) {
      // no requested size and we have a more specific default value
      stack_size = JavaThread::stack_size_at_create();
    }
    stack_size = MAX2(stack_size,
                      _java_thread_min_stack_allowed);
    break;
  case os::compiler_thread:
    if (req_stack_size == 0 && CompilerThreadStackSize > 0) {
      // no requested size and we have a more specific default value
      stack_size = (size_t)(CompilerThreadStackSize * K);
    }
    stack_size = MAX2(stack_size,
                      _compiler_thread_min_stack_allowed);
    break;
  case os::vm_thread:
  case os::pgc_thread:
  case os::cgc_thread:
  case os::watcher_thread:
  default:  // presume the unknown thr_type is a VM internal
    if (req_stack_size == 0 && VMThreadStackSize > 0) {
      // no requested size and we have a more specific default value
      stack_size = (size_t)(VMThreadStackSize * K);
    }

    stack_size = MAX2(stack_size,
                      _vm_internal_thread_min_stack_allowed);
    break;
  }

  // pthread_attr_setstacksize() may require that the size be rounded up to the OS page size.
  // Be careful not to round up to 0. Align down in that case.
  if (stack_size <= SIZE_MAX - vm_page_size()) {
    stack_size = align_up(stack_size, vm_page_size());
  } else {
    stack_size = align_down(stack_size, vm_page_size());
  }

  return stack_size;
}

#ifndef ZERO
#ifndef ARM
static bool get_frame_at_stack_banging_point(JavaThread* thread, address pc, const void* ucVoid, frame* fr) {
  if (Interpreter::contains(pc)) {
    // interpreter performs stack banging after the fixed frame header has
    // been generated while the compilers perform it before. To maintain
    // semantic consistency between interpreted and compiled frames, the
    // method returns the Java sender of the current frame.
    *fr = os::fetch_frame_from_context(ucVoid);
    if (!fr->is_first_java_frame()) {
      // get_frame_at_stack_banging_point() is only called when we
      // have well defined stacks so java_sender() calls do not need
      // to assert safe_for_sender() first.
      *fr = fr->java_sender();
    }
  } else {
    // more complex code with compiled code
    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
    CodeBlob* cb = CodeCache::find_blob(pc);
    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
      // Not sure where the pc points to, fallback to default
      // stack overflow handling
      return false;
    } else {
      // in compiled code, the stack banging is performed just after the return pc
      // has been pushed on the stack
      *fr = os::fetch_compiled_frame_from_context(ucVoid);
      if (!fr->is_java_frame()) {
        assert(!fr->is_first_frame(), "Safety check");
        // See java_sender() comment above.
        *fr = fr->java_sender();
      }
    }
  }
  assert(fr->is_java_frame(), "Safety check");
  return true;
}
#endif // ARM

// This returns true if the signal handler should just continue, i.e. return after calling this
bool os::Posix::handle_stack_overflow(JavaThread* thread, address addr, address pc,
                                      const void* ucVoid, address* stub) {
  // stack overflow
  StackOverflow* overflow_state = thread->stack_overflow_state();
  if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
    if (thread->thread_state() == _thread_in_Java) {
#ifndef ARM
      // arm32 doesn't have this
      if (overflow_state->in_stack_reserved_zone(addr)) {
        frame fr;
        if (get_frame_at_stack_banging_point(thread, pc, ucVoid, &fr)) {
          assert(fr.is_java_frame(), "Must be a Java frame");
          frame activation =
            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
          if (activation.sp() != NULL) {
            overflow_state->disable_stack_reserved_zone();
            if (activation.is_interpreted_frame()) {
              overflow_state->set_reserved_stack_activation((address)(activation.fp()
                // Some platforms use frame pointers for interpreter frames, others use initial sp.
#if !defined(PPC64) && !defined(S390)
                + frame::interpreter_frame_initial_sp_offset
#endif
                ));
            } else {
              overflow_state->set_reserved_stack_activation((address)activation.unextended_sp());
            }
            return true; // just continue
          }
        }
      }
#endif // ARM
      // Throw a stack overflow exception. Guard pages will be re-enabled
      // while unwinding the stack.
      overflow_state->disable_stack_yellow_reserved_zone();
      *stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
    } else {
      // Thread was in the vm or native code. Return and try to finish.
      overflow_state->disable_stack_yellow_reserved_zone();
      return true; // just continue
    }
  } else if (overflow_state->in_stack_red_zone(addr)) {
    // Fatal red zone violation. Disable the guard pages and fall through
    // to handle_unexpected_exception way down below.
    overflow_state->disable_stack_red_zone();
    tty->print_raw_cr("An irrecoverable stack overflow has occurred.");

    // This is a likely cause, but hard to verify. Let's just print
    // it as a hint.
    tty->print_raw_cr("Please check if any of your loaded .so files has "
                      "enabled executable stack (see man page execstack(8))");

  } else {
#if !defined(AIX) && !defined(__APPLE__)
    // bsd and aix don't have this

    // Accessing stack address below sp may cause SEGV if current
    // thread has MAP_GROWSDOWN stack. This should only happen when
    // current thread was created by user code with MAP_GROWSDOWN flag
    // and then attached to VM. See notes in os_linux.cpp.
    if (thread->osthread()->expanding_stack() == 0) {
      thread->osthread()->set_expanding_stack();
      if (os::Linux::manually_expand_stack(thread, addr)) {
        thread->osthread()->clear_expanding_stack();
        return true; // just continue
      }
      thread->osthread()->clear_expanding_stack();
    } else {
      fatal("recursive segv. expanding stack.");
    }
#else
    tty->print_raw_cr("SIGSEGV happened inside stack but outside yellow and red zone.");
#endif // AIX or BSD
  }
  return false;
}
#endif // ZERO

bool os::Posix::is_root(uid_t uid) {
  return ROOT_UID == uid;
}

bool os::Posix::matches_effective_uid_or_root(uid_t uid) {
  return is_root(uid) || geteuid() == uid;
}

bool os::Posix::matches_effective_uid_and_gid_or_root(uid_t uid, gid_t gid) {
  return is_root(uid) || (geteuid() == uid && getegid() == gid);
}

Thread* os::ThreadCrashProtection::_protected_thread = NULL;
os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;

os::ThreadCrashProtection::ThreadCrashProtection() {
  _protected_thread = Thread::current();
  assert(_protected_thread->is_JfrSampler_thread(), "should be JFRSampler");
}

/*
 * See the caveats for this class in os_posix.hpp
 * Protects the callback call so that SIGSEGV / SIGBUS jumps back into this
 * method and returns false. If none of the signals are raised, returns true.
 * The callback is supposed to provide the method that should be protected.
 */
bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
  sigset_t saved_sig_mask;

  // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask
  // since on at least some systems (OS X) siglongjmp will restore the mask
  // for the process, not the thread
  pthread_sigmask(0, NULL, &saved_sig_mask);
  if (sigsetjmp(_jmpbuf, 0) == 0) {
    // make sure we can see in the signal handler that we have crash protection
    // installed
    _crash_protection = this;
    cb.call();
    // and clear the crash protection
    _crash_protection = NULL;
    _protected_thread = NULL;
    return true;
  }
  // this happens when we siglongjmp() back
  pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
  _crash_protection = NULL;
  _protected_thread = NULL;
  return false;
}

void os::ThreadCrashProtection::restore() {
  assert(_crash_protection != NULL, "must have crash protection");
  siglongjmp(_jmpbuf, 1);
}

void os::ThreadCrashProtection::check_crash_protection(int sig,
                                                       Thread* thread) {

  if (thread != NULL &&
      thread == _protected_thread &&
      _crash_protection != NULL) {

    if (sig == SIGSEGV || sig == SIGBUS) {
      _crash_protection->restore();
    }
  }
}

// Shared clock/time and other supporting routines for pthread_mutex/cond
// initialization. This is enabled on Solaris but only some of the clock/time
// functionality is actually used there.

// Shared condattr object for use with relative timed-waits. Will be associated
// with CLOCK_MONOTONIC if available to avoid issues with time-of-day changes,
// but otherwise whatever default is used by the platform - generally the
// time-of-day clock.
static pthread_condattr_t _condAttr[1];

// Shared mutexattr to explicitly set the type to PTHREAD_MUTEX_NORMAL as not
// all systems (e.g. FreeBSD) map the default to "normal".
static pthread_mutexattr_t _mutexAttr[1];

// common basic initialization that is always supported
static void pthread_init_common(void) {
  int status;
  if ((status = pthread_condattr_init(_condAttr)) != 0) {
    fatal("pthread_condattr_init: %s", os::strerror(status));
  }
  if ((status = pthread_mutexattr_init(_mutexAttr)) != 0) {
    fatal("pthread_mutexattr_init: %s", os::strerror(status));
  }
  if ((status = pthread_mutexattr_settype(_mutexAttr, PTHREAD_MUTEX_NORMAL)) != 0) {
    fatal("pthread_mutexattr_settype: %s", os::strerror(status));
  }
  os::PlatformMutex::init();
}

static int (*_pthread_condattr_setclock)(pthread_condattr_t *, clockid_t) = NULL;

static bool _use_clock_monotonic_condattr = false;

// Determine what POSIX API's are present and do appropriate
// configuration.
void os::Posix::init(void) {

  // NOTE: no logging available when this is called. Put logging
  // statements in init_2().

  // Check for pthread_condattr_setclock support.

  // libpthread is already loaded.
  int (*condattr_setclock_func)(pthread_condattr_t*, clockid_t) =
    (int (*)(pthread_condattr_t*, clockid_t))dlsym(RTLD_DEFAULT,
                                                   "pthread_condattr_setclock");
  if (condattr_setclock_func != NULL) {
    _pthread_condattr_setclock = condattr_setclock_func;
  }

  // Now do general initialization.

  pthread_init_common();

  int status;
  if (_pthread_condattr_setclock != NULL) {
    if ((status = _pthread_condattr_setclock(_condAttr, CLOCK_MONOTONIC)) != 0) {
      if (status == EINVAL) {
        _use_clock_monotonic_condattr = false;
        warning("Unable to use monotonic clock with relative timed-waits" \
                " - changes to the time-of-day clock may have adverse effects");
      } else {
        fatal("pthread_condattr_setclock: %s", os::strerror(status));
      }
    } else {
      _use_clock_monotonic_condattr = true;
    }
  }
}

void os::Posix::init_2(void) {
  log_info(os)("Use of CLOCK_MONOTONIC is supported");
  log_info(os)("Use of pthread_condattr_setclock is%s supported",
               (_pthread_condattr_setclock != NULL ? "" : " not"));
  log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with %s",
               _use_clock_monotonic_condattr ? "CLOCK_MONOTONIC" : "the default clock");
}

// Utility to convert the given timeout to an absolute timespec
// (based on the appropriate clock) to use with pthread_cond_timedwait
// and sem_timedwait().
// The clock queried here must be the clock used to manage the
// timeout of the condition variable or semaphore.
//
// The passed in timeout value is either a relative time in nanoseconds
// or an absolute time in milliseconds. A relative timeout will be
// associated with CLOCK_MONOTONIC if available, unless the real-time clock
// is explicitly requested; otherwise, or if absolute,
// the default time-of-day clock will be used.

// Given time is a 64-bit value and the time_t used in the timespec is
// sometimes a signed-32-bit value we have to watch for overflow if times
// way in the future are given. Further on Solaris versions
// prior to 10 there is a restriction (see cond_timedwait) that the specified
// number of seconds, in abstime, is less than current_time + 100000000.
// As it will be over 20 years before "now + 100000000" will overflow we can
// ignore overflow and just impose a hard-limit on seconds using the value
// of "now + 100000000". This places a limit on the timeout of about 3.17
// years from "now".
//
#define MAX_SECS 100000000

// Calculate a new absolute time that is "timeout" nanoseconds from "now".
// "unit" indicates the unit of "now_part_sec" (may be nanos or micros depending
// on which clock API is being used).
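// For example (illustrative): a relative timeout of 2500000000 ns from
// now = {tv_sec = 100, tv_nsec = 0} yields abstime = {tv_sec = 102,
// tv_nsec = 500000000}, i.e. 2.5 seconds in the future.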
static void calc_rel_time(timespec* abstime, jlong timeout, jlong now_sec,
                          jlong now_part_sec, jlong unit) {
  time_t max_secs = now_sec + MAX_SECS;

  jlong seconds = timeout / NANOUNITS;
  timeout %= NANOUNITS; // remaining nanos

  if (seconds >= MAX_SECS) {
    // More seconds than we can add, so pin to max_secs.
    abstime->tv_sec = max_secs;
    abstime->tv_nsec = 0;
  } else {
    abstime->tv_sec = now_sec + seconds;
    long nanos = (now_part_sec * (NANOUNITS / unit)) + timeout;
    if (nanos >= NANOUNITS) { // overflow
      abstime->tv_sec += 1;
      nanos -= NANOUNITS;
    }
    abstime->tv_nsec = nanos;
  }
}

// Unpack the given deadline in milliseconds since the epoch, into the given timespec.
// The current time in seconds is also passed in to enforce an upper bound as discussed above.
static void unpack_abs_time(timespec* abstime, jlong deadline, jlong now_sec) {
  time_t max_secs = now_sec + MAX_SECS;

  jlong seconds = deadline / MILLIUNITS;
  jlong millis = deadline % MILLIUNITS;

  if (seconds >= max_secs) {
    // Absolute seconds exceeds allowed max, so pin to max_secs.
    abstime->tv_sec = max_secs;
    abstime->tv_nsec = 0;
  } else {
    abstime->tv_sec = seconds;
    abstime->tv_nsec = millis_to_nanos(millis);
  }
}

static jlong millis_to_nanos_bounded(jlong millis) {
  // We have to watch for overflow when converting millis to nanos,
  // but if millis is that large then we will end up limiting to
  // MAX_SECS anyway, so just do that here.
  if (millis / MILLIUNITS > MAX_SECS) {
    millis = jlong(MAX_SECS) * MILLIUNITS;
  }
  return millis_to_nanos(millis);
}

static void to_abstime(timespec* abstime, jlong timeout,
                       bool isAbsolute, bool isRealtime) {
  DEBUG_ONLY(int max_secs = MAX_SECS;)

  if (timeout < 0) {
    timeout = 0;
  }

  clockid_t clock = CLOCK_MONOTONIC;
  if (isAbsolute || (!_use_clock_monotonic_condattr || isRealtime)) {
    clock = CLOCK_REALTIME;
  }

  struct timespec now;
  int status = clock_gettime(clock, &now);
  assert(status == 0, "clock_gettime error: %s", os::strerror(errno));

  if (!isAbsolute) {
    calc_rel_time(abstime, timeout, now.tv_sec, now.tv_nsec, NANOUNITS);
  } else {
    unpack_abs_time(abstime, timeout, now.tv_sec);
  }
  DEBUG_ONLY(max_secs += now.tv_sec;)

  assert(abstime->tv_sec >= 0, "tv_sec < 0");
  assert(abstime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(abstime->tv_nsec >= 0, "tv_nsec < 0");
  assert(abstime->tv_nsec < NANOUNITS, "tv_nsec >= NANOUNITS");
}

// Create an absolute time 'millis' milliseconds in the future, using the
// real-time (time-of-day) clock. Used by PosixSemaphore.
void os::Posix::to_RTC_abstime(timespec* abstime, int64_t millis) {
  to_abstime(abstime, millis_to_nanos_bounded(millis),
             false /* not absolute */,
             true  /* use real-time clock */);
}

// Common (partly) shared time functions

jlong os::javaTimeMillis() {
  struct timespec ts;
  int status = clock_gettime(CLOCK_REALTIME, &ts);
  assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
  return jlong(ts.tv_sec) * MILLIUNITS +
         jlong(ts.tv_nsec) / NANOUNITS_PER_MILLIUNIT;
}

void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  struct timespec ts;
  int status = clock_gettime(CLOCK_REALTIME, &ts);
  assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
  seconds = jlong(ts.tv_sec);
  nanos = jlong(ts.tv_nsec);
}

// macOS and AIX have platform specific implementations for javaTimeNanos()
// using native clock/timer access APIs. These have historically worked well
// for those platforms, but it may be possible for them to switch to the
// generic clock_gettime mechanism in the future.
#if !defined(__APPLE__) && !defined(AIX)

jlong os::javaTimeNanos() {
  struct timespec tp;
  int status = clock_gettime(CLOCK_MONOTONIC, &tp);
  assert(status == 0, "clock_gettime error: %s", os::strerror(errno));
  jlong result = jlong(tp.tv_sec) * NANOSECS_PER_SEC + jlong(tp.tv_nsec);
  return result;
}

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  // CLOCK_MONOTONIC - amount of time since some arbitrary point in the past
  info_ptr->max_value = ALL_64_BITS;
  info_ptr->may_skip_backward = false; // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;  // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
}

#endif // !__APPLE__ && !AIX

// Shared pthread_mutex/cond based PlatformEvent implementation.
// Not currently usable by Solaris.


// PlatformEvent
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread. Multiple unparkers can coexist.
//
// _event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
// Having three states allows for some detection of bad usage - see
// comments on unpark().

os::PlatformEvent::PlatformEvent() {
  int status = pthread_cond_init(_cond, _condAttr);
  assert_status(status == 0, status, "cond_init");
  status = pthread_mutex_init(_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
  _event   = 0;
  _nParked = 0;
}
1516
1517
void os::PlatformEvent::park() { // AKA "down()"
1518
// Transitions for _event:
1519
// -1 => -1 : illegal
1520
// 1 => 0 : pass - return immediately
1521
// 0 => -1 : block; then set _event to 0 before returning
1522
1523
// Invariant: Only the thread associated with the PlatformEvent
1524
// may call park().
1525
assert(_nParked == 0, "invariant");
1526
1527
int v;
1528
1529
// atomically decrement _event
1530
for (;;) {
1531
v = _event;
1532
if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
1533
}
1534
guarantee(v >= 0, "invariant");
1535
1536
if (v == 0) { // Do this the hard way by blocking ...
1537
int status = pthread_mutex_lock(_mutex);
1538
assert_status(status == 0, status, "mutex_lock");
1539
guarantee(_nParked == 0, "invariant");
1540
++_nParked;
1541
while (_event < 0) {
1542
// OS-level "spurious wakeups" are ignored
1543
status = pthread_cond_wait(_cond, _mutex);
1544
assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
1545
status, "cond_wait");
1546
}
1547
--_nParked;
1548
1549
_event = 0;
1550
status = pthread_mutex_unlock(_mutex);
1551
assert_status(status == 0, status, "mutex_unlock");
1552
// Paranoia to ensure our locked and lock-free paths interact
1553
// correctly with each other.
1554
OrderAccess::fence();
1555
}
1556
guarantee(_event >= 0, "invariant");
1557
}
1558
1559
int os::PlatformEvent::park(jlong millis) {
1560
// Transitions for _event:
1561
// -1 => -1 : illegal
1562
// 1 => 0 : pass - return immediately
1563
// 0 => -1 : block; then set _event to 0 before returning
1564
1565
// Invariant: Only the thread associated with the Event/PlatformEvent
1566
// may call park().
1567
assert(_nParked == 0, "invariant");
1568
1569
int v;
1570
// atomically decrement _event
1571
for (;;) {
1572
v = _event;
1573
if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
1574
}
1575
guarantee(v >= 0, "invariant");
1576
1577
if (v == 0) { // Do this the hard way by blocking ...
1578
struct timespec abst;
1579
to_abstime(&abst, millis_to_nanos_bounded(millis), false, false);
1580
1581
int ret = OS_TIMEOUT;
1582
int status = pthread_mutex_lock(_mutex);
1583
assert_status(status == 0, status, "mutex_lock");
1584
guarantee(_nParked == 0, "invariant");
1585
++_nParked;
1586
1587
while (_event < 0) {
1588
status = pthread_cond_timedwait(_cond, _mutex, &abst);
1589
assert_status(status == 0 || status == ETIMEDOUT,
1590
status, "cond_timedwait");
1591
// OS-level "spurious wakeups" are ignored unless the archaic
1592
// FilterSpuriousWakeups is set false. That flag should be obsoleted.
1593
if (!FilterSpuriousWakeups) break;
1594
if (status == ETIMEDOUT) break;
1595
}
1596
--_nParked;
1597
1598
if (_event >= 0) {
1599
ret = OS_OK;
1600
}
1601
1602
_event = 0;
1603
status = pthread_mutex_unlock(_mutex);
1604
assert_status(status == 0, status, "mutex_unlock");
1605
// Paranoia to ensure our locked and lock-free paths interact
1606
// correctly with each other.
1607
OrderAccess::fence();
1608
return ret;
1609
}
1610
return OS_OK;
1611
}

void os::PlatformEvent::unpark() {
  // Transitions for _event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without checking state conditions
  // properly. This spurious return doesn't manifest itself in any user code
  // but only in the correctly written condition checking loops of ObjectMonitor,
  // Mutex/Monitor, and JavaThread::sleep.

  if (Atomic::xchg(&_event, 1) >= 0) return;

  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int anyWaiters = _nParked;
  assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");

  // Note that we signal() *after* dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups. In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim
  // will simply re-test the condition and re-park itself.
  // This provides particular benefit if the underlying platform does not
  // provide wait morphing.

  if (anyWaiters != 0) {
    status = pthread_cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }
}
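
// Worked trace of the note above (illustrative only): with _event == 0,
// unpark() swaps in 1 and returns without signalling (the old value is
// >= 0). The owning thread's next park() sees v == 1 and returns
// immediately - the intended spurious return - and only a second
// back-to-back park() (0 => -1) actually blocks.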

// JSR166 support

os::PlatformParker::PlatformParker() : _counter(0), _cur_index(-1) {
  int status = pthread_cond_init(&_cond[REL_INDEX], _condAttr);
  assert_status(status == 0, status, "cond_init rel");
  status = pthread_cond_init(&_cond[ABS_INDEX], NULL);
  assert_status(status == 0, status, "cond_init abs");
  status = pthread_mutex_init(_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
}

os::PlatformParker::~PlatformParker() {
  int status = pthread_cond_destroy(&_cond[REL_INDEX]);
  assert_status(status == 0, status, "cond_destroy rel");
  status = pthread_cond_destroy(&_cond[ABS_INDEX]);
  assert_status(status == 0, status, "cond_destroy abs");
  status = pthread_mutex_destroy(_mutex);
  assert_status(status == 0, status, "mutex_destroy");
}

// Parker::park decrements count if > 0, else does a condvar wait. Unpark
// sets count to 1 and signals condvar. Only one thread ever waits
// on the condvar. Contention seen when trying to park implies that someone
// is unparking you, so don't wait. And spurious returns are fine, so there
// is no need to track notifications.
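//
// This is the VM side of java.util.concurrent.locks.LockSupport. A hedged
// Java-level sketch of the contract implemented here:
//
//   LockSupport.unpark(thread);  // _counter := 1, signal if thread is parked
//   LockSupport.park();          // consumes a pending permit or blocks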

void Parker::park(bool isAbsolute, jlong time) {

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(&_counter, 0) > 0) return;

  JavaThread* jt = JavaThread::current();

  // Optional optimization -- avoid state transitions if there's
  // an interrupt pending.
  if (jt->is_interrupted(false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  struct timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    to_abstime(&absTime, time, isAbsolute, false);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Can't access interrupt state now that we are _thread_blocked. If we've
  // been interrupted since we checked above then _counter will be > 0.

  // Don't wait if we cannot get the lock, since interference arises from
  // unparking.
  if (pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "invariant");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

  OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

  assert(_cur_index == -1, "invariant");
  if (time == 0) {
    _cur_index = REL_INDEX; // arbitrary choice when not timed
    status = pthread_cond_wait(&_cond[_cur_index], _mutex);
    assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
                  status, "cond_wait");
  } else {
    _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
    status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
  }
  _cur_index = -1;

  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();
}

void Parker::unpark() {
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "invariant");
  const int s = _counter;
  _counter = 1;
  // must capture correct index before unlocking
  int index = _cur_index;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");

  // Note that we signal() *after* dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups. In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim
  // will simply re-test the condition and re-park itself.
  // This provides particular benefit if the underlying platform does not
  // provide wait morphing.

  if (s < 1 && index != -1) {
    // thread is definitely parked
    status = pthread_cond_signal(&_cond[index]);
    assert_status(status == 0, status, "invariant");
  }
}
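
// Why "s < 1 && index != -1" means the thread is definitely parked
// (illustrative reasoning, derived from the code above): s is the permit
// count observed under the lock, so s < 1 means no permit was already
// pending (if s == 1, an earlier unpark() has already signalled), and
// _cur_index is only non-negative while Parker::park() is waiting on one
// of the two condvars. Both are read under _mutex, so the signal cannot
// be lost.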

// Platform Mutex/Monitor implementation

#if PLATFORM_MONITOR_IMPL_INDIRECT

os::PlatformMutex::Mutex::Mutex() : _next(NULL) {
  int status = pthread_mutex_init(&_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
}

os::PlatformMutex::Mutex::~Mutex() {
  int status = pthread_mutex_destroy(&_mutex);
  assert_status(status == 0, status, "mutex_destroy");
}

pthread_mutex_t os::PlatformMutex::_freelist_lock;
os::PlatformMutex::Mutex* os::PlatformMutex::_mutex_freelist = NULL;

void os::PlatformMutex::init() {
  int status = pthread_mutex_init(&_freelist_lock, _mutexAttr);
  assert_status(status == 0, status, "freelist lock init");
}

struct os::PlatformMutex::WithFreeListLocked : public StackObj {
  WithFreeListLocked() {
    int status = pthread_mutex_lock(&_freelist_lock);
    assert_status(status == 0, status, "freelist lock");
  }

  ~WithFreeListLocked() {
    int status = pthread_mutex_unlock(&_freelist_lock);
    assert_status(status == 0, status, "freelist unlock");
  }
};
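
// WithFreeListLocked is a classic RAII guard: the constructor takes the
// freelist lock and the destructor releases it when the guard goes out of
// scope, so the early "return" in the constructors below still unlocks.
// Illustrative use of the pattern (mirroring the code that follows):
//
//   {
//     WithFreeListLocked wfl;   // locked here
//     // ... manipulate the freelist ...
//   }                           // unlocked here, even on early return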

os::PlatformMutex::PlatformMutex() {
  {
    WithFreeListLocked wfl;
    _impl = _mutex_freelist;
    if (_impl != NULL) {
      _mutex_freelist = _impl->_next;
      _impl->_next = NULL;
      return;
    }
  }
  _impl = new Mutex();
}

os::PlatformMutex::~PlatformMutex() {
  WithFreeListLocked wfl;
  assert(_impl->_next == NULL, "invariant");
  _impl->_next = _mutex_freelist;
  _mutex_freelist = _impl;
}
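
// Note on the freelist (illustrative rationale, not asserted by the
// original comments): the destructor recycles the underlying pthread
// mutex onto the freelist instead of destroying it, which avoids the cost
// of re-initialization and any platform quirks around destroying
// primitives that were recently in use.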

os::PlatformMonitor::Cond::Cond() : _next(NULL) {
  int status = pthread_cond_init(&_cond, _condAttr);
  assert_status(status == 0, status, "cond_init");
}

os::PlatformMonitor::Cond::~Cond() {
  int status = pthread_cond_destroy(&_cond);
  assert_status(status == 0, status, "cond_destroy");
}

os::PlatformMonitor::Cond* os::PlatformMonitor::_cond_freelist = NULL;

os::PlatformMonitor::PlatformMonitor() {
  {
    WithFreeListLocked wfl;
    _impl = _cond_freelist;
    if (_impl != NULL) {
      _cond_freelist = _impl->_next;
      _impl->_next = NULL;
      return;
    }
  }
  _impl = new Cond();
}

os::PlatformMonitor::~PlatformMonitor() {
  WithFreeListLocked wfl;
  assert(_impl->_next == NULL, "invariant");
  _impl->_next = _cond_freelist;
  _cond_freelist = _impl;
}

#else

os::PlatformMutex::PlatformMutex() {
  int status = pthread_mutex_init(&_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
}

os::PlatformMutex::~PlatformMutex() {
  int status = pthread_mutex_destroy(&_mutex);
  assert_status(status == 0, status, "mutex_destroy");
}

os::PlatformMonitor::PlatformMonitor() {
  int status = pthread_cond_init(&_cond, _condAttr);
  assert_status(status == 0, status, "cond_init");
}

os::PlatformMonitor::~PlatformMonitor() {
  int status = pthread_cond_destroy(&_cond);
  assert_status(status == 0, status, "cond_destroy");
}

#endif // PLATFORM_MONITOR_IMPL_INDIRECT

// Must already be locked
int os::PlatformMonitor::wait(jlong millis) {
  assert(millis >= 0, "negative timeout");
  if (millis > 0) {
    struct timespec abst;
    // We have to watch for overflow when converting millis to nanos,
    // but if millis is that large then we will end up limiting to
    // MAX_SECS anyway, so just do that here.
    if (millis / MILLIUNITS > MAX_SECS) {
      millis = jlong(MAX_SECS) * MILLIUNITS;
    }
    to_abstime(&abst, millis_to_nanos(millis), false, false);

    int ret = OS_TIMEOUT;
    int status = pthread_cond_timedwait(cond(), mutex(), &abst);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (status == 0) {
      ret = OS_OK;
    }
    return ret;
  } else {
    int status = pthread_cond_wait(cond(), mutex());
    assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
                  status, "cond_wait");
    return OS_OK;
  }
}
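
// Illustrative monitor pattern built on wait() (a hedged sketch; "mon" and
// "ready" are hypothetical, and lock()/unlock() are assumed to be the
// members PlatformMonitor inherits from PlatformMutex): the caller must
// hold the monitor, and must re-check its condition because wait() can
// return early.
//
//   mon->lock();
//   while (!ready) {
//     mon->wait(0);   // 0 millis means wait without a timeout
//   }
//   mon->unlock();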

// Darwin has no "environ" in a dynamic library.
#ifdef __APPLE__
#define environ (*_NSGetEnviron())
#else
extern char** environ;
#endif

char** os::get_environ() { return environ; }

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Notes: -Unlike system(), this function can be called from a signal handler.
//         It doesn't block SIGINT et al.
//        -This function is unsafe to use in non-error situations, mainly
//         because the child process will inherit all parent descriptors.
int os::fork_and_exec(const char* cmd, bool prefer_vfork) {
  const char* argv[4] = {"sh", "-c", cmd, NULL};

  pid_t pid;

  char** env = os::get_environ();

  // Always use vfork on AIX, since it's safe and helps with analyzing OOM situations.
  // Otherwise leave it up to the caller.
  AIX_ONLY(prefer_vfork = true;)
#ifdef __APPLE__
  pid = ::fork();
#else
  pid = prefer_vfork ? ::vfork() : ::fork();
#endif

  if (pid < 0) {
    // fork failed
    return -1;

  } else if (pid == 0) {
    // child process

    ::execve("/bin/sh", (char* const*)argv, env);

    // execve failed
    ::_exit(-1);

  } else {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited.
    while (::waitpid(pid, &status, 0) < 0) {
      switch (errno) {
        case ECHILD: return 0;
        case EINTR: break;
        default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal.
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through.
      return status;
    }
  }
}
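
// Worked example of the return-value convention (illustrative): a child
// that runs "exit 3" is WIFEXITED and yields 3, while a child killed by
// SIGKILL (signal 9) is WIFSIGNALED and yields 0x80 + 9 = 137 - the same
// value a shell would report in $?.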

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from a signal handler. Before adding something to os::shutdown(),
// make sure it is async-safe and can handle a partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }

}

// Note: os::abort() might be called very early during initialization, or
// called from a signal handler. Before adding something to os::abort(),
// make sure it is async-safe and can handle a partially initialized VM.
// Also note we can abort while other threads continue to run, so we can
// easily trigger secondary faults in those threads. To reduce the likelihood
// of that we use _exit rather than exit, so that no atexit hooks get run.
// But note that os::shutdown() could also trigger secondary faults.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
    LINUX_ONLY(if (DumpPrivateMappingsInCore) ClassLoader::close_jrt_image();)
    ::abort(); // dump core
  }
  ::_exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
// Dump a core file, if possible, for debugging.
void os::die() {
  if (TestUnresponsiveErrorHandler && !CreateCoredumpOnCrash) {
    // For TimeoutInErrorHandlingTest.java, we just kill the VM
    // and don't take the time to generate a core file.
    os::signal_raise(SIGKILL);
  } else {
    ::abort();
  }
}