Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
40930 views
1
/*
2
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
// no precompiled headers
26
#include "jvm.h"
27
#include "asm/macroAssembler.hpp"
28
#include "classfile/vmSymbols.hpp"
29
#include "code/codeCache.hpp"
30
#include "code/icBuffer.hpp"
31
#include "code/vtableStubs.hpp"
32
#include "interpreter/interpreter.hpp"
33
#include "logging/log.hpp"
34
#include "memory/allocation.inline.hpp"
35
#include "os_share_bsd.hpp"
36
#include "prims/jniFastGetField.hpp"
37
#include "prims/jvm_misc.hpp"
38
#include "runtime/arguments.hpp"
39
#include "runtime/frame.inline.hpp"
40
#include "runtime/interfaceSupport.inline.hpp"
41
#include "runtime/java.hpp"
42
#include "runtime/javaCalls.hpp"
43
#include "runtime/mutexLocker.hpp"
44
#include "runtime/osThread.hpp"
45
#include "runtime/safepointMechanism.hpp"
46
#include "runtime/sharedRuntime.hpp"
47
#include "runtime/stubRoutines.hpp"
48
#include "runtime/thread.inline.hpp"
49
#include "runtime/timer.hpp"
50
#include "signals_posix.hpp"
51
#include "utilities/align.hpp"
52
#include "utilities/events.hpp"
53
#include "utilities/vmError.hpp"
54
55
// put OS-includes here
56
# include <sys/types.h>
57
# include <sys/mman.h>
58
# include <pthread.h>
59
# include <signal.h>
60
# include <errno.h>
61
# include <dlfcn.h>
62
# include <stdlib.h>
63
# include <stdio.h>
64
# include <unistd.h>
65
# include <sys/resource.h>
66
# include <sys/stat.h>
67
# include <sys/time.h>
68
# include <sys/utsname.h>
69
# include <sys/socket.h>
70
# include <sys/wait.h>
71
# include <pwd.h>
72
# include <poll.h>
73
#ifndef __OpenBSD__
74
# include <ucontext.h>
75
#endif
76
77
#if !defined(__APPLE__) && !defined(__NetBSD__)
78
# include <pthread_np.h>
79
#endif
80
81
// needed by current_stack_region() workaround for Mavericks
82
#if defined(__APPLE__)
83
# include <errno.h>
84
# include <sys/types.h>
85
# include <sys/sysctl.h>
86
# define DEFAULT_MAIN_THREAD_STACK_PAGES 2048
87
# define OS_X_10_9_0_KERNEL_MAJOR_VERSION 13
88
#endif
89
90
#ifdef AMD64
91
#define SPELL_REG_SP "rsp"
92
#define SPELL_REG_FP "rbp"
93
#else
94
#define SPELL_REG_SP "esp"
95
#define SPELL_REG_FP "ebp"
96
#endif // AMD64
97
98
#ifdef __FreeBSD__
99
# define context_trapno uc_mcontext.mc_trapno
100
# ifdef AMD64
101
# define context_pc uc_mcontext.mc_rip
102
# define context_sp uc_mcontext.mc_rsp
103
# define context_fp uc_mcontext.mc_rbp
104
# define context_rip uc_mcontext.mc_rip
105
# define context_rsp uc_mcontext.mc_rsp
106
# define context_rbp uc_mcontext.mc_rbp
107
# define context_rax uc_mcontext.mc_rax
108
# define context_rbx uc_mcontext.mc_rbx
109
# define context_rcx uc_mcontext.mc_rcx
110
# define context_rdx uc_mcontext.mc_rdx
111
# define context_rsi uc_mcontext.mc_rsi
112
# define context_rdi uc_mcontext.mc_rdi
113
# define context_r8 uc_mcontext.mc_r8
114
# define context_r9 uc_mcontext.mc_r9
115
# define context_r10 uc_mcontext.mc_r10
116
# define context_r11 uc_mcontext.mc_r11
117
# define context_r12 uc_mcontext.mc_r12
118
# define context_r13 uc_mcontext.mc_r13
119
# define context_r14 uc_mcontext.mc_r14
120
# define context_r15 uc_mcontext.mc_r15
121
# define context_flags uc_mcontext.mc_flags
122
# define context_err uc_mcontext.mc_err
123
# else
124
# define context_pc uc_mcontext.mc_eip
125
# define context_sp uc_mcontext.mc_esp
126
# define context_fp uc_mcontext.mc_ebp
127
# define context_eip uc_mcontext.mc_eip
128
# define context_esp uc_mcontext.mc_esp
129
# define context_eax uc_mcontext.mc_eax
130
# define context_ebx uc_mcontext.mc_ebx
131
# define context_ecx uc_mcontext.mc_ecx
132
# define context_edx uc_mcontext.mc_edx
133
# define context_ebp uc_mcontext.mc_ebp
134
# define context_esi uc_mcontext.mc_esi
135
# define context_edi uc_mcontext.mc_edi
136
# define context_eflags uc_mcontext.mc_eflags
137
# define context_trapno uc_mcontext.mc_trapno
138
# endif
139
#endif
140
141
#ifdef __APPLE__
142
# if __DARWIN_UNIX03 && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_5)
143
// 10.5 UNIX03 member name prefixes
144
#define DU3_PREFIX(s, m) __ ## s.__ ## m
145
# else
146
#define DU3_PREFIX(s, m) s ## . ## m
147
# endif
148
149
# ifdef AMD64
150
# define context_pc context_rip
151
# define context_sp context_rsp
152
# define context_fp context_rbp
153
# define context_rip uc_mcontext->DU3_PREFIX(ss,rip)
154
# define context_rsp uc_mcontext->DU3_PREFIX(ss,rsp)
155
# define context_rax uc_mcontext->DU3_PREFIX(ss,rax)
156
# define context_rbx uc_mcontext->DU3_PREFIX(ss,rbx)
157
# define context_rcx uc_mcontext->DU3_PREFIX(ss,rcx)
158
# define context_rdx uc_mcontext->DU3_PREFIX(ss,rdx)
159
# define context_rbp uc_mcontext->DU3_PREFIX(ss,rbp)
160
# define context_rsi uc_mcontext->DU3_PREFIX(ss,rsi)
161
# define context_rdi uc_mcontext->DU3_PREFIX(ss,rdi)
162
# define context_r8 uc_mcontext->DU3_PREFIX(ss,r8)
163
# define context_r9 uc_mcontext->DU3_PREFIX(ss,r9)
164
# define context_r10 uc_mcontext->DU3_PREFIX(ss,r10)
165
# define context_r11 uc_mcontext->DU3_PREFIX(ss,r11)
166
# define context_r12 uc_mcontext->DU3_PREFIX(ss,r12)
167
# define context_r13 uc_mcontext->DU3_PREFIX(ss,r13)
168
# define context_r14 uc_mcontext->DU3_PREFIX(ss,r14)
169
# define context_r15 uc_mcontext->DU3_PREFIX(ss,r15)
170
# define context_flags uc_mcontext->DU3_PREFIX(ss,rflags)
171
# define context_trapno uc_mcontext->DU3_PREFIX(es,trapno)
172
# define context_err uc_mcontext->DU3_PREFIX(es,err)
173
# else
174
# define context_pc context_eip
175
# define context_sp context_esp
176
# define context_fp context_ebp
177
# define context_eip uc_mcontext->DU3_PREFIX(ss,eip)
178
# define context_esp uc_mcontext->DU3_PREFIX(ss,esp)
179
# define context_eax uc_mcontext->DU3_PREFIX(ss,eax)
180
# define context_ebx uc_mcontext->DU3_PREFIX(ss,ebx)
181
# define context_ecx uc_mcontext->DU3_PREFIX(ss,ecx)
182
# define context_edx uc_mcontext->DU3_PREFIX(ss,edx)
183
# define context_ebp uc_mcontext->DU3_PREFIX(ss,ebp)
184
# define context_esi uc_mcontext->DU3_PREFIX(ss,esi)
185
# define context_edi uc_mcontext->DU3_PREFIX(ss,edi)
186
# define context_eflags uc_mcontext->DU3_PREFIX(ss,eflags)
187
# define context_trapno uc_mcontext->DU3_PREFIX(es,trapno)
188
# endif
189
#endif
190
191
#ifdef __OpenBSD__
192
# define context_trapno sc_trapno
193
# ifdef AMD64
194
# define context_pc sc_rip
195
# define context_sp sc_rsp
196
# define context_fp sc_rbp
197
# define context_rip sc_rip
198
# define context_rsp sc_rsp
199
# define context_rbp sc_rbp
200
# define context_rax sc_rax
201
# define context_rbx sc_rbx
202
# define context_rcx sc_rcx
203
# define context_rdx sc_rdx
204
# define context_rsi sc_rsi
205
# define context_rdi sc_rdi
206
# define context_r8 sc_r8
207
# define context_r9 sc_r9
208
# define context_r10 sc_r10
209
# define context_r11 sc_r11
210
# define context_r12 sc_r12
211
# define context_r13 sc_r13
212
# define context_r14 sc_r14
213
# define context_r15 sc_r15
214
# define context_flags sc_rflags
215
# define context_err sc_err
216
# else
217
# define context_pc sc_eip
218
# define context_sp sc_esp
219
# define context_fp sc_ebp
220
# define context_eip sc_eip
221
# define context_esp sc_esp
222
# define context_eax sc_eax
223
# define context_ebx sc_ebx
224
# define context_ecx sc_ecx
225
# define context_edx sc_edx
226
# define context_ebp sc_ebp
227
# define context_esi sc_esi
228
# define context_edi sc_edi
229
# define context_eflags sc_eflags
230
# define context_trapno sc_trapno
231
# endif
232
#endif
233
234
#ifdef __NetBSD__
235
# define context_trapno uc_mcontext.__gregs[_REG_TRAPNO]
236
# ifdef AMD64
237
# define __register_t __greg_t
238
# define context_pc uc_mcontext.__gregs[_REG_RIP]
239
# define context_sp uc_mcontext.__gregs[_REG_URSP]
240
# define context_fp uc_mcontext.__gregs[_REG_RBP]
241
# define context_rip uc_mcontext.__gregs[_REG_RIP]
242
# define context_rsp uc_mcontext.__gregs[_REG_URSP]
243
# define context_rax uc_mcontext.__gregs[_REG_RAX]
244
# define context_rbx uc_mcontext.__gregs[_REG_RBX]
245
# define context_rcx uc_mcontext.__gregs[_REG_RCX]
246
# define context_rdx uc_mcontext.__gregs[_REG_RDX]
247
# define context_rbp uc_mcontext.__gregs[_REG_RBP]
248
# define context_rsi uc_mcontext.__gregs[_REG_RSI]
249
# define context_rdi uc_mcontext.__gregs[_REG_RDI]
250
# define context_r8 uc_mcontext.__gregs[_REG_R8]
251
# define context_r9 uc_mcontext.__gregs[_REG_R9]
252
# define context_r10 uc_mcontext.__gregs[_REG_R10]
253
# define context_r11 uc_mcontext.__gregs[_REG_R11]
254
# define context_r12 uc_mcontext.__gregs[_REG_R12]
255
# define context_r13 uc_mcontext.__gregs[_REG_R13]
256
# define context_r14 uc_mcontext.__gregs[_REG_R14]
257
# define context_r15 uc_mcontext.__gregs[_REG_R15]
258
# define context_flags uc_mcontext.__gregs[_REG_RFL]
259
# define context_err uc_mcontext.__gregs[_REG_ERR]
260
# else
261
# define context_pc uc_mcontext.__gregs[_REG_EIP]
262
# define context_sp uc_mcontext.__gregs[_REG_UESP]
263
# define context_fp uc_mcontext.__gregs[_REG_EBP]
264
# define context_eip uc_mcontext.__gregs[_REG_EIP]
265
# define context_esp uc_mcontext.__gregs[_REG_UESP]
266
# define context_eax uc_mcontext.__gregs[_REG_EAX]
267
# define context_ebx uc_mcontext.__gregs[_REG_EBX]
268
# define context_ecx uc_mcontext.__gregs[_REG_ECX]
269
# define context_edx uc_mcontext.__gregs[_REG_EDX]
270
# define context_ebp uc_mcontext.__gregs[_REG_EBP]
271
# define context_esi uc_mcontext.__gregs[_REG_ESI]
272
# define context_edi uc_mcontext.__gregs[_REG_EDI]
273
# define context_eflags uc_mcontext.__gregs[_REG_EFL]
274
# define context_trapno uc_mcontext.__gregs[_REG_TRAPNO]
275
# endif
276
#endif
277
278
// Returns the caller's current stack pointer by reading the hardware
// stack-pointer register (SPELL_REG_SP is "rsp" on AMD64, "esp" on x86-32).
address os::current_stack_pointer() {
#if defined(__clang__) || defined(__llvm__)
  // clang/llvm does not support named-register variables, so read the
  // register with an explicit inline-asm move instead.
  void *esp;
  __asm__("mov %%" SPELL_REG_SP ", %0":"=r"(esp));
  return (address) esp;
#else
  // gcc: bind a local variable directly to the stack-pointer register.
  register void *esp __asm__ (SPELL_REG_SP);
  return (address) esp;
#endif
}
288
289
// Produce a word-sized bit pattern that can never be mistaken for an
// address handed out by reserve_memory.
char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  const intptr_t all_bits_set = -1;
  return (char*) all_bits_set;
}
296
297
// Extract the saved program counter from a signal-delivered ucontext.
// context_pc is a platform-specific macro mapped to the right mcontext
// field for FreeBSD/OpenBSD/NetBSD/macOS above.
address os::Posix::ucontext_get_pc(const ucontext_t * uc) {
  return (address)uc->context_pc;
}
300
301
// Overwrite the saved program counter in a ucontext so that, on return
// from the signal handler, execution resumes at 'pc' (used to redirect
// a faulting thread into a stub).
void os::Posix::ucontext_set_pc(ucontext_t * uc, address pc) {
  uc->context_pc = (intptr_t)pc ;
}
304
305
// Extract the saved stack pointer from a signal-delivered ucontext
// (context_sp is the per-platform mcontext mapping defined above).
intptr_t* os::Bsd::ucontext_get_sp(const ucontext_t * uc) {
  return (intptr_t*)uc->context_sp;
}
308
309
// Extract the saved frame pointer from a signal-delivered ucontext
// (context_fp is the per-platform mcontext mapping defined above).
intptr_t* os::Bsd::ucontext_get_fp(const ucontext_t * uc) {
  return (intptr_t*)uc->context_fp;
}
312
313
// Pull pc/sp/fp out of a ucontext. Out-parameters are optional: a NULL
// ret_sp/ret_fp is simply skipped. A NULL ucontext yields a NULL pc and
// NULL sp/fp.
address os::fetch_frame_from_context(const void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {

  const ucontext_t* uc = (const ucontext_t*)ucVoid;

  if (uc == NULL) {
    // No context available: report empty values.
    if (ret_sp != NULL) *ret_sp = (intptr_t *)NULL;
    if (ret_fp != NULL) *ret_fp = (intptr_t *)NULL;
    return NULL;
  }

  if (ret_sp != NULL) *ret_sp = os::Bsd::ucontext_get_sp(uc);
  if (ret_fp != NULL) *ret_fp = os::Bsd::ucontext_get_fp(uc);
  return os::Posix::ucontext_get_pc(uc);
}
331
332
// Build a frame object directly from the register state captured in a
// ucontext.
frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* sp = NULL;
  intptr_t* fp = NULL;
  address pc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, pc);
}
338
339
// Reconstruct the frame of compiled code that triggered a stack bang.
frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
  const ucontext_t* uc = (const ucontext_t*)ucVoid;
  frame fr = os::fetch_frame_from_context(uc);
  // In compiled code the stack banging is performed just after the return
  // pc has been pushed on the stack, so the top-of-stack word is that
  // return pc and the logical sp sits one slot higher.
  intptr_t* caller_sp = fr.sp() + 1;
  address   caller_pc = (address)*(fr.sp());
  return frame(caller_sp, fr.fp(), caller_pc);
}
346
347
// By default, gcc always save frame pointer (%ebp/%rbp) on stack. It may get
348
// turned off by -fomit-frame-pointer,
349
frame os::get_sender_for_C_frame(frame* fr) {
350
return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
351
}
352
353
// Read the current frame pointer register and chase it up so the caller
// of os::current_frame() obtains *its* caller's fp. Inline asm, so keep
// byte-identical; see the inlining note below for the NMT special case.
intptr_t* _get_previous_fp() {
#if defined(__clang__) || defined(__llvm__)
  // clang/llvm lacks named-register variables; use an explicit move.
  intptr_t **ebp;
  __asm__("mov %%" SPELL_REG_FP ", %0":"=r"(ebp));
#else
  register intptr_t **ebp __asm__ (SPELL_REG_FP);
#endif
  // ebp is for this frame (_get_previous_fp). We want the ebp for the
  // caller of os::current_frame*(), so go up two frames. However, for
  // optimized builds, _get_previous_fp() will be inlined, so only go
  // up 1 frame in that case.
#ifdef _NMT_NOINLINE_
  return **(intptr_t***)ebp;
#else
  return *ebp;
#endif
}
370
371
372
// Build a frame describing the caller of this function, for native
// stack walking. Returns an empty frame when the stack is not walkable.
frame os::current_frame() {
  intptr_t* caller_fp = _get_previous_fp();
  frame this_frame((intptr_t*)os::current_stack_pointer(),
                   (intptr_t*)caller_fp,
                   CAST_FROM_FN_PTR(address, os::current_frame));
  if (!os::is_first_C_frame(&this_frame)) {
    return os::get_sender_for_C_frame(&this_frame);
  }
  // stack is not walkable
  return frame();
}
384
385
// From IA32 System Programming Guide
enum {
  trap_page_fault = 0xE  // x86 exception vector number for a page fault (#PF)
};
389
390
// Platform-specific part of the HotSpot signal handler for BSD/x86.
// Triages SIGSEGV/SIGBUS/SIGFPE delivered to a JavaThread and, when the
// fault is one the VM knows how to recover from (stack overflow, safepoint
// poll, implicit null/div-by-zero check, unsafe access, JNI fast-field
// access, execution-protection violation), redirects the thread to the
// appropriate stub by rewriting the saved pc in 'uc'. Returns true when
// the signal was consumed, false to let generic handling continue.
// NOTE: the order of checks below is deliberate; see the inline comments.
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
                                             ucontext_t* uc, JavaThread* thread) {
  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Posix::ucontext_get_pc(uc);

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV || sig == SIGBUS) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (thread->is_in_full_stack(addr)) {
        // stack overflow
        if (os::Posix::handle_stack_overflow(thread, addr, pc, uc, &stub)) {
          return true; // continue
        }
      }
    }

    if ((sig == SIGSEGV || sig == SIGBUS) && VM_Version::is_cpuinfo_segv_addr(pc)) {
      // Verify that OS save/restore AVX registers.
      stub = VM_Version::cpuinfo_cont_addr();
    }

    // We test if stub is already set (by the stack overflow code
    // above) so it is not overwritten by the code that follows. This
    // check is not required on other platforms, because on other
    // platforms we check for SIGSEGV only or SIGBUS only, where here
    // we have to check for both SIGSEGV and SIGBUS.
    if (thread->thread_state() == _thread_in_Java && stub == NULL) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      if ((sig == SIGSEGV || sig == SIGBUS) && SafepointMechanism::is_poll_address((address)info->si_addr)) {
        // Fault on the safepoint polling page: divert to the poll stub.
        stub = SharedRuntime::get_poll_stub(pc);
#if defined(__APPLE__)
      // 32-bit Darwin reports a SIGBUS for nearly all memory access exceptions.
      // 64-bit Darwin may also use a SIGBUS (seen with compressed oops).
      // Catching SIGBUS here prevents the implicit SIGBUS NULL check below from
      // being called, so only do so if the implicit NULL check is not necessary.
      } else if (sig == SIGBUS && !MacroAssembler::uses_implicit_null_check(info->si_addr)) {
#else
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
#endif
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
        bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc);
        if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
          // Resume at the instruction following the faulting access.
          address next_pc = Assembler::locate_next_instruction(pc);
          if (is_unsafe_arraycopy) {
            next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
          }
          stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
        }
      }
      else

#ifdef AMD64
      if (sig == SIGFPE &&
          (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV
          // Workaround for macOS ARM incorrectly reporting FPE_FLTINV for "div by 0"
          // instead of the expected FPE_FLTDIV when running x86_64 binary under Rosetta emulation
          MACOS_ONLY(|| (VM_Version::is_cpu_emulated() && info->si_code == FPE_FLTINV)))) {
        stub =
          SharedRuntime::
          continuation_for_implicit_exception(thread,
                                              pc,
                                              SharedRuntime::
                                              IMPLICIT_DIVIDE_BY_ZERO);
#ifdef __APPLE__
      } else if (sig == SIGFPE && info->si_code == FPE_NOOP) {
        // Darwin can report FPE_NOOP; decode the faulting instruction by hand.
        int op = pc[0];

        // Skip REX
        if ((pc[0] & 0xf0) == 0x40) {
          op = pc[1];
        } else {
          op = pc[0];
        }

        // Check for IDIV
        if (op == 0xF7) {
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime:: IMPLICIT_DIVIDE_BY_ZERO);
        } else {
          // TODO: handle more cases if we are using other x86 instructions
          //   that can generate SIGFPE signal.
          tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
          fatal("please update this code.");
        }
#endif /* __APPLE__ */

#else
      if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
        // HACK: si_code does not work on bsd 2.2.12-20!!!
        int op = pc[0];
        if (op == 0xDB) {
          // FIST
          // TODO: The encoding of D2I in x86_32.ad can cause an exception
          // prior to the fist instruction if there was an invalid operation
          // pending. We want to dismiss that exception. From the win_32
          // side it also seems that if it really was the fist causing
          // the exception that we do the d2i by hand with different
          // rounding. Seems kind of weird.
          // NOTE: that we take the exception at the NEXT floating point instruction.
          assert(pc[0] == 0xDB, "not a FIST opcode");
          assert(pc[1] == 0x14, "not a FIST opcode");
          assert(pc[2] == 0x24, "not a FIST opcode");
          return true;
        } else if (op == 0xF7) {
          // IDIV
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
        } else {
          // TODO: handle more cases if we are using other x86 instructions
          //   that can generate SIGFPE signal on bsd.
          tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
          fatal("please update this code.");
        }
#endif // AMD64
      } else if ((sig == SIGSEGV || sig == SIGBUS) &&
                 MacroAssembler::uses_implicit_null_check(info->si_addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if ((thread->thread_state() == _thread_in_vm ||
                thread->thread_state() == _thread_in_native) &&
               sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
               thread->doing_unsafe_access()) {
      // Unsafe access fault while in VM/native state: skip the faulting
      // instruction and continue at the unsafe-access handler.
      address next_pc = Assembler::locate_next_instruction(pc);
      if (UnsafeCopyMemory::contains_pc(pc)) {
        next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
      }
      stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }
  }

#ifndef AMD64
  // Execution protection violation
  //
  // This should be kept as the last step in the triage. We don't
  // have a dedicated trap number for a no-execute fault, so be
  // conservative and allow other handlers the first shot.
  //
  // Note: We don't test that info->si_code == SEGV_ACCERR here.
  // this si_code is so generic that it is almost meaningless; and
  // the si_code for this condition may change in the future.
  // Furthermore, a false-positive should be harmless.
  if (UnguardOnExecutionViolation > 0 &&
      stub == NULL &&
      (sig == SIGSEGV || sig == SIGBUS) &&
      uc->context_trapno == trap_page_fault) {
    int page_size = os::vm_page_size();
    address addr = (address) info->si_addr;
    address pc = os::Posix::ucontext_get_pc(uc);
    // Make sure the pc and the faulting address are sane.
    //
    // If an instruction spans a page boundary, and the page containing
    // the beginning of the instruction is executable but the following
    // page is not, the pc and the faulting address might be slightly
    // different - we still want to unguard the 2nd page in this case.
    //
    // 15 bytes seems to be a (very) safe value for max instruction size.
    bool pc_is_near_addr =
      (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
    bool instr_spans_page_boundary =
      (align_down((intptr_t) pc ^ (intptr_t) addr,
                  (intptr_t) page_size) > 0);

    if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
      static volatile address last_addr =
        (address) os::non_memory_address_word();

      // In conservative mode, don't unguard unless the address is in the VM
      if (addr != last_addr &&
          (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

        // Set memory to RWX and retry
        address page_start = align_down(addr, page_size);
        bool res = os::protect_memory((char*) page_start, page_size,
                                      os::MEM_PROT_RWX);

        log_debug(os)("Execution protection violation "
                      "at " INTPTR_FORMAT
                      ", unguarding " INTPTR_FORMAT ": %s, errno=%d", p2i(addr),
                      p2i(page_start), (res ? "success" : "failed"), errno);
        stub = pc;

        // Set last_addr so if we fault again at the same address, we don't end
        // up in an endless loop.
        //
        // There are two potential complications here. Two threads trapping at
        // the same address at the same time could cause one of the threads to
        // think it already unguarded, and abort the VM. Likely very rare.
        //
        // The other race involves two threads alternately trapping at
        // different addresses and failing to unguard the page, resulting in
        // an endless loop. This condition is probably even more unlikely than
        // the first.
        //
        // Although both cases could be avoided by using locks or thread local
        // last_addr, these solutions are unnecessary complication: this
        // handler is a best-effort safety net, not a complete solution. It is
        // disabled by default and should only be used as a workaround in case
        // we missed any no-execute-unsafe VM code.

        last_addr = addr;
      }
    }
  }
#endif // !AMD64

  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    // Redirect execution to the chosen stub on return from the handler.
    os::Posix::ucontext_set_pc(uc, stub);
    return true;
  }

  return false;
}
627
628
// From solaris_i486.s ported to bsd_i486.s
629
extern "C" void fixcw();
630
631
// Per-thread x87 FPU setup. On 32-bit x86 the control word is forced to
// 53-bit precision via the assembly helper fixcw(); on AMD64 this is a no-op.
void os::Bsd::init_thread_fpu_state(void) {
#ifndef AMD64
  // Set fpu to 53 bit precision. This happens too early to use a stub.
  fixcw();
#endif // !AMD64
}
637
638
639
// Check that the bsd kernel version is 2.4 or higher since earlier
// versions do not support SSE without patches.
// On the BSDs targeted here this is unconditionally true.
bool os::supports_sse() {
  return true;
}
644
645
// Query the CPU microcode revision via the machdep.cpu.microcode_version
// sysctl. Returns 0 when the value is unavailable.
juint os::cpu_microcode_revision() {
  char data[8];
  size_t sz = sizeof(data);
  juint rev = 0;
  if (sysctlbyname("machdep.cpu.microcode_version", data, &sz, NULL, 0) == 0) {
    if (sz == 4) {
      rev = *((juint*)data);
    } else if (sz == 8) {
      rev = *((juint*)data + 1); // upper 32-bits
    }
  }
  return rev;
}
656
657
////////////////////////////////////////////////////////////////////////////////
658
// thread stack
659
660
// Minimum usable stack sizes required to get to user code. Space for
// HotSpot guard pages is added later.
size_t os::Posix::_compiler_thread_min_stack_allowed = 48 * K;
size_t os::Posix::_java_thread_min_stack_allowed = 48 * K;
#ifdef _LP64
size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K;
#else
// 32-bit debug builds get 4K of extra headroom for the VM internal thread.
size_t os::Posix::_vm_internal_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
#endif // _LP64
669
670
#ifndef AMD64
671
#ifdef __GNUC__
672
#define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
673
#endif
674
#endif // AMD64
675
676
// return default stack size for thr_type
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
#ifdef AMD64
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
#else
  // 32-bit: smaller address space, smaller defaults.
  size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
#endif // AMD64
  return s;
}
686
687
688
// Java thread:
689
//
690
// Low memory addresses
691
// +------------------------+
692
// | |\ Java thread created by VM does not have glibc
693
// | glibc guard page | - guard, attached Java thread usually has
694
// | |/ 1 glibc guard page.
695
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
696
// | |\
697
// | HotSpot Guard Pages | - red, yellow and reserved pages
698
// | |/
699
// +------------------------+ StackOverflow::stack_reserved_zone_base()
700
// | |\
701
// | Normal Stack | -
702
// | |/
703
// P2 +------------------------+ Thread::stack_base()
704
//
705
// Non-Java thread:
706
//
707
// Low memory addresses
708
// +------------------------+
709
// | |\
710
// | glibc guard page | - usually 1 page
711
// | |/
712
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
713
// | |\
714
// | Normal Stack | -
715
// | |/
716
// P2 +------------------------+ Thread::stack_base()
717
//
718
// ** P1 (aka bottom) and size ( P2 = P1 - size) are the address and stack size returned from
719
// pthread_attr_getstack()
720
721
// Determine the current thread's stack region: *bottom is the lowest
// usable address (P1 in the diagram above) and *size the byte count up
// to the stack base (P2). Three platform-specific paths: Darwin,
// OpenBSD, and the pthread_attr_get_np-based BSDs. Aborts the VM when
// the stack location cannot be established.
static void current_stack_region(address * bottom, size_t * size) {
#ifdef __APPLE__
  pthread_t self = pthread_self();
  void *stacktop = pthread_get_stackaddr_np(self);
  *size = pthread_get_stacksize_np(self);
  // workaround for OS X 10.9.0 (Mavericks)
  // pthread_get_stacksize_np returns 128 pages even though the actual size is 2048 pages
  if (pthread_main_np() == 1) {
    // At least on Mac OS 10.12 we have observed stack sizes not aligned
    // to pages boundaries. This can be provoked by e.g. setrlimit() (ulimit -s xxxx in the
    // shell). Apparently Mac OS actually rounds upwards to next multiple of page size,
    // however, we round downwards here to be on the safe side.
    *size = align_down(*size, getpagesize());

    if ((*size) < (DEFAULT_MAIN_THREAD_STACK_PAGES * (size_t)getpagesize())) {
      char kern_osrelease[256];
      size_t kern_osrelease_size = sizeof(kern_osrelease);
      int ret = sysctlbyname("kern.osrelease", kern_osrelease, &kern_osrelease_size, NULL, 0);
      if (ret == 0) {
        // get the major number, atoi will ignore the minor amd micro portions of the version string
        if (atoi(kern_osrelease) >= OS_X_10_9_0_KERNEL_MAJOR_VERSION) {
          // Affected (Mavericks-or-later) kernel: trust the documented
          // default main-thread stack size instead of the reported one.
          *size = (DEFAULT_MAIN_THREAD_STACK_PAGES*getpagesize());
        }
      }
    }
  }
  // Darwin reports the stack TOP; convert to the bottom address.
  *bottom = (address) stacktop - *size;
#elif defined(__OpenBSD__)
  stack_t ss;
  int rslt = pthread_stackseg_np(pthread_self(), &ss);

  if (rslt != 0)
    fatal("pthread_stackseg_np failed with error = %d", rslt);

  // OpenBSD's ss_sp is the stack top; derive the bottom from the size.
  *bottom = (address)((char *)ss.ss_sp - ss.ss_size);
  *size = ss.ss_size;
#else
  pthread_attr_t attr;

  int rslt = pthread_attr_init(&attr);

  // JVM needs to know exact stack location, abort if it fails
  if (rslt != 0)
    fatal("pthread_attr_init failed with error = %d", rslt);

  rslt = pthread_attr_get_np(pthread_self(), &attr);

  if (rslt != 0)
    fatal("pthread_attr_get_np failed with error = %d", rslt);

  if (pthread_attr_getstackaddr(&attr, (void **)bottom) != 0 ||
      pthread_attr_getstacksize(&attr, size) != 0) {
    fatal("Can not locate current stack attributes!");
  }

  pthread_attr_destroy(&attr);
#endif
  // Sanity: the caller's stack pointer must lie inside the region we found.
  assert(os::current_stack_pointer() >= *bottom &&
         os::current_stack_pointer() < *bottom + *size, "just checking");
}
781
782
// Highest address of the current thread's stack (P2 in the diagram above).
address os::current_stack_base() {
  address stack_bottom;
  size_t  stack_bytes;
  current_stack_region(&stack_bottom, &stack_bytes);
  return stack_bottom + stack_bytes;
}
788
789
size_t os::current_stack_size() {
790
// stack size includes normal stack and HotSpot guard pages
791
address bottom;
792
size_t size;
793
current_stack_region(&bottom, &size);
794
return size;
795
}
796
797
/////////////////////////////////////////////////////////////////////////////
798
// helper functions for fatal error handler
799
800
// Fatal-error-handler helper: dump the register state, the top of the
// stack, and the instructions around the faulting pc from a ucontext.
// Keeps the memory-inspection step last because pc may point to garbage.
void os::print_context(outputStream *st, const void *context) {
  if (context == NULL) return;

  const ucontext_t *uc = (const ucontext_t*)context;
  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, (intptr_t)uc->context_rax);
  st->print(", RBX=" INTPTR_FORMAT, (intptr_t)uc->context_rbx);
  st->print(", RCX=" INTPTR_FORMAT, (intptr_t)uc->context_rcx);
  st->print(", RDX=" INTPTR_FORMAT, (intptr_t)uc->context_rdx);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, (intptr_t)uc->context_rsp);
  st->print(", RBP=" INTPTR_FORMAT, (intptr_t)uc->context_rbp);
  st->print(", RSI=" INTPTR_FORMAT, (intptr_t)uc->context_rsi);
  st->print(", RDI=" INTPTR_FORMAT, (intptr_t)uc->context_rdi);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, (intptr_t)uc->context_r8);
  st->print(", R9 =" INTPTR_FORMAT, (intptr_t)uc->context_r9);
  st->print(", R10=" INTPTR_FORMAT, (intptr_t)uc->context_r10);
  st->print(", R11=" INTPTR_FORMAT, (intptr_t)uc->context_r11);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, (intptr_t)uc->context_r12);
  st->print(", R13=" INTPTR_FORMAT, (intptr_t)uc->context_r13);
  st->print(", R14=" INTPTR_FORMAT, (intptr_t)uc->context_r14);
  st->print(", R15=" INTPTR_FORMAT, (intptr_t)uc->context_r15);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, (intptr_t)uc->context_rip);
  st->print(", EFLAGS=" INTPTR_FORMAT, (intptr_t)uc->context_flags);
  st->print(", ERR=" INTPTR_FORMAT, (intptr_t)uc->context_err);
  st->cr();
  st->print("  TRAPNO=" INTPTR_FORMAT, (intptr_t)uc->context_trapno);
#else
  st->print(  "EAX=" INTPTR_FORMAT, (intptr_t)uc->context_eax);
  st->print(", EBX=" INTPTR_FORMAT, (intptr_t)uc->context_ebx);
  st->print(", ECX=" INTPTR_FORMAT, (intptr_t)uc->context_ecx);
  st->print(", EDX=" INTPTR_FORMAT, (intptr_t)uc->context_edx);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, (intptr_t)uc->context_esp);
  st->print(", EBP=" INTPTR_FORMAT, (intptr_t)uc->context_ebp);
  st->print(", ESI=" INTPTR_FORMAT, (intptr_t)uc->context_esi);
  st->print(", EDI=" INTPTR_FORMAT, (intptr_t)uc->context_edi);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, (intptr_t)uc->context_eip);
  st->print(", EFLAGS=" INTPTR_FORMAT, (intptr_t)uc->context_eflags);
#endif // AMD64
  st->cr();
  st->cr();

  // Hex-dump the first 8 machine words at the saved stack pointer.
  intptr_t *sp = (intptr_t *)os::Bsd::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" INTPTR_FORMAT ")", (intptr_t)sp);
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Posix::ucontext_get_pc(uc);
  print_instructions(st, pc, sizeof(char));
  st->cr();
}
860
861
// Fatal-error-handler helper: for each general-purpose register in the
// ucontext, describe the memory location its value points at (if any).
void os::print_register_info(outputStream *st, const void *context) {
  if (context == NULL) return;

  const ucontext_t *uc = (const ucontext_t*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is horrendously verbose but the layout of the registers in the
  // context does not match how we defined our abstract Register set, so
  // we can't just iterate through the gregs area

  // this is only for the "general purpose" registers

#ifdef AMD64
  st->print("RAX="); print_location(st, uc->context_rax);
  st->print("RBX="); print_location(st, uc->context_rbx);
  st->print("RCX="); print_location(st, uc->context_rcx);
  st->print("RDX="); print_location(st, uc->context_rdx);
  st->print("RSP="); print_location(st, uc->context_rsp);
  st->print("RBP="); print_location(st, uc->context_rbp);
  st->print("RSI="); print_location(st, uc->context_rsi);
  st->print("RDI="); print_location(st, uc->context_rdi);
  st->print("R8 ="); print_location(st, uc->context_r8);
  st->print("R9 ="); print_location(st, uc->context_r9);
  st->print("R10="); print_location(st, uc->context_r10);
  st->print("R11="); print_location(st, uc->context_r11);
  st->print("R12="); print_location(st, uc->context_r12);
  st->print("R13="); print_location(st, uc->context_r13);
  st->print("R14="); print_location(st, uc->context_r14);
  st->print("R15="); print_location(st, uc->context_r15);
#else
  st->print("EAX="); print_location(st, uc->context_eax);
  st->print("EBX="); print_location(st, uc->context_ebx);
  st->print("ECX="); print_location(st, uc->context_ecx);
  st->print("EDX="); print_location(st, uc->context_edx);
  st->print("ESP="); print_location(st, uc->context_esp);
  st->print("EBP="); print_location(st, uc->context_ebp);
  st->print("ESI="); print_location(st, uc->context_esi);
  st->print("EDI="); print_location(st, uc->context_edi);
#endif // AMD64

  st->cr();
}
905
906
// Load the VM's standard x87 control word (32-bit only; inline asm, so
// the body is kept byte-identical). No-op on AMD64.
void os::setup_fpu() {
#ifndef AMD64
  address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
  __asm__ volatile (  "fldcw (%0)" :
                      : "r" (fpu_cntrl) : "memory");
#endif // !AMD64
}
913
914
#ifndef PRODUCT
// Debug-build hook for checking stack alignment; nothing to verify on
// this platform.
void os::verify_stack_alignment() {
}
#endif
918
919
int os::extra_bang_size_in_bytes() {
920
// JDK-8050147 requires the full cache line bang for x86.
921
return VM_Version::L1_line_size();
922
}
923
924