Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
64440 views
1
/*
2
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
// no precompiled headers
26
#include "jvm.h"
27
#include "asm/assembler.inline.hpp"
28
#include "classfile/vmSymbols.hpp"
29
#include "code/icBuffer.hpp"
30
#include "code/vtableStubs.hpp"
31
#include "interpreter/interpreter.hpp"
32
#include "memory/allocation.inline.hpp"
33
#include "nativeInst_arm.hpp"
34
#include "os_share_linux.hpp"
35
#include "prims/jniFastGetField.hpp"
36
#include "prims/jvm_misc.hpp"
37
#include "runtime/arguments.hpp"
38
#include "runtime/frame.inline.hpp"
39
#include "runtime/interfaceSupport.inline.hpp"
40
#include "runtime/java.hpp"
41
#include "runtime/javaCalls.hpp"
42
#include "runtime/mutexLocker.hpp"
43
#include "runtime/osThread.hpp"
44
#include "runtime/safepointMechanism.hpp"
45
#include "runtime/sharedRuntime.hpp"
46
#include "runtime/stubRoutines.hpp"
47
#include "runtime/timer.hpp"
48
#include "signals_posix.hpp"
49
#include "utilities/debug.hpp"
50
#include "utilities/events.hpp"
51
#include "utilities/vmError.hpp"
52
53
// put OS-includes here
54
# include <sys/types.h>
55
# include <sys/mman.h>
56
# include <pthread.h>
57
# include <signal.h>
58
# include <errno.h>
59
# include <dlfcn.h>
60
# include <stdlib.h>
61
# include <stdio.h>
62
# include <unistd.h>
63
# include <sys/resource.h>
64
# include <pthread.h>
65
# include <sys/stat.h>
66
# include <sys/time.h>
67
# include <sys/utsname.h>
68
# include <sys/socket.h>
69
# include <sys/wait.h>
70
# include <pwd.h>
71
# include <poll.h>
72
# include <ucontext.h>
73
# include <fpu_control.h>
74
# include <asm/ptrace.h>
75
76
// Register names used with GCC's explicit register variable extension
// (see os::current_stack_pointer() and os::current_frame() below).
#define SPELL_REG_SP "sp"

// Don't #define SPELL_REG_FP for thumb because it is not safe to use, so this makes sure we never fetch it.
#ifndef __thumb__
#define SPELL_REG_FP "fp"
#endif
82
83
// Returns the current C stack pointer by binding a local variable to the
// stack-pointer register (GCC explicit register variable extension).
address os::current_stack_pointer() {
  register address sp __asm__ (SPELL_REG_SP);
  return sp;
}
87
88
// Provide a word that can never be mistaken for a real mapped address.
char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory:
  // an all-ones pointer is never a valid mapping.
  char* sentinel = (char*) -1;
  return sentinel;
}
92
93
94
#if NGREG == 16
// These definitions are based on the observation that until
// the certain version of GCC mcontext_t was defined as
// a structure containing gregs[NGREG] array with 16 elements.
// In later GCC versions mcontext_t was redefined as struct sigcontext,
// along with NGREG constant changed to 18.
#define arm_pc gregs[15]  // program counter slot
#define arm_sp gregs[13]  // stack pointer slot
#define arm_fp gregs[11]  // frame pointer slot
#define arm_r0 gregs[0]   // first GP register slot (start of r0..r15 block)
#endif

// Number of general-purpose ARM registers saved in the signal context.
#define ARM_REGS_IN_CONTEXT  16
107
108
109
// Extract the saved program counter from a signal ucontext.
address os::Posix::ucontext_get_pc(const ucontext_t* uc) {
  return (address)uc->uc_mcontext.arm_pc;
}
112
113
// Overwrite the saved program counter so the interrupted thread
// resumes execution at pc when the signal handler returns.
void os::Posix::ucontext_set_pc(ucontext_t* uc, address pc) {
  uc->uc_mcontext.arm_pc = (uintx)pc;
}
116
117
// Extract the saved stack pointer from a signal ucontext.
intptr_t* os::Linux::ucontext_get_sp(const ucontext_t* uc) {
  return (intptr_t*)uc->uc_mcontext.arm_sp;
}
120
121
// Extract the saved frame pointer from a signal ucontext.
intptr_t* os::Linux::ucontext_get_fp(const ucontext_t* uc) {
  return (intptr_t*)uc->uc_mcontext.arm_fp;
}
124
125
// Returns true if the frame pointer associated with code at the given pc
// can be trusted when walking native (C) frames.
bool is_safe_for_fp(address pc) {
#ifdef __thumb__
  // Generated code in the code cache follows the VM's frame layout,
  // so its fp is usable even under thumb.
  if (CodeCache::find_blob(pc) != NULL) {
    return true;
  }
  // For thumb C frames, given an fp we have no idea how to access the frame contents.
  return false;
#else
  // Calling os::address_is_in_vm() here leads to a dladdr call. Calling any libc
  // function during os::get_native_stack() can result in a deadlock if JFR is
  // enabled. For now, be more lenient and allow all pc's. There are other
  // frame sanity checks in shared code, and to date they have been sufficient
  // for other platforms.
  //return os::address_is_in_vm(pc);
  return true;
#endif
}
142
143
// Recover pc/sp/fp for the interrupted frame from a signal context.
// Either out-parameter may be NULL when the caller does not need it.
// Returns the saved pc, or NULL when no context is available.
address os::fetch_frame_from_context(const void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {
  const ucontext_t* uc = (const ucontext_t*)ucVoid;

  if (uc == NULL) {
    // No context: report an empty frame.
    if (ret_sp != NULL) *ret_sp = (intptr_t *)NULL;
    if (ret_fp != NULL) *ret_fp = (intptr_t *)NULL;
    return NULL;
  }

  address epc = os::Posix::ucontext_get_pc(uc);
  if (ret_sp != NULL) {
    *ret_sp = os::Linux::ucontext_get_sp(uc);
  }
  if (ret_fp != NULL) {
    intptr_t* fp = os::Linux::ucontext_get_fp(uc);
#ifndef __thumb__
    if (CodeCache::find_blob(epc) == NULL) {
      // It's a C frame. We need to adjust the fp.
      fp += os::C_frame_offset;
    }
#endif
    // Clear FP when stack walking is dangerous so that
    // the frame created will not be walked.
    // However, ensure FP is set correctly when reliable and
    // potentially necessary.
    if (!is_safe_for_fp(epc)) {
      // FP unreliable
      fp = (intptr_t *)NULL;
    }
    *ret_fp = fp;
  }

  return epc;
}
178
179
// Convenience wrapper: build a frame object straight from a signal context.
frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* sp = NULL;
  intptr_t* fp = NULL;
  address pc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, pc);
}
185
186
// Compute the caller frame of a native (C) frame.
frame os::get_sender_for_C_frame(frame* fr) {
#ifdef __thumb__
  // We can't reliably get anything from a thumb C frame.
  return frame();
#else
  address sender_pc = fr->sender_pc();
  if (is_safe_for_fp(sender_pc)) {
    // fp is trustworthy: adjust the raw link by the C frame offset.
    return frame(fr->sender_sp(), fr->link() + os::C_frame_offset, sender_pc);
  }
  // fp unreliable for this pc; build the frame without one.
  return frame(fr->sender_sp(), (intptr_t *)NULL, sender_pc);
#endif
}
199
200
//
// This actually returns two frames up. It does not return os::current_frame(),
// which is the actual current frame. Nor does it return os::get_native_stack(),
// which is the caller. It returns whoever called os::get_native_stack(). Not
// very intuitive, but consistent with how this API is implemented on other
// platforms.
//
frame os::current_frame() {
#ifdef __thumb__
  // We can't reliably get anything from a thumb C frame.
  return frame();
#else
  // Bind fp to the frame-pointer register (GCC explicit register variable).
  register intptr_t* fp __asm__ (SPELL_REG_FP);
  // fp is for os::current_frame. We want the fp for our caller.
  frame myframe((intptr_t*)os::current_stack_pointer(), fp + os::C_frame_offset,
                 CAST_FROM_FN_PTR(address, os::current_frame));
  frame caller_frame = os::get_sender_for_C_frame(&myframe);

  if (os::is_first_C_frame(&caller_frame)) {
    // stack is not walkable
    // Assert below was added because it does not seem like this can ever happen.
    // How can this frame ever be the first C frame since it is called from C code?
    // If it does ever happen, undo the assert and comment here on when/why it happens.
    assert(false, "this should never happen");
    return frame();
  }

  // return frame for our caller's caller
  return os::get_sender_for_C_frame(&caller_frame);
#endif
}
231
232
// Addresses of probe instructions that may raise SIGILL — presumably set
// by the CPU feature detection code (VFP/VFP3-32/SIMD/MP extensions);
// pd_hotspot_signal_handler below recognizes faults at exactly these pcs
// and turns them into a "feature not supported" result.
extern "C" address check_vfp_fault_instr;
extern "C" address check_vfp3_32_fault_instr;
extern "C" address check_simd_fault_instr;
extern "C" address check_mp_ext_fault_instr;

address check_vfp_fault_instr = NULL;
address check_vfp3_32_fault_instr = NULL;
address check_simd_fault_instr = NULL;
address check_mp_ext_fault_instr = NULL;
241
242
243
// Platform-specific part of the HotSpot signal handler for linux-arm.
// Returns true when the signal was recognized and handled (the context
// may have been redirected to a stub); returns false to let the generic
// error-reporting path take over.
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
                                             ucontext_t* uc, JavaThread* thread) {

  // CPU feature probing: a SIGILL at one of the known probe addresses
  // means the instruction is unsupported on this CPU.
  if (sig == SIGILL &&
      ((info->si_addr == (caddr_t)check_simd_fault_instr)
       || info->si_addr == (caddr_t)check_vfp_fault_instr
       || info->si_addr == (caddr_t)check_vfp3_32_fault_instr
       || info->si_addr == (caddr_t)check_mp_ext_fault_instr)) {
    // skip faulty instruction + instruction that sets return value to
    // success and set return value to failure.
    os::Posix::ucontext_set_pc(uc, (address)info->si_addr + 8);
    uc->uc_mcontext.arm_r0 = 0;
    return true;
  }

  address stub = NULL;          // continuation stub to resume at, if any
  address pc = NULL;            // faulting pc from the context
  bool unsafe_access = false;   // fault attributed to an Unsafe memory access

  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Posix::ucontext_get_pc(uc);

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (thread->is_in_full_stack(addr)) {
        // stack overflow
        StackOverflow* overflow_state = thread->stack_overflow_state();
        if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
          overflow_state->disable_stack_yellow_reserved_zone();
          if (thread->thread_state() == _thread_in_Java) {
            // Throw a stack overflow exception. Guard pages will be reenabled
            // while unwinding the stack.
            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
          } else {
            // Thread was in the vm or native code. Return and try to finish.
            return true;
          }
        } else if (overflow_state->in_stack_red_zone(addr)) {
          // Fatal red zone violation. Disable the guard pages and fall through
          // to handle_unexpected_exception way down below.
          overflow_state->disable_stack_red_zone();
          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
        } else {
          // Accessing stack address below sp may cause SEGV if current
          // thread has MAP_GROWSDOWN stack. This should only happen when
          // current thread was created by user code with MAP_GROWSDOWN flag
          // and then attached to VM. See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
            thread->osthread()->set_expanding_stack();
            if (os::Linux::manually_expand_stack(thread, addr)) {
              thread->osthread()->clear_expanding_stack();
              return true;
            }
            thread->osthread()->clear_expanding_stack();
          } else {
            // A SEGV while already expanding the stack is unrecoverable.
            fatal("recursive segv. expanding stack.");
          }
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
        // Safepoint polling page fault: continue in the poll stub.
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
        if ((nm != NULL && nm->has_unsafe_access()) || (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc))) {
          unsafe_access = true;
        }
      } else if (sig == SIGSEGV &&
                 MacroAssembler::uses_implicit_null_check(info->si_addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        if (cb != NULL) {
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
        }
      } else if (sig == SIGILL && *(int *)pc == NativeInstruction::zombie_illegal_instruction) {
        // Zombie
        stub = SharedRuntime::get_handle_wrong_method_stub();
      }
    } else if ((thread->thread_state() == _thread_in_vm ||
                thread->thread_state() == _thread_in_native) &&
               sig == SIGBUS && thread->doing_unsafe_access()) {
      unsafe_access = true;
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if (sig == SIGSEGV || sig == SIGBUS) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }
  }

  if (unsafe_access && stub == NULL) {
    // it can be an unsafe access and we haven't found
    // any other suitable exception reason,
    // so assume it is an unsafe access.
    address next_pc = pc + Assembler::InstructionSize;
    if (UnsafeCopyMemory::contains_pc(pc)) {
      next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
    }
#ifdef __thumb__
    // Preserve the Thumb execution-state bit in the continuation address.
    if (uc->uc_mcontext.arm_cpsr & PSR_T_BIT) {
      next_pc = (address)((intptr_t)next_pc | 0x1);
    }
#endif

    stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
  }

  if (stub != NULL) {
#ifdef __thumb__
    if (uc->uc_mcontext.arm_cpsr & PSR_T_BIT) {
      // Faulted in Thumb state: record the pc with the Thumb bit set.
      intptr_t p = (intptr_t)pc | 0x1;
      pc = (address)p;

      // Clear Thumb mode bit if we're redirected into the ARM ISA based code
      if (((intptr_t)stub & 0x1) == 0) {
        uc->uc_mcontext.arm_cpsr &= ~PSR_T_BIT;
      }
    } else {
      // No Thumb2 compiled stubs are triggered from ARM ISA compiled JIT'd code today.
      // The support needs to be added if that changes
      assert((((intptr_t)stub & 0x1) == 0), "can't return to Thumb code");
    }
#endif

    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    os::Posix::ucontext_set_pc(uc, stub);
    return true;
  }

  return false;
}
392
393
// Per-thread FPU initialization: just applies the global FPU setup.
void os::Linux::init_thread_fpu_state(void) {
  os::setup_fpu();
}
396
397
// No FPU control word is tracked on this platform; always reports 0.
int os::Linux::get_fpu_control_word(void) {
  return 0;
}
400
401
// No FPU control word is tracked on this platform; the request is ignored.
void os::Linux::set_fpu_control_word(int fpu_control) {
  // Nothing to do
}
404
405
// Put the floating-point unit into the mode the VM expects.
void os::setup_fpu() {
#if !defined(__SOFTFP__) && defined(__VFP_FP__)
  // Turn on IEEE-754 compliant VFP mode
  // (writes 0 to the FPSCR via r0; r0 is declared clobbered).
  __asm__ volatile (
    "mov %%r0, #0;"
    "fmxr fpscr, %%r0"
    : /* no output */ : /* no input */ : "r0"
  );
#endif
}
415
416
////////////////////////////////////////////////////////////////////////////////
417
// thread stack
418
419
// Minimum usable stack sizes required to get to user code. Space for
// HotSpot guard pages is added later.
// (DEBUG_ONLY adds an extra 4K in debug builds.)
size_t os::Posix::_compiler_thread_min_stack_allowed = (32 DEBUG_ONLY(+ 4)) * K;
size_t os::Posix::_java_thread_min_stack_allowed = (32 DEBUG_ONLY(+ 4)) * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
424
425
// return default stack size for thr_type
426
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
427
// default stack size (compiler thread needs larger stack)
428
size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
429
return s;
430
}
431
432
/////////////////////////////////////////////////////////////////////////////
433
// helper functions for fatal error handler
434
435
// Fatal-error-handler support: dump the register state, the top of the
// stack, and the instructions around pc from the given signal context.
// Does nothing when no context is available.
void os::print_context(outputStream *st, const void *context) {
  if (context == NULL) return;
  const ucontext_t *uc = (const ucontext_t*)context;

  st->print_cr("Registers:");
  // r0..r15 are laid out contiguously in the mcontext starting at arm_r0.
  intx* reg_area = (intx*)&uc->uc_mcontext.arm_r0;
  for (int r = 0; r < ARM_REGS_IN_CONTEXT; r++) {
    st->print_cr(" %-3s = " INTPTR_FORMAT, as_Register(r)->name(), reg_area[r]);
  }
  // now print flag register
  // (removed the unused "#define U64_FORMAT" that previously leaked out
  //  of this function into the rest of the translation unit)
  st->print_cr(" %-4s = 0x%08lx", "cpsr", uc->uc_mcontext.arm_cpsr);
  st->cr();

  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" INTPTR_FORMAT ")", p2i(sp));
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Posix::ucontext_get_pc(uc);
  print_instructions(st, pc, Assembler::InstructionSize);
  st->cr();
}
461
462
// Fatal-error-handler support: for each saved register, print its value
// and a description of the memory it points to (if any).
void os::print_register_info(outputStream *st, const void *context) {
  if (context == NULL) return;

  const ucontext_t *uc = (const ucontext_t*)context;
  // r0..r15 live contiguously in the mcontext starting at arm_r0.
  intx* regs = (intx*)&uc->uc_mcontext.arm_r0;

  st->print_cr("Register to memory mapping:");
  st->cr();
  for (int i = 0; i < ARM_REGS_IN_CONTEXT; i++) {
    st->print_cr(" %-3s = " INTPTR_FORMAT, as_Register(i)->name(), regs[i]);
    print_location(st, regs[i]);
    st->cr();
  }
  st->cr();
}
477
478
479
480
typedef int64_t cmpxchg_long_func_t(int64_t, int64_t, volatile int64_t*);
481
482
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
483
484
int64_t os::atomic_cmpxchg_long_bootstrap(int64_t compare_value, int64_t exchange_value, volatile int64_t* dest) {
485
// try to use the stub:
486
cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
487
488
if (func != NULL) {
489
os::atomic_cmpxchg_long_func = func;
490
return (*func)(compare_value, exchange_value, dest);
491
}
492
assert(Threads::number_of_threads() == 0, "for bootstrap only");
493
494
int64_t old_value = *dest;
495
if (old_value == compare_value)
496
*dest = exchange_value;
497
return old_value;
498
}
499
typedef int64_t load_long_func_t(const volatile int64_t*);
500
501
load_long_func_t* os::atomic_load_long_func = os::atomic_load_long_bootstrap;
502
503
int64_t os::atomic_load_long_bootstrap(const volatile int64_t* src) {
504
// try to use the stub:
505
load_long_func_t* func = CAST_TO_FN_PTR(load_long_func_t*, StubRoutines::atomic_load_long_entry());
506
507
if (func != NULL) {
508
os::atomic_load_long_func = func;
509
return (*func)(src);
510
}
511
assert(Threads::number_of_threads() == 0, "for bootstrap only");
512
513
int64_t old_value = *src;
514
return old_value;
515
}
516
517
typedef void store_long_func_t(int64_t, volatile int64_t*);
518
519
store_long_func_t* os::atomic_store_long_func = os::atomic_store_long_bootstrap;
520
521
void os::atomic_store_long_bootstrap(int64_t val, volatile int64_t* dest) {
522
// try to use the stub:
523
store_long_func_t* func = CAST_TO_FN_PTR(store_long_func_t*, StubRoutines::atomic_store_long_entry());
524
525
if (func != NULL) {
526
os::atomic_store_long_func = func;
527
return (*func)(val, dest);
528
}
529
assert(Threads::number_of_threads() == 0, "for bootstrap only");
530
531
*dest = val;
532
}
533
534
typedef int32_t atomic_add_func_t(int32_t add_value, volatile int32_t *dest);
535
536
atomic_add_func_t * os::atomic_add_func = os::atomic_add_bootstrap;
537
538
int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest) {
539
atomic_add_func_t * func = CAST_TO_FN_PTR(atomic_add_func_t*,
540
StubRoutines::atomic_add_entry());
541
if (func != NULL) {
542
os::atomic_add_func = func;
543
return (*func)(add_value, dest);
544
}
545
546
int32_t old_value = *dest;
547
*dest = old_value + add_value;
548
return (old_value + add_value);
549
}
550
551
typedef int32_t atomic_xchg_func_t(int32_t exchange_value, volatile int32_t *dest);
552
553
atomic_xchg_func_t * os::atomic_xchg_func = os::atomic_xchg_bootstrap;
554
555
int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest) {
556
atomic_xchg_func_t * func = CAST_TO_FN_PTR(atomic_xchg_func_t*,
557
StubRoutines::atomic_xchg_entry());
558
if (func != NULL) {
559
os::atomic_xchg_func = func;
560
return (*func)(exchange_value, dest);
561
}
562
563
int32_t old_value = *dest;
564
*dest = exchange_value;
565
return (old_value);
566
}
567
568
typedef int32_t cmpxchg_func_t(int32_t, int32_t, volatile int32_t*);
569
570
cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
571
572
int32_t os::atomic_cmpxchg_bootstrap(int32_t compare_value, int32_t exchange_value, volatile int32_t* dest) {
573
// try to use the stub:
574
cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
575
576
if (func != NULL) {
577
os::atomic_cmpxchg_func = func;
578
return (*func)(compare_value, exchange_value, dest);
579
}
580
assert(Threads::number_of_threads() == 0, "for bootstrap only");
581
582
int32_t old_value = *dest;
583
if (old_value == compare_value)
584
*dest = exchange_value;
585
return old_value;
586
}
587
588
589
#ifndef PRODUCT
// No stack-alignment verification is performed on this platform.
void os::verify_stack_alignment() {
}
#endif
593
594
// Extra distance (in bytes) to bang beyond the normal stack banging.
int os::extra_bang_size_in_bytes() {
  // ARM does not require an additional stack bang.
  return 0;
}
598
599