Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/c1/c1_Runtime1.cpp
32285 views
1
/*
2
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "asm/codeBuffer.hpp"
27
#include "c1/c1_CodeStubs.hpp"
28
#include "c1/c1_Defs.hpp"
29
#include "c1/c1_FrameMap.hpp"
30
#include "c1/c1_LIRAssembler.hpp"
31
#include "c1/c1_MacroAssembler.hpp"
32
#include "c1/c1_Runtime1.hpp"
33
#include "classfile/systemDictionary.hpp"
34
#include "classfile/vmSymbols.hpp"
35
#include "code/codeBlob.hpp"
36
#include "code/compiledIC.hpp"
37
#include "code/pcDesc.hpp"
38
#include "code/scopeDesc.hpp"
39
#include "code/vtableStubs.hpp"
40
#include "compiler/disassembler.hpp"
41
#include "gc_interface/collectedHeap.hpp"
42
#include "interpreter/bytecode.hpp"
43
#include "interpreter/interpreter.hpp"
44
#include "jfr/support/jfrIntrinsics.hpp"
45
#include "memory/allocation.inline.hpp"
46
#include "memory/barrierSet.hpp"
47
#include "memory/oopFactory.hpp"
48
#include "memory/resourceArea.hpp"
49
#include "oops/objArrayKlass.hpp"
50
#include "oops/oop.inline.hpp"
51
#include "runtime/biasedLocking.hpp"
52
#include "runtime/compilationPolicy.hpp"
53
#include "runtime/interfaceSupport.hpp"
54
#include "runtime/javaCalls.hpp"
55
#include "runtime/sharedRuntime.hpp"
56
#include "runtime/threadCritical.hpp"
57
#include "runtime/vframe.hpp"
58
#include "runtime/vframeArray.hpp"
59
#include "utilities/copy.hpp"
60
#include "utilities/events.hpp"
61
#include "utilities/macros.hpp"
62
#if INCLUDE_ALL_GCS
63
#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
64
#endif
65
66
// Implementation of StubAssembler
67
68
// A StubAssembler emits code for one runtime stub into 'code' and records
// per-stub metadata (name, id, frame size, runtime-arg count) as it goes.
StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  _stub_id           = stub_id;
  _name              = name;
  _must_gc_arguments = false;         // stubs opt in later via set_info()
  _num_rt_args       = 0;             // latched on first set_num_rt_args() call
  _frame_size        = no_frame_size; // latched on first set_frame_size() call
}
75
76
77
void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
78
_name = name;
79
_must_gc_arguments = must_gc_arguments;
80
}
81
82
83
void StubAssembler::set_frame_size(int size) {
84
if (_frame_size == no_frame_size) {
85
_frame_size = size;
86
}
87
assert(_frame_size == size, "can't change the frame size");
88
}
89
90
91
void StubAssembler::set_num_rt_args(int args) {
92
if (_num_rt_args == 0) {
93
_num_rt_args = args;
94
}
95
assert(_num_rt_args == args, "can't change the number of args");
96
}
97
98
// Implementation of Runtime1
99
100
// One generated CodeBlob per StubID, filled in by generate_blob_for().
CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
// Printable stub names, expanded from the RUNTIME1_STUBS x-macro so they
// stay in sync with the StubID enum ordering.
const char *Runtime1::_blob_names[] = {
  RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
};
104
105
#ifndef PRODUCT
// statistics
// Debug-build-only counters incremented on the various slow paths and
// exception-throw entries below.
int Runtime1::_generic_arraycopy_cnt = 0;
int Runtime1::_primitive_arraycopy_cnt = 0;
int Runtime1::_oop_arraycopy_cnt = 0;
int Runtime1::_generic_arraycopystub_cnt = 0;
int Runtime1::_arraycopy_slowcase_cnt = 0;
int Runtime1::_arraycopy_checkcast_cnt = 0;
int Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
int Runtime1::_new_type_array_slowcase_cnt = 0;
int Runtime1::_new_object_array_slowcase_cnt = 0;
int Runtime1::_new_instance_slowcase_cnt = 0;
int Runtime1::_new_multi_array_slowcase_cnt = 0;
int Runtime1::_monitorenter_slowcase_cnt = 0;
int Runtime1::_monitorexit_slowcase_cnt = 0;
int Runtime1::_patch_code_slowcase_cnt = 0;
int Runtime1::_throw_range_check_exception_count = 0;
int Runtime1::_throw_index_exception_count = 0;
int Runtime1::_throw_div0_exception_count = 0;
int Runtime1::_throw_null_pointer_exception_count = 0;
int Runtime1::_throw_class_cast_exception_count = 0;
int Runtime1::_throw_incompatible_class_change_error_count = 0;
int Runtime1::_throw_array_store_exception_count = 0;
int Runtime1::_throw_count = 0;

// File-local per-element-type arraycopy counters; their addresses are
// handed to generated code via arraycopy_count_address() below.
static int _byte_arraycopy_cnt = 0;
static int _short_arraycopy_cnt = 0;
static int _int_arraycopy_cnt = 0;
static int _long_arraycopy_cnt = 0;
static int _oop_arraycopy_cnt = 0;
135
136
// Returns the address of the statistics counter instrumenting arraycopies
// of the given element type (counters are bumped by generated code).
address Runtime1::arraycopy_count_address(BasicType type) {
  int* counter = NULL;
  switch (type) {
  case T_BOOLEAN:
  case T_BYTE:   counter = &_byte_arraycopy_cnt;  break;
  case T_CHAR:
  case T_SHORT:  counter = &_short_arraycopy_cnt; break;
  case T_FLOAT:
  case T_INT:    counter = &_int_arraycopy_cnt;   break;
  case T_DOUBLE:
  case T_LONG:   counter = &_long_arraycopy_cnt;  break;
  case T_ARRAY:
  case T_OBJECT: counter = &_oop_arraycopy_cnt;   break;
  default:
    ShouldNotReachHere();
    return NULL;
  }
  return (address)counter;
}


#endif
156
157
// Simple helper to see if the caller of a runtime stub which
158
// entered the VM has been deoptimized
159
160
static bool caller_is_deopted() {
161
JavaThread* thread = JavaThread::current();
162
RegisterMap reg_map(thread, false);
163
frame runtime_frame = thread->last_frame();
164
frame caller_frame = runtime_frame.sender(&reg_map);
165
assert(caller_frame.is_compiled_frame(), "must be compiled");
166
return caller_frame.is_deoptimized_frame();
167
}
168
169
// Stress deoptimization
170
static void deopt_caller() {
171
if ( !caller_is_deopted()) {
172
JavaThread* thread = JavaThread::current();
173
RegisterMap reg_map(thread, false);
174
frame runtime_frame = thread->last_frame();
175
frame caller_frame = runtime_frame.sender(&reg_map);
176
Deoptimization::deoptimize_frame(thread, caller_frame.id());
177
assert(caller_is_deopted(), "Must be deoptimized");
178
}
179
}
180
181
182
// Generates the code for the runtime stub identified by 'id' into
// 'buffer_blob', wraps it in a RuntimeStub and installs it in _blobs[id].
void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  ResourceMark rm;
  // create code buffer for code storage
  CodeBuffer code(buffer_blob);

  Compilation::setup_code_buffer(&code, 0);

  // create assembler for code generation
  StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
  // generate code for runtime stub
  OopMapSet* oop_maps;
  oop_maps = generate_code_for(id, sasm);
  assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
         "if stub has an oop map it must have a valid frame size");

#ifdef ASSERT
  // Make sure that stubs that need oopmaps have them
  switch (id) {
    // These stubs don't need to have an oopmap
  case dtrace_object_alloc_id:
  case g1_pre_barrier_slow_id:
  case g1_post_barrier_slow_id:
  case shenandoah_lrb_slow_id:
  case slow_subtype_check_id:
  case fpu2long_stub_id:
  case unwind_exception_id:
  case counter_overflow_id:
#if defined(SPARC) || defined(PPC)
  case handle_exception_nofpu_id:  // Unused on sparc
#endif
    break;

    // All other stubs should have oopmaps
  default:
    assert(oop_maps != NULL, "must have an oopmap");
  }
#endif

  // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
  sasm->align(BytesPerWord);
  // make sure all code is in code buffer
  sasm->flush();
  // create blob - distinguish a few special cases
  CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
                                                 &code,
                                                 CodeOffsets::frame_never_safe,
                                                 sasm->frame_size(),
                                                 oop_maps,
                                                 sasm->must_gc_arguments());
  // install blob
  assert(blob != NULL, "blob must exist");
  _blobs[id] = blob;
}
236
237
238
// One-time Runtime1 startup: platform-specific setup, then generation of
// every runtime stub blob, then (non-product) optional printing of them.
void Runtime1::initialize(BufferBlob* blob) {
  // platform-dependent initialization
  initialize_pd();
  // generate stubs
  for (int id = 0; id < number_of_ids; id++) {
    generate_blob_for(blob, (StubID)id);
  }
  // printing
#ifndef PRODUCT
  if (PrintSimpleStubs) {
    ResourceMark rm;
    for (int id = 0; id < number_of_ids; id++) {
      CodeBlob* b = _blobs[id];
      b->print();
      if (b->oop_maps() != NULL) {
        b->oop_maps()->print();
      }
    }
  }
#endif
}
256
257
258
// Returns the generated blob for the given stub id.
CodeBlob* Runtime1::blob_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  CodeBlob* blob = _blobs[id];
  return blob;
}
262
263
264
// Returns the printable name for the given stub id.
const char* Runtime1::name_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  const char* name = _blob_names[id];
  return name;
}
268
269
// Maps a runtime entry address back to a printable name (used when
// disassembling/debugging generated code). First checks the Runtime1
// stubs, then a list of well-known shared runtime entry points, and
// finally defers to a platform-dependent lookup.
const char* Runtime1::name_for_address(address entry) {
  for (int id = 0; id < number_of_ids; id++) {
    if (entry == entry_for((StubID)id)) return name_for((StubID)id);
  }

  // Expands to an address comparison that returns the function name as a
  // string literal on a match.
#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, os::javaTimeMillis);
  FUNCTION_CASE(entry, os::javaTimeNanos);
  FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
  FUNCTION_CASE(entry, SharedRuntime::d2f);
  FUNCTION_CASE(entry, SharedRuntime::d2i);
  FUNCTION_CASE(entry, SharedRuntime::d2l);
  FUNCTION_CASE(entry, SharedRuntime::dcos);
  FUNCTION_CASE(entry, SharedRuntime::dexp);
  FUNCTION_CASE(entry, SharedRuntime::dlog);
  FUNCTION_CASE(entry, SharedRuntime::dlog10);
  FUNCTION_CASE(entry, SharedRuntime::dpow);
  FUNCTION_CASE(entry, SharedRuntime::drem);
  FUNCTION_CASE(entry, SharedRuntime::dsin);
  FUNCTION_CASE(entry, SharedRuntime::dtan);
  FUNCTION_CASE(entry, SharedRuntime::f2i);
  FUNCTION_CASE(entry, SharedRuntime::f2l);
  FUNCTION_CASE(entry, SharedRuntime::frem);
  FUNCTION_CASE(entry, SharedRuntime::l2d);
  FUNCTION_CASE(entry, SharedRuntime::l2f);
  FUNCTION_CASE(entry, SharedRuntime::ldiv);
  FUNCTION_CASE(entry, SharedRuntime::lmul);
  // Note: the original listed SharedRuntime::lrem twice; the duplicate was
  // dead code (the first match returns) and has been removed.
  FUNCTION_CASE(entry, SharedRuntime::lrem);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
  FUNCTION_CASE(entry, is_instance_of);
  FUNCTION_CASE(entry, trace_block_entry);
#ifdef JFR_HAVE_INTRINSICS
  FUNCTION_CASE(entry, JFR_TIME_FUNCTION);
#endif
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());

#undef FUNCTION_CASE

  // Soft float adds more runtime names.
  return pd_name_for_address(entry);
}
314
315
316
// Slow-path allocation of a plain object instance from compiled code.
// The new oop is returned to the caller through the thread's vm_result.
JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, Klass* klass))
  NOT_PRODUCT(_new_instance_slowcase_cnt++;)

  assert(klass->is_klass(), "not a class");
  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
  instanceKlassHandle h(thread, klass);
  h->check_valid_for_instantiation(true, CHECK);  // rejects abstract/interface klasses
  // make sure klass is initialized
  h->initialize(CHECK);
  // allocate instance and return via TLS
  oop obj = h->allocate_instance(CHECK);
  thread->set_vm_result(obj);
JRT_END
329
330
331
// Slow-path allocation of a primitive-typed array from compiled code.
// Result is returned via the thread's vm_result.
JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, Klass* klass, jint length))
  NOT_PRODUCT(_new_type_array_slowcase_cnt++;)
  // Note: no handle for klass needed since they are not used
  //       anymore after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  thread->set_vm_result(obj);
  // This is pretty rare but this runtime patch is stressful to deoptimization
  // if we deoptimize here so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller();
  }

JRT_END
347
348
349
// Slow-path allocation of an object array from compiled code.
// Result is returned via the thread's vm_result.
JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* thread, Klass* array_klass, jint length))
  NOT_PRODUCT(_new_object_array_slowcase_cnt++;)

  // Note: no handle for klass needed since they are not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  thread->set_vm_result(obj);
  // This is pretty rare but this runtime patch is stressful to deoptimization
  // if we deoptimize here so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller();
  }
JRT_END
366
367
368
// Slow-path allocation of a multi-dimensional array from compiled code.
// 'dims' points at the per-dimension lengths; result via vm_result.
JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims))
  NOT_PRODUCT(_new_multi_array_slowcase_cnt++;)

  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  thread->set_vm_result(obj);
JRT_END
377
378
379
// Diagnostic target installed for stubs without a real implementation;
// just reports which stub id was reached.
JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
JRT_END
382
383
384
// Throws java.lang.ArrayStoreException from compiled code; the message is
// the external name of the offending value's class.
JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* thread, oopDesc* obj))
  ResourceMark rm(thread);
  const char* klass_name = obj->klass()->external_name();
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayStoreException(), klass_name);
JRT_END
389
390
391
// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
392
// associated with the top activation record. The inlinee (that is possibly included in the enclosing
393
// method) method oop is passed as an argument. In order to do that it is embedded in the code as
394
// a constant.
395
// Computes the compilation-policy response to a counter overflow in
// C1-compiled code. 'm' is the (possibly inlined) method whose counter
// overflowed; the enclosing nmethod is recovered from the caller frame.
// For backedge overflows the destination bci of the branch is computed so
// the policy can consider an OSR compilation; returns the OSR nmethod to
// switch to, or NULL.
static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, Method* m) {
  nmethod* osr_nm = NULL;
  methodHandle method(THREAD, m);

  RegisterMap map(THREAD, false);
  frame fr = THREAD->last_frame().sender(&map);
  nmethod* nm = (nmethod*) fr.cb();
  assert(nm!= NULL && nm->is_nmethod(), "Sanity check");
  methodHandle enclosing_method(THREAD, nm->method());

  CompLevel level = (CompLevel)nm->comp_level();
  int bci = InvocationEntryBci;
  if (branch_bci != InvocationEntryBci) {
    // Compute destination bci
    address pc = method()->code_base() + branch_bci;
    Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
    int offset = 0;
    // Decode the branch offset from the bytecode stream: 16-bit signed
    // offset for the ordinary branches, 32-bit for goto_w.
    switch (branch) {
    case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
    case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
    case Bytecodes::_if_icmple: case Bytecodes::_ifle:
    case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
    case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
    case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
    case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
      offset = (int16_t)Bytes::get_Java_u2(pc + 1);
      break;
    case Bytecodes::_goto_w:
      offset = Bytes::get_Java_u4(pc + 1);
      break;
    default: ;
    }
    bci = branch_bci + offset;
  }
  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
  osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");
  return osr_nm;
}
434
435
// Entry point for counter overflow from C1-compiled code. If the policy
// produced an OSR nmethod, the compiled caller frame is deoptimized so
// execution can re-enter through the interpreter and migrate to OSR code.
// Always returns NULL to the stub.
JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, Method* method))
  nmethod* osr_nm;
  JRT_BLOCK
    osr_nm = counter_overflow_helper(thread, bci, method);
    if (osr_nm != NULL) {
      RegisterMap map(thread, false);
      frame fr =  thread->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(thread, fr.id());
    }
  JRT_BLOCK_END
  return NULL;
JRT_END
447
448
extern void vm_exit(int code);
449
450
// Enter this method from compiled code handler below. This is where we transition
451
// to VM mode. This is done as a helper routine so that the method called directly
452
// from compiled code does not have to transition to VM. This allows the entry
453
// method to see if the nmethod that we have just looked up a handler for has
454
// been deoptimized while we were in the vm. This simplifies the assembly code
455
// cpu directories.
456
//
457
// We are entering here from exception stub (via the entry method below)
458
// If there is a compiled exception handler in this method, we will continue there;
459
// otherwise we will unwind the stack and continue at the caller of top frame method
460
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
461
// control the area where we can allow a safepoint. After we exit the safepoint area we can
462
// check to see if the handler we are going to return is now in a nmethod that has
463
// been deoptimized. If that is the case we return the deopt blob
464
// unpack_with_exception entry instead. This makes life for the exception blob easier
465
// because making that same check and diverting is painful from assembly language.
466
// Looks up the continuation address for an exception thrown at 'pc' in
// nmethod-compiled code: either a handler inside 'nm' or an unwind stub.
// Runs inside the VM so class loading during handler lookup is allowed.
// 'nm' is an out-parameter read by the caller to re-check for deopt.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))
  // Reset method handle flag.
  thread->set_is_method_handle_return(false);

  Handle exception(thread, ex);
  nm = CodeCache::find_nmethod(pc);
  assert(nm != NULL, "this is not an nmethod");
  // Adjust the pc as needed.
  if (nm->is_deopt_pc(pc)) {
    RegisterMap map(thread, false);
    frame exception_frame = thread->last_frame().sender(&map);
    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
#ifdef ASSERT
  assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
  assert(exception->is_oop(), "just checking");
  // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
    if (ExitVMOnVerifyError) vm_exit(-1);
    ShouldNotReachHere();
  }
#endif

  // Check the stack guard pages and reenable them if necessary and there is
  // enough space on the stack to do so.  Use fast exceptions only if the guard
  // pages are enabled.
  bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
  if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();

  if (JvmtiExport::can_post_on_exceptions()) {
    // To ensure correct notification of exception catches and throws
    // we have to deoptimize here.  If we attempted to notify the
    // catches and throws during this exception lookup it's possible
    // we could deoptimize on the way out of the VM and end back in
    // the interpreter at the throw site.  This would result in double
    // notifications since the interpreter would also notify about
    // these same catches and throws as it unwound the frame.

    RegisterMap reg_map(thread);
    frame stub_frame = thread->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);

    // We don't really want to deoptimize the nmethod itself since we
    // can actually continue in the exception handler ourselves but I
    // don't see an easy way to have the desired effect.
    Deoptimization::deoptimize_frame(thread, caller_frame.id());
    assert(caller_is_deopted(), "Must be deoptimized");

    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
  if (guard_pages_enabled) {
    address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
    if (fast_continuation != NULL) {
      // Set flag if return address is a method handle call site.
      thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
      return fast_continuation;
    }
  }

  // If the stack guard pages are enabled, check whether there is a handler in
  // the current method.  Otherwise (guard pages disabled), force an unwind and
  // skip the exception cache update (i.e., just leave continuation==NULL).
  address continuation = NULL;
  if (guard_pages_enabled) {

    // New exception handling mechanism can support inlined methods
    // with exception handlers since the mappings are from PC to PC

    // debugging support
    // tracing
    if (TraceExceptions) {
      ttyLocker ttyl;
      ResourceMark rm;
      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ") thrown in compiled method <%s> at PC " INTPTR_FORMAT " for thread " INTPTR_FORMAT "",
                    exception->print_value_string(), p2i((address)exception()), nm->method()->print_value_string(), p2i(pc), p2i(thread));
    }
    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort(exception));

    // Clear out the exception oop and pc since looking up an
    // exception handler can cause class loading, which might throw an
    // exception and those fields are expected to be clear during
    // normal bytecode execution.
    thread->clear_exception_oop_and_pc();

    bool recursive_exception = false;
    continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);
    // If an exception was thrown during exception dispatch, the exception oop may have changed
    thread->set_exception_oop(exception());
    thread->set_exception_pc(pc);

    // the exception cache is used only by non-implicit exceptions
    // Update the exception cache only when there didn't happen
    // another exception during the computation of the compiled
    // exception handler. Checking for exception oop equality is not
    // sufficient because some exceptions are pre-allocated and reused.
    if (continuation != NULL && !recursive_exception) {
      nm->add_handler_for_exception_and_pc(exception, pc, continuation);
    }
  }

  thread->set_vm_result(exception());
  // Set flag if return address is a method handle call site.
  thread->set_is_method_handle_return(nm->is_method_handle_return(pc));

  if (TraceExceptions) {
    ttyLocker ttyl;
    ResourceMark rm;
    tty->print_cr("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT " for exception thrown at PC " PTR_FORMAT,
                  p2i(thread), p2i(continuation), p2i(pc));
  }

  return continuation;
JRT_END
584
585
// Enter this method from compiled code only if there is a Java exception handler
586
// in the method handling the exception.
587
// We are entering here from exception stub. We don't do a normal VM transition here.
588
// We do it in a helper. This is so we can check to see if the nmethod we have just
589
// searched for an exception handler has been deoptimized in the meantime.
590
// Enter this method from compiled code only if there is a Java exception handler
// in the method handling the exception.
// We are entering here from exception stub. We don't do a normal VM transition here.
// We do it in a helper. This is so we can check to see if the nmethod we have just
// searched for an exception handler has been deoptimized in the meantime.
// Exception oop and pc are read from (and left in) thread-local storage.
address Runtime1::exception_handler_for_pc(JavaThread* thread) {
  oop exception = thread->exception_oop();
  address pc = thread->exception_pc();
  // Still in Java mode
  DEBUG_ONLY(ResetNoHandleMark rnhm);
  nmethod* nm = NULL;
  address continuation = NULL;
  {
    // Enter VM mode by calling the helper
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(thread, exception, pc, nm);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Now check to see if the nmethod we were called from is now deoptimized.
  // If so we must return to the deopt blob and deoptimize the nmethod
  if (nm != NULL && caller_is_deopted()) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  assert(continuation != NULL, "no handler found");
  return continuation;
}
613
614
615
// Throws java.lang.ArrayIndexOutOfBoundsException from compiled code with
// the failing index rendered as the exception message.
JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index))
  NOT_PRODUCT(_throw_range_check_exception_count++;)
  char message[jintAsStringSize];  // large enough for any jint in decimal
  sprintf(message, "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
JRT_END
621
622
623
JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* thread, int index))
624
NOT_PRODUCT(_throw_index_exception_count++;)
625
char message[16];
626
sprintf(message, "%d", index);
627
SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
628
JRT_END
629
630
631
// Throws java.lang.ArithmeticException ("/ by zero") from compiled code.
JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* thread))
  NOT_PRODUCT(_throw_div0_exception_count++;)
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END
635
636
637
// Throws java.lang.NullPointerException from compiled code.
JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* thread))
  NOT_PRODUCT(_throw_null_pointer_exception_count++;)
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END
641
642
643
// Throws java.lang.ClassCastException from compiled code; the message is
// built from the external name of the object's actual class.
JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* thread, oopDesc* object))
  NOT_PRODUCT(_throw_class_cast_exception_count++;)
  ResourceMark rm(thread);
  char* message = SharedRuntime::generate_class_cast_message(
    thread, object->klass()->external_name());
  SharedRuntime::throw_and_post_jvmti_exception(
    thread, vmSymbols::java_lang_ClassCastException(), message);
JRT_END
651
652
653
// Throws java.lang.IncompatibleClassChangeError from compiled code.
JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* thread))
  NOT_PRODUCT(_throw_incompatible_class_change_error_count++;)
  ResourceMark rm(thread);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END
658
659
660
// Slow-path monitor enter called from compiled code after the inline fast
// path failed (or when fast locking is disabled).
JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock))
  NOT_PRODUCT(_monitorenter_slowcase_cnt++;)
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
  Handle h_obj(thread, obj);
  assert(h_obj()->is_oop(), "must be NULL or an object");
  if (UseBiasedLocking) {
    // Retry fast entry if bias is revoked to avoid unnecessary inflation
    ObjectSynchronizer::fast_enter(h_obj, lock->lock(), true, CHECK);
  } else {
    if (UseFastLocking) {
      // When using fast locking, the compiled code has already tried the fast case
      assert(obj == lock->obj(), "must match");
      ObjectSynchronizer::slow_enter(h_obj, lock->lock(), THREAD);
    } else {
      // No fast path was attempted: record the object in the lock slot first
      lock->set_obj(obj);
      ObjectSynchronizer::fast_enter(h_obj, lock->lock(), false, THREAD);
    }
  }
JRT_END
681
682
683
// Slow-path monitor exit called from compiled code. Declared JRT_LEAF:
// it must not block or safepoint, and cannot throw.
JRT_LEAF(void, Runtime1::monitorexit(JavaThread* thread, BasicObjectLock* lock))
  NOT_PRODUCT(_monitorexit_slowcase_cnt++;)
  assert(thread == JavaThread::current(), "threads must correspond");
  assert(thread->last_Java_sp(), "last_Java_sp must be set");
  // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
  EXCEPTION_MARK;

  oop obj = lock->obj();
  assert(obj->is_oop(), "must be NULL or an object");
  if (UseFastLocking) {
    // When using fast locking, the compiled code has already tried the fast case
    ObjectSynchronizer::slow_exit(obj, lock->lock(), THREAD);
  } else {
    ObjectSynchronizer::fast_exit(obj, lock->lock(), THREAD);
  }
JRT_END
699
700
// Cf. OptoRuntime::deoptimize_caller_frame
701
// Deoptimizes the compiled frame that called into this runtime stub;
// execution resumes in the interpreter after the stub returns.
JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread))
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);

  // We are coming from a compiled method; check this is true.
  assert(CodeCache::find_nmethod(caller_frame.pc()) != NULL, "sanity");

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(thread, caller_frame.id());

  // Return to the now deoptimized frame.
JRT_END
716
717
718
// Resolves the field accessed by the field bytecode at 'bci' in 'caller'
// (loading/initializing classes as needed) and returns the klass that
// declares the field. Used by the code-patching machinery below.
static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
  Bytecode_field field_access(caller, bci);
  // This can be static or non-static field access
  Bytecodes::Code code = field_access.code();

  // We must load class, initialize class and resolve the field
  fieldDescriptor result; // initialize class if needed
  constantPoolHandle constants(THREAD, caller->constants());
  LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
  return result.field_holder();
}
729
730
731
//
732
// This routine patches sites where a class wasn't loaded or
733
// initialized at the time the code was generated. It handles
734
// references to classes, fields and forcing of initialization. Most
735
// of the cases are straightforward and involving simply forcing
736
// resolution of a class, rewriting the instruction stream with the
737
// needed constant and replacing the call in this function with the
738
// patched code. The case for static field is more complicated since
739
// the thread which is in the process of initializing a class can
740
// access its static fields but other threads can't so the code
741
// either has to deoptimize when this case is detected or execute a
742
// check that the current thread is the initializing thread. The
743
// current
744
//
745
// Patches basically look like this:
746
//
747
//
748
// patch_site: jmp patch stub ;; will be patched
749
// continue: ...
750
// ...
751
// ...
752
// ...
753
//
754
// They have a stub which looks like this:
755
//
756
// ;; patch body
757
// movl <const>, reg (for class constants)
758
// <or> movl [reg1 + <const>], reg (for field offsets)
759
// <or> movl reg, [reg1 + <const>] (for field offsets)
760
// <being_init offset> <bytes to copy> <bytes to skip>
761
// patch_stub: call Runtime1::patch_code (through a runtime stub)
762
// jmp patch_site
763
//
764
//
765
// A normal patch is done by rewriting the patch body, usually a move,
766
// and then copying it into place over top of the jmp instruction
767
// being careful to flush caches and doing it in an MP-safe way. The
768
// constants following the patch body are used to find various pieces
769
// of the patch relative to the call site for Runtime1::patch_code.
770
// The case for getstatic and putstatic is more complicated because
771
// getstatic and putstatic have special semantics when executing while
772
// the class is being initialized. getstatic/putstatic on a class
773
// which is being_initialized may be executed by the initializing
774
// thread but other threads have to block when they execute it. This
775
// is accomplished in compiled code by executing a test of the current
776
// thread against the initializing thread of the class. It's emitted
777
// as boilerplate in their stub which allows the patched code to be
778
// executed before it's copied back into the main body of the nmethod.
779
//
780
// being_init: get_thread(<tmp reg>
781
// cmpl [reg1 + <init_thread_offset>], <tmp reg>
782
// jne patch_stub
783
// movl [reg1 + <const>], reg (for field offsets) <or>
784
// movl reg, [reg1 + <const>] (for field offsets)
785
// jmp continue
786
// <being_init offset> <bytes to copy> <bytes to skip>
787
// patch_stub: jmp Runtim1::patch_code (through a runtime stub)
788
// jmp patch_site
789
//
790
// If the class is being initialized the patch body is rewritten and
791
// the patch site is rewritten to jump to being_init, instead of
792
// patch_stub. Whenever this code is executed it checks the current
793
// thread against the intializing thread so other threads will enter
794
// the runtime and end up blocked waiting the class to finish
795
// initializing inside the calls to resolve_field below. The
796
// initializing class will continue on it's way. Once the class is
797
// fully_initialized, the intializing_thread of the class becomes
798
// NULL, so the next thread to execute this code will fail the test,
799
// call into patch_code and complete the patching process by copying
800
// the patch body back into the main part of the nmethod and resume
801
// executing.
802
//
803
//
804
805
// Slow path behind the C1 patching stubs (access_field_patching_id,
// load_klass_patching_id, load_mirror_patching_id,
// load_appendix_patching_id).  Resolves whatever could not be resolved
// at code-generation time — a field offset, a Klass*, a java mirror
// oop, or an invokehandle/invokedynamic appendix — and then rewrites
// the patch site in the calling nmethod under the Patching_lock.
// See the large block comment above for the patch-site/stub layout.
JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
  NOT_PRODUCT(_patch_code_slowcase_cnt++;)

#ifdef AARCH64
  // AArch64 does not patch C1-generated code.
  ShouldNotReachHere();
#endif

  ResourceMark rm(thread);
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // last java frame on stack
  vframeStream vfst(thread, true);
  assert(!vfst.at_end(), "Java frame must exist");

  methodHandle caller_method(THREAD, vfst.method());
  // Note that caller_method->code() may not be same as caller_code because of OSR's
  // Note also that in the presence of inlining it is not guaranteed
  // that caller_method() == caller_code->method()

  int bci = vfst.bci();
  Bytecodes::Code code = caller_method()->java_code_at(bci);

#ifndef PRODUCT
  // this is used by assertions in the access_field_patching_id
  BasicType patch_field_type = T_ILLEGAL;
#endif // PRODUCT
  bool deoptimize_for_volatile = false;
  int patch_field_offset = -1;
  KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
  KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
  Handle mirror(THREAD, NULL);          // oop needed by load_mirror_patching code
  Handle appendix(THREAD, NULL);        // oop needed by appendix_patching code
  bool load_klass_or_mirror_patch_id =
    (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);

  // Phase 1: resolve the constant/offset this patch needs.  This may
  // run Java code (class initialization) and therefore may safepoint.
  if (stub_id == Runtime1::access_field_patching_id) {

    Bytecode_field field_access(caller_method, bci);
    fieldDescriptor result; // initialize class if needed
    Bytecodes::Code code = field_access.code();
    constantPoolHandle constants(THREAD, caller_method->constants());
    LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
    patch_field_offset = result.offset();

    // If we're patching a field which is volatile then at compile it
    // must not have been know to be volatile, so the generated code
    // isn't correct for a volatile reference.  The nmethod has to be
    // deoptimized so that the code can be regenerated correctly.
    // This check is only needed for access_field_patching since this
    // is the path for patching field offsets.  load_klass is only
    // used for patching references to oops which don't need special
    // handling in the volatile case.
    deoptimize_for_volatile = result.access_flags().is_volatile();

#ifndef PRODUCT
    patch_field_type = result.field_type();
#endif
  } else if (load_klass_or_mirror_patch_id) {
    Klass* k = NULL;
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
        { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
          init_klass = KlassHandle(THREAD, klass);
          mirror = Handle(THREAD, klass->java_mirror());
        }
        break;
      case Bytecodes::_new:
        { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(bnew.index(), CHECK);
        }
        break;
      case Bytecodes::_multianewarray:
        { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(mna.index(), CHECK);
        }
        break;
      case Bytecodes::_instanceof:
        { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(io.index(), CHECK);
        }
        break;
      case Bytecodes::_checkcast:
        { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(cc.index(), CHECK);
        }
        break;
      case Bytecodes::_anewarray:
        { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
          Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
          // anewarray needs the array klass of the element klass.
          k = ek->array_klass(CHECK);
        }
        break;
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        {
          Bytecode_loadconstant cc(caller_method, bci);
          oop m = cc.resolve_constant(CHECK);
          mirror = Handle(THREAD, m);
        }
        break;
      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
    }
    // convert to handle
    load_klass = KlassHandle(THREAD, k);
  } else if (stub_id == load_appendix_patching_id) {
    Bytecode_invoke bytecode(caller_method, bci);
    Bytecodes::Code bc = bytecode.invoke_code();

    CallInfo info;
    constantPoolHandle pool(thread, caller_method->constants());
    int index = bytecode.index();
    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
    appendix = info.resolved_appendix();
    // Also record the resolution in the constant pool cache so the
    // interpreter and future compiles see the same call target.
    switch (bc) {
      case Bytecodes::_invokehandle: {
        int cache_index = ConstantPool::decode_cpcache_index(index, true);
        assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
        pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
        break;
      }
      case Bytecodes::_invokedynamic: {
        pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
        break;
      }
      default: fatal("unexpected bytecode for load_appendix_patching_id");
    }
  } else {
    ShouldNotReachHere();
  }

  if (deoptimize_for_volatile) {
    // At compile time we assumed the field wasn't volatile but after
    // loading it turns out it was volatile so we have to throw the
    // compiled code out and let it be regenerated.
    if (TracePatching) {
      tty->print_cr("Deoptimizing for patching volatile field reference");
    }
    // It's possible the nmethod was invalidated in the last
    // safepoint, but if it's still alive then make it not_entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != NULL) {
      nm->make_not_entrant();
    }

    Deoptimization::deoptimize_frame(thread, caller_frame.id());

    // Return to the now deoptimized frame.
  }

  // Phase 2: now copy code back.  Done under the Patching_lock so only
  // one thread rewrites the patch site at a time.
  {
    MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
    //
    // Deoptimization may have happened while we waited for the lock.
    // In that case we don't bother to do any patching we just return
    // and let the deopt happen
    if (!caller_is_deopted()) {
      NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
      address instr_pc = jump->jump_destination();
      NativeInstruction* ni = nativeInstruction_at(instr_pc);
      if (ni->is_jump() ) {
        // the jump has not been patched yet
        // The jump destination is slow case and therefore not part of the stubs
        // (stubs are only for StaticCalls)

        // format of buffer
        //    ....
        //    instr byte 0     <-- copy_buff
        //    instr byte 1
        //    ..
        //    instr byte n-1
        //      n
        //    ....             <-- call destination

        // Decode the byte layout emitted by PatchingStub: the three
        // bytes just before the call site hold the copy/skip counts
        // and the being_initialized entry offset.
        address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
        unsigned char* byte_count = (unsigned char*) (stub_location - 1);
        unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
        unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
        address copy_buff = stub_location - *byte_skip - *byte_count;
        address being_initialized_entry = stub_location - *being_initialized_entry_offset;
        if (TracePatching) {
          ttyLocker ttyl;
          tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT "  (%s)", Bytecodes::name(code), bci,
                        p2i(instr_pc), (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass");
          nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
          assert(caller_code != NULL, "nmethod not found");

          // NOTE we use pc() not original_pc() because we already know they are
          // identical otherwise we'd have never entered this block of code

          OopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());
          assert(map != NULL, "null check");
          map->print();
          tty->cr();

          Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
        }
        // depending on the code below, do_patch says whether to copy the patch body back into the nmethod
        bool do_patch = true;
        if (stub_id == Runtime1::access_field_patching_id) {
          // The offset may not be correct if the class was not loaded at code generation time.
          // Set it now.
          NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
          assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
          assert(patch_field_offset >= 0, "illegal offset");
          n_move->add_offset_in_bytes(patch_field_offset);
        } else if (load_klass_or_mirror_patch_id) {
          // If a getstatic or putstatic is referencing a klass which
          // isn't fully initialized, the patch body isn't copied into
          // place until initialization is complete.  In this case the
          // patch site is setup so that any threads besides the
          // initializing thread are forced to come into the VM and
          // block.
          do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
                     InstanceKlass::cast(init_klass())->is_initialized();
          NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
          if (jump->jump_destination() == being_initialized_entry) {
            assert(do_patch == true, "initialization must be complete at this point");
          } else {
            // patch the instruction <move reg, klass>
            NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);

            assert(n_copy->data() == 0 ||
                   n_copy->data() == (intptr_t)Universe::non_oop_word(),
                   "illegal init value");
            if (stub_id == Runtime1::load_klass_patching_id) {
              assert(load_klass() != NULL, "klass not set");
              n_copy->set_data((intx) (load_klass()));
            } else {
              assert(mirror() != NULL, "klass not set");
              // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
              n_copy->set_data(cast_from_oop<intx>(mirror()));
            }

            if (TracePatching) {
              Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
            }
          }
        } else if (stub_id == Runtime1::load_appendix_patching_id) {
          NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
          assert(n_copy->data() == 0 ||
                 n_copy->data() == (intptr_t)Universe::non_oop_word(),
                 "illegal init value");
          n_copy->set_data(cast_from_oop<intx>(appendix()));

          if (TracePatching) {
            Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
          }
        } else {
          ShouldNotReachHere();
        }

#if defined(SPARC) || defined(PPC)
        if (load_klass_or_mirror_patch_id ||
            stub_id == Runtime1::load_appendix_patching_id) {
          // Update the location in the nmethod with the proper
          // metadata.  When the code was generated, a NULL was stuffed
          // in the metadata table and that table needs to be update to
          // have the right value.  On intel the value is kept
          // directly in the instruction instead of in the metadata
          // table, so set_data above effectively updated the value.
          nmethod* nm = CodeCache::find_nmethod(instr_pc);
          assert(nm != NULL, "invalid nmethod_pc");
          RelocIterator mds(nm, copy_buff, copy_buff + 1);
          bool found = false;
          while (mds.next() && !found) {
            if (mds.type() == relocInfo::oop_type) {
              assert(stub_id == Runtime1::load_mirror_patching_id ||
                     stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
              oop_Relocation* r = mds.oop_reloc();
              oop* oop_adr = r->oop_addr();
              *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
              r->fix_oop_relocation();
              found = true;
            } else if (mds.type() == relocInfo::metadata_type) {
              assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
              metadata_Relocation* r = mds.metadata_reloc();
              Metadata** metadata_adr = r->metadata_addr();
              *metadata_adr = load_klass();
              r->fix_metadata_relocation();
              found = true;
            }
          }
          assert(found, "the metadata must exist!");
        }
#endif
        if (do_patch) {
          // replace instructions
          // first replace the tail, then the call
#if defined(ARM) && !defined(AARCH32)
          if((load_klass_or_mirror_patch_id ||
              stub_id == Runtime1::load_appendix_patching_id) &&
              nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            address addr = NULL;
            assert(nm != NULL, "invalid nmethod_pc");
            RelocIterator mds(nm, copy_buff, copy_buff + 1);
            while (mds.next()) {
              if (mds.type() == relocInfo::oop_type) {
                assert(stub_id == Runtime1::load_mirror_patching_id ||
                       stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
                oop_Relocation* r = mds.oop_reloc();
                addr = (address)r->oop_addr();
                break;
              } else if (mds.type() == relocInfo::metadata_type) {
                assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
                metadata_Relocation* r = mds.metadata_reloc();
                addr = (address)r->metadata_addr();
                break;
              }
            }
            assert(addr != NULL, "metadata relocation must exist");
            copy_buff -= *byte_count;
            NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
            n_copy2->set_pc_relative_offset(addr, instr_pc);
          }
#endif

          // Copy everything after the first instruction byte-by-byte,
          // then atomically replace the leading jump itself.
          for (int i = NativeCall::instruction_size; i < *byte_count; i++) {
            address ptr = copy_buff + i;
            int a_byte = (*ptr) & 0xFF;
            address dst = instr_pc + i;
            *(unsigned char*)dst = (unsigned char) a_byte;
          }
          ICache::invalidate_range(instr_pc, *byte_count);
          NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);

          if (load_klass_or_mirror_patch_id ||
              stub_id == Runtime1::load_appendix_patching_id) {
            relocInfo::relocType rtype =
              (stub_id == Runtime1::load_klass_patching_id) ?
                                   relocInfo::metadata_type :
                                   relocInfo::oop_type;
            // update relocInfo to metadata
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            assert(nm != NULL, "invalid nmethod_pc");

            // The old patch site is now a move instruction so update
            // the reloc info so that it will get updated during
            // future GCs.
            RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
            relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
                                                     relocInfo::none, rtype);
#ifdef SPARC
            // Sparc takes two relocations for an metadata so update the second one.
            address instr_pc2 = instr_pc + NativeMovConstReg::add_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
#endif
#ifdef PPC
          { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
          }
#endif
          }
#ifdef AARCH32
          // AArch32 have (disabled) relocation for offset, should enable it back
          if (stub_id == Runtime1::access_field_patching_id) {
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
            relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
                                                     relocInfo::none, relocInfo::section_word_type);
          }
#endif

        } else {
          // Initialization is not complete: redirect the patch site to
          // the being_initialized entry instead of copying the body.
          ICache::invalidate_range(copy_buff, *byte_count);
          NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
        }
      }
    }
  }

  // If we are patching in a non-perm oop, make sure the nmethod
  // is on the right list.
  if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
                              (appendix.not_null() && appendix->is_scavengable()))) {
    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
    if (!nm->on_scavenge_root_list()) {
      CodeCache::add_scavenge_root_nmethod(nm);
    }

    // Since we've patched some oops in the nmethod,
    // (re)register it with the heap.
    Universe::heap()->register_nmethod(nm);
  }
JRT_END

//
// Entry point for compiled code.  We want to patch a nmethod.
// We don't do a normal VM transition here because we want to
// know after the patching is complete and any safepoint(s) are taken
// if the calling nmethod was deoptimized.  We do this by calling a
// helper method which does the normal VM transition and when it
// completes we can check for deoptimization.  This simplifies the
// assembly code in the cpu directories.
//
#ifndef TARGET_ARCH_aarch64  // these patching entry points are not built for aarch64 (patch_code calls ShouldNotReachHere() there)
// Entry point from compiled code for load_klass_patching_id.
// Returns non-zero if the calling nmethod was deoptimized during patching.
int Runtime1::move_klass_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode; handles may be created inside this scope only.

    ResetNoHandleMark rnhm;
    patch_code(thread, load_klass_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
}

// Entry point from compiled code for load_mirror_patching_id.
// Returns non-zero if the calling nmethod was deoptimized during patching.
int Runtime1::move_mirror_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode; handles may be created inside this scope only.

    ResetNoHandleMark rnhm;
    patch_code(thread, load_mirror_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
}

// Entry point from compiled code for load_appendix_patching_id.
// Returns non-zero if the calling nmethod was deoptimized during patching.
int Runtime1::move_appendix_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode; handles may be created inside this scope only.

    ResetNoHandleMark rnhm;
    patch_code(thread, load_appendix_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
}

//
// Entry point for compiled code.  We want to patch a nmethod.
// We don't do a normal VM transition here because we want to
// know after the patching is complete and any safepoint(s) are taken
// if the calling nmethod was deoptimized.  We do this by calling a
// helper method which does the normal VM transition and when it
// completes we can check for deoptimization.  This simplifies the
// assembly code in the cpu directories.
//

// Entry point from compiled code for access_field_patching_id.
// Returns non-zero if the calling nmethod was deoptimized during patching.
int Runtime1::access_field_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode; handles may be created inside this scope only.

    ResetNoHandleMark rnhm;
    patch_code(thread, access_field_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
JRT_END  // NOTE: closes the plain '{' above; JRT_END expands to '}' here
#endif   // !TARGET_ARCH_aarch64

// Debug tracing hook: prints the id of each basic block as it is entered.
JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
  // for now we just print out the block id
  tty->print("%d ", block_id);
JRT_END


// Array copy return codes, used by Runtime1::arraycopy and
// obj_arraycopy_work below.
enum {
  ac_failed = -1, // arraycopy failed (caller must fall back to the slow path)
  ac_ok = 0       // arraycopy succeeded
};


// Below length is the # elements copied.
// Copies 'length' object references from src_addr (inside src) to
// dst_addr (inside dst), applying the GC pre/post write barriers.
// T is oop or narrowOop depending on UseCompressedOops.
// Returns ac_ok on success, ac_failed if an element-type check would
// be required (caller then takes the checked slow path).
template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                          oopDesc* dst, T* dst_addr,
                                          int length) {

  // For performance reasons, we assume we are using a card marking write
  // barrier. The assert will fail if this is not the case.
  // Note that we use the non-virtual inlineable variant of write_ref_array.
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");

#if INCLUDE_ALL_GCS
  if (UseShenandoahGC) {
    // Shenandoah needs its own arraycopy barrier before the copy.
    ShenandoahBarrierSet::barrier_set()->arraycopy_barrier(src_addr, dst_addr, length);
  }
#endif

  if (src == dst) {
    // same object, no check
    bs->write_ref_array_pre(dst_addr, length);
    Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
    bs->write_ref_array((HeapWord*)dst_addr, length);
    return ac_ok;
  } else {
    Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();
    Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();
    if (stype == bound || stype->is_subtype_of(bound)) {
      // Elements are guaranteed to be subtypes, so no check necessary
      bs->write_ref_array_pre(dst_addr, length);
      Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
      bs->write_ref_array((HeapWord*)dst_addr, length);
      return ac_ok;
    }
  }
  return ac_failed;
}

// fast and direct copy of arrays; returning -1, means that an exception may be thrown
// and we did not copy anything
JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
#ifndef PRODUCT
  _generic_arraycopy_cnt++; // Slow-path oop array copy
#endif

  // Validate arguments; any failure returns ac_failed and the caller
  // falls back to the path that throws the proper exception.
  if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
  if (!dst->is_array() || !src->is_array()) return ac_failed;
  // The unsigned comparisons also reject pos+length overflow.
  if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
  if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;

  if (length == 0) return ac_ok;
  if (src->is_typeArray()) {
    Klass* klass_oop = src->klass();
    // Primitive arrays must have exactly the same element type.
    if (klass_oop != dst->klass()) return ac_failed;
    TypeArrayKlass* klass = TypeArrayKlass::cast(klass_oop);
    const int l2es = klass->log2_element_size();
    const int ihs = klass->array_header_in_bytes() / wordSize;
    char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es);
    char* dst_addr = (char*) ((oopDesc**)dst + ihs) + (dst_pos << l2es);
    // Potential problem: memmove is not guaranteed to be word atomic
    // Revisit in Merlin
    memmove(dst_addr, src_addr, length << l2es);
    return ac_ok;
  } else if (src->is_objArray() && dst->is_objArray()) {
    if (UseCompressedOops) {
      narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
      narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
      return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
    } else {
      oop *src_addr = objArrayOop(src)->obj_at_addr<oop>(src_pos);
      oop *dst_addr = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);
      return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
    }
  }
  return ac_failed;
JRT_END


JRT_LEAF(void, Runtime1::primitive_arraycopy(HeapWord* src, HeapWord* dst, int length))
1390
#ifndef PRODUCT
1391
_primitive_arraycopy_cnt++;
1392
#endif
1393
1394
if (length == 0) return;
1395
// Not guaranteed to be word atomic, but that doesn't matter
1396
// for anything but an oop array, which is covered by oop_arraycopy.
1397
Copy::conjoint_jbytes(src, dst, length);
1398
JRT_END
1399
1400
JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
1401
#ifndef PRODUCT
1402
_oop_arraycopy_cnt++;
1403
#endif
1404
1405
if (num == 0) return;
1406
BarrierSet* bs = Universe::heap()->barrier_set();
1407
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
1408
assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
1409
if (UseCompressedOops) {
1410
bs->write_ref_array_pre((narrowOop*)dst, num);
1411
Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num);
1412
} else {
1413
bs->write_ref_array_pre((oop*)dst, num);
1414
Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
1415
}
1416
bs->write_ref_array(dst, num);
1417
JRT_END
1418
1419
1420
JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
1421
// had to return int instead of bool, otherwise there may be a mismatch
1422
// between the C calling convention and the Java one.
1423
// e.g., on x86, GCC may clear only %al when returning a bool false, but
1424
// JVM takes the whole %eax as the return value, which may misinterpret
1425
// the return value as a boolean true.
1426
1427
assert(mirror != NULL, "should null-check on mirror before calling");
1428
Klass* k = java_lang_Class::as_Klass(mirror);
1429
return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
1430
JRT_END
1431
1432
// Called when a C1 loop predicate fails: invalidates the caller
// nmethod, records the trap in the method's MDO (building one if
// needed) and deoptimizes the caller frame.
JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
  ResourceMark rm;

  assert(!TieredCompilation, "incompatible with tiered compilation");

  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert (nm != NULL, "no more nmethod?");
  nm->make_not_entrant();

  methodHandle m(nm->method());
  MethodData* mdo = m->method_data();

  if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
    // Build an MDO. Ignore errors like OutOfMemory;
    // that simply means we won't have an MDO to update.
    Method::build_interpreter_method_data(m, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
      CLEAR_PENDING_EXCEPTION;
    }
    mdo = m->method_data();
  }

  if (mdo != NULL) {
    // Record the trap so recompilation policy can see it.
    mdo->inc_trap_count(Deoptimization::Reason_none);
  }

  if (TracePredicateFailedTraps) {
    stringStream ss1, ss2;
    vframeStream vfst(thread);
    methodHandle inlinee = methodHandle(vfst.method());
    inlinee->print_short_name(&ss1);
    m->print_short_name(&ss2);
    tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.as_string(), vfst.bci(), ss2.as_string(), p2i(caller_frame.pc()));
  }


  Deoptimization::deoptimize_frame(thread, caller_frame.id());

JRT_END

#ifndef PRODUCT
// Dumps all C1 runtime counters (resolution, arraycopy, slow-case and
// throw counts) to tty.  Debug builds only.
void Runtime1::print_statistics() {
  tty->print_cr("C1 Runtime statistics:");
  tty->print_cr(" _resolve_invoke_virtual_cnt:     %d", SharedRuntime::_resolve_virtual_ctr);
  tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %d", SharedRuntime::_resolve_opt_virtual_ctr);
  tty->print_cr(" _resolve_invoke_static_cnt:      %d", SharedRuntime::_resolve_static_ctr);
  tty->print_cr(" _handle_wrong_method_cnt:        %d", SharedRuntime::_wrong_method_ctr);
  tty->print_cr(" _ic_miss_cnt:                    %d", SharedRuntime::_ic_miss_ctr);
  tty->print_cr(" _generic_arraycopy_cnt:          %d", _generic_arraycopy_cnt);
  tty->print_cr(" _generic_arraycopystub_cnt:      %d", _generic_arraycopystub_cnt);
  tty->print_cr(" _byte_arraycopy_cnt:             %d", _byte_arraycopy_cnt);
  tty->print_cr(" _short_arraycopy_cnt:            %d", _short_arraycopy_cnt);
  tty->print_cr(" _int_arraycopy_cnt:              %d", _int_arraycopy_cnt);
  tty->print_cr(" _long_arraycopy_cnt:             %d", _long_arraycopy_cnt);
  tty->print_cr(" _primitive_arraycopy_cnt:        %d", _primitive_arraycopy_cnt);
  tty->print_cr(" _oop_arraycopy_cnt (C):          %d", Runtime1::_oop_arraycopy_cnt);
  tty->print_cr(" _oop_arraycopy_cnt (stub):       %d", _oop_arraycopy_cnt);
  tty->print_cr(" _arraycopy_slowcase_cnt:         %d", _arraycopy_slowcase_cnt);
  tty->print_cr(" _arraycopy_checkcast_cnt:        %d", _arraycopy_checkcast_cnt);
  tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt);

  tty->print_cr(" _new_type_array_slowcase_cnt:    %d", _new_type_array_slowcase_cnt);
  tty->print_cr(" _new_object_array_slowcase_cnt:  %d", _new_object_array_slowcase_cnt);
  tty->print_cr(" _new_instance_slowcase_cnt:      %d", _new_instance_slowcase_cnt);
  tty->print_cr(" _new_multi_array_slowcase_cnt:   %d", _new_multi_array_slowcase_cnt);
  tty->print_cr(" _monitorenter_slowcase_cnt:      %d", _monitorenter_slowcase_cnt);
  tty->print_cr(" _monitorexit_slowcase_cnt:       %d", _monitorexit_slowcase_cnt);
  tty->print_cr(" _patch_code_slowcase_cnt:        %d", _patch_code_slowcase_cnt);

  tty->print_cr(" _throw_range_check_exception_count:            %d:", _throw_range_check_exception_count);
  tty->print_cr(" _throw_index_exception_count:                  %d:", _throw_index_exception_count);
  tty->print_cr(" _throw_div0_exception_count:                   %d:", _throw_div0_exception_count);
  tty->print_cr(" _throw_null_pointer_exception_count:           %d:", _throw_null_pointer_exception_count);
  tty->print_cr(" _throw_class_cast_exception_count:             %d:", _throw_class_cast_exception_count);
  tty->print_cr(" _throw_incompatible_class_change_error_count:  %d:", _throw_incompatible_class_change_error_count);
  tty->print_cr(" _throw_array_store_exception_count:            %d:", _throw_array_store_exception_count);
  tty->print_cr(" _throw_count:                                  %d:", _throw_count);

  SharedRuntime::print_ic_miss_histogram();
  tty->cr();
}
#endif // PRODUCT
