GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/interpreter/interpreterRuntime.cpp
/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm_io.h"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/linkResolver.hpp"
#include "interpreter/templateTable.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/constantPool.hpp"
#include "oops/cpCache.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Helper class to access current interpreter state
class LastFrameAccessor : public StackObj {
  frame _last_frame;
public:
  LastFrameAccessor(JavaThread* current) {
    assert(current == Thread::current(), "sanity");
    _last_frame = current->last_frame();
  }
  bool is_interpreted_frame() const { return _last_frame.is_interpreted_frame(); }
  Method* method() const { return _last_frame.interpreter_frame_method(); }
  address bcp() const { return _last_frame.interpreter_frame_bcp(); }
  int bci() const { return _last_frame.interpreter_frame_bci(); }
  address mdp() const { return _last_frame.interpreter_frame_mdp(); }

  void set_bcp(address bcp) { _last_frame.interpreter_frame_set_bcp(bcp); }
  void set_mdp(address dp) { _last_frame.interpreter_frame_set_mdp(dp); }

  // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
  Bytecodes::Code code() const { return Bytecodes::code_at(method(), bcp()); }

  Bytecode bytecode() const { return Bytecode(method(), bcp()); }
  int get_index_u1(Bytecodes::Code bc) const { return bytecode().get_index_u1(bc); }
  int get_index_u2(Bytecodes::Code bc) const { return bytecode().get_index_u2(bc); }
  int get_index_u2_cpcache(Bytecodes::Code bc) const
                                               { return bytecode().get_index_u2_cpcache(bc); }
  int get_index_u4(Bytecodes::Code bc) const { return bytecode().get_index_u4(bc); }
  int number_of_dimensions() const { return bcp()[3]; }
  ConstantPoolCacheEntry* cache_entry_at(int i) const
                                               { return method()->constants()->cache()->entry_at(i); }
  ConstantPoolCacheEntry* cache_entry() const { return cache_entry_at(Bytes::get_native_u2(bcp() + 1)); }

  oop callee_receiver(Symbol* signature) {
    return _last_frame.interpreter_callee_receiver(signature);
  }
  BasicObjectLock* monitor_begin() const {
    return _last_frame.interpreter_frame_monitor_begin();
  }
  BasicObjectLock* monitor_end() const {
    return _last_frame.interpreter_frame_monitor_end();
  }
  BasicObjectLock* next_monitor(BasicObjectLock* current) const {
    return _last_frame.next_monitor_in_interpreter_frame(current);
  }

  frame& get_frame() { return _last_frame; }
};
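
// Typical usage in the JRT entry points below: construct a LastFrameAccessor
// from the current JavaThread and query the caller's interpreter frame, e.g.
//
//   LastFrameAccessor last_frame(current);
//   Method* m   = last_frame.method();   // method being interpreted
//   int     bci = last_frame.bci();      // current bytecode index
//
// The accessor is a StackObj, so it is only valid for the duration of the
// runtime call that constructed it.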

//------------------------------------------------------------------------------------------------------------------------
// State accessors

void InterpreterRuntime::set_bcp_and_mdp(address bcp, JavaThread* current) {
  LastFrameAccessor last_frame(current);
  last_frame.set_bcp(bcp);
  if (ProfileInterpreter) {
    // ProfileTraps uses MDOs independently of ProfileInterpreter.
    // That is why we must check both ProfileInterpreter and mdo != NULL.
    MethodData* mdo = last_frame.method()->method_data();
    if (mdo != NULL) {
      NEEDS_CLEANUP;
      last_frame.set_mdp(mdo->bci_to_dp(last_frame.bci()));
    }
  }
}

//------------------------------------------------------------------------------------------------------------------------
// Constants


JRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* current, bool wide))
  // access constant pool
  LastFrameAccessor last_frame(current);
  ConstantPool* pool = last_frame.method()->constants();
  int index = wide ? last_frame.get_index_u2(Bytecodes::_ldc_w) : last_frame.get_index_u1(Bytecodes::_ldc);
  constantTag tag = pool->tag_at(index);

  assert (tag.is_unresolved_klass() || tag.is_klass(), "wrong ldc call");
  Klass* klass = pool->klass_at(index, CHECK);
  oop java_class = klass->java_mirror();
  current->set_vm_result(java_class);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* current, Bytecodes::Code bytecode)) {
  assert(bytecode == Bytecodes::_ldc ||
         bytecode == Bytecodes::_ldc_w ||
         bytecode == Bytecodes::_ldc2_w ||
         bytecode == Bytecodes::_fast_aldc ||
         bytecode == Bytecodes::_fast_aldc_w, "wrong bc");
  ResourceMark rm(current);
  const bool is_fast_aldc = (bytecode == Bytecodes::_fast_aldc ||
                             bytecode == Bytecodes::_fast_aldc_w);
  LastFrameAccessor last_frame(current);
  methodHandle m (current, last_frame.method());
  Bytecode_loadconstant ldc(m, last_frame.bci());

  // Double-check the size. (Condy can have any type.)
  BasicType type = ldc.result_type();
  switch (type2size[type]) {
  case 2: guarantee(bytecode == Bytecodes::_ldc2_w, ""); break;
  case 1: guarantee(bytecode != Bytecodes::_ldc2_w, ""); break;
  default: ShouldNotReachHere();
  }

  // Resolve the constant. This does not do unboxing.
  // But it does replace Universe::the_null_sentinel by null.
  oop result = ldc.resolve_constant(CHECK);
  assert(result != NULL || is_fast_aldc, "null result only valid for fast_aldc");

#ifdef ASSERT
  {
    // The bytecode wrappers aren't GC-safe so construct a new one
    Bytecode_loadconstant ldc2(m, last_frame.bci());
    int rindex = ldc2.cache_index();
    if (rindex < 0)
      rindex = m->constants()->cp_to_object_index(ldc2.pool_index());
    if (rindex >= 0) {
      oop coop = m->constants()->resolved_references()->obj_at(rindex);
      oop roop = (result == NULL ? Universe::the_null_sentinel() : result);
      assert(roop == coop, "expected result for assembly code");
    }
  }
#endif
  current->set_vm_result(result);
  if (!is_fast_aldc) {
    // Tell the interpreter how to unbox the primitive.
    guarantee(java_lang_boxing_object::is_instance(result, type), "");
    int offset = java_lang_boxing_object::value_offset(type);
    intptr_t flags = ((as_TosState(type) << ConstantPoolCacheEntry::tos_state_shift)
                      | (offset & ConstantPoolCacheEntry::field_index_mask));
    current->set_vm_result_2((Metadata*)flags);
  }
}
JRT_END
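
// Note on the flags value produced above for the non-fast ldc case: the low
// bits carry the offset of the value field inside the box object returned by
// resolve_constant() (java.lang.Integer.value, java.lang.Long.value, etc.)
// and the tos_state bits carry the TosState of the primitive, so the
// interpreter stub can unbox the result directly. For example, a condy of
// type int yields tos_state == itos plus the offset of java.lang.Integer.value.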


//------------------------------------------------------------------------------------------------------------------------
// Allocation

JRT_ENTRY(void, InterpreterRuntime::_new(JavaThread* current, ConstantPool* pool, int index))
  Klass* k = pool->klass_at(index, CHECK);
  InstanceKlass* klass = InstanceKlass::cast(k);

  // Make sure we are not instantiating an abstract klass
  klass->check_valid_for_instantiation(true, CHECK);

  // Make sure klass is initialized
  klass->initialize(CHECK);

  // At this point the class may not be fully initialized
  // because of recursive initialization. If it is fully
  // initialized & has_finalized is not set, we rewrite
  // it into its fast version (Note: no locking is needed
  // here since this is an atomic byte write and can be
  // done more than once).
  //
  // Note: In case of classes with has_finalized we don't
  //       rewrite since that saves us an extra check in
  //       the fast version which then would call the
  //       slow version anyway (and do a call back into
  //       Java).
  //       If we have a breakpoint, then we don't rewrite
  //       because the _breakpoint bytecode would be lost.
  oop obj = klass->allocate_instance(CHECK);
  current->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, InterpreterRuntime::newarray(JavaThread* current, BasicType type, jint size))
  oop obj = oopFactory::new_typeArray(type, size, CHECK);
  current->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, InterpreterRuntime::anewarray(JavaThread* current, ConstantPool* pool, int index, jint size))
  Klass* klass = pool->klass_at(index, CHECK);
  objArrayOop obj = oopFactory::new_objArray(klass, size, CHECK);
  current->set_vm_result(obj);
JRT_END
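
// A worked example for multianewarray below: for `new int[2][3][4]` the
// bytecode's dimensions operand gives nof_dims == 3, and the three dimension
// sizes sit in interpreter stack slots addressed relative to
// first_size_address using local offsets (see the loop's comment). The loop
// copies them into a plain jint array, resource-allocated only if there are
// more than small_dims (10) dimensions, before calling
// ArrayKlass::multi_allocate.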


JRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* current, jint* first_size_address))
  // We may want to pass in more arguments - could make this slightly faster
  LastFrameAccessor last_frame(current);
  ConstantPool* constants = last_frame.method()->constants();
  int i = last_frame.get_index_u2(Bytecodes::_multianewarray);
  Klass* klass = constants->klass_at(i, CHECK);
  int nof_dims = last_frame.number_of_dimensions();
  assert(klass->is_klass(), "not a class");
  assert(nof_dims >= 1, "multianewarray rank must be nonzero");

  // We must create an array of jints to pass to multi_allocate.
  ResourceMark rm(current);
  const int small_dims = 10;
  jint dim_array[small_dims];
  jint *dims = &dim_array[0];
  if (nof_dims > small_dims) {
    dims = (jint*) NEW_RESOURCE_ARRAY(jint, nof_dims);
  }
  for (int index = 0; index < nof_dims; index++) {
    // offset from first_size_address is addressed as local[index]
    int n = Interpreter::local_offset_in_bytes(index)/jintSize;
    dims[index] = first_size_address[n];
  }
  oop obj = ArrayKlass::cast(klass)->multi_allocate(nof_dims, dims, CHECK);
  current->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, InterpreterRuntime::register_finalizer(JavaThread* current, oopDesc* obj))
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END


// Quicken instance-of and check-cast bytecodes
JRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* current))
  // Force resolving; quicken the bytecode
  LastFrameAccessor last_frame(current);
  int which = last_frame.get_index_u2(Bytecodes::_checkcast);
  ConstantPool* cpool = last_frame.method()->constants();
  // We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded
  // program we might have seen an unquick'd bytecode in the interpreter but have another
  // thread quicken the bytecode before we get here.
  // assert( cpool->tag_at(which).is_unresolved_klass(), "should only come here to quicken bytecodes" );
  Klass* klass = cpool->klass_at(which, CHECK);
  current->set_vm_result_2(klass);
JRT_END


//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void InterpreterRuntime::note_trap_inner(JavaThread* current, int reason,
                                         const methodHandle& trap_method, int trap_bci) {
  if (trap_method.not_null()) {
    MethodData* trap_mdo = trap_method->method_data();
    if (trap_mdo == NULL) {
      ExceptionMark em(current);
      JavaThread* THREAD = current; // For exception macros.
      Method::build_interpreter_method_data(trap_method, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        // Only metaspace OOM is expected. No Java code executed.
        assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())),
               "we expect only an OOM error here");
        CLEAR_PENDING_EXCEPTION;
      }
      trap_mdo = trap_method->method_data();
      // and fall through...
    }
    if (trap_mdo != NULL) {
      // Update per-method count of trap events. The interpreter
      // is updating the MDO to simulate the effect of compiler traps.
      Deoptimization::update_method_data_from_interpreter(trap_mdo, trap_bci, reason);
    }
  }
}

// Assume the compiler is (or will be) interested in this event.
// If necessary, create an MDO to hold the information, and record it.
void InterpreterRuntime::note_trap(JavaThread* current, int reason) {
  assert(ProfileTraps, "call me only if profiling");
  LastFrameAccessor last_frame(current);
  methodHandle trap_method(current, last_frame.method());
  int trap_bci = trap_method->bci_from(last_frame.bcp());
  note_trap_inner(current, reason, trap_method, trap_bci);
}

static Handle get_preinitialized_exception(Klass* k, TRAPS) {
  // get klass
  InstanceKlass* klass = InstanceKlass::cast(k);
  assert(klass->is_initialized(),
         "this klass should have been initialized during VM initialization");
  // create instance - do not call constructor since we may have no
  // (java) stack space left (should assert constructor is empty)
  Handle exception;
  oop exception_oop = klass->allocate_instance(CHECK_(exception));
  exception = Handle(THREAD, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  return exception;
}

// Special handling for stack overflow: since we don't have any (java) stack
// space left we use the pre-allocated & pre-initialized StackOverflowError
// klass to create a stack overflow error instance. We do not call its
// constructor for the same reason (it is empty, anyway).
JRT_ENTRY(void, InterpreterRuntime::throw_StackOverflowError(JavaThread* current))
  Handle exception = get_preinitialized_exception(
                       vmClasses::StackOverflowError_klass(),
                       CHECK);
  // Increment counter for hs_err file reporting
  Atomic::inc(&Exceptions::_stack_overflow_errors);
  THROW_HANDLE(exception);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_delayed_StackOverflowError(JavaThread* current))
  Handle exception = get_preinitialized_exception(
                       vmClasses::StackOverflowError_klass(),
                       CHECK);
  java_lang_Throwable::set_message(exception(),
                                   Universe::delayed_stack_overflow_error_message());
  // Increment counter for hs_err file reporting
  Atomic::inc(&Exceptions::_stack_overflow_errors);
  THROW_HANDLE(exception);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::create_exception(JavaThread* current, char* name, char* message))
  // lookup exception klass
  TempNewSymbol s = SymbolTable::new_symbol(name);
  if (ProfileTraps) {
    if (s == vmSymbols::java_lang_ArithmeticException()) {
      note_trap(current, Deoptimization::Reason_div0_check);
    } else if (s == vmSymbols::java_lang_NullPointerException()) {
      note_trap(current, Deoptimization::Reason_null_check);
    }
  }
  // create exception
  Handle exception = Exceptions::new_exception(current, s, message);
  current->set_vm_result(exception());
JRT_END


JRT_ENTRY(void, InterpreterRuntime::create_klass_exception(JavaThread* current, char* name, oopDesc* obj))
  // Produce the error message first because note_trap can safepoint
  ResourceMark rm(current);
  const char* klass_name = obj->klass()->external_name();
  // lookup exception klass
  TempNewSymbol s = SymbolTable::new_symbol(name);
  if (ProfileTraps) {
    note_trap(current, Deoptimization::Reason_class_check);
  }
  // create exception, with klass name as detail message
  Handle exception = Exceptions::new_exception(current, s, klass_name);
  current->set_vm_result(exception());
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException(JavaThread* current, arrayOopDesc* a, jint index))
  // Produce the error message first because note_trap can safepoint
  ResourceMark rm(current);
  stringStream ss;
  ss.print("Index %d out of bounds for length %d", index, a->length());

  if (ProfileTraps) {
    note_trap(current, Deoptimization::Reason_range_check);
  }

  THROW_MSG(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), ss.as_string());
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_ClassCastException(
  JavaThread* current, oopDesc* obj))

  // Produce the error message first because note_trap can safepoint
  ResourceMark rm(current);
  char* message = SharedRuntime::generate_class_cast_message(
    current, obj->klass());

  if (ProfileTraps) {
    note_trap(current, Deoptimization::Reason_class_check);
  }

  // create exception
  THROW_MSG(vmSymbols::java_lang_ClassCastException(), message);
JRT_END

// exception_handler_for_exception(...) returns the continuation address,
// the exception oop (via TLS) and sets the bci/bcp for the continuation.
// The exception oop is returned to make sure it is preserved over GC (it
// is only on the stack if the exception was thrown explicitly via athrow).
// During this operation, the expression stack contains the values for the
// bci where the exception happened. If the exception was propagated back
// from a call, the expression stack contains the values for the bci at the
// invoke w/o arguments (i.e., as if one were inside the call).
JRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThread* current, oopDesc* exception))
  // We get here after we have unwound from a callee throwing an exception
  // into the interpreter. Any deferred stack processing is notified of
  // the event via the StackWatermarkSet.
  StackWatermarkSet::after_unwind(current);

  LastFrameAccessor last_frame(current);
  Handle h_exception(current, exception);
  methodHandle h_method (current, last_frame.method());
  constantPoolHandle h_constants(current, h_method->constants());
  bool should_repeat;
  int handler_bci;
  int current_bci = last_frame.bci();

  if (current->frames_to_pop_failed_realloc() > 0) {
    // Allocation of scalar replaced object used in this frame
    // failed. Unconditionally pop the frame.
    current->dec_frames_to_pop_failed_realloc();
    current->set_vm_result(h_exception());
    // If the method is synchronized we already unlocked the monitor
    // during deoptimization so the interpreter needs to skip it when
    // the frame is popped.
    current->set_do_not_unlock_if_synchronized(true);
    return Interpreter::remove_activation_entry();
  }

  // Need to do this check first since when _do_not_unlock_if_synchronized
  // is set, we don't want to trigger any classloading which may make calls
  // into java, or surprisingly find a matching exception handler for bci 0
  // since at this moment the method hasn't been "officially" entered yet.
  if (current->do_not_unlock_if_synchronized()) {
    ResourceMark rm;
    assert(current_bci == 0, "bci isn't zero for do_not_unlock_if_synchronized");
    current->set_vm_result(exception);
    return Interpreter::remove_activation_entry();
  }

  do {
    should_repeat = false;

    // assertions
    assert(h_exception.not_null(), "NULL exceptions should be handled by athrow");
    // Check that exception is a subclass of Throwable.
    assert(h_exception->is_a(vmClasses::Throwable_klass()),
           "Exception not subclass of Throwable");

    // tracing
    if (log_is_enabled(Info, exceptions)) {
      ResourceMark rm(current);
      stringStream tempst;
      tempst.print("interpreter method <%s>\n"
                   " at bci %d for thread " INTPTR_FORMAT " (%s)",
                   h_method->print_value_string(), current_bci, p2i(current), current->name());
      Exceptions::log_exception(h_exception, tempst.as_string());
    }
    // Don't go paging in something which won't be used.
    //     else if (extable->length() == 0) {
    //       // disabled for now - interpreter is not using shortcut yet
    //       // (shortcut is not to call runtime if we have no exception handlers)
    //       // warning("performance bug: should not call runtime if method has no exception handlers");
    //     }
    // for AbortVMOnException flag
    Exceptions::debug_check_abort(h_exception);

    // exception handler lookup
    Klass* klass = h_exception->klass();
    handler_bci = Method::fast_exception_handler_bci_for(h_method, klass, current_bci, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // We threw an exception while trying to find the exception handler.
      // Transfer the new exception to the exception handle which will
      // be set into thread local storage, and do another lookup for an
      // exception handler for this exception, this time starting at the
      // BCI of the exception handler which caused the exception to be
      // thrown (bug 4307310).
      h_exception = Handle(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      if (handler_bci >= 0) {
        current_bci = handler_bci;
        should_repeat = true;
      }
    }
  } while (should_repeat == true);

#if INCLUDE_JVMCI
  if (EnableJVMCI && h_method->method_data() != NULL) {
    ResourceMark rm(current);
    ProfileData* pdata = h_method->method_data()->allocate_bci_to_data(current_bci, NULL);
    if (pdata != NULL && pdata->is_BitData()) {
      BitData* bit_data = (BitData*) pdata;
      bit_data->set_exception_seen();
    }
  }
#endif

  // notify JVMTI of an exception throw; JVMTI will detect if this is a first
  // time throw or a stack unwinding throw and accordingly notify the debugger
  if (JvmtiExport::can_post_on_exceptions()) {
    JvmtiExport::post_exception_throw(current, h_method(), last_frame.bcp(), h_exception());
  }

  address continuation = NULL;
  address handler_pc = NULL;
  if (handler_bci < 0 || !current->stack_overflow_state()->reguard_stack((address) &continuation)) {
    // Forward exception to callee (leaving bci/bcp untouched) because (a) no
    // handler in this method, or (b) after a stack overflow there is not yet
    // enough stack space available to reprotect the stack.
    continuation = Interpreter::remove_activation_entry();
#if COMPILER2_OR_JVMCI
    // Count this for compilation purposes
    h_method->interpreter_throwout_increment(THREAD);
#endif
  } else {
    // handler in this method => change bci/bcp to handler bci/bcp and continue there
    handler_pc = h_method->code_base() + handler_bci;
#ifndef ZERO
    set_bcp_and_mdp(handler_pc, current);
    continuation = Interpreter::dispatch_table(vtos)[*handler_pc];
#else
    continuation = (address)(intptr_t) handler_bci;
#endif
  }

  // notify debugger of an exception catch
  // (this is good for exceptions caught in native methods as well)
  if (JvmtiExport::can_post_on_exceptions()) {
    JvmtiExport::notice_unwind_due_to_exception(current, h_method(), handler_pc, h_exception(), (handler_pc != NULL));
  }

  current->set_vm_result(h_exception());
  return continuation;
JRT_END
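
// The continuation returned above is one of two things: the
// remove_activation entry (no handler in this method, or the stack could not
// be reguarded after an overflow), which unwinds to the caller with the
// exception oop still delivered via the thread's vm_result, or the
// dispatch-table entry for the bytecode at the handler's bcp after bcp/mdp
// have been repositioned, which resumes interpretation inside the matching
// handler (on the Zero interpreter the handler bci itself is encoded instead).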


JRT_ENTRY(void, InterpreterRuntime::throw_pending_exception(JavaThread* current))
  assert(current->has_pending_exception(), "must only be called if there's an exception pending");
  // nothing to do - eventually we should remove this code entirely (see comments @ call sites)
JRT_END


JRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodError(JavaThread* current))
  THROW(vmSymbols::java_lang_AbstractMethodError());
JRT_END

// This method is called from the "abstract_entry" of the interpreter.
// At that point, the arguments have already been removed from the stack
// and therefore we don't have the receiver object at our fingertips. (Though,
// on some platforms the receiver still resides in a register...). Thus,
// we have no choice but to print an error message not containing the receiver
// type.
JRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodErrorWithMethod(JavaThread* current,
                                                                        Method* missingMethod))
  ResourceMark rm(current);
  assert(missingMethod != NULL, "sanity");
  methodHandle m(current, missingMethod);
  LinkResolver::throw_abstract_method_error(m, THREAD);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodErrorVerbose(JavaThread* current,
                                                                     Klass* recvKlass,
                                                                     Method* missingMethod))
  ResourceMark rm(current);
  methodHandle mh = methodHandle(current, missingMethod);
  LinkResolver::throw_abstract_method_error(mh, recvKlass, THREAD);
JRT_END


JRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeError(JavaThread* current))
  THROW(vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose(JavaThread* current,
                                                                              Klass* recvKlass,
                                                                              Klass* interfaceKlass))
  ResourceMark rm(current);
  char buf[1000];
  buf[0] = '\0';
  jio_snprintf(buf, sizeof(buf),
               "Class %s does not implement the requested interface %s",
               recvKlass ? recvKlass->external_name() : "NULL",
               interfaceKlass ? interfaceKlass->external_name() : "NULL");
  THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::throw_NullPointerException(JavaThread* current))
  THROW(vmSymbols::java_lang_NullPointerException());
JRT_END

//------------------------------------------------------------------------------------------------------------------------
// Fields
//

void InterpreterRuntime::resolve_get_put(JavaThread* current, Bytecodes::Code bytecode) {
  // resolve field
  fieldDescriptor info;
  LastFrameAccessor last_frame(current);
  constantPoolHandle pool(current, last_frame.method()->constants());
  methodHandle m(current, last_frame.method());
  bool is_put    = (bytecode == Bytecodes::_putfield || bytecode == Bytecodes::_nofast_putfield ||
                    bytecode == Bytecodes::_putstatic);
  bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);

  {
    JvmtiHideSingleStepping jhss(current);
    JavaThread* THREAD = current; // For exception macros.
    LinkResolver::resolve_field_access(info, pool, last_frame.get_index_u2_cpcache(bytecode),
                                       m, bytecode, CHECK);
  } // end JvmtiHideSingleStepping

  // check if link resolution caused cpCache to be updated
  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
  if (cp_cache_entry->is_resolved(bytecode)) return;

  // compute auxiliary field attributes
  TosState state = as_TosState(info.field_type());

  // Resolution of put instructions on final fields is delayed. That is required so that
  // exceptions are thrown at the correct place (when the instruction is actually invoked).
  // If we do not resolve an instruction in the current pass, leaving the put_code
  // set to zero will cause the next put instruction to the same field to reresolve.

  // Resolution of put instructions to final instance fields with invalid updates (i.e.,
  // to final instance fields with updates originating from a method different than <init>)
  // is inhibited. A putfield instruction targeting an instance final field must throw
  // an IllegalAccessError if the instruction is not in an instance
  // initializer method <init>. If resolution were not inhibited, a putfield
  // in an initializer method could be resolved in the initializer. Subsequent
  // putfield instructions to the same field would then use cached information.
  // As a result, those instructions would not pass through the VM. That is,
  // checks in resolve_field_access() would not be executed for those instructions
  // and the required IllegalAccessError would not be thrown.
  //
  // Also, we need to delay resolving getstatic and putstatic instructions until the
  // class is initialized. This is required so that access to the static
  // field will call the initialization function every time until the class
  // is completely initialized, as described in section 2.17.5 of the JVM Specification.
  InstanceKlass* klass = info.field_holder();
  bool uninitialized_static = is_static && !klass->is_initialized();
  bool has_initialized_final_update = info.field_holder()->major_version() >= 53 &&
                                      info.has_initialized_final_update();
  assert(!(has_initialized_final_update && !info.access_flags().is_final()), "Fields with initialized final updates must be final");

  Bytecodes::Code get_code = (Bytecodes::Code)0;
  Bytecodes::Code put_code = (Bytecodes::Code)0;
  if (!uninitialized_static) {
    get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield);
    if ((is_put && !has_initialized_final_update) || !info.access_flags().is_final()) {
      put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
    }
  }

  cp_cache_entry->set_field(
    get_code,
    put_code,
    info.field_holder(),
    info.index(),
    info.offset(),
    state,
    info.access_flags().is_final(),
    info.access_flags().is_volatile()
  );
}
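
// Two practical consequences of the code above: puts to final fields that may
// need the <init>/<clinit> legality check keep put_code at zero, so every such
// putfield/putstatic re-enters resolve_field_access() and gets the
// IllegalAccessError check there; and getstatic/putstatic on a class that is
// not yet fully initialized is likewise left unresolved, so each access comes
// back through the VM and triggers (or waits for) class initialization until
// it completes.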


//------------------------------------------------------------------------------------------------------------------------
// Synchronization
//
// The interpreter's synchronization code is factored out so that it can
// be shared by method invocation and synchronized blocks.
//%note synchronization_3

//%note monitor_1
JRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* current, BasicObjectLock* elem))
#ifdef ASSERT
  current->last_frame().interpreter_frame_verify_monitor(elem);
#endif
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
  Handle h_obj(current, elem->obj());
  assert(Universe::heap()->is_in_or_null(h_obj()),
         "must be NULL or an object");
  ObjectSynchronizer::enter(h_obj, elem->lock(), current);
  assert(Universe::heap()->is_in_or_null(elem->obj()),
         "must be NULL or an object");
#ifdef ASSERT
  current->last_frame().interpreter_frame_verify_monitor(elem);
#endif
JRT_END


JRT_LEAF(void, InterpreterRuntime::monitorexit(BasicObjectLock* elem))
  oop obj = elem->obj();
  assert(Universe::heap()->is_in(obj), "must be an object");
  // The object could become unlocked through a JNI call, which we have no other checks for.
  // Give a fatal message if CheckJNICalls. Otherwise we ignore it.
  if (obj->is_unlocked()) {
    if (CheckJNICalls) {
      fatal("Object has been unlocked by JNI");
    }
    return;
  }
  ObjectSynchronizer::exit(obj, elem->lock(), JavaThread::current());
  // Free entry. If it is not cleared, the exception handling code will try to unlock the monitor
  // again at method exit or in the case of an exception.
  elem->set_obj(NULL);
JRT_END


JRT_ENTRY(void, InterpreterRuntime::throw_illegal_monitor_state_exception(JavaThread* current))
  THROW(vmSymbols::java_lang_IllegalMonitorStateException());
JRT_END


JRT_ENTRY(void, InterpreterRuntime::new_illegal_monitor_state_exception(JavaThread* current))
  // Returns an illegal exception to install into the current thread. The
  // pending_exception flag is cleared so normal exception handling does not
  // trigger. Any current installed exception will be overwritten. This
  // method will be called during an exception unwind.

  assert(!HAS_PENDING_EXCEPTION, "no pending exception");
  Handle exception(current, current->vm_result());
  assert(exception() != NULL, "vm result should be set");
  current->set_vm_result(NULL); // clear vm result before continuing (may cause memory leaks and assert failures)
  if (!exception->is_a(vmClasses::ThreadDeath_klass())) {
    exception = get_preinitialized_exception(
                  vmClasses::IllegalMonitorStateException_klass(),
                  CATCH);
  }
  current->set_vm_result(exception());
JRT_END


//------------------------------------------------------------------------------------------------------------------------
// Invokes

JRT_ENTRY(Bytecodes::Code, InterpreterRuntime::get_original_bytecode_at(JavaThread* current, Method* method, address bcp))
  return method->orig_bytecode_at(method->bci_from(bcp));
JRT_END

JRT_ENTRY(void, InterpreterRuntime::set_original_bytecode_at(JavaThread* current, Method* method, address bcp, Bytecodes::Code new_code))
  method->set_orig_bytecode_at(method->bci_from(bcp), new_code);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::_breakpoint(JavaThread* current, Method* method, address bcp))
  JvmtiExport::post_raw_breakpoint(current, method, bcp);
JRT_END

void InterpreterRuntime::resolve_invoke(JavaThread* current, Bytecodes::Code bytecode) {
  LastFrameAccessor last_frame(current);
  // extract receiver from the outgoing argument list if necessary
  Handle receiver(current, NULL);
  if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface ||
      bytecode == Bytecodes::_invokespecial) {
    ResourceMark rm(current);
    methodHandle m (current, last_frame.method());
    Bytecode_invoke call(m, last_frame.bci());
    Symbol* signature = call.signature();
    receiver = Handle(current, last_frame.callee_receiver(signature));

    assert(Universe::heap()->is_in_or_null(receiver()),
           "sanity check");
    assert(receiver.is_null() ||
           !Universe::heap()->is_in(receiver->klass()),
           "sanity check");
  }

  // resolve method
  CallInfo info;
  constantPoolHandle pool(current, last_frame.method()->constants());

  methodHandle resolved_method;

  {
    JvmtiHideSingleStepping jhss(current);
    JavaThread* THREAD = current; // For exception macros.
    LinkResolver::resolve_invoke(info, receiver, pool,
                                 last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                 CHECK);
    if (JvmtiExport::can_hotswap_or_post_breakpoint() && info.resolved_method()->is_old()) {
      resolved_method = methodHandle(current, info.resolved_method()->get_new_method());
    } else {
      resolved_method = methodHandle(current, info.resolved_method());
    }
  } // end JvmtiHideSingleStepping

  // check if link resolution caused cpCache to be updated
  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
  if (cp_cache_entry->is_resolved(bytecode)) return;

#ifdef ASSERT
  if (bytecode == Bytecodes::_invokeinterface) {
    if (resolved_method->method_holder() == vmClasses::Object_klass()) {
      // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
      // (see also CallInfo::set_interface for details)
      assert(info.call_kind() == CallInfo::vtable_call ||
             info.call_kind() == CallInfo::direct_call, "");
      assert(resolved_method->is_final() || info.has_vtable_index(),
             "should have been set already");
    } else if (!resolved_method->has_itable_index()) {
      // Resolved something like CharSequence.toString. Use vtable not itable.
      assert(info.call_kind() != CallInfo::itable_call, "");
    } else {
      // Setup itable entry
      assert(info.call_kind() == CallInfo::itable_call, "");
      int index = resolved_method->itable_index();
      assert(info.itable_index() == index, "");
    }
  } else if (bytecode == Bytecodes::_invokespecial) {
    assert(info.call_kind() == CallInfo::direct_call, "must be direct call");
  } else {
    assert(info.call_kind() == CallInfo::direct_call ||
           info.call_kind() == CallInfo::vtable_call, "");
  }
#endif
  // Get sender and only set cpCache entry to resolved if it is not an
  // interface. The receiver for invokespecial calls within interface
  // methods must be checked for every call.
  InstanceKlass* sender = pool->pool_holder();

  switch (info.call_kind()) {
  case CallInfo::direct_call:
    cp_cache_entry->set_direct_call(
      bytecode,
      resolved_method,
      sender->is_interface());
    break;
  case CallInfo::vtable_call:
    cp_cache_entry->set_vtable_call(
      bytecode,
      resolved_method,
      info.vtable_index());
    break;
  case CallInfo::itable_call:
    cp_cache_entry->set_itable_call(
      bytecode,
      info.resolved_klass(),
      resolved_method,
      info.itable_index());
    break;
  default: ShouldNotReachHere();
  }
}
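
// Summary of the cpCache update above: the resolved call is cached according
// to its CallInfo kind -- direct_call (e.g. invokestatic, invokespecial, or a
// final invokevirtual target), vtable_call (ordinary invokevirtual dispatch),
// or itable_call (invokeinterface dispatch). Once the entry is resolved,
// later executions of the bytecode dispatch from the ConstantPoolCacheEntry
// without re-entering the runtime; invokespecial within an interface method
// is the exception, since the entry is flagged so the receiver is checked on
// every call.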


// First time execution: Resolve symbols, create a permanent MethodType object.
void InterpreterRuntime::resolve_invokehandle(JavaThread* current) {
  const Bytecodes::Code bytecode = Bytecodes::_invokehandle;
  LastFrameAccessor last_frame(current);

  // resolve method
  CallInfo info;
  constantPoolHandle pool(current, last_frame.method()->constants());
  {
    JvmtiHideSingleStepping jhss(current);
    JavaThread* THREAD = current; // For exception macros.
    LinkResolver::resolve_invoke(info, Handle(), pool,
                                 last_frame.get_index_u2_cpcache(bytecode), bytecode,
                                 CHECK);
  } // end JvmtiHideSingleStepping

  ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry();
  cp_cache_entry->set_method_handle(pool, info);
}

// First time execution: Resolve symbols, create a permanent CallSite object.
void InterpreterRuntime::resolve_invokedynamic(JavaThread* current) {
  LastFrameAccessor last_frame(current);
  const Bytecodes::Code bytecode = Bytecodes::_invokedynamic;

  // resolve method
  CallInfo info;
  constantPoolHandle pool(current, last_frame.method()->constants());
  int index = last_frame.get_index_u4(bytecode);
  {
    JvmtiHideSingleStepping jhss(current);
    JavaThread* THREAD = current; // For exception macros.
    LinkResolver::resolve_invoke(info, Handle(), pool,
                                 index, bytecode, CHECK);
  } // end JvmtiHideSingleStepping

  ConstantPoolCacheEntry* cp_cache_entry = pool->invokedynamic_cp_cache_entry_at(index);
  cp_cache_entry->set_dynamic_call(pool, info);
}

// This function is the interface to the assembly code. It returns the resolved
// cpCache entry. This doesn't safepoint, but the helper routines safepoint.
// This function will check for redefinition!
JRT_ENTRY(void, InterpreterRuntime::resolve_from_cache(JavaThread* current, Bytecodes::Code bytecode)) {
  switch (bytecode) {
  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
    resolve_get_put(current, bytecode);
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokeinterface:
    resolve_invoke(current, bytecode);
    break;
  case Bytecodes::_invokehandle:
    resolve_invokehandle(current);
    break;
  case Bytecodes::_invokedynamic:
    resolve_invokedynamic(current);
    break;
  default:
    fatal("unexpected bytecode: %s", Bytecodes::name(bytecode));
    break;
  }
}
JRT_END

//------------------------------------------------------------------------------------------------------------------------
// Miscellaneous


nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* current, address branch_bcp) {
  // Enable WXWrite: the function is called directly by interpreter.
  MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));

  // frequency_counter_overflow_inner can throw async exception.
  nmethod* nm = frequency_counter_overflow_inner(current, branch_bcp);
  assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");
  if (branch_bcp != NULL && nm != NULL) {
    // This was a successful request for an OSR nmethod. Because
    // frequency_counter_overflow_inner ends with a safepoint check,
    // nm could have been unloaded so look it up again. It's unsafe
    // to examine nm directly since it might have been freed and used
    // for something else.
    LastFrameAccessor last_frame(current);
    Method* method = last_frame.method();
    int bci = method->bci_from(last_frame.bcp());
    nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (nm != NULL && bs_nm != NULL) {
      // in case the transition passed a safepoint we need to barrier this again
      if (!bs_nm->nmethod_osr_entry_barrier(nm)) {
        nm = NULL;
      }
    }
  }
  if (nm != NULL && current->is_interp_only_mode()) {
    // Normally we never get an nm if is_interp_only_mode() is true, because
    // policy()->event has a check for this and won't compile the method when
    // true. However, it's possible for is_interp_only_mode() to become true
    // during the compilation. We don't want to return the nm in that case
    // because we want to continue to execute interpreted.
    nm = NULL;
  }
#ifndef PRODUCT
  if (TraceOnStackReplacement) {
    if (nm != NULL) {
      tty->print("OSR entry @ pc: " INTPTR_FORMAT ": ", p2i(nm->osr_entry()));
      nm->print();
    }
  }
#endif
  return nm;
}
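
// When a non-NULL nmethod is returned above, the interpreter performs
// on-stack replacement: it continues at nm->osr_entry() (the address printed
// when TraceOnStackReplacement is enabled), migrating the interpreter frame's
// state into the compiled OSR frame. A NULL return simply continues
// interpreted execution.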

JRT_ENTRY(nmethod*,
          InterpreterRuntime::frequency_counter_overflow_inner(JavaThread* current, address branch_bcp))
  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
  // flag, in case this method triggers classloading which will call into Java.
  UnlockFlagSaver fs(current);

  LastFrameAccessor last_frame(current);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  methodHandle method(current, last_frame.method());
  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
  const int bci = branch_bcp != NULL ? method->bci_from(last_frame.bcp()) : InvocationEntryBci;

  nmethod* osr_nm = CompilationPolicy::event(method, method, branch_bci, bci, CompLevel_none, NULL, CHECK_NULL);

  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (osr_nm != NULL && bs_nm != NULL) {
    if (!bs_nm->nmethod_osr_entry_barrier(osr_nm)) {
      osr_nm = NULL;
    }
  }

  if (osr_nm != NULL) {
    // We may need to do on-stack replacement which requires that no
    // monitors in the activation are biased because their
    // BasicObjectLocks will need to migrate during OSR. Force
    // unbiasing of all monitors in the activation now (even though
    // the OSR nmethod might be invalidated) because we don't have a
    // safepoint opportunity later once the migration begins.
    if (UseBiasedLocking) {
      ResourceMark rm;
      GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
      for( BasicObjectLock *kptr = last_frame.monitor_end();
           kptr < last_frame.monitor_begin();
           kptr = last_frame.next_monitor(kptr) ) {
        if( kptr->obj() != NULL ) {
          objects_to_revoke->append(Handle(current, kptr->obj()));
        }
      }
      BiasedLocking::revoke(objects_to_revoke, current);
    }
  }
  return osr_nm;
JRT_END

JRT_LEAF(jint, InterpreterRuntime::bcp_to_di(Method* method, address cur_bcp))
  assert(ProfileInterpreter, "must be profiling interpreter");
  int bci = method->bci_from(cur_bcp);
  MethodData* mdo = method->method_data();
  if (mdo == NULL) return 0;
  return mdo->bci_to_di(bci);
JRT_END

#ifdef ASSERT
JRT_LEAF(void, InterpreterRuntime::verify_mdp(Method* method, address bcp, address mdp))
  assert(ProfileInterpreter, "must be profiling interpreter");

  MethodData* mdo = method->method_data();
  assert(mdo != NULL, "must not be null");

  int bci = method->bci_from(bcp);

  address mdp2 = mdo->bci_to_dp(bci);
  if (mdp != mdp2) {
    ResourceMark rm;
    tty->print_cr("FAILED verify : actual mdp %p expected mdp %p @ bci %d", mdp, mdp2, bci);
    int current_di = mdo->dp_to_di(mdp);
    int expected_di = mdo->dp_to_di(mdp2);
    tty->print_cr(" actual di %d expected di %d", current_di, expected_di);
    int expected_approx_bci = mdo->data_at(expected_di)->bci();
    int approx_bci = -1;
    if (current_di >= 0) {
      approx_bci = mdo->data_at(current_di)->bci();
    }
    tty->print_cr(" actual bci is %d expected bci %d", approx_bci, expected_approx_bci);
    mdo->print_on(tty);
    method->print_codes();
  }
  assert(mdp == mdp2, "wrong mdp");
JRT_END
#endif // ASSERT

JRT_ENTRY(void, InterpreterRuntime::update_mdp_for_ret(JavaThread* current, int return_bci))
  assert(ProfileInterpreter, "must be profiling interpreter");
  ResourceMark rm(current);
  LastFrameAccessor last_frame(current);
  assert(last_frame.is_interpreted_frame(), "must come from interpreter");
  MethodData* h_mdo = last_frame.method()->method_data();

  // Grab a lock to ensure atomic access to setting the return bci and
  // the displacement. This can block and GC, invalidating all naked oops.
  MutexLocker ml(RetData_lock);

  // ProfileData is essentially a wrapper around a derived oop, so we
  // need to take the lock before making any ProfileData structures.
  ProfileData* data = h_mdo->data_at(h_mdo->dp_to_di(last_frame.mdp()));
  guarantee(data != NULL, "profile data must be valid");
  RetData* rdata = data->as_RetData();
  address new_mdp = rdata->fixup_ret(return_bci, h_mdo);
  last_frame.set_mdp(new_mdp);
JRT_END

JRT_ENTRY(MethodCounters*, InterpreterRuntime::build_method_counters(JavaThread* current, Method* m))
  return Method::build_method_counters(current, m);
JRT_END


JRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* current))
  // We used to need an explicit preserve_arguments here for invoke bytecodes. However,
  // stack traversal automatically takes care of preserving arguments for invoke, so
  // this is no longer needed.

  // JRT_END does an implicit safepoint check, hence we are guaranteed to block
  // if this is called during a safepoint

  if (JvmtiExport::should_post_single_step()) {
    // This function is called by the interpreter when single stepping. Such single
    // stepping could unwind a frame. Then, it is important that we process any frames
    // that we might return into.
    StackWatermarkSet::before_unwind(current);

    // We are called during regular safepoints and when the VM is
    // single stepping. If any thread is marked for single stepping,
    // then we may have JVMTI work to do.
    LastFrameAccessor last_frame(current);
    JvmtiExport::at_single_stepping_point(current, last_frame.method(), last_frame.bcp());
  }
JRT_END

JRT_LEAF(void, InterpreterRuntime::at_unwind(JavaThread* current))
  // This function is called by the interpreter when the return poll found a reason
  // to call the VM. The reason could be that we are returning into a not yet safe
  // to access frame. We handle that below.
  // Note that this path does not check for single stepping, because we do not want
  // to single step when unwinding frames for an exception being thrown. Instead,
  // such single stepping code will use the safepoint table, which will use the
  // InterpreterRuntime::at_safepoint callback.
  StackWatermarkSet::before_unwind(current);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::post_field_access(JavaThread* current, oopDesc* obj,
                                                      ConstantPoolCacheEntry *cp_entry))

  // check the access_flags for the field in the klass

  InstanceKlass* ik = InstanceKlass::cast(cp_entry->f1_as_klass());
  int index = cp_entry->field_index();
  if ((ik->field_access_flags(index) & JVM_ACC_FIELD_ACCESS_WATCHED) == 0) return;

  bool is_static = (obj == NULL);
  HandleMark hm(current);

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(current, obj);
  }
  InstanceKlass* cp_entry_f1 = InstanceKlass::cast(cp_entry->f1_as_klass());
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(cp_entry_f1, cp_entry->f2_as_index(), is_static);
  LastFrameAccessor last_frame(current);
  JvmtiExport::post_field_access(current, last_frame.method(), last_frame.bcp(), cp_entry_f1, h_obj, fid);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread* current, oopDesc* obj,
                                                            ConstantPoolCacheEntry *cp_entry, jvalue *value))

  Klass* k = cp_entry->f1_as_klass();

  // check the access_flags for the field in the klass
  InstanceKlass* ik = InstanceKlass::cast(k);
  int index = cp_entry->field_index();
  // bail out if field modifications are not watched
  if ((ik->field_access_flags(index) & JVM_ACC_FIELD_MODIFICATION_WATCHED) == 0) return;

  char sig_type = '\0';

  switch(cp_entry->flag_state()) {
  case btos: sig_type = JVM_SIGNATURE_BYTE;    break;
  case ztos: sig_type = JVM_SIGNATURE_BOOLEAN; break;
  case ctos: sig_type = JVM_SIGNATURE_CHAR;    break;
  case stos: sig_type = JVM_SIGNATURE_SHORT;   break;
  case itos: sig_type = JVM_SIGNATURE_INT;     break;
  case ftos: sig_type = JVM_SIGNATURE_FLOAT;   break;
  case atos: sig_type = JVM_SIGNATURE_CLASS;   break;
  case ltos: sig_type = JVM_SIGNATURE_LONG;    break;
  case dtos: sig_type = JVM_SIGNATURE_DOUBLE;  break;
  default:  ShouldNotReachHere(); return;
  }
  bool is_static = (obj == NULL);

  HandleMark hm(current);
  jfieldID fid = jfieldIDWorkaround::to_jfieldID(ik, cp_entry->f2_as_index(), is_static);
  jvalue fvalue;
#ifdef _LP64
  fvalue = *value;
#else
  // Long/double values are stored unaligned and also noncontiguously with
  // tagged stacks. We can't just do a simple assignment even in the non-
  // J/D cases because a C++ compiler is allowed to assume that a jvalue is
  // 8-byte aligned, and interpreter stack slots are only 4-byte aligned.
  // We assume that the two halves of longs/doubles are stored in interpreter
  // stack slots in platform-endian order.
  jlong_accessor u;
  jint* newval = (jint*)value;
  u.words[0] = newval[0];
  u.words[1] = newval[Interpreter::stackElementWords]; // skip if tag
  fvalue.j = u.long_value;
#endif // _LP64

  Handle h_obj;
  if (!is_static) {
    // non-static field accessors have an object, but we need a handle
    h_obj = Handle(current, obj);
  }

  LastFrameAccessor last_frame(current);
  JvmtiExport::post_raw_field_modification(current, last_frame.method(), last_frame.bcp(), ik, h_obj,
                                           fid, sig_type, &fvalue);
JRT_END

JRT_ENTRY(void, InterpreterRuntime::post_method_entry(JavaThread* current))
  LastFrameAccessor last_frame(current);
  JvmtiExport::post_method_entry(current, last_frame.method(), last_frame.get_frame());
JRT_END


// This is a JRT_BLOCK_ENTRY because we have to stash away the return oop
// before transitioning to VM, and restore it after transitioning back
// to Java. The return oop at the top of the stack is not walked by the GC.
JRT_BLOCK_ENTRY(void, InterpreterRuntime::post_method_exit(JavaThread* current))
  LastFrameAccessor last_frame(current);
  JvmtiExport::post_method_exit(current, last_frame.method(), last_frame.get_frame());
JRT_END

JRT_LEAF(int, InterpreterRuntime::interpreter_contains(address pc))
{
  return (Interpreter::contains(pc) ? 1 : 0);
}
JRT_END
1255
1256
1257
// Implementation of SignatureHandlerLibrary
1258
1259
#ifndef SHARING_FAST_NATIVE_FINGERPRINTS
1260
// Dummy definition (else normalization method is defined in CPU
1261
// dependant code)
1262
uint64_t InterpreterRuntime::normalize_fast_native_fingerprint(uint64_t fingerprint) {
1263
return fingerprint;
1264
}
1265
#endif
1266
1267
address SignatureHandlerLibrary::set_handler_blob() {
1268
BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
1269
if (handler_blob == NULL) {
1270
return NULL;
1271
}
1272
address handler = handler_blob->code_begin();
1273
_handler_blob = handler_blob;
1274
_handler = handler;
1275
return handler;
1276
}
1277
1278
void SignatureHandlerLibrary::initialize() {
1279
if (_fingerprints != NULL) {
1280
return;
1281
}
1282
if (set_handler_blob() == NULL) {
1283
vm_exit_out_of_memory(blob_size, OOM_MALLOC_ERROR, "native signature handlers");
1284
}
1285
1286
BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer",
1287
SignatureHandlerLibrary::buffer_size);
1288
_buffer = bb->code_begin();
1289
1290
_fingerprints = new(ResourceObj::C_HEAP, mtCode)GrowableArray<uint64_t>(32, mtCode);
1291
_handlers = new(ResourceObj::C_HEAP, mtCode)GrowableArray<address>(32, mtCode);
1292
}
1293
1294
address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) {
1295
address handler = _handler;
1296
int insts_size = buffer->pure_insts_size();
1297
if (handler + insts_size > _handler_blob->code_end()) {
1298
// get a new handler blob
1299
handler = set_handler_blob();
1300
}
1301
if (handler != NULL) {
1302
memcpy(handler, buffer->insts_begin(), insts_size);
1303
pd_set_handler(handler);
1304
ICache::invalidate_range(handler, insts_size);
1305
_handler = handler + insts_size;
1306
}
1307
return handler;
1308
}

void SignatureHandlerLibrary::add(const methodHandle& method) {
  if (method->signature_handler() == NULL) {
    // use slow signature handler if we can't do better
    int handler_index = -1;
    // check if we can use customized (fast) signature handler
    if (UseFastSignatureHandlers && method->size_of_parameters() <= Fingerprinter::fp_max_size_of_parameters) {
      // use customized signature handler
      MutexLocker mu(SignatureHandlerLibrary_lock);
      // make sure data structure is initialized
      initialize();
      // lookup method signature's fingerprint
      uint64_t fingerprint = Fingerprinter(method).fingerprint();
      // allow CPU dependent code to optimize the fingerprints for the fast handler
      fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
      handler_index = _fingerprints->find(fingerprint);
      // create handler if necessary
      if (handler_index < 0) {
        ResourceMark rm;
        ptrdiff_t align_offset = align_up(_buffer, CodeEntryAlignment) - (address)_buffer;
        CodeBuffer buffer((address)(_buffer + align_offset),
                          SignatureHandlerLibrary::buffer_size - align_offset);
        InterpreterRuntime::SignatureHandlerGenerator(method, &buffer).generate(fingerprint);
        // copy into code heap
        address handler = set_handler(&buffer);
        if (handler == NULL) {
          // use slow signature handler (without memorizing it in the fingerprints)
        } else {
          // debugging support
          if (PrintSignatureHandlers && (handler != Interpreter::slow_signature_handler())) {
            ttyLocker ttyl;
            tty->cr();
            tty->print_cr("argument handler #%d for: %s %s (fingerprint = " UINT64_FORMAT ", %d bytes generated)",
                          _handlers->length(),
                          (method->is_static() ? "static" : "receiver"),
                          method->name_and_sig_as_C_string(),
                          fingerprint,
                          buffer.insts_size());
            if (buffer.insts_size() > 0) {
              Disassembler::decode(handler, handler + buffer.insts_size());
            }
#ifndef PRODUCT
            address rh_begin = Interpreter::result_handler(method()->result_type());
            if (CodeCache::contains(rh_begin)) {
              // else it might be special platform dependent values
              tty->print_cr(" --- associated result handler ---");
              address rh_end = rh_begin;
              while (*(int*)rh_end != 0) {
                rh_end += sizeof(int);
              }
              Disassembler::decode(rh_begin, rh_end);
            } else {
              tty->print_cr(" associated result handler: " PTR_FORMAT, p2i(rh_begin));
            }
#endif
          }
          // add handler to library
          _fingerprints->append(fingerprint);
          _handlers->append(handler);
          // set handler index
          assert(_fingerprints->length() == _handlers->length(), "sanity check");
          handler_index = _fingerprints->length() - 1;
        }
      }
      // Set handler under SignatureHandlerLibrary_lock
      if (handler_index < 0) {
        // use generic signature handler
        method->set_signature_handler(Interpreter::slow_signature_handler());
      } else {
        // set handler
        method->set_signature_handler(_handlers->at(handler_index));
      }
    } else {
      DEBUG_ONLY(JavaThread::current()->check_possible_safepoint());
      // use generic signature handler
      method->set_signature_handler(Interpreter::slow_signature_handler());
    }
  }
#ifdef ASSERT
  int handler_index = -1;
  int fingerprint_index = -2;
  {
    // '_handlers' and '_fingerprints' are 'GrowableArray's and are NOT synchronized
    // in any way if accessed from multiple threads. To avoid races with another
    // thread which may change the arrays in the mutex-protected block above, we
    // have to protect this read access here with the same mutex as well!
    MutexLocker mu(SignatureHandlerLibrary_lock);
    if (_handlers != NULL) {
      handler_index = _handlers->find(method->signature_handler());
      uint64_t fingerprint = Fingerprinter(method).fingerprint();
      fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
      fingerprint_index = _fingerprints->find(fingerprint);
    }
  }
  assert(method->signature_handler() == Interpreter::slow_signature_handler() ||
         handler_index == fingerprint_index, "sanity check");
#endif // ASSERT
}
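
// The method above is a generate-once cache: a signature is reduced to a
// fingerprint, the fingerprint is looked up under SignatureHandlerLibrary_lock,
// and only on a miss is a new handler generated and appended to the parallel
// _fingerprints/_handlers arrays. The disabled block below restates that
// pattern with plain standard C++ containers; the names and types are
// illustrative only and are not part of HotSpot.
#if 0
#include <cstdint>
#include <map>
#include <mutex>

class SketchHandlerCache {
 public:
  // Returns the handler for 'fingerprint', invoking 'generate' at most once
  // per distinct fingerprint.
  void* handler_for(uint64_t fingerprint, void* (*generate)(uint64_t)) {
    std::lock_guard<std::mutex> guard(_lock);   // same role as the VM mutex above
    auto it = _handlers.find(fingerprint);
    if (it == _handlers.end()) {
      it = _handlers.emplace(fingerprint, generate(fingerprint)).first;
    }
    return it->second;
  }

 private:
  std::mutex _lock;
  std::map<uint64_t, void*> _handlers;          // fingerprint -> handler entry
};
#endif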

void SignatureHandlerLibrary::add(uint64_t fingerprint, address handler) {
  int handler_index = -1;
  // use customized signature handler
  MutexLocker mu(SignatureHandlerLibrary_lock);
  // make sure data structure is initialized
  initialize();
  fingerprint = InterpreterRuntime::normalize_fast_native_fingerprint(fingerprint);
  handler_index = _fingerprints->find(fingerprint);
  // create handler if necessary
  if (handler_index < 0) {
    if (PrintSignatureHandlers && (handler != Interpreter::slow_signature_handler())) {
      tty->cr();
      tty->print_cr("argument handler #%d at " PTR_FORMAT " for fingerprint " UINT64_FORMAT,
                    _handlers->length(),
                    p2i(handler),
                    fingerprint);
    }
    _fingerprints->append(fingerprint);
    _handlers->append(handler);
  } else {
    if (PrintSignatureHandlers) {
      tty->cr();
      tty->print_cr("duplicate argument handler #%d for fingerprint " UINT64_FORMAT " (old: " PTR_FORMAT ", new: " PTR_FORMAT ")",
                    _handlers->length(),
                    fingerprint,
                    p2i(_handlers->at(handler_index)),
                    p2i(handler));
    }
  }
}


BufferBlob*              SignatureHandlerLibrary::_handler_blob = NULL;
address                  SignatureHandlerLibrary::_handler      = NULL;
GrowableArray<uint64_t>* SignatureHandlerLibrary::_fingerprints = NULL;
GrowableArray<address>*  SignatureHandlerLibrary::_handlers     = NULL;
address                  SignatureHandlerLibrary::_buffer       = NULL;


JRT_ENTRY(void, InterpreterRuntime::prepare_native_call(JavaThread* current, Method* method))
  methodHandle m(current, method);
  assert(m->is_native(), "sanity check");
  // lookup native function entry point if it doesn't exist
  if (!m->has_native_function()) {
    NativeLookup::lookup(m, CHECK);
  }
  // make sure signature handler is installed
  SignatureHandlerLibrary::add(m);
  // The interpreter entry point checks the signature handler first,
  // before trying to fetch the native entry point and klass mirror.
  // We must set the signature handler last, so that multiple processors
  // preparing the same method will be sure to see non-null entry & mirror.
JRT_END
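
// The ordering requirement in the comment above is the classic safe-publication
// pattern: fully initialize an object's state before publishing the pointer or
// flag that other threads test first. The disabled block below illustrates that
// pattern with standard C++ atomics; it is a generic sketch, not HotSpot code,
// and all names in it are made up for the example.
#if 0
#include <atomic>

struct SketchNativeMethodState {
  void* native_entry = nullptr;                   // analogous to the native function
  void* mirror       = nullptr;                   // analogous to the klass mirror
  std::atomic<void*> signature_handler{nullptr};  // published last, checked first
};

// Writer: prepare everything, then publish the handler with release ordering.
inline void sketch_prepare(SketchNativeMethodState* m, void* entry, void* mirror, void* handler) {
  m->native_entry = entry;
  m->mirror       = mirror;
  m->signature_handler.store(handler, std::memory_order_release);
}

// Reader (the interpreter entry point in this analogy): if the handler is
// visible, the acquire load guarantees the entry and mirror stores are too.
inline bool sketch_ready(const SketchNativeMethodState* m) {
  return m->signature_handler.load(std::memory_order_acquire) != nullptr;
}
#endif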

#if defined(IA32) || defined(AMD64) || defined(ARM)
JRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* current, void* src_address, void* dest_address))
  if (src_address == dest_address) {
    return;
  }
  ResourceMark rm;
  LastFrameAccessor last_frame(current);
  assert(last_frame.is_interpreted_frame(), "");
  jint bci = last_frame.bci();
  methodHandle mh(current, last_frame.method());
  Bytecode_invoke invoke(mh, bci);
  ArgumentSizeComputer asc(invoke.signature());
  int size_of_arguments = (asc.size() + (invoke.has_receiver() ? 1 : 0)); // receiver
  Copy::conjoint_jbytes(src_address, dest_address,
                        size_of_arguments * Interpreter::stackElementSize);
JRT_END
#endif
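
// Worked example for the size computation above (assuming a 64-bit port where
// Interpreter::stackElementSize is 8 bytes): for an instance method with
// descriptor (JLjava/lang/String;I)V, ArgumentSizeComputer counts 2 slots for
// the long, 1 for the reference, and 1 for the int, and the receiver adds one
// more, so size_of_arguments == 5 and 5 * 8 == 40 bytes are copied.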

#if INCLUDE_JVMTI
// This supports the JVMTI PopFrame interface.
// Make sure it is an invokestatic of a polymorphic intrinsic that has a member_name argument
// and return it as a vm_result so that it can be reloaded in the list of invokestatic parameters.
// The member_name argument is a saved reference (in local#0) to the member_name.
// For backward compatibility with some JDK versions (7, 8) it can also be a direct method handle.
// FIXME: remove DMH case after j.l.i.InvokerBytecodeGenerator code shape is updated.
JRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* current, address member_name,
                                                            Method* method, address bcp))
  Bytecodes::Code code = Bytecodes::code_at(method, bcp);
  if (code != Bytecodes::_invokestatic) {
    return;
  }
  ConstantPool* cpool = method->constants();
  int cp_index = Bytes::get_native_u2(bcp + 1) + ConstantPool::CPCACHE_INDEX_TAG;
  Symbol* cname = cpool->klass_name_at(cpool->klass_ref_index_at(cp_index));
  Symbol* mname = cpool->name_ref_at(cp_index);

  if (MethodHandles::has_member_arg(cname, mname)) {
    oop member_name_oop = cast_to_oop(member_name);
    if (java_lang_invoke_DirectMethodHandle::is_instance(member_name_oop)) {
      // FIXME: remove after j.l.i.InvokerBytecodeGenerator code shape is updated.
      member_name_oop = java_lang_invoke_DirectMethodHandle::member(member_name_oop);
    }
    current->set_vm_result(member_name_oop);
  } else {
    current->set_vm_result(NULL);
  }
JRT_END
#endif // INCLUDE_JVMTI

#ifndef PRODUCT
// This must be a JRT_LEAF function because the interpreter must save registers on x86 to
// call this, which changes rsp and makes the interpreter's expression stack not walkable.
// The generated code still uses call_VM because that will set up the frame pointer for
// bcp and method.
JRT_LEAF(intptr_t, InterpreterRuntime::trace_bytecode(JavaThread* current, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
  LastFrameAccessor last_frame(current);
  assert(last_frame.is_interpreted_frame(), "must be an interpreted frame");
  methodHandle mh(current, last_frame.method());
  BytecodeTracer::trace(mh, last_frame.bcp(), tos, tos2);
  return preserve_this_value;
JRT_END
#endif // !PRODUCT