GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/prims/jvmtiImpl.cpp
/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "code/nmethod.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "prims/jvmtiAgentThread.hpp"
#include "prims/jvmtiEventController.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/exceptions.hpp"

//
// class JvmtiAgentThread
//
// JavaThread used to wrap a thread started by an agent
// using the JVMTI method RunAgentThread.
//

JvmtiAgentThread::JvmtiAgentThread(JvmtiEnv* env, jvmtiStartFunction start_fn, const void *start_arg)
    : JavaThread(start_function_wrapper) {
  _env = env;
  _start_fn = start_fn;
  _start_arg = start_arg;
}

void
JvmtiAgentThread::start_function_wrapper(JavaThread *thread, TRAPS) {
  // It is expected that any Agent threads will be created as
  // Java Threads. If this is the case, notification of the creation
  // of the thread is given in JavaThread::thread_main().
  assert(thread == JavaThread::current(), "sanity check");

  JvmtiAgentThread *dthread = (JvmtiAgentThread *)thread;
  dthread->call_start_function();
}

void
JvmtiAgentThread::call_start_function() {
  ThreadToNativeFromVM transition(this);
  _start_fn(_env->jvmti_external(), jni_environment(), (void*)_start_arg);
}
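
// Illustrative sketch (not from this file): this wrapper is reached when an
// agent calls the JVMTI RunAgentThread function. Assuming a jvmtiEnv* 'jvmti'
// and an allocated java.lang.Thread 'thread', with 'agent_proc' being a
// hypothetical jvmtiStartFunction supplied by the agent:
//
//   static void JNICALL agent_proc(jvmtiEnv* jvmti, JNIEnv* jni, void* arg) {
//     // runs on the new JvmtiAgentThread via call_start_function()
//   }
//   ...
//   jvmti->RunAgentThread(thread, agent_proc, NULL, JVMTI_THREAD_NORM_PRIORITY);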


//
// class GrowableCache - private methods
//

void GrowableCache::recache() {
  int len = _elements->length();

  FREE_C_HEAP_ARRAY(address, _cache);
  _cache = NEW_C_HEAP_ARRAY(address,len+1, mtInternal);

  for (int i=0; i<len; i++) {
    _cache[i] = _elements->at(i)->getCacheValue();
    //
    // The cache entry has gone bad. Without a valid frame pointer
    // value, the entry is useless so we simply delete it in product
    // mode. The call to remove() will rebuild the cache again
    // without the bad entry.
    //
    if (_cache[i] == NULL) {
      assert(false, "cannot recache NULL elements");
      remove(i);
      return;
    }
  }
  _cache[len] = NULL;

  _listener_fun(_this_obj,_cache);
}

bool GrowableCache::equals(void* v, GrowableElement *e2) {
  GrowableElement *e1 = (GrowableElement *) v;
  assert(e1 != NULL, "e1 != NULL");
  assert(e2 != NULL, "e2 != NULL");

  return e1->equals(e2);
}

//
// class GrowableCache - public methods
//

GrowableCache::GrowableCache() {
  _this_obj = NULL;
  _listener_fun = NULL;
  _elements = NULL;
  _cache = NULL;
}

GrowableCache::~GrowableCache() {
  clear();
  delete _elements;
  FREE_C_HEAP_ARRAY(address, _cache);
}

void GrowableCache::initialize(void *this_obj, void listener_fun(void *, address*) ) {
  _this_obj = this_obj;
  _listener_fun = listener_fun;
  _elements = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<GrowableElement*>(5, mtServiceability);
  recache();
}

// number of elements in the collection
int GrowableCache::length() {
  return _elements->length();
}

// get the value of the index element in the collection
GrowableElement* GrowableCache::at(int index) {
  GrowableElement *e = (GrowableElement *) _elements->at(index);
  assert(e != NULL, "e != NULL");
  return e;
}

int GrowableCache::find(GrowableElement* e) {
  return _elements->find(e, GrowableCache::equals);
}

// append a copy of the element to the end of the collection
void GrowableCache::append(GrowableElement* e) {
  GrowableElement *new_e = e->clone();
  _elements->append(new_e);
  recache();
}

// remove the element at index
void GrowableCache::remove (int index) {
  GrowableElement *e = _elements->at(index);
  assert(e != NULL, "e != NULL");
  _elements->remove(e);
  delete e;
  recache();
}

// clear out all elements, release all heap space and
// let our listener know that things have changed.
void GrowableCache::clear() {
  int len = _elements->length();
  for (int i=0; i<len; i++) {
    delete _elements->at(i);
  }
  _elements->clear();
  recache();
}
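
// Usage sketch (not from this file, derived from the methods above; 'owner'
// and 'owner_listener' are hypothetical names): the typical life cycle of a
// GrowableCache, as used by JvmtiBreakpoints below, is roughly:
//
//   GrowableCache cache;
//   cache.initialize(owner, owner_listener);  // empty, NULL-terminated array
//   cache.append(&element);                   // clones the element, then recache()
//   cache.remove(index);                      // deletes the clone, then recache()
//
// Every mutation ends in recache(), which rebuilds the flat address array and
// hands it to the listener, so the owner always holds a NULL-terminated snapshot.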

//
// class JvmtiBreakpoint
//

JvmtiBreakpoint::JvmtiBreakpoint(Method* m_method, jlocation location)
    : _method(m_method), _bci((int)location) {
  assert(_method != NULL, "No method for breakpoint.");
  assert(_bci >= 0, "Negative bci for breakpoint.");
  oop class_holder_oop = _method->method_holder()->klass_holder();
  _class_holder = OopHandle(JvmtiExport::jvmti_oop_storage(), class_holder_oop);
}

JvmtiBreakpoint::~JvmtiBreakpoint() {
  _class_holder.release(JvmtiExport::jvmti_oop_storage());
}

void JvmtiBreakpoint::copy(JvmtiBreakpoint& bp) {
  _method = bp._method;
  _bci = bp._bci;
  _class_holder = OopHandle(JvmtiExport::jvmti_oop_storage(), bp._class_holder.resolve());
}

bool JvmtiBreakpoint::equals(JvmtiBreakpoint& bp) {
  return _method == bp._method
      && _bci == bp._bci;
}

address JvmtiBreakpoint::getBcp() const {
  return _method->bcp_from(_bci);
}

void JvmtiBreakpoint::each_method_version_do(method_action meth_act) {
  ((Method*)_method->*meth_act)(_bci);

  // add/remove breakpoint to/from versions of the method that are EMCP.
  Thread *thread = Thread::current();
  InstanceKlass* ik = _method->method_holder();
  Symbol* m_name = _method->name();
  Symbol* m_signature = _method->signature();

  // search previous versions if they exist
  for (InstanceKlass* pv_node = ik->previous_versions();
       pv_node != NULL;
       pv_node = pv_node->previous_versions()) {
    Array<Method*>* methods = pv_node->methods();

    for (int i = methods->length() - 1; i >= 0; i--) {
      Method* method = methods->at(i);
      // Only set breakpoints in EMCP methods.
      // EMCP methods are old but not obsolete. Equivalent
      // Modulo Constant Pool means the method is equivalent except
      // the constant pool and instructions that access the constant
      // pool might be different.
      // If a breakpoint is set in a redefined method, its EMCP methods
      // must have a breakpoint also.
      // None of the methods are deleted until none are running.
      // This code could set a breakpoint in a method that
      // is never reached, but this won't be noticeable to the programmer.
      if (!method->is_obsolete() &&
          method->name() == m_name &&
          method->signature() == m_signature) {
        ResourceMark rm;
        log_debug(redefine, class, breakpoint)
          ("%sing breakpoint in %s(%s)", meth_act == &Method::set_breakpoint ? "sett" : "clear",
           method->name()->as_C_string(), method->signature()->as_C_string());
        (method->*meth_act)(_bci);
        break;
      }
    }
  }
}

void JvmtiBreakpoint::set() {
  each_method_version_do(&Method::set_breakpoint);
}

void JvmtiBreakpoint::clear() {
  each_method_version_do(&Method::clear_breakpoint);
}

void JvmtiBreakpoint::print_on(outputStream* out) const {
#ifndef PRODUCT
  ResourceMark rm;
  const char *class_name = (_method == NULL) ? "NULL" : _method->klass_name()->as_C_string();
  const char *method_name = (_method == NULL) ? "NULL" : _method->name()->as_C_string();
  out->print("Breakpoint(%s,%s,%d,%p)", class_name, method_name, _bci, getBcp());
#endif
}


//
// class VM_ChangeBreakpoints
//
// Modify the Breakpoints data structure at a safepoint
//

void VM_ChangeBreakpoints::doit() {
  switch (_operation) {
  case SET_BREAKPOINT:
    _breakpoints->set_at_safepoint(*_bp);
    break;
  case CLEAR_BREAKPOINT:
    _breakpoints->clear_at_safepoint(*_bp);
    break;
  default:
    assert(false, "Unknown operation");
  }
}

//
// class JvmtiBreakpoints
//
// a JVMTI internal collection of JvmtiBreakpoint
//

JvmtiBreakpoints::JvmtiBreakpoints(void listener_fun(void *,address *)) {
  _bps.initialize(this,listener_fun);
}

JvmtiBreakpoints:: ~JvmtiBreakpoints() {}

void JvmtiBreakpoints::print() {
#ifndef PRODUCT
  LogTarget(Trace, jvmti) log;
  LogStream log_stream(log);

  int n = _bps.length();
  for (int i=0; i<n; i++) {
    JvmtiBreakpoint& bp = _bps.at(i);
    log_stream.print("%d: ", i);
    bp.print_on(&log_stream);
    log_stream.cr();
  }
#endif
}


void JvmtiBreakpoints::set_at_safepoint(JvmtiBreakpoint& bp) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  int i = _bps.find(bp);
  if (i == -1) {
    _bps.append(bp);
    bp.set();
  }
}

void JvmtiBreakpoints::clear_at_safepoint(JvmtiBreakpoint& bp) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  int i = _bps.find(bp);
  if (i != -1) {
    _bps.remove(i);
    bp.clear();
  }
}

int JvmtiBreakpoints::length() { return _bps.length(); }

int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
  if ( _bps.find(bp) != -1) {
    return JVMTI_ERROR_DUPLICATE;
  }
  VM_ChangeBreakpoints set_breakpoint(VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
  VMThread::execute(&set_breakpoint);
  return JVMTI_ERROR_NONE;
}

int JvmtiBreakpoints::clear(JvmtiBreakpoint& bp) {
  if ( _bps.find(bp) == -1) {
    return JVMTI_ERROR_NOT_FOUND;
  }

  VM_ChangeBreakpoints clear_breakpoint(VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
  VMThread::execute(&clear_breakpoint);
  return JVMTI_ERROR_NONE;
}
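
// Agent-side sketch (not from this file): set() and clear() back the JVMTI
// SetBreakpoint/ClearBreakpoint functions. With a jvmtiEnv* 'jvmti', a
// jmethodID 'mid' and a bytecode index 'bci' (hypothetical names):
//
//   jvmtiError err = jvmti->SetBreakpoint(mid, (jlocation)bci);
//   ...
//   err = jvmti->ClearBreakpoint(mid, (jlocation)bci);
//
// A duplicate set maps to JVMTI_ERROR_DUPLICATE and a missing clear to
// JVMTI_ERROR_NOT_FOUND, matching the return codes above.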

void JvmtiBreakpoints::clearall_in_class_at_safepoint(Klass* klass) {
  bool changed = true;
  // We are going to run thru the list of bkpts
  // and delete some. This deletion probably alters
  // the list in some implementation defined way such
  // that when we delete entry i, the next entry might
  // no longer be at i+1. To be safe, each time we delete
  // an entry, we'll just start again from the beginning.
  // We'll stop when we make a pass thru the whole list without
  // deleting anything.
  while (changed) {
    int len = _bps.length();
    changed = false;
    for (int i = 0; i < len; i++) {
      JvmtiBreakpoint& bp = _bps.at(i);
      if (bp.method()->method_holder() == klass) {
        bp.clear();
        _bps.remove(i);
        // This changed 'i' so we have to start over.
        changed = true;
        break;
      }
    }
  }
}

//
// class JvmtiCurrentBreakpoints
//

JvmtiBreakpoints *JvmtiCurrentBreakpoints::_jvmti_breakpoints = NULL;
address * JvmtiCurrentBreakpoints::_breakpoint_list = NULL;


JvmtiBreakpoints& JvmtiCurrentBreakpoints::get_jvmti_breakpoints() {
  if (_jvmti_breakpoints != NULL) return (*_jvmti_breakpoints);
  _jvmti_breakpoints = new JvmtiBreakpoints(listener_fun);
  assert(_jvmti_breakpoints != NULL, "_jvmti_breakpoints != NULL");
  return (*_jvmti_breakpoints);
}

void JvmtiCurrentBreakpoints::listener_fun(void *this_obj, address *cache) {
  JvmtiBreakpoints *this_jvmti = (JvmtiBreakpoints *) this_obj;
  assert(this_jvmti != NULL, "this_jvmti != NULL");

  debug_only(int n = this_jvmti->length(););
  assert(cache[n] == NULL, "cache must be NULL terminated");

  set_breakpoint_list(cache);
}

///////////////////////////////////////////////////////////////
//
// class VM_GetOrSetLocal
//

// Constructor for non-object getter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type)
  : _thread(thread)
  , _calling_thread(NULL)
  , _depth(depth)
  , _index(index)
  , _type(type)
  , _jvf(NULL)
  , _set(false)
  , _eb(false, NULL, NULL)
  , _result(JVMTI_ERROR_NONE)
{
}

// Constructor for object or non-object setter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, jint depth, jint index, BasicType type, jvalue value)
  : _thread(thread)
  , _calling_thread(NULL)
  , _depth(depth)
  , _index(index)
  , _type(type)
  , _value(value)
  , _jvf(NULL)
  , _set(true)
  , _eb(type == T_OBJECT, JavaThread::current(), thread)
  , _result(JVMTI_ERROR_NONE)
{
}

// Constructor for object getter
VM_GetOrSetLocal::VM_GetOrSetLocal(JavaThread* thread, JavaThread* calling_thread, jint depth, int index)
  : _thread(thread)
  , _calling_thread(calling_thread)
  , _depth(depth)
  , _index(index)
  , _type(T_OBJECT)
  , _jvf(NULL)
  , _set(false)
  , _eb(true, calling_thread, thread)
  , _result(JVMTI_ERROR_NONE)
{
}

vframe *VM_GetOrSetLocal::get_vframe() {
  if (!_thread->has_last_Java_frame()) {
    return NULL;
  }
  RegisterMap reg_map(_thread);
  vframe *vf = _thread->last_java_vframe(&reg_map);
  int d = 0;
  while ((vf != NULL) && (d < _depth)) {
    vf = vf->java_sender();
    d++;
  }
  return vf;
}

javaVFrame *VM_GetOrSetLocal::get_java_vframe() {
  vframe* vf = get_vframe();
  if (vf == NULL) {
    _result = JVMTI_ERROR_NO_MORE_FRAMES;
    return NULL;
  }
  javaVFrame *jvf = (javaVFrame*)vf;

  if (!vf->is_java_frame()) {
    _result = JVMTI_ERROR_OPAQUE_FRAME;
    return NULL;
  }
  return jvf;
}

// Check that the klass is assignable to a type with the given signature.
// Another solution could be to use the function Klass::is_subtype_of(type).
// But the type class can be forced to load/initialize eagerly in such a case.
// This may cause unexpected consequences like CFLH or class-init JVMTI events.
// It is better to avoid such a behavior.
bool VM_GetOrSetLocal::is_assignable(const char* ty_sign, Klass* klass, Thread* thread) {
  assert(ty_sign != NULL, "type signature must not be NULL");
  assert(thread != NULL, "thread must not be NULL");
  assert(klass != NULL, "klass must not be NULL");

  int len = (int) strlen(ty_sign);
  if (ty_sign[0] == JVM_SIGNATURE_CLASS &&
      ty_sign[len-1] == JVM_SIGNATURE_ENDCLASS) { // Need pure class/interface name
    ty_sign++;
    len -= 2;
  }
  TempNewSymbol ty_sym = SymbolTable::new_symbol(ty_sign, len);
  if (klass->name() == ty_sym) {
    return true;
  }
  // Compare primary supers
  int super_depth = klass->super_depth();
  int idx;
  for (idx = 0; idx < super_depth; idx++) {
    if (klass->primary_super_of_depth(idx)->name() == ty_sym) {
      return true;
    }
  }
  // Compare secondary supers
  const Array<Klass*>* sec_supers = klass->secondary_supers();
  for (idx = 0; idx < sec_supers->length(); idx++) {
    if (((Klass*) sec_supers->at(idx))->name() == ty_sym) {
      return true;
    }
  }
  return false;
}
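
// Worked example (not from this file): for the signature "Ljava/lang/Runnable;"
// the leading 'L' and trailing ';' are stripped, leaving "java/lang/Runnable".
// A klass such as java/lang/Thread then matches through its secondary supers
// (the implemented interfaces), while a signature naming a superclass, e.g.
// "Ljava/lang/Object;", matches through the primary super chain.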

// Checks error conditions:
// JVMTI_ERROR_INVALID_SLOT
// JVMTI_ERROR_TYPE_MISMATCH
// Returns: 'true' - everything is Ok, 'false' - error code

bool VM_GetOrSetLocal::check_slot_type_lvt(javaVFrame* jvf) {
  Method* method = jvf->method();
  jint num_entries = method->localvariable_table_length();
  if (num_entries == 0) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false; // There are no slots
  }
  int signature_idx = -1;
  int vf_bci = jvf->bci();
  LocalVariableTableElement* table = method->localvariable_table_start();
  for (int i = 0; i < num_entries; i++) {
    int start_bci = table[i].start_bci;
    int end_bci = start_bci + table[i].length;

    // Here we assume that locations of LVT entries
    // with the same slot number cannot be overlapped
    if (_index == (jint) table[i].slot && start_bci <= vf_bci && vf_bci <= end_bci) {
      signature_idx = (int) table[i].descriptor_cp_index;
      break;
    }
  }
  if (signature_idx == -1) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false; // Incorrect slot index
  }
  Symbol* sign_sym = method->constants()->symbol_at(signature_idx);
  BasicType slot_type = Signature::basic_type(sign_sym);

  switch (slot_type) {
  case T_BYTE:
  case T_SHORT:
  case T_CHAR:
  case T_BOOLEAN:
    slot_type = T_INT;
    break;
  case T_ARRAY:
    slot_type = T_OBJECT;
    break;
  default:
    break;
  };
  if (_type != slot_type) {
    _result = JVMTI_ERROR_TYPE_MISMATCH;
    return false;
  }

  jobject jobj = _value.l;
  if (_set && slot_type == T_OBJECT && jobj != NULL) { // NULL reference is allowed
    // Check that the jobject class matches the return type signature.
    oop obj = JNIHandles::resolve_external_guard(jobj);
    NULL_CHECK(obj, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
    Klass* ob_k = obj->klass();
    NULL_CHECK(ob_k, (_result = JVMTI_ERROR_INVALID_OBJECT, false));

    const char* signature = (const char *) sign_sym->as_utf8();
    if (!is_assignable(signature, ob_k, VMThread::vm_thread())) {
      _result = JVMTI_ERROR_TYPE_MISMATCH;
      return false;
    }
  }
  return true;
}

bool VM_GetOrSetLocal::check_slot_type_no_lvt(javaVFrame* jvf) {
  Method* method = jvf->method();
  jint extra_slot = (_type == T_LONG || _type == T_DOUBLE) ? 1 : 0;

  if (_index < 0 || _index + extra_slot >= method->max_locals()) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;
  }
  StackValueCollection *locals = _jvf->locals();
  BasicType slot_type = locals->at(_index)->type();

  if (slot_type == T_CONFLICT) {
    _result = JVMTI_ERROR_INVALID_SLOT;
    return false;
  }
  if (extra_slot) {
    BasicType extra_slot_type = locals->at(_index + 1)->type();
    if (extra_slot_type != T_INT) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return false;
    }
  }
  if (_type != slot_type && (_type == T_OBJECT || slot_type != T_INT)) {
    _result = JVMTI_ERROR_TYPE_MISMATCH;
    return false;
  }
  return true;
}
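
// Worked example (not from this file): for an instance method
// 'void m(int a, long b)', the interpreter numbers the locals 0 = this,
// 1 = a, 2..3 = b (max_locals == 4). A T_LONG access must name slot 2, and the
// check above additionally requires slot 3, the second half of the long, to
// read back as T_INT from the StackValueCollection. Naming slot 3 as the start
// of a long fails the bounds check (3 + 1 >= max_locals) with
// JVMTI_ERROR_INVALID_SLOT.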

static bool can_be_deoptimized(vframe* vf) {
  return (vf->is_compiled_frame() && vf->fr().can_be_deoptimized());
}

bool VM_GetOrSetLocal::doit_prologue() {
  if (!_eb.deoptimize_objects(_depth, _depth)) {
    // The target frame is affected by a reallocation failure.
    _result = JVMTI_ERROR_OUT_OF_MEMORY;
    return false;
  }

  return true;
}

void VM_GetOrSetLocal::doit() {
  _jvf = _jvf == NULL ? get_java_vframe() : _jvf;
  if (_jvf == NULL) {
    return;
  };

  Method* method = _jvf->method();
  if (getting_receiver()) {
    if (method->is_static()) {
      _result = JVMTI_ERROR_INVALID_SLOT;
      return;
    }
  } else {
    if (method->is_native()) {
      _result = JVMTI_ERROR_OPAQUE_FRAME;
      return;
    }

    if (!check_slot_type_no_lvt(_jvf)) {
      return;
    }
    if (method->has_localvariable_table() &&
        !check_slot_type_lvt(_jvf)) {
      return;
    }
  }

  InterpreterOopMap oop_mask;
  _jvf->method()->mask_for(_jvf->bci(), &oop_mask);
  if (oop_mask.is_dead(_index)) {
    // The local can be invalid and uninitialized in the scope of the current bci
    _result = JVMTI_ERROR_INVALID_SLOT;
    return;
  }
  if (_set) {
    // Force deoptimization of frame if compiled because it's
    // possible the compiler emitted some locals as constant values,
    // meaning they are not mutable.
    if (can_be_deoptimized(_jvf)) {

      // Schedule deoptimization so that eventually the local
      // update will be written to an interpreter frame.
      Deoptimization::deoptimize_frame(_jvf->thread(), _jvf->fr().id());

      // Now store a new value for the local which will be applied
      // once deoptimization occurs. Note however that while this
      // write is deferred until deoptimization actually happens,
      // any vframe created after this point will have its locals
      // reflecting this update so as far as anyone can see the
      // write has already taken place.

      // If we are updating an oop then get the oop from the handle
      // since the handle will be long gone by the time the deopt
      // happens. The oop stored in the deferred local will be
      // gc'd on its own.
      if (_type == T_OBJECT) {
        _value.l = cast_from_oop<jobject>(JNIHandles::resolve_external_guard(_value.l));
      }
      // Re-read the vframe so we can see that it is deoptimized
      // [ Only needed because of assert in update_local() ]
      _jvf = get_java_vframe();
      ((compiledVFrame*)_jvf)->update_local(_type, _index, _value);
      return;
    }
    StackValueCollection *locals = _jvf->locals();
    Thread* current_thread = VMThread::vm_thread();
    HandleMark hm(current_thread);

    switch (_type) {
    case T_INT: locals->set_int_at (_index, _value.i); break;
    case T_LONG: locals->set_long_at (_index, _value.j); break;
    case T_FLOAT: locals->set_float_at (_index, _value.f); break;
    case T_DOUBLE: locals->set_double_at(_index, _value.d); break;
    case T_OBJECT: {
      Handle ob_h(current_thread, JNIHandles::resolve_external_guard(_value.l));
      locals->set_obj_at (_index, ob_h);
      break;
    }
    default: ShouldNotReachHere();
    }
    _jvf->set_locals(locals);
  } else {
    if (_jvf->method()->is_native() && _jvf->is_compiled_frame()) {
      assert(getting_receiver(), "Can only get here when getting receiver");
      oop receiver = _jvf->fr().get_native_receiver();
      _value.l = JNIHandles::make_local(_calling_thread, receiver);
    } else {
      StackValueCollection *locals = _jvf->locals();

      switch (_type) {
      case T_INT: _value.i = locals->int_at (_index); break;
      case T_LONG: _value.j = locals->long_at (_index); break;
      case T_FLOAT: _value.f = locals->float_at (_index); break;
      case T_DOUBLE: _value.d = locals->double_at(_index); break;
      case T_OBJECT: {
        // Wrap the oop to be returned in a local JNI handle since
        // oops_do() no longer applies after doit() is finished.
        oop obj = locals->obj_at(_index)();
        _value.l = JNIHandles::make_local(_calling_thread, obj);
        break;
      }
      default: ShouldNotReachHere();
      }
    }
  }
}


bool VM_GetOrSetLocal::allow_nested_vm_operations() const {
  return true; // May need to deoptimize
}
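
// Agent-side sketch (not from this file): VM_GetOrSetLocal backs the JVMTI
// GetLocal*/SetLocal* functions. With a jvmtiEnv* 'jvmti' and a suspended
// jthread 'thr' (hypothetical names), reading and writing int slot 1 of the
// top frame looks roughly like:
//
//   jint v;
//   jvmtiError err = jvmti->GetLocalInt(thr, 0, 1, &v);
//   err = jvmti->SetLocalInt(thr, 0, 1, v + 1);
//
// A set targeting a compiled frame takes the deoptimization path in doit().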


VM_GetReceiver::VM_GetReceiver(
    JavaThread* thread, JavaThread* caller_thread, jint depth)
    : VM_GetOrSetLocal(thread, caller_thread, depth, 0) {}

/////////////////////////////////////////////////////////////////////////////////////////

//
// class JvmtiSuspendControl - see comments in jvmtiImpl.hpp
//

bool JvmtiSuspendControl::suspend(JavaThread *java_thread) {
  return java_thread->java_suspend();
}

bool JvmtiSuspendControl::resume(JavaThread *java_thread) {
  return java_thread->java_resume();
}

void JvmtiSuspendControl::print() {
#ifndef PRODUCT
  ResourceMark rm;
  LogStreamHandle(Trace, jvmti) log_stream;
  log_stream.print("Suspended Threads: [");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
#ifdef JVMTI_TRACE
    const char *name = JvmtiTrace::safe_get_thread_name(thread);
#else
    const char *name = "";
#endif /*JVMTI_TRACE */
    log_stream.print("%s(%c ", name, thread->is_suspended() ? 'S' : '_');
    if (!thread->has_last_Java_frame()) {
      log_stream.print("no stack");
    }
    log_stream.print(") ");
  }
  log_stream.print_cr("]");
#endif
}

JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event(
    nmethod* nm) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
  event._event_data.compiled_method_load = nm;
  return event;
}

JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
    jmethodID id, const void* code) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
  event._event_data.compiled_method_unload.method_id = id;
  event._event_data.compiled_method_unload.code_begin = code;
  return event;
}

JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
    const char* name, const void* code_begin, const void* code_end) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED);
  // Need to make a copy of the name since we don't know how long
  // the event poster will keep it around after we enqueue the
  // deferred event and return. strdup() failure is handled in
  // the post() routine below.
  event._event_data.dynamic_code_generated.name = os::strdup(name);
  event._event_data.dynamic_code_generated.code_begin = code_begin;
  event._event_data.dynamic_code_generated.code_end = code_end;
  return event;
}

JvmtiDeferredEvent JvmtiDeferredEvent::class_unload_event(const char* name) {
  JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_CLASS_UNLOAD);
  // Need to make a copy of the name since we don't know how long
  // the event poster will keep it around after we enqueue the
  // deferred event and return. strdup() failure is handled in
  // the post() routine below.
  event._event_data.class_unload.name = os::strdup(name);
  return event;
}

void JvmtiDeferredEvent::post() {
  assert(Thread::current()->is_service_thread(),
         "Service thread must post enqueued events");
  switch(_type) {
    case TYPE_COMPILED_METHOD_LOAD: {
      nmethod* nm = _event_data.compiled_method_load;
      JvmtiExport::post_compiled_method_load(nm);
      break;
    }
    case TYPE_COMPILED_METHOD_UNLOAD: {
      JvmtiExport::post_compiled_method_unload(
        _event_data.compiled_method_unload.method_id,
        _event_data.compiled_method_unload.code_begin);
      break;
    }
    case TYPE_DYNAMIC_CODE_GENERATED: {
      JvmtiExport::post_dynamic_code_generated_internal(
        // if strdup failed give the event a default name
        (_event_data.dynamic_code_generated.name == NULL)
          ? "unknown_code" : _event_data.dynamic_code_generated.name,
        _event_data.dynamic_code_generated.code_begin,
        _event_data.dynamic_code_generated.code_end);
      if (_event_data.dynamic_code_generated.name != NULL) {
        // release our copy
        os::free((void *)_event_data.dynamic_code_generated.name);
      }
      break;
    }
    case TYPE_CLASS_UNLOAD: {
      JvmtiExport::post_class_unload_internal(
        // if strdup failed give the event a default name
        (_event_data.class_unload.name == NULL)
          ? "unknown_class" : _event_data.class_unload.name);
      if (_event_data.class_unload.name != NULL) {
        // release our copy
        os::free((void *)_event_data.class_unload.name);
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

void JvmtiDeferredEvent::post_compiled_method_load_event(JvmtiEnv* env) {
  assert(_type == TYPE_COMPILED_METHOD_LOAD, "only user of this method");
  nmethod* nm = _event_data.compiled_method_load;
  JvmtiExport::post_compiled_method_load(env, nm);
}

void JvmtiDeferredEvent::run_nmethod_entry_barriers() {
  if (_type == TYPE_COMPILED_METHOD_LOAD) {
    _event_data.compiled_method_load->run_nmethod_entry_barrier();
  }
}


// Keep the nmethod for compiled_method_load from being unloaded.
void JvmtiDeferredEvent::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  if (cf != NULL && _type == TYPE_COMPILED_METHOD_LOAD) {
    cf->do_code_blob(_event_data.compiled_method_load);
  }
}

// The sweeper calls this and marks the nmethods here on the stack so that
// they cannot be turned into zombies while in the queue.
void JvmtiDeferredEvent::nmethods_do(CodeBlobClosure* cf) {
  if (cf != NULL && _type == TYPE_COMPILED_METHOD_LOAD) {
    cf->do_code_blob(_event_data.compiled_method_load);
  }
}


bool JvmtiDeferredEventQueue::has_events() {
  // We save the queued events before the live phase and post them when it starts.
  // This code could skip saving the events on the queue before the live
  // phase and ignore them, but this would change how we do things now.
  // Starting the service thread earlier causes this to be called before the live phase begins.
  // The events on the queue should all be posted after the live phase so this is an
  // ok check. Before the live phase, DynamicCodeGenerated events are posted directly.
  // If we add other types of events to the deferred queue, this could get ugly.
  return JvmtiEnvBase::get_phase() == JVMTI_PHASE_LIVE && _queue_head != NULL;
}

void JvmtiDeferredEventQueue::enqueue(JvmtiDeferredEvent event) {
  // Events get added to the end of the queue (and are pulled off the front).
  QueueNode* node = new QueueNode(event);
  if (_queue_tail == NULL) {
    _queue_tail = _queue_head = node;
  } else {
    assert(_queue_tail->next() == NULL, "Must be the last element in the list");
    _queue_tail->set_next(node);
    _queue_tail = node;
  }

  assert((_queue_head == NULL) == (_queue_tail == NULL),
         "Inconsistent queue markers");
}

JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() {
  assert(_queue_head != NULL, "Nothing to dequeue");

  if (_queue_head == NULL) {
    // Just in case this happens in product; it shouldn't but let's not crash
    return JvmtiDeferredEvent();
  }

  QueueNode* node = _queue_head;
  _queue_head = _queue_head->next();
  if (_queue_head == NULL) {
    _queue_tail = NULL;
  }

  assert((_queue_head == NULL) == (_queue_tail == NULL),
         "Inconsistent queue markers");

  JvmtiDeferredEvent event = node->event();
  delete node;
  return event;
}

void JvmtiDeferredEventQueue::post(JvmtiEnv* env) {
  // Post and destroy queue nodes
  while (_queue_head != NULL) {
    JvmtiDeferredEvent event = dequeue();
    event.post_compiled_method_load_event(env);
  }
}

void JvmtiDeferredEventQueue::run_nmethod_entry_barriers() {
  for(QueueNode* node = _queue_head; node != NULL; node = node->next()) {
    node->event().run_nmethod_entry_barriers();
  }
}


void JvmtiDeferredEventQueue::oops_do(OopClosure* f, CodeBlobClosure* cf) {
  for(QueueNode* node = _queue_head; node != NULL; node = node->next()) {
    node->event().oops_do(f, cf);
  }
}

void JvmtiDeferredEventQueue::nmethods_do(CodeBlobClosure* cf) {
  for(QueueNode* node = _queue_head; node != NULL; node = node->next()) {
    node->event().nmethods_do(cf);
  }
}
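
// Sketch (not from this file): the service thread drains this queue roughly as
//
//   while (queue.has_events()) {
//     JvmtiDeferredEvent event = queue.dequeue();
//     event.post();   // outside the lock protecting the queue
//   }
//
// The exact locking protocol lives in serviceThread.cpp; the loop above is an
// assumption based only on the queue API defined here.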