GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/code/nmethod.hpp
/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* _next;

  address pc_at(int index)                { assert(index >= 0 && index < count(), ""); return _pc[index]; }
  void    set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
  address handler_at(int index)           { assert(index >= 0 && index < count(), ""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }
  int     count()                         { return OrderAccess::load_acquire(&_count); }
  // increment_count is only called under lock, but there may be concurrent readers.
  void    increment_count()               { OrderAccess::release_store(&_count, _count + 1); }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*          exception_type()             { return _exception_type; }
  ExceptionCache* next()                       { return _next; }
  void            set_next(ExceptionCache* ec) { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};

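// An illustrative sketch (not code from this header; the writer side lives
// in nmethod.cpp) of the publication protocol the accessors above rely on:
// a writer fills a slot first and only then release-stores the incremented
// count, so a reader that load-acquires _count sees fully initialized
// entries at every index below it.
//
//   // writer (under lock):               // lock-free reader:
//   set_pc_at(count(), pc);               int n = count();   // load_acquire
//   set_handler_at(count(), handler);     for (int i = 0; i < n; i++)
//   increment_count();  // release_store    if (pc_at(i) == pc) return handler_at(i);
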
// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently: without volatile, a C++
  // compiler (namely xlC12) may duplicate field accesses, and
  // find_pc_desc_internal has been observed to return wrong results.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};

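// A hedged illustration (assumption, not code from this header) of the
// hazard the volatile qualifier above guards against: if the compiler
// re-loads _pc_descs[i] after a check, a concurrent add_pc_desc can make
// the second load observe a different element than the one just tested.
// Reading through the volatile pointer exactly once avoids that:
//
//   PcDesc* d = _pc_descs[i];   // single load; only 'd' is used afterwards
//   if (d != NULL && d->pc_offset() == pc_offset) return d;
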
// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array

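// Because the parts listed above are laid out contiguously after the header,
// every section boundary in this class is header_begin() plus a stored
// offset, and a section's size is the difference between two adjacent
// boundaries. A sketch using the accessors declared further down:
//
//   address b = consts_begin();   // header_begin() + _consts_offset
//   address e = consts_end();     // header_begin() + code_offset(); next section starts here
//   int     n = consts_size();    // e - b
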
class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;

class nmethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
 private:

  // GC support to help figure out if an nmethod has been
  // cleaned/unloaded by the current GC.
  static unsigned char _global_unloading_clock;

  // Shared fields for all nmethods
  Method*   _method;
  int       _entry_bci;    // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id;   // Cache of method()->jmethod_id()

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;     // from InstanceKlass::osr_nmethods_head

  union {
    // Used by G1 to chain nmethods.
    nmethod* _unloading_next;
    // Used by non-G1 GCs to chain nmethods.
    nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
  };

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler; // The compiler which compiled this nmethod

  // offsets for entry points
  address _entry_point;          // entry point with class check
  address _verified_entry_point; // entry point without class check
  address _osr_entry_point;      // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // All deoptees will resume execution at the location described by
  // this offset.
  int _deoptimize_offset;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this offset.
  int _deoptimize_mh_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

#ifdef HAVE_DTRACE_H
  int _trap_offset;
#endif // def HAVE_DTRACE_H
  int _consts_offset;
  int _stub_offset;
  int _oops_offset;     // offset to where embedded oop table begins (inside data)
  int _metadata_offset; // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id; // which compilation made this nmethod
  int _comp_level; // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;  // Used for maintenance of dependencies (CodeCache_lock)

  bool _marked_for_reclamation;    // Used by NMethodSweeper (set only by sweeper)
  bool _marked_for_deoptimization; // Used for stack deoptimization

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  // Protected by Patching_lock
  volatile unsigned char _state;           // {in_use, not_entrant, zombie, unloaded}

  volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access the oops section
#endif

  enum { in_use      = 0,   // executable nmethod
         not_entrant = 1,   // marked for deoptimization but activations may still exist,
                            // will be transformed to zombie when all activations are gone
         zombie      = 2,   // no activations exist, nmethod is ready for purge
         unloaded    = 3 }; // there should be no activations, should not be called,
                            // will be transformed to zombie immediately

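  // A sketch of the intended life cycle (transitions are performed by
  // make_not_entrant()/make_zombie() below, guarded by Patching_lock;
  // "flushed" denotes removal by the sweeper, not an enum value):
  //
  //   in_use --> not_entrant --> zombie --> flushed
  //      \                        ^
  //       `--> unloaded ----------'   (unloaded becomes zombie immediately)
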
  jbyte _scavenge_root_state;

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistics again.
  RTMState _rtm_state;
#endif

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  long _stack_traversal_mark;

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value the hotter the method. The hotness counter of an nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (mark_active_nmethods()). The hotness
  // counter is decreased (by 1) while sweeping.
  int _hotness_counter;

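  // Worked example (the code cache size is assumed for illustration, not a
  // default asserted here): with ReservedCodeCacheSize = 240 MB, each stack
  // scan that finds this method active resets the counter to
  // (240M / 1M) * 2 = 480; sweeping then decrements it by 1, so the method
  // cools down to 0 after 480 sweeps without being seen on a stack.
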
  ExceptionCache* volatile _exception_cache;
  PcDescCache     _pc_desc_cache;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // For native wrappers
  nmethod(Method* method,
          int nmethod_size,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size);
#endif // def HAVE_DTRACE_H

  // Creation support
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder* recorder,
          Dependencies* dependencies,
          CodeBuffer* code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level);

  // helper methods
  void* operator new(size_t size, int nmethod_size) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(unsigned int state);
  void inc_decompile_count();

  // Used to manipulate the exception cache
  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(methodHandle method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer* code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level);

  static nmethod* new_native_nmethod(methodHandle method,
                                     int compile_id,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

#ifdef HAVE_DTRACE_H
  // The method we generate for a dtrace probe has to look
  // like an nmethod as far as the rest of the system is concerned
  // which is somewhat unfortunate.
  static nmethod* new_dtrace_nmethod(methodHandle method,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int trap_offset,
                                     int frame_complete,
                                     int frame_size);

  int trap_offset() const      { return _trap_offset; }
  address trap_address() const { return insts_begin() + _trap_offset; }

#endif // def HAVE_DTRACE_H

  // accessors
  Method* method() const             { return _method; }
  AbstractCompiler* compiler() const { return _compiler; }

  // type info
  bool is_nmethod() const       { return true; }
  bool is_java_method() const   { return !method()->is_native(); }
  bool is_native_method() const { return method()->is_native(); }
  bool is_osr_method() const    { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_c2() const;
  bool is_compiled_by_shark() const;

  // boundaries for different parts
  address consts_begin          () const { return header_begin() + _consts_offset;        }
  address consts_end            () const { return header_begin() +  code_offset();        }
  address insts_begin           () const { return header_begin() +  code_offset();        }
  address insts_end             () const { return header_begin() + _stub_offset;          }
  address stub_begin            () const { return header_begin() + _stub_offset;          }
  address stub_end              () const { return header_begin() + _oops_offset;          }
  address exception_begin       () const { return header_begin() + _exception_offset;     }
  address deopt_handler_begin   () const { return header_begin() + _deoptimize_offset;    }
  address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset; }
  address unwind_handler_begin  () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin            () const { return (oop*) (header_begin() + _oops_offset);     }
  oop*    oops_end              () const { return (oop*) (header_begin() + _metadata_offset); }

  Metadata** metadata_begin     () const { return (Metadata**) (header_begin() + _metadata_offset);    }
  Metadata** metadata_end       () const { return (Metadata**) (header_begin() + _scopes_data_offset); }

  address scopes_data_begin     () const { return header_begin() + _scopes_data_offset;             }
  address scopes_data_end       () const { return header_begin() + _scopes_pcs_offset;              }
  PcDesc* scopes_pcs_begin      () const { return (PcDesc*) (header_begin() + _scopes_pcs_offset);  }
  PcDesc* scopes_pcs_end        () const { return (PcDesc*) (header_begin() + _dependencies_offset); }
  address dependencies_begin    () const { return header_begin() + _dependencies_offset;  }
  address dependencies_end      () const { return header_begin() + _handler_table_offset; }
  address handler_table_begin   () const { return header_begin() + _handler_table_offset; }
  address handler_table_end     () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin   () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_end     () const { return header_begin() + _nmethod_end_offset;   }

  // Sizes
  int consts_size       () const { return            consts_end       () -            consts_begin       (); }
  int insts_size        () const { return            insts_end        () -            insts_begin        (); }
  int stub_size         () const { return            stub_end         () -            stub_begin         (); }
  int oops_size         () const { return (address)  oops_end         () - (address)  oops_begin         (); }
  int metadata_size     () const { return (address)  metadata_end     () - (address)  metadata_begin     (); }
  int scopes_data_size  () const { return            scopes_data_end  () -            scopes_data_begin  (); }
  int scopes_pcs_size   () const { return (intptr_t) scopes_pcs_end   () - (intptr_t) scopes_pcs_begin   (); }
  int dependencies_size () const { return            dependencies_end () -            dependencies_begin (); }
  int handler_table_size() const { return            handler_table_end() -            handler_table_begin(); }
  int nul_chk_table_size() const { return            nul_chk_table_end() -            nul_chk_table_begin(); }

  int total_size        () const;

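  // Hedged note: total_size() is defined out of line (in nmethod.cpp); it is
  // expected to be the sum of the per-section sizes above, since each section
  // ends exactly where the next one begins in the contiguous layout.
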
  void dec_hotness_counter()        { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int  hotness_counter() const      { return _hotness_counter; }

  // Containment
  bool consts_contains       (address addr)    const { return consts_begin       () <= addr && addr < consts_end       (); }
  bool insts_contains        (address addr)    const { return insts_begin        () <= addr && addr < insts_end        (); }
  bool stub_contains         (address addr)    const { return stub_begin         () <= addr && addr < stub_end         (); }
  bool oops_contains         (oop* addr)       const { return oops_begin         () <= addr && addr < oops_end         (); }
  bool metadata_contains     (Metadata** addr) const { return metadata_begin     () <= addr && addr < metadata_end     (); }
  bool scopes_data_contains  (address addr)    const { return scopes_data_begin  () <= addr && addr < scopes_data_end  (); }
  bool scopes_pcs_contains   (PcDesc* addr)    const { return scopes_pcs_begin   () <= addr && addr < scopes_pcs_end   (); }
  bool handler_table_contains(address addr)    const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr)    const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // entry points
  address entry_point() const          { return _entry_point;          } // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool is_in_use() const      { return _state == in_use; }
  bool is_alive() const       { unsigned char s = _state; return s == in_use || s == not_entrant; }
  bool is_not_entrant() const { return _state == not_entrant; }
  bool is_zombie() const      { return _state == zombie; }
  bool is_unloaded() const    { return _state == unloaded; }

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState rtm_state() const         { return _rtm_state; }
  void set_rtm_state(RTMState state) { _rtm_state = state; }
#endif

  // Make the nmethod non entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant() {
    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
    return make_not_entrant_or_zombie(not_entrant);
  }
  bool make_zombie() { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool unload_reported()     { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  void set_unloading_next(nmethod* next) { _unloading_next = next; }
  nmethod* unloading_next()              { return _unloading_next; }

  static unsigned char global_unloading_clock() { return _global_unloading_clock; }
  static void increase_unloading_clock();

  void set_unloading_clock(unsigned char unloading_clock);
  unsigned char unloading_clock();

  bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
  void mark_for_deoptimization()            { _marked_for_deoptimization = true; }

  void make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies()         { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies() { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies() {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  bool is_marked_for_reclamation() const { return _marked_for_reclamation; }
  void mark_for_reclamation()            { _marked_for_reclamation = 1; }

  bool has_unsafe_access() const     { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }

  bool has_method_handle_invokes() const     { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  bool is_lazy_critical_native() const  { return _lazy_critical_native; }
  void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }

  bool has_wide_vectors() const     { return _has_wide_vectors; }
  void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }

  int comp_level() const { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop  oop_at(int index) const { return index == 0 ? (oop) NULL : *oop_addr_at(index); }
  oop* oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }

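  // Example of the 1-biased indexing above (illustrative): oop_at(1) reads
  // oops_begin()[0], the first embedded oop; index 0 never touches the table
  // and always yields NULL. The same bias applies to metadata_at() below.
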
  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*  metadata_at(int index) const { return index == 0 ? NULL : *metadata_addr_at(index); }
  Metadata** metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_size(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }
  void verify_oop_relocations();

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  // Scavengable oop support
  bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { sl_on_list = 0x01, sl_marked = 0x10 };
  void set_on_scavenge_root_list()   { _scavenge_root_state = sl_on_list; }
  void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void set_scavenge_root_marked()   { _scavenge_root_state |= sl_marked; }
  void clear_scavenge_root_marked() { _scavenge_root_state &= ~sl_marked; }
  bool scavenge_root_not_marked()   { return (_scavenge_root_state & ~sl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif //PRODUCT
  nmethod* scavenge_root_link() const         { return _scavenge_root_link; }
  void set_scavenge_root_link(nmethod *n)     { _scavenge_root_link = n; }

 public:

  // Sweeper support
  long stack_traversal_mark()           { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // Exception cache support
  // Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
  ExceptionCache* exception_cache() const      { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
  void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache(BoolObjectClosure* is_alive);

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int osr_entry_bci() const     { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const     { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void invalidate_osr_method();
  nmethod* osr_link() const     { return _osr_link; }
  void set_osr_link(nmethod *n) { _osr_link = n; }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();
  void clear_ic_stubs();
  void cleanup_inline_caches();
  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();
  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  // Check that all metadata is still alive
  void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);

  // unlink and deallocate this nmethod
  // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_convert_to_zombie();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

  // GC support
  void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  // The parallel versions are used by G1.
  bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
  void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);

 private:
  // Unload a nmethod if the *root object is dead.
  bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
  bool unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);

  void mark_metadata_on_stack_at(RelocIterator* iter_at_metadata);
  void mark_metadata_on_stack_non_relocs();

 public:
  void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map,
                                     OopClosure* f);
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_zombie);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }

  PcDesc* find_pc_desc_internal(address pc, bool approximate);

  PcDesc* find_pc_desc(address pc, bool approximate) {
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate);
  }

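  // Note on the fast path above (hedged summary): last_pc_desc() only matches
  // a PcDesc whose pc_offset equals the queried offset exactly; any miss falls
  // through to find_pc_desc_internal(), which consults the 4-entry PcDescCache
  // before searching the scopes_pcs array.
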
 public:
  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Deopt
  // Return true if the PC is one we would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_entry   (address pc) { return pc == deopt_handler_begin(); }
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  // Accessor/mutator for the original pc of a frame before it was deopted.
  address get_original_pc(const frame* fr)             { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  static address get_deopt_original_pc(const frame* fr);

  // MethodHandle
  bool is_method_handle_return(address return_pc);

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print() const;
  void print_relocations()                    PRODUCT_RETURN;
  void print_pcs()                            PRODUCT_RETURN;
  void print_scopes()                         PRODUCT_RETURN;
  void print_dependencies()                   PRODUCT_RETURN;
  void print_value_on(outputStream* st) const PRODUCT_RETURN;
  void print_calls(outputStream* st)          PRODUCT_RETURN;
  void print_handler_table()                  PRODUCT_RETURN;
  void print_nul_chk_table()                  PRODUCT_RETURN;
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // For debugging
  // CompiledIC*  IC_at(char* p) const;
  // PrimitiveIC* primitiveIC_at(char* p) const;
  oop embeddedOop_at(address p);

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  bool check_all_dependencies();

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(Klass* dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  bool is_dependent_on_method(Method* dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int entry_bci_offset()            { return offset_of(nmethod, _entry_bci); }

  // RedefineClasses support. Mark metadata in nmethods as on_stack so that
  // redefine classes doesn't purge it.
  static void mark_on_stack(nmethod* nm) {
    nm->metadata_do(Metadata::mark_on_stack);
  }
  void metadata_do(void f(Metadata*));
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
  nmethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(nmethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod* nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker()            { _nm = NULL; }
  ~nmethodLocker()           { unlock_nmethod(_nm); }

  nmethod* code() { return _nm; }
  void set_code(nmethod* new_nm) {
    unlock_nmethod(_nm);   // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock_nmethod(_nm);
  }
};

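// A hedged usage sketch (illustrative only; 'pc' stands for some in-code
// address the caller already holds):
//
//   {
//     nmethodLocker nml(pc);      // derives the nmethod from pc and locks it
//     nmethod* nm = nml.code();
//     if (nm != NULL) {
//       // nm cannot be flushed or turned into a zombie here
//     }
//   }                             // destructor unlocks
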
#endif // SHARE_VM_CODE_NMETHOD_HPP