GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/oops/method.hpp
/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_METHODOOP_HPP
#define SHARE_VM_OOPS_METHODOOP_HPP

#include "classfile/vmSymbols.hpp"
#include "code/compressedStream.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/invocationCounter.hpp"
#include "oops/annotations.hpp"
#include "oops/constantPool.hpp"
#include "oops/methodCounters.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
#include "oops/typeArrayOop.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/support/jfrTraceIdExtension.hpp"
#endif

// A Method* represents a Java method.
//
// Memory layout (each line represents a word). Note that most applications load thousands of methods,
// so keeping the size of this structure small has a big impact on footprint.
//
// We put all oops and method_size first for better gc cache locality.
//
// The actual bytecodes are inlined after the end of the Method struct.
//
// There are bits in the access_flags telling whether inlined tables are present.
// Note that accessing the line number and local variable tables is not performance critical at all.
// Accessing the checked exceptions table is used by reflection, so we put that last to make access
// to it fast.
//
// The line number table is compressed and inlined following the byte codes. It is found as the first
// byte following the byte codes. The checked exceptions table and the local variable table are inlined
// after the line number table, and indexed from the end of the method. We do not compress the checked
// exceptions table since the average length is less than 2, and do not bother to compress the local
// variable table either since it is mostly absent.
//
// Note that native_function and signature_handler have to be at fixed offsets (required by the interpreter)
//
// |------------------------------------------------------|
// | header                                               |
// | klass                                                |
// |------------------------------------------------------|
// | ConstMethod*                   (oop)                 |
// |------------------------------------------------------|
// | methodData                     (oop)                 |
// | methodCounters                                       |
// |------------------------------------------------------|
// | access_flags                                         |
// | vtable_index                                         |
// |------------------------------------------------------|
// | result_index (C++ interpreter only)                  |
// |------------------------------------------------------|
// | method_size             | intrinsic_id  |   flags    |
// |------------------------------------------------------|
// | code                           (pointer)             |
// | i2i                            (pointer)             |
// | adapter                        (pointer)             |
// | from_compiled_entry            (pointer)             |
// | from_interpreted_entry         (pointer)             |
// |------------------------------------------------------|
// | native_function       (present only if native)       |
// | signature_handler     (present only if native)       |
// |------------------------------------------------------|
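
// Editor's note (illustrative, not part of the original source): given the
// layout described above, the compressed line number table of a method begins
// at code_base() + code_size(), i.e. at the first byte past the bytecodes,
// while the checked exceptions and local variable tables are reached by
// indexing backward from the end of the ConstMethod.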

class CheckedExceptionElement;
class LocalVariableTableElement;
class AdapterHandlerEntry;
class MethodData;
class MethodCounters;
class ConstMethod;
class InlineTableSizes;
class KlassSizeStats;

class Method : public Metadata {
 friend class VMStructs;
 private:
  ConstMethod*      _constMethod;     // Method read-only data.
  MethodData*       _method_data;
  MethodCounters*   _method_counters;
  AccessFlags       _access_flags;    // Access flags
  int               _vtable_index;    // vtable index of this method (see VtableIndexFlag)
                                      // note: can have vtables with >2**16 elements (because of inheritance)
  u2                _method_size;     // size of this object
  u1                _intrinsic_id;    // vmSymbols::intrinsic_id (0 == _none)
  u1                _jfr_towrite          : 1, // Flags
                    _caller_sensitive     : 1,
                    _force_inline         : 1,
                    _hidden               : 1,
                    _running_emcp         : 1,
                    _dont_inline          : 1,
                    _has_injected_profile : 1,
                                          : 2;

  JFR_ONLY(DEFINE_TRACE_FLAG;)

#ifndef PRODUCT
  int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
#endif
  // Entry point for calling both from and to the interpreter.
  address _i2i_entry;           // All-args-on-stack calling convention
  // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked.
  AdapterHandlerEntry* _adapter;
  // Entry point for calling from compiled code, to compiled code if it exists
  // or else the interpreter.
  volatile address _from_compiled_entry;     // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
  // The entry point for calling both from and to compiled code is
  // "_code->entry_point()". Because of tiered compilation and de-opt, this
  // field can come and go. It can transition from NULL to not-null at any
  // time (whenever a compile completes). It can transition from not-null to
  // NULL only at safepoints (because of a de-opt).
  nmethod* volatile _code;                   // Points to the corresponding piece of native code
  volatile address _from_interpreted_entry;  // Cache of _code ? _adapter->i2c_entry() : _i2i_entry

  // Constructor
  Method(ConstMethod* xconst, AccessFlags access_flags, int size);
 public:

  static Method* allocate(ClassLoaderData* loader_data,
                          int byte_code_size,
                          AccessFlags access_flags,
                          InlineTableSizes* sizes,
                          ConstMethod::MethodType method_type,
                          TRAPS);

  // CDS and vtbl checking can create an empty Method to get vtbl pointer.
  Method() {}

  // The Method vtable is restored by this call when the Method is in the
  // shared archive. See patch_klass_vtables() in metaspaceShared.cpp for
  // all the gory details. SA, dtrace and pstack helpers distinguish metadata
  // by their vtable.
  void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
  bool is_method() const volatile { return true; }

  void restore_unshareable_info(TRAPS);

  // accessors for instance variables

  ConstMethod* constMethod() const { return _constMethod; }
  void set_constMethod(ConstMethod* xconst) { _constMethod = xconst; }


  static address make_adapters(methodHandle mh, TRAPS);
  volatile address from_compiled_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
  volatile address from_interpreted_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }

  // access flag
  AccessFlags access_flags() const { return _access_flags; }
  void set_access_flags(AccessFlags flags) { _access_flags = flags; }

  // name
  Symbol* name() const { return constants()->symbol_at(name_index()); }
  int name_index() const { return constMethod()->name_index(); }
  void set_name_index(int index) { constMethod()->set_name_index(index); }

  // signature
  Symbol* signature() const { return constants()->symbol_at(signature_index()); }
  int signature_index() const { return constMethod()->signature_index(); }
  void set_signature_index(int index) { constMethod()->set_signature_index(index); }

  // generics support
  Symbol* generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
  int generic_signature_index() const { return constMethod()->generic_signature_index(); }
  void set_generic_signature_index(int index) { constMethod()->set_generic_signature_index(index); }

  // annotations support
  AnnotationArray* annotations() const {
    return constMethod()->method_annotations();
  }
  AnnotationArray* parameter_annotations() const {
    return constMethod()->parameter_annotations();
  }
  AnnotationArray* annotation_default() const {
    return constMethod()->default_annotations();
  }
  AnnotationArray* type_annotations() const {
    return constMethod()->type_annotations();
  }

  // Helper routine: get klass name + "." + method name + signature as
  // C string, for the purpose of providing more useful NoSuchMethodErrors
  // and fatal error handling. The string is allocated in resource
  // area if a buffer is not provided by the caller.
  char* name_and_sig_as_C_string() const;
  char* name_and_sig_as_C_string(char* buf, int size) const;

  // Static routines for situations where we don't have a Method*
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);

  Bytecodes::Code java_code_at(int bci) const {
    return Bytecodes::java_code_at(this, bcp_from(bci));
  }
  Bytecodes::Code code_at(int bci) const {
    return Bytecodes::code_at(this, bcp_from(bci));
  }

  // JVMTI breakpoints
  Bytecodes::Code orig_bytecode_at(int bci) const;
  void set_orig_bytecode_at(int bci, Bytecodes::Code code);
  void set_breakpoint(int bci);
  void clear_breakpoint(int bci);
  void clear_all_breakpoints();
  // Tracking number of breakpoints, for fullspeed debugging.
  // Only mutated by VM thread.
  u2 number_of_breakpoints() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->number_of_breakpoints();
    }
  }
  void incr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->incr_number_of_breakpoints();
    }
  }
  void decr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->decr_number_of_breakpoints();
    }
  }
  // Initialization only
  void clear_number_of_breakpoints() {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->clear_number_of_breakpoints();
    }
  }

  // index into InstanceKlass methods() array
  // note: also used by jfr
  u2 method_idnum() const { return constMethod()->method_idnum(); }
  void set_method_idnum(u2 idnum) { constMethod()->set_method_idnum(idnum); }

  u2 orig_method_idnum() const { return constMethod()->orig_method_idnum(); }
  void set_orig_method_idnum(u2 idnum) { constMethod()->set_orig_method_idnum(idnum); }

  // code size
  int code_size() const { return constMethod()->code_size(); }

  // method size
  int method_size() const { return _method_size; }
  void set_method_size(int size) {
    assert(0 <= size && size < (1 << 16), "invalid method size");
    _method_size = size;
  }

  // constant pool for Klass* holding this method
  ConstantPool* constants() const { return constMethod()->constants(); }
  void set_constants(ConstantPool* c) { constMethod()->set_constants(c); }

  // max stack
  // return original max stack size for method verification
  int verifier_max_stack() const { return constMethod()->max_stack(); }
  int max_stack() const { return constMethod()->max_stack() + extra_stack_entries(); }
  void set_max_stack(int size) { constMethod()->set_max_stack(size); }

  // max locals
  int max_locals() const { return constMethod()->max_locals(); }
  void set_max_locals(int size) { constMethod()->set_max_locals(size); }

  int highest_comp_level() const;
  void set_highest_comp_level(int level);
  int highest_osr_comp_level() const;
  void set_highest_osr_comp_level(int level);

  // Count of times method was exited via exception while interpreting
  void interpreter_throwout_increment(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->interpreter_throwout_increment();
    }
  }

  int interpreter_throwout_count() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->interpreter_throwout_count();
    }
  }

  // size of parameters
  int size_of_parameters() const { return constMethod()->size_of_parameters(); }
  void set_size_of_parameters(int size) { constMethod()->set_size_of_parameters(size); }

  bool has_stackmap_table() const {
    return constMethod()->has_stackmap_table();
  }

  Array<u1>* stackmap_data() const {
    return constMethod()->stackmap_data();
  }

  void set_stackmap_data(Array<u1>* sd) {
    constMethod()->set_stackmap_data(sd);
  }

  // exception handler table
  bool has_exception_handler() const
                             { return constMethod()->has_exception_handler(); }
  int exception_table_length() const
                             { return constMethod()->exception_table_length(); }
  ExceptionTableElement* exception_table_start() const
                             { return constMethod()->exception_table_start(); }

  // Finds the first entry point bci of an exception handler for an
  // exception of klass ex_klass thrown at throw_bci. A value of NULL
  // for ex_klass indicates that the exception klass is not known; in
  // this case it matches any constraint class. Returns -1 if the
  // exception cannot be handled in this method. The handler
  // constraint classes are loaded if necessary. Note that this may
  // throw an exception if loading of the constraint classes causes
  // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
  // If an exception is thrown, returns the bci of the
  // exception handler which caused the exception to be thrown, which
  // is needed for proper retries. See, for example,
  // InterpreterRuntime::exception_handler_for_exception.
  static int fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS);
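
  // Illustrative use (editor's sketch, not in the original source): an unwind
  // path would query this along the lines of
  //   int handler_bci = Method::fast_exception_handler_bci_for(mh, ex_klass, throw_bci, THREAD);
  //   if (handler_bci == -1) { /* not handled here; continue unwinding */ }
  // matching the contract documented above.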

  // method data access
  MethodData* method_data() const {
    return _method_data;
  }

  void set_method_data(MethodData* data) {
    // The store into method must be released. On platforms without
    // total store order (TSO) the reference may become visible before
    // the initialization of data otherwise.
    OrderAccess::release_store_ptr((volatile void *)&_method_data, data);
  }

  MethodCounters* method_counters() const {
    return _method_counters;
  }

  void clear_method_counters() {
    _method_counters = NULL;
  }

  bool init_method_counters(MethodCounters* counters) {
    // Try to install a pointer to MethodCounters, return true on success.
    return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL;
  }
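
  // Editor's note (illustrative, not in the original source): callers race to
  // install counters via the cmpxchg above, along the lines of
  //   MethodCounters* mcs = /* newly built counters */;
  //   if (!init_method_counters(mcs)) {
  //     // another thread won the race; this copy is redundant
  //   }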

#ifdef TIERED
  // We are reusing interpreter_invocation_count as a holder for the previous event count!
  // We can do that since interpreter_invocation_count is not used in tiered.
  int prev_event_count() const {
    if (method_counters() == NULL) {
      return 0;
    } else {
      return method_counters()->interpreter_invocation_count();
    }
  }
  void set_prev_event_count(int count) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_interpreter_invocation_count(count);
    }
  }
  jlong prev_time() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->prev_time();
  }
  void set_prev_time(jlong time) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_prev_time(time);
    }
  }
  float rate() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->rate();
  }
  void set_rate(float rate) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_rate(rate);
    }
  }
#endif

  int invocation_count();
  int backedge_count();

  bool was_executed_more_than(int n);
  bool was_never_executed() { return !was_executed_more_than(0); }

  static void build_interpreter_method_data(methodHandle method, TRAPS);

  static MethodCounters* build_method_counters(Method* m, TRAPS);

  int interpreter_invocation_count() {
    if (TieredCompilation) {
      return invocation_count();
    } else {
      MethodCounters* mcs = method_counters();
      return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count();
    }
  }
  int increment_interpreter_invocation_count(TRAPS) {
    if (TieredCompilation) ShouldNotReachHere();
    MethodCounters* mcs = get_method_counters(CHECK_0);
    return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count();
  }

#ifndef PRODUCT
  int compiled_invocation_count() const { return _compiled_invocation_count; }
  void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; }
#endif // not PRODUCT

  // Clear (non-shared space) pointers which could not be relevant
  // if this (shared) method were mapped into another JVM.
  void remove_unshareable_info();

  // nmethod/verified compiler entry
  address verified_code_entry();
  bool check_code() const; // Not inline to avoid circular ref
  nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
  void clear_code(bool acquire_lock = true); // Clear out any compiled code
  static void set_code(methodHandle mh, nmethod* code);
  void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
  address get_i2c_entry();
  address get_c2i_entry();
  address get_c2i_unverified_entry();
  AdapterHandlerEntry* adapter() { return _adapter; }
  // setup entry points
  void link_method(methodHandle method, TRAPS);
  // clear entry points. Used by sharing code
  void unlink_method();

  // vtable index
  enum VtableIndexFlag {
    // Valid vtable indexes are non-negative (>= 0).
    // These few negative values are used as sentinels.
    itable_index_max        = -10, // first itable index, growing downward
    pending_itable_index    = -9,  // itable index will be assigned
    invalid_vtable_index    = -4,  // distinct from any valid vtable index
    garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
    nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
    // 6330203 Note: Do not use -1, which was overloaded with many meanings.
  };
  DEBUG_ONLY(bool valid_vtable_index() const { return _vtable_index >= nonvirtual_vtable_index; })
  bool has_vtable_index() const { return _vtable_index >= 0; }
  int vtable_index() const { return _vtable_index; }
  void set_vtable_index(int index);
  DEBUG_ONLY(bool valid_itable_index() const { return _vtable_index <= pending_itable_index; })
  bool has_itable_index() const { return _vtable_index <= itable_index_max; }
  int itable_index() const { assert(valid_itable_index(), "");
                             return itable_index_max - _vtable_index; }
  void set_itable_index(int index);
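
  // Worked example (editor's note, not in the original source): with
  // itable_index_max == -10, itable slot i is encoded as _vtable_index == -10 - i,
  // so a stored value of -12 decodes as
  //   itable_index() == itable_index_max - _vtable_index == -10 - (-12) == 2.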

  // interpreter entry
  address interpreter_entry() const { return _i2i_entry; }
  // Only used when first initialized so we can set _i2i_entry and _from_interpreted_entry
  void set_interpreter_entry(address entry) { _i2i_entry = entry; _from_interpreted_entry = entry; }

  // native function (used for native methods only)
  enum {
    native_bind_event_is_interesting = true
  };
  address native_function() const { return *(native_function_addr()); }
  address critical_native_function();

  // Must specify a real function (not NULL).
  // Use clear_native_function() to unregister.
  void set_native_function(address function, bool post_event_flag);
  bool has_native_function() const;
  void clear_native_function();

  // signature handler (used for native methods only)
  address signature_handler() const { return *(signature_handler_addr()); }
  void set_signature_handler(address handler);

  // Interpreter oopmap support
  void mask_for(int bci, InterpreterOopMap* mask);

#ifndef PRODUCT
  // operations on invocation counter
  void print_invocation_count();
#endif

  // byte codes
  void set_code(address code) { return constMethod()->set_code(code); }
  address code_base() const { return constMethod()->code_base(); }
  bool contains(address bcp) const { return constMethod()->contains(bcp); }

  // prints byte codes
  void print_codes() const { print_codes_on(tty); }
  void print_codes_on(outputStream* st) const PRODUCT_RETURN;
  void print_codes_on(int from, int to, outputStream* st) const PRODUCT_RETURN;

  // method parameters
  bool has_method_parameters() const
                         { return constMethod()->has_method_parameters(); }
  int method_parameters_length() const
                         { return constMethod()->method_parameters_length(); }
  MethodParametersElement* method_parameters_start() const
                         { return constMethod()->method_parameters_start(); }

  // checked exceptions
  int checked_exceptions_length() const
                         { return constMethod()->checked_exceptions_length(); }
  CheckedExceptionElement* checked_exceptions_start() const
                         { return constMethod()->checked_exceptions_start(); }

  // localvariable table
  bool has_localvariable_table() const
                         { return constMethod()->has_localvariable_table(); }
  int localvariable_table_length() const
                         { return constMethod()->localvariable_table_length(); }
  LocalVariableTableElement* localvariable_table_start() const
                         { return constMethod()->localvariable_table_start(); }

  bool has_linenumber_table() const
                         { return constMethod()->has_linenumber_table(); }
  u_char* compressed_linenumber_table() const
                         { return constMethod()->compressed_linenumber_table(); }

  // method holder (the Klass* holding this method)
  InstanceKlass* method_holder() const { return constants()->pool_holder(); }

  void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
  Symbol* klass_name() const;                      // returns the name of the method holder
  BasicType result_type() const;                   // type of the method result
  bool is_returning_oop() const { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); }
  bool is_returning_fp() const { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); }

  // Checked exceptions thrown by this method (resolved to mirrors)
  objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }

  // Access flags
  bool is_public() const { return access_flags().is_public(); }
  bool is_private() const { return access_flags().is_private(); }
  bool is_protected() const { return access_flags().is_protected(); }
  bool is_package_private() const { return !is_public() && !is_private() && !is_protected(); }
  bool is_static() const { return access_flags().is_static(); }
  bool is_final() const { return access_flags().is_final(); }
  bool is_synchronized() const { return access_flags().is_synchronized(); }
  bool is_native() const { return access_flags().is_native(); }
  bool is_abstract() const { return access_flags().is_abstract(); }
  bool is_strict() const { return access_flags().is_strict(); }
  bool is_synthetic() const { return access_flags().is_synthetic(); }

  // returns true if contains only return operation
  bool is_empty_method() const;

  // returns true if this is a vanilla constructor
  bool is_vanilla_constructor() const;

  // checks method and its method holder
  bool is_final_method() const;
  bool is_final_method(AccessFlags class_access_flags) const;
  bool is_default_method() const;

  // true if method needs no dynamic dispatch (final and/or no vtable entry)
  bool can_be_statically_bound() const;
  bool can_be_statically_bound(AccessFlags class_access_flags) const;

  // returns true if the method has any backward branches.
  bool has_loops() {
    return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
  };

  bool compute_has_loops_flag();

  bool has_jsrs() {
    return access_flags().has_jsrs();
  };
  void set_has_jsrs() {
    _access_flags.set_has_jsrs();
  }

  // returns true if the method has any monitors.
  bool has_monitors() const { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
  bool has_monitor_bytecodes() const { return access_flags().has_monitor_bytecodes(); }

  void set_has_monitor_bytecodes() { _access_flags.set_has_monitor_bytecodes(); }

  // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
  // properly nest in the method. It might return false, even though they actually nest properly, since the info
  // has not been computed yet.
  bool guaranteed_monitor_matching() const { return access_flags().is_monitor_matching(); }
  void set_guaranteed_monitor_matching() { _access_flags.set_monitor_matching(); }

  // returns true if the method is an accessor function (setter/getter).
  bool is_accessor() const;

  // returns true if the method does nothing but return a constant of primitive type
  bool is_constant_getter() const;

  // returns true if the method is an initializer (<init> or <clinit>).
  bool is_initializer() const;

  // returns true if the method is static OR if the classfile version < 51
  bool has_valid_initializer_flags() const;

  // returns true if the method name is <clinit> and the method has
  // valid static initializer flags.
  bool is_static_initializer() const;

  // returns true if the method name is <init>
  bool is_object_initializer() const;

  // compiled code support
  // NOTE: code() is inherently racy as deopt can be clearing code
  // simultaneously. Use with caution.
  bool has_compiled_code() const { return code() != NULL; }

  // sizing
  static int header_size() { return sizeof(Method)/HeapWordSize; }
  static int size(bool is_native);
  int size() const { return method_size(); }
#if INCLUDE_SERVICES
  void collect_statistics(KlassSizeStats *sz) const;
#endif

  // interpreter support
  static ByteSize const_offset()          { return byte_offset_of(Method, _constMethod); }
  static ByteSize access_flags_offset()   { return byte_offset_of(Method, _access_flags); }
  static ByteSize from_compiled_offset()  { return byte_offset_of(Method, _from_compiled_entry); }
  static ByteSize code_offset()           { return byte_offset_of(Method, _code); }
  static ByteSize method_data_offset() {
    return byte_offset_of(Method, _method_data);
  }
  static ByteSize method_counters_offset() {
    return byte_offset_of(Method, _method_counters);
  }
#ifndef PRODUCT
  static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); }
#endif // not PRODUCT
  static ByteSize native_function_offset()   { return in_ByteSize(sizeof(Method)); }
  static ByteSize from_interpreted_offset()  { return byte_offset_of(Method, _from_interpreted_entry); }
  static ByteSize interpreter_entry_offset() { return byte_offset_of(Method, _i2i_entry); }
  static ByteSize signature_handler_offset() { return in_ByteSize(sizeof(Method) + wordSize); }
  static ByteSize itable_index_offset()      { return byte_offset_of(Method, _vtable_index); }

  // for code generation
  static int method_data_offset_in_bytes()  { return offset_of(Method, _method_data); }
  static int intrinsic_id_offset_in_bytes() { return offset_of(Method, _intrinsic_id); }
  static int intrinsic_id_size_in_bytes()   { return sizeof(u1); }

  // Static methods that are used to implement member methods where an exposed this pointer
  // is needed due to possible GCs
  static objArrayHandle resolved_checked_exceptions_impl(Method* this_oop, TRAPS);

  // Returns the byte code index from the byte code pointer
  int bci_from(address bcp) const;
  address bcp_from(int bci) const;
  int validate_bci_from_bcx(intptr_t bcx) const;

  // Returns the line number for a bci if debugging information for the method is provided,
  // -1 is returned otherwise.
  int line_number_from_bci(int bci) const;

  // Reflection support
  bool is_overridden_in(Klass* k) const;

  // Stack walking support
  bool is_ignored_by_security_stack_walk() const;

  // JSR 292 support
  bool is_method_handle_intrinsic() const;  // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
  bool is_compiled_lambda_form() const;     // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
  bool has_member_arg() const;              // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
  static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
                                                   Symbol* signature,    // anything at all
                                                   TRAPS);
  static Klass* check_non_bcp_klass(Klass* klass);

  // How many extra stack entries for invokedynamic when it's enabled
  static const int extra_stack_entries_for_jsr292 = 1;

  // this operates only on invoke methods:
  // presize interpreter frames for extra interpreter stack entries, if needed
  // Account for the extra appendix argument for invokehandle/invokedynamic
  static int extra_stack_entries() { return EnableInvokeDynamic ? extra_stack_entries_for_jsr292 : 0; }
  static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize
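
  // Worked example (editor's note, not in the original source): with
  // EnableInvokeDynamic set, extra_stack_entries() == 1, so a method whose
  // classfile declares max_stack == 3 reports verifier_max_stack() == 3 but
  // max_stack() == 4, reserving room for the appendix argument of
  // invokehandle/invokedynamic described above.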

  // RedefineClasses() support:
  bool is_old() const { return access_flags().is_old(); }
  void set_is_old() { _access_flags.set_is_old(); }
  bool is_obsolete() const { return access_flags().is_obsolete(); }
  void set_is_obsolete() { _access_flags.set_is_obsolete(); }
  bool is_deleted() const { return access_flags().is_deleted(); }
  void set_is_deleted() { _access_flags.set_is_deleted(); }

  bool is_running_emcp() const {
    // EMCP methods are old but not obsolete or deleted. Equivalent
    // Modulo Constant Pool means the method is equivalent except
    // the constant pool and instructions that access the constant
    // pool might be different.
    // If a breakpoint is set in a redefined method, its EMCP methods that are
    // still running must have a breakpoint also.
    return _running_emcp;
  }

  void set_running_emcp(bool x) {
    _running_emcp = x;
  }

  bool on_stack() const { return access_flags().on_stack(); }
  void set_on_stack(const bool value);

  // see the definition in Method*.cpp for the gory details
  bool should_not_be_cached() const;

  // JVMTI Native method prefixing support:
  bool is_prefixed_native() const { return access_flags().is_prefixed_native(); }
  void set_is_prefixed_native() { _access_flags.set_is_prefixed_native(); }

  // Rewriting support
  static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
                                          u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);

  // jmethodID handling
  // Because the useful life-span of a jmethodID cannot be determined,
  // once created they are never reclaimed. The methods to which they refer,
  // however, can be GC'ed away if the class is unloaded or if the method is
  // made obsolete or deleted -- in these cases, the jmethodID
  // refers to NULL (as is the case for any weak reference).
  static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
  static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);

  // Use resolve_jmethod_id() in situations where the caller is expected
  // to provide a valid jmethodID; the only sanity checks are in asserts;
  // result guaranteed not to be NULL.
  inline static Method* resolve_jmethod_id(jmethodID mid) {
    assert(mid != NULL, "JNI method id should not be null");
    return *((Method**)mid);
  }

  // Use checked_resolve_jmethod_id() in situations where the caller
  // should provide a valid jmethodID, but might not. NULL is returned
  // when the jmethodID does not refer to a valid method.
  static Method* checked_resolve_jmethod_id(jmethodID mid);

  static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
  static bool is_method_id(jmethodID mid);

  // Clear methods
  static void clear_jmethod_ids(ClassLoaderData* loader_data);
  static void print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;

  // Get this method's jmethodID -- allocate if it doesn't exist
  jmethodID jmethod_id() { methodHandle this_h(this);
                           return InstanceKlass::get_jmethod_id(method_holder(), this_h); }

  // Lookup the jmethodID for this method. Return NULL if not found.
  // NOTE that this function can be called from a signal handler
  // (see AsyncGetCallTrace support for Forte Analyzer) and this
  // needs to be async-safe. No allocation should be done and
  // so handles are not used to avoid deadlock.
  jmethodID find_jmethod_id_or_null() { return method_holder()->jmethod_id_or_null(this); }

  // Support for inlining of intrinsic methods
  vmIntrinsics::ID intrinsic_id() const { return (vmIntrinsics::ID) _intrinsic_id; }
  void set_intrinsic_id(vmIntrinsics::ID id) { _intrinsic_id = (u1) id; }

  // Helper routines for intrinsic_id() and vmIntrinsics::method().
  void init_intrinsic_id();  // updates from _none if a match
  void clear_jmethod_id(ClassLoaderData* loader_data);

  static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);

  bool jfr_towrite() { return _jfr_towrite; }
  void set_jfr_towrite(bool x) { _jfr_towrite = x; }
  bool caller_sensitive() { return _caller_sensitive; }
  void set_caller_sensitive(bool x) { _caller_sensitive = x; }
  bool force_inline() { return _force_inline; }
  void set_force_inline(bool x) { _force_inline = x; }
  bool dont_inline() { return _dont_inline; }
  void set_dont_inline(bool x) { _dont_inline = x; }
  bool is_hidden() { return _hidden; }
  void set_hidden(bool x) { _hidden = x; }
  bool has_injected_profile() { return _has_injected_profile; }
  void set_has_injected_profile(bool x) { _has_injected_profile = x; }

  JFR_ONLY(DEFINE_TRACE_FLAG_ACCESSOR;)

  ConstMethod::MethodType method_type() const {
    return _constMethod->method_type();
  }
  bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; }

  // On-stack replacement support
  bool has_osr_nmethod(int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
  }

  int mark_osr_nmethods() {
    return method_holder()->mark_osr_nmethods(this);
  }

  nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
  }

  // Inline cache support
  void cleanup_inline_caches();

  // Find if klass for method is loaded
  bool is_klass_loaded_by_klass_index(int klass_index) const;
  bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;

  // Indicates whether compilation failed earlier for this method, or
  // whether it is not compilable for another reason like having a
  // breakpoint set in it.
  bool is_not_compilable(int comp_level = CompLevel_any) const;
  void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_compilable(comp_level, false);
  }
  bool is_not_osr_compilable(int comp_level = CompLevel_any) const;
  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_osr_compilable(comp_level, false);
  }
  bool is_always_compilable() const;

 private:
  void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);

 public:
  MethodCounters* get_method_counters(TRAPS) {
    if (_method_counters == NULL) {
      build_method_counters(this, CHECK_AND_CLEAR_NULL);
    }
    return _method_counters;
  }

  bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); }
  void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); }
  void clear_not_c1_compilable() { _access_flags.clear_not_c1_compilable(); }
  bool is_not_c2_compilable() const { return access_flags().is_not_c2_compilable(); }
  void set_not_c2_compilable() { _access_flags.set_not_c2_compilable(); }
  void clear_not_c2_compilable() { _access_flags.clear_not_c2_compilable(); }

  bool is_not_c1_osr_compilable() const { return is_not_c1_compilable(); }  // don't waste an accessFlags bit
  void set_not_c1_osr_compilable() { set_not_c1_compilable(); }             // don't waste an accessFlags bit
  void clear_not_c1_osr_compilable() { clear_not_c1_compilable(); }         // don't waste an accessFlags bit
  bool is_not_c2_osr_compilable() const { return access_flags().is_not_c2_osr_compilable(); }
  void set_not_c2_osr_compilable() { _access_flags.set_not_c2_osr_compilable(); }
  void clear_not_c2_osr_compilable() { _access_flags.clear_not_c2_osr_compilable(); }

  // Background compilation support
  bool queued_for_compilation() const { return access_flags().queued_for_compilation(); }
  void set_queued_for_compilation() { _access_flags.set_queued_for_compilation(); }
  void clear_queued_for_compilation() { _access_flags.clear_queued_for_compilation(); }

  // Resolve all classes in signature, return 'true' if successful
  static bool load_signature_classes(methodHandle m, TRAPS);

  // Returns true if not all classes referenced in the signature, including the return type, have been loaded
  static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS);

  // Printing
  void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM
#if INCLUDE_JVMTI
  void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses
#else
  void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)"
#endif

  // Helper routine used for method sorting
  static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);

  // Deallocation function for redefine classes or if an error occurs
  void deallocate_contents(ClassLoaderData* loader_data);

  // Printing
#ifndef PRODUCT
  void print_on(outputStream* st) const;
#endif
  void print_value_on(outputStream* st) const;

  const char* internal_name() const { return "{method}"; }

  // Check for valid method pointer
  static bool has_method_vptr(const void* ptr);
  bool is_valid_method() const;

  // Verify
  void verify() { verify_on(tty); }
  void verify_on(outputStream* st);

 private:

  // Inlined elements
  address* native_function_addr() const { assert(is_native(), "must be native"); return (address*) (this+1); }
  address* signature_handler_addr() const { return native_function_addr() + 1; }
};


// Utility class for compressing line number tables

class CompressedLineNumberWriteStream: public CompressedWriteStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
  CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}

  // Write (bci, line number) pair to stream
  void write_pair_regular(int bci_delta, int line_delta);

  inline void write_pair_inline(int bci, int line) {
    int bci_delta = bci - _bci;
    int line_delta = line - _line;
    _bci = bci;
    _line = line;
    // Skip (0,0) deltas - they do not add information and conflict with terminator.
    if (bci_delta == 0 && line_delta == 0) return;
    // Check if bci is 5-bit and line number 3-bit unsigned.
    if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
      // Compress into single byte.
      jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
      // Check that value doesn't match escape character.
      if (value != 0xFF) {
        write_byte(value);
        return;
      }
    }
    write_pair_regular(bci_delta, line_delta);
  }
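
  // Worked example (editor's note, not in the original source): a pair with
  // bci_delta == 4 and line_delta == 2 passes the 5-bit/3-bit test above and
  // is packed as (4 << 3) | 2 == 0x22, a single byte. Deltas that do not fit,
  // or whose packed value collides with the 0xFF escape, fall back to
  // write_pair_regular().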

  // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
  // Disabling optimization doesn't work for methods in header files
  // so we force it to call through the non-optimized version in the .cpp.
  // It's gross, but it's the only way we can ensure that all callers are
  // fixed. _MSC_VER is defined by the windows compiler.
#if defined(_M_AMD64) && _MSC_VER >= 1400
  void write_pair(int bci, int line);
#else
  void write_pair(int bci, int line) { write_pair_inline(bci, line); }
#endif

  // Write end-of-stream marker
  void write_terminator() { write_byte(0); }
};


// Utility class for decompressing line number tables

class CompressedLineNumberReadStream: public CompressedReadStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberReadStream(u_char* buffer);
  // Read (bci, line number) pair from stream. Returns false at end-of-stream.
  bool read_pair();
  // Accessing bci and line number (after calling read_pair)
  int bci() const { return _bci; }
  int line() const { return _line; }
};
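
// Illustrative decoding loop (editor's sketch, not in the original source),
// simplified from the way Method::line_number_from_bci walks the table:
//   CompressedLineNumberReadStream stream(method->compressed_linenumber_table());
//   while (stream.read_pair()) {
//     if (stream.bci() == target_bci) return stream.line();
//   }
//   return -1;  // no debug information for this bci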


/// Fast Breakpoints.

// If this structure gets more complicated (because bpts get numerous),
// move it into its own header.

// There is presently no provision for concurrent access
// to breakpoint lists, which is only OK for JVMTI because
// breakpoints are written only at safepoints, and are read
// concurrently only outside of safepoints.

class BreakpointInfo : public CHeapObj<mtClass> {
  friend class VMStructs;
 private:
  Bytecodes::Code _orig_bytecode;
  int             _bci;
  u2              _name_index;       // of method
  u2              _signature_index;  // of method
  BreakpointInfo* _next;             // simple storage allocation

 public:
  BreakpointInfo(Method* m, int bci);

  // accessors
  Bytecodes::Code orig_bytecode() { return _orig_bytecode; }
  void set_orig_bytecode(Bytecodes::Code code) { _orig_bytecode = code; }
  int bci() { return _bci; }

  BreakpointInfo* next() const { return _next; }
  void set_next(BreakpointInfo* n) { _next = n; }

  // helpers for searchers
  bool match(const Method* m, int bci) {
    return bci == _bci && match(m);
  }

  bool match(const Method* m) {
    return _name_index == m->name_index() &&
           _signature_index == m->signature_index();
  }

  void set(Method* method);
  void clear(Method* method);
};

// Utility class for accessing exception handlers
class ExceptionTable : public StackObj {
 private:
  ExceptionTableElement* _table;
  u2 _length;

 public:
  ExceptionTable(const Method* m) {
    if (m->has_exception_handler()) {
      _table = m->exception_table_start();
      _length = m->exception_table_length();
    } else {
      _table = NULL;
      _length = 0;
    }
  }

  int length() const {
    return _length;
  }

  u2 start_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].start_pc;
  }

  void set_start_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].start_pc = value;
  }

  u2 end_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].end_pc;
  }

  void set_end_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].end_pc = value;
  }

  u2 handler_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].handler_pc;
  }

  void set_handler_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].handler_pc = value;
  }

  u2 catch_type_index(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].catch_type_index;
  }

  void set_catch_type_index(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].catch_type_index = value;
  }
};
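
// Illustrative use (editor's sketch, not in the original source): scanning a
// method's handlers with the accessors above, where each entry covers
// bytecodes in [start_pc, end_pc) and transfers control to handler_pc:
//   ExceptionTable table(method);
//   for (int i = 0; i < table.length(); i++) {
//     tty->print_cr("[%d, %d) -> %d", table.start_pc(i), table.end_pc(i), table.handler_pc(i));
//   }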

#endif // SHARE_VM_OOPS_METHODOOP_HPP