Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/code/nmethod.cpp
32285 views
/*1* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "code/codeCache.hpp"26#include "code/compiledIC.hpp"27#include "code/dependencies.hpp"28#include "code/nmethod.hpp"29#include "code/scopeDesc.hpp"30#include "compiler/abstractCompiler.hpp"31#include "compiler/compileBroker.hpp"32#include "compiler/compileLog.hpp"33#include "compiler/compilerOracle.hpp"34#include "compiler/disassembler.hpp"35#include "interpreter/bytecode.hpp"36#include "oops/methodData.hpp"37#include "prims/jvmtiRedefineClassesTrace.hpp"38#include "prims/jvmtiImpl.hpp"39#include "runtime/orderAccess.inline.hpp"40#include "runtime/sharedRuntime.hpp"41#include "runtime/sweeper.hpp"42#include "utilities/dtrace.hpp"43#include "utilities/events.hpp"44#include "utilities/xmlstream.hpp"45#ifdef SHARK46#include "shark/sharkCompiler.hpp"47#endif4849PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC5051unsigned char nmethod::_global_unloading_clock = 0;5253#ifdef DTRACE_ENABLED5455// Only bother with this argument setup if dtrace is available5657#ifndef USDT258HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load,59const char*, int, const char*, int, const char*, int, void*, size_t);6061HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,62char*, int, char*, int, char*, int);6364#define DTRACE_METHOD_UNLOAD_PROBE(method) \65{ \66Method* m = (method); \67if (m != NULL) { \68Symbol* klass_name = m->klass_name(); \69Symbol* name = m->name(); \70Symbol* signature = m->signature(); \71HS_DTRACE_PROBE6(hotspot, compiled__method__unload, \72klass_name->bytes(), klass_name->utf8_length(), \73name->bytes(), name->utf8_length(), \74signature->bytes(), signature->utf8_length()); \75} \76}77#else /* USDT2 */78#define DTRACE_METHOD_UNLOAD_PROBE(method) \79{ \80Method* m = (method); \81if (m != NULL) { \82Symbol* klass_name = m->klass_name(); \83Symbol* name = m->name(); \84Symbol* signature = m->signature(); \85HOTSPOT_COMPILED_METHOD_UNLOAD( \86(char *) klass_name->bytes(), klass_name->utf8_length(), \87(char *) name->bytes(), name->utf8_length(), \88(char *) signature->bytes(), signature->utf8_length()); \89} \90}91#endif /* USDT2 */9293#else // ndef DTRACE_ENABLED9495#define DTRACE_METHOD_UNLOAD_PROBE(method)9697#endif9899bool nmethod::is_compiled_by_c1() const {100if (compiler() == NULL) {101return false;102}103return compiler()->is_c1();104}105bool nmethod::is_compiled_by_c2() const {106if (compiler() == NULL) {107return false;108}109return compiler()->is_c2();110}111bool nmethod::is_compiled_by_shark() const {112if (compiler() == NULL) {113return false;114}115return compiler()->is_shark();116}117118119120//---------------------------------------------------------------------------------121// NMethod statistics122// They are printed under various flags, including:123// PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.124// (In the latter two cases, they like other stats are printed to the log only.)125126#ifndef PRODUCT127// These variables are put into one block to reduce relocations128// and make it simpler to print from the debugger.129static130struct nmethod_stats_struct {131int nmethod_count;132int total_size;133int relocation_size;134int consts_size;135int insts_size;136int stub_size;137int scopes_data_size;138int scopes_pcs_size;139int dependencies_size;140int handler_table_size;141int nul_chk_table_size;142int oops_size;143144void note_nmethod(nmethod* nm) {145nmethod_count += 1;146total_size += nm->size();147relocation_size += nm->relocation_size();148consts_size += nm->consts_size();149insts_size += nm->insts_size();150stub_size += nm->stub_size();151oops_size += nm->oops_size();152scopes_data_size += nm->scopes_data_size();153scopes_pcs_size += nm->scopes_pcs_size();154dependencies_size += nm->dependencies_size();155handler_table_size += nm->handler_table_size();156nul_chk_table_size += nm->nul_chk_table_size();157}158void print_nmethod_stats() {159if (nmethod_count == 0) return;160tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);161if (total_size != 0) tty->print_cr(" total in heap = %d", total_size);162if (relocation_size != 0) tty->print_cr(" relocation = %d", relocation_size);163if (consts_size != 0) tty->print_cr(" constants = %d", consts_size);164if (insts_size != 0) tty->print_cr(" main code = %d", insts_size);165if (stub_size != 0) tty->print_cr(" stub code = %d", stub_size);166if (oops_size != 0) tty->print_cr(" oops = %d", oops_size);167if (scopes_data_size != 0) tty->print_cr(" scopes data = %d", scopes_data_size);168if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %d", scopes_pcs_size);169if (dependencies_size != 0) tty->print_cr(" dependencies = %d", dependencies_size);170if (handler_table_size != 0) tty->print_cr(" handler table = %d", handler_table_size);171if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %d", nul_chk_table_size);172}173174int native_nmethod_count;175int native_total_size;176int native_relocation_size;177int native_insts_size;178int native_oops_size;179void note_native_nmethod(nmethod* nm) {180native_nmethod_count += 1;181native_total_size += nm->size();182native_relocation_size += nm->relocation_size();183native_insts_size += nm->insts_size();184native_oops_size += nm->oops_size();185}186void print_native_nmethod_stats() {187if (native_nmethod_count == 0) return;188tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);189if (native_total_size != 0) tty->print_cr(" N. total size = %d", native_total_size);190if (native_relocation_size != 0) tty->print_cr(" N. relocation = %d", native_relocation_size);191if (native_insts_size != 0) tty->print_cr(" N. main code = %d", native_insts_size);192if (native_oops_size != 0) tty->print_cr(" N. oops = %d", native_oops_size);193}194195int pc_desc_resets; // number of resets (= number of caches)196int pc_desc_queries; // queries to nmethod::find_pc_desc197int pc_desc_approx; // number of those which have approximate true198int pc_desc_repeats; // number of _pc_descs[0] hits199int pc_desc_hits; // number of LRU cache hits200int pc_desc_tests; // total number of PcDesc examinations201int pc_desc_searches; // total number of quasi-binary search steps202int pc_desc_adds; // number of LUR cache insertions203204void print_pc_stats() {205tty->print_cr("PcDesc Statistics: %d queries, %.2f comparisons per query",206pc_desc_queries,207(double)(pc_desc_tests + pc_desc_searches)208/ pc_desc_queries);209tty->print_cr(" caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",210pc_desc_resets,211pc_desc_queries, pc_desc_approx,212pc_desc_repeats, pc_desc_hits,213pc_desc_tests, pc_desc_searches, pc_desc_adds);214}215} nmethod_stats;216#endif //PRODUCT217218219//---------------------------------------------------------------------------------220221222ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {223assert(pc != NULL, "Must be non null");224assert(exception.not_null(), "Must be non null");225assert(handler != NULL, "Must be non null");226227_count = 0;228_exception_type = exception->klass();229_next = NULL;230231add_address_and_handler(pc,handler);232}233234235address ExceptionCache::match(Handle exception, address pc) {236assert(pc != NULL,"Must be non null");237assert(exception.not_null(),"Must be non null");238if (exception->klass() == exception_type()) {239return (test_address(pc));240}241242return NULL;243}244245246bool ExceptionCache::match_exception_with_space(Handle exception) {247assert(exception.not_null(),"Must be non null");248if (exception->klass() == exception_type() && count() < cache_size) {249return true;250}251return false;252}253254255address ExceptionCache::test_address(address addr) {256int limit = count();257for (int i = 0; i < limit; i++) {258if (pc_at(i) == addr) {259return handler_at(i);260}261}262return NULL;263}264265266bool ExceptionCache::add_address_and_handler(address addr, address handler) {267if (test_address(addr) == handler) return true;268269int index = count();270if (index < cache_size) {271set_pc_at(index, addr);272set_handler_at(index, handler);273increment_count();274return true;275}276return false;277}278279280// private method for handling exception cache281// These methods are private, and used to manipulate the exception cache282// directly.283ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {284ExceptionCache* ec = exception_cache();285while (ec != NULL) {286if (ec->match_exception_with_space(exception)) {287return ec;288}289ec = ec->next();290}291return NULL;292}293294295//-----------------------------------------------------------------------------296297298// Helper used by both find_pc_desc methods.299static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {300NOT_PRODUCT(++nmethod_stats.pc_desc_tests);301if (!approximate)302return pc->pc_offset() == pc_offset;303else304return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();305}306307void PcDescCache::reset_to(PcDesc* initial_pc_desc) {308if (initial_pc_desc == NULL) {309_pc_descs[0] = NULL; // native method; no PcDescs at all310return;311}312NOT_PRODUCT(++nmethod_stats.pc_desc_resets);313// reset the cache by filling it with benign (non-null) values314assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");315for (int i = 0; i < cache_size; i++)316_pc_descs[i] = initial_pc_desc;317}318319PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {320NOT_PRODUCT(++nmethod_stats.pc_desc_queries);321NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);322323// Note: one might think that caching the most recently324// read value separately would be a win, but one would be325// wrong. When many threads are updating it, the cache326// line it's in would bounce between caches, negating327// any benefit.328329// In order to prevent race conditions do not load cache elements330// repeatedly, but use a local copy:331PcDesc* res;332333// Step one: Check the most recently added value.334res = _pc_descs[0];335if (res == NULL) return NULL; // native method; no PcDescs at all336if (match_desc(res, pc_offset, approximate)) {337NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);338return res;339}340341// Step two: Check the rest of the LRU cache.342for (int i = 1; i < cache_size; ++i) {343res = _pc_descs[i];344if (res->pc_offset() < 0) break; // optimization: skip empty cache345if (match_desc(res, pc_offset, approximate)) {346NOT_PRODUCT(++nmethod_stats.pc_desc_hits);347return res;348}349}350351// Report failure.352return NULL;353}354355void PcDescCache::add_pc_desc(PcDesc* pc_desc) {356NOT_PRODUCT(++nmethod_stats.pc_desc_adds);357// Update the LRU cache by shifting pc_desc forward.358for (int i = 0; i < cache_size; i++) {359PcDesc* next = _pc_descs[i];360_pc_descs[i] = pc_desc;361pc_desc = next;362}363}364365// adjust pcs_size so that it is a multiple of both oopSize and366// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple367// of oopSize, then 2*sizeof(PcDesc) is)368static int adjust_pcs_size(int pcs_size) {369int nsize = round_to(pcs_size, oopSize);370if ((nsize % sizeof(PcDesc)) != 0) {371nsize = pcs_size + sizeof(PcDesc);372}373assert((nsize % oopSize) == 0, "correct alignment");374return nsize;375}376377//-----------------------------------------------------------------------------378379380void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {381assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");382assert(new_entry != NULL,"Must be non null");383assert(new_entry->next() == NULL, "Must be null");384385ExceptionCache *ec = exception_cache();386if (ec != NULL) {387new_entry->set_next(ec);388}389release_set_exception_cache(new_entry);390}391392void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) {393ExceptionCache* prev = NULL;394ExceptionCache* curr = exception_cache();395396while (curr != NULL) {397ExceptionCache* next = curr->next();398399Klass* ex_klass = curr->exception_type();400if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {401if (prev == NULL) {402set_exception_cache(next);403} else {404prev->set_next(next);405}406delete curr;407// prev stays the same.408} else {409prev = curr;410}411412curr = next;413}414}415416// public method for accessing the exception cache417// These are the public access methods.418address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {419// We never grab a lock to read the exception cache, so we may420// have false negatives. This is okay, as it can only happen during421// the first few exception lookups for a given nmethod.422ExceptionCache* ec = exception_cache();423while (ec != NULL) {424address ret_val;425if ((ret_val = ec->match(exception,pc)) != NULL) {426return ret_val;427}428ec = ec->next();429}430return NULL;431}432433434void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {435// There are potential race conditions during exception cache updates, so we436// must own the ExceptionCache_lock before doing ANY modifications. Because437// we don't lock during reads, it is possible to have several threads attempt438// to update the cache with the same data. We need to check for already inserted439// copies of the current data before adding it.440441MutexLocker ml(ExceptionCache_lock);442ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);443444if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {445target_entry = new ExceptionCache(exception,pc,handler);446add_exception_cache_entry(target_entry);447}448}449450451//-------------end of code for ExceptionCache--------------452453454int nmethod::total_size() const {455return456consts_size() +457insts_size() +458stub_size() +459scopes_data_size() +460scopes_pcs_size() +461handler_table_size() +462nul_chk_table_size();463}464465const char* nmethod::compile_kind() const {466if (is_osr_method()) return "osr";467if (method() != NULL && is_native_method()) return "c2n";468return NULL;469}470471// Fill in default values for various flag fields472void nmethod::init_defaults() {473_state = in_use;474_unloading_clock = 0;475_marked_for_reclamation = 0;476_has_flushed_dependencies = 0;477_has_unsafe_access = 0;478_has_method_handle_invokes = 0;479_lazy_critical_native = 0;480_has_wide_vectors = 0;481_marked_for_deoptimization = 0;482_lock_count = 0;483_stack_traversal_mark = 0;484_unload_reported = false; // jvmti state485486#ifdef ASSERT487_oops_are_stale = false;488#endif489490_oops_do_mark_link = NULL;491_jmethod_id = NULL;492_osr_link = NULL;493if (UseG1GC || UseShenandoahGC) {494_unloading_next = NULL;495} else {496_scavenge_root_link = NULL;497}498_scavenge_root_state = 0;499_compiler = NULL;500#if INCLUDE_RTM_OPT501_rtm_state = NoRTM;502#endif503#ifdef HAVE_DTRACE_H504_trap_offset = 0;505#endif // def HAVE_DTRACE_H506}507508nmethod* nmethod::new_native_nmethod(methodHandle method,509int compile_id,510CodeBuffer *code_buffer,511int vep_offset,512int frame_complete,513int frame_size,514ByteSize basic_lock_owner_sp_offset,515ByteSize basic_lock_sp_offset,516OopMapSet* oop_maps) {517code_buffer->finalize_oop_references(method);518// create nmethod519nmethod* nm = NULL;520{521MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);522int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));523CodeOffsets offsets;524offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);525offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);526nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,527compile_id, &offsets,528code_buffer, frame_size,529basic_lock_owner_sp_offset,530basic_lock_sp_offset, oop_maps);531NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm));532if (PrintAssembly && nm != NULL) {533Disassembler::decode(nm);534}535}536// verify nmethod537debug_only(if (nm) nm->verify();) // might block538539if (nm != NULL) {540nm->log_new_nmethod();541}542543return nm;544}545546#ifdef HAVE_DTRACE_H547nmethod* nmethod::new_dtrace_nmethod(methodHandle method,548CodeBuffer *code_buffer,549int vep_offset,550int trap_offset,551int frame_complete,552int frame_size) {553code_buffer->finalize_oop_references(method);554// create nmethod555nmethod* nm = NULL;556{557MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);558int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));559CodeOffsets offsets;560offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);561offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);562offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);563564nm = new (nmethod_size) nmethod(method(), nmethod_size,565&offsets, code_buffer, frame_size);566567NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));568if (PrintAssembly && nm != NULL) {569Disassembler::decode(nm);570}571}572// verify nmethod573debug_only(if (nm) nm->verify();) // might block574575if (nm != NULL) {576nm->log_new_nmethod();577}578579return nm;580}581582#endif // def HAVE_DTRACE_H583584nmethod* nmethod::new_nmethod(methodHandle method,585int compile_id,586int entry_bci,587CodeOffsets* offsets,588int orig_pc_offset,589DebugInformationRecorder* debug_info,590Dependencies* dependencies,591CodeBuffer* code_buffer, int frame_size,592OopMapSet* oop_maps,593ExceptionHandlerTable* handler_table,594ImplicitExceptionTable* nul_chk_table,595AbstractCompiler* compiler,596int comp_level597)598{599assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");600code_buffer->finalize_oop_references(method);601// create nmethod602nmethod* nm = NULL;603{ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);604int nmethod_size =605allocation_size(code_buffer, sizeof(nmethod))606+ adjust_pcs_size(debug_info->pcs_size())607+ round_to(dependencies->size_in_bytes() , oopSize)608+ round_to(handler_table->size_in_bytes(), oopSize)609+ round_to(nul_chk_table->size_in_bytes(), oopSize)610+ round_to(debug_info->data_size() , oopSize);611612nm = new (nmethod_size)613nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,614orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,615oop_maps,616handler_table,617nul_chk_table,618compiler,619comp_level);620621if (nm != NULL) {622// To make dependency checking during class loading fast, record623// the nmethod dependencies in the classes it is dependent on.624// This allows the dependency checking code to simply walk the625// class hierarchy above the loaded class, checking only nmethods626// which are dependent on those classes. The slow way is to627// check every nmethod for dependencies which makes it linear in628// the number of methods compiled. For applications with a lot629// classes the slow way is too slow.630for (Dependencies::DepStream deps(nm); deps.next(); ) {631Klass* klass = deps.context_type();632if (klass == NULL) {633continue; // ignore things like evol_method634}635636// record this nmethod as dependent on this klass637InstanceKlass::cast(klass)->add_dependent_nmethod(nm);638}639NOT_PRODUCT(nmethod_stats.note_nmethod(nm));640if (PrintAssembly || CompilerOracle::has_option_string(method, "PrintAssembly")) {641Disassembler::decode(nm);642}643}644}645// Do verification and logging outside CodeCache_lock.646if (nm != NULL) {647// Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.648DEBUG_ONLY(nm->verify();)649nm->log_new_nmethod();650}651return nm;652}653654655// For native wrappers656nmethod::nmethod(657Method* method,658int nmethod_size,659int compile_id,660CodeOffsets* offsets,661CodeBuffer* code_buffer,662int frame_size,663ByteSize basic_lock_owner_sp_offset,664ByteSize basic_lock_sp_offset,665OopMapSet* oop_maps )666: CodeBlob("native nmethod", code_buffer, sizeof(nmethod),667nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),668_native_receiver_sp_offset(basic_lock_owner_sp_offset),669_native_basic_lock_sp_offset(basic_lock_sp_offset)670{671{672debug_only(No_Safepoint_Verifier nsv;)673assert_locked_or_safepoint(CodeCache_lock);674675init_defaults();676_method = method;677_entry_bci = InvocationEntryBci;678// We have no exception handler or deopt handler make the679// values something that will never match a pc like the nmethod vtable entry680_exception_offset = 0;681_deoptimize_offset = 0;682_deoptimize_mh_offset = 0;683_orig_pc_offset = 0;684685_consts_offset = data_offset();686_stub_offset = data_offset();687_oops_offset = data_offset();688_metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);689_scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);690_scopes_pcs_offset = _scopes_data_offset;691_dependencies_offset = _scopes_pcs_offset;692_handler_table_offset = _dependencies_offset;693_nul_chk_table_offset = _handler_table_offset;694_nmethod_end_offset = _nul_chk_table_offset;695_compile_id = compile_id;696_comp_level = CompLevel_none;697_entry_point = code_begin() + offsets->value(CodeOffsets::Entry);698_verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);699_osr_entry_point = NULL;700_exception_cache = NULL;701_pc_desc_cache.reset_to(NULL);702_hotness_counter = NMethodSweeper::hotness_counter_reset_val();703704code_buffer->copy_values_to(this);705if (ScavengeRootsInCode) {706if (detect_scavenge_root_oops()) {707CodeCache::add_scavenge_root_nmethod(this);708}709Universe::heap()->register_nmethod(this);710}711debug_only(verify_scavenge_root_oops());712CodeCache::commit(this);713}714715if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {716ttyLocker ttyl; // keep the following output all in one block717// This output goes directly to the tty, not the compiler log.718// To enable tools to match it up with the compilation activity,719// be sure to tag this tty output with the compile ID.720if (xtty != NULL) {721xtty->begin_head("print_native_nmethod");722xtty->method(_method);723xtty->stamp();724xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);725}726// print the header part first727print();728// then print the requested information729if (PrintNativeNMethods) {730print_code();731if (oop_maps != NULL) {732oop_maps->print();733}734}735if (PrintRelocations) {736print_relocations();737}738if (xtty != NULL) {739xtty->tail("print_native_nmethod");740}741}742}743744// For dtrace wrappers745#ifdef HAVE_DTRACE_H746nmethod::nmethod(747Method* method,748int nmethod_size,749CodeOffsets* offsets,750CodeBuffer* code_buffer,751int frame_size)752: CodeBlob("dtrace nmethod", code_buffer, sizeof(nmethod),753nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, NULL),754_native_receiver_sp_offset(in_ByteSize(-1)),755_native_basic_lock_sp_offset(in_ByteSize(-1))756{757{758debug_only(No_Safepoint_Verifier nsv;)759assert_locked_or_safepoint(CodeCache_lock);760761init_defaults();762_method = method;763_entry_bci = InvocationEntryBci;764// We have no exception handler or deopt handler make the765// values something that will never match a pc like the nmethod vtable entry766_exception_offset = 0;767_deoptimize_offset = 0;768_deoptimize_mh_offset = 0;769_unwind_handler_offset = -1;770_trap_offset = offsets->value(CodeOffsets::Dtrace_trap);771_orig_pc_offset = 0;772_consts_offset = data_offset();773_stub_offset = data_offset();774_oops_offset = data_offset();775_metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);776_scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);777_scopes_pcs_offset = _scopes_data_offset;778_dependencies_offset = _scopes_pcs_offset;779_handler_table_offset = _dependencies_offset;780_nul_chk_table_offset = _handler_table_offset;781_nmethod_end_offset = _nul_chk_table_offset;782_compile_id = 0; // default783_comp_level = CompLevel_none;784_entry_point = code_begin() + offsets->value(CodeOffsets::Entry);785_verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);786_osr_entry_point = NULL;787_exception_cache = NULL;788_pc_desc_cache.reset_to(NULL);789_hotness_counter = NMethodSweeper::hotness_counter_reset_val();790791code_buffer->copy_values_to(this);792if (ScavengeRootsInCode) {793if (detect_scavenge_root_oops()) {794CodeCache::add_scavenge_root_nmethod(this);795}796Universe::heap()->register_nmethod(this);797}798DEBUG_ONLY(verify_scavenge_root_oops();)799CodeCache::commit(this);800}801802if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {803ttyLocker ttyl; // keep the following output all in one block804// This output goes directly to the tty, not the compiler log.805// To enable tools to match it up with the compilation activity,806// be sure to tag this tty output with the compile ID.807if (xtty != NULL) {808xtty->begin_head("print_dtrace_nmethod");809xtty->method(_method);810xtty->stamp();811xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);812}813// print the header part first814print();815// then print the requested information816if (PrintNMethods) {817print_code();818}819if (PrintRelocations) {820print_relocations();821}822if (xtty != NULL) {823xtty->tail("print_dtrace_nmethod");824}825}826}827#endif // def HAVE_DTRACE_H828829void* nmethod::operator new(size_t size, int nmethod_size) throw() {830// Not critical, may return null if there is too little continuous memory831return CodeCache::allocate(nmethod_size);832}833834nmethod::nmethod(835Method* method,836int nmethod_size,837int compile_id,838int entry_bci,839CodeOffsets* offsets,840int orig_pc_offset,841DebugInformationRecorder* debug_info,842Dependencies* dependencies,843CodeBuffer *code_buffer,844int frame_size,845OopMapSet* oop_maps,846ExceptionHandlerTable* handler_table,847ImplicitExceptionTable* nul_chk_table,848AbstractCompiler* compiler,849int comp_level850)851: CodeBlob("nmethod", code_buffer, sizeof(nmethod),852nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),853_native_receiver_sp_offset(in_ByteSize(-1)),854_native_basic_lock_sp_offset(in_ByteSize(-1))855{856assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");857{858debug_only(No_Safepoint_Verifier nsv;)859assert_locked_or_safepoint(CodeCache_lock);860861init_defaults();862_method = method;863_entry_bci = entry_bci;864_compile_id = compile_id;865_comp_level = comp_level;866_compiler = compiler;867_orig_pc_offset = orig_pc_offset;868_hotness_counter = NMethodSweeper::hotness_counter_reset_val();869870// Section offsets871_consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());872_stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());873874// Exception handler and deopt handler are in the stub section875assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");876assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set");877_exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);878_deoptimize_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);879if (offsets->value(CodeOffsets::DeoptMH) != -1) {880_deoptimize_mh_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH);881} else {882_deoptimize_mh_offset = -1;883}884if (offsets->value(CodeOffsets::UnwindHandler) != -1) {885_unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);886} else {887_unwind_handler_offset = -1;888}889890_oops_offset = data_offset();891_metadata_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize);892_scopes_data_offset = _metadata_offset + round_to(code_buffer->total_metadata_size(), wordSize);893894_scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize);895_dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());896_handler_table_offset = _dependencies_offset + round_to(dependencies->size_in_bytes (), oopSize);897_nul_chk_table_offset = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);898_nmethod_end_offset = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);899900_entry_point = code_begin() + offsets->value(CodeOffsets::Entry);901_verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);902_osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);903_exception_cache = NULL;904_pc_desc_cache.reset_to(scopes_pcs_begin());905906// Copy contents of ScopeDescRecorder to nmethod907code_buffer->copy_values_to(this);908debug_info->copy_to(this);909dependencies->copy_to(this);910if (ScavengeRootsInCode) {911if (detect_scavenge_root_oops()) {912CodeCache::add_scavenge_root_nmethod(this);913}914Universe::heap()->register_nmethod(this);915}916debug_only(verify_scavenge_root_oops());917918CodeCache::commit(this);919920// Copy contents of ExceptionHandlerTable to nmethod921handler_table->copy_to(this);922nul_chk_table->copy_to(this);923924// we use the information of entry points to find out if a method is925// static or non static926assert(compiler->is_c2() ||927_method->is_static() == (entry_point() == _verified_entry_point),928" entry points must be same for static methods and vice versa");929}930931bool printnmethods = PrintNMethods932|| CompilerOracle::should_print(_method)933|| CompilerOracle::has_option_string(_method, "PrintNMethods");934if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {935print_nmethod(printnmethods);936}937}938939940// Print a short set of xml attributes to identify this nmethod. The941// output should be embedded in some other element.942void nmethod::log_identity(xmlStream* log) const {943log->print(" compile_id='%d'", compile_id());944const char* nm_kind = compile_kind();945if (nm_kind != NULL) log->print(" compile_kind='%s'", nm_kind);946if (compiler() != NULL) {947log->print(" compiler='%s'", compiler()->name());948}949if (TieredCompilation) {950log->print(" level='%d'", comp_level());951}952}953954955#define LOG_OFFSET(log, name) \956if ((intptr_t)name##_end() - (intptr_t)name##_begin()) \957log->print(" " XSTR(name) "_offset='%d'" , \958(intptr_t)name##_begin() - (intptr_t)this)959960961void nmethod::log_new_nmethod() const {962if (LogCompilation && xtty != NULL) {963ttyLocker ttyl;964HandleMark hm;965xtty->begin_elem("nmethod");966log_identity(xtty);967xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size());968xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);969970LOG_OFFSET(xtty, relocation);971LOG_OFFSET(xtty, consts);972LOG_OFFSET(xtty, insts);973LOG_OFFSET(xtty, stub);974LOG_OFFSET(xtty, scopes_data);975LOG_OFFSET(xtty, scopes_pcs);976LOG_OFFSET(xtty, dependencies);977LOG_OFFSET(xtty, handler_table);978LOG_OFFSET(xtty, nul_chk_table);979LOG_OFFSET(xtty, oops);980981xtty->method(method());982xtty->stamp();983xtty->end_elem();984}985}986987#undef LOG_OFFSET988989990// Print out more verbose output usually for a newly created nmethod.991void nmethod::print_on(outputStream* st, const char* msg) const {992if (st != NULL) {993ttyLocker ttyl;994if (WizardMode) {995CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);996st->print_cr(" (" INTPTR_FORMAT ")", this);997} else {998CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);999}1000}1001}100210031004void nmethod::print_nmethod(bool printmethod) {1005ttyLocker ttyl; // keep the following output all in one block1006if (xtty != NULL) {1007xtty->begin_head("print_nmethod");1008xtty->stamp();1009xtty->end_head();1010}1011// print the header part first1012print();1013// then print the requested information1014if (printmethod) {1015print_code();1016print_pcs();1017if (oop_maps()) {1018oop_maps()->print();1019}1020}1021if (PrintDebugInfo) {1022print_scopes();1023}1024if (PrintRelocations) {1025print_relocations();1026}1027if (PrintDependencies) {1028print_dependencies();1029}1030if (PrintExceptionHandlers) {1031print_handler_table();1032print_nul_chk_table();1033}1034if (xtty != NULL) {1035xtty->tail("print_nmethod");1036}1037}103810391040// Promote one word from an assembly-time handle to a live embedded oop.1041inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {1042if (handle == NULL ||1043// As a special case, IC oops are initialized to 1 or -1.1044handle == (jobject) Universe::non_oop_word()) {1045(*dest) = (oop) handle;1046} else {1047(*dest) = JNIHandles::resolve_non_null(handle);1048}1049}105010511052// Have to have the same name because it's called by a template1053void nmethod::copy_values(GrowableArray<jobject>* array) {1054int length = array->length();1055assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");1056oop* dest = oops_begin();1057for (int index = 0 ; index < length; index++) {1058initialize_immediate_oop(&dest[index], array->at(index));1059}10601061// Now we can fix up all the oops in the code. We need to do this1062// in the code because the assembler uses jobjects as placeholders.1063// The code and relocations have already been initialized by the1064// CodeBlob constructor, so it is valid even at this early point to1065// iterate over relocations and patch the code.1066fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);1067}10681069void nmethod::copy_values(GrowableArray<Metadata*>* array) {1070int length = array->length();1071assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");1072Metadata** dest = metadata_begin();1073for (int index = 0 ; index < length; index++) {1074dest[index] = array->at(index);1075}1076}10771078bool nmethod::is_at_poll_return(address pc) {1079RelocIterator iter(this, pc, pc+1);1080while (iter.next()) {1081if (iter.type() == relocInfo::poll_return_type)1082return true;1083}1084return false;1085}108610871088bool nmethod::is_at_poll_or_poll_return(address pc) {1089RelocIterator iter(this, pc, pc+1);1090while (iter.next()) {1091relocInfo::relocType t = iter.type();1092if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)1093return true;1094}1095return false;1096}109710981099void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {1100// re-patch all oop-bearing instructions, just in case some oops moved1101RelocIterator iter(this, begin, end);1102while (iter.next()) {1103if (iter.type() == relocInfo::oop_type) {1104oop_Relocation* reloc = iter.oop_reloc();1105if (initialize_immediates && reloc->oop_is_immediate()) {1106oop* dest = reloc->oop_addr();1107initialize_immediate_oop(dest, (jobject) *dest);1108}1109// Refresh the oop-related bits of this instruction.1110reloc->fix_oop_relocation();1111} else if (iter.type() == relocInfo::metadata_type) {1112metadata_Relocation* reloc = iter.metadata_reloc();1113reloc->fix_metadata_relocation();1114}1115}1116}111711181119void nmethod::verify_oop_relocations() {1120// Ensure sure that the code matches the current oop values1121RelocIterator iter(this, NULL, NULL);1122while (iter.next()) {1123if (iter.type() == relocInfo::oop_type) {1124oop_Relocation* reloc = iter.oop_reloc();1125if (!reloc->oop_is_immediate()) {1126reloc->verify_oop_relocation();1127}1128}1129}1130}113111321133ScopeDesc* nmethod::scope_desc_at(address pc) {1134PcDesc* pd = pc_desc_at(pc);1135guarantee(pd != NULL, "scope must be present");1136return new ScopeDesc(this, pd->scope_decode_offset(),1137pd->obj_decode_offset(), pd->should_reexecute(),1138pd->return_oop());1139}114011411142void nmethod::clear_inline_caches() {1143assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");1144if (is_zombie()) {1145return;1146}11471148RelocIterator iter(this);1149while (iter.next()) {1150iter.reloc()->clear_inline_cache();1151}1152}11531154// Clear ICStubs of all compiled ICs1155void nmethod::clear_ic_stubs() {1156assert_locked_or_safepoint(CompiledIC_lock);1157ResourceMark rm;1158RelocIterator iter(this);1159while(iter.next()) {1160if (iter.type() == relocInfo::virtual_call_type) {1161CompiledIC* ic = CompiledIC_at(&iter);1162ic->clear_ic_stub();1163}1164}1165}116611671168void nmethod::cleanup_inline_caches() {1169assert_locked_or_safepoint(CompiledIC_lock);11701171// If the method is not entrant or zombie then a JMP is plastered over the1172// first few bytes. If an oop in the old code was there, that oop1173// should not get GC'd. Skip the first few bytes of oops on1174// not-entrant methods.1175address low_boundary = verified_entry_point();1176if (!is_in_use()) {1177low_boundary += NativeJump::instruction_size;1178// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.1179// This means that the low_boundary is going to be a little too high.1180// This shouldn't matter, since oops of non-entrant methods are never used.1181// In fact, why are we bothering to look at oops in a non-entrant method??1182}11831184// Find all calls in an nmethod and clear the ones that point to non-entrant,1185// zombie and unloaded nmethods.1186ResourceMark rm;1187RelocIterator iter(this, low_boundary);1188while(iter.next()) {1189switch(iter.type()) {1190case relocInfo::virtual_call_type:1191case relocInfo::opt_virtual_call_type: {1192CompiledIC *ic = CompiledIC_at(&iter);1193// Ok, to lookup references to zombies here1194CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());1195if( cb != NULL && cb->is_nmethod() ) {1196nmethod* nm = (nmethod*)cb;1197// Clean inline caches pointing to zombie, non-entrant and unloaded methods1198if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());1199}1200break;1201}1202case relocInfo::static_call_type: {1203CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());1204CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());1205if( cb != NULL && cb->is_nmethod() ) {1206nmethod* nm = (nmethod*)cb;1207// Clean inline caches pointing to zombie, non-entrant and unloaded methods1208if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();1209}1210break;1211}1212}1213}1214}12151216void nmethod::verify_clean_inline_caches() {1217assert_locked_or_safepoint(CompiledIC_lock);12181219// If the method is not entrant or zombie then a JMP is plastered over the1220// first few bytes. If an oop in the old code was there, that oop1221// should not get GC'd. Skip the first few bytes of oops on1222// not-entrant methods.1223address low_boundary = verified_entry_point();1224if (!is_in_use()) {1225low_boundary += NativeJump::instruction_size;1226// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.1227// This means that the low_boundary is going to be a little too high.1228// This shouldn't matter, since oops of non-entrant methods are never used.1229// In fact, why are we bothering to look at oops in a non-entrant method??1230}12311232ResourceMark rm;1233RelocIterator iter(this, low_boundary);1234while(iter.next()) {1235switch(iter.type()) {1236case relocInfo::virtual_call_type:1237case relocInfo::opt_virtual_call_type: {1238CompiledIC *ic = CompiledIC_at(&iter);1239// Ok, to lookup references to zombies here1240CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());1241if( cb != NULL && cb->is_nmethod() ) {1242nmethod* nm = (nmethod*)cb;1243// Verify that inline caches pointing to both zombie and not_entrant methods are clean1244if (!nm->is_in_use() || (nm->method()->code() != nm)) {1245assert(ic->is_clean(), "IC should be clean");1246}1247}1248break;1249}1250case relocInfo::static_call_type: {1251CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());1252CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());1253if( cb != NULL && cb->is_nmethod() ) {1254nmethod* nm = (nmethod*)cb;1255// Verify that inline caches pointing to both zombie and not_entrant methods are clean1256if (!nm->is_in_use() || (nm->method()->code() != nm)) {1257assert(csc->is_clean(), "IC should be clean");1258}1259}1260break;1261}1262}1263}1264}12651266int nmethod::verify_icholder_relocations() {1267int count = 0;12681269RelocIterator iter(this);1270while(iter.next()) {1271if (iter.type() == relocInfo::virtual_call_type) {1272if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {1273CompiledIC *ic = CompiledIC_at(&iter);1274if (TraceCompiledIC) {1275tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));1276ic->print();1277}1278assert(ic->cached_icholder() != NULL, "must be non-NULL");1279count++;1280}1281}1282}12831284return count;1285}12861287// This is a private interface with the sweeper.1288void nmethod::mark_as_seen_on_stack() {1289assert(is_alive(), "Must be an alive method");1290// Set the traversal mark to ensure that the sweeper does 21291// cleaning passes before moving to zombie.1292set_stack_traversal_mark(NMethodSweeper::traversal_count());1293}12941295// Tell if a non-entrant method can be converted to a zombie (i.e.,1296// there are no activations on the stack, not in use by the VM,1297// and not in use by the ServiceThread)1298bool nmethod::can_convert_to_zombie() {1299assert(is_not_entrant(), "must be a non-entrant method");13001301// Since the nmethod sweeper only does partial sweep the sweeper's traversal1302// count can be greater than the stack traversal count before it hits the1303// nmethod for the second time.1304return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&1305!is_locked_by_vm();1306}13071308void nmethod::inc_decompile_count() {1309if (!is_compiled_by_c2()) return;1310// Could be gated by ProfileTraps, but do not bother...1311Method* m = method();1312if (m == NULL) return;1313MethodData* mdo = m->method_data();1314if (mdo == NULL) return;1315// There is a benign race here. See comments in methodData.hpp.1316mdo->inc_decompile_count();1317}13181319void nmethod::increase_unloading_clock() {1320_global_unloading_clock++;1321if (_global_unloading_clock == 0) {1322// _nmethods are allocated with _unloading_clock == 0,1323// so 0 is never used as a clock value.1324_global_unloading_clock = 1;1325}1326}13271328void nmethod::set_unloading_clock(unsigned char unloading_clock) {1329OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);1330}13311332unsigned char nmethod::unloading_clock() {1333return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);1334}13351336void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {13371338post_compiled_method_unload();13391340// Since this nmethod is being unloaded, make sure that dependencies1341// recorded in instanceKlasses get flushed and pass non-NULL closure to1342// indicate that this work is being done during a GC.1343assert(Universe::heap()->is_gc_active(), "should only be called during gc");1344assert(is_alive != NULL, "Should be non-NULL");1345// A non-NULL is_alive closure indicates that this is being called during GC.1346flush_dependencies(is_alive);13471348// Break cycle between nmethod & method1349if (TraceClassUnloading && WizardMode) {1350tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT1351" unloadable], Method*(" INTPTR_FORMAT1352"), cause(" INTPTR_FORMAT ")",1353this, (address)_method, (address)cause);1354if (!Universe::heap()->is_gc_active())1355cause->klass()->print();1356}1357// Unlink the osr method, so we do not look this up again1358if (is_osr_method()) {1359invalidate_osr_method();1360}1361// If _method is already NULL the Method* is about to be unloaded,1362// so we don't have to break the cycle. Note that it is possible to1363// have the Method* live here, in case we unload the nmethod because1364// it is pointing to some oop (other than the Method*) being unloaded.1365if (_method != NULL) {1366// OSR methods point to the Method*, but the Method* does not1367// point back!1368if (_method->code() == this) {1369_method->clear_code(); // Break a cycle1370}1371_method = NULL; // Clear the method of this dead nmethod1372}1373// Make the class unloaded - i.e., change state and notify sweeper1374assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");1375if (is_in_use()) {1376// Transitioning directly from live to unloaded -- so1377// we need to force a cache clean-up; remember this1378// for later on.1379CodeCache::set_needs_cache_clean(true);1380}13811382// Unregister must be done before the state change1383Universe::heap()->unregister_nmethod(this);13841385_state = unloaded;13861387// Log the unloading.1388log_state_change();13891390// The Method* is gone at this point1391assert(_method == NULL, "Tautology");13921393set_osr_link(NULL);1394NMethodSweeper::report_state_change(this);1395}13961397void nmethod::invalidate_osr_method() {1398assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");1399// Remove from list of active nmethods1400if (method() != NULL)1401method()->method_holder()->remove_osr_nmethod(this);1402// Set entry as invalid1403_entry_bci = InvalidOSREntryBci;1404}14051406void nmethod::log_state_change() const {1407if (LogCompilation) {1408if (xtty != NULL) {1409ttyLocker ttyl; // keep the following output all in one block1410if (_state == unloaded) {1411xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",1412os::current_thread_id());1413} else {1414xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",1415os::current_thread_id(),1416(_state == zombie ? " zombie='1'" : ""));1417}1418log_identity(xtty);1419xtty->stamp();1420xtty->end_elem();1421}1422}1423if (PrintCompilation && _state != unloaded) {1424print_on(tty, _state == zombie ? "made zombie" : "made not entrant");1425}1426}14271428/**1429* Common functionality for both make_not_entrant and make_zombie1430*/1431bool nmethod::make_not_entrant_or_zombie(unsigned int state) {1432assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");1433assert(!is_zombie(), "should not already be a zombie");14341435// Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.1436nmethodLocker nml(this);1437methodHandle the_method(method());1438No_Safepoint_Verifier nsv;14391440// during patching, depending on the nmethod state we must notify the GC that1441// code has been unloaded, unregistering it. We cannot do this right while1442// holding the Patching_lock because we need to use the CodeCache_lock. This1443// would be prone to deadlocks.1444// This flag is used to remember whether we need to later lock and unregister.1445bool nmethod_needs_unregister = false;14461447{1448// invalidate osr nmethod before acquiring the patching lock since1449// they both acquire leaf locks and we don't want a deadlock.1450// This logic is equivalent to the logic below for patching the1451// verified entry point of regular methods.1452if (is_osr_method()) {1453// this effectively makes the osr nmethod not entrant1454invalidate_osr_method();1455}14561457// Enter critical section. Does not block for safepoint.1458MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);14591460if (_state == state) {1461// another thread already performed this transition so nothing1462// to do, but return false to indicate this.1463return false;1464}14651466// The caller can be calling the method statically or through an inline1467// cache call.1468if (!is_osr_method() && !is_not_entrant()) {1469NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),1470SharedRuntime::get_handle_wrong_method_stub());1471}14721473if (is_in_use()) {1474// It's a true state change, so mark the method as decompiled.1475// Do it only for transition from alive.1476inc_decompile_count();1477}14781479// If the state is becoming a zombie, signal to unregister the nmethod with1480// the heap.1481// This nmethod may have already been unloaded during a full GC.1482if ((state == zombie) && !is_unloaded()) {1483nmethod_needs_unregister = true;1484}14851486// Must happen before state change. Otherwise we have a race condition in1487// nmethod::can_not_entrant_be_converted(). I.e., a method can immediately1488// transition its state from 'not_entrant' to 'zombie' without having to wait1489// for stack scanning.1490if (state == not_entrant) {1491mark_as_seen_on_stack();1492OrderAccess::storestore();1493}14941495// Change state1496_state = state;14971498// Log the transition once1499log_state_change();15001501// Remove nmethod from method.1502// We need to check if both the _code and _from_compiled_code_entry_point1503// refer to this nmethod because there is a race in setting these two fields1504// in Method* as seen in bugid 4947125.1505// If the vep() points to the zombie nmethod, the memory for the nmethod1506// could be flushed and the compiler and vtable stubs could still call1507// through it.1508if (method() != NULL && (method()->code() == this ||1509method()->from_compiled_entry() == verified_entry_point())) {1510HandleMark hm;1511method()->clear_code(false /* already owns Patching_lock */);1512}1513} // leave critical region under Patching_lock15141515// When the nmethod becomes zombie it is no longer alive so the1516// dependencies must be flushed. nmethods in the not_entrant1517// state will be flushed later when the transition to zombie1518// happens or they get unloaded.1519if (state == zombie) {1520{1521// Flushing dependecies must be done before any possible1522// safepoint can sneak in, otherwise the oops used by the1523// dependency logic could have become stale.1524MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);1525if (nmethod_needs_unregister) {1526Universe::heap()->unregister_nmethod(this);1527}1528flush_dependencies(NULL);1529}15301531// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload1532// event and it hasn't already been reported for this nmethod then1533// report it now. The event may have been reported earilier if the GC1534// marked it for unloading). JvmtiDeferredEventQueue support means1535// we no longer go to a safepoint here.1536post_compiled_method_unload();15371538#ifdef ASSERT1539// It's no longer safe to access the oops section since zombie1540// nmethods aren't scanned for GC.1541_oops_are_stale = true;1542#endif1543// the Method may be reclaimed by class unloading now that the1544// nmethod is in zombie state1545set_method(NULL);1546} else {1547assert(state == not_entrant, "other cases may need to be handled differently");1548}15491550if (TraceCreateZombies) {1551tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");1552}15531554NMethodSweeper::report_state_change(this);1555return true;1556}15571558void nmethod::flush() {1559// Note that there are no valid oops in the nmethod anymore.1560assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");1561assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");15621563assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");1564assert_locked_or_safepoint(CodeCache_lock);15651566// completely deallocate this method1567Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);1568if (PrintMethodFlushing) {1569tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",1570_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);1571}15721573// We need to deallocate any ExceptionCache data.1574// Note that we do not need to grab the nmethod lock for this, it1575// better be thread safe if we're disposing of it!1576ExceptionCache* ec = exception_cache();1577set_exception_cache(NULL);1578while(ec != NULL) {1579ExceptionCache* next = ec->next();1580delete ec;1581ec = next;1582}15831584if (on_scavenge_root_list()) {1585CodeCache::drop_scavenge_root_nmethod(this);1586}15871588#ifdef SHARK1589((SharkCompiler *) compiler())->free_compiled_method(insts_begin());1590#endif // SHARK15911592((CodeBlob*)(this))->flush();15931594CodeCache::free(this);1595}159615971598//1599// Notify all classes this nmethod is dependent on that it is no1600// longer dependent. This should only be called in two situations.1601// First, when a nmethod transitions to a zombie all dependents need1602// to be clear. Since zombification happens at a safepoint there's no1603// synchronization issues. The second place is a little more tricky.1604// During phase 1 of mark sweep class unloading may happen and as a1605// result some nmethods may get unloaded. In this case the flushing1606// of dependencies must happen during phase 1 since after GC any1607// dependencies in the unloaded nmethod won't be updated, so1608// traversing the dependency information in unsafe. In that case this1609// function is called with a non-NULL argument and this function only1610// notifies instanceKlasses that are reachable16111612void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {1613assert_locked_or_safepoint(CodeCache_lock);1614assert(Universe::heap()->is_gc_active() == (is_alive != NULL),1615"is_alive is non-NULL if and only if we are called during GC");1616if (!has_flushed_dependencies()) {1617set_has_flushed_dependencies();1618for (Dependencies::DepStream deps(this); deps.next(); ) {1619Klass* klass = deps.context_type();1620if (klass == NULL) continue; // ignore things like evol_method16211622// During GC the is_alive closure is non-NULL, and is used to1623// determine liveness of dependees that need to be updated.1624if (is_alive == NULL || klass->is_loader_alive(is_alive)) {1625// The GC defers deletion of this entry, since there might be multiple threads1626// iterating over the _dependencies graph. Other call paths are single-threaded1627// and may delete it immediately.1628bool delete_immediately = is_alive == NULL;1629InstanceKlass::cast(klass)->remove_dependent_nmethod(this, delete_immediately);1630}1631}1632}1633}163416351636// If this oop is not live, the nmethod can be unloaded.1637bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {1638assert(root != NULL, "just checking");1639oop obj = *root;1640if (obj == NULL || is_alive->do_object_b(obj)) {1641return false;1642}16431644// If ScavengeRootsInCode is true, an nmethod might be unloaded1645// simply because one of its constant oops has gone dead.1646// No actual classes need to be unloaded in order for this to occur.1647assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");1648make_unloaded(is_alive, obj);1649return true;1650}16511652// ------------------------------------------------------------------1653// post_compiled_method_load_event1654// new method for install_code() path1655// Transfer information from compilation to jvmti1656void nmethod::post_compiled_method_load_event() {16571658Method* moop = method();1659#ifndef USDT21660HS_DTRACE_PROBE8(hotspot, compiled__method__load,1661moop->klass_name()->bytes(),1662moop->klass_name()->utf8_length(),1663moop->name()->bytes(),1664moop->name()->utf8_length(),1665moop->signature()->bytes(),1666moop->signature()->utf8_length(),1667insts_begin(), insts_size());1668#else /* USDT2 */1669HOTSPOT_COMPILED_METHOD_LOAD(1670(char *) moop->klass_name()->bytes(),1671moop->klass_name()->utf8_length(),1672(char *) moop->name()->bytes(),1673moop->name()->utf8_length(),1674(char *) moop->signature()->bytes(),1675moop->signature()->utf8_length(),1676insts_begin(), insts_size());1677#endif /* USDT2 */16781679if (JvmtiExport::should_post_compiled_method_load() ||1680JvmtiExport::should_post_compiled_method_unload()) {1681get_and_cache_jmethod_id();1682}16831684if (JvmtiExport::should_post_compiled_method_load()) {1685// Let the Service thread (which is a real Java thread) post the event1686MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);1687JvmtiDeferredEventQueue::enqueue(1688JvmtiDeferredEvent::compiled_method_load_event(this));1689}1690}16911692jmethodID nmethod::get_and_cache_jmethod_id() {1693if (_jmethod_id == NULL) {1694// Cache the jmethod_id since it can no longer be looked up once the1695// method itself has been marked for unloading.1696_jmethod_id = method()->jmethod_id();1697}1698return _jmethod_id;1699}17001701void nmethod::post_compiled_method_unload() {1702if (unload_reported()) {1703// During unloading we transition to unloaded and then to zombie1704// and the unloading is reported during the first transition.1705return;1706}17071708assert(_method != NULL && !is_unloaded(), "just checking");1709DTRACE_METHOD_UNLOAD_PROBE(method());17101711// If a JVMTI agent has enabled the CompiledMethodUnload event then1712// post the event. Sometime later this nmethod will be made a zombie1713// by the sweeper but the Method* will not be valid at that point.1714// If the _jmethod_id is null then no load event was ever requested1715// so don't bother posting the unload. The main reason for this is1716// that the jmethodID is a weak reference to the Method* so if1717// it's being unloaded there's no way to look it up since the weak1718// ref will have been cleared.1719if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {1720assert(!unload_reported(), "already unloaded");1721JvmtiDeferredEvent event =1722JvmtiDeferredEvent::compiled_method_unload_event(this,1723_jmethod_id, insts_begin());1724if (SafepointSynchronize::is_at_safepoint()) {1725// Don't want to take the queueing lock. Add it as pending and1726// it will get enqueued later.1727JvmtiDeferredEventQueue::add_pending_event(event);1728} else {1729MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);1730JvmtiDeferredEventQueue::enqueue(event);1731}1732}17331734// The JVMTI CompiledMethodUnload event can be enabled or disabled at1735// any time. As the nmethod is being unloaded now we mark it has1736// having the unload event reported - this will ensure that we don't1737// attempt to report the event in the unlikely scenario where the1738// event is enabled at the time the nmethod is made a zombie.1739set_unload_reported();1740}17411742void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive, bool mark_on_stack) {1743if (ic->is_icholder_call()) {1744// The only exception is compiledICHolder oops which may1745// yet be marked below. (We check this further below).1746CompiledICHolder* cichk_oop = ic->cached_icholder();17471748if (mark_on_stack) {1749Metadata::mark_on_stack(cichk_oop->holder_metadata());1750Metadata::mark_on_stack(cichk_oop->holder_klass());1751}17521753if (cichk_oop->is_loader_alive(is_alive)) {1754return;1755}1756} else {1757Metadata* ic_oop = ic->cached_metadata();1758if (ic_oop != NULL) {1759if (mark_on_stack) {1760Metadata::mark_on_stack(ic_oop);1761}17621763if (ic_oop->is_klass()) {1764if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {1765return;1766}1767} else if (ic_oop->is_method()) {1768if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {1769return;1770}1771} else {1772ShouldNotReachHere();1773}1774}1775}17761777ic->set_to_clean();1778}17791780// This is called at the end of the strong tracing/marking phase of a1781// GC to unload an nmethod if it contains otherwise unreachable1782// oops.17831784void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {1785// Make sure the oop's ready to receive visitors1786assert(!is_zombie() && !is_unloaded(),1787"should not call follow on zombie or unloaded nmethod");17881789// If the method is not entrant then a JMP is plastered over the1790// first few bytes. If an oop in the old code was there, that oop1791// should not get GC'd. Skip the first few bytes of oops on1792// not-entrant methods.1793address low_boundary = verified_entry_point();1794if (is_not_entrant()) {1795low_boundary += NativeJump::instruction_size;1796// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.1797// (See comment above.)1798}17991800// The RedefineClasses() API can cause the class unloading invariant1801// to no longer be true. See jvmtiExport.hpp for details.1802// Also, leave a debugging breadcrumb in local flag.1803bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();1804if (a_class_was_redefined) {1805// This set of the unloading_occurred flag is done before the1806// call to post_compiled_method_unload() so that the unloading1807// of this nmethod is reported.1808unloading_occurred = true;1809}18101811// Exception cache1812clean_exception_cache(is_alive);18131814// If class unloading occurred we first iterate over all inline caches and1815// clear ICs where the cached oop is referring to an unloaded klass or method.1816// The remaining live cached oops will be traversed in the relocInfo::oop_type1817// iteration below.1818if (unloading_occurred) {1819RelocIterator iter(this, low_boundary);1820while(iter.next()) {1821if (iter.type() == relocInfo::virtual_call_type) {1822CompiledIC *ic = CompiledIC_at(&iter);1823clean_ic_if_metadata_is_dead(ic, is_alive, false);1824}1825}1826}18271828// Compiled code1829{1830RelocIterator iter(this, low_boundary);1831while (iter.next()) {1832if (iter.type() == relocInfo::oop_type) {1833oop_Relocation* r = iter.oop_reloc();1834// In this loop, we must only traverse those oops directly embedded in1835// the code. Other oops (oop_index>0) are seen as part of scopes_oops.1836assert(1 == (r->oop_is_immediate()) +1837(r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),1838"oop must be found in exactly one place");1839if (r->oop_is_immediate() && r->oop_value() != NULL) {1840if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {1841return;1842}1843}1844}1845}1846}184718481849// Scopes1850for (oop* p = oops_begin(); p < oops_end(); p++) {1851if (*p == Universe::non_oop_word()) continue; // skip non-oops1852if (can_unload(is_alive, p, unloading_occurred)) {1853return;1854}1855}18561857// Ensure that all metadata is still alive1858verify_metadata_loaders(low_boundary, is_alive);1859}18601861template <class CompiledICorStaticCall>1862static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {1863// Ok, to lookup references to zombies here1864CodeBlob *cb = CodeCache::find_blob_unsafe(addr);1865if (cb != NULL && cb->is_nmethod()) {1866nmethod* nm = (nmethod*)cb;18671868if (nm->unloading_clock() != nmethod::global_unloading_clock()) {1869// The nmethod has not been processed yet.1870return true;1871}18721873// Clean inline caches pointing to both zombie and not_entrant methods1874if (!nm->is_in_use() || (nm->method()->code() != nm)) {1875ic->set_to_clean();1876assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT "not clean %s", from, from->method()->name_and_sig_as_C_string()));1877}1878}18791880return false;1881}18821883static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {1884return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);1885}18861887static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {1888return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);1889}18901891bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {1892assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");18931894oop_Relocation* r = iter_at_oop->oop_reloc();1895// Traverse those oops directly embedded in the code.1896// Other oops (oop_index>0) are seen as part of scopes_oops.1897assert(1 == (r->oop_is_immediate()) +1898(r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),1899"oop must be found in exactly one place");1900if (r->oop_is_immediate() && r->oop_value() != NULL) {1901// Unload this nmethod if the oop is dead.1902if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {1903return true;;1904}1905}19061907return false;1908}19091910void nmethod::mark_metadata_on_stack_at(RelocIterator* iter_at_metadata) {1911assert(iter_at_metadata->type() == relocInfo::metadata_type, "Wrong relocation type");19121913metadata_Relocation* r = iter_at_metadata->metadata_reloc();1914// In this metadata, we must only follow those metadatas directly embedded in1915// the code. Other metadatas (oop_index>0) are seen as part of1916// the metadata section below.1917assert(1 == (r->metadata_is_immediate()) +1918(r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),1919"metadata must be found in exactly one place");1920if (r->metadata_is_immediate() && r->metadata_value() != NULL) {1921Metadata* md = r->metadata_value();1922if (md != _method) Metadata::mark_on_stack(md);1923}1924}19251926void nmethod::mark_metadata_on_stack_non_relocs() {1927// Visit the metadata section1928for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {1929if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops1930Metadata* md = *p;1931Metadata::mark_on_stack(md);1932}19331934// Visit metadata not embedded in the other places.1935if (_method != NULL) Metadata::mark_on_stack(_method);1936}19371938bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {1939ResourceMark rm;19401941// Make sure the oop's ready to receive visitors1942assert(!is_zombie() && !is_unloaded(),1943"should not call follow on zombie or unloaded nmethod");19441945// If the method is not entrant then a JMP is plastered over the1946// first few bytes. If an oop in the old code was there, that oop1947// should not get GC'd. Skip the first few bytes of oops on1948// not-entrant methods.1949address low_boundary = verified_entry_point();1950if (is_not_entrant()) {1951low_boundary += NativeJump::instruction_size;1952// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.1953// (See comment above.)1954}19551956// The RedefineClasses() API can cause the class unloading invariant1957// to no longer be true. See jvmtiExport.hpp for details.1958// Also, leave a debugging breadcrumb in local flag.1959bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();1960if (a_class_was_redefined) {1961// This set of the unloading_occurred flag is done before the1962// call to post_compiled_method_unload() so that the unloading1963// of this nmethod is reported.1964unloading_occurred = true;1965}19661967// When class redefinition is used all metadata in the CodeCache has to be recorded,1968// so that unused "previous versions" can be purged. Since walking the CodeCache can1969// be expensive, the "mark on stack" is piggy-backed on this parallel unloading code.1970bool mark_metadata_on_stack = a_class_was_redefined;19711972// Exception cache1973clean_exception_cache(is_alive);19741975bool is_unloaded = false;1976bool postponed = false;19771978RelocIterator iter(this, low_boundary);1979while(iter.next()) {19801981switch (iter.type()) {19821983case relocInfo::virtual_call_type:1984if (unloading_occurred) {1985// If class unloading occurred we first iterate over all inline caches and1986// clear ICs where the cached oop is referring to an unloaded klass or method.1987clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive, mark_metadata_on_stack);1988}19891990postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);1991break;19921993case relocInfo::opt_virtual_call_type:1994postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);1995break;19961997case relocInfo::static_call_type:1998postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);1999break;20002001case relocInfo::oop_type:2002if (!is_unloaded) {2003is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred);2004}2005break;20062007case relocInfo::metadata_type:2008if (mark_metadata_on_stack) {2009mark_metadata_on_stack_at(&iter);2010}2011}2012}20132014if (mark_metadata_on_stack) {2015mark_metadata_on_stack_non_relocs();2016}20172018if (is_unloaded) {2019return postponed;2020}20212022// Scopes2023for (oop* p = oops_begin(); p < oops_end(); p++) {2024if (*p == Universe::non_oop_word()) continue; // skip non-oops2025if (can_unload(is_alive, p, unloading_occurred)) {2026is_unloaded = true;2027break;2028}2029}20302031if (is_unloaded) {2032return postponed;2033}20342035// Ensure that all metadata is still alive2036verify_metadata_loaders(low_boundary, is_alive);20372038return postponed;2039}20402041void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {2042ResourceMark rm;20432044// Make sure the oop's ready to receive visitors2045assert(!is_zombie(),2046"should not call follow on zombie nmethod");20472048// If the method is not entrant then a JMP is plastered over the2049// first few bytes. If an oop in the old code was there, that oop2050// should not get GC'd. Skip the first few bytes of oops on2051// not-entrant methods.2052address low_boundary = verified_entry_point();2053if (is_not_entrant()) {2054low_boundary += NativeJump::instruction_size;2055// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.2056// (See comment above.)2057}20582059RelocIterator iter(this, low_boundary);2060while(iter.next()) {20612062switch (iter.type()) {20632064case relocInfo::virtual_call_type:2065clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);2066break;20672068case relocInfo::opt_virtual_call_type:2069clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);2070break;20712072case relocInfo::static_call_type:2073clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);2074break;2075}2076}2077}20782079#ifdef ASSERT20802081class CheckClass : AllStatic {2082static BoolObjectClosure* _is_alive;20832084// Check class_loader is alive for this bit of metadata.2085static void check_class(Metadata* md) {2086Klass* klass = NULL;2087if (md->is_klass()) {2088klass = ((Klass*)md);2089} else if (md->is_method()) {2090klass = ((Method*)md)->method_holder();2091} else if (md->is_methodData()) {2092klass = ((MethodData*)md)->method()->method_holder();2093} else {2094md->print();2095ShouldNotReachHere();2096}2097assert(klass->is_loader_alive(_is_alive), "must be alive");2098}2099public:2100static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {2101assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");2102_is_alive = is_alive;2103nm->metadata_do(check_class);2104}2105};21062107// This is called during a safepoint so can use static data2108BoolObjectClosure* CheckClass::_is_alive = NULL;2109#endif // ASSERT211021112112// Processing of oop references should have been sufficient to keep2113// all strong references alive. Any weak references should have been2114// cleared as well. Visit all the metadata and ensure that it's2115// really alive.2116void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {2117#ifdef ASSERT2118RelocIterator iter(this, low_boundary);2119while (iter.next()) {2120// static_stub_Relocations may have dangling references to2121// Method*s so trim them out here. Otherwise it looks like2122// compiled code is maintaining a link to dead metadata.2123address static_call_addr = NULL;2124if (iter.type() == relocInfo::opt_virtual_call_type) {2125CompiledIC* cic = CompiledIC_at(&iter);2126if (!cic->is_call_to_interpreted()) {2127static_call_addr = iter.addr();2128}2129} else if (iter.type() == relocInfo::static_call_type) {2130CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());2131if (!csc->is_call_to_interpreted()) {2132static_call_addr = iter.addr();2133}2134}2135if (static_call_addr != NULL) {2136RelocIterator sciter(this, low_boundary);2137while (sciter.next()) {2138if (sciter.type() == relocInfo::static_stub_type &&2139sciter.static_stub_reloc()->static_call() == static_call_addr) {2140sciter.static_stub_reloc()->clear_inline_cache();2141}2142}2143}2144}2145// Check that the metadata embedded in the nmethod is alive2146CheckClass::do_check_class(is_alive, this);2147#endif2148}214921502151// Iterate over metadata calling this function. Used by RedefineClasses2152void nmethod::metadata_do(void f(Metadata*)) {2153address low_boundary = verified_entry_point();2154if (is_not_entrant()) {2155low_boundary += NativeJump::instruction_size;2156// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.2157// (See comment above.)2158}2159{2160// Visit all immediate references that are embedded in the instruction stream.2161RelocIterator iter(this, low_boundary);2162while (iter.next()) {2163if (iter.type() == relocInfo::metadata_type ) {2164metadata_Relocation* r = iter.metadata_reloc();2165// In this metadata, we must only follow those metadatas directly embedded in2166// the code. Other metadatas (oop_index>0) are seen as part of2167// the metadata section below.2168assert(1 == (r->metadata_is_immediate()) +2169(r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),2170"metadata must be found in exactly one place");2171if (r->metadata_is_immediate() && r->metadata_value() != NULL) {2172Metadata* md = r->metadata_value();2173if (md != _method) f(md);2174}2175} else if (iter.type() == relocInfo::virtual_call_type) {2176// Check compiledIC holders associated with this nmethod2177ResourceMark rm;2178CompiledIC *ic = CompiledIC_at(&iter);2179if (ic->is_icholder_call()) {2180CompiledICHolder* cichk = ic->cached_icholder();2181f(cichk->holder_metadata());2182f(cichk->holder_klass());2183} else {2184Metadata* ic_oop = ic->cached_metadata();2185if (ic_oop != NULL) {2186f(ic_oop);2187}2188}2189}2190}2191}21922193// Visit the metadata section2194for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {2195if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops2196Metadata* md = *p;2197f(md);2198}21992200// Call function Method*, not embedded in these other places.2201if (_method != NULL) f(_method);2202}22032204void nmethod::oops_do(OopClosure* f, bool allow_zombie) {2205// make sure the oops ready to receive visitors2206assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");2207assert(!is_unloaded(), "should not call follow on unloaded nmethod");22082209// If the method is not entrant or zombie then a JMP is plastered over the2210// first few bytes. If an oop in the old code was there, that oop2211// should not get GC'd. Skip the first few bytes of oops on2212// not-entrant methods.2213address low_boundary = verified_entry_point();2214if (is_not_entrant()) {2215low_boundary += NativeJump::instruction_size;2216// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.2217// (See comment above.)2218}22192220RelocIterator iter(this, low_boundary);22212222while (iter.next()) {2223if (iter.type() == relocInfo::oop_type ) {2224oop_Relocation* r = iter.oop_reloc();2225// In this loop, we must only follow those oops directly embedded in2226// the code. Other oops (oop_index>0) are seen as part of scopes_oops.2227assert(1 == (r->oop_is_immediate()) +2228(r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),2229"oop must be found in exactly one place");2230if (r->oop_is_immediate() && r->oop_value() != NULL) {2231f->do_oop(r->oop_addr());2232}2233}2234}22352236// Scopes2237// This includes oop constants not inlined in the code stream.2238for (oop* p = oops_begin(); p < oops_end(); p++) {2239if (*p == Universe::non_oop_word()) continue; // skip non-oops2240f->do_oop(p);2241}2242}22432244#define NMETHOD_SENTINEL ((nmethod*)badAddress)22452246nmethod* volatile nmethod::_oops_do_mark_nmethods;22472248// An nmethod is "marked" if its _mark_link is set non-null.2249// Even if it is the end of the linked list, it will have a non-null link value,2250// as long as it is on the list.2251// This code must be MP safe, because it is used from parallel GC passes.2252bool nmethod::test_set_oops_do_mark() {2253assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");2254nmethod* observed_mark_link = _oops_do_mark_link;2255if (observed_mark_link == NULL) {2256// Claim this nmethod for this thread to mark.2257observed_mark_link = (nmethod*)2258Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);2259if (observed_mark_link == NULL) {22602261// Atomically append this nmethod (now claimed) to the head of the list:2262nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;2263for (;;) {2264nmethod* required_mark_nmethods = observed_mark_nmethods;2265_oops_do_mark_link = required_mark_nmethods;2266observed_mark_nmethods = (nmethod*)2267Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);2268if (observed_mark_nmethods == required_mark_nmethods)2269break;2270}2271// Mark was clear when we first saw this guy.2272NOT_PRODUCT(if (TraceScavenge) print_on(tty, "oops_do, mark"));2273return false;2274}2275}2276// On fall through, another racing thread marked this nmethod before we did.2277return true;2278}22792280void nmethod::oops_do_marking_prologue() {2281NOT_PRODUCT(if (TraceScavenge) tty->print_cr("[oops_do_marking_prologue"));2282assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");2283// We use cmpxchg_ptr instead of regular assignment here because the user2284// may fork a bunch of threads, and we need them all to see the same state.2285void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);2286guarantee(observed == NULL, "no races in this sequential code");2287}22882289void nmethod::oops_do_marking_epilogue() {2290assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");2291nmethod* cur = _oops_do_mark_nmethods;2292while (cur != NMETHOD_SENTINEL) {2293assert(cur != NULL, "not NULL-terminated");2294nmethod* next = cur->_oops_do_mark_link;2295cur->_oops_do_mark_link = NULL;2296DEBUG_ONLY(cur->verify_oop_relocations());2297NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark"));2298cur = next;2299}2300void* required = _oops_do_mark_nmethods;2301void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);2302guarantee(observed == required, "no races in this sequential code");2303NOT_PRODUCT(if (TraceScavenge) tty->print_cr("oops_do_marking_epilogue]"));2304}23052306class DetectScavengeRoot: public OopClosure {2307bool _detected_scavenge_root;2308public:2309DetectScavengeRoot() : _detected_scavenge_root(false)2310{ NOT_PRODUCT(_print_nm = NULL); }2311bool detected_scavenge_root() { return _detected_scavenge_root; }2312virtual void do_oop(oop* p) {2313if ((*p) != NULL && (*p)->is_scavengable()) {2314NOT_PRODUCT(maybe_print(p));2315_detected_scavenge_root = true;2316}2317}2318virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }23192320#ifndef PRODUCT2321nmethod* _print_nm;2322void maybe_print(oop* p) {2323if (_print_nm == NULL) return;2324if (!_detected_scavenge_root) _print_nm->print_on(tty, "new scavenge root");2325tty->print_cr("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ")",2326_print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),2327(void *)(*p), (intptr_t)p);2328(*p)->print();2329}2330#endif //PRODUCT2331};23322333bool nmethod::detect_scavenge_root_oops() {2334DetectScavengeRoot detect_scavenge_root;2335NOT_PRODUCT(if (TraceScavenge) detect_scavenge_root._print_nm = this);2336oops_do(&detect_scavenge_root);2337return detect_scavenge_root.detected_scavenge_root();2338}23392340// Method that knows how to preserve outgoing arguments at call. This method must be2341// called with a frame corresponding to a Java invoke2342void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {2343#ifndef SHARK2344if (!method()->is_native()) {2345SimpleScopeDesc ssd(this, fr.pc());2346Bytecode_invoke call(ssd.method(), ssd.bci());2347bool has_receiver = call.has_receiver();2348bool has_appendix = call.has_appendix();2349Symbol* signature = call.signature();2350fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);2351}2352#endif // !SHARK2353}235423552356oop nmethod::embeddedOop_at(u_char* p) {2357RelocIterator iter(this, p, p + 1);2358while (iter.next())2359if (iter.type() == relocInfo::oop_type) {2360return iter.oop_reloc()->oop_value();2361}2362return NULL;2363}236423652366inline bool includes(void* p, void* from, void* to) {2367return from <= p && p < to;2368}236923702371void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {2372assert(count >= 2, "must be sentinel values, at least");23732374#ifdef ASSERT2375// must be sorted and unique; we do a binary search in find_pc_desc()2376int prev_offset = pcs[0].pc_offset();2377assert(prev_offset == PcDesc::lower_offset_limit,2378"must start with a sentinel");2379for (int i = 1; i < count; i++) {2380int this_offset = pcs[i].pc_offset();2381assert(this_offset > prev_offset, "offsets must be sorted");2382prev_offset = this_offset;2383}2384assert(prev_offset == PcDesc::upper_offset_limit,2385"must end with a sentinel");2386#endif //ASSERT23872388// Search for MethodHandle invokes and tag the nmethod.2389for (int i = 0; i < count; i++) {2390if (pcs[i].is_method_handle_invoke()) {2391set_has_method_handle_invokes(true);2392break;2393}2394}2395assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");23962397int size = count * sizeof(PcDesc);2398assert(scopes_pcs_size() >= size, "oob");2399memcpy(scopes_pcs_begin(), pcs, size);24002401// Adjust the final sentinel downward.2402PcDesc* last_pc = &scopes_pcs_begin()[count-1];2403assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");2404last_pc->set_pc_offset(content_size() + 1);2405for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {2406// Fill any rounding gaps with copies of the last record.2407last_pc[1] = last_pc[0];2408}2409// The following assert could fail if sizeof(PcDesc) is not2410// an integral multiple of oopSize (the rounding term).2411// If it fails, change the logic to always allocate a multiple2412// of sizeof(PcDesc), and fill unused words with copies of *last_pc.2413assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");2414}24152416void nmethod::copy_scopes_data(u_char* buffer, int size) {2417assert(scopes_data_size() >= size, "oob");2418memcpy(scopes_data_begin(), buffer, size);2419}242024212422#ifdef ASSERT2423static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {2424PcDesc* lower = nm->scopes_pcs_begin();2425PcDesc* upper = nm->scopes_pcs_end();2426lower += 1; // exclude initial sentinel2427PcDesc* res = NULL;2428for (PcDesc* p = lower; p < upper; p++) {2429NOT_PRODUCT(--nmethod_stats.pc_desc_tests); // don't count this call to match_desc2430if (match_desc(p, pc_offset, approximate)) {2431if (res == NULL)2432res = p;2433else2434res = (PcDesc*) badAddress;2435}2436}2437return res;2438}2439#endif244024412442// Finds a PcDesc with real-pc equal to "pc"2443PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {2444address base_address = code_begin();2445if ((pc < base_address) ||2446(pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {2447return NULL; // PC is wildly out of range2448}2449int pc_offset = (int) (pc - base_address);24502451// Check the PcDesc cache if it contains the desired PcDesc2452// (This as an almost 100% hit rate.)2453PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);2454if (res != NULL) {2455assert(res == linear_search(this, pc_offset, approximate), "cache ok");2456return res;2457}24582459// Fallback algorithm: quasi-linear search for the PcDesc2460// Find the last pc_offset less than the given offset.2461// The successor must be the required match, if there is a match at all.2462// (Use a fixed radix to avoid expensive affine pointer arithmetic.)2463PcDesc* lower = scopes_pcs_begin();2464PcDesc* upper = scopes_pcs_end();2465upper -= 1; // exclude final sentinel2466if (lower >= upper) return NULL; // native method; no PcDescs at all24672468#define assert_LU_OK \2469/* invariant on lower..upper during the following search: */ \2470assert(lower->pc_offset() < pc_offset, "sanity"); \2471assert(upper->pc_offset() >= pc_offset, "sanity")2472assert_LU_OK;24732474// Use the last successful return as a split point.2475PcDesc* mid = _pc_desc_cache.last_pc_desc();2476NOT_PRODUCT(++nmethod_stats.pc_desc_searches);2477if (mid->pc_offset() < pc_offset) {2478lower = mid;2479} else {2480upper = mid;2481}24822483// Take giant steps at first (4096, then 256, then 16, then 1)2484const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);2485const int RADIX = (1 << LOG2_RADIX);2486for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {2487while ((mid = lower + step) < upper) {2488assert_LU_OK;2489NOT_PRODUCT(++nmethod_stats.pc_desc_searches);2490if (mid->pc_offset() < pc_offset) {2491lower = mid;2492} else {2493upper = mid;2494break;2495}2496}2497assert_LU_OK;2498}24992500// Sneak up on the value with a linear search of length ~16.2501while (true) {2502assert_LU_OK;2503mid = lower + 1;2504NOT_PRODUCT(++nmethod_stats.pc_desc_searches);2505if (mid->pc_offset() < pc_offset) {2506lower = mid;2507} else {2508upper = mid;2509break;2510}2511}2512#undef assert_LU_OK25132514if (match_desc(upper, pc_offset, approximate)) {2515assert(upper == linear_search(this, pc_offset, approximate), "search ok");2516_pc_desc_cache.add_pc_desc(upper);2517return upper;2518} else {2519assert(NULL == linear_search(this, pc_offset, approximate), "search ok");2520return NULL;2521}2522}252325242525bool nmethod::check_all_dependencies() {2526bool found_check = false;2527// wholesale check of all dependencies2528for (Dependencies::DepStream deps(this); deps.next(); ) {2529if (deps.check_dependency() != NULL) {2530found_check = true;2531NOT_DEBUG(break);2532}2533}2534return found_check; // tell caller if we found anything2535}25362537bool nmethod::check_dependency_on(DepChange& changes) {2538// What has happened:2539// 1) a new class dependee has been added2540// 2) dependee and all its super classes have been marked2541bool found_check = false; // set true if we are upset2542for (Dependencies::DepStream deps(this); deps.next(); ) {2543// Evaluate only relevant dependencies.2544if (deps.spot_check_dependency_at(changes) != NULL) {2545found_check = true;2546NOT_DEBUG(break);2547}2548}2549return found_check;2550}25512552bool nmethod::is_evol_dependent_on(Klass* dependee) {2553InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);2554Array<Method*>* dependee_methods = dependee_ik->methods();2555for (Dependencies::DepStream deps(this); deps.next(); ) {2556if (deps.type() == Dependencies::evol_method) {2557Method* method = deps.method_argument(0);2558for (int j = 0; j < dependee_methods->length(); j++) {2559if (dependee_methods->at(j) == method) {2560// RC_TRACE macro has an embedded ResourceMark2561RC_TRACE(0x01000000,2562("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",2563_method->method_holder()->external_name(),2564_method->name()->as_C_string(),2565_method->signature()->as_C_string(), compile_id(),2566method->method_holder()->external_name(),2567method->name()->as_C_string(),2568method->signature()->as_C_string()));2569if (TraceDependencies || LogCompilation)2570deps.log_dependency(dependee);2571return true;2572}2573}2574}2575}2576return false;2577}25782579// Called from mark_for_deoptimization, when dependee is invalidated.2580bool nmethod::is_dependent_on_method(Method* dependee) {2581for (Dependencies::DepStream deps(this); deps.next(); ) {2582if (deps.type() != Dependencies::evol_method)2583continue;2584Method* method = deps.method_argument(0);2585if (method == dependee) return true;2586}2587return false;2588}258925902591bool nmethod::is_patchable_at(address instr_addr) {2592assert(insts_contains(instr_addr), "wrong nmethod used");2593if (is_zombie()) {2594// a zombie may never be patched2595return false;2596}2597return true;2598}259926002601address nmethod::continuation_for_implicit_exception(address pc) {2602// Exception happened outside inline-cache check code => we are inside2603// an active nmethod => use cpc to determine a return address2604int exception_offset = pc - code_begin();2605int cont_offset = ImplicitExceptionTable(this).at( exception_offset );2606#ifdef ASSERT2607if (cont_offset == 0) {2608Thread* thread = ThreadLocalStorage::get_thread_slow();2609ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY2610HandleMark hm(thread);2611ResourceMark rm(thread);2612CodeBlob* cb = CodeCache::find_blob(pc);2613assert(cb != NULL && cb == this, "");2614ttyLocker ttyl;2615tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc);2616print();2617method()->print_codes();2618print_code();2619print_pcs();2620}2621#endif2622if (cont_offset == 0) {2623// Let the normal error handling report the exception2624return NULL;2625}2626return code_begin() + cont_offset;2627}2628262926302631void nmethod_init() {2632// make sure you didn't forget to adjust the filler fields2633assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");2634}263526362637//-------------------------------------------------------------------------------------------263826392640// QQQ might we make this work from a frame??2641nmethodLocker::nmethodLocker(address pc) {2642CodeBlob* cb = CodeCache::find_blob(pc);2643guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");2644_nm = (nmethod*)cb;2645lock_nmethod(_nm);2646}26472648// Only JvmtiDeferredEvent::compiled_method_unload_event()2649// should pass zombie_ok == true.2650void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {2651if (nm == NULL) return;2652Atomic::inc(&nm->_lock_count);2653guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");2654}26552656void nmethodLocker::unlock_nmethod(nmethod* nm) {2657if (nm == NULL) return;2658Atomic::dec(&nm->_lock_count);2659guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");2660}266126622663// -----------------------------------------------------------------------------2664// nmethod::get_deopt_original_pc2665//2666// Return the original PC for the given PC if:2667// (a) the given PC belongs to a nmethod and2668// (b) it is a deopt PC2669address nmethod::get_deopt_original_pc(const frame* fr) {2670if (fr->cb() == NULL) return NULL;26712672nmethod* nm = fr->cb()->as_nmethod_or_null();2673if (nm != NULL && nm->is_deopt_pc(fr->pc()))2674return nm->get_original_pc(fr);26752676return NULL;2677}267826792680// -----------------------------------------------------------------------------2681// MethodHandle26822683bool nmethod::is_method_handle_return(address return_pc) {2684if (!has_method_handle_invokes()) return false;2685PcDesc* pd = pc_desc_at(return_pc);2686if (pd == NULL)2687return false;2688return pd->is_method_handle_invoke();2689}269026912692// -----------------------------------------------------------------------------2693// Verification26942695class VerifyOopsClosure: public OopClosure {2696nmethod* _nm;2697bool _ok;2698public:2699VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }2700bool ok() { return _ok; }2701virtual void do_oop(oop* p) {2702if ((*p) == NULL || (*p)->is_oop()) return;2703if (_ok) {2704_nm->print_nmethod(true);2705_ok = false;2706}2707tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",2708(void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));2709}2710virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }2711};27122713void nmethod::verify() {27142715// Hmm. OSR methods can be deopted but not marked as zombie or not_entrant2716// seems odd.27172718if (is_zombie() || is_not_entrant() || is_unloaded())2719return;27202721// Make sure all the entry points are correctly aligned for patching.2722NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());27232724// assert(method()->is_oop(), "must be valid");27252726ResourceMark rm;27272728if (!CodeCache::contains(this)) {2729fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));2730}27312732if(is_native_method() )2733return;27342735nmethod* nm = CodeCache::find_nmethod(verified_entry_point());2736if (nm != this) {2737fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",2738this));2739}27402741for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {2742if (! p->verify(this)) {2743tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);2744}2745}27462747VerifyOopsClosure voc(this);2748oops_do(&voc);2749assert(voc.ok(), "embedded oops must be OK");2750verify_scavenge_root_oops();27512752verify_scopes();2753}275427552756void nmethod::verify_interrupt_point(address call_site) {2757// Verify IC only when nmethod installation is finished.2758bool is_installed = (method()->code() == this) // nmethod is in state 'in_use' and installed2759|| !this->is_in_use(); // nmethod is installed, but not in 'in_use' state2760if (is_installed) {2761Thread *cur = Thread::current();2762if (CompiledIC_lock->owner() == cur ||2763((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&2764SafepointSynchronize::is_at_safepoint())) {2765CompiledIC_at(this, call_site);2766CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());2767} else {2768MutexLocker ml_verify (CompiledIC_lock);2769CompiledIC_at(this, call_site);2770}2771}27722773PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());2774assert(pd != NULL, "PcDesc must exist");2775for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),2776pd->obj_decode_offset(), pd->should_reexecute(),2777pd->return_oop());2778!sd->is_top(); sd = sd->sender()) {2779sd->verify();2780}2781}27822783void nmethod::verify_scopes() {2784if( !method() ) return; // Runtime stubs have no scope2785if (method()->is_native()) return; // Ignore stub methods.2786// iterate through all interrupt point2787// and verify the debug information is valid.2788RelocIterator iter((nmethod*)this);2789while (iter.next()) {2790address stub = NULL;2791switch (iter.type()) {2792case relocInfo::virtual_call_type:2793verify_interrupt_point(iter.addr());2794break;2795case relocInfo::opt_virtual_call_type:2796stub = iter.opt_virtual_call_reloc()->static_stub();2797verify_interrupt_point(iter.addr());2798break;2799case relocInfo::static_call_type:2800stub = iter.static_call_reloc()->static_stub();2801//verify_interrupt_point(iter.addr());2802break;2803case relocInfo::runtime_call_type:2804address destination = iter.reloc()->value();2805// Right now there is no way to find out which entries support2806// an interrupt point. It would be nice if we had this2807// information in a table.2808break;2809}2810assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");2811}2812}281328142815// -----------------------------------------------------------------------------2816// Non-product code2817#ifndef PRODUCT28182819class DebugScavengeRoot: public OopClosure {2820nmethod* _nm;2821bool _ok;2822public:2823DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }2824bool ok() { return _ok; }2825virtual void do_oop(oop* p) {2826if ((*p) == NULL || !(*p)->is_scavengable()) return;2827if (_ok) {2828_nm->print_nmethod(true);2829_ok = false;2830}2831tty->print_cr("*** scavengable oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",2832(void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));2833(*p)->print();2834}2835virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }2836};28372838void nmethod::verify_scavenge_root_oops() {2839if (UseG1GC || UseShenandoahGC) {2840return;2841}28422843if (!on_scavenge_root_list()) {2844// Actually look inside, to verify the claim that it's clean.2845DebugScavengeRoot debug_scavenge_root(this);2846oops_do(&debug_scavenge_root);2847if (!debug_scavenge_root.ok())2848fatal("found an unadvertised bad scavengable oop in the code cache");2849}2850assert(scavenge_root_not_marked(), "");2851}28522853#endif // PRODUCT28542855// Printing operations28562857void nmethod::print() const {2858ResourceMark rm;2859ttyLocker ttyl; // keep the following output all in one block28602861tty->print("Compiled method ");28622863if (is_compiled_by_c1()) {2864tty->print("(c1) ");2865} else if (is_compiled_by_c2()) {2866tty->print("(c2) ");2867} else if (is_compiled_by_shark()) {2868tty->print("(shark) ");2869} else {2870tty->print("(nm) ");2871}28722873print_on(tty, NULL);28742875if (WizardMode) {2876tty->print("((nmethod*) " INTPTR_FORMAT ") ", this);2877tty->print(" for method " INTPTR_FORMAT , (address)method());2878tty->print(" { ");2879if (is_in_use()) tty->print("in_use ");2880if (is_not_entrant()) tty->print("not_entrant ");2881if (is_zombie()) tty->print("zombie ");2882if (is_unloaded()) tty->print("unloaded ");2883if (on_scavenge_root_list()) tty->print("scavenge_root ");2884tty->print_cr("}:");2885}2886if (size () > 0) tty->print_cr(" total in heap [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",2887(address)this,2888(address)this + size(),2889size());2890if (relocation_size () > 0) tty->print_cr(" relocation [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",2891relocation_begin(),2892relocation_end(),2893relocation_size());2894if (consts_size () > 0) tty->print_cr(" constants [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",2895consts_begin(),2896consts_end(),2897consts_size());2898if (insts_size () > 0) tty->print_cr(" main code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",2899insts_begin(),2900insts_end(),2901insts_size());2902if (stub_size () > 0) tty->print_cr(" stub code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",2903stub_begin(),2904stub_end(),2905stub_size());2906if (oops_size () > 0) tty->print_cr(" oops [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",2907oops_begin(),2908oops_end(),2909oops_size());2910if (metadata_size () > 0) tty->print_cr(" metadata [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",2911metadata_begin(),2912metadata_end(),2913metadata_size());2914if (scopes_data_size () > 0) tty->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",2915scopes_data_begin(),2916scopes_data_end(),2917scopes_data_size());2918if (scopes_pcs_size () > 0) tty->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",2919scopes_pcs_begin(),2920scopes_pcs_end(),2921scopes_pcs_size());2922if (dependencies_size () > 0) tty->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",2923dependencies_begin(),2924dependencies_end(),2925dependencies_size());2926if (handler_table_size() > 0) tty->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",2927handler_table_begin(),2928handler_table_end(),2929handler_table_size());2930if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",2931nul_chk_table_begin(),2932nul_chk_table_end(),2933nul_chk_table_size());2934}29352936#ifndef PRODUCT29372938void nmethod::print_scopes() {2939// Find the first pc desc for all scopes in the code and print it.2940ResourceMark rm;2941for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {2942if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)2943continue;29442945ScopeDesc* sd = scope_desc_at(p->real_pc(this));2946sd->print_on(tty, p);2947}2948}29492950void nmethod::print_dependencies() {2951ResourceMark rm;2952ttyLocker ttyl; // keep the following output all in one block2953tty->print_cr("Dependencies:");2954for (Dependencies::DepStream deps(this); deps.next(); ) {2955deps.print_dependency();2956Klass* ctxk = deps.context_type();2957if (ctxk != NULL) {2958if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) {2959tty->print_cr(" [nmethod<=klass]%s", ctxk->external_name());2960}2961}2962deps.log_dependency(); // put it into the xml log also2963}2964}296529662967void nmethod::print_relocations() {2968ResourceMark m; // in case methods get printed via the debugger2969tty->print_cr("relocations:");2970RelocIterator iter(this);2971iter.print();2972if (UseRelocIndex) {2973jint* index_end = (jint*)relocation_end() - 1;2974jint index_size = *index_end;2975jint* index_start = (jint*)( (address)index_end - index_size );2976tty->print_cr(" index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size);2977if (index_size > 0) {2978jint* ip;2979for (ip = index_start; ip+2 <= index_end; ip += 2)2980tty->print_cr(" (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,2981ip[0],2982ip[1],2983header_end()+ip[0],2984relocation_begin()-1+ip[1]);2985for (; ip < index_end; ip++)2986tty->print_cr(" (%d ?)", ip[0]);2987tty->print_cr(" @" INTPTR_FORMAT ": index_size=%d", ip, *ip);2988ip++;2989tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);2990}2991}2992}299329942995void nmethod::print_pcs() {2996ResourceMark m; // in case methods get printed via debugger2997tty->print_cr("pc-bytecode offsets:");2998for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {2999p->print(this);3000}3001}30023003#endif // PRODUCT30043005const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {3006RelocIterator iter(this, begin, end);3007bool have_one = false;3008while (iter.next()) {3009have_one = true;3010switch (iter.type()) {3011case relocInfo::none: return "no_reloc";3012case relocInfo::oop_type: {3013stringStream st;3014oop_Relocation* r = iter.oop_reloc();3015oop obj = r->oop_value();3016st.print("oop(");3017if (obj == NULL) st.print("NULL");3018else obj->print_value_on(&st);3019st.print(")");3020return st.as_string();3021}3022case relocInfo::metadata_type: {3023stringStream st;3024metadata_Relocation* r = iter.metadata_reloc();3025Metadata* obj = r->metadata_value();3026st.print("metadata(");3027if (obj == NULL) st.print("NULL");3028else obj->print_value_on(&st);3029st.print(")");3030return st.as_string();3031}3032case relocInfo::virtual_call_type: return "virtual_call";3033case relocInfo::opt_virtual_call_type: return "optimized virtual_call";3034case relocInfo::static_call_type: return "static_call";3035case relocInfo::static_stub_type: return "static_stub";3036case relocInfo::runtime_call_type: return "runtime_call";3037case relocInfo::external_word_type: return "external_word";3038case relocInfo::internal_word_type: return "internal_word";3039case relocInfo::section_word_type: return "section_word";3040case relocInfo::poll_type: return "poll";3041case relocInfo::poll_return_type: return "poll_return";3042case relocInfo::type_mask: return "type_bit_mask";3043}3044}3045return have_one ? "other" : NULL;3046}30473048// Return a the last scope in (begin..end]3049ScopeDesc* nmethod::scope_desc_in(address begin, address end) {3050PcDesc* p = pc_desc_near(begin+1);3051if (p != NULL && p->real_pc(this) <= end) {3052return new ScopeDesc(this, p->scope_decode_offset(),3053p->obj_decode_offset(), p->should_reexecute(),3054p->return_oop());3055}3056return NULL;3057}30583059void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {3060if (block_begin == entry_point()) stream->print_cr("[Entry Point]");3061if (block_begin == verified_entry_point()) stream->print_cr("[Verified Entry Point]");3062if (block_begin == exception_begin()) stream->print_cr("[Exception Handler]");3063if (block_begin == stub_begin()) stream->print_cr("[Stub Code]");3064if (block_begin == deopt_handler_begin()) stream->print_cr("[Deopt Handler Code]");30653066if (has_method_handle_invokes())3067if (block_begin == deopt_mh_handler_begin()) stream->print_cr("[Deopt MH Handler Code]");30683069if (block_begin == consts_begin()) stream->print_cr("[Constants]");30703071if (block_begin == entry_point()) {3072methodHandle m = method();3073if (m.not_null()) {3074stream->print(" # ");3075m->print_value_on(stream);3076stream->cr();3077}3078if (m.not_null() && !is_osr_method()) {3079ResourceMark rm;3080int sizeargs = m->size_of_parameters();3081BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);3082VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);3083{3084int sig_index = 0;3085if (!m->is_static())3086sig_bt[sig_index++] = T_OBJECT; // 'this'3087for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {3088BasicType t = ss.type();3089sig_bt[sig_index++] = t;3090if (type2size[t] == 2) {3091sig_bt[sig_index++] = T_VOID;3092} else {3093assert(type2size[t] == 1, "size is 1 or 2");3094}3095}3096assert(sig_index == sizeargs, "");3097}3098const char* spname = "sp"; // make arch-specific?3099intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);3100int stack_slot_offset = this->frame_size() * wordSize;3101int tab1 = 14, tab2 = 24;3102int sig_index = 0;3103int arg_index = (m->is_static() ? 0 : -1);3104bool did_old_sp = false;3105for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {3106bool at_this = (arg_index == -1);3107bool at_old_sp = false;3108BasicType t = (at_this ? T_OBJECT : ss.type());3109assert(t == sig_bt[sig_index], "sigs in sync");3110if (at_this)3111stream->print(" # this: ");3112else3113stream->print(" # parm%d: ", arg_index);3114stream->move_to(tab1);3115VMReg fst = regs[sig_index].first();3116VMReg snd = regs[sig_index].second();3117if (fst->is_reg()) {3118stream->print("%s", fst->name());3119if (snd->is_valid()) {3120stream->print(":%s", snd->name());3121}3122} else if (fst->is_stack()) {3123int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;3124if (offset == stack_slot_offset) at_old_sp = true;3125stream->print("[%s+0x%x]", spname, offset);3126} else {3127stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);3128}3129stream->print(" ");3130stream->move_to(tab2);3131stream->print("= ");3132if (at_this) {3133m->method_holder()->print_value_on(stream);3134} else {3135bool did_name = false;3136if (!at_this && ss.is_object()) {3137Symbol* name = ss.as_symbol_or_null();3138if (name != NULL) {3139name->print_value_on(stream);3140did_name = true;3141}3142}3143if (!did_name)3144stream->print("%s", type2name(t));3145}3146if (at_old_sp) {3147stream->print(" (%s of caller)", spname);3148did_old_sp = true;3149}3150stream->cr();3151sig_index += type2size[t];3152arg_index += 1;3153if (!at_this) ss.next();3154}3155if (!did_old_sp) {3156stream->print(" # ");3157stream->move_to(tab1);3158stream->print("[%s+0x%x]", spname, stack_slot_offset);3159stream->print(" (%s of caller)", spname);3160stream->cr();3161}3162}3163}3164}31653166void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {3167// First, find an oopmap in (begin, end].3168// We use the odd half-closed interval so that oop maps and scope descs3169// which are tied to the byte after a call are printed with the call itself.3170address base = code_begin();3171OopMapSet* oms = oop_maps();3172if (oms != NULL) {3173for (int i = 0, imax = oms->size(); i < imax; i++) {3174OopMap* om = oms->at(i);3175address pc = base + om->offset();3176if (pc > begin) {3177if (pc <= end) {3178st->move_to(column);3179st->print("; ");3180om->print_on(st);3181}3182break;3183}3184}3185}31863187// Print any debug info present at this pc.3188ScopeDesc* sd = scope_desc_in(begin, end);3189if (sd != NULL) {3190st->move_to(column);3191if (sd->bci() == SynchronizationEntryBCI) {3192st->print(";*synchronization entry");3193} else {3194if (sd->method() == NULL) {3195st->print("method is NULL");3196} else if (sd->method()->is_native()) {3197st->print("method is native");3198} else {3199Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());3200st->print(";*%s", Bytecodes::name(bc));3201switch (bc) {3202case Bytecodes::_invokevirtual:3203case Bytecodes::_invokespecial:3204case Bytecodes::_invokestatic:3205case Bytecodes::_invokeinterface:3206{3207Bytecode_invoke invoke(sd->method(), sd->bci());3208st->print(" ");3209if (invoke.name() != NULL)3210invoke.name()->print_symbol_on(st);3211else3212st->print("<UNKNOWN>");3213break;3214}3215case Bytecodes::_getfield:3216case Bytecodes::_putfield:3217case Bytecodes::_getstatic:3218case Bytecodes::_putstatic:3219{3220Bytecode_field field(sd->method(), sd->bci());3221st->print(" ");3222if (field.name() != NULL)3223field.name()->print_symbol_on(st);3224else3225st->print("<UNKNOWN>");3226}3227}3228}3229}32303231// Print all scopes3232for (;sd != NULL; sd = sd->sender()) {3233st->move_to(column);3234st->print("; -");3235if (sd->method() == NULL) {3236st->print("method is NULL");3237} else {3238sd->method()->print_short_name(st);3239}3240int lineno = sd->method()->line_number_from_bci(sd->bci());3241if (lineno != -1) {3242st->print("@%d (line %d)", sd->bci(), lineno);3243} else {3244st->print("@%d", sd->bci());3245}3246st->cr();3247}3248}32493250// Print relocation information3251const char* str = reloc_string_for(begin, end);3252if (str != NULL) {3253if (sd != NULL) st->cr();3254st->move_to(column);3255st->print("; {%s}", str);3256}3257int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());3258if (cont_offset != 0) {3259st->move_to(column);3260st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset);3261}32623263}32643265#ifndef PRODUCT32663267void nmethod::print_value_on(outputStream* st) const {3268st->print("nmethod");3269print_on(st, NULL);3270}32713272void nmethod::print_calls(outputStream* st) {3273RelocIterator iter(this);3274while (iter.next()) {3275switch (iter.type()) {3276case relocInfo::virtual_call_type:3277case relocInfo::opt_virtual_call_type: {3278VerifyMutexLocker mc(CompiledIC_lock);3279CompiledIC_at(&iter)->print();3280break;3281}3282case relocInfo::static_call_type:3283st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr());3284compiledStaticCall_at(iter.reloc())->print();3285break;3286}3287}3288}32893290void nmethod::print_handler_table() {3291ExceptionHandlerTable(this).print();3292}32933294void nmethod::print_nul_chk_table() {3295ImplicitExceptionTable(this).print(code_begin());3296}32973298void nmethod::print_statistics() {3299ttyLocker ttyl;3300if (xtty != NULL) xtty->head("statistics type='nmethod'");3301nmethod_stats.print_native_nmethod_stats();3302nmethod_stats.print_nmethod_stats();3303DebugInformationRecorder::print_statistics();3304nmethod_stats.print_pc_stats();3305Dependencies::print_statistics();3306if (xtty != NULL) xtty->tail("statistics");3307}33083309#endif // PRODUCT331033113312