Path: blob/master/src/hotspot/share/code/compiledMethod.cpp
/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  { // avoid uninitialized fields, even for short time periods
    _scopes_data_begin      = NULL;
    _deopt_handler_begin    = NULL;
    _deopt_mh_handler_begin = NULL;
    _exception_cache        = NULL;
  }
  _has_unsafe_access         = 0;
  _has_method_handle_invokes = 0;
  _has_wide_vectors          = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

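// Note on the states reported below (simplified summary for orientation; see
// nmethod.hpp for the authoritative definitions): a freshly compiled nmethod
// is not_installed until it is published, then in_use while it can be entered.
// Deoptimization makes it not_entrant, so new calls are rerouted while existing
// activations are allowed to finish, after which the sweeper can convert it to
// a zombie and eventually reclaim it once no activations remain.
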
installed";98case in_use:99return "in use";100case not_used:101return "not_used";102case not_entrant:103return "not_entrant";104case zombie:105return "zombie";106case unloaded:107return "unloaded";108default:109fatal("unexpected method state: %d", state);110return NULL;111}112}113114//-----------------------------------------------------------------------------115void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) {116MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock,117Mutex::_no_safepoint_check_flag);118_mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);119}120121//-----------------------------------------------------------------------------122123ExceptionCache* CompiledMethod::exception_cache_acquire() const {124return Atomic::load_acquire(&_exception_cache);125}126127void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {128assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");129assert(new_entry != NULL,"Must be non null");130assert(new_entry->next() == NULL, "Must be null");131132for (;;) {133ExceptionCache *ec = exception_cache();134if (ec != NULL) {135Klass* ex_klass = ec->exception_type();136if (!ex_klass->is_loader_alive()) {137// We must guarantee that entries are not inserted with new next pointer138// edges to ExceptionCache entries with dead klasses, due to bad interactions139// with concurrent ExceptionCache cleanup. Therefore, the inserts roll140// the head pointer forward to the first live ExceptionCache, so that the new141// next pointers always point at live ExceptionCaches, that are not removed due142// to concurrent ExceptionCache cleanup.143ExceptionCache* next = ec->next();144if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {145CodeCache::release_exception_cache(ec);146}147continue;148}149ec = exception_cache();150if (ec != NULL) {151new_entry->set_next(ec);152}153}154if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {155return;156}157}158}159160void CompiledMethod::clean_exception_cache() {161// For each nmethod, only a single thread may call this cleanup function162// at the same time, whether called in STW cleanup or concurrent cleanup.163// Note that if the GC is processing exception cache cleaning in a concurrent phase,164// then a single writer may contend with cleaning up the head pointer to the165// first ExceptionCache node that has a Klass* that is alive. That is fine,166// as long as there is no concurrent cleanup of next pointers from concurrent writers.167// And the concurrent writers do not clean up next pointers, only the head.168// Also note that concurent readers will walk through Klass* pointers that are not169// alive. That does not cause ABA problems, because Klass* is deleted after170// a handshake with all threads, after all stale ExceptionCaches have been171// unlinked. 
void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems, as unlinking and deletion are separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean head; this is contended by concurrent inserts, that
        // both lazily clean the head, and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // It is impossible, during cleanup, to connect the next pointer to
        // an ExceptionCache that has not been published before a safepoint
        // prior to the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

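// Informal illustration of the race discussed below: two threads can both miss
// in the lock-free lookup above for the same (exception, pc), both compute the
// handler, and both attempt to record it. The ExceptionCache_lock serializes
// the writers, and the check for already-inserted data lets the second writer
// discover the first writer's entry instead of blindly duplicating it; the
// only cost of the race is a little wasted work.
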
void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

// These methods are private, and are used to manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

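// Note on the two queries above: constructing the RelocIterator over the
// half-open range [pc, pc+1) restricts iteration to relocation entries bound
// to the single instruction at pc, so the loops only ever examine the
// relocations attached to that one safepoint poll candidate.
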
void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd);
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd);
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // location depends on whether the nmethod is entrant or not.
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    if (ssd.is_optimized_linkToNative()) return; // call was replaced
    Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature   = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

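// Background note on attached methods (summarized from the comments above):
// compilers may attach the resolved callee Method* directly to a call site's
// relocation when the invoke bytecode alone would be misleading, for example
// after MethodHandle linkage replaces the nominal target with an adapter.
// preserve_callee_argument_oops() therefore prefers the attached method's
// signature over the bytecode's when deciding which outgoing argument slots
// contain oops.
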
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear IC callsites, releasing ICStubs of all compiled ICs
// as well as any associated CompiledICHolders.
void CompiledMethod::clear_ic_callsites() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->set_to_clean(false);
    }
  }
}

#ifdef ASSERT
// Check class_loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
  void do_metadata(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(), "must be alive");
  }
};
#endif // ASSERT


bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_clean()) {
    return true;
  }
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below. (We check this further below.)
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return true;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return true;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return true;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  return ic->set_to_clean();
}

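// Summary of the decision above (informal): a non-clean IC is kept only if the
// metadata it caches is still reachable from a live class loader, that is, an
// icholder whose loader is alive, a Klass* whose loader is alive, or a Method*
// whose holder's loader is alive. Anything else, including a NULL cached
// value, falls through to set_to_clean().
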
// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // OK to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      // Inline cache cleaning should only be initiated on CompiledMethods that have been
      // observed to be is_alive(). However, with concurrent code cache unloading, it is
      // possible that by now, the state has become !is_alive. This can happen in two ways:
      // 1) It can be racingly flipped to unloaded if the nmethod being cleaned (from the
      // sweeper) is_unloading(). This is fine, because if that happens, then the inline
      // caches have already been cleaned under the same CompiledICLocker that we now hold during
      // inline cache cleaning, and we will simply walk the inline caches again, and likely not
      // find much of interest to clean. However, this race prevents us from asserting that the
      // nmethod is_alive(). The is_unloading() function is completely monotonic; once set due
      // to an oop dying, it remains set forever until freed. Because of that, all unloaded
      // nmethods are is_unloading(), but notably, an unloaded nmethod may also subsequently
      // become zombie (when the sweeper converts it to zombie).
      // 2) It can be racingly flipped to zombie if the nmethod being cleaned (by the concurrent
      // GC) cleans a zombie nmethod that is concurrently made zombie by the sweeper. In this
      // scenario, the sweeper will first transition the nmethod to zombie, and then when
      // unregistering from the GC, it will wait until the GC is done. The GC will then clean
      // the inline caches *with IC stubs*, even though no IC stubs are needed. This is fine,
      // as long as the IC stubs are guaranteed to be released before the next safepoint, where
      // IC finalization requires live IC stubs to not be associated with zombie nmethods.
      // This is guaranteed, because the sweeper does not have a single safepoint check until
      // after it completes the whole transition function; it will wake up after the GC is
      // done with concurrent code cache cleaning (which blocks out safepoints using the
      // suspendible threads set), and then call clear_ic_callsites, which will release the
      // associated IC stubs, before a subsequent safepoint poll can be reached. This
      // guarantees that the spuriously created IC stubs are released appropriately before
      // IC finalization in a safepoint gets to run. Therefore, this race is fine. This is also
      // valid in a scenario where an inline cache of a zombie nmethod gets a spurious IC stub,
      // and then when cleaning another inline cache, fails to request an IC stub because we
      // exhausted the IC stub buffer. In this scenario, the GC will request a safepoint after
      // yielding the suspendible thread set, effectively unblocking safepoints. Before such
      // a safepoint can be reached, the sweeper similarly has to wake up, clear the IC stubs,
      // and reach the next safepoint poll, after the whole transition function has completed.
      // Due to the various races that can cause an nmethod to first be is_alive() and then
      // racingly become !is_alive(), it is unfortunately not possible to assert the nmethod
      // is_alive(), !is_unloaded() or !is_zombie() here.
      if (!ic->set_to_clean(!from->is_unloading())) {
        return false;
      }
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
  return true;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // The exception cache only needs to be cleaned if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

#ifdef ASSERT
  // Check that the metadata embedded in the nmethod is alive
  CheckClass check_class;
  metadata_do(&check_class);
#endif
  return true;
}

void CompiledMethod::run_nmethod_entry_barrier() {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm != NULL) {
    // We want to keep an invariant that nmethods found through iteration of a
    // Thread's nmethods at safepoints have gone through an entry barrier and
    // are not armed. By calling this nmethod entry barrier, it plays along and
    // acts like any other nmethod found on the stack of a thread (fewer surprises).
    nmethod* nm = as_nmethod_or_null();
    if (nm != NULL) {
      bool alive = bs_nm->nmethod_entry_barrier(nm);
      assert(alive, "should be alive");
    }
  }
}

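// The retry loop below behaves like a small transaction: all IC cleaning for
// this nmethod must complete under a single CompiledICLocker, but transitioning
// an IC may require allocating an IC stub, and the stub buffer can run dry.
// When that happens cleanup_inline_caches_impl() returns false, the locker is
// dropped, the stubs are refilled, and the whole walk is retried from scratch.
// ICRefillVerifier is, as its name suggests, the verification hook that checks
// a failed round is in fact followed by a refill.
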
void CompiledMethod::cleanup_inline_caches(bool clean_all) {
  for (;;) {
    ICRefillVerifier ic_refill_verifier;
    { CompiledICLocker ic_locker(this);
      if (cleanup_inline_caches_impl(false, clean_all)) {
        return;
      }
    }
    // Call this nmethod entry barrier from the sweeper.
    run_nmethod_entry_barrier();
    InlineCacheBuffer::refill_ic_stubs();
  }
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  bool is_in_static_stub = false;
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
          return false;
        }
      }

      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::opt_virtual_call_type:
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_call_type:
      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_stub_type: {
      is_in_static_stub = true;
      break;
    }

    case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not
      // unloading. Unless these slippery metadata relocations of the static
      // stubs are at least cleared, subsequent class redefinition operations
      // will access potentially free memory, and JavaThread execution
      // concurrent to class unloading may call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }
      is_in_static_stub = false;
      if (is_unloading()) {
        // If the nmethod itself is dying, then it may point at dead metadata.
        // Nobody should follow that metadata; it is strictly unsafe.
        continue;
      }
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != NULL && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
          Atomic::store(r->metadata_addr(), (Method*)NULL);

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
    }
  }

  return true;
}

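// Context for the function below (general HotSpot background, summarized):
// compiled code relies on implicit exceptions, e.g. letting a null receiver
// fault on the memory access instead of emitting an explicit null check, and
// letting an integer division trap on a zero divisor. The signal handler ends
// up here with the faulting pc, and the ImplicitExceptionTable maps that pc to
// the offset of the code that materializes and throws the corresponding Java
// exception.
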
address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
  // Exception happened outside inline-cache check code => we are inside
  // an active nmethod => use cpc to determine a return address
  int exception_offset = pc - code_begin();
  int cont_offset = ImplicitExceptionTable(this).continuation_offset(exception_offset);
#ifdef ASSERT
  if (cont_offset == 0) {
    Thread* thread = Thread::current();
    ResourceMark rm(thread);
    CodeBlob* cb = CodeCache::find_blob(pc);
    assert(cb != NULL && cb == this, "");
    ttyLocker ttyl;
    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
    print();
    method()->print_codes();
    print_code();
    print_pcs();
  }
#endif
  if (cont_offset == 0) {
    // Let the normal error handling report the exception
    return NULL;
  }
  if (cont_offset == exception_offset) {
#if INCLUDE_JVMCI
    Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
    JavaThread *thread = JavaThread::current();
    thread->set_jvmci_implicit_exception_pc(pc);
    thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
                                                                         Deoptimization::Action_reinterpret));
    return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
#else
    ShouldNotReachHere();
#endif
  }
  return code_begin() + cont_offset;
}

class HasEvolDependency : public MetadataClosure {
  bool _has_evol_dependency;
 public:
  HasEvolDependency() : _has_evol_dependency(false) {}
  void do_metadata(Metadata* md) {
    if (md->is_method()) {
      Method* method = (Method*)md;
      if (method->is_old()) {
        _has_evol_dependency = true;
      }
    }
  }
  bool has_evol_dependency() const { return _has_evol_dependency; }
};

bool CompiledMethod::has_evol_metadata() {
  // Check the metadata in relocIter and CompiledIC and also deoptimize
  // any nmethod that has reference to old methods.
  HasEvolDependency check_evol;
  metadata_do(&check_evol);
  if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
    ResourceMark rm;
    log_debug(redefine, class, nmethod)
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d in nmethod metadata",
             _method->method_holder()->external_name(),
             _method->name()->as_C_string(),
             _method->signature()->as_C_string(),
             compile_id());
  }
  return check_evol.has_evol_dependency();
}