Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/oops/cpCache.cpp
/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "memory/universe.inline.hpp"
#include "oops/cpCache.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
# include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#endif // INCLUDE_ALL_GCS

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// Implementation of ConstantPoolCacheEntry

void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");
  _indices = index;
  _f1 = NULL;
  _f2 = _flags = 0;
  assert(constant_pool_index() == index, "");
}

int ConstantPoolCacheEntry::make_flags(TosState state,
                                       int option_bits,
                                       int field_index_or_method_params) {
  assert(state < number_of_states, "Invalid state in make_flags");
  int f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params;
  // Preserve existing flag bit values
  // The low bits are a field offset, or else the method parameter size.
#ifdef ASSERT
  TosState old_state = flag_state();
  assert(old_state == (TosState)0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f) ;
}

void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift));
}

void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift));
}

// Sets f1, ordering with previous writes.
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
  assert(f1 != NULL, "");
  OrderAccess::release_store_ptr((HeapWord*) &_f1, f1);
}
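
// For orientation only (the authoritative bit assignments are the shift and
// mask constants in cpCache.hpp): the setters above treat the entry words
// roughly as
//   _indices: [ bytecode_2 (1 byte) | bytecode_1 (1 byte) | constant pool index (16 bits) ]
//   _flags:   [ TosState | option bits (final, volatile, vfinal, ...) | field index or parameter size ]
// which is why initialize_entry() asserts index < 0x10000 and why each
// set_bytecode_* call can publish its byte with a single release store.
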
// Sets flags, but only if the value was previously zero.
bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) {
  intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0);
  return (result == 0);
}

// Note that concurrent update of both bytecodes can leave one of them
// reset to zero. This is harmless; the interpreter will simply re-resolve
// the damaged entry. More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile,
                                       Klass* root_klass) {
  set_f1(field_holder());
  set_f2(field_offset);
  assert((field_index & field_index_mask) == field_index,
         "field index does not fit in low flag bits");
  set_field_flags(field_type,
                  ((is_volatile ? 1 : 0) << is_volatile_shift) |
                  ((is_final    ? 1 : 0) << is_final_shift),
                  field_index);
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}

void ConstantPoolCacheEntry::set_parameter_size(int value) {
  // This routine is called only in corner cases where the CPCE is not yet initialized.
  // See AbstractInterpreter::deopt_continue_after_entry.
  assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value,
         err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
  // Setting the parameter size by itself is only safe if the
  // current value of _flags is 0, otherwise another thread may have
  // updated it and we don't want to overwrite that value. Don't
  // bother trying to update it once it's nonzero but always make
  // sure that the final parameter size agrees with what was passed.
  if (_flags == 0) {
    Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0);
  }
  guarantee(parameter_size() == value,
            err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
}

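// Descriptive summary (the code below is authoritative): with vtable_index < 0
// the entry is linked for a direct call: invokespecial/invokestatic keep the
// Method* in f1, and a statically bindable invokevirtual/invokeinterface is
// recorded as vfinal with the Method* in f2. With vtable_index >= 0 the vtable
// slot is stored in f2 for true virtual dispatch.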
void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
                                                       methodHandle method,
                                                       int vtable_index,
                                                       bool sender_is_interface) {
  bool is_vtable_call = (vtable_index >= 0);  // FIXME: split this method on this boolean
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");

  int byte_no = -1;
  bool change_to_virtual = false;

  switch (invoke_code) {
  case Bytecodes::_invokeinterface:
    // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
    // instruction somehow links to a non-interface method (in Object).
    // In that case, the method has no itable index and must be invoked as a virtual.
    // Set a flag to keep track of this corner case.
    change_to_virtual = true;

    // ...and fall through as if we were handling invokevirtual:
  case Bytecodes::_invokevirtual:
    {
      if (!is_vtable_call) {
        assert(method->can_be_statically_bound(), "");
        // set_f2_as_vfinal_method checks if is_vfinal flag is true.
        set_method_flags(as_TosState(method->result_type()),
                         ( 1 << is_vfinal_shift) |
                         ((method->is_final_method() ? 1 : 0) << is_final_shift) |
                         ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
                         method()->size_of_parameters());
        set_f2_as_vfinal_method(method());
      } else {
        assert(!method->can_be_statically_bound(), "");
        assert(vtable_index >= 0, "valid index");
        assert(!method->is_final_method(), "sanity");
        set_method_flags(as_TosState(method->result_type()),
                         ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
                         method()->size_of_parameters());
        set_f2(vtable_index);
      }
      byte_no = 2;
      break;
    }

  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    assert(!is_vtable_call, "");
    // Note: Read and preserve the value of the is_vfinal flag on any
    // invokevirtual bytecode shared with this constant pool cache entry.
    // It is cheap and safe to consult is_vfinal() at all times.
    // Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
    set_method_flags(as_TosState(method->result_type()),
                     ((is_vfinal() ? 1 : 0) << is_vfinal_shift) |
                     ((method->is_final_method() ? 1 : 0) << is_final_shift),
                     method()->size_of_parameters());
    set_f1(method());
    byte_no = 1;
    break;
  default:
    ShouldNotReachHere();
    break;
  }

  // Note: byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    assert(invoke_code != Bytecodes::_invokevirtual &&
           invoke_code != Bytecodes::_invokeinterface, "");
    bool do_resolve = true;
    // Don't mark invokespecial to method as resolved if sender is an interface. The receiver
    // has to be checked that it is a subclass of the current class every time this bytecode
    // is executed.
    if (invoke_code == Bytecodes::_invokespecial && sender_is_interface &&
        method->name() != vmSymbols::object_initializer_name()) {
      do_resolve = false;
    }
    // Don't mark invokestatic to method as resolved if the holder class has not yet completed
    // initialization. An invokestatic must only proceed if the class is initialized, but if
    // we resolve it before then that class initialization check is skipped.
    if (invoke_code == Bytecodes::_invokestatic && !method->method_holder()->is_initialized()) {
      do_resolve = false;
    }
    if (do_resolve) {
      set_bytecode_1(invoke_code);
    }
  } else if (byte_no == 2) {
    if (change_to_virtual) {
      assert(invoke_code == Bytecodes::_invokeinterface, "");
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We do not set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved. In this
      // case, the method gets reresolved with caller for each interface call
      // because the actual selected method may not be public.
      //
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
    } else {
      assert(invoke_code == Bytecodes::_invokevirtual, "");
    }
    // set up for invokevirtual, even if linking for invokeinterface also:
    set_bytecode_2(Bytecodes::_invokevirtual);
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}

void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, methodHandle method,
                                             bool sender_is_interface) {
  int index = Method::nonvirtual_vtable_index;
  // index < 0; FIXME: inline and customize set_direct_or_vtable_call
  set_direct_or_vtable_call(invoke_code, method, index, sender_is_interface);
}

void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
  // either the method is a miranda or its holder should accept the given index
  assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), "");
  // index >= 0; FIXME: inline and customize set_direct_or_vtable_call
  set_direct_or_vtable_call(invoke_code, method, index, false);
}

void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code,
                                             KlassHandle referenced_klass,
                                             methodHandle method, int index) {
  assert(method->method_holder()->verify_itable_index(index), "");
  assert(invoke_code == Bytecodes::_invokeinterface, "");
  InstanceKlass* interf = method->method_holder();
  assert(interf->is_interface(), "must be an interface");
  assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
  set_f1(referenced_klass());
  set_f2((intx)method());
  set_method_flags(as_TosState(method->result_type()),
                   0,  // no option bits
                   method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}


void ConstantPoolCacheEntry::set_method_handle(constantPoolHandle cpool, const CallInfo &call_info) {
  set_method_handle_common(cpool, Bytecodes::_invokehandle, call_info);
}

void ConstantPoolCacheEntry::set_dynamic_call(constantPoolHandle cpool, const CallInfo &call_info) {
  set_method_handle_common(cpool, Bytecodes::_invokedynamic, call_info);
}

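// In outline (a reading aid; the comments inside the method are the source of
// truth): a successfully linked invokehandle/invokedynamic entry ends up with
// _flags carrying the TosState, the is_final/has_appendix/has_method_type bits
// and the adapter's parameter size, resolved_references[f2 + offset] holding
// the appendix and MethodType oops when present, _f1 holding the adapter
// Method* (published last, with a release store), and bytecode_1 set to the
// invoke bytecode so the entry reads as resolved.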
void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
                                                      Bytecodes::Code invoke_code,
                                                      const CallInfo &call_info) {
  // NOTE: This CPCE can be the subject of data races.
  // There are three words to update: flags, refs[f2], f1 (in that order).
  // Writers must store all other values before f1.
  // Readers must test f1 first for non-null before reading other fields.
  // Competing writers must acquire exclusive access via a lock.
  // A losing writer waits on the lock until the winner writes f1 and leaves
  // the lock, so that when the losing writer returns, he can use the linked
  // cache entry.

  MonitorLockerEx ml(cpool->lock());
  if (!is_f1_null()) {
    return;
  }

  const methodHandle adapter = call_info.resolved_method();
  const Handle appendix      = call_info.resolved_appendix();
  const Handle method_type   = call_info.resolved_method_type();
  const bool has_appendix    = appendix.not_null();
  const bool has_method_type = method_type.not_null();

  // Write the flags.
  set_method_flags(as_TosState(adapter->result_type()),
                   ((has_appendix    ? 1 : 0) << has_appendix_shift   ) |
                   ((has_method_type ? 1 : 0) << has_method_type_shift) |
                   (                   1      << is_final_shift       ),
                   adapter->size_of_parameters());

  if (TraceInvokeDynamic) {
    tty->print_cr("set_method_handle bc=%d appendix=" PTR_FORMAT "%s method_type=" PTR_FORMAT "%s method=" PTR_FORMAT " ",
                  invoke_code,
                  (void *)appendix(), (has_appendix ? "" : " (unused)"),
                  (void *)method_type(), (has_method_type ? "" : " (unused)"),
                  (intptr_t)adapter());
    adapter->print();
    if (has_appendix) appendix()->print();
  }

  // Method handle invokes and invokedynamic sites use both cp cache words.
  // refs[f2], if not null, contains a value passed as a trailing argument to the adapter.
  // In the general case, this could be the call site's MethodType,
  // for use with java.lang.Invokers.checkExactType, or else a CallSite object.
  // f1 contains the adapter method which manages the actual call.
  // In the general case, this is a compiled LambdaForm.
  // (The Java code is free to optimize these calls by binding other
  // sorts of methods and appendices to call sites.)
  // JVM-level linking is via f1, as if for invokespecial, and signatures are erased.
  // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
  // Even with the appendix, the method will never take more than 255 parameter slots.
  //
  // This means that given a call site like (List)mh.invoke("foo"),
  // the f1 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
  // not '(Ljava/lang/String;)Ljava/util/List;'.
  // The fact that String and List are involved is encoded in the MethodType in refs[f2].
  // This allows us to create fewer method oops, while keeping type safety.
  //

  objArrayHandle resolved_references = cpool->resolved_references();
  // Store appendix, if any.
  if (has_appendix) {
    const int appendix_index = f2_as_index() + _indy_resolved_references_appendix_offset;
    assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(appendix_index) == NULL, "init just once");
    resolved_references->obj_at_put(appendix_index, appendix());
  }

  // Store MethodType, if any.
  if (has_method_type) {
    const int method_type_index = f2_as_index() + _indy_resolved_references_method_type_offset;
    assert(method_type_index >= 0 && method_type_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(method_type_index) == NULL, "init just once");
    resolved_references->obj_at_put(method_type_index, method_type());
  }

  release_set_f1(adapter());  // This must be the last one to set (see NOTE above)!

  // The interpreter assembly code does not check byte_2,
  // but it is used by is_resolved, method_if_resolved, etc.
  set_bytecode_1(invoke_code);
  NOT_PRODUCT(verify(tty));
  if (TraceInvokeDynamic) {
    this->print(tty, 0);
  }
}

Method* ConstantPoolCacheEntry::method_if_resolved(constantPoolHandle cpool) {
  // Decode the action of set_method and set_interface_call
  Bytecodes::Code invoke_code = bytecode_1();
  if (invoke_code != (Bytecodes::Code)0) {
    Metadata* f1 = f1_ord();
    if (f1 != NULL) {
      switch (invoke_code) {
      case Bytecodes::_invokeinterface:
        assert(f1->is_klass(), "");
        return klassItable::method_for_itable_index((Klass*)f1, f2_as_index());
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokespecial:
        assert(!has_appendix(), "");
      case Bytecodes::_invokehandle:
      case Bytecodes::_invokedynamic:
        assert(f1->is_method(), "");
        return (Method*)f1;
      }
    }
  }
  invoke_code = bytecode_2();
  if (invoke_code != (Bytecodes::Code)0) {
    switch (invoke_code) {
    case Bytecodes::_invokevirtual:
      if (is_vfinal()) {
        // invokevirtual
        Method* m = f2_as_vfinal_method();
        assert(m->is_method(), "");
        return m;
      } else {
        int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
        if (cpool->tag_at(holder_index).is_klass()) {
          Klass* klass = cpool->resolved_klass_at(holder_index);
          if (!klass->oop_is_instance())
            klass = SystemDictionary::Object_klass();
          return InstanceKlass::cast(klass)->method_at_vtable(f2_as_index());
        }
      }
      break;
    }
  }
  return NULL;
}


oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) {
  if (!has_appendix())
    return NULL;
  const int ref_index = f2_as_index() + _indy_resolved_references_appendix_offset;
  objArrayOop resolved_references = cpool->resolved_references();
  return resolved_references->obj_at(ref_index);
}


oop ConstantPoolCacheEntry::method_type_if_resolved(constantPoolHandle cpool) {
  if (!has_method_type())
    return NULL;
  const int ref_index = f2_as_index() + _indy_resolved_references_method_type_offset;
  objArrayOop resolved_references = cpool->resolved_references();
  return resolved_references->obj_at(ref_index);
}


#if INCLUDE_JVMTI

void log_adjust(const char* entry_type, Method* old_method, Method* new_method, bool* trace_name_printed) {
  if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
    if (!(*trace_name_printed)) {
      // RC_TRACE_MESG macro has an embedded ResourceMark
      RC_TRACE_MESG(("adjust: name=%s",
                     old_method->method_holder()->external_name()));
      *trace_name_printed = true;
    }
    // RC_TRACE macro has an embedded ResourceMark
    RC_TRACE(0x00400000, ("cpc %s entry update: %s(%s)",
                          entry_type,
                          new_method->name()->as_C_string(),
                          new_method->signature()->as_C_string()));
  }
}

// RedefineClasses() API support:
// If this ConstantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
void ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
                                                 Method* new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    if (f2_as_vfinal_method() == old_method) {
      // match old_method so need an update
      // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
      _f2 = (intptr_t)new_method;
    }
    return;
  }

  assert (_f1 != NULL, "should not call with uninteresting entry");

  if (!(_f1->is_method())) {
    // _f1 is a Klass* for an interface, _f2 is the method
    if (f2_as_interface_method() == old_method) {
      _f2 = (intptr_t)new_method;
      log_adjust("interface", old_method, new_method, trace_name_printed);
    }
  } else if (_f1 == old_method) {
    _f1 = new_method;
    log_adjust("special, static or dynamic", old_method, new_method, trace_name_printed);
  }
}

// a constant pool cache entry should never contain old or obsolete methods
bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() {
  Method* m = get_interesting_method_entry(NULL);
  // return false if m refers to a non-deleted old or obsolete method
  if (m != NULL) {
    assert(m->is_valid() && m->is_method(), "m is a valid method");
    return !m->is_old() && !m->is_obsolete(); // old is always set for old and obsolete
  } else {
    return true;
  }
}

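// Descriptive note (the code below is authoritative): returns the Method* this
// entry resolves to, but only for method entries whose holder matches k (or any
// holder when k is NULL). Field entries and non-vfinal entries with a NULL _f1
// (ordinary virtual entries) return NULL, since they do not record a Method*
// directly.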
Method* ConstantPoolCacheEntry::get_interesting_method_entry(Klass* k) {
  if (!is_method_entry()) {
    // not a method entry so not interesting by default
    return NULL;
  }
  Method* m = NULL;
  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    m = f2_as_vfinal_method();
  } else if (is_f1_null()) {
    // NULL _f1 means this is a virtual entry so also not interesting
    return NULL;
  } else {
    if (!(_f1->is_method())) {
      // _f1 is a Klass* for an interface
      m = f2_as_interface_method();
    } else {
      m = f1_as_method();
    }
  }
  assert(m != NULL && m->is_method(), "sanity check");
  if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) {
    // robustness for above sanity checks or method is not in
    // the interesting class
    return NULL;
  }
  // the method is in the interesting class so the entry is interesting
  return m;
}
#endif // INCLUDE_JVMTI

void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
  // print separator
  if (index == 0) st->print_cr(" -------------");
  // print entry
  st->print("%3d (" PTR_FORMAT ") ", index, (intptr_t)this);
  st->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(),
               constant_pool_index());
  st->print_cr(" [ " PTR_FORMAT "]", (intptr_t)_f1);
  st->print_cr(" [ " PTR_FORMAT "]", (intptr_t)_f2);
  st->print_cr(" [ " PTR_FORMAT "]", (intptr_t)_flags);
  st->print_cr(" -------------");
}

void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}

// Implementation of ConstantPoolCache

ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
                                               const intStack& index_map,
                                               const intStack& invokedynamic_index_map,
                                               const intStack& invokedynamic_map, TRAPS) {

  const int length = index_map.length() + invokedynamic_index_map.length();
  int size = ConstantPoolCache::size(length);

  return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD)
    ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map);
}

void ConstantPoolCache::initialize(const intArray& inverse_index_map,
                                   const intArray& invokedynamic_inverse_index_map,
                                   const intArray& invokedynamic_references_map) {
  for (int i = 0; i < inverse_index_map.length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map[i];
    e->initialize_entry(original_index);
    assert(entry_at(i) == e, "sanity");
  }

  // Append invokedynamic entries at the end
  int invokedynamic_offset = inverse_index_map.length();
  for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) {
    int offset = i + invokedynamic_offset;
    ConstantPoolCacheEntry* e = entry_at(offset);
    int original_index = invokedynamic_inverse_index_map[i];
    e->initialize_entry(original_index);
    assert(entry_at(offset) == e, "sanity");
  }

  for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
    const int cpci = invokedynamic_references_map[ref];
    if (cpci >= 0) {
#ifdef ASSERT
      // invokedynamic and invokehandle have more entries; check if they
      // all point to the same constant pool cache entry.
      for (int entry = 1; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
        const int cpci_next = invokedynamic_references_map[ref + entry];
        assert(cpci == cpci_next, err_msg_res("%d == %d", cpci, cpci_next));
      }
#endif
      entry_at(cpci)->initialize_resolved_reference_index(ref);
      ref += ConstantPoolCacheEntry::_indy_resolved_references_entries - 1;  // skip extra entries
    }
  }
}

#if INCLUDE_JVMTI
// RedefineClasses() API support:
// If any entry of this ConstantPoolCache points to any of
// old_methods, replace it with the corresponding new_method.
void ConstantPoolCache::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
  for (int i = 0; i < length(); i++) {
    ConstantPoolCacheEntry* entry = entry_at(i);
    Method* old_method = entry->get_interesting_method_entry(holder);
    if (old_method == NULL || !old_method->is_old()) {
      continue; // skip uninteresting entries
    }
    if (old_method->is_deleted()) {
      // clean up entries with deleted methods
      entry->initialize_entry(entry->constant_pool_index());
      continue;
    }
    Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());

    assert(new_method != NULL, "method_with_idnum() should not be NULL");
    assert(old_method != new_method, "sanity check");

    entry_at(i)->adjust_method_entry(old_method, new_method, trace_name_printed);
  }
}

// the constant pool cache should never contain old or obsolete methods
bool ConstantPoolCache::check_no_old_or_obsolete_entries() {
  for (int i = 1; i < length(); i++) {
    if (entry_at(i)->get_interesting_method_entry(NULL) != NULL &&
        !entry_at(i)->check_no_old_or_obsolete_entries()) {
      return false;
    }
  }
  return true;
}

void ConstantPoolCache::dump_cache() {
  for (int i = 1; i < length(); i++) {
    if (entry_at(i)->get_interesting_method_entry(NULL) != NULL) {
      entry_at(i)->print(tty, i);
    }
  }
}
#endif // INCLUDE_JVMTI


// Printing

void ConstantPoolCache::print_on(outputStream* st) const {
  assert(is_constantPoolCache(), "obj must be constant pool cache");
  st->print_cr("%s", internal_name());
  // print constant pool cache entries
  for (int i = 0; i < length(); i++) entry_at(i)->print(st, i);
}

void ConstantPoolCache::print_value_on(outputStream* st) const {
  assert(is_constantPoolCache(), "obj must be constant pool cache");
  st->print("cache [%d]", length());
  print_address_on(st);
  st->print(" for ");
  constant_pool()->print_value_on(st);
}


// Verification

void ConstantPoolCache::verify_on(outputStream* st) {
  guarantee(is_constantPoolCache(), "obj must be constant pool cache");
  // print constant pool cache entries
  for (int i = 0; i < length(); i++) entry_at(i)->verify(st);
}