// Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/code/codeCache.cpp
// 32285 views
/*1* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "code/codeBlob.hpp"26#include "code/codeCache.hpp"27#include "code/compiledIC.hpp"28#include "code/dependencies.hpp"29#include "code/icBuffer.hpp"30#include "code/nmethod.hpp"31#include "code/pcDesc.hpp"32#include "compiler/compileBroker.hpp"33#include "gc_implementation/shared/markSweep.hpp"34#include "jfr/jfrEvents.hpp"35#include "memory/allocation.inline.hpp"36#include "memory/gcLocker.hpp"37#include "memory/iterator.hpp"38#include "memory/resourceArea.hpp"39#include "oops/method.hpp"40#include "oops/objArrayOop.hpp"41#include "oops/oop.inline.hpp"42#include "runtime/handles.inline.hpp"43#include "runtime/arguments.hpp"44#include "runtime/deoptimization.hpp"45#include "runtime/icache.hpp"46#include "runtime/java.hpp"47#include "runtime/mutexLocker.hpp"48#include "services/memoryService.hpp"49#include "utilities/xmlstream.hpp"505152// Helper class for printing in CodeCache5354class 
CodeBlob_sizes {55private:56int count;57int total_size;58int header_size;59int code_size;60int stub_size;61int relocation_size;62int scopes_oop_size;63int scopes_metadata_size;64int scopes_data_size;65int scopes_pcs_size;6667public:68CodeBlob_sizes() {69count = 0;70total_size = 0;71header_size = 0;72code_size = 0;73stub_size = 0;74relocation_size = 0;75scopes_oop_size = 0;76scopes_metadata_size = 0;77scopes_data_size = 0;78scopes_pcs_size = 0;79}8081int total() { return total_size; }82bool is_empty() { return count == 0; }8384void print(const char* title) {85tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",86count,87title,88(int)(total() / K),89header_size * 100 / total_size,90relocation_size * 100 / total_size,91code_size * 100 / total_size,92stub_size * 100 / total_size,93scopes_oop_size * 100 / total_size,94scopes_metadata_size * 100 / total_size,95scopes_data_size * 100 / total_size,96scopes_pcs_size * 100 / total_size);97}9899void add(CodeBlob* cb) {100count++;101total_size += cb->size();102header_size += cb->header_size();103relocation_size += cb->relocation_size();104if (cb->is_nmethod()) {105nmethod* nm = cb->as_nmethod_or_null();106code_size += nm->insts_size();107stub_size += nm->stub_size();108109scopes_oop_size += nm->oops_size();110scopes_metadata_size += nm->metadata_size();111scopes_data_size += nm->scopes_data_size();112scopes_pcs_size += nm->scopes_pcs_size();113} else {114code_size += cb->code_size();115}116}117};118119// CodeCache implementation120121CodeHeap * CodeCache::_heap = new CodeHeap();122int CodeCache::_number_of_blobs = 0;123int CodeCache::_number_of_adapters = 0;124int CodeCache::_number_of_nmethods = 0;125int CodeCache::_number_of_nmethods_with_dependencies = 0;126bool CodeCache::_needs_cache_clean = false;127nmethod* CodeCache::_scavenge_root_nmethods = NULL;128129int CodeCache::_codemem_full_count = 0;130131CodeBlob* CodeCache::first() 
{132assert_locked_or_safepoint(CodeCache_lock);133return (CodeBlob*)_heap->first();134}135136137CodeBlob* CodeCache::next(CodeBlob* cb) {138assert_locked_or_safepoint(CodeCache_lock);139return (CodeBlob*)_heap->next(cb);140}141142143CodeBlob* CodeCache::alive(CodeBlob *cb) {144assert_locked_or_safepoint(CodeCache_lock);145while (cb != NULL && !cb->is_alive()) cb = next(cb);146return cb;147}148149150nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {151assert_locked_or_safepoint(CodeCache_lock);152while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);153return (nmethod*)cb;154}155156nmethod* CodeCache::first_nmethod() {157assert_locked_or_safepoint(CodeCache_lock);158CodeBlob* cb = first();159while (cb != NULL && !cb->is_nmethod()) {160cb = next(cb);161}162return (nmethod*)cb;163}164165nmethod* CodeCache::next_nmethod (CodeBlob* cb) {166assert_locked_or_safepoint(CodeCache_lock);167cb = next(cb);168while (cb != NULL && !cb->is_nmethod()) {169cb = next(cb);170}171return (nmethod*)cb;172}173174static size_t maxCodeCacheUsed = 0;175176CodeBlob* CodeCache::allocate(int size, bool is_critical) {177// Do not seize the CodeCache lock here--if the caller has not178// already done so, we are going to lose bigtime, since the code179// cache will contain a garbage CodeBlob until the caller can180// run the constructor for the CodeBlob subclass he is busy181// instantiating.182guarantee(size >= 0, "allocation request must be reasonable");183assert_locked_or_safepoint(CodeCache_lock);184CodeBlob* cb = NULL;185_number_of_blobs++;186while (true) {187cb = (CodeBlob*)_heap->allocate(size, is_critical);188if (cb != NULL) break;189if (!_heap->expand_by(CodeCacheExpansionSize)) {190// Expansion failed191if (CodeCache_lock->owned_by_self()) {192MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);193report_codemem_full();194} else {195report_codemem_full();196}197return NULL;198}199if (PrintCodeCacheExtension) {200ResourceMark rm;201tty->print_cr("code 
cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",202(intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),203(address)_heap->high() - (address)_heap->low_boundary());204}205}206maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -207(address)_heap->low_boundary()) - unallocated_capacity());208verify_if_often();209print_trace("allocation", cb, size);210return cb;211}212213void CodeCache::free(CodeBlob* cb) {214assert_locked_or_safepoint(CodeCache_lock);215verify_if_often();216217print_trace("free", cb);218if (cb->is_nmethod()) {219_number_of_nmethods--;220if (((nmethod *)cb)->has_dependencies()) {221_number_of_nmethods_with_dependencies--;222}223}224if (cb->is_adapter_blob()) {225_number_of_adapters--;226}227_number_of_blobs--;228229_heap->deallocate(cb);230231verify_if_often();232assert(_number_of_blobs >= 0, "sanity check");233}234235236void CodeCache::commit(CodeBlob* cb) {237// this is called by nmethod::nmethod, which must already own CodeCache_lock238assert_locked_or_safepoint(CodeCache_lock);239if (cb->is_nmethod()) {240_number_of_nmethods++;241if (((nmethod *)cb)->has_dependencies()) {242_number_of_nmethods_with_dependencies++;243}244}245if (cb->is_adapter_blob()) {246_number_of_adapters++;247}248249// flush the hardware I-cache250ICache::invalidate_range(cb->content_begin(), cb->content_size());251}252253254void CodeCache::flush() {255assert_locked_or_safepoint(CodeCache_lock);256Unimplemented();257}258259260// Iteration over CodeBlobs261262#define FOR_ALL_BLOBS(var) for (CodeBlob *var = first() ; var != NULL; var = next(var) )263#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))264#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))265266267bool CodeCache::contains(void *p) {268// It should be ok to call contains without holding a lock269return 
_heap->contains(p);270}271272273// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not274// looked up (i.e., one that has been marked for deletion). It only dependes on the _segmap to contain275// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.276CodeBlob* CodeCache::find_blob(void* start) {277CodeBlob* result = find_blob_unsafe(start);278if (result == NULL) return NULL;279// We could potientially look up non_entrant methods280guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");281return result;282}283284nmethod* CodeCache::find_nmethod(void* start) {285CodeBlob *cb = find_blob(start);286assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");287return (nmethod*)cb;288}289290291void CodeCache::blobs_do(void f(CodeBlob* nm)) {292assert_locked_or_safepoint(CodeCache_lock);293FOR_ALL_BLOBS(p) {294f(p);295}296}297298299void CodeCache::nmethods_do(void f(nmethod* nm)) {300assert_locked_or_safepoint(CodeCache_lock);301FOR_ALL_BLOBS(nm) {302if (nm->is_nmethod()) f((nmethod*)nm);303}304}305306void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {307assert_locked_or_safepoint(CodeCache_lock);308FOR_ALL_ALIVE_NMETHODS(nm) {309f(nm);310}311}312313int CodeCache::alignment_unit() {314return (int)_heap->alignment_unit();315}316317318int CodeCache::alignment_offset() {319return (int)_heap->alignment_offset();320}321322323// Mark nmethods for unloading if they contain otherwise unreachable324// oops.325void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {326assert_locked_or_safepoint(CodeCache_lock);327FOR_ALL_ALIVE_NMETHODS(nm) {328nm->do_unloading(is_alive, unloading_occurred);329}330}331332void CodeCache::blobs_do(CodeBlobClosure* f) {333assert_locked_or_safepoint(CodeCache_lock);334FOR_ALL_ALIVE_BLOBS(cb) {335f->do_code_blob(cb);336337#ifdef ASSERT338if 
(cb->is_nmethod())339((nmethod*)cb)->verify_scavenge_root_oops();340#endif //ASSERT341}342}343344// Walk the list of methods which might contain non-perm oops.345void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {346assert_locked_or_safepoint(CodeCache_lock);347348if (UseG1GC || UseShenandoahGC) {349return;350}351352const bool fix_relocations = f->fix_relocations();353debug_only(mark_scavenge_root_nmethods());354355nmethod* prev = NULL;356nmethod* cur = scavenge_root_nmethods();357while (cur != NULL) {358debug_only(cur->clear_scavenge_root_marked());359assert(cur->scavenge_root_not_marked(), "");360assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");361362bool is_live = (!cur->is_zombie() && !cur->is_unloaded());363#ifndef PRODUCT364if (TraceScavenge) {365cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();366}367#endif //PRODUCT368if (is_live) {369// Perform cur->oops_do(f), maybe just once per nmethod.370f->do_code_blob(cur);371}372nmethod* const next = cur->scavenge_root_link();373// The scavengable nmethod list must contain all methods with scavengable374// oops. 
It is safe to include more nmethod on the list, but we do not375// expect any live non-scavengable nmethods on the list.376if (fix_relocations) {377if (!is_live || !cur->detect_scavenge_root_oops()) {378unlink_scavenge_root_nmethod(cur, prev);379} else {380prev = cur;381}382}383cur = next;384}385386// Check for stray marks.387debug_only(verify_perm_nmethods(NULL));388}389390void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {391assert_locked_or_safepoint(CodeCache_lock);392393if (UseG1GC || UseShenandoahGC) {394return;395}396397nm->set_on_scavenge_root_list();398nm->set_scavenge_root_link(_scavenge_root_nmethods);399set_scavenge_root_nmethods(nm);400print_trace("add_scavenge_root", nm);401}402403void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {404assert_locked_or_safepoint(CodeCache_lock);405406assert((prev == NULL && scavenge_root_nmethods() == nm) ||407(prev != NULL && prev->scavenge_root_link() == nm), "precondition");408409assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");410411print_trace("unlink_scavenge_root", nm);412if (prev == NULL) {413set_scavenge_root_nmethods(nm->scavenge_root_link());414} else {415prev->set_scavenge_root_link(nm->scavenge_root_link());416}417nm->set_scavenge_root_link(NULL);418nm->clear_on_scavenge_root_list();419}420421void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {422assert_locked_or_safepoint(CodeCache_lock);423424if (UseG1GC || UseShenandoahGC) {425return;426}427428print_trace("drop_scavenge_root", nm);429nmethod* prev = NULL;430for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {431if (cur == nm) {432unlink_scavenge_root_nmethod(cur, prev);433return;434}435prev = cur;436}437assert(false, "should have been on list");438}439440void CodeCache::prune_scavenge_root_nmethods() {441assert_locked_or_safepoint(CodeCache_lock);442443if (UseG1GC || UseShenandoahGC) {444return;445}446447debug_only(mark_scavenge_root_nmethods());448449nmethod* 
last = NULL;450nmethod* cur = scavenge_root_nmethods();451while (cur != NULL) {452nmethod* next = cur->scavenge_root_link();453debug_only(cur->clear_scavenge_root_marked());454assert(cur->scavenge_root_not_marked(), "");455assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");456457if (!cur->is_zombie() && !cur->is_unloaded()458&& cur->detect_scavenge_root_oops()) {459// Keep it. Advance 'last' to prevent deletion.460last = cur;461} else {462// Prune it from the list, so we don't have to look at it any more.463print_trace("prune_scavenge_root", cur);464unlink_scavenge_root_nmethod(cur, last);465}466cur = next;467}468469// Check for stray marks.470debug_only(verify_perm_nmethods(NULL));471}472473#ifndef PRODUCT474void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {475if (UseG1GC || UseShenandoahGC) {476return;477}478479// While we are here, verify the integrity of the list.480mark_scavenge_root_nmethods();481for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {482assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");483cur->clear_scavenge_root_marked();484}485verify_perm_nmethods(f);486}487488// Temporarily mark nmethods that are claimed to be on the non-perm list.489void CodeCache::mark_scavenge_root_nmethods() {490FOR_ALL_ALIVE_BLOBS(cb) {491if (cb->is_nmethod()) {492nmethod *nm = (nmethod*)cb;493assert(nm->scavenge_root_not_marked(), "clean state");494if (nm->on_scavenge_root_list())495nm->set_scavenge_root_marked();496}497}498}499500// If the closure is given, run it on the unlisted nmethods.501// Also make sure that the effects of mark_scavenge_root_nmethods is gone.502void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {503FOR_ALL_ALIVE_BLOBS(cb) {504bool call_f = (f_or_null != NULL);505if (cb->is_nmethod()) {506nmethod *nm = (nmethod*)cb;507assert(nm->scavenge_root_not_marked(), "must be already processed");508if (nm->on_scavenge_root_list())509call_f 
= false; // don't show this one to the client510nm->verify_scavenge_root_oops();511} else {512call_f = false; // not an nmethod513}514if (call_f) f_or_null->do_code_blob(cb);515}516}517#endif //PRODUCT518519void CodeCache::verify_clean_inline_caches() {520#ifdef ASSERT521FOR_ALL_ALIVE_BLOBS(cb) {522if (cb->is_nmethod()) {523nmethod* nm = (nmethod*)cb;524assert(!nm->is_unloaded(), "Tautology");525nm->verify_clean_inline_caches();526nm->verify();527}528}529#endif530}531532void CodeCache::verify_icholder_relocations() {533#ifdef ASSERT534// make sure that we aren't leaking icholders535int count = 0;536FOR_ALL_BLOBS(cb) {537if (cb->is_nmethod()) {538nmethod* nm = (nmethod*)cb;539count += nm->verify_icholder_relocations();540}541}542543assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==544CompiledICHolder::live_count(), "must agree");545#endif546}547548void CodeCache::gc_prologue() {549}550551void CodeCache::gc_epilogue() {552assert_locked_or_safepoint(CodeCache_lock);553NOT_DEBUG(if (needs_cache_clean())) {554FOR_ALL_ALIVE_BLOBS(cb) {555if (cb->is_nmethod()) {556nmethod *nm = (nmethod*)cb;557assert(!nm->is_unloaded(), "Tautology");558DEBUG_ONLY(if (needs_cache_clean())) {559nm->cleanup_inline_caches();560}561DEBUG_ONLY(nm->verify());562DEBUG_ONLY(nm->verify_oop_relocations());563}564}565}566set_needs_cache_clean(false);567prune_scavenge_root_nmethods();568569verify_icholder_relocations();570}571572void CodeCache::verify_oops() {573MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);574VerifyOopClosure voc;575FOR_ALL_ALIVE_BLOBS(cb) {576if (cb->is_nmethod()) {577nmethod *nm = (nmethod*)cb;578nm->oops_do(&voc);579nm->verify_oop_relocations();580}581}582}583584585address CodeCache::first_address() {586assert_locked_or_safepoint(CodeCache_lock);587return (address)_heap->low_boundary();588}589590591address CodeCache::last_address() {592assert_locked_or_safepoint(CodeCache_lock);593return 
(address)_heap->high();594}595596/**597* Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache598* is free, reverse_free_ratio() returns 4.599*/600double CodeCache::reverse_free_ratio() {601double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);602double max_capacity = (double)CodeCache::max_capacity();603return max_capacity / unallocated_capacity;604}605606void icache_init();607608void CodeCache::initialize() {609assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");610#ifdef COMPILER2611assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");612#endif613assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");614// This was originally just a check of the alignment, causing failure, instead, round615// the code cache to the page size. In particular, Solaris is moving to a larger616// default page size.617CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());618InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());619ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());620if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {621vm_exit_during_initialization("Could not reserve enough space for code cache");622}623624MemoryService::add_code_heap_memory_pool(_heap);625626// Initialize ICache flush mechanism627// This service is needed for os::register_code_area628icache_init();629630// Give OS a chance to register generated code area.631// This is used on Windows 64 bit platforms to register632// Structured Exception Handlers for our generated code.633os::register_code_area(_heap->low_boundary(), _heap->high_boundary());634}635636637void codeCache_init() 
{638CodeCache::initialize();639}640641//------------------------------------------------------------------------------------------------642643int CodeCache::number_of_nmethods_with_dependencies() {644return _number_of_nmethods_with_dependencies;645}646647void CodeCache::clear_inline_caches() {648assert_locked_or_safepoint(CodeCache_lock);649FOR_ALL_ALIVE_NMETHODS(nm) {650nm->clear_inline_caches();651}652}653654#ifndef PRODUCT655// used to keep track of how much time is spent in mark_for_deoptimization656static elapsedTimer dependentCheckTime;657static int dependentCheckCount = 0;658#endif // PRODUCT659660661int CodeCache::mark_for_deoptimization(DepChange& changes) {662MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);663664#ifndef PRODUCT665dependentCheckTime.start();666dependentCheckCount++;667#endif // PRODUCT668669int number_of_marked_CodeBlobs = 0;670671// search the hierarchy looking for nmethods which are affected by the loading of this class672673// then search the interfaces this class implements looking for nmethods674// which might be dependent of the fact that an interface only had one675// implementor.676677{ No_Safepoint_Verifier nsv;678for (DepChange::ContextStream str(changes, nsv); str.next(); ) {679Klass* d = str.klass();680number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);681}682}683684if (VerifyDependencies) {685// Turn off dependency tracing while actually testing deps.686NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );687FOR_ALL_ALIVE_NMETHODS(nm) {688if (!nm->is_marked_for_deoptimization() &&689nm->check_all_dependencies()) {690ResourceMark rm;691tty->print_cr("Should have been marked for deoptimization:");692changes.print();693nm->print();694nm->print_dependencies();695}696}697}698699#ifndef PRODUCT700dependentCheckTime.stop();701#endif // PRODUCT702703return number_of_marked_CodeBlobs;704}705706707#ifdef HOTSWAP708int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle 
dependee) {709MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);710int number_of_marked_CodeBlobs = 0;711712// Deoptimize all methods of the evolving class itself713Array<Method*>* old_methods = dependee->methods();714for (int i = 0; i < old_methods->length(); i++) {715ResourceMark rm;716Method* old_method = old_methods->at(i);717nmethod *nm = old_method->code();718if (nm != NULL) {719nm->mark_for_deoptimization();720number_of_marked_CodeBlobs++;721}722}723724FOR_ALL_ALIVE_NMETHODS(nm) {725if (nm->is_marked_for_deoptimization()) {726// ...Already marked in the previous pass; don't count it again.727} else if (nm->is_evol_dependent_on(dependee())) {728ResourceMark rm;729nm->mark_for_deoptimization();730number_of_marked_CodeBlobs++;731} else {732// flush caches in case they refer to a redefined Method*733nm->clear_inline_caches();734}735}736737return number_of_marked_CodeBlobs;738}739#endif // HOTSWAP740741742// Deoptimize all methods743void CodeCache::mark_all_nmethods_for_deoptimization() {744MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);745FOR_ALL_ALIVE_NMETHODS(nm) {746if (!nm->method()->is_method_handle_intrinsic()) {747nm->mark_for_deoptimization();748}749}750}751752753int CodeCache::mark_for_deoptimization(Method* dependee) {754MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);755int number_of_marked_CodeBlobs = 0;756757FOR_ALL_ALIVE_NMETHODS(nm) {758if (nm->is_dependent_on_method(dependee)) {759ResourceMark rm;760nm->mark_for_deoptimization();761number_of_marked_CodeBlobs++;762}763}764765return number_of_marked_CodeBlobs;766}767768void CodeCache::make_marked_nmethods_not_entrant() {769assert_locked_or_safepoint(CodeCache_lock);770FOR_ALL_ALIVE_NMETHODS(nm) {771if (nm->is_marked_for_deoptimization()) {772nm->make_not_entrant();773}774}775}776777void CodeCache::verify() {778_heap->verify();779FOR_ALL_ALIVE_BLOBS(p) {780p->verify();781}782}783784void CodeCache::report_codemem_full() 
{785_codemem_full_count++;786EventCodeCacheFull event;787if (event.should_commit()) {788event.set_codeBlobType((u1)CodeBlobType::All);789event.set_startAddress((u8)low_bound());790event.set_commitedTopAddress((u8)high());791event.set_reservedTopAddress((u8)high_bound());792event.set_entryCount(nof_blobs());793event.set_methodCount(nof_nmethods());794event.set_adaptorCount(nof_adapters());795event.set_unallocatedCapacity(unallocated_capacity()/K);796event.set_fullCount(_codemem_full_count);797event.commit();798}799}800801//------------------------------------------------------------------------------------------------802// Non-product version803804#ifndef PRODUCT805806void CodeCache::verify_if_often() {807if (VerifyCodeCacheOften) {808_heap->verify();809}810}811812void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {813if (PrintCodeCache2) { // Need to add a new flag814ResourceMark rm;815if (size == 0) size = cb->size();816tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);817}818}819820void CodeCache::print_internals() {821int nmethodCount = 0;822int runtimeStubCount = 0;823int adapterCount = 0;824int deoptimizationStubCount = 0;825int uncommonTrapStubCount = 0;826int bufferBlobCount = 0;827int total = 0;828int nmethodAlive = 0;829int nmethodNotEntrant = 0;830int nmethodZombie = 0;831int nmethodUnloaded = 0;832int nmethodJava = 0;833int nmethodNative = 0;834int maxCodeSize = 0;835ResourceMark rm;836837CodeBlob *cb;838for (cb = first(); cb != NULL; cb = next(cb)) {839total++;840if (cb->is_nmethod()) {841nmethod* nm = (nmethod*)cb;842843if (Verbose && nm->method() != NULL) {844ResourceMark rm;845char *method_name = nm->method()->name_and_sig_as_C_string();846tty->print("%s", method_name);847if(nm->is_alive()) { tty->print_cr(" alive"); }848if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }849if(nm->is_zombie()) { tty->print_cr(" zombie"); }850}851852nmethodCount++;853854if(nm->is_alive()) { 
nmethodAlive++; }855if(nm->is_not_entrant()) { nmethodNotEntrant++; }856if(nm->is_zombie()) { nmethodZombie++; }857if(nm->is_unloaded()) { nmethodUnloaded++; }858if(nm->is_native_method()) { nmethodNative++; }859860if(nm->method() != NULL && nm->is_java_method()) {861nmethodJava++;862if (nm->insts_size() > maxCodeSize) {863maxCodeSize = nm->insts_size();864}865}866} else if (cb->is_runtime_stub()) {867runtimeStubCount++;868} else if (cb->is_deoptimization_stub()) {869deoptimizationStubCount++;870} else if (cb->is_uncommon_trap_stub()) {871uncommonTrapStubCount++;872} else if (cb->is_adapter_blob()) {873adapterCount++;874} else if (cb->is_buffer_blob()) {875bufferBlobCount++;876}877}878879int bucketSize = 512;880int bucketLimit = maxCodeSize / bucketSize + 1;881int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);882memset(buckets,0,sizeof(int) * bucketLimit);883884for (cb = first(); cb != NULL; cb = next(cb)) {885if (cb->is_nmethod()) {886nmethod* nm = (nmethod*)cb;887if(nm->is_java_method()) {888buckets[nm->insts_size() / bucketSize]++;889}890}891}892tty->print_cr("Code Cache Entries (total of %d)",total);893tty->print_cr("-------------------------------------------------");894tty->print_cr("nmethods: %d",nmethodCount);895tty->print_cr("\talive: %d",nmethodAlive);896tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);897tty->print_cr("\tzombie: %d",nmethodZombie);898tty->print_cr("\tunloaded: %d",nmethodUnloaded);899tty->print_cr("\tjava: %d",nmethodJava);900tty->print_cr("\tnative: %d",nmethodNative);901tty->print_cr("runtime_stubs: %d",runtimeStubCount);902tty->print_cr("adapters: %d",adapterCount);903tty->print_cr("buffer blobs: %d",bufferBlobCount);904tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);905tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);906tty->print_cr("\nnmethod size distribution (non-zombie java)");907tty->print_cr("-------------------------------------------------");908909for(int i=0; i<bucketLimit; i++) 
{910if(buckets[i] != 0) {911tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);912tty->fill_to(40);913tty->print_cr("%d",buckets[i]);914}915}916917FREE_C_HEAP_ARRAY(int, buckets, mtCode);918}919920#endif // !PRODUCT921922void CodeCache::print() {923print_summary(tty);924925#ifndef PRODUCT926if (!Verbose) return;927928CodeBlob_sizes live;929CodeBlob_sizes dead;930931FOR_ALL_BLOBS(p) {932if (!p->is_alive()) {933dead.add(p);934} else {935live.add(p);936}937}938939tty->print_cr("CodeCache:");940941tty->print_cr("nmethod dependency checking time %f, per dependent %f", dependentCheckTime.seconds(),942dependentCheckTime.seconds() / dependentCheckCount);943944if (!live.is_empty()) {945live.print("live");946}947if (!dead.is_empty()) {948dead.print("dead");949}950951952if (WizardMode) {953// print the oop_map usage954int code_size = 0;955int number_of_blobs = 0;956int number_of_oop_maps = 0;957int map_size = 0;958FOR_ALL_BLOBS(p) {959if (p->is_alive()) {960number_of_blobs++;961code_size += p->code_size();962OopMapSet* set = p->oop_maps();963if (set != NULL) {964number_of_oop_maps += set->size();965map_size += set->heap_size();966}967}968}969tty->print_cr("OopMaps");970tty->print_cr(" #blobs = %d", number_of_blobs);971tty->print_cr(" code size = %d", code_size);972tty->print_cr(" #oop_maps = %d", number_of_oop_maps);973tty->print_cr(" map size = %d", map_size);974}975976#endif // !PRODUCT977}978979void CodeCache::print_summary(outputStream* st, bool detailed) {980size_t total = (_heap->high_boundary() - _heap->low_boundary());981st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT982"Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",983total/K, (total - unallocated_capacity())/K,984maxCodeCacheUsed/K, unallocated_capacity()/K);985986if (detailed) {987st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",988p2i(_heap->low_boundary()),989p2i(_heap->high()),990p2i(_heap->high_boundary()));991st->print_cr(" total_blobs=" 
UINT32_FORMAT " nmethods=" UINT32_FORMAT992" adapters=" UINT32_FORMAT,993nof_blobs(), nof_nmethods(), nof_adapters());994st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?995"enabled" : Arguments::mode() == Arguments::_int ?996"disabled (interpreter mode)" :997"disabled (not enough contiguous free space left)");998}999}10001001void CodeCache::log_state(outputStream* st) {1002st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"1003" adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",1004nof_blobs(), nof_nmethods(), nof_adapters(),1005unallocated_capacity());1006}100710081009