Path: src/hotspot/share/code/codeCache.cpp
/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm_io.h"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count = 0;
    total_size = 0;
    header_size = 0;
    code_size = 0;
    stub_size = 0;
    relocation_size = 0;
    scopes_oop_size = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size = 0;
  }

  int total() { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size * 100 / total_size,
                  relocation_size * 100 / total_size,
                  code_size * 100 / total_size,
                  stub_size * 100 / total_size,
                  scopes_oop_size * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size * 100 / total_size,
                  scopes_pcs_size * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size += cb->size();
    header_size += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}

void CodeCache::initialize_heaps() {
  bool non_nmethod_set = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size = os::vm_page_size();
  size_t cache_size = ReservedCodeCacheSize;
  size_t non_nmethod_size = NonNMethodCodeHeapSize;
  size_t profiled_size = ProfiledCodeHeapSize;
  size_t non_profiled_size = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set ? non_nmethod_size : min_size),
                   (profiled_set ? profiled_size : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::c1_count();
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::c2_count();
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < min_code_cache_size) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, min_code_cache_size/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);

  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size = align_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space = rs.first_part(non_nmethod_size);
  ReservedSpace rest = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::page_size(bool aligned, size_t min_pages) {
  if (os::can_execute_large_page_memory()) {
    if (InitialCodeCacheSize < ReservedCodeCacheSize) {
      // Make sure that the page size allows for an incremental commit of the reserved space
      min_pages = MAX2(min_pages, (size_t)8);
    }
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
  } else {
    return os::vm_page_size();
  }
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Align and reserve space for code cache
  const size_t rs_ps = page_size();
  const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);
  ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (CompilerConfig::is_c1_profiling()) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass he is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool handle_alloc_failure, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::report_allocation(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap are free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
        }
      }
      if (handle_alloc_failure) {
        MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        CompileBroker::handle_full_code_cache(orig_code_blob_type);
      }
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    nmethod* ptr = (nmethod *)cb;
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (ptr->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
    ptr->free_native_invokers();
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != NULL) {
      return heap->find_blob_unsafe(start);
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(MetadataClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->metadata_do(f);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  UnloadingScope scope(is_alive);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
  while(iter.next()) {
    iter.method()->do_unloading(unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif //ASSERT
      }
    }
  }
}

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {
  if (SafepointSynchronize::is_at_safepoint()) {
    delete entry;
  } else {
    for (;;) {
      ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
      entry->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
        break;
      }
    }
  }
}

// Delete exception caches that have been concurrently unlinked,
// followed by a global handshake operation.
void CodeCache::purge_exception_caches() {
  ExceptionCache* curr = _exception_cache_purge_list;
  while (curr != NULL) {
    ExceptionCache* next = curr->purge_list_next();
    delete curr;
    curr = next;
  }
  _exception_cache_purge_list = NULL;
}

uint8_t CodeCache::_unloading_cycle = 1;

void CodeCache::increment_unloading_cycle() {
  // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
  // 0 is reserved for new methods.
  _unloading_cycle = (_unloading_cycle + 1) % 4;
  if (_unloading_cycle == 0) {
    _unloading_cycle = 1;
  }
}

CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
  : _is_unloading_behaviour(is_alive)
{
  _saved_behaviour = IsUnloadingBehaviour::current();
  IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
  increment_unloading_cycle();
  DependencyContext::cleaning_start();
}

CodeCache::UnloadingScope::~UnloadingScope() {
  IsUnloadingBehaviour::set_current(_saved_behaviour);
  DependencyContext::cleaning_end();
}

void CodeCache::verify_oops() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert (max_capacity >= unallocated_capacity, "Must be");
  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure, instead, round
  // the code cache to the page size. In particular, Solaris is moving to a larger
  // default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent of the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works only correctly, if no safepoint
  // can happen
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find an compiled_method");
  return (CompiledMethod*)cb;
}

#if INCLUDE_JVMTI
// RedefineClasses support for unloading nmethods that are dependent on "old" methods.
// We don't really expect this table to grow very large. If it does, it can become a hashtable.
static GrowableArray<CompiledMethod*>* old_compiled_method_table = NULL;

static void add_to_old_table(CompiledMethod* c) {
  if (old_compiled_method_table == NULL) {
    old_compiled_method_table = new (ResourceObj::C_HEAP, mtCode) GrowableArray<CompiledMethod*>(100, mtCode);
  }
  old_compiled_method_table->push(c);
}

static void reset_old_method_table() {
  if (old_compiled_method_table != NULL) {
    delete old_compiled_method_table;
    old_compiled_method_table = NULL;
  }
}

// Remove this method when zombied or unloaded.
void CodeCache::unregister_old_nmethod(CompiledMethod* c) {
  assert_lock_strong(CodeCache_lock);
  if (old_compiled_method_table != NULL) {
    int index = old_compiled_method_table->find(c);
    if (index != -1) {
      old_compiled_method_table->delete_at(index);
    }
  }
}

void CodeCache::old_nmethods_do(MetadataClosure* f) {
  // Walk old method table and mark those on stack.
  int length = 0;
  if (old_compiled_method_table != NULL) {
    length = old_compiled_method_table->length();
    for (int i = 0; i < length; i++) {
      CompiledMethod* cm = old_compiled_method_table->at(i);
      // Only walk alive nmethods, the dead ones will get removed by the sweeper or GC.
      if (cm->is_alive() && !cm->is_unloading()) {
        old_compiled_method_table->at(i)->metadata_do(f);
      }
    }
  }
  log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
}

// Just marks the methods in this class as needing deoptimization
void CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
}


// Walk compiled methods and mark dependent methods for deoptimization.
int CodeCache::mark_dependents_for_evol_deoptimization() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  // Each redefinition creates a new set of nmethods that have references to "old" Methods
  // So delete old method table and create a new one.
  reset_old_method_table();

  int number_of_marked_CodeBlobs = 0;
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    // Walk all alive nmethods to check for old Methods.
    // This includes methods whose inline caches point to old methods, so
    // inline cache clearing is unnecessary.
    if (nm->has_evol_metadata()) {
      nm->mark_for_deoptimization();
      add_to_old_table(nm);
      number_of_marked_CodeBlobs++;
    }
  }

  // return total count of nmethods marked for deoptimization, if zero the caller
  // can skip deoptimization
  return number_of_marked_CodeBlobs;
}

void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
      if (nm->has_evol_metadata()) {
        add_to_old_table(nm);
      }
    }
  }
}

// Flushes compiled methods dependent on redefined classes, that have already been
// marked for deoptimization.
void CodeCache::flush_evol_dependents() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // At least one nmethod has been marked for deoptimization

  Deoptimization::deoptimize_all_marked();
}
#endif // INCLUDE_JVMTI

// Mark methods for deopt (if safe or possible).
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (!nm->is_native_method()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  int marked = 0;
  if (dependee->is_linked()) {
    // Class initialization state change.
    KlassInitDepChange changes(dependee);
    marked = mark_for_deoptimization(changes);
  } else {
    // New class is loaded.
    NewKlassDepChange changes(dependee);
    marked = mark_for_deoptimization(changes);
  }

  if (marked > 0) {
    // At least one nmethod has been marked for deoptimization
    Deoptimization::deoptimize_all_marked();
  }
}

// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    Deoptimization::deoptimize_all_marked();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    {
      ttyLocker ttyl;
      tty->print("%s", s.as_string());
    }

    if (heap->full_count() == 0) {
      if (PrintCodeHeapAnalytics) {
        CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
      }
    }
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) { // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)",total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d",nmethodCount);
  tty->print_cr("\talive: %d",nmethodAlive);
  tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
  tty->print_cr("\tzombie: %d",nmethodZombie);
  tty->print_cr("\tunloaded: %d",nmethodUnloaded);
  tty->print_cr("\tjava: %d",nmethodJava);
  tty->print_cr("\tnative: %d",nmethodNative);
  tty->print_cr("runtime_stubs: %d",runtimeStubCount);
  tty->print_cr("adapters: %d",adapterCount);
  tty->print_cr("buffer blobs: %d",bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d",buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr(" #blobs = %d", number_of_blobs);
    tty->print_cr(" code size = %d", code_size);
    tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
    tty->print_cr(" map size = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  int full_count = 0;
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));

      full_count += get_codemem_full_count(heap->code_blob_type());
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
    st->print_cr(" stopped_count=%d, restarted_count=%d",
                 CompileBroker::get_total_compiler_stopped_count(),
                 CompileBroker::get_total_compiler_restarted_count());
    st->print_cr(" full_count=%d", full_count);
  }
}

void CodeCache::print_codelist(outputStream* st) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    CompiledMethod* cm = iter.method();
    ResourceMark rm;
    char* method_name = cm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 cm->compile_id(), cm->comp_level(), cm->get_state(),
                 method_name,
                 (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}

#ifdef LINUX
void CodeCache::write_perf_map() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Perf expects to find the map file at /tmp/perf-<pid>.map.
  char fname[32];
  jio_snprintf(fname, sizeof(fname), "/tmp/perf-%d.map", os::current_process_id());

  fileStream fs(fname, "w");
  if (!fs.is_open()) {
    log_warning(codecache)("Failed to create %s for perf map", fname);
    return;
  }

  AllCodeBlobsIterator iter(AllCodeBlobsIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    CodeBlob *cb = iter.method();
    ResourceMark rm;
    const char* method_name =
      cb->is_compiled() ? cb->as_compiled_method()->method()->external_name()
                        : cb->name();
    fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s",
                (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
                method_name);
  }
}
#endif // LINUX

//---< BEGIN >--- CodeHeap State Analytics.

void CodeCache::aggregate(outputStream *out, size_t granularity) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::aggregate(out, (*heap), granularity);
  }
}

void CodeCache::discard(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::discard(out, (*heap));
  }
}

void CodeCache::print_usedSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_usedSpace(out, (*heap));
  }
}

void CodeCache::print_freeSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_freeSpace(out, (*heap));
  }
}

void CodeCache::print_count(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_count(out, (*heap));
  }
}

void CodeCache::print_space(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_space(out, (*heap));
  }
}

void CodeCache::print_age(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_age(out, (*heap));
  }
}

void CodeCache::print_names(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_names(out, (*heap));
  }
}
//---< END >--- CodeHeap State Analytics.