Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/code/codeCache.hpp
/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_CODECACHE_HPP
#define SHARE_VM_CODE_CODECACHE_HPP

#include "code/codeBlob.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"

// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled Java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlobs.

// Implementation:
//   - Each CodeBlob occupies one chunk of memory.
//   - Like the offset table in oldspace, the zone has a table for
//     locating a method given the address of an instruction.
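//
// A minimal usage sketch (illustrative only): clients typically walk the cache
// through the iteration interface declared below, conventionally while holding
// CodeCache_lock, e.g.
//
//   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   for (CodeBlob* cb = CodeCache::first(); cb != NULL; cb = CodeCache::next(cb)) {
//     if (cb->is_nmethod()) {
//       // examine the compiled method
//     }
//   }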

class OopClosure;
class DepChange;
class ShenandoahParallelCodeCacheIterator;

class CodeCache : AllStatic {
  friend class VMStructs;
  friend class ShenandoahParallelCodeCacheIterator;
 private:
  // CodeHeap is malloc()'ed at startup and never deleted during shutdown,
  // so that the generated assembly code is always there when it's needed.
  // This may cause a memory leak, but is necessary, for now. See 4423824,
  // 4422213 or 4436291 for details.
  static CodeHeap* _heap;
  static int _number_of_blobs;
  static int _number_of_adapters;
  static int _number_of_nmethods;
  static int _number_of_nmethods_with_dependencies;
  static bool _needs_cache_clean;
  static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()

  static void verify_if_often() PRODUCT_RETURN;

  static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
  static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;

  static int _codemem_full_count;

  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
  static void prune_scavenge_root_nmethods();
  static void unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev);

 public:

  // Initialization
  static void initialize();

  static void report_codemem_full();

  // Allocation/administration
  static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
  static void commit(CodeBlob* cb);                     // called when the allocated CodeBlob has been filled
  static int  alignment_unit();                         // guaranteed alignment of all CodeBlobs
  static int  alignment_offset();                       // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
  static void free(CodeBlob* cb);                       // frees a CodeBlob
  static void flush();                                  // flushes all CodeBlobs
  static bool contains(void* p);                        // returns whether p is included
  static void blobs_do(void f(CodeBlob* cb));           // iterates over all CodeBlobs
  static void blobs_do(CodeBlobClosure* f);             // iterates over all CodeBlobs
  static void nmethods_do(void f(nmethod* nm));         // iterates over all nmethods
  static void alive_nmethods_do(void f(nmethod* nm));   // iterates over all alive nmethods

  // Lookup
  static CodeBlob* find_blob(void* start);
  static nmethod*  find_nmethod(void* start);

  // Lookup that does not fail if you look up a zombie method (if you call this,
  // be sure to know what you are doing).
  static CodeBlob* find_blob_unsafe(void* start) {
    // NMT can walk the stack before the code cache is created.
    if (_heap == NULL) return NULL;

    CodeBlob* result = (CodeBlob*)_heap->find_start(start);
    // This assert is too strong because the heap code will return the
    // heapblock containing start. That block can often be larger than
    // the CodeBlob itself. If you look up an address that is within
    // the heapblock but not in the CodeBlob, you will assert.
    //
    // Most things will not look up such bad addresses. However,
    // AsyncGetCallTrace can see intermediate frames and get that kind
    // of invalid address, and so can a developer using hsfind.
    //
    // The more correct answer is to return NULL if blob_contains() returns
    // false.
    // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");

    if (result != NULL && !result->blob_contains((address)start)) {
      result = NULL;
    }
    return result;
  }
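
  // A tolerant lookup sketch (illustrative only), e.g. for debugging or
  // profiling code that may be handed an arbitrary pc:
  //
  //   CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
  //   if (cb != NULL && cb->is_nmethod()) {
  //     nmethod* nm = (nmethod*) cb;
  //     // pc falls inside a compiled method; nm can now be inspected
  //   }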

  // Iteration
  static CodeBlob* first();
  static CodeBlob* next(CodeBlob* cb);
  static CodeBlob* alive(CodeBlob* cb);
  static nmethod*  alive_nmethod(CodeBlob* cb);
  static nmethod*  first_nmethod();
  static nmethod*  next_nmethod(CodeBlob* cb);
  static int nof_blobs()     { return _number_of_blobs; }
  static int nof_adapters()  { return _number_of_adapters; }
  static int nof_nmethods()  { return _number_of_nmethods; }

  // GC support
  static void gc_epilogue();
  static void gc_prologue();
  static void verify_oops();
  // If "unloading_occurred" is true, then unloads (i.e., breaks root links
  // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
  // to "true" iff some code got unloaded.
  static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;

  // Apply f to every live code blob in scavengable nmethods. Prune nmethods
  // from the list of scavengable nmethods if f->fix_relocations() and an nmethod
  // no longer has scavengable oops. If f->fix_relocations(), then f must copy
  // objects to their new location immediately to avoid fixing nmethods on the
  // basis of the old object locations.
  static void scavenge_root_nmethods_do(CodeBlobToOopClosure* f);

  static nmethod* scavenge_root_nmethods()  { return _scavenge_root_nmethods; }
  static void add_scavenge_root_nmethod(nmethod* nm);
  static void drop_scavenge_root_nmethod(nmethod* nm);

  // Printing/debugging
  static void print();           // prints summary
  static void print_internals();
  static void verify();          // verifies the code cache
  static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
  static void print_summary(outputStream* st, bool detailed = true); // prints a summary of the code cache usage
  static void log_state(outputStream* st);

  // The full limits of the codeCache
  static address low_bound()   { return (address) _heap->low_boundary(); }
  static address high_bound()  { return (address) _heap->high_boundary(); }
  static address high()        { return (address) _heap->high(); }

  // Profiling
  static address first_address();  // first address used for CodeBlobs
  static address last_address();   // last address used for CodeBlobs
  static size_t capacity()              { return _heap->capacity(); }
  static size_t max_capacity()          { return _heap->max_capacity(); }
  static size_t unallocated_capacity()  { return _heap->unallocated_capacity(); }
  static double reverse_free_ratio();

  static bool needs_cache_clean()            { return _needs_cache_clean; }
  static void set_needs_cache_clean(bool v)  { _needs_cache_clean = v; }
  static void clear_inline_caches();         // clear all inline caches

  static void verify_clean_inline_caches();
  static void verify_icholder_relocations();

  // Deoptimization
  static int  mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
  static int  mark_for_evol_deoptimization(instanceKlassHandle dependee);
#endif // HOTSWAP

  static void mark_all_nmethods_for_deoptimization();
  static int  mark_for_deoptimization(Method* dependee);
  static void make_marked_nmethods_not_entrant();
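
  // A rough sketch of the intended flow (illustrative only): dependents are
  // first marked, then made not entrant at a safepoint, e.g.
  //
  //   int marked = CodeCache::mark_for_deoptimization(dependee);
  //   if (marked > 0) {
  //     // at a safepoint, e.g. inside a VM operation such as VM_Deoptimize
  //     CodeCache::make_marked_nmethods_not_entrant();
  //   }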

  // tells how many nmethods have dependencies
  static int number_of_nmethods_with_dependencies();

  static int get_codemem_full_count() { return _codemem_full_count; }
};

#endif // SHARE_VM_CODE_CODECACHE_HPP