Path: src/hotspot/share/code/vtableStubs.cpp
/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER2
#include "opto/matcher.hpp"
#endif

// -----------------------------------------------------------------------------------------
// Implementation of VtableStub

address VtableStub::_chunk             = NULL;
address VtableStub::_chunk_end         = NULL;
VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();


void* VtableStub::operator new(size_t size, int code_size) throw() {
  assert_lock_strong(VtableStubs_lock);
  assert(size == sizeof(VtableStub), "mismatched size");
  // compute real VtableStub size (rounded to nearest word)
  const int real_size = align_up(code_size + (int)sizeof(VtableStub), wordSize);
  // malloc them in chunks to minimize header overhead
  const int chunk_factor = 32;
  if (_chunk == NULL || _chunk + real_size > _chunk_end) {
    const int bytes = chunk_factor * real_size + pd_code_alignment();

    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
    // If changing the name, update the other file accordingly.
    VtableBlob* blob = VtableBlob::create("vtable chunks", bytes);
    if (blob == NULL) {
      return NULL;
    }
    _chunk = blob->content_begin();
    _chunk_end = _chunk + bytes;
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  void* res = _chunk;
  _chunk += real_size;
  align_chunk();
  return res;
}
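// Illustration of the chunk arithmetic above (hypothetical numbers, not taken
// from this file): with wordSize == 8 and, say, sizeof(VtableStub) == 24, a
// requested code_size of 60 gives real_size = align_up(60 + 24, 8) = 88 bytes.
// The first allocation then creates one "vtable chunks" blob of
// chunk_factor * 88 + pd_code_alignment() bytes, i.e. room for roughly 32
// stubs of that size before another blob has to be created.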
(with that94// hash value). Each list is anchored in a little hash _table, indexed95// by that hash value.9697VtableStub* VtableStubs::_table[VtableStubs::N];98int VtableStubs::_number_of_vtable_stubs = 0;99int VtableStubs::_vtab_stub_size = 0;100int VtableStubs::_itab_stub_size = 0;101102#if defined(PRODUCT)103// These values are good for the PRODUCT case (no tracing).104static const int first_vtableStub_size = 64;105static const int first_itableStub_size = 256;106#else107// These values are good for the non-PRODUCT case (when tracing can be switched on).108// To find out, run test workload with109// -Xlog:vtablestubs=Trace -XX:+CountCompiledCalls -XX:+DebugVtables110// and use the reported "estimate" value.111// Here is a list of observed worst-case values:112// vtable itable113// aarch64: 460 324114// arm: ? ?115// ppc (linux, BE): 404 288116// ppc (linux, LE): 356 276117// ppc (AIX): 416 296118// s390x: 408 256119// Solaris-sparc: 792 348120// x86 (Linux): 670 309121// x86 (MacOS): 682 321122static const int first_vtableStub_size = 1024;123static const int first_itableStub_size = 512;124#endif125126127void VtableStubs::initialize() {128VtableStub::_receiver_location = SharedRuntime::name_for_receiver();129{130MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);131assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once");132assert(is_power_of_2(int(N)), "N must be a power of 2");133for (int i = 0; i < N; i++) {134_table[i] = NULL;135}136}137}138139140int VtableStubs::code_size_limit(bool is_vtable_stub) {141if (is_vtable_stub) {142return _vtab_stub_size > 0 ? _vtab_stub_size : first_vtableStub_size;143} else { // itable stub144return _itab_stub_size > 0 ? _itab_stub_size : first_itableStub_size;145}146} // code_size_limit147148149void VtableStubs::check_and_set_size_limit(bool is_vtable_stub,150int code_size,151int padding) {152const char* name = is_vtable_stub ? "vtable" : "itable";153154guarantee(code_size <= code_size_limit(is_vtable_stub),155"buffer overflow in %s stub, code_size is %d, limit is %d", name, code_size, code_size_limit(is_vtable_stub));156157if (is_vtable_stub) {158if (log_is_enabled(Trace, vtablestubs)) {159if ( (_vtab_stub_size > 0) && ((code_size + padding) > _vtab_stub_size) ) {160log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",161name, _vtab_stub_size, code_size + padding);162}163}164if ( (code_size + padding) > _vtab_stub_size ) {165_vtab_stub_size = code_size + padding;166}167} else { // itable stub168if (log_is_enabled(Trace, vtablestubs)) {169if ( (_itab_stub_size > 0) && ((code_size + padding) > _itab_stub_size) ) {170log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",171name, _itab_stub_size, code_size + padding);172}173}174if ( (code_size + padding) > _itab_stub_size ) {175_itab_stub_size = code_size + padding;176}177}178return;179} // check_and_set_size_limit180181182void VtableStubs::bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,183address npe_addr, address ame_addr, bool is_vtable_stub,184int index, int slop_bytes, int index_dependent_slop) {185const char* name = is_vtable_stub ? 
"vtable" : "itable";186const int stub_length = code_size_limit(is_vtable_stub);187188if (log_is_enabled(Trace, vtablestubs)) {189log_trace(vtablestubs)("%s #%d at " PTR_FORMAT ": size: %d, estimate: %d, slop area: %d",190name, index, p2i(s->code_begin()),191(int)(masm->pc() - s->code_begin()),192stub_length,193(int)(s->code_end() - masm->pc()));194}195guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",196name, index, stub_length,197(int)(masm->pc() - s->code_begin()),198(int)(masm->pc() - s->code_end()));199assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",200name, index, index_dependent_slop,201(int)(s->code_end() - masm->pc()));202203// After the first vtable/itable stub is generated, we have a much204// better estimate for the stub size. Remember/update this205// estimate after some sanity checks.206check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);207s->set_exception_points(npe_addr, ame_addr);208}209210211address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {212assert(vtable_index >= 0, "must be positive");213214VtableStub* s;215{216MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);217s = lookup(is_vtable_stub, vtable_index);218if (s == NULL) {219if (is_vtable_stub) {220s = create_vtable_stub(vtable_index);221} else {222s = create_itable_stub(vtable_index);223}224225// Creation of vtable or itable can fail if there is not enough free space in the code cache.226if (s == NULL) {227return NULL;228}229230enter(is_vtable_stub, vtable_index, s);231if (PrintAdapterHandlers) {232tty->print_cr("Decoding VtableStub %s[%d]@" INTX_FORMAT,233is_vtable_stub? "vtbl": "itbl", vtable_index, p2i(VtableStub::receiver_location()));234Disassembler::decode(s->code_begin(), s->code_end());235}236// Notify JVMTI about this stub. The event will be recorded by the enclosing237// JvmtiDynamicCodeEventCollector and posted when this thread has released238// all locks. Only post this event if a new state is not required. Creating a new state would239// cause a safepoint and the caller of this code has a NoSafepointVerifier.240if (JvmtiExport::should_post_dynamic_code_generated()) {241JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",242s->code_begin(), s->code_end());243}244}245}246return s->entry_point();247}248249250inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){251// Assumption: receiver_location < 4 in most cases.252int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;253return (is_vtable_stub ? 
VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
  assert_lock_strong(VtableStubs_lock);
  unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
  VtableStub* s = _table[hash];
  while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
  return s;
}


void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
  assert_lock_strong(VtableStubs_lock);
  assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
  unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
  // enter s at the beginning of the corresponding list
  s->set_next(_table[h]);
  _table[h] = s;
  _number_of_vtable_stubs++;
}

VtableStub* VtableStubs::entry_point(address pc) {
  MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
  uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
  VtableStub* s;
  for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
  return (s == stub) ? s : NULL;
}

bool VtableStubs::contains(address pc) {
  // simple solution for now - we may want to use
  // a faster way if this function is called often
  return stub_containing(pc) != NULL;
}


VtableStub* VtableStubs::stub_containing(address pc) {
  // Note: No locking needed since any change to the data structure
  //       happens with an atomic store into it (we don't care about
  //       consistency with the _number_of_vtable_stubs counter).
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
      if (s->contains(pc)) return s;
    }
  }
  return NULL;
}

void vtableStubs_init() {
  VtableStubs::initialize();
}

void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
      f(s);
    }
  }
}


//-----------------------------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) {
  ResourceMark rm;
  Klass* klass = receiver->klass();
  InstanceKlass* ik = InstanceKlass::cast(klass);
  klassVtable vt = ik->vtable();
  ik->print();
  fatal("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
        "index %d (vtable length %d)",
        p2i(receiver), index, vt.length());
}

#endif // PRODUCT
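// Note: bad_compiled_vtable_index() above is a debugging aid; the
// platform-specific stub generators (the vtableStubs_<cpu>.cpp files) are
// expected to emit a call to it from their non-product, -XX:+DebugVtables
// code paths when a dispatch uses an out-of-range vtable index.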