Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/opto/locknode.cpp
/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

//=============================================================================
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

uint BoxLockNode::size_of() const { return sizeof(*this); }

BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _is_eliminated(false) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  _inmask.Insert(reg);
}
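
// Each BoxLockNode pins the stack slot pair used by one monitor.  With
// -XX:+EliminateNestedLocks every locked region keeps its own BoxLock node,
// so hash() and cmp() below are deliberately written to keep GVN from ever
// merging two BoxLock nodes.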
//-----------------------------hash--------------------------------------------
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks)
    return NO_HASH; // Each locked region has own BoxLock node
  return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
}

//------------------------------cmp--------------------------------------------
uint BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks)
    return (&n == this); // Always fail except on self
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
}

BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxNode after RA which may spill box nodes.
  while (!box->is_BoxLock()) {
    //    if (box_node->is_SpillCopy()) {
    //      Node *m = box_node->in(1);
    //      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
    //        box_node = m->in(m->as_Mach()->operand_index(2));
    //        continue;
    //      }
    //    }
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged.
    // So it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}

OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}
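
// A "simple" lock region is one in which every Lock/Unlock that uses this box
// locks the same object.  When a caller passes a non-NULL unique_lock and the
// region is covered by exactly one Lock node, that node is returned through
// unique_lock.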
// Is BoxLock node used for one simple lock region (same box and obj)?
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj) {
  LockNode* lock = NULL;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check lock's box since box could be referenced by Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != NULL) && alock->is_Lock()) {
            if (lock == NULL) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
            }
          }
        } else {
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj), "");
    }
    // Don't check monitor info in safepoints since the referenced object could
    // be different from the locked object.  It could be a Phi of different
    // cast nodes which all point to this locked object.
    // We assume that no other objects can be referenced in monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != NULL && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

uint FastLockNode::size_of() const { return sizeof(*this); }

//------------------------------cmp--------------------------------------------
uint FastLockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
uint FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//
// Create a counter which counts the number of times this lock is acquired
//
void FastLockNode::create_lock_counter(JVMState* state) {
  BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
  _counters = blnc->counters();
}

void FastLockNode::create_rtm_lock_counter(JVMState* state) {
#if INCLUDE_RTM_OPT
  Compile* C = Compile::current();
  if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
    RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
    _rtm_counters = rlnc->counters();
    if (UseRTMForStackLocks) {
      rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
      _stack_rtm_counters = rlnc->counters();
    }
  }
#endif
}

//=============================================================================
//------------------------------do_monitor_enter-------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get casted pointer.
  Node* obj = null_check(peek());
  // Check for locking null object
  if (stopped()) return;

  // the monitor object is not part of debug info expression stack
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop();                      // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}
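
// Editorial sketch (not part of the original file): how an optimization pass
// might query a box via is_simple_lock_region().  'box' and 'obj' are
// hypothetical locals standing for a BoxLockNode* and its locked object:
//
//   LockNode* unique = NULL;
//   if (box->is_simple_lock_region(&unique, obj)) {
//     // Every Lock/Unlock using 'box' locks 'obj'; 'unique' is non-NULL
//     // exactly when a single Lock node covers the whole region.
//   }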