// Source: src/hotspot/share/gc/shared/c2/modRefBarrierSetC2.cpp (OpenJDK HotSpot)
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/narrowptrnode.hpp"
#include "gc/shared/c2/modRefBarrierSetC2.hpp"
#include "utilities/macros.hpp"

// Emit a (possibly barriered) store at parse time. Oop stores into the heap
// (or at unknown-offset references) are bracketed by the pre_barrier/post_barrier
// hooks; presumably these are implemented by the concrete GC's barrier set —
// confirm against modRefBarrierSetC2.hpp.
Node* ModRefBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  const TypePtr* adr_type = access.addr().type();
  Node* adr = access.addr().node();

  bool is_array = (decorators & IS_ARRAY) != 0;
  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  // Array stores and stores through an unknown oop reference request a
  // "precise" post barrier; the flag is simply forwarded to post_barrier().
  bool use_precise = is_array || anonymous;
  bool tightly_coupled_alloc = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;

  // No barriers needed for: non-oop values, stores that initialize a
  // tightly coupled (freshly allocated) object, or stores that are neither
  // in-heap nor through an unknown oop reference.
  if (!access.is_oop() || tightly_coupled_alloc || (!in_heap && !anonymous)) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  // Barrier emission below requires a GraphKit, which only exists while parsing.
  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
  GraphKit* kit = parse_access.kit();

  uint adr_idx = kit->C->get_alias_index(adr_type);
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");

  // Pre barrier loads the old value itself (do_load = true), so no pre_val
  // is supplied here.
  pre_barrier(kit, true /* do_load */, kit->control(), access.base(), adr, adr_idx, val.node(),
              static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
  Node* store = BarrierSetC2::store_at_resolved(access, val);
  post_barrier(kit, kit->control(), access.raw_access(), access.base(), adr, adr_idx, val.node(),
               access.type(), use_precise);

  return store;
}

// Value-returning compare-and-exchange of an oop field. The expected value is
// handed to the pre barrier as pre_val (do_load = false: no extra load is
// emitted), and the post barrier is emitted unconditionally — see the comment
// in atomic_cmpxchg_bool_at_resolved() for why the exchange variant cannot
// conditionalize it.
Node* ModRefBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                         Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();

  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
  }

  pre_barrier(kit, false /* do_load */,
              kit->control(), NULL, NULL, max_juint, NULL, NULL,
              expected_val /* pre_val */, T_OBJECT);

  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);

  post_barrier(kit, kit->control(), access.raw_access(), access.base(),
               access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);

  return result;
}

// Boolean compare-and-swap of an oop field. Unlike the value-returning
// variant above, the CAS result is a flag we can branch on, so the post
// barrier is emitted only on the success path.
Node* ModRefBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                          Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();

  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  }

  pre_barrier(kit, false /* do_load */,
              kit->control(), NULL, NULL, max_juint, NULL, NULL,
              expected_val /* pre_val */, T_OBJECT);

  Node* load_store = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);

  // Emit the post barrier only when the actual store happened. This makes sense
  // to check only for LS_cmp_* that can fail to set the value.
  // LS_cmp_exchange does not produce any branches by default, so there is no
  // boolean result to piggyback on. TODO: When we merge CompareAndSwap with
  // CompareAndExchange and move branches here, it would make sense to conditionalize
  // post_barriers for LS_cmp_exchange as well.
  //
  // CAS success path is marked more likely since we anticipate this is a performance
  // critical path, while CAS failure path can use the penalty for going through unlikely
  // path as backoff. Which is still better than doing a store barrier there.
  IdealKit ideal(kit);
  ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
    kit->sync_kit(ideal);
    post_barrier(kit, ideal.ctrl(), access.raw_access(), access.base(),
                 access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
    ideal.sync_kit(kit);
  } ideal.end_if();
  kit->final_sync(ideal);

  return load_store;
}

// Atomic exchange of an oop field. The xchg itself yields the old value,
// which doubles as the pre barrier's pre_val.
Node* ModRefBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();

  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
  if (!access.is_oop()) {
    return result;
  }

  // Don't need to load pre_val. The old value is returned by load_store.
  // The pre_barrier can execute after the xchg as long as no safepoint
  // gets inserted between them.
  pre_barrier(kit, false /* do_load */,
              kit->control(), NULL, NULL, max_juint, NULL, NULL,
              result /* pre_val */, T_OBJECT);
  post_barrier(kit, kit->control(), access.raw_access(), access.base(), access.addr().node(),
               access.alias_idx(), new_val, T_OBJECT, true);

  return result;
}