Path: src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp
/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/rootnode.hpp"
#include "opto/type.hpp"
#include "utilities/macros.hpp"

const TypeFunc *G1BarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *G1BarrierSetC2::write_ref_field_post_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;  // Card addr
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}
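
// Note: the (original value, thread) and (card address, thread) domains above
// mirror the arguments passed at the make_leaf_call() sites later in this file:
// pre_barrier() calls G1BarrierSetRuntime::write_ref_field_pre_entry with
// (pre_val, tls), and g1_mark_card() calls
// G1BarrierSetRuntime::write_ref_field_post_entry with (card_adr, thread).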

#define __ ideal.
/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of the
 * marking are kept alive; to that end, every reference update needs to log
 * the previous reference value before it is overwritten.
 *
 * If the previous value is NULL there is no need to save the old value.
 * References that are NULL are filtered during runtime by the barrier
 * code to avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible to
 * prove at compile time that the reference about to be overwritten is NULL
 * and avoid adding the barrier code completely.
 *
 * The compiler needs to determine that the object in which a field is about
 * to be written is newly allocated, and that no prior store to the same field
 * has happened since the allocation.
 *
 * Returns true if the pre-barrier can be removed
 */
bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
                                               PhaseTransform* phase,
                                               Node* adr,
                                               BasicType bt,
                                               uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = kit->memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success:  The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects.  We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success:  The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed to not be null here from the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL so that no previous store
        // has been moved up and directly writes a reference
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}
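
// Illustration (hypothetical Java snippet, not part of this file): for
//
//   Foo f = new Foo();   // allocation captured by an InitializeNode
//   f.ref = x;           // first store to f.ref since the allocation
//
// the previous value of f.ref is provably NULL (no prior store to that offset
// was captured by the initialization), so g1_can_remove_pre_barrier() returns
// true and no SATB logging is emitted for this store.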

// G1 pre/post barriers
void G1BarrierSetC2::pre_barrier(GraphKit* kit,
                                 bool do_load,
                                 Node* ctl,
                                 Node* obj,
                                 Node* adr,
                                 uint alias_idx,
                                 Node* val,
                                 const TypeOopPtr* val_type,
                                 Node* pre_val,
                                 BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (use_ReduceInitialCardMarks()
        && g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely   = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");

  // Offsets into the thread
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  const int index_offset   = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);

  // if (marking != 0), i.e. concurrent marking is active
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
    Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node *log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), "write_ref_field_pre_entry", pre_val, tls);
      } __ end_if();  // (index != 0)
    } __ end_if();  // (pre_val != NULL)
  } __ end_if();  // (marking != 0)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}
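
// The IR built above corresponds roughly to the following pseudo-code, where
// 'active', 'index' and 'buffer' stand for the thread-local SATB queue fields
// at the offsets used above (a sketch, not literal code):
//
//   if (active != 0) {                                    // concurrent marking?
//     pre_val = *adr;                                     // only when do_load
//     if (pre_val != NULL) {
//       if (index != 0) {                                 // room left in the queue?
//         index -= sizeof(intptr_t);
//         *(oop*)(buffer + index) = pre_val;              // log the previous value
//       } else {
//         G1BarrierSetRuntime::write_ref_field_pre_entry(pre_val, thread);
//       }
//     }
//   }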

/*
 * G1, like any GC with a Young Generation, requires a way to keep track of
 * references from the Old Generation to the Young Generation to make sure all
 * live objects are found. G1 also needs to keep track of object references
 * between different regions to enable evacuation of old regions, which is done
 * as part of mixed collections. References are tracked in remembered sets,
 * which are continuously updated as references are written, with the help of
 * the post-barrier.
 *
 * To reduce the number of updates to the remembered set, the post-barrier
 * filters out updates to fields in objects located in the Young Generation,
 * updates where the field and the new value are in the same region, writes
 * of NULL, and writes to cards already marked as dirty by an earlier write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, if it can be proven at compile time that the
 * object is newly allocated and that no safepoint exists between the
 * allocation and the store.
 *
 * In the case of a slow-path allocation, the allocation code must handle the
 * barrier as part of the allocation if the allocated object is not located
 * in the nursery; this can happen for humongous objects.
 *
 * Returns true if the post barrier can be removed
 */
bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
                                                PhaseTransform* phase, Node* store,
                                                Node* adr) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  // Start search from Store node
  Node* mem = store->in(MemNode::Control);
  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

    InitializeNode* st_init = mem->in(0)->as_Initialize();
    AllocateNode*  st_alloc = st_init->allocation();

    // Make sure we are looking at the same allocation
    if (alloc == st_alloc) {
      return true;
    }
  }

  return false;
}
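
// Illustration (hypothetical Java snippet, not part of this file): for
//
//   Foo f = new Foo();   // allocation
//   f.ref = x;           // store whose control is the allocation's Initialize projection
//
// the store is dominated by the allocation of the same object with no
// safepoint in between, so (with ReduceInitialCardMarks) the card mark is
// deferred to the slow-path allocation code and the post-barrier can be elided.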

//
// Update the card table and add card address to the queue
//
void G1BarrierSetC2::g1_mark_card(GraphKit* kit,
                                  IdealKit& ideal,
                                  Node* card_adr,
                                  Node* oop_store,
                                  uint oop_alias_idx,
                                  Node* index,
                                  Node* index_adr,
                                  Node* buffer,
                                  const TypeFunc* tf) const {
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  Node* no_base = __ top();
  BasicType card_bt = T_BYTE;
  // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE.
  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);

  // Now do the queue work
  __ if_then(index, BoolTest::ne, zeroX); {

    Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
    Node* log_addr = __ AddP(no_base, buffer, next_index);

    // Order, see storeCM.
    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);

  } __ else_(); {
    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), "write_ref_field_post_entry", card_adr, __ thread());
  } __ end_if();

}
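
// In pseudo-code, with 'index' and 'buffer' standing for the thread-local
// dirty card queue fields loaded by the caller (a sketch, not literal code):
//
//   *card_adr = 0;                                        // dirty the card; ordered wrt the oop store
//   if (index != 0) {                                     // room left in the queue?
//     index -= sizeof(intptr_t);
//     *(intptr_t*)(buffer + index) = (intptr_t)card_adr;  // enqueue the card address
//   } else {
//     G1BarrierSetRuntime::write_ref_field_post_entry(card_adr, thread);
//   }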

void G1BarrierSetC2::post_barrier(GraphKit* kit,
                                  Node* ctl,
                                  Node* oop_store,
                                  Node* obj,
                                  Node* adr,
                                  uint alias_idx,
                                  Node* val,
                                  BasicType bt,
                                  bool use_precise) const {
  // If we are writing a NULL then we need no post barrier

  if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
    // Must be NULL
    const Type* t = val->bottom_type();
    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
    // No post barrier if writing NULL
    return;
  }

  if (use_ReduceInitialCardMarks() && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (use_ReduceInitialCardMarks()
      && g1_can_remove_post_barrier(kit, &kit->gvn(), oop_store, adr)) {
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  float likely = PROB_LIKELY_MAG(3);
  float unlikely = PROB_UNLIKELY_MAG(3);
  Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)G1CardTable::dirty_card_val());
  Node* zeroX = __ ConX(0);

  const TypeFunc *tf = write_ref_field_post_entry_Type();

  // Offsets into the thread
  const int index_offset  = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  const int buffer_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());

  // Pointers into the thread

  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr  = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some values
  // Use ctrl to avoid hoisting these values past a safepoint, which could
  // potentially reset these fields in the JavaThread.
  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

  // Convert the store obj pointer to an int prior to doing math on it
  // Must use ctrl to prevent "integerized oop" existing across safepoint
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide pointer by card size
  Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );

  // Combine card table base and card offset
  Node* card_adr = __ AddP(no_base, byte_map_base_node(kit), card_offset );

  // If we know the value being stored, check whether the store crosses regions.

  if (val != NULL) {
    // Does the store cause us to cross regions?

    // Should be able to do an unsigned compare of region_size instead of
    // an extra shift. Do we have an unsigned compare??
    // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
    Node* xor_res = __ URShiftX ( __ XorX( cast, __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));

    // if (xor_res == 0) same region so skip
    __ if_then(xor_res, BoolTest::ne, zeroX, likely); {

      // No barrier if we are storing a NULL
      __ if_then(val, BoolTest::ne, kit->null(), likely); {

        // Ok must mark the card if not already dirty

        // load the original value of the card
        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

        __ if_then(card_val, BoolTest::ne, young_card, unlikely); {
          kit->sync_kit(ideal);
          kit->insert_mem_bar(Op_MemBarVolatile, oop_store);
          __ sync_kit(kit);

          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
            g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
          } __ end_if();
        } __ end_if();
      } __ end_if();
    } __ end_if();
  } else {
    // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
    // We don't need a barrier here if the destination is a newly allocated object
    // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
    // are set to 'g1_young_gen' (see G1CardTable::verify_g1_young_region()).
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
    __ if_then(card_val, BoolTest::ne, young_card); {
      g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
    } __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}
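
// Putting the filters together, the emitted post-barrier corresponds roughly
// to the following pseudo-code (a sketch of the IR built above):
//
//   if ((((uintptr_t)adr ^ (uintptr_t)val) >> LogOfHRGrainBytes) != 0) {  // crosses regions?
//     if (val != NULL) {
//       CardValue* card_adr = byte_map_base + ((uintptr_t)adr >> card_shift);
//       if (*card_adr != g1_young_card_val) {
//         membar_volatile();                              // order the oop store and the card re-load
//         if (*card_adr != dirty_card_val) {
//           g1_mark_card(...);                            // dirty the card and enqueue it
//         }
//       }
//     }
//   }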

// Helper that guards and inserts a pre-barrier.
void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                        Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when G1
  // is enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also adds a memory barrier for non-volatile loads from the referent field
  // to prevent commoning of loads across safepoints.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset()) {
    // Constant offset but not the reference_offset so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if ( klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::_reference_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset());

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
    // Update graphKit memory and control from IdealKit.
    kit->sync_kit(ideal);

    Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
    Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

    // Update IdealKit memory and control from graphKit.
    __ sync_kit(kit);

    Node* one = __ ConI(1);
    // is_instof == 0 if base_oop == NULL
    __ if_then(is_instof, BoolTest::eq, one, unlikely); {

      // Update graphKit from IdealKit.
      kit->sync_kit(ideal);

      // Use the pre-barrier to record the value in the referent field
      pre_barrier(kit, false /* do_load */,
                  __ ctrl(),
                  NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                  pre_val /* pre_val */,
                  T_OBJECT);
      if (need_mem_bar) {
        // Add memory barrier to prevent commoning reads from this field
        // across safepoint since GC can change its value.
        kit->insert_mem_bar(Op_MemBarCPUOrder);
      }
      // Update IdealKit from graphKit.
      __ sync_kit(kit);

    } __ end_if(); // is_instof == 1
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __
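
// Illustration (hypothetical caller, not part of this file): an access such as
//
//   Object v = unsafe.getReference(base, offset);   // or a reflective field read
//
// where neither the concrete klass of 'base' nor a constant value of 'offset'
// can be determined at compile time passes the static filters above, so the
// runtime (offset == referent_offset && base instanceof Reference) guards are
// emitted around the pre-barrier call.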

Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  Node* adr = access.addr().node();
  Node* obj = access.base();

  bool anonymous = (decorators & C2_UNSAFE_ACCESS) != 0;
  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool is_unordered = (decorators & MO_UNORDERED) != 0;
  bool no_keepalive = (decorators & AS_NO_KEEPALIVE) != 0;
  bool is_mixed = !in_heap && !in_native;
  bool need_cpu_mem_bar = !is_unordered || mismatched || is_mixed;

  Node* top = Compile::current()->top();
  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
  Node* load = CardTableBarrierSetC2::load_at_resolved(access, val_type);

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if G1 is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // Also we need to add a memory barrier to prevent commoning reads
  // from this field across safepoints since GC can change its value.
  bool need_read_barrier = (((on_weak || on_phantom) && !no_keepalive) ||
                            (in_heap && unknown && offset != top && obj != top));

  if (!access.is_oop() || !need_read_barrier) {
    return load;
  }

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
  GraphKit* kit = parse_access.kit();

  if (on_weak || on_phantom) {
    // Use the pre-barrier to record the value in the referent field
    pre_barrier(kit, false /* do_load */,
                kit->control(),
                NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                load /* pre_val */, T_OBJECT);
    // Add memory barrier to prevent commoning reads from this field
    // across safepoint since GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (unknown) {
    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}
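
// Summary of the read-barrier decision above: a pre-barrier (plus a CPU memory
// barrier) is added either for loads decorated ON_WEAK_OOP_REF / ON_PHANTOM_OOP_REF
// that must keep the referent alive (no AS_NO_KEEPALIVE), or for in-heap loads of
// unknown reference strength (ON_UNKNOWN_OOP_REF), where insert_pre_barrier()
// additionally emits the runtime offset/instanceof filters.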

bool G1BarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (CardTableBarrierSetC2::is_gc_barrier_node(node)) {
    return true;
  }
  if (node->Opcode() != Op_CallLeaf) {
    return false;
  }
  CallLeafNode *call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "write_ref_field_pre_entry") == 0 || strcmp(call->_name, "write_ref_field_post_entry") == 0;
}

void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  assert(node->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
  // There could be only one user, the URShift node, in the Object.clone()
  // intrinsic, but then the new allocation is passed to the arraycopy stub
  // and cannot be scalar replaced, so we don't check that case.

  // Another case of only one user (Xor) is when the value check for NULL
  // in the G1 post barrier is folded after CCP, so the code which used URShift
  // is removed.

  // Take the Region node before eliminating the post barrier since it also
  // eliminates the CastP2X node when it has only one user.
  Node* this_region = node->in(0);
  assert(this_region != NULL, "");

  // Remove G1 post barrier.

  // Search for the CastP2X->Xor->URShift->Cmp path which checks if the store
  // was done to a region different from the value's region.
  // Then replace the Cmp with #0 (false) to collapse the G1 post barrier.
  Node* xorx = node->find_out_with(Op_XorX);
  if (xorx != NULL) {
    Node* shift = xorx->unique_out();
    Node* cmpx = shift->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing region check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));

    // Remove G1 pre barrier.

    // Search for the "if (marking != 0)" check and set it to "false".
    // There is no G1 pre barrier if the previously stored value is NULL
    // (for example, after initialization).
    if (this_region->is_Region() && this_region->req() == 3) {
      int ind = 1;
      if (!this_region->in(ind)->is_IfFalse()) {
        ind = 2;
      }
      if (this_region->in(ind)->is_IfFalse() &&
          this_region->in(ind)->in(0)->Opcode() == Op_If) {
        Node* bol = this_region->in(ind)->in(0)->in(1);
        assert(bol->is_Bool(), "");
        cmpx = bol->in(1);
        if (bol->as_Bool()->_test._test == BoolTest::ne &&
            cmpx->is_Cmp() && cmpx->in(2) == macro->intcon(0) &&
            cmpx->in(1)->is_Load()) {
          Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
          const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
          if (adr->is_AddP() && adr->in(AddPNode::Base) == macro->top() &&
              adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
              adr->in(AddPNode::Offset) == macro->MakeConX(marking_offset)) {
            macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
          }
        }
      }
    }
  } else {
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    // This is a G1 post barrier emitted by the Object.clone() intrinsic.
    // Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks if the card
    // is marked as young_gen and replace the Cmp with 0 (false) to collapse the barrier.
    Node* shift = node->find_out_with(Op_URShiftX);
    assert(shift != NULL, "missing G1 post barrier");
    Node* addp = shift->unique_out();
    Node* load = addp->find_out_with(Op_LoadB);
    assert(load != NULL, "missing G1 post barrier");
    Node* cmpx = load->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing card value check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
    // There is no G1 pre barrier in this case
  }
  // Now CastP2X can be removed since it is used only on a dead path,
  // which is currently still alive until igvn optimizes it.
  assert(node->outcnt() == 0 || node->unique_out()->Opcode() == Op_URShiftX, "");
  macro->replace_node(node, macro->top());
}
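
// Shape matched above when collapsing the post barrier (a sketch):
//
//   CastP2X(store addr)   CastP2X(new value)
//            \                 /
//             XorX --- URShiftX(LogOfHRGrainBytes) --- CmpX --- Bool(ne) --- If
//
// Replacing the CmpX with the CC_EQ constant makes the region-crossing test
// constant-false, after which igvn folds the barrier code away; the same trick
// is applied to the "marking != 0" CmpI guarding the pre-barrier.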

Node* G1BarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (!use_ReduceInitialCardMarks() &&
      c != NULL && c->is_Region() && c->req() == 3) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != NULL && c->in(i)->is_Region() &&
          c->in(i)->req() == 3) {
        Node* r = c->in(i);
        for (uint j = 1; j < r->req(); j++) {
          if (r->in(j) != NULL && r->in(j)->is_Proj() &&
              r->in(j)->in(0) != NULL &&
              r->in(j)->in(0)->Opcode() == Op_CallLeaf &&
              r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry)) {
            Node* call = r->in(j)->in(0);
            c = c->in(i == 1 ? 2 : 1);
            if (c != NULL && c->Opcode() != Op_Parm) {
              c = c->in(0);
              if (c != NULL) {
                c = c->in(0);
                assert(call->in(0) == NULL ||
                       call->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0)->in(0) == NULL ||
                       c == call->in(0)->in(0)->in(0)->in(0)->in(0), "bad barrier shape");
                return c;
              }
            }
          }
        }
      }
    }
  }
  return c;
}
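
// (step_over_gc_barrier() above is only effective when ReduceInitialCardMarks
// is off: it recognizes the nested Regions of the post barrier via the
// projection of the write_ref_field_post_entry leaf call and walks control
// back to the node that preceded the barrier.)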

#ifdef ASSERT
void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  if (phase != BarrierSetC2::BeforeCodeGen) {
    return;
  }
  // Verify G1 pre-barriers
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());

  Unique_Node_List visited;
  Node_List worklist;
  // We're going to walk control flow backwards starting from the Root
  worklist.push(compile->root());
  while (worklist.size() > 0) {
    Node* x = worklist.pop();
    if (x == NULL || x == compile->top()) continue;
    if (visited.member(x)) {
      continue;
    } else {
      visited.push(x);
    }

    if (x->is_Region()) {
      for (uint i = 1; i < x->req(); i++) {
        worklist.push(x->in(i));
      }
    } else {
      worklist.push(x->in(0));
      // We are looking for the pattern:
      //                            /->ThreadLocal
      // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
      //              \->ConI(0)
      // We want to verify that the If and the LoadB have the same control
      // See GraphKit::g1_write_barrier_pre()
      if (x->is_If()) {
        IfNode *iff = x->as_If();
        if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
          CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
          if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
              && cmp->in(1)->is_Load()) {
            LoadNode* load = cmp->in(1)->as_Load();
            if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
                && load->in(2)->in(3)->is_Con()
                && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {

              Node* if_ctrl = iff->in(0);
              Node* load_ctrl = load->in(0);

              if (if_ctrl != load_ctrl) {
                // Skip possible CProj->NeverBranch in infinite loops
                if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
                    && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
                  if_ctrl = if_ctrl->in(0)->in(0);
                }
              }
              assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
            }
          }
        }
      }
    }
  }
}
#endif

bool G1BarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
  if (opcode == Op_StoreP) {
    Node* adr = n->in(MemNode::Address);
    const Type* adr_type = gvn->type(adr);
    // Pointer stores in G1 barriers look like unsafe accesses.
    // Ignore such stores so that non-escaping allocations can still be
    // scalar replaced.
    if (adr_type->isa_rawptr() && adr->is_AddP()) {
      Node* base = conn_graph->get_addp_base(adr);
      if (base->Opcode() == Op_LoadP &&
          base->in(MemNode::Address)->is_AddP()) {
        adr = base->in(MemNode::Address);
        Node* tls = conn_graph->get_addp_base(adr);
        if (tls->Opcode() == Op_ThreadLocal) {
          int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          const int buf_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
          if (offs == buf_offset) {
            return true; // G1 pre barrier previous oop value store.
          }
          if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
            return true; // G1 post barrier card address store.
          }
        }
      }
    }
  }
  return false;
}
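
// Both accepted cases above are stores into the thread-local queue buffers
// emitted by this file's barriers: the previous-value store into the SATB
// queue buffer (pre_barrier) and the card-address store into the dirty card
// queue buffer (g1_mark_card). Because the buffer pointer is itself loaded
// from the JavaThread, the store's base is a LoadP from a ThreadLocal-based
// address, which is exactly the pattern matched here.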