// Source: src/hotspot/share/gc/z/zBarrier.inline.hpp (OpenJDK HotSpot)
/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
#define SHARE_GC_Z_ZBARRIER_INLINE_HPP

#include "gc/z/zBarrier.hpp"

#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zResurrection.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"

// A self heal must always "upgrade" the address metadata bits in
// accordance with the metadata bits state machine, which has the
// valid state transitions as described below (where N is the GC
// cycle).
//
// Note the subtleness of overlapping GC cycles.
// Specifically that
// oops are colored Remapped(N) starting at relocation N and ending
// at marking N + 1.
//
//              +--- Mark Start
//              | +--- Mark End
//              | | +--- Relocate Start
//              | | | +--- Relocate End
//              | | | |
// Marked       |---N---|--N+1--|--N+2--|----
// Finalizable  |---N---|--N+1--|--N+2--|----
// Remapped     ----|---N---|--N+1--|--N+2--|
//
// VALID STATE TRANSITIONS
//
//   Marked(N)           -> Remapped(N)
//                       -> Marked(N + 1)
//                       -> Finalizable(N + 1)
//
//   Finalizable(N)      -> Marked(N)
//                       -> Remapped(N)
//                       -> Marked(N + 1)
//                       -> Finalizable(N + 1)
//
//   Remapped(N)         -> Marked(N + 1)
//                       -> Finalizable(N + 1)
//
// PHASE VIEW
//
// ZPhaseMark
//   Load & Mark
//     Marked(N)         <- Marked(N - 1)
//                       <- Finalizable(N - 1)
//                       <- Remapped(N - 1)
//                       <- Finalizable(N)
//
//   Mark(Finalizable)
//     Finalizable(N)    <- Marked(N - 1)
//                       <- Finalizable(N - 1)
//                       <- Remapped(N - 1)
//
//   Load(AS_NO_KEEPALIVE)
//     Remapped(N - 1)   <- Marked(N - 1)
//                       <- Finalizable(N - 1)
//
// ZPhaseMarkCompleted (Resurrection blocked)
//   Load & Load(ON_WEAK/PHANTOM_OOP_REF | AS_NO_KEEPALIVE) & KeepAlive
//     Marked(N)         <- Marked(N - 1)
//                       <- Finalizable(N - 1)
//                       <- Remapped(N - 1)
//                       <- Finalizable(N)
//
//   Load(ON_STRONG_OOP_REF | AS_NO_KEEPALIVE)
//     Remapped(N - 1)   <- Marked(N - 1)
//                       <- Finalizable(N - 1)
//
// ZPhaseMarkCompleted (Resurrection unblocked)
//   Load
//     Marked(N)         <- Finalizable(N)
//
// ZPhaseRelocate
//   Load & Load(AS_NO_KEEPALIVE)
//     Remapped(N)       <- Marked(N)
//                       <- Finalizable(N)

// Atomically update the oop location *p from addr to the "good" address
// heal_addr, so that later loads take the fast path. Lock-free: retries
// the CAS until either this thread heals the slot, or another thread has
// already stored an address that satisfies fast_path (in which case the
// other thread's color is at least as strong and we must not overwrite it).
template <ZBarrierFastPath fast_path>
inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) {
  if (heal_addr == 0) {
    // Never heal with null since it interacts badly with reference processing.
    // A mutator clearing an oop would be similar to calling Reference.clear(),
    // which would make the reference non-discoverable or silently dropped
    // by the reference processor.
    return;
  }

  // The caller only heals slots whose current value fails the fast path,
  // and only with a value that passes it.
  assert(!fast_path(addr), "Invalid self heal");
  assert(fast_path(heal_addr), "Invalid self heal");

  for (;;) {
    // Heal
    const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr);
    if (prev_addr == addr) {
      // Success
      return;
    }

    if (fast_path(prev_addr)) {
      // Must not self heal
      return;
    }

    // The oop location was healed by another barrier, but still needs upgrading.
    // Re-apply healing to make sure the oop is not left with weaker (remapped or
    // finalizable) metadata bits than what this barrier tried to apply.
    assert(ZAddress::offset(prev_addr) == ZAddress::offset(heal_addr), "Invalid offset");
    addr = prev_addr;
  }
}

// Generic barrier: return o unchanged if its address already satisfies
// fast_path; otherwise run slow_path to obtain the good address, self-heal
// the field (when a location p is given), and return the good oop.
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return ZOop::from_address(addr);
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  if (p != NULL) {
    self_heal<fast_path>(p, addr, good_addr);
  }

  return ZOop::from_address(good_addr);
}

// Variant of barrier() for weak loads: the returned oop is based on the
// currently active heap view, and the field is only ever healed with the
// remapped (not marked) color, since a weak load must not keep objects alive.
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    // Return the good address instead of the weak good address
    // to ensure that the currently active heap view is used.
    return ZOop::from_address(ZAddress::good_or_null(addr));
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  if (p != NULL) {
    // The slow path returns a good/marked address or null, but we never mark
    // oops in a weak load barrier so we always heal with the remapped address.
    self_heal<fast_path>(p, addr, ZAddress::remapped_or_null(good_addr));
  }

  return ZOop::from_address(good_addr);
}

// Barrier for root oops (non-volatile location, no return value). Healing is
// a plain store rather than a CAS -- safe because roots are processed inside
// a safepoint or under a lock, never racing with mutators.
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline void ZBarrier::root_barrier(oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return;
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  // Non-atomic healing helps speed up root scanning. This is safe to do
  // since we are always healing roots in a safepoint, or under a lock,
  // which ensures we are never racing with mutators modifying roots while
  // we are healing them. It's also safe in case multiple GC threads try
  // to heal the same root if it is aligned, since they would always heal
  // the root in the same way and it does not matter in which order it
  // happens. For misaligned oops, there needs to be mutual exclusion.
  *p = ZOop::from_address(good_addr);
}

// Fast-path predicates, parameterizing the barrier templates above.
inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_good_or_null(addr);
}

inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_weak_good_or_null(addr);
}

inline bool ZBarrier::is_marked_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_marked_or_null(addr);
}

// Phase predicates, reading the global GC phase.
inline bool ZBarrier::during_mark() {
  return ZGlobalPhase == ZPhaseMark;
}

inline bool ZBarrier::during_relocate() {
  return ZGlobalPhase == ZPhaseRelocate;
}

//
// Load barrier
//
inline oop ZBarrier::load_barrier_on_oop(oop o) {
  return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
  const oop o = Atomic::load(p);
  return load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}

// Apply the load barrier to each element of an oop array of the given length.
inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
  for (volatile const oop* const end = p + length; p < end; p++) {
    load_barrier_on_oop_field(p);
  }
}

// Strong load through a weak reference field. While resurrection is blocked,
// a weak slow path is used so that dead referents are not resurrected.
inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

inline void ZBarrier::load_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}

inline void ZBarrier::load_barrier_on_invisible_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, load_barrier_on_invisible_root_oop_slow_path>(p, o);
}

//
// Weak load barrier
//
inline oop ZBarrier::weak_load_barrier_on_oop_field(volatile oop* p) {
  assert(!ZResurrection::is_blocked(), "Should not be called during resurrection blocked phase");
  const oop o = Atomic::load(p);
  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
  return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
  const oop o = Atomic::load(p);
  return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
  return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
  const oop o = Atomic::load(p);
  return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

//
// Is alive barrier
//
inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_weak_oop(o) != NULL;
}

inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_phantom_oop(o) != NULL;
}

//
// Keep alive barrier
//
inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = Atomic::load(p);
  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = Atomic::load(p);
  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}

// Keep-alive for an already-good oop: during marking, push it through the
// mark slow path so it is treated as live this cycle.
inline void ZBarrier::keep_alive_barrier_on_oop(oop o) {
  const uintptr_t addr = ZOop::to_address(o);
  assert(ZAddress::is_good(addr), "Invalid address");

  if (during_mark()) {
    keep_alive_barrier_on_oop_slow_path(addr);
  }
}

//
// Mark barrier
//
inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
  const oop o = Atomic::load(p);

  if (finalizable) {
    barrier<is_marked_or_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
  } else {
    const uintptr_t addr = ZOop::to_address(o);
    if (ZAddress::is_good(addr)) {
      // Mark through good oop
      mark_barrier_on_oop_slow_path(addr);
    } else {
      // Mark through bad oop
      barrier<is_good_or_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
    }
  }
}

// Apply the mark barrier to each element of an oop array of the given length.
inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
  for (volatile const oop* const end = p + length; p < end; p++) {
    mark_barrier_on_oop_field(p, finalizable);
  }
}

#endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP