Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp
38920 views
/*1* Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.2*3* This code is free software; you can redistribute it and/or modify it4* under the terms of the GNU General Public License version 2 only, as5* published by the Free Software Foundation.6*7* This code is distributed in the hope that it will be useful, but WITHOUT8* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or9* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License10* version 2 for more details (a copy is included in the LICENSE file that11* accompanied this code).12*13* You should have received a copy of the GNU General Public License version14* 2 along with this work; if not, write to the Free Software Foundation,15* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.16*17* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA18* or visit www.oracle.com if you need additional information or have any19* questions.20*21*/2223#include "precompiled.hpp"24#include "memory/allocation.hpp"2526#include "gc_implementation/shared/gcTimer.hpp"27#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"2829#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"30#include "gc_implementation/shenandoah/shenandoahClosures.inline.hpp"31#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"32#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"33#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"34#include "gc_implementation/shenandoah/shenandoahControlThread.hpp"35#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"36#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"37#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"38#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp"39#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"40#include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp"41#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"42#include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp"43#include "gc_implementation/shenandoah/shenandoahMetrics.hpp"44#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"45#include "gc_implementation/shenandoah/shenandoahPacer.inline.hpp"46#include "gc_implementation/shenandoah/shenandoahPadding.hpp"47#include "gc_implementation/shenandoah/shenandoahParallelCleaning.hpp"48#include "gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp"49#include "gc_implementation/shenandoah/shenandoahTaskqueue.hpp"50#include "gc_implementation/shenandoah/shenandoahUtils.hpp"51#include "gc_implementation/shenandoah/shenandoahVerifier.hpp"52#include "gc_implementation/shenandoah/shenandoahCodeRoots.hpp"53#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp"54#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"55#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"56#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp"57#include "gc_implementation/shenandoah/mode/shenandoahIUMode.hpp"58#include "gc_implementation/shenandoah/mode/shenandoahPassiveMode.hpp"59#include "gc_implementation/shenandoah/mode/shenandoahSATBMode.hpp"60#if INCLUDE_JFR61#include "gc_implementation/shenandoah/shenandoahJfrSupport.hpp"62#endif6364#include "memory/metaspace.hpp"65#include "runtime/vmThread.hpp"66#include "services/mallocTracker.hpp"6768ShenandoahHeap* ShenandoahHeap::_heap = NULL;6970class ShenandoahPretouchHeapTask : public AbstractGangTask {71private:72ShenandoahRegionIterator _regions;73const size_t _page_size;74public:75ShenandoahPretouchHeapTask(size_t page_size) :76AbstractGangTask("Shenandoah Pretouch Heap"),77_page_size(page_size) {}7879virtual void work(uint worker_id) {80ShenandoahHeapRegion* r = _regions.next();81while (r != NULL) {82if (r->is_committed()) {83os::pretouch_memory((char *) r->bottom(), (char *) r->end());84}85r = _regions.next();86}87}88};8990class ShenandoahPretouchBitmapTask : public AbstractGangTask {91private:92ShenandoahRegionIterator _regions;93char* _bitmap_base;94const size_t _bitmap_size;95const size_t _page_size;96public:97ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :98AbstractGangTask("Shenandoah Pretouch Bitmap"),99_bitmap_base(bitmap_base),100_bitmap_size(bitmap_size),101_page_size(page_size) {}102103virtual void work(uint worker_id) {104ShenandoahHeapRegion* r = _regions.next();105while (r != NULL) {106size_t start = r->index() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();107size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();108assert (end <= _bitmap_size, err_msg("end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size));109110if (r->is_committed()) {111os::pretouch_memory(_bitmap_base + start, _bitmap_base + end);112}113114r = _regions.next();115}116}117};118119jint ShenandoahHeap::initialize() {120CollectedHeap::pre_initialize();121122//123// Figure out heap sizing124//125126size_t init_byte_size = collector_policy()->initial_heap_byte_size();127size_t min_byte_size = collector_policy()->min_heap_byte_size();128size_t max_byte_size = collector_policy()->max_heap_byte_size();129size_t heap_alignment = collector_policy()->heap_alignment();130131size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();132133Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");134Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");135136_num_regions = ShenandoahHeapRegion::region_count();137138// Now we know the number of regions, initialize the heuristics.139initialize_heuristics();140141size_t num_committed_regions = init_byte_size / reg_size_bytes;142num_committed_regions = MIN2(num_committed_regions, _num_regions);143assert(num_committed_regions <= _num_regions, "sanity");144_initial_size = num_committed_regions * reg_size_bytes;145146size_t num_min_regions = min_byte_size / reg_size_bytes;147num_min_regions = MIN2(num_min_regions, _num_regions);148assert(num_min_regions <= _num_regions, "sanity");149_minimum_size = num_min_regions * reg_size_bytes;150151// Default to max heap size.152_soft_max_size = _num_regions * reg_size_bytes;153154_committed = _initial_size;155156size_t heap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();157size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();158size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();159160//161// Reserve and commit memory for heap162//163164ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);165_reserved.set_word_size(0);166_reserved.set_start((HeapWord*)heap_rs.base());167_reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));168_heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);169_heap_region_special = heap_rs.special();170171assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,172err_msg("Misaligned heap: " PTR_FORMAT, p2i(base())));173174#if SHENANDOAH_OPTIMIZED_MARKTASK175// The optimized ObjArrayChunkedTask takes some bits away from the full object bits.176// Fail if we ever attempt to address more than we can.177if ((uintptr_t)(heap_rs.base() + heap_rs.size()) >= ShenandoahMarkTask::max_addressable()) {178FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"179"but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"180"VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",181p2i(heap_rs.base()), p2i(heap_rs.base() + heap_rs.size()), ShenandoahMarkTask::max_addressable());182vm_exit_during_initialization("Fatal Error", buf);183}184#endif185186ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);187if (!_heap_region_special) {188os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,189"Cannot commit heap memory");190}191192//193// Reserve and commit memory for bitmap(s)194//195196_bitmap_size = MarkBitMap::compute_size(heap_rs.size());197_bitmap_size = align_size_up(_bitmap_size, bitmap_page_size);198199size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();200201guarantee(bitmap_bytes_per_region != 0,202err_msg("Bitmap bytes per region should not be zero"));203guarantee(is_power_of_2(bitmap_bytes_per_region),204err_msg("Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region));205206if (bitmap_page_size > bitmap_bytes_per_region) {207_bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;208_bitmap_bytes_per_slice = bitmap_page_size;209} else {210_bitmap_regions_per_slice = 1;211_bitmap_bytes_per_slice = bitmap_bytes_per_region;212}213214guarantee(_bitmap_regions_per_slice >= 1,215err_msg("Should have at least one region per slice: " SIZE_FORMAT,216_bitmap_regions_per_slice));217218guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,219err_msg("Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,220_bitmap_bytes_per_slice, bitmap_page_size));221222ReservedSpace bitmap(_bitmap_size, bitmap_page_size);223MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);224_bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);225_bitmap_region_special = bitmap.special();226227size_t bitmap_init_commit = _bitmap_bytes_per_slice *228align_size_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;229bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);230if (!_bitmap_region_special) {231os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,232"Cannot commit bitmap memory");233}234235_marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);236237if (ShenandoahVerify) {238ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);239if (!verify_bitmap.special()) {240os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,241"Cannot commit verification bitmap memory");242}243MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);244MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);245_verification_bit_map.initialize(_heap_region, verify_bitmap_region);246_verifier = new ShenandoahVerifier(this, &_verification_bit_map);247}248249// Reserve aux bitmap for use in object_iterate(). We don't commit it here.250ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);251MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);252_aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);253_aux_bitmap_region_special = aux_bitmap.special();254_aux_bit_map.initialize(_heap_region, _aux_bitmap_region);255256//257// Create regions and region sets258//259size_t region_align = align_size_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);260size_t region_storage_size = align_size_up(region_align * _num_regions, region_page_size);261region_storage_size = align_size_up(region_storage_size, os::vm_allocation_granularity());262263ReservedSpace region_storage(region_storage_size, region_page_size);264MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);265if (!region_storage.special()) {266os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,267"Cannot commit region memory");268}269270// Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.271// Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.272// If not successful, bite a bullet and allocate at whatever address.273{274size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());275size_t cset_size = align_size_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);276277uintptr_t min = ShenandoahUtils::round_up_power_of_2(cset_align);278uintptr_t max = (1u << 30u);279280for (uintptr_t addr = min; addr <= max; addr <<= 1u) {281char* req_addr = (char*)addr;282assert(is_ptr_aligned(req_addr, cset_align), "Should be aligned");283ReservedSpace cset_rs(cset_size, cset_align, false, req_addr);284if (cset_rs.is_reserved()) {285assert(cset_rs.base() == req_addr, err_msg("Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr));286_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());287break;288}289}290291if (_collection_set == NULL) {292ReservedSpace cset_rs(cset_size, cset_align, false);293_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());294}295}296297_regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);298_free_set = new ShenandoahFreeSet(this, _num_regions);299300{301ShenandoahHeapLocker locker(lock());302303for (size_t i = 0; i < _num_regions; i++) {304HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;305bool is_committed = i < num_committed_regions;306void* loc = region_storage.base() + i * region_align;307308ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);309assert(is_ptr_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");310311_marking_context->initialize_top_at_mark_start(r);312_regions[i] = r;313assert(!collection_set()->is_in(i), "New region should not be in collection set");314}315316// Initialize to complete317_marking_context->mark_complete();318319_free_set->rebuild();320}321322if (AlwaysPreTouch) {323// For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,324// before initialize() below zeroes it with initializing thread. For any given region,325// we touch the region and the corresponding bitmaps from the same thread.326ShenandoahPushWorkerScope scope(workers(), _max_workers, false);327328_pretouch_heap_page_size = heap_page_size;329_pretouch_bitmap_page_size = bitmap_page_size;330331#ifdef LINUX332// UseTransparentHugePages would madvise that backing memory can be coalesced into huge333// pages. But, the kernel needs to know that every small page is used, in order to coalesce334// them into huge one. Therefore, we need to pretouch with smaller pages.335if (UseTransparentHugePages) {336_pretouch_heap_page_size = (size_t)os::vm_page_size();337_pretouch_bitmap_page_size = (size_t)os::vm_page_size();338}339#endif340341// OS memory managers may want to coalesce back-to-back pages. Make their jobs342// simpler by pre-touching continuous spaces (heap and bitmap) separately.343344ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);345_workers->run_task(&bcl);346347ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);348_workers->run_task(&hcl);349}350351//352// Initialize the rest of GC subsystems353//354355set_barrier_set(new ShenandoahBarrierSet(this));356357_liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);358for (uint worker = 0; worker < _max_workers; worker++) {359_liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);360Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));361}362363// The call below uses stuff (the SATB* things) that are in G1, but probably364// belong into a shared location.365JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,366SATB_Q_FL_lock,36720 /*G1SATBProcessCompletedThreshold */,368Shared_SATB_Q_lock);369370_monitoring_support = new ShenandoahMonitoringSupport(this);371_phase_timings = new ShenandoahPhaseTimings(max_workers());372ShenandoahStringDedup::initialize();373ShenandoahCodeRoots::initialize();374375if (ShenandoahPacing) {376_pacer = new ShenandoahPacer(this);377_pacer->setup_for_idle();378} else {379_pacer = NULL;380}381382_control_thread = new ShenandoahControlThread();383384log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",385byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size),386byte_size_in_proper_unit(_minimum_size), proper_unit_for_byte_size(_minimum_size),387byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())388);389390return JNI_OK;391}392393#ifdef _MSC_VER394#pragma warning( push )395#pragma warning( disable:4355 ) // 'this' : used in base member initializer list396#endif397398void ShenandoahHeap::initialize_heuristics() {399if (ShenandoahGCMode != NULL) {400if (strcmp(ShenandoahGCMode, "satb") == 0) {401_gc_mode = new ShenandoahSATBMode();402} else if (strcmp(ShenandoahGCMode, "iu") == 0) {403_gc_mode = new ShenandoahIUMode();404} else if (strcmp(ShenandoahGCMode, "passive") == 0) {405_gc_mode = new ShenandoahPassiveMode();406} else {407vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");408}409} else {410ShouldNotReachHere();411}412_gc_mode->initialize_flags();413if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {414vm_exit_during_initialization(415err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",416_gc_mode->name()));417}418if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {419vm_exit_during_initialization(420err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",421_gc_mode->name()));422}423log_info(gc, init)("Shenandoah GC mode: %s",424_gc_mode->name());425426_heuristics = _gc_mode->initialize_heuristics();427428if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {429vm_exit_during_initialization(430err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",431_heuristics->name()));432}433if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {434vm_exit_during_initialization(435err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",436_heuristics->name()));437}438log_info(gc, init)("Shenandoah heuristics: %s",439_heuristics->name());440}441442ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :443SharedHeap(policy),444_shenandoah_policy(policy),445_heap_region_special(false),446_regions(NULL),447_free_set(NULL),448_collection_set(NULL),449_update_refs_iterator(this),450_bytes_allocated_since_gc_start(0),451_max_workers((uint)MAX2(ConcGCThreads, ParallelGCThreads)),452_ref_processor(NULL),453_marking_context(NULL),454_bitmap_size(0),455_bitmap_regions_per_slice(0),456_bitmap_bytes_per_slice(0),457_bitmap_region_special(false),458_aux_bitmap_region_special(false),459_liveness_cache(NULL),460_aux_bit_map(),461_verifier(NULL),462_pacer(NULL),463_gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),464_phase_timings(NULL)465{466_heap = this;467468log_info(gc, init)("GC threads: " UINTX_FORMAT " parallel, " UINTX_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);469470_scm = new ShenandoahConcurrentMark();471472_full_gc = new ShenandoahMarkCompact();473_used = 0;474475_max_workers = MAX2(_max_workers, 1U);476477// SharedHeap did not initialize this for us, and we want our own workgang anyway.478assert(SharedHeap::_workers == NULL && _workers == NULL, "Should not be initialized yet");479_workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,480/* are_GC_task_threads */true,481/* are_ConcurrentGC_threads */false);482if (_workers == NULL) {483vm_exit_during_initialization("Failed necessary allocation.");484} else {485_workers->initialize_workers();486}487assert(SharedHeap::_workers == _workers, "Sanity: initialized the correct field");488}489490#ifdef _MSC_VER491#pragma warning( pop )492#endif493494class ShenandoahResetBitmapTask : public AbstractGangTask {495private:496ShenandoahRegionIterator _regions;497498public:499ShenandoahResetBitmapTask() :500AbstractGangTask("Parallel Reset Bitmap Task") {}501502void work(uint worker_id) {503ShenandoahHeapRegion* region = _regions.next();504ShenandoahHeap* heap = ShenandoahHeap::heap();505ShenandoahMarkingContext* const ctx = heap->marking_context();506while (region != NULL) {507if (heap->is_bitmap_slice_committed(region)) {508ctx->clear_bitmap(region);509}510region = _regions.next();511}512}513};514515void ShenandoahHeap::reset_mark_bitmap() {516assert_gc_workers(_workers->active_workers());517mark_incomplete_marking_context();518519ShenandoahResetBitmapTask task;520_workers->run_task(&task);521}522523void ShenandoahHeap::print_on(outputStream* st) const {524st->print_cr("Shenandoah Heap");525st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",526byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),527byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),528byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),529byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));530st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",531num_regions(),532byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),533proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));534535st->print("Status: ");536if (has_forwarded_objects()) st->print("has forwarded objects, ");537if (is_concurrent_mark_in_progress()) st->print("marking, ");538if (is_evacuation_in_progress()) st->print("evacuating, ");539if (is_update_refs_in_progress()) st->print("updating refs, ");540if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");541if (is_full_gc_in_progress()) st->print("full gc, ");542if (is_full_gc_move_in_progress()) st->print("full gc move, ");543544if (cancelled_gc()) {545st->print("cancelled");546} else {547st->print("not cancelled");548}549st->cr();550551st->print_cr("Reserved region:");552st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",553p2i(reserved_region().start()),554p2i(reserved_region().end()));555556ShenandoahCollectionSet* cset = collection_set();557st->print_cr("Collection set:");558if (cset != NULL) {559st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));560st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));561} else {562st->print_cr(" (NULL)");563}564565st->cr();566MetaspaceAux::print_on(st);567568if (Verbose) {569print_heap_regions_on(st);570}571}572573class ShenandoahInitGCLABClosure : public ThreadClosure {574public:575void do_thread(Thread* thread) {576assert(thread == NULL || !thread->is_Java_thread(), "Don't expect JavaThread this early");577if (thread != NULL && thread->is_Worker_thread()) {578thread->gclab().initialize(true);579}580}581};582583void ShenandoahHeap::post_initialize() {584if (UseTLAB) {585MutexLocker ml(Threads_lock);586587ShenandoahInitGCLABClosure init_gclabs;588Threads::threads_do(&init_gclabs);589}590591_scm->initialize(_max_workers);592_full_gc->initialize(_gc_timer);593594ref_processing_init();595596_heuristics->initialize();597598JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());599}600601size_t ShenandoahHeap::used() const {602OrderAccess::acquire();603return (size_t) _used;604}605606size_t ShenandoahHeap::committed() const {607OrderAccess::acquire();608return _committed;609}610611void ShenandoahHeap::increase_committed(size_t bytes) {612shenandoah_assert_heaplocked_or_safepoint();613_committed += bytes;614}615616void ShenandoahHeap::decrease_committed(size_t bytes) {617shenandoah_assert_heaplocked_or_safepoint();618_committed -= bytes;619}620621void ShenandoahHeap::increase_used(size_t bytes) {622Atomic::add(bytes, &_used);623}624625void ShenandoahHeap::set_used(size_t bytes) {626OrderAccess::release_store_fence(&_used, bytes);627}628629void ShenandoahHeap::decrease_used(size_t bytes) {630assert(used() >= bytes, "never decrease heap size by more than we've left");631Atomic::add(-(jlong)bytes, &_used);632}633634void ShenandoahHeap::increase_allocated(size_t bytes) {635Atomic::add(bytes, &_bytes_allocated_since_gc_start);636}637638void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {639size_t bytes = words * HeapWordSize;640if (!waste) {641increase_used(bytes);642}643increase_allocated(bytes);644if (ShenandoahPacing) {645control_thread()->pacing_notify_alloc(words);646if (waste) {647pacer()->claim_for_alloc(words, true);648}649}650}651652size_t ShenandoahHeap::capacity() const {653return committed();654}655656size_t ShenandoahHeap::max_capacity() const {657return _num_regions * ShenandoahHeapRegion::region_size_bytes();658}659660size_t ShenandoahHeap::soft_max_capacity() const {661size_t v = OrderAccess::load_acquire((volatile size_t*)&_soft_max_size);662assert(min_capacity() <= v && v <= max_capacity(),663err_msg("Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,664min_capacity(), v, max_capacity()));665return v;666}667668void ShenandoahHeap::set_soft_max_capacity(size_t v) {669assert(min_capacity() <= v && v <= max_capacity(),670err_msg("Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,671min_capacity(), v, max_capacity()));672OrderAccess::release_store_fence(&_soft_max_size, v);673}674675size_t ShenandoahHeap::min_capacity() const {676return _minimum_size;677}678679size_t ShenandoahHeap::initial_capacity() const {680return _initial_size;681}682683bool ShenandoahHeap::is_in(const void* p) const {684HeapWord* heap_base = (HeapWord*) base();685HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();686return p >= heap_base && p < last_region_end;687}688689void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {690assert (ShenandoahUncommit, "should be enabled");691692// Application allocates from the beginning of the heap, and GC allocates at693// the end of it. It is more efficient to uncommit from the end, so that applications694// could enjoy the near committed regions. GC allocations are much less frequent,695// and therefore can accept the committing costs.696697size_t count = 0;698for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow699ShenandoahHeapRegion* r = get_region(i - 1);700if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {701ShenandoahHeapLocker locker(lock());702if (r->is_empty_committed()) {703if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {704break;705}706707r->make_uncommitted();708count++;709}710}711SpinPause(); // allow allocators to take the lock712}713714if (count > 0) {715_control_thread->notify_heap_changed();716}717}718719HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {720// Retain tlab and allocate object in shared space if721// the amount free in the tlab is too large to discard.722if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {723thread->gclab().record_slow_allocation(size);724return NULL;725}726727// Discard gclab and allocate a new one.728// To minimize fragmentation, the last GCLAB may be smaller than the rest.729size_t new_gclab_size = thread->gclab().compute_size(size);730731thread->gclab().clear_before_allocation();732733if (new_gclab_size == 0) {734return NULL;735}736737// Allocated object should fit in new GCLAB, and new_gclab_size should be larger than min738size_t min_size = MAX2(size + ThreadLocalAllocBuffer::alignment_reserve(), ThreadLocalAllocBuffer::min_size());739new_gclab_size = MAX2(new_gclab_size, min_size);740741// Allocate a new GCLAB...742size_t actual_size = 0;743HeapWord* obj = allocate_new_gclab(min_size, new_gclab_size, &actual_size);744745if (obj == NULL) {746return NULL;747}748749assert (size <= actual_size, "allocation should fit");750751if (ZeroTLAB) {752// ..and clear it.753Copy::zero_to_words(obj, actual_size);754} else {755// ...and zap just allocated object.756#ifdef ASSERT757// Skip mangling the space corresponding to the object header to758// ensure that the returned space is not considered parsable by759// any concurrent GC thread.760size_t hdr_size = oopDesc::header_size();761Copy::fill_to_words(obj + hdr_size, actual_size - hdr_size, badHeapWordVal);762#endif // ASSERT763}764thread->gclab().fill(obj, obj + size, actual_size);765return obj;766}767768HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {769ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(word_size);770return allocate_memory(req);771}772773HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,774size_t word_size,775size_t* actual_size) {776ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);777HeapWord* res = allocate_memory(req);778if (res != NULL) {779*actual_size = req.actual_size();780} else {781*actual_size = 0;782}783return res;784}785786HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {787intptr_t pacer_epoch = 0;788bool in_new_region = false;789HeapWord* result = NULL;790791if (req.is_mutator_alloc()) {792if (ShenandoahPacing) {793pacer()->pace_for_alloc(req.size());794pacer_epoch = pacer()->epoch();795}796797if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {798result = allocate_memory_under_lock(req, in_new_region);799}800801// Allocation failed, block until control thread reacted, then retry allocation.802//803// It might happen that one of the threads requesting allocation would unblock804// way later after GC happened, only to fail the second allocation, because805// other threads have already depleted the free storage. In this case, a better806// strategy is to try again, as long as GC makes progress.807//808// Then, we need to make sure the allocation was retried after at least one809// Full GC, which means we want to try more than ShenandoahFullGCThreshold times.810811size_t tries = 0;812813while (result == NULL && _progress_last_gc.is_set()) {814tries++;815control_thread()->handle_alloc_failure(req);816result = allocate_memory_under_lock(req, in_new_region);817}818819while (result == NULL && tries <= ShenandoahFullGCThreshold) {820tries++;821control_thread()->handle_alloc_failure(req);822result = allocate_memory_under_lock(req, in_new_region);823}824825} else {826assert(req.is_gc_alloc(), "Can only accept GC allocs here");827result = allocate_memory_under_lock(req, in_new_region);828// Do not call handle_alloc_failure() here, because we cannot block.829// The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().830}831832if (in_new_region) {833control_thread()->notify_heap_changed();834}835836if (result != NULL) {837size_t requested = req.size();838size_t actual = req.actual_size();839840assert (req.is_lab_alloc() || (requested == actual),841err_msg("Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,842ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual));843844if (req.is_mutator_alloc()) {845notify_mutator_alloc_words(actual, false);846847// If we requested more than we were granted, give the rest back to pacer.848// This only matters if we are in the same pacing epoch: do not try to unpace849// over the budget for the other phase.850if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {851pacer()->unpace_for_alloc(pacer_epoch, requested - actual);852}853} else {854increase_used(actual*HeapWordSize);855}856}857858return result;859}860861HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {862ShenandoahHeapLocker locker(lock());863return _free_set->allocate(req, in_new_region);864}865866HeapWord* ShenandoahHeap::mem_allocate(size_t size,867bool* gc_overhead_limit_was_exceeded) {868ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);869return allocate_memory(req);870}871872class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {873private:874ShenandoahHeap* const _heap;875Thread* const _thread;876public:877ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :878_heap(heap), _thread(Thread::current()) {}879880void do_object(oop p) {881shenandoah_assert_marked(NULL, p);882if (!p->is_forwarded()) {883_heap->evacuate_object(p, _thread);884}885}886};887888class ShenandoahEvacuationTask : public AbstractGangTask {889private:890ShenandoahHeap* const _sh;891ShenandoahCollectionSet* const _cs;892bool _concurrent;893public:894ShenandoahEvacuationTask(ShenandoahHeap* sh,895ShenandoahCollectionSet* cs,896bool concurrent) :897AbstractGangTask("Parallel Evacuation Task"),898_sh(sh),899_cs(cs),900_concurrent(concurrent)901{}902903void work(uint worker_id) {904ShenandoahEvacOOMScope oom_evac_scope;905if (_concurrent) {906ShenandoahConcurrentWorkerSession worker_session(worker_id);907do_work();908} else {909ShenandoahParallelWorkerSession worker_session(worker_id);910do_work();911}912}913914private:915void do_work() {916ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);917ShenandoahHeapRegion* r;918while ((r =_cs->claim_next()) != NULL) {919assert(r->has_live(), err_msg("Region " SIZE_FORMAT " should have been reclaimed early", r->index()));920_sh->marked_object_iterate(r, &cl);921922if (ShenandoahPacing) {923_sh->pacer()->report_evac(r->used() >> LogHeapWordSize);924}925926if (_sh->cancelled_gc()) {927break;928}929}930}931};932933void ShenandoahHeap::trash_cset_regions() {934ShenandoahHeapLocker locker(lock());935936ShenandoahCollectionSet* set = collection_set();937ShenandoahHeapRegion* r;938set->clear_current_index();939while ((r = set->next()) != NULL) {940r->make_trash();941}942collection_set()->clear();943}944945void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {946st->print_cr("Heap Regions:");947st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");948st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");949st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark");950st->print_cr("SN=alloc sequence number");951952for (size_t i = 0; i < num_regions(); i++) {953get_region(i)->print_on(st);954}955}956957void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {958assert(start->is_humongous_start(), "reclaim regions starting with the first one");959960oop humongous_obj = oop(start->bottom());961size_t size = humongous_obj->size();962size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);963size_t index = start->index() + required_regions - 1;964965assert(!start->has_live(), "liveness must be zero");966967for(size_t i = 0; i < required_regions; i++) {968// Reclaim from tail. Otherwise, assertion fails when printing region to trace log,969// as it expects that every region belongs to a humongous region starting with a humongous start region.970ShenandoahHeapRegion* region = get_region(index --);971972assert(region->is_humongous(), "expect correct humongous start or continuation");973assert(!region->is_cset(), "Humongous region should not be in collection set");974975region->make_trash_immediate();976}977}978979class ShenandoahRetireGCLABClosure : public ThreadClosure {980private:981bool _retire;982public:983ShenandoahRetireGCLABClosure(bool retire) : _retire(retire) {};984985void do_thread(Thread* thread) {986assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name()));987thread->gclab().make_parsable(_retire);988}989};990991void ShenandoahHeap::make_parsable(bool retire_tlabs) {992if (UseTLAB) {993CollectedHeap::ensure_parsability(retire_tlabs);994ShenandoahRetireGCLABClosure cl(retire_tlabs);995Threads::java_threads_do(&cl);996_workers->threads_do(&cl);997}998}9991000class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {1001private:1002ShenandoahRootEvacuator* _rp;10031004public:1005ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :1006AbstractGangTask("Shenandoah evacuate and update roots"),1007_rp(rp) {}10081009void work(uint worker_id) {1010ShenandoahParallelWorkerSession worker_session(worker_id);1011ShenandoahEvacOOMScope oom_evac_scope;1012ShenandoahEvacuateUpdateRootsClosure cl;10131014MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);1015_rp->roots_do(worker_id, &cl);1016}1017};10181019void ShenandoahHeap::evacuate_and_update_roots() {1020COMPILER2_PRESENT(DerivedPointerTable::clear());10211022assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");10231024{1025ShenandoahRootEvacuator rp(ShenandoahPhaseTimings::init_evac);1026ShenandoahEvacuateUpdateRootsTask roots_task(&rp);1027workers()->run_task(&roots_task);1028}10291030COMPILER2_PRESENT(DerivedPointerTable::update_pointers());1031}10321033size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {1034// Returns size in bytes1035return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());1036}10371038size_t ShenandoahHeap::max_tlab_size() const {1039// Returns size in words1040return ShenandoahHeapRegion::max_tlab_size_words();1041}10421043class ShenandoahResizeGCLABClosure : public ThreadClosure {1044public:1045void do_thread(Thread* thread) {1046assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name()));1047thread->gclab().resize();1048}1049};10501051void ShenandoahHeap::resize_all_tlabs() {1052CollectedHeap::resize_all_tlabs();10531054ShenandoahResizeGCLABClosure cl;1055Threads::java_threads_do(&cl);1056_workers->threads_do(&cl);1057}10581059class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {1060public:1061void do_thread(Thread* thread) {1062assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name()));1063thread->gclab().accumulate_statistics();1064thread->gclab().initialize_statistics();1065}1066};10671068void ShenandoahHeap::accumulate_statistics_all_gclabs() {1069ShenandoahAccumulateStatisticsGCLABClosure cl;1070Threads::java_threads_do(&cl);1071_workers->threads_do(&cl);1072}10731074void ShenandoahHeap::collect(GCCause::Cause cause) {1075_control_thread->request_gc(cause);1076}10771078void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {1079//assert(false, "Shouldn't need to do full collections");1080}10811082CollectorPolicy* ShenandoahHeap::collector_policy() const {1083return _shenandoah_policy;1084}10851086void ShenandoahHeap::resize_tlabs() {1087CollectedHeap::resize_all_tlabs();1088}10891090void ShenandoahHeap::accumulate_statistics_tlabs() {1091CollectedHeap::accumulate_statistics_all_tlabs();1092}10931094HeapWord* ShenandoahHeap::block_start(const void* addr) const {1095ShenandoahHeapRegion* r = heap_region_containing(addr);1096if (r != NULL) {1097return r->block_start(addr);1098}1099return NULL;1100}11011102size_t ShenandoahHeap::block_size(const HeapWord* addr) const {1103ShenandoahHeapRegion* r = heap_region_containing(addr);1104return r->block_size(addr);1105}11061107bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {1108ShenandoahHeapRegion* r = heap_region_containing(addr);1109return r->block_is_obj(addr);1110}11111112jlong ShenandoahHeap::millis_since_last_gc() {1113double v = heuristics()->time_since_last_gc() * 1000;1114assert(0 <= v && v <= max_jlong, err_msg("value should fit: %f", v));1115return (jlong)v;1116}11171118void ShenandoahHeap::prepare_for_verify() {1119if (SafepointSynchronize::is_at_safepoint()) {1120make_parsable(false);1121}1122}11231124void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {1125workers()->print_worker_threads_on(st);1126if (ShenandoahStringDedup::is_enabled()) {1127ShenandoahStringDedup::print_worker_threads_on(st);1128}1129}11301131void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {1132workers()->threads_do(tcl);1133if (ShenandoahStringDedup::is_enabled()) {1134ShenandoahStringDedup::threads_do(tcl);1135}1136}11371138void ShenandoahHeap::print_tracing_info() const {1139if (PrintGC || TraceGen0Time || TraceGen1Time) {1140ResourceMark rm;1141outputStream* out = gclog_or_tty;1142phase_timings()->print_global_on(out);11431144out->cr();1145out->cr();11461147shenandoah_policy()->print_gc_stats(out);11481149out->cr();1150out->cr();1151}1152}11531154void ShenandoahHeap::verify(bool silent, VerifyOption vo) {1155if (ShenandoahSafepoint::is_at_shenandoah_safepoint() || ! UseTLAB) {1156if (ShenandoahVerify) {1157verifier()->verify_generic(vo);1158} else {1159// TODO: Consider allocating verification bitmaps on demand,1160// and turn this on unconditionally.1161}1162}1163}1164size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {1165return _free_set->capacity();1166}11671168class ObjectIterateScanRootClosure : public ExtendedOopClosure {1169private:1170MarkBitMap* _bitmap;1171Stack<oop,mtGC>* _oop_stack;11721173template <class T>1174void do_oop_work(T* p) {1175T o = oopDesc::load_heap_oop(p);1176if (!oopDesc::is_null(o)) {1177oop obj = oopDesc::decode_heap_oop_not_null(o);1178obj = (oop) ShenandoahBarrierSet::resolve_forwarded_not_null(obj);1179assert(obj->is_oop(), "must be a valid oop");1180if (!_bitmap->isMarked((HeapWord*) obj)) {1181_bitmap->mark((HeapWord*) obj);1182_oop_stack->push(obj);1183}1184}1185}1186public:1187ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :1188_bitmap(bitmap), _oop_stack(oop_stack) {}1189void do_oop(oop* p) { do_oop_work(p); }1190void do_oop(narrowOop* p) { do_oop_work(p); }1191};11921193/*1194* This is public API, used in preparation of object_iterate().1195* Since we don't do linear scan of heap in object_iterate() (see comment below), we don't1196* need to make the heap parsable. For Shenandoah-internal linear heap scans that we can1197* control, we call SH::make_parsable().1198*/1199void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {1200// No-op.1201}12021203/*1204* Iterates objects in the heap. This is public API, used for, e.g., heap dumping.1205*1206* We cannot safely iterate objects by doing a linear scan at random points in time. Linear1207* scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.1208* calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear1209* scanning therefore depends on having a valid marking bitmap to support it. However, we only1210* have a valid marking bitmap after successful marking. In particular, we *don't* have a valid1211* marking bitmap during marking, after aborted marking or during/after cleanup (when we just1212* wiped the bitmap in preparation for next marking).1213*1214* For all those reasons, we implement object iteration as a single marking traversal, reporting1215* objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap1216* is allowed to report dead objects, but is not required to do so.1217*/1218void ShenandoahHeap::object_iterate(ObjectClosure* cl) {1219assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");1220if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {1221log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");1222return;1223}12241225// Reset bitmap1226_aux_bit_map.clear();12271228Stack<oop,mtGC> oop_stack;12291230ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);12311232{1233// First, we process GC roots according to current GC cycle.1234// This populates the work stack with initial objects.1235// It is important to relinquish the associated locks before diving1236// into heap dumper.1237ShenandoahHeapIterationRootScanner rp;1238rp.roots_do(&oops);1239}12401241// Work through the oop stack to traverse heap.1242while (! oop_stack.is_empty()) {1243oop obj = oop_stack.pop();1244assert(obj->is_oop(), "must be a valid oop");1245cl->do_object(obj);1246obj->oop_iterate(&oops);1247}12481249assert(oop_stack.is_empty(), "should be empty");12501251if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {1252log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");1253}1254}12551256void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {1257assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");1258object_iterate(cl);1259}12601261void ShenandoahHeap::oop_iterate(ExtendedOopClosure* cl) {1262ObjectToOopClosure cl2(cl);1263object_iterate(&cl2);1264}12651266void ShenandoahHeap::gc_prologue(bool b) {1267Unimplemented();1268}12691270void ShenandoahHeap::gc_epilogue(bool b) {1271Unimplemented();1272}12731274void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {1275for (size_t i = 0; i < num_regions(); i++) {1276ShenandoahHeapRegion* current = get_region(i);1277blk->heap_region_do(current);1278}1279}12801281class ShenandoahParallelHeapRegionTask : public AbstractGangTask {1282private:1283ShenandoahHeap* const _heap;1284ShenandoahHeapRegionClosure* const _blk;12851286shenandoah_padding(0);1287volatile jint _index;1288shenandoah_padding(1);12891290public:1291ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :1292AbstractGangTask("Parallel Region Task"),1293_heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}12941295void work(uint worker_id) {1296jint stride = (jint)ShenandoahParallelRegionStride;12971298jint max = (jint)_heap->num_regions();1299while (_index < max) {1300jint cur = Atomic::add(stride, &_index) - stride;1301jint start = cur;1302jint end = MIN2(cur + stride, max);1303if (start >= max) break;13041305for (jint i = cur; i < end; i++) {1306ShenandoahHeapRegion* current = _heap->get_region((size_t)i);1307_blk->heap_region_do(current);1308}1309}1310}1311};13121313void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {1314assert(blk->is_thread_safe(), "Only thread-safe closures here");1315if (num_regions() > ShenandoahParallelRegionStride) {1316ShenandoahParallelHeapRegionTask task(blk);1317workers()->run_task(&task);1318} else {1319heap_region_iterate(blk);1320}1321}13221323class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {1324private:1325ShenandoahMarkingContext* const _ctx;1326public:1327ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}13281329void heap_region_do(ShenandoahHeapRegion* r) {1330assert(!r->has_live(),1331err_msg("Region " SIZE_FORMAT " should have no live data", r->index()));1332if (r->is_active()) {1333// Check if region needs updating its TAMS. We have updated it already during concurrent1334// reset, so it is very likely we don't need to do another write here.1335if (_ctx->top_at_mark_start(r) != r->top()) {1336_ctx->capture_top_at_mark_start(r);1337}1338} else {1339assert(_ctx->top_at_mark_start(r) == r->top(),1340err_msg("Region " SIZE_FORMAT " should already have correct TAMS", r->index()));1341}1342}13431344bool is_thread_safe() { return true; }1345};13461347void ShenandoahHeap::op_init_mark() {1348assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");1349assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");13501351assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");1352assert(!marking_context()->is_complete(), "should not be complete");1353assert(!has_forwarded_objects(), "No forwarded objects on this path");13541355if (ShenandoahVerify) {1356verifier()->verify_before_concmark();1357}13581359{1360ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);1361accumulate_statistics_tlabs();1362}13631364if (VerifyBeforeGC) {1365Universe::verify();1366}13671368set_concurrent_mark_in_progress(true);1369// We need to reset all TLABs because we'd lose marks on all objects allocated in them.1370if (UseTLAB) {1371ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);1372make_parsable(true);1373}13741375{1376ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);1377ShenandoahInitMarkUpdateRegionStateClosure cl;1378parallel_heap_region_iterate(&cl);1379}13801381// Make above changes visible to worker threads1382OrderAccess::fence();13831384concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);13851386if (UseTLAB) {1387ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);1388resize_tlabs();1389}13901391if (ShenandoahPacing) {1392pacer()->setup_for_mark();1393}1394}13951396void ShenandoahHeap::op_mark() {1397concurrent_mark()->mark_from_roots();1398}13991400class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {1401private:1402ShenandoahMarkingContext* const _ctx;1403ShenandoahHeapLock* const _lock;14041405public:1406ShenandoahFinalMarkUpdateRegionStateClosure() :1407_ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}14081409void heap_region_do(ShenandoahHeapRegion* r) {1410if (r->is_active()) {1411// All allocations past TAMS are implicitly live, adjust the region data.1412// Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.1413HeapWord *tams = _ctx->top_at_mark_start(r);1414HeapWord *top = r->top();1415if (top > tams) {1416r->increase_live_data_alloc_words(pointer_delta(top, tams));1417}14181419// We are about to select the collection set, make sure it knows about1420// current pinning status. Also, this allows trashing more regions that1421// now have their pinning status dropped.1422if (r->is_pinned()) {1423if (r->pin_count() == 0) {1424ShenandoahHeapLocker locker(_lock);1425r->make_unpinned();1426}1427} else {1428if (r->pin_count() > 0) {1429ShenandoahHeapLocker locker(_lock);1430r->make_pinned();1431}1432}14331434// Remember limit for updating refs. It's guaranteed that we get no1435// from-space-refs written from here on.1436r->set_update_watermark_at_safepoint(r->top());1437} else {1438assert(!r->has_live(),1439err_msg("Region " SIZE_FORMAT " should have no live data", r->index()));1440assert(_ctx->top_at_mark_start(r) == r->top(),1441err_msg("Region " SIZE_FORMAT " should have correct TAMS", r->index()));1442}1443}14441445bool is_thread_safe() { return true; }1446};14471448void ShenandoahHeap::op_final_mark() {1449assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");1450assert(!has_forwarded_objects(), "No forwarded objects on this path");14511452// It is critical that we1453// evacuate roots right after finishing marking, so that we don't1454// get unmarked objects in the roots.14551456if (!cancelled_gc()) {1457concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);14581459TASKQUEUE_STATS_ONLY(concurrent_mark()->task_queues()->reset_taskqueue_stats());14601461if (ShenandoahVerify) {1462verifier()->verify_roots_no_forwarded();1463}14641465TASKQUEUE_STATS_ONLY(concurrent_mark()->task_queues()->print_taskqueue_stats());14661467{1468ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_region_states);1469ShenandoahFinalMarkUpdateRegionStateClosure cl;1470parallel_heap_region_iterate(&cl);14711472assert_pinned_region_status();1473}14741475// Force the threads to reacquire their TLABs outside the collection set.1476{1477ShenandoahGCPhase phase(ShenandoahPhaseTimings::retire_tlabs);1478make_parsable(true);1479}14801481{1482ShenandoahGCPhase phase(ShenandoahPhaseTimings::choose_cset);1483ShenandoahHeapLocker locker(lock());1484_collection_set->clear();1485heuristics()->choose_collection_set(_collection_set);1486}14871488{1489ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset);1490ShenandoahHeapLocker locker(lock());1491_free_set->rebuild();1492}14931494// If collection set has candidates, start evacuation.1495// Otherwise, bypass the rest of the cycle.1496if (!collection_set()->is_empty()) {1497ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);14981499if (ShenandoahVerify) {1500verifier()->verify_before_evacuation();1501}15021503set_evacuation_in_progress(true);1504// From here on, we need to update references.1505set_has_forwarded_objects(true);15061507if (!is_degenerated_gc_in_progress()) {1508evacuate_and_update_roots();1509}15101511if (ShenandoahPacing) {1512pacer()->setup_for_evac();1513}15141515if (ShenandoahVerify) {1516verifier()->verify_roots_no_forwarded();1517verifier()->verify_during_evacuation();1518}1519} else {1520if (ShenandoahVerify) {1521verifier()->verify_after_concmark();1522}15231524if (VerifyAfterGC) {1525Universe::verify();1526}1527}15281529} else {1530concurrent_mark()->cancel();1531complete_marking();15321533if (process_references()) {1534// Abandon reference processing right away: pre-cleaning must have failed.1535ReferenceProcessor *rp = ref_processor();1536rp->disable_discovery();1537rp->abandon_partial_discovery();1538rp->verify_no_references_recorded();1539}1540}1541}15421543void ShenandoahHeap::op_conc_evac() {1544ShenandoahEvacuationTask task(this, _collection_set, true);1545workers()->run_task(&task);1546}15471548void ShenandoahHeap::op_stw_evac() {1549ShenandoahEvacuationTask task(this, _collection_set, false);1550workers()->run_task(&task);1551}15521553void ShenandoahHeap::op_updaterefs() {1554update_heap_references(true);1555}15561557void ShenandoahHeap::op_cleanup_early() {1558free_set()->recycle_trash();1559}15601561void ShenandoahHeap::op_cleanup_complete() {1562free_set()->recycle_trash();1563}15641565class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {1566private:1567ShenandoahMarkingContext* const _ctx;1568public:1569ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}15701571void heap_region_do(ShenandoahHeapRegion* r) {1572if (r->is_active()) {1573// Reset live data and set TAMS optimistically. We would recheck these under the pause1574// anyway to capture any updates that happened since now.1575r->clear_live_data();1576_ctx->capture_top_at_mark_start(r);1577}1578}15791580bool is_thread_safe() { return true; }1581};15821583void ShenandoahHeap::op_reset() {1584if (ShenandoahPacing) {1585pacer()->setup_for_reset();1586}1587reset_mark_bitmap();15881589ShenandoahResetUpdateRegionStateClosure cl;1590parallel_heap_region_iterate(&cl);1591}15921593void ShenandoahHeap::op_preclean() {1594if (ShenandoahPacing) {1595pacer()->setup_for_preclean();1596}1597concurrent_mark()->preclean_weak_refs();1598}15991600void ShenandoahHeap::op_full(GCCause::Cause cause) {1601ShenandoahMetricsSnapshot metrics;1602metrics.snap_before();16031604full_gc()->do_it(cause);16051606metrics.snap_after();16071608if (metrics.is_good_progress()) {1609_progress_last_gc.set();1610} else {1611// Nothing to do. Tell the allocation path that we have failed to make1612// progress, and it can finally fail.1613_progress_last_gc.unset();1614}1615}16161617void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {1618// Degenerated GC is STW, but it can also fail. Current mechanics communicates1619// GC failure via cancelled_concgc() flag. So, if we detect the failure after1620// some phase, we have to upgrade the Degenerate GC to Full GC.16211622clear_cancelled_gc();16231624ShenandoahMetricsSnapshot metrics;1625metrics.snap_before();16261627switch (point) {1628// The cases below form the Duff's-like device: it describes the actual GC cycle,1629// but enters it at different points, depending on which concurrent phase had1630// degenerated.16311632case _degenerated_outside_cycle:1633// We have degenerated from outside the cycle, which means something is bad with1634// the heap, most probably heavy humongous fragmentation, or we are very low on free1635// space. It makes little sense to wait for Full GC to reclaim as much as it can, when1636// we can do the most aggressive degen cycle, which includes processing references and1637// class unloading, unless those features are explicitly disabled.1638//1639// Note that we can only do this for "outside-cycle" degens, otherwise we would risk1640// changing the cycle parameters mid-cycle during concurrent -> degenerated handover.1641set_process_references(heuristics()->can_process_references());1642set_unload_classes(heuristics()->can_unload_classes());16431644op_reset();16451646op_init_mark();1647if (cancelled_gc()) {1648op_degenerated_fail();1649return;1650}16511652case _degenerated_mark:1653op_final_mark();1654if (cancelled_gc()) {1655op_degenerated_fail();1656return;1657}16581659op_cleanup_early();16601661case _degenerated_evac:1662// If heuristics thinks we should do the cycle, this flag would be set,1663// and we can do evacuation. Otherwise, it would be the shortcut cycle.1664if (is_evacuation_in_progress()) {16651666// Degeneration under oom-evac protocol might have left some objects in1667// collection set un-evacuated. Restart evacuation from the beginning to1668// capture all objects. For all the objects that are already evacuated,1669// it would be a simple check, which is supposed to be fast. This is also1670// safe to do even without degeneration, as CSet iterator is at beginning1671// in preparation for evacuation anyway.1672//1673// Before doing that, we need to make sure we never had any cset-pinned1674// regions. This may happen if allocation failure happened when evacuating1675// the about-to-be-pinned object, oom-evac protocol left the object in1676// the collection set, and then the pin reached the cset region. If we continue1677// the cycle here, we would trash the cset and alive objects in it. To avoid1678// it, we fail degeneration right away and slide into Full GC to recover.16791680{1681sync_pinned_region_status();1682collection_set()->clear_current_index();16831684ShenandoahHeapRegion* r;1685while ((r = collection_set()->next()) != NULL) {1686if (r->is_pinned()) {1687cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);1688op_degenerated_fail();1689return;1690}1691}16921693collection_set()->clear_current_index();1694}16951696op_stw_evac();1697if (cancelled_gc()) {1698op_degenerated_fail();1699return;1700}1701}17021703// If heuristics thinks we should do the cycle, this flag would be set,1704// and we need to do update-refs. Otherwise, it would be the shortcut cycle.1705if (has_forwarded_objects()) {1706op_init_updaterefs();1707if (cancelled_gc()) {1708op_degenerated_fail();1709return;1710}1711}17121713case _degenerated_updaterefs:1714if (has_forwarded_objects()) {1715op_final_updaterefs();1716if (cancelled_gc()) {1717op_degenerated_fail();1718return;1719}1720}17211722op_cleanup_complete();1723break;17241725default:1726ShouldNotReachHere();1727}17281729if (ShenandoahVerify) {1730verifier()->verify_after_degenerated();1731}17321733if (VerifyAfterGC) {1734Universe::verify();1735}17361737metrics.snap_after();17381739// Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,1740// because that probably means the heap is overloaded and/or fragmented.1741if (!metrics.is_good_progress()) {1742_progress_last_gc.unset();1743cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);1744op_degenerated_futile();1745} else {1746_progress_last_gc.set();1747}1748}17491750void ShenandoahHeap::op_degenerated_fail() {1751log_info(gc)("Cannot finish degeneration, upgrading to Full GC");1752shenandoah_policy()->record_degenerated_upgrade_to_full();1753op_full(GCCause::_shenandoah_upgrade_to_full_gc);1754}17551756void ShenandoahHeap::op_degenerated_futile() {1757shenandoah_policy()->record_degenerated_upgrade_to_full();1758op_full(GCCause::_shenandoah_upgrade_to_full_gc);1759}17601761void ShenandoahHeap::complete_marking() {1762if (is_concurrent_mark_in_progress()) {1763set_concurrent_mark_in_progress(false);1764}17651766if (!cancelled_gc()) {1767// If we needed to update refs, and concurrent marking has been cancelled,1768// we need to finish updating references.1769set_has_forwarded_objects(false);1770mark_complete_marking_context();1771}1772}17731774void ShenandoahHeap::force_satb_flush_all_threads() {1775if (!is_concurrent_mark_in_progress()) {1776// No need to flush SATBs1777return;1778}17791780// Do not block if Threads lock is busy. This avoids the potential deadlock1781// when this code is called from the periodic task, and something else is1782// expecting the periodic task to complete without blocking. On the off-chance1783// Threads lock is busy momentarily, try to acquire several times.1784for (int t = 0; t < 10; t++) {1785if (Threads_lock->try_lock()) {1786JavaThread::set_force_satb_flush_all_threads(true);1787Threads_lock->unlock();17881789// The threads are not "acquiring" their thread-local data, but it does not1790// hurt to "release" the updates here anyway.1791OrderAccess::fence();1792break;1793}1794os::naked_short_sleep(1);1795}1796}17971798void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {1799assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");1800_gc_state.set_cond(mask, value);1801JavaThread::set_gc_state_all_threads(_gc_state.raw_value());1802}18031804void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {1805if (has_forwarded_objects()) {1806set_gc_state_mask(MARKING | UPDATEREFS, in_progress);1807} else {1808set_gc_state_mask(MARKING, in_progress);1809}1810JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);1811}18121813void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {1814assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");1815set_gc_state_mask(EVACUATION, in_progress);1816}18171818void ShenandoahHeap::ref_processing_init() {1819MemRegion mr = reserved_region();18201821assert(_max_workers > 0, "Sanity");18221823bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);1824bool mt_discovery = _max_workers > 1;18251826_ref_processor =1827new ReferenceProcessor(mr, // span1828mt_processing, // MT processing1829_max_workers, // Degree of MT processing1830mt_discovery, // MT discovery1831_max_workers, // Degree of MT discovery1832false, // Reference discovery is not atomic1833NULL); // No closure, should be installed before use18341835log_info(gc, init)("Reference processing: %s discovery, %s processing",1836mt_discovery ? "parallel" : "serial",1837mt_processing ? "parallel" : "serial");18381839shenandoah_assert_rp_isalive_not_installed();1840}18411842void ShenandoahHeap::acquire_pending_refs_lock() {1843_control_thread->slt()->manipulatePLL(SurrogateLockerThread::acquirePLL);1844}18451846void ShenandoahHeap::release_pending_refs_lock() {1847_control_thread->slt()->manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);1848}18491850GCTracer* ShenandoahHeap::tracer() {1851return shenandoah_policy()->tracer();1852}18531854size_t ShenandoahHeap::tlab_used(Thread* thread) const {1855return _free_set->used();1856}18571858void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {1859if (try_cancel_gc()) {1860FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));1861log_info(gc)("%s", msg.buffer());1862Events::log(Thread::current(), "%s", msg.buffer());1863}1864}18651866uint ShenandoahHeap::max_workers() {1867return _max_workers;1868}18691870void ShenandoahHeap::stop() {1871// The shutdown sequence should be able to terminate when GC is running.18721873// Step 0. Notify policy to disable event recording.1874_shenandoah_policy->record_shutdown();18751876// Step 1. Notify control thread that we are in shutdown.1877// Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.1878// Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.1879_control_thread->prepare_for_graceful_shutdown();18801881// Step 2. Notify GC workers that we are cancelling GC.1882cancel_gc(GCCause::_shenandoah_stop_vm);18831884// Step 3. Wait until GC worker exits normally.1885_control_thread->stop();18861887// Step 4. Stop String Dedup thread if it is active1888if (ShenandoahStringDedup::is_enabled()) {1889ShenandoahStringDedup::stop();1890}1891}18921893void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {1894assert(heuristics()->can_unload_classes(), "Class unloading should be enabled");18951896ShenandoahGCPhase root_phase(full_gc ?1897ShenandoahPhaseTimings::full_gc_purge :1898ShenandoahPhaseTimings::purge);18991900ShenandoahIsAliveSelector alive;1901BoolObjectClosure* is_alive = alive.is_alive_closure();19021903// Cleaning of klasses depends on correct information from MetadataMarkOnStack. The CodeCache::mark_on_stack1904// part is too slow to be done serially, so it is handled during the ShenandoahParallelCleaning phase.1905// Defer the cleaning until we have complete on_stack data.1906MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */);19071908bool purged_class;19091910// Unload classes and purge SystemDictionary.1911{1912ShenandoahGCPhase phase(full_gc ?1913ShenandoahPhaseTimings::full_gc_purge_class_unload :1914ShenandoahPhaseTimings::purge_class_unload);1915purged_class = SystemDictionary::do_unloading(is_alive,1916false /* Defer klass cleaning */);1917}1918{1919ShenandoahGCPhase phase(full_gc ?1920ShenandoahPhaseTimings::full_gc_purge_par :1921ShenandoahPhaseTimings::purge_par);1922uint active = _workers->active_workers();1923ShenandoahParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);1924_workers->run_task(&unlink_task);1925}19261927{1928ShenandoahGCPhase phase(full_gc ?1929ShenandoahPhaseTimings::full_gc_purge_metadata :1930ShenandoahPhaseTimings::purge_metadata);1931ClassLoaderDataGraph::free_deallocate_lists();1932}19331934if (ShenandoahStringDedup::is_enabled()) {1935ShenandoahGCPhase phase(full_gc ?1936ShenandoahPhaseTimings::full_gc_purge_string_dedup :1937ShenandoahPhaseTimings::purge_string_dedup);1938ShenandoahStringDedup::parallel_cleanup();1939}19401941{1942ShenandoahGCPhase phase(full_gc ?1943ShenandoahPhaseTimings::full_gc_purge_cldg :1944ShenandoahPhaseTimings::purge_cldg);1945ClassLoaderDataGraph::purge();1946}1947}19481949void ShenandoahHeap::set_has_forwarded_objects(bool cond) {1950set_gc_state_mask(HAS_FORWARDED, cond);1951}19521953void ShenandoahHeap::set_process_references(bool pr) {1954_process_references.set_cond(pr);1955}19561957void ShenandoahHeap::set_unload_classes(bool uc) {1958_unload_classes.set_cond(uc);1959}19601961bool ShenandoahHeap::process_references() const {1962return _process_references.is_set();1963}19641965bool ShenandoahHeap::unload_classes() const {1966return _unload_classes.is_set();1967}19681969address ShenandoahHeap::in_cset_fast_test_addr() {1970ShenandoahHeap* heap = ShenandoahHeap::heap();1971assert(heap->collection_set() != NULL, "Sanity");1972return (address) heap->collection_set()->biased_map_address();1973}19741975address ShenandoahHeap::cancelled_gc_addr() {1976return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();1977}19781979address ShenandoahHeap::gc_state_addr() {1980return (address) ShenandoahHeap::heap()->_gc_state.addr_of();1981}19821983size_t ShenandoahHeap::conservative_max_heap_alignment() {1984size_t align = ShenandoahMaxRegionSize;1985if (UseLargePages) {1986align = MAX2(align, os::large_page_size());1987}1988return align;1989}19901991size_t ShenandoahHeap::bytes_allocated_since_gc_start() {1992return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);1993}19941995void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {1996OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);1997}19981999void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {2000_degenerated_gc_in_progress.set_cond(in_progress);2001}20022003void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {2004_full_gc_in_progress.set_cond(in_progress);2005}20062007void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {2008assert (is_full_gc_in_progress(), "should be");2009_full_gc_move_in_progress.set_cond(in_progress);2010}20112012void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {2013set_gc_state_mask(UPDATEREFS, in_progress);2014}20152016void ShenandoahHeap::register_nmethod(nmethod* nm) {2017ShenandoahCodeRoots::add_nmethod(nm);2018}20192020void ShenandoahHeap::unregister_nmethod(nmethod* nm) {2021ShenandoahCodeRoots::remove_nmethod(nm);2022}20232024oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {2025heap_region_containing(o)->record_pin();2026return o;2027}20282029void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {2030heap_region_containing(o)->record_unpin();2031}20322033void ShenandoahHeap::sync_pinned_region_status() {2034ShenandoahHeapLocker locker(lock());20352036for (size_t i = 0; i < num_regions(); i++) {2037ShenandoahHeapRegion *r = get_region(i);2038if (r->is_active()) {2039if (r->is_pinned()) {2040if (r->pin_count() == 0) {2041r->make_unpinned();2042}2043} else {2044if (r->pin_count() > 0) {2045r->make_pinned();2046}2047}2048}2049}20502051assert_pinned_region_status();2052}20532054#ifdef ASSERT2055void ShenandoahHeap::assert_pinned_region_status() {2056for (size_t i = 0; i < num_regions(); i++) {2057ShenandoahHeapRegion* r = get_region(i);2058assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),2059err_msg("Region " SIZE_FORMAT " pinning status is inconsistent", i));2060}2061}2062#endif20632064GCTimer* ShenandoahHeap::gc_timer() const {2065return _gc_timer;2066}20672068#ifdef ASSERT2069void ShenandoahHeap::assert_gc_workers(uint nworkers) {2070assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");20712072if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {2073if (UseDynamicNumberOfGCThreads ||2074(FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {2075assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");2076} else {2077// Use ParallelGCThreads inside safepoints2078assert(nworkers == ParallelGCThreads, "Use ParalleGCThreads within safepoints");2079}2080} else {2081if (UseDynamicNumberOfGCThreads ||2082(FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {2083assert(nworkers <= ConcGCThreads, "Cannot use more than it has");2084} else {2085// Use ConcGCThreads outside safepoints2086assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");2087}2088}2089}2090#endif20912092ShenandoahVerifier* ShenandoahHeap::verifier() {2093guarantee(ShenandoahVerify, "Should be enabled");2094assert (_verifier != NULL, "sanity");2095return _verifier;2096}20972098ShenandoahUpdateHeapRefsClosure::ShenandoahUpdateHeapRefsClosure() :2099_heap(ShenandoahHeap::heap()) {}21002101class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {2102private:2103ShenandoahHeap* _heap;2104ShenandoahRegionIterator* _regions;2105bool _concurrent;21062107public:2108ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :2109AbstractGangTask("Concurrent Update References Task"),2110_heap(ShenandoahHeap::heap()),2111_regions(regions),2112_concurrent(concurrent) {2113}21142115void work(uint worker_id) {2116ShenandoahConcurrentWorkerSession worker_session(worker_id);2117ShenandoahUpdateHeapRefsClosure cl;2118ShenandoahHeapRegion* r = _regions->next();2119ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();2120while (r != NULL) {2121HeapWord* update_watermark = r->get_update_watermark();2122assert (update_watermark >= r->bottom(), "sanity");2123if (r->is_active() && !r->is_cset()) {2124_heap->marked_object_oop_iterate(r, &cl, update_watermark);2125}2126if (ShenandoahPacing) {2127_heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));2128}2129if (_heap->cancelled_gc()) {2130return;2131}2132r = _regions->next();2133}2134}2135};21362137void ShenandoahHeap::update_heap_references(bool concurrent) {2138ShenandoahUpdateHeapRefsTask task(&_update_refs_iterator, concurrent);2139workers()->run_task(&task);2140}21412142void ShenandoahHeap::op_init_updaterefs() {2143assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");21442145set_evacuation_in_progress(false);21462147if (ShenandoahVerify) {2148if (!is_degenerated_gc_in_progress()) {2149verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);2150}2151verifier()->verify_before_updaterefs();2152}21532154set_update_refs_in_progress(true);21552156{2157ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_prepare);21582159make_parsable(true);21602161// Reset iterator.2162_update_refs_iterator.reset();2163}21642165if (ShenandoahPacing) {2166pacer()->setup_for_updaterefs();2167}2168}21692170class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {2171private:2172ShenandoahHeapLock* const _lock;21732174public:2175ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}21762177void heap_region_do(ShenandoahHeapRegion* r) {2178// Drop unnecessary "pinned" state from regions that does not have CP marks2179// anymore, as this would allow trashing them.21802181if (r->is_active()) {2182if (r->is_pinned()) {2183if (r->pin_count() == 0) {2184ShenandoahHeapLocker locker(_lock);2185r->make_unpinned();2186}2187} else {2188if (r->pin_count() > 0) {2189ShenandoahHeapLocker locker(_lock);2190r->make_pinned();2191}2192}2193}2194}21952196bool is_thread_safe() { return true; }2197};21982199void ShenandoahHeap::op_final_updaterefs() {2200assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");22012202// Check if there is left-over work, and finish it2203if (_update_refs_iterator.has_next()) {2204ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);22052206// Finish updating references where we left off.2207clear_cancelled_gc();2208update_heap_references(false);2209}22102211// Clear cancelled GC, if set. On cancellation path, the block before would handle2212// everything. On degenerated paths, cancelled gc would not be set anyway.2213if (cancelled_gc()) {2214clear_cancelled_gc();2215}2216assert(!cancelled_gc(), "Should have been done right before");22172218if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {2219verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);2220}22212222if (is_degenerated_gc_in_progress()) {2223concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);2224} else {2225concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots);2226}22272228// Has to be done before cset is clear2229if (ShenandoahVerify) {2230verifier()->verify_roots_in_to_space();2231}22322233{2234ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset);2235trash_cset_regions();2236}22372238set_has_forwarded_objects(false);2239set_update_refs_in_progress(false);22402241if (ShenandoahVerify) {2242verifier()->verify_after_updaterefs();2243}22442245if (VerifyAfterGC) {2246Universe::verify();2247}22482249{2250ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_update_region_states);2251ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;2252parallel_heap_region_iterate(&cl);22532254assert_pinned_region_status();2255}22562257{2258ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_rebuild_freeset);2259ShenandoahHeapLocker locker(lock());2260_free_set->rebuild();2261}2262}22632264void ShenandoahHeap::print_extended_on(outputStream *st) const {2265print_on(st);2266print_heap_regions_on(st);2267}22682269bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {2270size_t slice = r->index() / _bitmap_regions_per_slice;22712272size_t regions_from = _bitmap_regions_per_slice * slice;2273size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));2274for (size_t g = regions_from; g < regions_to; g++) {2275assert (g / _bitmap_regions_per_slice == slice, "same slice");2276if (skip_self && g == r->index()) continue;2277if (get_region(g)->is_committed()) {2278return true;2279}2280}2281return false;2282}22832284bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {2285shenandoah_assert_heaplocked();22862287// Bitmaps in special regions do not need commits2288if (_bitmap_region_special) {2289return true;2290}22912292if (is_bitmap_slice_committed(r, true)) {2293// Some other region from the group is already committed, meaning the bitmap2294// slice is already committed, we exit right away.2295return true;2296}22972298// Commit the bitmap slice:2299size_t slice = r->index() / _bitmap_regions_per_slice;2300size_t off = _bitmap_bytes_per_slice * slice;2301size_t len = _bitmap_bytes_per_slice;2302char* start = (char*) _bitmap_region.start() + off;23032304if (!os::commit_memory(start, len, false)) {2305return false;2306}23072308if (AlwaysPreTouch) {2309os::pretouch_memory(start, start + len);2310}23112312return true;2313}23142315bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {2316shenandoah_assert_heaplocked();23172318// Bitmaps in special regions do not need uncommits2319if (_bitmap_region_special) {2320return true;2321}23222323if (is_bitmap_slice_committed(r, true)) {2324// Some other region from the group is still committed, meaning the bitmap2325// slice is should stay committed, exit right away.2326return true;2327}23282329// Uncommit the bitmap slice:2330size_t slice = r->index() / _bitmap_regions_per_slice;2331size_t off = _bitmap_bytes_per_slice * slice;2332size_t len = _bitmap_bytes_per_slice;2333if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {2334return false;2335}2336return true;2337}23382339void ShenandoahHeap::vmop_entry_init_mark() {2340TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());2341ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);23422343try_inject_alloc_failure();2344VM_ShenandoahInitMark op;2345VMThread::execute(&op); // jump to entry_init_mark() under safepoint2346}23472348void ShenandoahHeap::vmop_entry_final_mark() {2349TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());2350ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);23512352try_inject_alloc_failure();2353VM_ShenandoahFinalMarkStartEvac op;2354VMThread::execute(&op); // jump to entry_final_mark under safepoint2355}23562357void ShenandoahHeap::vmop_entry_init_updaterefs() {2358TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());2359ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);23602361try_inject_alloc_failure();2362VM_ShenandoahInitUpdateRefs op;2363VMThread::execute(&op);2364}23652366void ShenandoahHeap::vmop_entry_final_updaterefs() {2367TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());2368ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);23692370try_inject_alloc_failure();2371VM_ShenandoahFinalUpdateRefs op;2372VMThread::execute(&op);2373}23742375void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {2376TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());2377ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);23782379try_inject_alloc_failure();2380VM_ShenandoahFullGC op(cause);2381VMThread::execute(&op);2382}23832384void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {2385TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());2386ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);23872388VM_ShenandoahDegeneratedGC degenerated_gc((int)point);2389VMThread::execute(°enerated_gc);2390}23912392void ShenandoahHeap::entry_init_mark() {2393ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);23942395const char* msg = init_mark_event_message();2396GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());2397EventMark em("%s", msg);23982399ShenandoahWorkerScope scope(workers(),2400ShenandoahWorkerPolicy::calc_workers_for_init_marking(),2401"init marking");24022403op_init_mark();2404}24052406void ShenandoahHeap::entry_final_mark() {2407ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);24082409const char* msg = final_mark_event_message();2410GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());2411EventMark em("%s", msg);24122413ShenandoahWorkerScope scope(workers(),2414ShenandoahWorkerPolicy::calc_workers_for_final_marking(),2415"final marking");24162417op_final_mark();2418}24192420void ShenandoahHeap::entry_init_updaterefs() {2421ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);24222423static const char* msg = "Pause Init Update Refs";2424GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());2425EventMark em("%s", msg);24262427// No workers used in this phase, no setup required24282429op_init_updaterefs();2430}24312432void ShenandoahHeap::entry_final_updaterefs() {2433ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);24342435static const char* msg = "Pause Final Update Refs";2436GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());2437EventMark em("%s", msg);24382439ShenandoahWorkerScope scope(workers(),2440ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),2441"final reference update");24422443op_final_updaterefs();2444}24452446void ShenandoahHeap::entry_full(GCCause::Cause cause) {2447ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);24482449static const char* msg = "Pause Full";2450GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true);2451EventMark em("%s", msg);24522453ShenandoahWorkerScope scope(workers(),2454ShenandoahWorkerPolicy::calc_workers_for_fullgc(),2455"full gc");24562457op_full(cause);2458}24592460void ShenandoahHeap::entry_degenerated(int point) {2461ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);24622463ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;2464const char* msg = degen_event_message(dpoint);2465GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true);2466EventMark em("%s", msg);24672468ShenandoahWorkerScope scope(workers(),2469ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),2470"stw degenerated gc");24712472set_degenerated_gc_in_progress(true);2473op_degenerated(dpoint);2474set_degenerated_gc_in_progress(false);2475}24762477void ShenandoahHeap::entry_mark() {2478TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());24792480const char* msg = conc_mark_event_message();2481GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());2482EventMark em("%s", msg);24832484ShenandoahWorkerScope scope(workers(),2485ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),2486"concurrent marking");24872488try_inject_alloc_failure();2489op_mark();2490}24912492void ShenandoahHeap::entry_evac() {2493ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);2494TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());24952496static const char *msg = "Concurrent evacuation";2497GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());2498EventMark em("%s", msg);24992500ShenandoahWorkerScope scope(workers(),2501ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),2502"concurrent evacuation");25032504try_inject_alloc_failure();2505op_conc_evac();2506}25072508void ShenandoahHeap::entry_updaterefs() {2509ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);25102511static const char* msg = "Concurrent update references";2512GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());2513EventMark em("%s", msg);25142515ShenandoahWorkerScope scope(workers(),2516ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),2517"concurrent reference update");25182519try_inject_alloc_failure();2520op_updaterefs();2521}25222523void ShenandoahHeap::entry_cleanup_early() {2524ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup_early);25252526static const char* msg = "Concurrent cleanup";2527GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);2528EventMark em("%s", msg);25292530// This phase does not use workers, no need for setup25312532try_inject_alloc_failure();2533op_cleanup_early();2534}25352536void ShenandoahHeap::entry_cleanup_complete() {2537ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup_complete);25382539static const char* msg = "Concurrent cleanup";2540GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);2541EventMark em("%s", msg);25422543// This phase does not use workers, no need for setup25442545try_inject_alloc_failure();2546op_cleanup_complete();2547}25482549void ShenandoahHeap::entry_reset() {2550ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset);25512552static const char* msg = "Concurrent reset";2553GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());2554EventMark em("%s", msg);25552556ShenandoahWorkerScope scope(workers(),2557ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),2558"concurrent reset");25592560try_inject_alloc_failure();2561op_reset();2562}25632564void ShenandoahHeap::entry_preclean() {2565if (ShenandoahPreclean && process_references()) {2566ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);25672568static const char* msg = "Concurrent precleaning";2569GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id());2570EventMark em("%s", msg);25712572ShenandoahWorkerScope scope(workers(),2573ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),2574"concurrent preclean",2575/* check_workers = */ false);25762577try_inject_alloc_failure();2578op_preclean();2579}2580}25812582void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {2583static const char *msg = "Concurrent uncommit";2584GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);2585EventMark em("%s", msg);25862587ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);25882589op_uncommit(shrink_before, shrink_until);2590}25912592void ShenandoahHeap::try_inject_alloc_failure() {2593if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {2594_inject_alloc_failure.set();2595os::naked_short_sleep(1);2596if (cancelled_gc()) {2597log_info(gc)("Allocation failure was successfully injected");2598}2599}2600}26012602bool ShenandoahHeap::should_inject_alloc_failure() {2603return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();2604}26052606void ShenandoahHeap::enter_evacuation() {2607_oom_evac_handler.enter_evacuation();2608}26092610void ShenandoahHeap::leave_evacuation() {2611_oom_evac_handler.leave_evacuation();2612}26132614ShenandoahRegionIterator::ShenandoahRegionIterator() :2615_heap(ShenandoahHeap::heap()),2616_index(0) {}26172618ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :2619_heap(heap),2620_index(0) {}26212622void ShenandoahRegionIterator::reset() {2623_index = 0;2624}26252626bool ShenandoahRegionIterator::has_next() const {2627return _index < (jint)_heap->num_regions();2628}26292630char ShenandoahHeap::gc_state() {2631return _gc_state.raw_value();2632}26332634const char* ShenandoahHeap::init_mark_event_message() const {2635assert(!has_forwarded_objects(), "Should not have forwarded objects here");26362637bool proc_refs = process_references();2638bool unload_cls = unload_classes();26392640if (proc_refs && unload_cls) {2641return "Pause Init Mark (process weakrefs) (unload classes)";2642} else if (proc_refs) {2643return "Pause Init Mark (process weakrefs)";2644} else if (unload_cls) {2645return "Pause Init Mark (unload classes)";2646} else {2647return "Pause Init Mark";2648}2649}26502651const char* ShenandoahHeap::final_mark_event_message() const {2652assert(!has_forwarded_objects(), "Should not have forwarded objects here");26532654bool proc_refs = process_references();2655bool unload_cls = unload_classes();26562657if (proc_refs && unload_cls) {2658return "Pause Final Mark (process weakrefs) (unload classes)";2659} else if (proc_refs) {2660return "Pause Final Mark (process weakrefs)";2661} else if (unload_cls) {2662return "Pause Final Mark (unload classes)";2663} else {2664return "Pause Final Mark";2665}2666}26672668const char* ShenandoahHeap::conc_mark_event_message() const {2669assert(!has_forwarded_objects(), "Should not have forwarded objects here");26702671bool proc_refs = process_references();2672bool unload_cls = unload_classes();26732674if (proc_refs && unload_cls) {2675return "Concurrent marking (process weakrefs) (unload classes)";2676} else if (proc_refs) {2677return "Concurrent marking (process weakrefs)";2678} else if (unload_cls) {2679return "Concurrent marking (unload classes)";2680} else {2681return "Concurrent marking";2682}2683}26842685const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {2686switch (point) {2687case _degenerated_unset:2688return "Pause Degenerated GC (<UNSET>)";2689case _degenerated_outside_cycle:2690return "Pause Degenerated GC (Outside of Cycle)";2691case _degenerated_mark:2692return "Pause Degenerated GC (Mark)";2693case _degenerated_evac:2694return "Pause Degenerated GC (Evacuation)";2695case _degenerated_updaterefs:2696return "Pause Degenerated GC (Update Refs)";2697default:2698ShouldNotReachHere();2699return "ERROR";2700}2701}27022703ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {2704#ifdef ASSERT2705assert(_liveness_cache != NULL, "sanity");2706assert(worker_id < _max_workers, "sanity");2707for (uint i = 0; i < num_regions(); i++) {2708assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");2709}2710#endif2711return _liveness_cache[worker_id];2712}27132714void ShenandoahHeap::flush_liveness_cache(uint worker_id) {2715assert(worker_id < _max_workers, "sanity");2716assert(_liveness_cache != NULL, "sanity");2717ShenandoahLiveData* ld = _liveness_cache[worker_id];2718for (uint i = 0; i < num_regions(); i++) {2719ShenandoahLiveData live = ld[i];2720if (live > 0) {2721ShenandoahHeapRegion* r = get_region(i);2722r->increase_live_data_gc_words(live);2723ld[i] = 0;2724}2725}2726}272727282729