Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/bufferingOopClosure.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1EvacFailure.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1RootProcessor.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/allocation.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"

size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;

// turn it on so that the contents of the young list (scan-only /
// to-be-collected) are printed at "strategic" points before / during
// / after the collection --- this is useful for debugging
#define YOUNG_LIST_VERBOSE 0
// CURRENT STATUS
// This file is under construction. Search for "FIXME".

// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the HeapLock. This happens in mem_allocate
// and allocate_new_tlab, which are the "entry" points to the
// allocation code from the rest of the JVM. (Note that this does not
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)

// Local to this file.

class RefineCardTableEntryClosure: public CardTableEntryClosure {
  bool _concurrent;
public:
  RefineCardTableEntryClosure() : _concurrent(true) { }

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
    // This path is executed by the concurrent refine or mutator threads,
    // concurrently, and so we do not care if card_ptr contains references
    // that point into the collection set.
    assert(!oops_into_cset, "should be");

    if (_concurrent && SuspendibleThreadSet::should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }

  void set_concurrent(bool b) { _concurrent = b; }
};


class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
  size_t _num_processed;
  CardTableModRefBS* _ctbs;
  int _histo[256];

public:
  ClearLoggedCardTableEntryClosure() :
    _num_processed(0), _ctbs(G1CollectedHeap::heap()->g1_barrier_set())
  {
    for (int i = 0; i < 256; i++) _histo[i] = 0;
  }

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    unsigned char* ujb = (unsigned char*)card_ptr;
    int ind = (int)(*ujb);
    _histo[ind]++;

    *card_ptr = (jbyte)CardTableModRefBS::clean_card_val();
    _num_processed++;

    return true;
  }

  size_t num_processed() { return _num_processed; }

  void print_histo() {
    gclog_or_tty->print_cr("Card table value histogram:");
    for (int i = 0; i < 256; i++) {
      if (_histo[i] != 0) {
        gclog_or_tty->print_cr(" %d: %d", i, _histo[i]);
      }
    }
  }
};

class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
private:
  size_t _num_processed;

public:
  RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { }

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    *card_ptr = CardTableModRefBS::dirty_card_val();
    _num_processed++;
    return true;
  }

  size_t num_processed() const { return _num_processed; }
};

YoungList::YoungList(G1CollectedHeap* g1h) :
    _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
  guarantee(check_list_empty(false), "just making sure...");
}

void YoungList::push_region(HeapRegion *hr) {
  assert(!hr->is_young(), "should not already be young");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_head);
  _head = hr;

  _g1h->g1_policy()->set_region_eden(hr, (int) _length);
  ++_length;
}

void YoungList::add_survivor_region(HeapRegion* hr) {
  assert(hr->is_survivor(), "should be flagged as survivor region");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_survivor_head);
  if (_survivor_head == NULL) {
    _survivor_tail = hr;
  }
  _survivor_head = hr;
  ++_survivor_length;
}

void YoungList::empty_list(HeapRegion* list) {
  while (list != NULL) {
    HeapRegion* next =
list->get_next_young_region();193list->set_next_young_region(NULL);194list->uninstall_surv_rate_group();195// This is called before a Full GC and all the non-empty /196// non-humongous regions at the end of the Full GC will end up as197// old anyway.198list->set_old();199list = next;200}201}202203void YoungList::empty_list() {204assert(check_list_well_formed(), "young list should be well formed");205206empty_list(_head);207_head = NULL;208_length = 0;209210empty_list(_survivor_head);211_survivor_head = NULL;212_survivor_tail = NULL;213_survivor_length = 0;214215_last_sampled_rs_lengths = 0;216217assert(check_list_empty(false), "just making sure...");218}219220bool YoungList::check_list_well_formed() {221bool ret = true;222223uint length = 0;224HeapRegion* curr = _head;225HeapRegion* last = NULL;226while (curr != NULL) {227if (!curr->is_young()) {228gclog_or_tty->print_cr("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "229"incorrectly tagged (y: %d, surv: %d)",230p2i(curr->bottom()), p2i(curr->end()),231curr->is_young(), curr->is_survivor());232ret = false;233}234++length;235last = curr;236curr = curr->get_next_young_region();237}238ret = ret && (length == _length);239240if (!ret) {241gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");242gclog_or_tty->print_cr("### list has %u entries, _length is %u",243length, _length);244}245246return ret;247}248249bool YoungList::check_list_empty(bool check_sample) {250bool ret = true;251252if (_length != 0) {253gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",254_length);255ret = false;256}257if (check_sample && _last_sampled_rs_lengths != 0) {258gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");259ret = false;260}261if (_head != NULL) {262gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");263ret = false;264}265if (!ret) {266gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");267}268269return ret;270}271272void273YoungList::rs_length_sampling_init() {274_sampled_rs_lengths = 0;275_curr = _head;276}277278bool279YoungList::rs_length_sampling_more() {280return _curr != NULL;281}282283void284YoungList::rs_length_sampling_next() {285assert( _curr != NULL, "invariant" );286size_t rs_length = _curr->rem_set()->occupied();287288_sampled_rs_lengths += rs_length;289290// The current region may not yet have been added to the291// incremental collection set (it gets added when it is292// retired as the current allocation region).293if (_curr->in_collection_set()) {294// Update the collection set policy information for this region295_g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);296}297298_curr = _curr->get_next_young_region();299if (_curr == NULL) {300_last_sampled_rs_lengths = _sampled_rs_lengths;301// gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);302}303}304305void306YoungList::reset_auxilary_lists() {307guarantee( is_empty(), "young list should be empty" );308assert(check_list_well_formed(), "young list should be well formed");309310// Add survivor regions to SurvRateGroup.311_g1h->g1_policy()->note_start_adding_survivor_regions();312_g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);313314int young_index_in_cset = 0;315for (HeapRegion* curr = _survivor_head;316curr != NULL;317curr = curr->get_next_young_region()) {318_g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);319320// The region is a non-empty survivor so let's add it to321// the incremental collection set for the next 
evacuation322// pause.323_g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);324young_index_in_cset += 1;325}326assert((uint) young_index_in_cset == _survivor_length, "post-condition");327_g1h->g1_policy()->note_stop_adding_survivor_regions();328329_head = _survivor_head;330_length = _survivor_length;331if (_survivor_head != NULL) {332assert(_survivor_tail != NULL, "cause it shouldn't be");333assert(_survivor_length > 0, "invariant");334_survivor_tail->set_next_young_region(NULL);335}336337// Don't clear the survivor list handles until the start of338// the next evacuation pause - we need it in order to re-tag339// the survivor regions from this evacuation pause as 'young'340// at the start of the next.341342_g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);343344assert(check_list_well_formed(), "young list should be well formed");345}346347void YoungList::print() {348HeapRegion* lists[] = {_head, _survivor_head};349const char* names[] = {"YOUNG", "SURVIVOR"};350351for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {352gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);353HeapRegion *curr = lists[list];354if (curr == NULL)355gclog_or_tty->print_cr(" empty");356while (curr != NULL) {357gclog_or_tty->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",358HR_FORMAT_PARAMS(curr),359p2i(curr->prev_top_at_mark_start()),360p2i(curr->next_top_at_mark_start()),361curr->age_in_surv_rate_group_cond());362curr = curr->get_next_young_region();363}364}365366gclog_or_tty->cr();367}368369void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {370OtherRegionsTable::invalidate(start_idx, num_regions);371}372373void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {374// The from card cache is not the memory that is actually committed. 
So we cannot375// take advantage of the zero_filled parameter.376reset_from_card_cache(start_idx, num_regions);377}378379void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)380{381// Claim the right to put the region on the dirty cards region list382// by installing a self pointer.383HeapRegion* next = hr->get_next_dirty_cards_region();384if (next == NULL) {385HeapRegion* res = (HeapRegion*)386Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),387NULL);388if (res == NULL) {389HeapRegion* head;390do {391// Put the region to the dirty cards region list.392head = _dirty_cards_region_list;393next = (HeapRegion*)394Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);395if (next == head) {396assert(hr->get_next_dirty_cards_region() == hr,397"hr->get_next_dirty_cards_region() != hr");398if (next == NULL) {399// The last region in the list points to itself.400hr->set_next_dirty_cards_region(hr);401} else {402hr->set_next_dirty_cards_region(next);403}404}405} while (next != head);406}407}408}409410HeapRegion* G1CollectedHeap::pop_dirty_cards_region()411{412HeapRegion* head;413HeapRegion* hr;414do {415head = _dirty_cards_region_list;416if (head == NULL) {417return NULL;418}419HeapRegion* new_head = head->get_next_dirty_cards_region();420if (head == new_head) {421// The last region.422new_head = NULL;423}424hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,425head);426} while (hr != head);427assert(hr != NULL, "invariant");428hr->set_next_dirty_cards_region(NULL);429return hr;430}431432#ifdef ASSERT433// A region is added to the collection set as it is retired434// so an address p can point to a region which will be in the435// collection set but has not yet been retired. This method436// therefore is only accurate during a GC pause after all437// regions have been retired. It is used for debugging438// to check if an nmethod has references to objects that can439// be move during a partial collection. 
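// The push_dirty_cards_region() / pop_dirty_cards_region() pair above is a
// lock-free intrusive stack: a thread first claims a region by CAS-ing a self
// pointer into its "next" field (NULL means "not on the list", the last
// element points to itself), and only then links it onto the shared head.
// Below is a minimal sketch of that idea in C++11 std::atomic terms; the
// Region type, the simpler "link before publish" ordering, and every name are
// illustrative assumptions, not HotSpot code, so the sketch is compiled out.
#if 0
#include <atomic>

struct Region {
  std::atomic<Region*> next_dirty{nullptr};    // nullptr == not on the list
};

static std::atomic<Region*> g_dirty_list{nullptr};

static void push_dirty_region(Region* r) {
  Region* expected = nullptr;
  // Claim the right to enqueue this region by installing a self pointer.
  if (!r->next_dirty.compare_exchange_strong(expected, r)) {
    return;                                    // another thread already enqueued it
  }
  Region* head = g_dirty_list.load();
  do {
    // Link to the current head, or to ourselves if the list is empty, so a
    // non-null next field always means "on the list".
    r->next_dirty.store(head != nullptr ? head : r);
  } while (!g_dirty_list.compare_exchange_weak(head, r));
}
#endif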
Though it can be440// inaccurate, it is sufficient for G1 because the conservative441// implementation of is_scavengable() for G1 will indicate that442// all nmethods must be scanned during a partial collection.443bool G1CollectedHeap::is_in_partial_collection(const void* p) {444if (p == NULL) {445return false;446}447return heap_region_containing(p)->in_collection_set();448}449#endif450451// Returns true if the reference points to an object that452// can move in an incremental collection.453bool G1CollectedHeap::is_scavengable(const void* p) {454HeapRegion* hr = heap_region_containing(p);455return !hr->isHumongous();456}457458void G1CollectedHeap::check_ct_logs_at_safepoint() {459DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();460CardTableModRefBS* ct_bs = g1_barrier_set();461462// Count the dirty cards at the start.463CountNonCleanMemRegionClosure count1(this);464ct_bs->mod_card_iterate(&count1);465int orig_count = count1.n();466467// First clear the logged cards.468ClearLoggedCardTableEntryClosure clear;469dcqs.apply_closure_to_all_completed_buffers(&clear);470dcqs.iterate_closure_all_threads(&clear, false);471clear.print_histo();472473// Now ensure that there's no dirty cards.474CountNonCleanMemRegionClosure count2(this);475ct_bs->mod_card_iterate(&count2);476if (count2.n() != 0) {477gclog_or_tty->print_cr("Card table has %d entries; %d originally",478count2.n(), orig_count);479}480guarantee(count2.n() == 0, "Card table should be clean.");481482RedirtyLoggedCardTableEntryClosure redirty;483dcqs.apply_closure_to_all_completed_buffers(&redirty);484dcqs.iterate_closure_all_threads(&redirty, false);485gclog_or_tty->print_cr("Log entries = " SIZE_FORMAT ", dirty cards = %d.",486clear.num_processed(), orig_count);487guarantee(redirty.num_processed() == clear.num_processed(),488err_msg("Redirtied " SIZE_FORMAT " cards, bug cleared " SIZE_FORMAT,489redirty.num_processed(), clear.num_processed()));490491CountNonCleanMemRegionClosure count3(this);492ct_bs->mod_card_iterate(&count3);493if (count3.n() != orig_count) {494gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",495orig_count, count3.n());496guarantee(count3.n() >= orig_count, "Should have restored them all.");497}498}499500// Private class members.501502G1CollectedHeap* G1CollectedHeap::_g1h;503504// Private methods.505506HeapRegion*507G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {508MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);509while (!_secondary_free_list.is_empty() || free_regions_coming()) {510if (!_secondary_free_list.is_empty()) {511if (G1ConcRegionFreeingVerbose) {512gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "513"secondary_free_list has %u entries",514_secondary_free_list.length());515}516// It looks as if there are free regions available on the517// secondary_free_list. 
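// The loop in new_region_try_secondary_free_list() around this point waits on
// SecondaryFreeList_lock until either the cleanup path has published more free
// regions or it signals that no more are coming. A generic consumer-side
// sketch of that pattern with standard primitives; the names, the int stand-in
// for a region, and the C++11 types are assumptions, so it is compiled out.
#if 0
#include <condition_variable>
#include <deque>
#include <mutex>

static std::mutex              g_lock;
static std::condition_variable g_cv;
static std::deque<int>         g_free_regions;   // stand-in for the secondary free list
static bool                    g_more_coming = true;

// Producers push regions (or clear g_more_coming) under g_lock and notify.
// Returns a region, or -1 once the list is empty and no more will arrive.
static int take_free_region() {
  std::unique_lock<std::mutex> x(g_lock);
  while (g_free_regions.empty() && g_more_coming) {
    g_cv.wait(x);      // woken when (a) no more are coming or (b) regions were added
  }
  if (g_free_regions.empty()) return -1;
  int region = g_free_regions.front();
  g_free_regions.pop_front();
  return region;
}
#endif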
Let's move them to the free_list and try518// again to allocate from it.519append_secondary_free_list();520521assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "522"empty we should have moved at least one entry to the free_list");523HeapRegion* res = _hrm.allocate_free_region(is_old);524if (G1ConcRegionFreeingVerbose) {525gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "526"allocated " HR_FORMAT " from secondary_free_list",527HR_FORMAT_PARAMS(res));528}529return res;530}531532// Wait here until we get notified either when (a) there are no533// more free regions coming or (b) some regions have been moved on534// the secondary_free_list.535SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);536}537538if (G1ConcRegionFreeingVerbose) {539gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "540"could not allocate from secondary_free_list");541}542return NULL;543}544545HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {546assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,547"the only time we use this to allocate a humongous region is "548"when we are allocating a single humongous region");549550HeapRegion* res;551if (G1StressConcRegionFreeing) {552if (!_secondary_free_list.is_empty()) {553if (G1ConcRegionFreeingVerbose) {554gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "555"forced to look at the secondary_free_list");556}557res = new_region_try_secondary_free_list(is_old);558if (res != NULL) {559return res;560}561}562}563564res = _hrm.allocate_free_region(is_old);565566if (res == NULL) {567if (G1ConcRegionFreeingVerbose) {568gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "569"res == NULL, trying the secondary_free_list");570}571res = new_region_try_secondary_free_list(is_old);572}573if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {574// Currently, only attempts to allocate GC alloc regions set575// do_expand to true. So, we should only reach here during a576// safepoint. If this assumption changes we might have to577// reconsider the use of _expand_heap_after_alloc_failure.578assert(SafepointSynchronize::is_at_safepoint(), "invariant");579580ergo_verbose1(ErgoHeapSizing,581"attempt heap expansion",582ergo_format_reason("region allocation request failed")583ergo_format_byte("allocation request"),584word_size * HeapWordSize);585if (expand(word_size * HeapWordSize)) {586// Given that expand() succeeded in expanding the heap, and we587// always expand the heap by an amount aligned to the heap588// region size, the free list should in theory not be empty.589// In either case allocate_free_region() will check for NULL.590res = _hrm.allocate_free_region(is_old);591} else {592_expand_heap_after_alloc_failure = false;593}594}595return res;596}597598HeapWord*599G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,600uint num_regions,601size_t word_size,602AllocationContext_t context) {603assert(first != G1_NO_HRM_INDEX, "pre-condition");604assert(isHumongous(word_size), "word_size should be humongous");605assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");606607// Index of last region in the series + 1.608uint last = first + num_regions;609610// We need to initialize the region(s) we just discovered. 
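// The pre-conditions just above tie word_size to num_regions. A small sketch
// of that sizing arithmetic: grain_words stands in for HeapRegion::GrainWords,
// the half-region threshold mirrors _humongous_object_threshold_in_words, and
// the helper names are illustrative, not HotSpot's, so the sketch is compiled out.
#if 0
#include <cstddef>

static bool is_humongous(size_t word_size, size_t grain_words) {
  return word_size >= grain_words / 2;                  // at least half a region
}

static size_t regions_needed(size_t word_size, size_t grain_words) {
  return (word_size + grain_words - 1) / grain_words;   // round up to whole regions
}

// e.g. with grain_words == 1M words, a 2.5M-word array is humongous and needs
// 3 regions; the first becomes "starts humongous", the rest "continues humongous".
#endif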
This is611// a bit tricky given that it can happen concurrently with612// refinement threads refining cards on these regions and613// potentially wanting to refine the BOT as they are scanning614// those cards (this can happen shortly after a cleanup; see CR615// 6991377). So we have to set up the region(s) carefully and in616// a specific order.617618// The word size sum of all the regions we will allocate.619size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;620assert(word_size <= word_size_sum, "sanity");621622// This will be the "starts humongous" region.623HeapRegion* first_hr = region_at(first);624// The header of the new object will be placed at the bottom of625// the first region.626HeapWord* new_obj = first_hr->bottom();627// This will be the new end of the first region in the series that628// should also match the end of the last region in the series.629HeapWord* new_end = new_obj + word_size_sum;630// This will be the new top of the first region that will reflect631// this allocation.632HeapWord* new_top = new_obj + word_size;633634// First, we need to zero the header of the space that we will be635// allocating. When we update top further down, some refinement636// threads might try to scan the region. By zeroing the header we637// ensure that any thread that will try to scan the region will638// come across the zero klass word and bail out.639//640// NOTE: It would not have been correct to have used641// CollectedHeap::fill_with_object() and make the space look like642// an int array. The thread that is doing the allocation will643// later update the object header to a potentially different array644// type and, for a very short period of time, the klass and length645// fields will be inconsistent. This could cause a refinement646// thread to calculate the object size incorrectly.647Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);648649// We will set up the first region as "starts humongous". This650// will also update the BOT covering all the regions to reflect651// that there is a single object that starts at the bottom of the652// first region.653first_hr->set_startsHumongous(new_top, new_end);654first_hr->set_allocation_context(context);655// Then, if there are any, we will set up the "continues656// humongous" regions.657HeapRegion* hr = NULL;658for (uint i = first + 1; i < last; ++i) {659hr = region_at(i);660hr->set_continuesHumongous(first_hr);661hr->set_allocation_context(context);662}663// If we have "continues humongous" regions (hr != NULL), then the664// end of the last one should match new_end.665assert(hr == NULL || hr->end() == new_end, "sanity");666667// Up to this point no concurrent thread would have been able to668// do any scanning on any region in this series. All the top669// fields still point to bottom, so the intersection between670// [bottom,top] and [card_start,card_end] will be empty. 
Before we671// update the top fields, we'll do a storestore to make sure that672// no thread sees the update to top before the zeroing of the673// object header and the BOT initialization.674OrderAccess::storestore();675676// Now that the BOT and the object header have been initialized,677// we can update top of the "starts humongous" region.678assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),679"new_top should be in this region");680first_hr->set_top(new_top);681if (_hr_printer.is_active()) {682HeapWord* bottom = first_hr->bottom();683HeapWord* end = first_hr->orig_end();684if ((first + 1) == last) {685// the series has a single humongous region686_hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);687} else {688// the series has more than one humongous regions689_hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);690}691}692693// Now, we will update the top fields of the "continues humongous"694// regions. The reason we need to do this is that, otherwise,695// these regions would look empty and this will confuse parts of696// G1. For example, the code that looks for a consecutive number697// of empty regions will consider them empty and try to698// re-allocate them. We can extend is_empty() to also include699// !continuesHumongous(), but it is easier to just update the top700// fields here. The way we set top for all regions (i.e., top ==701// end for all regions but the last one, top == new_top for the702// last one) is actually used when we will free up the humongous703// region in free_humongous_region().704hr = NULL;705for (uint i = first + 1; i < last; ++i) {706hr = region_at(i);707if ((i + 1) == last) {708// last continues humongous region709assert(hr->bottom() < new_top && new_top <= hr->end(),710"new_top should fall on this region");711hr->set_top(new_top);712_hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);713} else {714// not last one715assert(new_top > hr->end(), "new_top should be above this region");716hr->set_top(hr->end());717_hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());718}719}720// If we have continues humongous regions (hr != NULL), then the721// end of the last one should match new_end and its top should722// match new_top.723assert(hr == NULL ||724(hr->end() == new_end && hr->top() == new_top), "sanity");725check_bitmaps("Humongous Region Allocation", first_hr);726727assert(first_hr->used() == word_size * HeapWordSize, "invariant");728_allocator->increase_used(first_hr->used());729_humongous_set.add(first_hr);730731return new_obj;732}733734// If could fit into free regions w/o expansion, try.735// Otherwise, if can expand, do so.736// Otherwise, if using ex regions might help, try with ex given back.737HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {738assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);739740verify_region_sets_optional();741742uint first = G1_NO_HRM_INDEX;743uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);744745if (obj_regions == 1) {746// Only one region to allocate, try to use a fast path by directly allocating747// from the free lists. 
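// The initialization order used above (zero the object header, issue a
// store-store barrier, only then publish the new top) is what lets concurrent
// refinement threads scan safely. A compressed sketch of that publication
// idiom with C++11 std::atomic; the FakeRegion type and byte-based sizes are
// illustrative assumptions, readers are assumed to load top with acquire
// semantics (the counterpart of the storestore/loadload pair), and the sketch
// is compiled out.
#if 0
#include <atomic>
#include <cstring>

struct FakeRegion {
  char                payload[1024];
  std::atomic<char*>  top;                       // scanners only look at [bottom, top)
  FakeRegion() : top(payload) {}
  char* bottom() { return payload; }
};

static void publish_object(FakeRegion* r, size_t header_bytes, size_t obj_bytes) {
  std::memset(r->bottom(), 0, header_bytes);     // format the header while top == bottom
  r->top.store(r->bottom() + obj_bytes,          // then expose it: anyone who observes the
               std::memory_order_release);       // new top also observes the zeroed header
}
#endif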
Do not try to expand here, we will potentially do that748// later.749HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);750if (hr != NULL) {751first = hr->hrm_index();752}753} else {754// We can't allocate humongous regions spanning more than one region while755// cleanupComplete() is running, since some of the regions we find to be756// empty might not yet be added to the free list. It is not straightforward757// to know in which list they are on so that we can remove them. We only758// need to do this if we need to allocate more than one region to satisfy the759// current humongous allocation request. If we are only allocating one region760// we use the one-region region allocation code (see above), that already761// potentially waits for regions from the secondary free list.762wait_while_free_regions_coming();763append_secondary_free_list_if_not_empty_with_lock();764765// Policy: Try only empty regions (i.e. already committed first). Maybe we766// are lucky enough to find some.767first = _hrm.find_contiguous_only_empty(obj_regions);768if (first != G1_NO_HRM_INDEX) {769_hrm.allocate_free_regions_starting_at(first, obj_regions);770}771}772773if (first == G1_NO_HRM_INDEX) {774// Policy: We could not find enough regions for the humongous object in the775// free list. Look through the heap to find a mix of free and uncommitted regions.776// If so, try expansion.777first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);778if (first != G1_NO_HRM_INDEX) {779// We found something. Make sure these regions are committed, i.e. expand780// the heap. Alternatively we could do a defragmentation GC.781ergo_verbose1(ErgoHeapSizing,782"attempt heap expansion",783ergo_format_reason("humongous allocation request failed")784ergo_format_byte("allocation request"),785word_size * HeapWordSize);786787_hrm.expand_at(first, obj_regions);788g1_policy()->record_new_heap_size(num_regions());789790#ifdef ASSERT791for (uint i = first; i < first + obj_regions; ++i) {792HeapRegion* hr = region_at(i);793assert(hr->is_free(), "sanity");794assert(hr->is_empty(), "sanity");795assert(is_on_master_free_list(hr), "sanity");796}797#endif798_hrm.allocate_free_regions_starting_at(first, obj_regions);799} else {800// Policy: Potentially trigger a defragmentation GC.801}802}803804HeapWord* result = NULL;805if (first != G1_NO_HRM_INDEX) {806result = humongous_obj_allocate_initialize_regions(first, obj_regions,807word_size, context);808assert(result != NULL, "it should always return a valid result");809810// A successful humongous object allocation changes the used space811// information of the old generation so we need to recalculate the812// sizes and update the jstat counters here.813g1mm()->update_sizes();814}815816verify_region_sets_optional();817818return result;819}820821HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {822assert_heap_not_locked_and_not_at_safepoint();823assert(!isHumongous(word_size), "we do not allow humongous TLABs");824825uint dummy_gc_count_before;826uint dummy_gclocker_retry_count = 0;827return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);828}829830HeapWord*831G1CollectedHeap::mem_allocate(size_t word_size,832bool* gc_overhead_limit_was_exceeded) {833assert_heap_not_locked_and_not_at_safepoint();834835// Loop until the allocation is satisfied, or unsatisfied after GC.836for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {837uint gc_count_before;838839HeapWord* result = NULL;840if 
(!isHumongous(word_size)) {841result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);842} else {843result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);844}845if (result != NULL) {846return result;847}848849// Create the garbage collection operation...850VM_G1CollectForAllocation op(gc_count_before, word_size);851op.set_allocation_context(AllocationContext::current());852853// ...and get the VM thread to execute it.854VMThread::execute(&op);855856if (op.prologue_succeeded() && op.pause_succeeded()) {857// If the operation was successful we'll return the result even858// if it is NULL. If the allocation attempt failed immediately859// after a Full GC, it's unlikely we'll be able to allocate now.860HeapWord* result = op.result();861if (result != NULL && !isHumongous(word_size)) {862// Allocations that take place on VM operations do not do any863// card dirtying and we have to do it here. We only have to do864// this for non-humongous allocations, though.865dirty_young_block(result, word_size);866}867return result;868} else {869if (gclocker_retry_count > GCLockerRetryAllocationCount) {870return NULL;871}872assert(op.result() == NULL,873"the result should be NULL if the VM op did not succeed");874}875876// Give a warning if we seem to be looping forever.877if ((QueuedAllocationWarningCount > 0) &&878(try_count % QueuedAllocationWarningCount == 0)) {879warning("G1CollectedHeap::mem_allocate retries %d times", try_count);880}881}882883ShouldNotReachHere();884return NULL;885}886887HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,888AllocationContext_t context,889uint* gc_count_before_ret,890uint* gclocker_retry_count_ret) {891// Make sure you read the note in attempt_allocation_humongous().892893assert_heap_not_locked_and_not_at_safepoint();894assert(!isHumongous(word_size), "attempt_allocation_slow() should not "895"be called for humongous allocation requests");896897// We should only get here after the first-level allocation attempt898// (attempt_allocation()) failed to allocate.899900// We will loop until a) we manage to successfully perform the901// allocation or b) we successfully schedule a collection which902// fails to perform the allocation. b) is the only case when we'll903// return NULL.904HeapWord* result = NULL;905for (int try_count = 1; /* we'll return */; try_count += 1) {906bool should_try_gc;907uint gc_count_before;908909{910MutexLockerEx x(Heap_lock);911result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,912false /* bot_updates */);913if (result != NULL) {914return result;915}916917// If we reach here, attempt_allocation_locked() above failed to918// allocate a new region. So the mutator alloc region should be NULL.919assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");920921if (GC_locker::is_active_and_needs_gc()) {922if (g1_policy()->can_expand_young_list()) {923// No need for an ergo verbose message here,924// can_expand_young_list() does this when it returns true.925result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,926false /* bot_updates */);927if (result != NULL) {928return result;929}930}931should_try_gc = false;932} else {933// The GCLocker may not be active but the GCLocker initiated934// GC may not yet have been performed (GCLocker::needs_gc()935// returns true). 
In this case we do not try this GC and936// wait until the GCLocker initiated GC is performed, and937// then retry the allocation.938if (GC_locker::needs_gc()) {939should_try_gc = false;940} else {941// Read the GC count while still holding the Heap_lock.942gc_count_before = total_collections();943should_try_gc = true;944}945}946}947948if (should_try_gc) {949bool succeeded;950result = do_collection_pause(word_size, gc_count_before, &succeeded,951GCCause::_g1_inc_collection_pause);952if (result != NULL) {953assert(succeeded, "only way to get back a non-NULL result");954return result;955}956957if (succeeded) {958// If we get here we successfully scheduled a collection which959// failed to allocate. No point in trying to allocate960// further. We'll just return NULL.961MutexLockerEx x(Heap_lock);962*gc_count_before_ret = total_collections();963return NULL;964}965} else {966if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {967MutexLockerEx x(Heap_lock);968*gc_count_before_ret = total_collections();969return NULL;970}971// The GCLocker is either active or the GCLocker initiated972// GC has not yet been performed. Stall until it is and973// then retry the allocation.974GC_locker::stall_until_clear();975(*gclocker_retry_count_ret) += 1;976}977978// We can reach here if we were unsuccessful in scheduling a979// collection (because another thread beat us to it) or if we were980// stalled due to the GC locker. In either can we should retry the981// allocation attempt in case another thread successfully982// performed a collection and reclaimed enough space. We do the983// first attempt (without holding the Heap_lock) here and the984// follow-on attempt will be at the start of the next loop985// iteration (after taking the Heap_lock).986result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,987false /* bot_updates */);988if (result != NULL) {989return result;990}991992// Give a warning if we seem to be looping forever.993if ((QueuedAllocationWarningCount > 0) &&994(try_count % QueuedAllocationWarningCount == 0)) {995warning("G1CollectedHeap::attempt_allocation_slow() "996"retries %d times", try_count);997}998}9991000ShouldNotReachHere();1001return NULL;1002}10031004HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,1005uint* gc_count_before_ret,1006uint* gclocker_retry_count_ret) {1007// The structure of this method has a lot of similarities to1008// attempt_allocation_slow(). The reason these two were not merged1009// into a single one is that such a method would require several "if1010// allocation is not humongous do this, otherwise do that"1011// conditional paths which would obscure its flow. In fact, an early1012// version of this code did use a unified method which was harder to1013// follow and, as a result, it had subtle bugs that were hard to1014// track down. So keeping these two methods separate allows each to1015// be more readable. It will be good to keep these two in sync as1016// much as possible.10171018assert_heap_not_locked_and_not_at_safepoint();1019assert(isHumongous(word_size), "attempt_allocation_humongous() "1020"should only be called for humongous allocations");10211022// Humongous objects can exhaust the heap quickly, so we should check if we1023// need to start a marking cycle at each humongous object allocation. We do1024// the check before we do the actual allocation. 
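// attempt_allocation_slow() above and attempt_allocation_humongous() below
// share one retry protocol: allocate, otherwise either schedule a pause or
// stall on the GC locker, and give up once a successfully scheduled pause
// still cannot make room. A stripped-down sketch of just that control flow;
// every helper named here is a hypothetical stand-in for the real code, so
// the sketch is compiled out.
#if 0
#include <cstddef>

void* try_alloc(size_t bytes);                   // stand-in for the mutator alloc region
bool  gc_can_be_scheduled();                     // stand-in for the GC locker checks
void* schedule_pause(size_t bytes, bool* ran);   // stand-in for do_collection_pause()
void  stall_until_gc_locker_clears();            // stand-in for GC_locker::stall_until_clear()

void* allocate_slow(size_t bytes, int max_gclocker_retries) {
  int gclocker_retries = 0;
  for (;;) {
    if (void* p = try_alloc(bytes)) return p;

    if (gc_can_be_scheduled()) {
      bool ran = false;
      if (void* p = schedule_pause(bytes, &ran)) return p;
      if (ran) return nullptr;                   // a pause ran and still no room: give up
    } else {
      if (++gclocker_retries > max_gclocker_retries) return nullptr;
      stall_until_gc_locker_clears();            // then loop and retry the allocation
    }
  }
}
#endif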
The reason for doing it1025// before the allocation is that we avoid having to keep track of the newly1026// allocated memory while we do a GC.1027if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",1028word_size)) {1029collect(GCCause::_g1_humongous_allocation);1030}10311032// We will loop until a) we manage to successfully perform the1033// allocation or b) we successfully schedule a collection which1034// fails to perform the allocation. b) is the only case when we'll1035// return NULL.1036HeapWord* result = NULL;1037for (int try_count = 1; /* we'll return */; try_count += 1) {1038bool should_try_gc;1039uint gc_count_before;10401041{1042MutexLockerEx x(Heap_lock);10431044// Given that humongous objects are not allocated in young1045// regions, we'll first try to do the allocation without doing a1046// collection hoping that there's enough space in the heap.1047result = humongous_obj_allocate(word_size, AllocationContext::current());1048if (result != NULL) {1049return result;1050}10511052if (GC_locker::is_active_and_needs_gc()) {1053should_try_gc = false;1054} else {1055// The GCLocker may not be active but the GCLocker initiated1056// GC may not yet have been performed (GCLocker::needs_gc()1057// returns true). In this case we do not try this GC and1058// wait until the GCLocker initiated GC is performed, and1059// then retry the allocation.1060if (GC_locker::needs_gc()) {1061should_try_gc = false;1062} else {1063// Read the GC count while still holding the Heap_lock.1064gc_count_before = total_collections();1065should_try_gc = true;1066}1067}1068}10691070if (should_try_gc) {1071// If we failed to allocate the humongous object, we should try to1072// do a collection pause (if we're allowed) in case it reclaims1073// enough space for the allocation to succeed after the pause.10741075bool succeeded;1076result = do_collection_pause(word_size, gc_count_before, &succeeded,1077GCCause::_g1_humongous_allocation);1078if (result != NULL) {1079assert(succeeded, "only way to get back a non-NULL result");1080return result;1081}10821083if (succeeded) {1084// If we get here we successfully scheduled a collection which1085// failed to allocate. No point in trying to allocate1086// further. We'll just return NULL.1087MutexLockerEx x(Heap_lock);1088*gc_count_before_ret = total_collections();1089return NULL;1090}1091} else {1092if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {1093MutexLockerEx x(Heap_lock);1094*gc_count_before_ret = total_collections();1095return NULL;1096}1097// The GCLocker is either active or the GCLocker initiated1098// GC has not yet been performed. Stall until it is and1099// then retry the allocation.1100GC_locker::stall_until_clear();1101(*gclocker_retry_count_ret) += 1;1102}11031104// We can reach here if we were unsuccessful in scheduling a1105// collection (because another thread beat us to it) or if we were1106// stalled due to the GC locker. In either can we should retry the1107// allocation attempt in case another thread successfully1108// performed a collection and reclaimed enough space. 
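// The need_to_start_conc_mark() check above runs before the humongous
// allocation precisely so the requested bytes can be counted up front instead
// of being tracked afterwards. The following shows only the shape of such an
// occupancy-threshold test; the function and parameter names are illustrative,
// not the real G1 policy, so the sketch is compiled out.
#if 0
#include <cstddef>

static bool should_start_marking(size_t used_bytes, size_t capacity_bytes,
                                 size_t request_bytes, unsigned occupancy_percent) {
  // Count the allocation that is about to happen as if it were already live.
  return used_bytes + request_bytes >= capacity_bytes / 100 * occupancy_percent;
}
#endif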
Give a1109// warning if we seem to be looping forever.11101111if ((QueuedAllocationWarningCount > 0) &&1112(try_count % QueuedAllocationWarningCount == 0)) {1113warning("G1CollectedHeap::attempt_allocation_humongous() "1114"retries %d times", try_count);1115}1116}11171118ShouldNotReachHere();1119return NULL;1120}11211122HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,1123AllocationContext_t context,1124bool expect_null_mutator_alloc_region) {1125assert_at_safepoint(true /* should_be_vm_thread */);1126assert(_allocator->mutator_alloc_region(context)->get() == NULL ||1127!expect_null_mutator_alloc_region,1128"the current alloc region was unexpectedly found to be non-NULL");11291130if (!isHumongous(word_size)) {1131return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,1132false /* bot_updates */);1133} else {1134HeapWord* result = humongous_obj_allocate(word_size, context);1135if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {1136g1_policy()->set_initiate_conc_mark_if_possible();1137}1138return result;1139}11401141ShouldNotReachHere();1142}11431144class PostMCRemSetClearClosure: public HeapRegionClosure {1145G1CollectedHeap* _g1h;1146ModRefBarrierSet* _mr_bs;1147public:1148PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :1149_g1h(g1h), _mr_bs(mr_bs) {}11501151bool doHeapRegion(HeapRegion* r) {1152HeapRegionRemSet* hrrs = r->rem_set();11531154if (r->continuesHumongous()) {1155// We'll assert that the strong code root list and RSet is empty1156assert(hrrs->strong_code_roots_list_length() == 0, "sanity");1157assert(hrrs->occupied() == 0, "RSet should be empty");1158return false;1159}11601161_g1h->reset_gc_time_stamps(r);1162hrrs->clear();1163// You might think here that we could clear just the cards1164// corresponding to the used region. 
But no: if we leave a dirty card1165// in a region we might allocate into, then it would prevent that card1166// from being enqueued, and cause it to be missed.1167// Re: the performance cost: we shouldn't be doing full GC anyway!1168_mr_bs->clear(MemRegion(r->bottom(), r->end()));11691170return false;1171}1172};11731174void G1CollectedHeap::clear_rsets_post_compaction() {1175PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());1176heap_region_iterate(&rs_clear);1177}11781179class RebuildRSOutOfRegionClosure: public HeapRegionClosure {1180G1CollectedHeap* _g1h;1181UpdateRSOopClosure _cl;1182int _worker_i;1183public:1184RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :1185_cl(g1->g1_rem_set(), worker_i),1186_worker_i(worker_i),1187_g1h(g1)1188{ }11891190bool doHeapRegion(HeapRegion* r) {1191if (!r->continuesHumongous()) {1192_cl.set_from(r);1193r->oop_iterate(&_cl);1194}1195return false;1196}1197};11981199class ParRebuildRSTask: public AbstractGangTask {1200G1CollectedHeap* _g1;1201public:1202ParRebuildRSTask(G1CollectedHeap* g1)1203: AbstractGangTask("ParRebuildRSTask"),1204_g1(g1)1205{ }12061207void work(uint worker_id) {1208RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);1209_g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,1210_g1->workers()->active_workers(),1211HeapRegion::RebuildRSClaimValue);1212}1213};12141215class PostCompactionPrinterClosure: public HeapRegionClosure {1216private:1217G1HRPrinter* _hr_printer;1218public:1219bool doHeapRegion(HeapRegion* hr) {1220assert(!hr->is_young(), "not expecting to find young regions");1221if (hr->is_free()) {1222// We only generate output for non-empty regions.1223} else if (hr->startsHumongous()) {1224if (hr->region_num() == 1) {1225// single humongous region1226_hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);1227} else {1228_hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);1229}1230} else if (hr->continuesHumongous()) {1231_hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);1232} else if (hr->is_old()) {1233_hr_printer->post_compaction(hr, G1HRPrinter::Old);1234} else {1235ShouldNotReachHere();1236}1237return false;1238}12391240PostCompactionPrinterClosure(G1HRPrinter* hr_printer)1241: _hr_printer(hr_printer) { }1242};12431244void G1CollectedHeap::print_hrm_post_compaction() {1245PostCompactionPrinterClosure cl(hr_printer());1246heap_region_iterate(&cl);1247}12481249bool G1CollectedHeap::do_collection(bool explicit_gc,1250bool clear_all_soft_refs,1251size_t word_size) {1252assert_at_safepoint(true /* should_be_vm_thread */);12531254if (GC_locker::check_active_before_gc()) {1255return false;1256}12571258STWGCTimer* gc_timer = G1MarkSweep::gc_timer();1259gc_timer->register_gc_start();12601261SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();1262gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());12631264SvcGCMarker sgcm(SvcGCMarker::FULL);1265ResourceMark rm;12661267print_heap_before_gc();1268trace_heap_before_gc(gc_tracer);12691270size_t metadata_prev_used = MetaspaceAux::used_bytes();12711272verify_region_sets_optional();12731274const bool do_clear_all_soft_refs = clear_all_soft_refs ||1275collector_policy()->should_clear_all_soft_refs();12761277ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());12781279{1280IsGCActiveMark x;12811282// Timing1283assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");1284TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);12851286{1287GCTraceTime t(GCCauseString("Full 
GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());1288TraceCollectorStats tcs(g1mm()->full_collection_counters());1289TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());12901291double start = os::elapsedTime();1292g1_policy()->record_full_collection_start();12931294// Note: When we have a more flexible GC logging framework that1295// allows us to add optional attributes to a GC log record we1296// could consider timing and reporting how long we wait in the1297// following two methods.1298wait_while_free_regions_coming();1299// If we start the compaction before the CM threads finish1300// scanning the root regions we might trip them over as we'll1301// be moving objects / updating references. So let's wait until1302// they are done. By telling them to abort, they should complete1303// early.1304_cm->root_regions()->abort();1305_cm->root_regions()->wait_until_scan_finished();1306append_secondary_free_list_if_not_empty_with_lock();13071308gc_prologue(true);1309increment_total_collections(true /* full gc */);1310increment_old_marking_cycles_started();13111312assert(used() == recalculate_used(), "Should be equal");13131314verify_before_gc();13151316check_bitmaps("Full GC Start");1317pre_full_gc_dump(gc_timer);13181319COMPILER2_PRESENT(DerivedPointerTable::clear());13201321// Disable discovery and empty the discovered lists1322// for the CM ref processor.1323ref_processor_cm()->disable_discovery();1324ref_processor_cm()->abandon_partial_discovery();1325ref_processor_cm()->verify_no_references_recorded();13261327// Abandon current iterations of concurrent marking and concurrent1328// refinement, if any are in progress. We have to do this before1329// wait_until_scan_finished() below.1330concurrent_mark()->abort();13311332// Make sure we'll choose a new allocation region afterwards.1333_allocator->release_mutator_alloc_region();1334_allocator->abandon_gc_alloc_regions();1335g1_rem_set()->cleanupHRRS();13361337// We should call this after we retire any currently active alloc1338// regions so that all the ALLOC / RETIRE events are generated1339// before the start GC event.1340_hr_printer.start_gc(true /* full */, (size_t) total_collections());13411342// We may have added regions to the current incremental collection1343// set between the last GC or pause and now. 
We need to clear the1344// incremental collection set and then start rebuilding it afresh1345// after this full GC.1346abandon_collection_set(g1_policy()->inc_cset_head());1347g1_policy()->clear_incremental_cset();1348g1_policy()->stop_incremental_cset_building();13491350tear_down_region_sets(false /* free_list_only */);1351g1_policy()->set_gcs_are_young(true);13521353// See the comments in g1CollectedHeap.hpp and1354// G1CollectedHeap::ref_processing_init() about1355// how reference processing currently works in G1.13561357// Temporarily make discovery by the STW ref processor single threaded (non-MT).1358ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);13591360// Temporarily clear the STW ref processor's _is_alive_non_header field.1361ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);13621363ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);1364ref_processor_stw()->setup_policy(do_clear_all_soft_refs);13651366// Do collection work1367{1368HandleMark hm; // Discard invalid handles created during gc1369G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);1370}13711372assert(num_free_regions() == 0, "we should not have added any free regions");1373rebuild_region_sets(false /* free_list_only */);13741375// Enqueue any discovered reference objects that have1376// not been removed from the discovered lists.1377ref_processor_stw()->enqueue_discovered_references();13781379COMPILER2_PRESENT(DerivedPointerTable::update_pointers());13801381MemoryService::track_memory_usage();13821383assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");1384ref_processor_stw()->verify_no_references_recorded();13851386// Delete metaspaces for unloaded class loaders and clean up loader_data graph1387ClassLoaderDataGraph::purge();1388MetaspaceAux::verify_metrics();13891390// Note: since we've just done a full GC, concurrent1391// marking is no longer active. Therefore we need not1392// re-enable reference discovery for the CM ref processor.1393// That will be done at the start of the next marking cycle.1394assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");1395ref_processor_cm()->verify_no_references_recorded();13961397reset_gc_time_stamp();1398// Since everything potentially moved, we will clear all remembered1399// sets, and clear all cards. Later we will rebuild remembered1400// sets. We will also reset the GC time stamps of the regions.1401clear_rsets_post_compaction();1402check_gc_time_stamps();14031404// Resize the heap if necessary.1405resize_if_necessary_after_full_collection(explicit_gc ? 
0 : word_size);14061407if (_hr_printer.is_active()) {1408// We should do this after we potentially resize the heap so1409// that all the COMMIT / UNCOMMIT events are generated before1410// the end GC event.14111412print_hrm_post_compaction();1413_hr_printer.end_gc(true /* full */, (size_t) total_collections());1414}14151416G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();1417if (hot_card_cache->use_cache()) {1418hot_card_cache->reset_card_counts();1419hot_card_cache->reset_hot_cache();1420}14211422// Rebuild remembered sets of all regions.1423if (G1CollectedHeap::use_parallel_gc_threads()) {1424uint n_workers =1425AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),1426workers()->active_workers(),1427Threads::number_of_non_daemon_threads());1428assert(UseDynamicNumberOfGCThreads ||1429n_workers == workers()->total_workers(),1430"If not dynamic should be using all the workers");1431workers()->set_active_workers(n_workers);1432// Set parallel threads in the heap (_n_par_threads) only1433// before a parallel phase and always reset it to 0 after1434// the phase so that the number of parallel threads does1435// no get carried forward to a serial phase where there1436// may be code that is "possibly_parallel".1437set_par_threads(n_workers);14381439ParRebuildRSTask rebuild_rs_task(this);1440assert(check_heap_region_claim_values(1441HeapRegion::InitialClaimValue), "sanity check");1442assert(UseDynamicNumberOfGCThreads ||1443workers()->active_workers() == workers()->total_workers(),1444"Unless dynamic should use total workers");1445// Use the most recent number of active workers1446assert(workers()->active_workers() > 0,1447"Active workers not properly set");1448set_par_threads(workers()->active_workers());1449workers()->run_task(&rebuild_rs_task);1450set_par_threads(0);1451assert(check_heap_region_claim_values(1452HeapRegion::RebuildRSClaimValue), "sanity check");1453reset_heap_region_claim_values();1454} else {1455RebuildRSOutOfRegionClosure rebuild_rs(this);1456heap_region_iterate(&rebuild_rs);1457}14581459// Rebuild the strong code root lists for each region1460rebuild_strong_code_roots();14611462// Purge code root memory1463purge_code_root_memory();14641465if (true) { // FIXME1466MetaspaceGC::compute_new_size();1467}14681469#ifdef TRACESPINNING1470ParallelTaskTerminator::print_termination_counts();1471#endif14721473// Discard all rset updates1474JavaThread::dirty_card_queue_set().abandon_logs();1475assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");14761477_young_list->reset_sampled_info();1478// At this point there should be no regions in the1479// entire heap tagged as young.1480assert(check_young_list_empty(true /* check_heap */),1481"young list should be empty at this point");14821483// Update the number of full collections that have been completed.1484increment_old_marking_cycles_completed(false /* concurrent */);14851486_hrm.verify_optional();1487verify_region_sets_optional();14881489verify_after_gc();14901491// Clear the previous marking bitmap, if needed for bitmap verification.1492// Note we cannot do this when we clear the next marking bitmap in1493// ConcurrentMark::abort() above since VerifyDuringGC verifies the1494// objects marked during a full GC against the previous bitmap.1495// But we need to clear it before calling check_bitmaps below since1496// the full GC has compacted objects and updated TAMS but not updated1497// the prev bitmap.1498if (G1VerifyBitmaps) {1499((CMBitMap*) 
concurrent_mark()->prevMarkBitMap())->clearAll();1500}1501check_bitmaps("Full GC End");15021503// Start a new incremental collection set for the next pause1504assert(g1_policy()->collection_set() == NULL, "must be");1505g1_policy()->start_incremental_cset_building();15061507clear_cset_fast_test();15081509_allocator->init_mutator_alloc_region();15101511double end = os::elapsedTime();1512g1_policy()->record_full_collection_end();15131514if (G1Log::fine()) {1515g1_policy()->print_heap_transition();1516}15171518// We must call G1MonitoringSupport::update_sizes() in the same scoping level1519// as an active TraceMemoryManagerStats object (i.e. before the destructor for the1520// TraceMemoryManagerStats is called) so that the G1 memory pools are updated1521// before any GC notifications are raised.1522g1mm()->update_sizes();15231524gc_epilogue(true);1525}15261527if (G1Log::finer()) {1528g1_policy()->print_detailed_heap_transition(true /* full */);1529}15301531print_heap_after_gc();1532trace_heap_after_gc(gc_tracer);15331534post_full_gc_dump(gc_timer);15351536gc_timer->register_gc_end();1537gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());1538}15391540return true;1541}15421543void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {1544// do_collection() will return whether it succeeded in performing1545// the GC. Currently, there is no facility on the1546// do_full_collection() API to notify the caller than the collection1547// did not succeed (e.g., because it was locked out by the GC1548// locker). So, right now, we'll ignore the return value.1549bool dummy = do_collection(true, /* explicit_gc */1550clear_all_soft_refs,15510 /* word_size */);1552}15531554// This code is mostly copied from TenuredGeneration.1555void1556G1CollectedHeap::1557resize_if_necessary_after_full_collection(size_t word_size) {1558// Include the current allocation, if any, and bytes that will be1559// pre-allocated to support collections, as "used".1560const size_t used_after_gc = used();1561const size_t capacity_after_gc = capacity();1562const size_t free_after_gc = capacity_after_gc - used_after_gc;15631564// This is enforced in arguments.cpp.1565assert(MinHeapFreeRatio <= MaxHeapFreeRatio,1566"otherwise the code below doesn't make sense");15671568// We don't have floating point command-line arguments1569const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;1570const double maximum_used_percentage = 1.0 - minimum_free_percentage;1571const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;1572const double minimum_used_percentage = 1.0 - maximum_free_percentage;15731574const size_t min_heap_size = collector_policy()->min_heap_byte_size();1575const size_t max_heap_size = collector_policy()->max_heap_byte_size();15761577// We have to be careful here as these two calculations can overflow1578// 32-bit size_t's.1579double used_after_gc_d = (double) used_after_gc;1580double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;1581double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;15821583// Let's make sure that they are both under the max heap size, which1584// by default will make them fit into a size_t.1585double desired_capacity_upper_bound = (double) max_heap_size;1586minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,1587desired_capacity_upper_bound);1588maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,1589desired_capacity_upper_bound);15901591// We can now safely turn them into 
size_t's.
  size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
  size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;

  // This assert only makes sense here, before we adjust them
  // with respect to the min and max heap size.
  assert(minimum_desired_capacity <= maximum_desired_capacity,
         err_msg("minimum_desired_capacity = " SIZE_FORMAT ", "
                 "maximum_desired_capacity = " SIZE_FORMAT,
                 minimum_desired_capacity, maximum_desired_capacity));

  // Should not be greater than the heap max size. No need to adjust
  // it with respect to the heap min size as it's a lower bound (i.e.,
  // we'll try to make the capacity larger than it, not smaller).
  minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
  // Should not be less than the heap min size. No need to adjust it
  // with respect to the heap max size as it's an upper bound (i.e.,
  // we'll try to make the capacity smaller than it, not greater).
  maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);

  if (capacity_after_gc < minimum_desired_capacity) {
    // Don't expand unless it's significant
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    ergo_verbose4(ErgoHeapSizing,
                  "attempt heap expansion",
                  ergo_format_reason("capacity lower than "
                                     "min desired capacity after Full GC")
                  ergo_format_byte("capacity")
                  ergo_format_byte("occupancy")
                  ergo_format_byte_perc("min desired capacity"),
                  capacity_after_gc, used_after_gc,
                  minimum_desired_capacity, (double) MinHeapFreeRatio);
    expand(expand_bytes);

    // No expansion, now see if we want to shrink
  } else if (capacity_after_gc > maximum_desired_capacity) {
    // Capacity too large, compute shrinking size
    size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
    ergo_verbose4(ErgoHeapSizing,
                  "attempt heap shrinking",
                  ergo_format_reason("capacity higher than "
                                     "max desired capacity after Full GC")
                  ergo_format_byte("capacity")
                  ergo_format_byte("occupancy")
                  ergo_format_byte_perc("max desired capacity"),
                  capacity_after_gc, used_after_gc,
                  maximum_desired_capacity, (double) MaxHeapFreeRatio);
    shrink(shrink_bytes);
  }
}
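
// Worked example of the sizing policy above (illustrative numbers only, not
// taken from any particular configuration): with -XX:MinHeapFreeRatio=40,
// -XX:MaxHeapFreeRatio=70 and used_after_gc = 600M,
//   minimum_desired_capacity = 600M / (1 - 0.40) = 1000M
//   maximum_desired_capacity = 600M / (1 - 0.70) = 2000M
// so a post-GC capacity below roughly 1000M triggers an expansion attempt,
// a capacity above roughly 2000M triggers a shrink attempt, and anything in
// between leaves the heap size unchanged. Both bounds are clamped against
// the -Xms / -Xmx limits before the comparison.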

HeapWord*
G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
                                           AllocationContext_t context,
                                           bool* succeeded) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  *succeeded = true;
  // Let's attempt the allocation first.
  HeapWord* result =
    attempt_allocation_at_safepoint(word_size,
                                    context,
                                    false /* expect_null_mutator_alloc_region */);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // In a G1 heap, we're supposed to keep allocation from failing by
  // incremental pauses. Therefore, at least for now, we'll favor
  // expansion over collection. (This might change in the future if we can
  // do something smarter than full collection to satisfy a failed alloc.)
  result = expand_and_allocate(word_size, context);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // Expansion didn't work, we'll try to do a Full GC.
  bool gc_succeeded = do_collection(false, /* explicit_gc */
                                    false, /* clear_all_soft_refs */
                                    word_size);
  if (!gc_succeeded) {
    *succeeded = false;
    return NULL;
  }

  // Retry the allocation
  result = attempt_allocation_at_safepoint(word_size,
                                           context,
                                           true /* expect_null_mutator_alloc_region */);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  // Then, try a Full GC that will collect all soft references.
  gc_succeeded = do_collection(false, /* explicit_gc */
                               true, /* clear_all_soft_refs */
                               word_size);
  if (!gc_succeeded) {
    *succeeded = false;
    return NULL;
  }

  // Retry the allocation once more
  result = attempt_allocation_at_safepoint(word_size,
                                           context,
                                           true /* expect_null_mutator_alloc_region */);
  if (result != NULL) {
    assert(*succeeded, "sanity");
    return result;
  }

  assert(!collector_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  assert(*succeeded, "sanity");
  return NULL;
}

// Attempt to expand the heap sufficiently to support an allocation of the
// given "word_size". If successful, perform the allocation and return the
// address of the allocated block, or else return "NULL".

HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  verify_region_sets_optional();

  size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
  ergo_verbose1(ErgoHeapSizing,
                "attempt heap expansion",
                ergo_format_reason("allocation request failed")
                ergo_format_byte("allocation request"),
                word_size * HeapWordSize);
  if (expand(expand_bytes)) {
    _hrm.verify_optional();
    verify_region_sets_optional();
    return attempt_allocation_at_safepoint(word_size,
                                           context,
                                           false /* expect_null_mutator_alloc_region */);
  }
  return NULL;
}
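
// Note on the arithmetic in expand() below (example values are illustrative):
// the requested byte count is first rounded up to the OS page size and then
// up to a whole number of regions, so with HeapRegion::GrainBytes = 8M a 5M
// request becomes one 8M region and a 9M request becomes two regions (16M).
// The number of regions actually obtained may still be smaller if the
// HeapRegionManager cannot commit all of them.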
"post-condition");1768g1_policy()->record_new_heap_size(num_regions());1769} else {1770ergo_verbose0(ErgoHeapSizing,1771"did not expand the heap",1772ergo_format_reason("heap expansion operation failed"));1773// The expansion of the virtual storage space was unsuccessful.1774// Let's see if it was because we ran out of swap.1775if (G1ExitOnExpansionFailure &&1776_hrm.available() >= regions_to_expand) {1777// We had head room...1778vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");1779}1780}1781return regions_to_expand > 0;1782}17831784void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {1785size_t aligned_shrink_bytes =1786ReservedSpace::page_align_size_down(shrink_bytes);1787aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,1788HeapRegion::GrainBytes);1789uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);17901791uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);1792size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;17931794ergo_verbose3(ErgoHeapSizing,1795"shrink the heap",1796ergo_format_byte("requested shrinking amount")1797ergo_format_byte("aligned shrinking amount")1798ergo_format_byte("attempted shrinking amount"),1799shrink_bytes, aligned_shrink_bytes, shrunk_bytes);1800if (num_regions_removed > 0) {1801g1_policy()->record_new_heap_size(num_regions());1802} else {1803ergo_verbose0(ErgoHeapSizing,1804"did not shrink the heap",1805ergo_format_reason("heap shrinking operation failed"));1806}1807}18081809void G1CollectedHeap::shrink(size_t shrink_bytes) {1810verify_region_sets_optional();18111812// We should only reach here at the end of a Full GC which means we1813// should not not be holding to any GC alloc regions. The method1814// below will make sure of that and do any remaining clean up.1815_allocator->abandon_gc_alloc_regions();18161817// Instead of tearing down / rebuilding the free lists here, we1818// could instead use the remove_all_pending() method on free_list to1819// remove only the ones that we need to remove.1820tear_down_region_sets(true /* free_list_only */);1821shrink_helper(shrink_bytes);1822rebuild_region_sets(true /* free_list_only */);18231824_hrm.verify_optional();1825verify_region_sets_optional();1826}18271828// Public methods.18291830#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away1831#pragma warning( disable:4355 ) // 'this' : used in base member initializer list1832#endif // _MSC_VER183318341835G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :1836SharedHeap(policy_),1837_g1_policy(policy_),1838_dirty_card_queue_set(false),1839_into_cset_dirty_card_queue_set(false),1840_is_alive_closure_cm(this),1841_is_alive_closure_stw(this),1842_ref_processor_cm(NULL),1843_ref_processor_stw(NULL),1844_bot_shared(NULL),1845_evac_failure_scan_stack(NULL),1846_mark_in_progress(false),1847_cg1r(NULL),1848_g1mm(NULL),1849_refine_cte_cl(NULL),1850_full_collection(false),1851_secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),1852_old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),1853_humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),1854_humongous_reclaim_candidates(),1855_has_humongous_reclaim_candidates(false),1856_free_regions_coming(false),1857_young_list(new YoungList(this)),1858_gc_time_stamp(0),1859_survivor_plab_stats(YoungPLABSize, PLABWeight),1860_old_plab_stats(OldPLABSize, 
PLABWeight),1861_expand_heap_after_alloc_failure(true),1862_surviving_young_words(NULL),1863_old_marking_cycles_started(0),1864_old_marking_cycles_completed(0),1865_concurrent_cycle_started(false),1866_heap_summary_sent(false),1867_in_cset_fast_test(),1868_dirty_cards_region_list(NULL),1869_worker_cset_start_region(NULL),1870_worker_cset_start_region_time_stamp(NULL),1871_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),1872_gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),1873_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),1874_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {18751876_g1h = this;18771878_allocator = G1Allocator::create_allocator(_g1h);1879_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;18801881int n_queues = MAX2((int)ParallelGCThreads, 1);1882_task_queues = new RefToScanQueueSet(n_queues);18831884uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();1885assert(n_rem_sets > 0, "Invariant.");18861887_worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);1888_worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);1889_evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);18901891for (int i = 0; i < n_queues; i++) {1892RefToScanQueue* q = new RefToScanQueue();1893q->initialize();1894_task_queues->register_queue(i, q);1895::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();1896}1897clear_cset_start_regions();18981899// Initialize the G1EvacuationFailureALot counters and flags.1900NOT_PRODUCT(reset_evacuation_should_fail();)19011902guarantee(_task_queues != NULL, "task_queues allocation failure.");1903}19041905G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,1906size_t size,1907size_t translation_factor) {1908size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);1909// Allocate a new reserved space, preferring to use large pages.1910ReservedSpace rs(size, preferred_page_size);1911G1RegionToSpaceMapper* result =1912G1RegionToSpaceMapper::create_mapper(rs,1913size,1914rs.alignment(),1915HeapRegion::GrainBytes,1916translation_factor,1917mtGC);1918if (TracePageSizes) {1919gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,1920description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);1921}1922return result;1923}19241925jint G1CollectedHeap::initialize() {1926CollectedHeap::pre_initialize();1927os::enable_vtime();19281929G1Log::init();19301931// Necessary to satisfy locking discipline assertions.19321933MutexLocker x(Heap_lock);19341935// We have to initialize the printer before committing the heap, as1936// it will be used then.1937_hr_printer.set_active(G1PrintHeapRegions);19381939// While there are no constraints in the GC code that HeapWordSize1940// be any particular value, there are multiple other areas in the1941// system which believe this to be true (e.g. 
oop->object_size in some1942// cases incorrectly returns the size in wordSize units rather than1943// HeapWordSize).1944guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");19451946size_t init_byte_size = collector_policy()->initial_heap_byte_size();1947size_t max_byte_size = collector_policy()->max_heap_byte_size();1948size_t heap_alignment = collector_policy()->heap_alignment();19491950// Ensure that the sizes are properly aligned.1951Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");1952Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");1953Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");19541955_refine_cte_cl = new RefineCardTableEntryClosure();19561957_cg1r = new ConcurrentG1Refine(this, _refine_cte_cl);19581959// Reserve the maximum.19601961// When compressed oops are enabled, the preferred heap base1962// is calculated by subtracting the requested size from the1963// 32Gb boundary and using the result as the base address for1964// heap reservation. If the requested size is not aligned to1965// HeapRegion::GrainBytes (i.e. the alignment that is passed1966// into the ReservedHeapSpace constructor) then the actual1967// base of the reserved heap may end up differing from the1968// address that was requested (i.e. the preferred heap base).1969// If this happens then we could end up using a non-optimal1970// compressed oops mode.19711972ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,1973heap_alignment);19741975// It is important to do this in a way such that concurrent readers can't1976// temporarily think something is in the heap. (I've actually seen this1977// happen in asserts: DLD.)1978_reserved.set_word_size(0);1979_reserved.set_start((HeapWord*)heap_rs.base());1980_reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));19811982// Create the gen rem set (and barrier set) for the entire reserved region.1983_rem_set = collector_policy()->create_rem_set(_reserved, 2);1984set_barrier_set(rem_set()->bs());1985if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {1986vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");1987return JNI_ENOMEM;1988}19891990// Also create a G1 rem set.1991_g1_rem_set = new G1RemSet(this, g1_barrier_set());19921993// Carve out the G1 part of the heap.19941995ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);1996G1RegionToSpaceMapper* heap_storage =1997G1RegionToSpaceMapper::create_mapper(g1_rs,1998g1_rs.size(),1999UseLargePages ? 
os::large_page_size() : os::vm_page_size(),2000HeapRegion::GrainBytes,20011,2002mtJavaHeap);2003heap_storage->set_mapping_changed_listener(&_listener);20042005// Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.2006G1RegionToSpaceMapper* bot_storage =2007create_aux_memory_mapper("Block offset table",2008G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),2009G1BlockOffsetSharedArray::N_bytes);20102011ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));2012G1RegionToSpaceMapper* cardtable_storage =2013create_aux_memory_mapper("Card table",2014G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),2015G1BlockOffsetSharedArray::N_bytes);20162017G1RegionToSpaceMapper* card_counts_storage =2018create_aux_memory_mapper("Card counts table",2019G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),2020G1BlockOffsetSharedArray::N_bytes);20212022size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());2023G1RegionToSpaceMapper* prev_bitmap_storage =2024create_aux_memory_mapper("Prev Bitmap", bitmap_size, CMBitMap::mark_distance());2025G1RegionToSpaceMapper* next_bitmap_storage =2026create_aux_memory_mapper("Next Bitmap", bitmap_size, CMBitMap::mark_distance());20272028_hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);2029g1_barrier_set()->initialize(cardtable_storage);2030// Do later initialization work for concurrent refinement.2031_cg1r->init(card_counts_storage);20322033// 6843694 - ensure that the maximum region index can fit2034// in the remembered set structures.2035const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;2036guarantee((max_regions() - 1) <= max_region_idx, "too many regions");20372038size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;2039guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");2040guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,2041"too many cards per region");20422043FreeRegionList::set_unrealistically_long_length(max_regions() + 1);20442045_bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);20462047_g1h = this;20482049{2050HeapWord* start = _hrm.reserved().start();2051HeapWord* end = _hrm.reserved().end();2052size_t granularity = HeapRegion::GrainBytes;20532054_in_cset_fast_test.initialize(start, end, granularity);2055_humongous_reclaim_candidates.initialize(start, end, granularity);2056}20572058// Create the ConcurrentMark data structure and thread.2059// (Must do this late, so that "max_regions" is defined.)2060_cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);2061if (_cm == NULL || !_cm->completed_initialization()) {2062vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");2063return JNI_ENOMEM;2064}2065_cmThread = _cm->cmThread();20662067// Initialize the from_card cache structure of HeapRegionRemSet.2068HeapRegionRemSet::init_heap(max_regions());20692070// Now expand into the initial heap size.2071if (!expand(init_byte_size)) {2072vm_shutdown_during_initialization("Failed to allocate initial heap.");2073return JNI_ENOMEM;2074}20752076// Perform any initialization actions delegated to the 
policy.2077g1_policy()->init();20782079JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,2080SATB_Q_FL_lock,2081G1SATBProcessCompletedThreshold,2082Shared_SATB_Q_lock);20832084JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,2085DirtyCardQ_CBL_mon,2086DirtyCardQ_FL_lock,2087concurrent_g1_refine()->yellow_zone(),2088concurrent_g1_refine()->red_zone(),2089Shared_DirtyCardQ_lock);20902091dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code2092DirtyCardQ_CBL_mon,2093DirtyCardQ_FL_lock,2094-1, // never trigger processing2095-1, // no limit on length2096Shared_DirtyCardQ_lock,2097&JavaThread::dirty_card_queue_set());20982099// Initialize the card queue set used to hold cards containing2100// references into the collection set.2101_into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code2102DirtyCardQ_CBL_mon,2103DirtyCardQ_FL_lock,2104-1, // never trigger processing2105-1, // no limit on length2106Shared_DirtyCardQ_lock,2107&JavaThread::dirty_card_queue_set());21082109// In case we're keeping closure specialization stats, initialize those2110// counts and that mechanism.2111SpecializationStats::clear();21122113// Here we allocate the dummy HeapRegion that is required by the2114// G1AllocRegion class.2115HeapRegion* dummy_region = _hrm.get_dummy_region();21162117// We'll re-use the same region whether the alloc region will2118// require BOT updates or not and, if it doesn't, then a non-young2119// region will complain that it cannot support allocations without2120// BOT updates. So we'll tag the dummy region as eden to avoid that.2121dummy_region->set_eden();2122// Make sure it's full.2123dummy_region->set_top(dummy_region->end());2124G1AllocRegion::setup(this, dummy_region);21252126_allocator->init_mutator_alloc_region();21272128// Do create of the monitoring and management support so that2129// values in the heap have been properly initialized.2130_g1mm = new G1MonitoringSupport(this);21312132G1StringDedup::initialize();21332134return JNI_OK;2135}21362137void G1CollectedHeap::stop() {2138// Stop all concurrent threads. We do this to make sure these threads2139// do not continue to execute and access resources (e.g. gclog_or_tty)2140// that are destroyed during shutdown.2141_cg1r->stop();2142_cmThread->stop();2143if (G1StringDedup::is_enabled()) {2144G1StringDedup::stop();2145}2146}21472148size_t G1CollectedHeap::conservative_max_heap_alignment() {2149return HeapRegion::max_region_size();2150}21512152void G1CollectedHeap::ref_processing_init() {2153// Reference processing in G1 currently works as follows:2154//2155// * There are two reference processor instances. 
One is2156// used to record and process discovered references2157// during concurrent marking; the other is used to2158// record and process references during STW pauses2159// (both full and incremental).2160// * Both ref processors need to 'span' the entire heap as2161// the regions in the collection set may be dotted around.2162//2163// * For the concurrent marking ref processor:2164// * Reference discovery is enabled at initial marking.2165// * Reference discovery is disabled and the discovered2166// references processed etc during remarking.2167// * Reference discovery is MT (see below).2168// * Reference discovery requires a barrier (see below).2169// * Reference processing may or may not be MT2170// (depending on the value of ParallelRefProcEnabled2171// and ParallelGCThreads).2172// * A full GC disables reference discovery by the CM2173// ref processor and abandons any entries on it's2174// discovered lists.2175//2176// * For the STW processor:2177// * Non MT discovery is enabled at the start of a full GC.2178// * Processing and enqueueing during a full GC is non-MT.2179// * During a full GC, references are processed after marking.2180//2181// * Discovery (may or may not be MT) is enabled at the start2182// of an incremental evacuation pause.2183// * References are processed near the end of a STW evacuation pause.2184// * For both types of GC:2185// * Discovery is atomic - i.e. not concurrent.2186// * Reference discovery will not need a barrier.21872188SharedHeap::ref_processing_init();2189MemRegion mr = reserved_region();21902191// Concurrent Mark ref processor2192_ref_processor_cm =2193new ReferenceProcessor(mr, // span2194ParallelRefProcEnabled && (ParallelGCThreads > 1),2195// mt processing2196(int) ParallelGCThreads,2197// degree of mt processing2198(ParallelGCThreads > 1) || (ConcGCThreads > 1),2199// mt discovery2200(int) MAX2(ParallelGCThreads, ConcGCThreads),2201// degree of mt discovery2202false,2203// Reference discovery is not atomic2204&_is_alive_closure_cm);2205// is alive closure2206// (for efficiency/performance)22072208// STW ref processor2209_ref_processor_stw =2210new ReferenceProcessor(mr, // span2211ParallelRefProcEnabled && (ParallelGCThreads > 1),2212// mt processing2213MAX2((int)ParallelGCThreads, 1),2214// degree of mt processing2215(ParallelGCThreads > 1),2216// mt discovery2217MAX2((int)ParallelGCThreads, 1),2218// degree of mt discovery2219true,2220// Reference discovery is atomic2221&_is_alive_closure_stw);2222// is alive closure2223// (for efficiency/performance)2224}22252226size_t G1CollectedHeap::capacity() const {2227return _hrm.length() * HeapRegion::GrainBytes;2228}22292230void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {2231assert(!hr->continuesHumongous(), "pre-condition");2232hr->reset_gc_time_stamp();2233if (hr->startsHumongous()) {2234uint first_index = hr->hrm_index() + 1;2235uint last_index = hr->last_hc_index();2236for (uint i = first_index; i < last_index; i += 1) {2237HeapRegion* chr = region_at(i);2238assert(chr->continuesHumongous(), "sanity");2239chr->reset_gc_time_stamp();2240}2241}2242}22432244#ifndef PRODUCT2245class CheckGCTimeStampsHRClosure : public HeapRegionClosure {2246private:2247unsigned _gc_time_stamp;2248bool _failures;22492250public:2251CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :2252_gc_time_stamp(gc_time_stamp), _failures(false) { }22532254virtual bool doHeapRegion(HeapRegion* hr) {2255unsigned region_gc_time_stamp = hr->get_gc_time_stamp();2256if (_gc_time_stamp != region_gc_time_stamp) 
{
      gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
                             "expected %d", HR_FORMAT_PARAMS(hr),
                             region_gc_time_stamp, _gc_time_stamp);
      _failures = true;
    }
    return false;
  }

  bool failures() { return _failures; }
};

void G1CollectedHeap::check_gc_time_stamps() {
  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
  heap_region_iterate(&cl);
  guarantee(!cl.failures(), "all GC time stamps should have been reset");
}
#endif // PRODUCT

void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                 DirtyCardQueue* into_cset_dcq,
                                                 bool concurrent,
                                                 uint worker_i) {
  // Clean cards in the hot card cache
  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);

  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  size_t n_completed_buffers = 0;
  while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
    n_completed_buffers++;
  }
  g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
  dcqs.clear_n_completed_buffers();
  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}


// Computes the sum of the storage used by the various regions.
size_t G1CollectedHeap::used() const {
  return _allocator->used();
}

size_t G1CollectedHeap::used_unlocked() const {
  return _allocator->used_unlocked();
}

class SumUsedClosure: public HeapRegionClosure {
  size_t _used;
public:
  SumUsedClosure() : _used(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      _used += r->used();
    }
    return false;
  }
  size_t result() { return _used; }
};

size_t G1CollectedHeap::recalculate_used() const {
  double recalculate_used_start = os::elapsedTime();

  SumUsedClosure blk;
  heap_region_iterate(&blk);

  g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
  return blk.result();
}

bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
    case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
    case GCCause::_g1_humongous_allocation: return true;
    case GCCause::_update_allocation_context_stats_inc: return true;
    case GCCause::_wb_conc_mark:            return true;
    default:                                return false;
  }
}
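
// For example (behavior sketch, not an exhaustive list): with
// -XX:+ExplicitGCInvokesConcurrent a System.gc() request returns true above,
// so collect() later schedules an initial-mark evacuation pause that starts a
// concurrent cycle rather than a stop-the-world Full GC; without the flag the
// same cause falls through to the default case and a Full GC is scheduled.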
#ifndef PRODUCT
void G1CollectedHeap::allocate_dummy_regions() {
  // Let's fill up most of the region
  size_t word_size = HeapRegion::GrainWords - 1024;
  // And as a result the region we'll allocate will be humongous.
  guarantee(isHumongous(word_size), "sanity");

  for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
    // Let's use the existing mechanism for the allocation
    HeapWord* dummy_obj = humongous_obj_allocate(word_size,
                                                 AllocationContext::system());
    if (dummy_obj != NULL) {
      MemRegion mr(dummy_obj, word_size);
      CollectedHeap::fill_with_object(mr);
    } else {
      // If we can't allocate once, we probably cannot allocate
      // again. Let's get out of the loop.
      break;
    }
  }
}
#endif // !PRODUCT

void G1CollectedHeap::increment_old_marking_cycles_started() {
  assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
         _old_marking_cycles_started == _old_marking_cycles_completed + 1,
         err_msg("Wrong marking cycle count (started: %d, completed: %d)",
                 _old_marking_cycles_started, _old_marking_cycles_completed));

  _old_marking_cycles_started++;
}

void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
  MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);

  // We assume that if concurrent == true, then the caller is a
  // concurrent thread that has joined the Suspendible Thread
  // Set. If there's ever a cheap way to check this, we should add an
  // assert here.

  // Given that this method is called at the end of a Full GC or of a
  // concurrent cycle, and those can be nested (i.e., a Full GC can
  // interrupt a concurrent cycle), the number of full collections
  // completed should be either one (in the case where there was no
  // nesting) or two (when a Full GC interrupted a concurrent cycle)
  // behind the number of full collections started.

  // This is the case for the inner caller, i.e. a Full GC.
  assert(concurrent ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
         err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
                 "is inconsistent with _old_marking_cycles_completed = %u",
                 _old_marking_cycles_started, _old_marking_cycles_completed));

  // This is the case for the outer caller, i.e. the concurrent cycle.
  assert(!concurrent ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
         err_msg("for outer caller (concurrent cycle): "
                 "_old_marking_cycles_started = %u "
                 "is inconsistent with _old_marking_cycles_completed = %u",
                 _old_marking_cycles_started, _old_marking_cycles_completed));

  _old_marking_cycles_completed += 1;

  // We need to clear the "in_progress" flag in the CM thread before
  // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
  // is set) so that if a waiter requests another System.gc() it doesn't
  // incorrectly see that a marking cycle is still in progress.
  if (concurrent) {
    _cmThread->set_idle();
  }

  // This notify_all() will ensure that a thread that called
  // System.gc() (with ExplicitGCInvokesConcurrent set or not)
  // and is waiting for a full GC to finish will be woken up.
It is2413// waiting in VM_G1IncCollectionPause::doit_epilogue().2414FullGCCount_lock->notify_all();2415}24162417void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {2418_concurrent_cycle_started = true;2419_gc_timer_cm->register_gc_start(start_time);24202421_gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());2422trace_heap_before_gc(_gc_tracer_cm);2423}24242425void G1CollectedHeap::register_concurrent_cycle_end() {2426if (_concurrent_cycle_started) {2427if (_cm->has_aborted()) {2428_gc_tracer_cm->report_concurrent_mode_failure();2429}24302431_gc_timer_cm->register_gc_end();2432_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());24332434// Clear state variables to prepare for the next concurrent cycle.2435_concurrent_cycle_started = false;2436_heap_summary_sent = false;2437}2438}24392440void G1CollectedHeap::trace_heap_after_concurrent_cycle() {2441if (_concurrent_cycle_started) {2442// This function can be called when:2443// the cleanup pause is run2444// the concurrent cycle is aborted before the cleanup pause.2445// the concurrent cycle is aborted after the cleanup pause,2446// but before the concurrent cycle end has been registered.2447// Make sure that we only send the heap information once.2448if (!_heap_summary_sent) {2449trace_heap_after_gc(_gc_tracer_cm);2450_heap_summary_sent = true;2451}2452}2453}24542455G1YCType G1CollectedHeap::yc_type() {2456bool is_young = g1_policy()->gcs_are_young();2457bool is_initial_mark = g1_policy()->during_initial_mark_pause();2458bool is_during_mark = mark_in_progress();24592460if (is_initial_mark) {2461return InitialMark;2462} else if (is_during_mark) {2463return DuringMark;2464} else if (is_young) {2465return Normal;2466} else {2467return Mixed;2468}2469}24702471void G1CollectedHeap::collect(GCCause::Cause cause) {2472assert_heap_not_locked();24732474uint gc_count_before;2475uint old_marking_count_before;2476uint full_gc_count_before;2477bool retry_gc;24782479do {2480retry_gc = false;24812482{2483MutexLocker ml(Heap_lock);24842485// Read the GC count while holding the Heap_lock2486gc_count_before = total_collections();2487full_gc_count_before = total_full_collections();2488old_marking_count_before = _old_marking_cycles_started;2489}24902491if (should_do_concurrent_full_gc(cause)) {2492// Schedule an initial-mark evacuation pause that will start a2493// concurrent cycle. We're setting word_size to 0 which means that2494// we are not requesting a post-GC allocation.2495VM_G1IncCollectionPause op(gc_count_before,24960, /* word_size */2497true, /* should_initiate_conc_mark */2498g1_policy()->max_pause_time_ms(),2499cause);2500op.set_allocation_context(AllocationContext::current());25012502VMThread::execute(&op);2503if (!op.pause_succeeded()) {2504if (old_marking_count_before == _old_marking_cycles_started) {2505retry_gc = op.should_retry_gc();2506} else {2507// A Full GC happened while we were trying to schedule the2508// initial-mark GC. No point in starting a new cycle given2509// that the whole heap was collected anyway.2510}25112512if (retry_gc) {2513if (GC_locker::is_active_and_needs_gc()) {2514GC_locker::stall_until_clear();2515}2516}2517}2518} else if (GC_locker::should_discard(cause, gc_count_before)) {2519// Return to be consistent with VMOp failure due to another2520// collection slipping in after our gc_count but before our2521// request is processed. 
_gc_locker collections upgraded by2522// GCLockerInvokesConcurrent are handled above and never discarded.2523return;2524} else {2525if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc2526DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {25272528// Schedule a standard evacuation pause. We're setting word_size2529// to 0 which means that we are not requesting a post-GC allocation.2530VM_G1IncCollectionPause op(gc_count_before,25310, /* word_size */2532false, /* should_initiate_conc_mark */2533g1_policy()->max_pause_time_ms(),2534cause);2535VMThread::execute(&op);2536} else {2537// Schedule a Full GC.2538VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);2539VMThread::execute(&op);2540}2541}2542} while (retry_gc);2543}25442545bool G1CollectedHeap::is_in(const void* p) const {2546if (_hrm.reserved().contains(p)) {2547// Given that we know that p is in the reserved space,2548// heap_region_containing_raw() should successfully2549// return the containing region.2550HeapRegion* hr = heap_region_containing_raw(p);2551return hr->is_in(p);2552} else {2553return false;2554}2555}25562557#ifdef ASSERT2558bool G1CollectedHeap::is_in_exact(const void* p) const {2559bool contains = reserved_region().contains(p);2560bool available = _hrm.is_available(addr_to_region((HeapWord*)p));2561if (contains && available) {2562return true;2563} else {2564return false;2565}2566}2567#endif25682569// Iteration functions.25702571// Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.25722573class IterateOopClosureRegionClosure: public HeapRegionClosure {2574ExtendedOopClosure* _cl;2575public:2576IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}2577bool doHeapRegion(HeapRegion* r) {2578if (!r->continuesHumongous()) {2579r->oop_iterate(_cl);2580}2581return false;2582}2583};25842585void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {2586IterateOopClosureRegionClosure blk(cl);2587heap_region_iterate(&blk);2588}25892590// Iterates an ObjectClosure over all objects within a HeapRegion.25912592class IterateObjectClosureRegionClosure: public HeapRegionClosure {2593ObjectClosure* _cl;2594public:2595IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}2596bool doHeapRegion(HeapRegion* r) {2597if (! 
r->continuesHumongous()) {2598r->object_iterate(_cl);2599}2600return false;2601}2602};26032604void G1CollectedHeap::object_iterate(ObjectClosure* cl) {2605IterateObjectClosureRegionClosure blk(cl);2606heap_region_iterate(&blk);2607}26082609// Calls a SpaceClosure on a HeapRegion.26102611class SpaceClosureRegionClosure: public HeapRegionClosure {2612SpaceClosure* _cl;2613public:2614SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}2615bool doHeapRegion(HeapRegion* r) {2616_cl->do_space(r);2617return false;2618}2619};26202621void G1CollectedHeap::space_iterate(SpaceClosure* cl) {2622SpaceClosureRegionClosure blk(cl);2623heap_region_iterate(&blk);2624}26252626void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {2627_hrm.iterate(cl);2628}26292630void2631G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,2632uint worker_id,2633uint num_workers,2634jint claim_value) const {2635_hrm.par_iterate(cl, worker_id, num_workers, claim_value);2636}26372638class ResetClaimValuesClosure: public HeapRegionClosure {2639public:2640bool doHeapRegion(HeapRegion* r) {2641r->set_claim_value(HeapRegion::InitialClaimValue);2642return false;2643}2644};26452646void G1CollectedHeap::reset_heap_region_claim_values() {2647ResetClaimValuesClosure blk;2648heap_region_iterate(&blk);2649}26502651void G1CollectedHeap::reset_cset_heap_region_claim_values() {2652ResetClaimValuesClosure blk;2653collection_set_iterate(&blk);2654}26552656#ifdef ASSERT2657// This checks whether all regions in the heap have the correct claim2658// value. I also piggy-backed on this a check to ensure that the2659// humongous_start_region() information on "continues humongous"2660// regions is correct.26612662class CheckClaimValuesClosure : public HeapRegionClosure {2663private:2664jint _claim_value;2665uint _failures;2666HeapRegion* _sh_region;26672668public:2669CheckClaimValuesClosure(jint claim_value) :2670_claim_value(claim_value), _failures(0), _sh_region(NULL) { }2671bool doHeapRegion(HeapRegion* r) {2672if (r->claim_value() != _claim_value) {2673gclog_or_tty->print_cr("Region " HR_FORMAT ", "2674"claim value = %d, should be %d",2675HR_FORMAT_PARAMS(r),2676r->claim_value(), _claim_value);2677++_failures;2678}2679if (!r->isHumongous()) {2680_sh_region = NULL;2681} else if (r->startsHumongous()) {2682_sh_region = r;2683} else if (r->continuesHumongous()) {2684if (r->humongous_start_region() != _sh_region) {2685gclog_or_tty->print_cr("Region " HR_FORMAT ", "2686"HS = " PTR_FORMAT ", should be " PTR_FORMAT,2687HR_FORMAT_PARAMS(r),2688p2i(r->humongous_start_region()),2689p2i(_sh_region));2690++_failures;2691}2692}2693return false;2694}2695uint failures() { return _failures; }2696};26972698bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {2699CheckClaimValuesClosure cl(claim_value);2700heap_region_iterate(&cl);2701return cl.failures() == 0;2702}27032704class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {2705private:2706jint _claim_value;2707uint _failures;27082709public:2710CheckClaimValuesInCSetHRClosure(jint claim_value) :2711_claim_value(claim_value), _failures(0) { }27122713uint failures() { return _failures; }27142715bool doHeapRegion(HeapRegion* hr) {2716assert(hr->in_collection_set(), "how?");2717assert(!hr->isHumongous(), "H-region in CSet");2718if (hr->claim_value() != _claim_value) {2719gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "2720"claim value = %d, should be %d",2721HR_FORMAT_PARAMS(hr),2722hr->claim_value(), _claim_value);2723_failures += 
1;2724}2725return false;2726}2727};27282729bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {2730CheckClaimValuesInCSetHRClosure cl(claim_value);2731collection_set_iterate(&cl);2732return cl.failures() == 0;2733}2734#endif // ASSERT27352736// Clear the cached CSet starting regions and (more importantly)2737// the time stamps. Called when we reset the GC time stamp.2738void G1CollectedHeap::clear_cset_start_regions() {2739assert(_worker_cset_start_region != NULL, "sanity");2740assert(_worker_cset_start_region_time_stamp != NULL, "sanity");27412742int n_queues = MAX2((int)ParallelGCThreads, 1);2743for (int i = 0; i < n_queues; i++) {2744_worker_cset_start_region[i] = NULL;2745_worker_cset_start_region_time_stamp[i] = 0;2746}2747}27482749// Given the id of a worker, obtain or calculate a suitable2750// starting region for iterating over the current collection set.2751HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {2752assert(get_gc_time_stamp() > 0, "should have been updated by now");27532754HeapRegion* result = NULL;2755unsigned gc_time_stamp = get_gc_time_stamp();27562757if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {2758// Cached starting region for current worker was set2759// during the current pause - so it's valid.2760// Note: the cached starting heap region may be NULL2761// (when the collection set is empty).2762result = _worker_cset_start_region[worker_i];2763assert(result == NULL || result->in_collection_set(), "sanity");2764return result;2765}27662767// The cached entry was not valid so let's calculate2768// a suitable starting heap region for this worker.27692770// We want the parallel threads to start their collection2771// set iteration at different collection set regions to2772// avoid contention.2773// If we have:2774// n collection set regions2775// p threads2776// Then thread t will start at region floor ((t * n) / p)27772778result = g1_policy()->collection_set();2779if (G1CollectedHeap::use_parallel_gc_threads()) {2780uint cs_size = g1_policy()->cset_region_length();2781uint active_workers = workers()->active_workers();2782assert(UseDynamicNumberOfGCThreads ||2783active_workers == workers()->total_workers(),2784"Unless dynamic should use total workers");27852786uint end_ind = (cs_size * worker_i) / active_workers;2787uint start_ind = 0;27882789if (worker_i > 0 &&2790_worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {2791// Previous workers starting region is valid2792// so let's iterate from there2793start_ind = (cs_size * (worker_i - 1)) / active_workers;2794OrderAccess::loadload();2795result = _worker_cset_start_region[worker_i - 1];2796}27972798for (uint i = start_ind; i < end_ind; i++) {2799result = result->next_in_collection_set();2800}2801}28022803// Note: the calculated starting heap region may be NULL2804// (when the collection set is empty).2805assert(result == NULL || result->in_collection_set(), "sanity");2806assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,2807"should be updated only once per pause");2808_worker_cset_start_region[worker_i] = result;2809OrderAccess::storestore();2810_worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;2811return result;2812}28132814void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {2815HeapRegion* r = g1_policy()->collection_set();2816while (r != NULL) {2817HeapRegion* next = r->next_in_collection_set();2818if (cl->doHeapRegion(r)) {2819cl->incomplete();2820return;2821}2822r = 
next;2823}2824}28252826void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,2827HeapRegionClosure *cl) {2828if (r == NULL) {2829// The CSet is empty so there's nothing to do.2830return;2831}28322833assert(r->in_collection_set(),2834"Start region must be a member of the collection set.");2835HeapRegion* cur = r;2836while (cur != NULL) {2837HeapRegion* next = cur->next_in_collection_set();2838if (cl->doHeapRegion(cur) && false) {2839cl->incomplete();2840return;2841}2842cur = next;2843}2844cur = g1_policy()->collection_set();2845while (cur != r) {2846HeapRegion* next = cur->next_in_collection_set();2847if (cl->doHeapRegion(cur) && false) {2848cl->incomplete();2849return;2850}2851cur = next;2852}2853}28542855HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {2856HeapRegion* result = _hrm.next_region_in_heap(from);2857while (result != NULL && result->isHumongous()) {2858result = _hrm.next_region_in_heap(result);2859}2860return result;2861}28622863Space* G1CollectedHeap::space_containing(const void* addr) const {2864return heap_region_containing(addr);2865}28662867HeapWord* G1CollectedHeap::block_start(const void* addr) const {2868Space* sp = space_containing(addr);2869return sp->block_start(addr);2870}28712872size_t G1CollectedHeap::block_size(const HeapWord* addr) const {2873Space* sp = space_containing(addr);2874return sp->block_size(addr);2875}28762877bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {2878Space* sp = space_containing(addr);2879return sp->block_is_obj(addr);2880}28812882bool G1CollectedHeap::supports_tlab_allocation() const {2883return true;2884}28852886size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {2887return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;2888}28892890size_t G1CollectedHeap::tlab_used(Thread* ignored) const {2891return young_list()->eden_used_bytes();2892}28932894// For G1 TLABs should not contain humongous objects, so the maximum TLAB size2895// must be smaller than the humongous object limit.2896size_t G1CollectedHeap::max_tlab_size() const {2897return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);2898}28992900size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {2901// Return the remaining space in the cur alloc region, but not less than2902// the min TLAB size.29032904// Also, this value can be at most the humongous object threshold,2905// since we can't allow tlabs to grow big enough to accommodate2906// humongous objects.29072908HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();2909size_t max_tlab = max_tlab_size() * wordSize;2910if (hr == NULL) {2911return max_tlab;2912} else {2913return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);2914}2915}29162917size_t G1CollectedHeap::max_capacity() const {2918return _hrm.reserved().byte_size();2919}29202921jlong G1CollectedHeap::millis_since_last_gc() {2922// assert(false, "NYI");2923return 0;2924}29252926void G1CollectedHeap::prepare_for_verify() {2927if (SafepointSynchronize::is_at_safepoint() || ! 
UseTLAB) {2928ensure_parsability(false);2929}2930g1_rem_set()->prepare_for_verify();2931}29322933bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,2934VerifyOption vo) {2935switch (vo) {2936case VerifyOption_G1UsePrevMarking:2937return hr->obj_allocated_since_prev_marking(obj);2938case VerifyOption_G1UseNextMarking:2939return hr->obj_allocated_since_next_marking(obj);2940case VerifyOption_G1UseMarkWord:2941return false;2942default:2943ShouldNotReachHere();2944}2945return false; // keep some compilers happy2946}29472948HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {2949switch (vo) {2950case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();2951case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();2952case VerifyOption_G1UseMarkWord: return NULL;2953default: ShouldNotReachHere();2954}2955return NULL; // keep some compilers happy2956}29572958bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {2959switch (vo) {2960case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);2961case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);2962case VerifyOption_G1UseMarkWord: return obj->is_gc_marked();2963default: ShouldNotReachHere();2964}2965return false; // keep some compilers happy2966}29672968const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {2969switch (vo) {2970case VerifyOption_G1UsePrevMarking: return "PTAMS";2971case VerifyOption_G1UseNextMarking: return "NTAMS";2972case VerifyOption_G1UseMarkWord: return "NONE";2973default: ShouldNotReachHere();2974}2975return NULL; // keep some compilers happy2976}29772978class VerifyRootsClosure: public OopClosure {2979private:2980G1CollectedHeap* _g1h;2981VerifyOption _vo;2982bool _failures;2983public:2984// _vo == UsePrevMarking -> use "prev" marking information,2985// _vo == UseNextMarking -> use "next" marking information,2986// _vo == UseMarkWord -> use mark word from object header.2987VerifyRootsClosure(VerifyOption vo) :2988_g1h(G1CollectedHeap::heap()),2989_vo(vo),2990_failures(false) { }29912992bool failures() { return _failures; }29932994template <class T> void do_oop_nv(T* p) {2995T heap_oop = oopDesc::load_heap_oop(p);2996if (!oopDesc::is_null(heap_oop)) {2997oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);2998if (_g1h->is_obj_dead_cond(obj, _vo)) {2999gclog_or_tty->print_cr("Root location " PTR_FORMAT " "3000"points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));3001if (_vo == VerifyOption_G1UseMarkWord) {3002gclog_or_tty->print_cr(" Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());3003}3004obj->print_on(gclog_or_tty);3005_failures = true;3006}3007}3008}30093010void do_oop(oop* p) { do_oop_nv(p); }3011void do_oop(narrowOop* p) { do_oop_nv(p); }3012};30133014class G1VerifyCodeRootOopClosure: public OopClosure {3015G1CollectedHeap* _g1h;3016OopClosure* _root_cl;3017nmethod* _nm;3018VerifyOption _vo;3019bool _failures;30203021template <class T> void do_oop_work(T* p) {3022// First verify that this root is live3023_root_cl->do_oop(p);30243025if (!G1VerifyHeapRegionCodeRoots) {3026// We're not verifying the code roots attached to heap region.3027return;3028}30293030// Don't check the code roots during marking verification in a full GC3031if (_vo == VerifyOption_G1UseMarkWord) {3032return;3033}30343035// Now verify that the current nmethod (which contains p) is3036// in the code root list of the heap region containing the3037// object referenced by p.30383039T heap_oop = oopDesc::load_heap_oop(p);3040if 
(!oopDesc::is_null(heap_oop)) {3041oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);30423043// Now fetch the region containing the object3044HeapRegion* hr = _g1h->heap_region_containing(obj);3045HeapRegionRemSet* hrrs = hr->rem_set();3046// Verify that the strong code root list for this region3047// contains the nmethod3048if (!hrrs->strong_code_roots_list_contains(_nm)) {3049gclog_or_tty->print_cr("Code root location " PTR_FORMAT " "3050"from nmethod " PTR_FORMAT " not in strong "3051"code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",3052p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));3053_failures = true;3054}3055}3056}30573058public:3059G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):3060_g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}30613062void do_oop(oop* p) { do_oop_work(p); }3063void do_oop(narrowOop* p) { do_oop_work(p); }30643065void set_nmethod(nmethod* nm) { _nm = nm; }3066bool failures() { return _failures; }3067};30683069class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {3070G1VerifyCodeRootOopClosure* _oop_cl;30713072public:3073G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):3074_oop_cl(oop_cl) {}30753076void do_code_blob(CodeBlob* cb) {3077nmethod* nm = cb->as_nmethod_or_null();3078if (nm != NULL) {3079_oop_cl->set_nmethod(nm);3080nm->oops_do(_oop_cl);3081}3082}3083};30843085class YoungRefCounterClosure : public OopClosure {3086G1CollectedHeap* _g1h;3087int _count;3088public:3089YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}3090void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } }3091void do_oop(narrowOop* p) { ShouldNotReachHere(); }30923093int count() { return _count; }3094void reset_count() { _count = 0; };3095};30963097class VerifyKlassClosure: public KlassClosure {3098YoungRefCounterClosure _young_ref_counter_closure;3099OopClosure *_oop_closure;3100public:3101VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}3102void do_klass(Klass* k) {3103k->oops_do(_oop_closure);31043105_young_ref_counter_closure.reset_count();3106k->oops_do(&_young_ref_counter_closure);3107if (_young_ref_counter_closure.count() > 0) {3108guarantee(k->has_modified_oops(), err_msg("Klass " PTR_FORMAT ", has young refs but is not dirty.", p2i(k)));3109}3110}3111};31123113class VerifyLivenessOopClosure: public OopClosure {3114G1CollectedHeap* _g1h;3115VerifyOption _vo;3116public:3117VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):3118_g1h(g1h), _vo(vo)3119{ }3120void do_oop(narrowOop *p) { do_oop_work(p); }3121void do_oop( oop *p) { do_oop_work(p); }31223123template <class T> void do_oop_work(T *p) {3124oop obj = oopDesc::load_decode_heap_oop(p);3125guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),3126"Dead object referenced by a not dead object");3127}3128};31293130class VerifyObjsInRegionClosure: public ObjectClosure {3131private:3132G1CollectedHeap* _g1h;3133size_t _live_bytes;3134HeapRegion *_hr;3135VerifyOption _vo;3136public:3137// _vo == UsePrevMarking -> use "prev" marking information,3138// _vo == UseNextMarking -> use "next" marking information,3139// _vo == UseMarkWord -> use mark word from object header.3140VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)3141: _live_bytes(0), _hr(hr), _vo(vo) {3142_g1h = G1CollectedHeap::heap();3143}3144void do_object(oop o) {3145VerifyLivenessOopClosure isLive(_g1h, _vo);3146assert(o != NULL, "Huh?");3147if 
(!_g1h->is_obj_dead_cond(o, _vo)) {3148// If the object is alive according to the mark word,3149// then verify that the marking information agrees.3150// Note we can't verify the contra-positive of the3151// above: if the object is dead (according to the mark3152// word), it may not be marked, or may have been marked3153// but has since became dead, or may have been allocated3154// since the last marking.3155if (_vo == VerifyOption_G1UseMarkWord) {3156guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");3157}31583159o->oop_iterate_no_header(&isLive);3160if (!_hr->obj_allocated_since_prev_marking(o)) {3161size_t obj_size = o->size(); // Make sure we don't overflow3162_live_bytes += (obj_size * HeapWordSize);3163}3164}3165}3166size_t live_bytes() { return _live_bytes; }3167};31683169class PrintObjsInRegionClosure : public ObjectClosure {3170HeapRegion *_hr;3171G1CollectedHeap *_g1;3172public:3173PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {3174_g1 = G1CollectedHeap::heap();3175};31763177void do_object(oop o) {3178if (o != NULL) {3179HeapWord *start = (HeapWord *) o;3180size_t word_sz = o->size();3181gclog_or_tty->print("\nPrinting obj " PTR_FORMAT " of size " SIZE_FORMAT3182" isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",3183p2i(o), word_sz,3184_g1->isMarkedPrev(o),3185_g1->isMarkedNext(o),3186_hr->obj_allocated_since_prev_marking(o));3187HeapWord *end = start + word_sz;3188HeapWord *cur;3189int *val;3190for (cur = start; cur < end; cur++) {3191val = (int *) cur;3192gclog_or_tty->print("\t " PTR_FORMAT ": %d\n", p2i(val), *val);3193}3194}3195}3196};31973198class VerifyRegionClosure: public HeapRegionClosure {3199private:3200bool _par;3201VerifyOption _vo;3202bool _failures;3203public:3204// _vo == UsePrevMarking -> use "prev" marking information,3205// _vo == UseNextMarking -> use "next" marking information,3206// _vo == UseMarkWord -> use mark word from object header.3207VerifyRegionClosure(bool par, VerifyOption vo)3208: _par(par),3209_vo(vo),3210_failures(false) {}32113212bool failures() {3213return _failures;3214}32153216bool doHeapRegion(HeapRegion* r) {3217if (!r->continuesHumongous()) {3218bool failures = false;3219r->verify(_vo, &failures);3220if (failures) {3221_failures = true;3222} else {3223VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);3224r->object_iterate(¬_dead_yet_cl);3225if (_vo != VerifyOption_G1UseNextMarking) {3226if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {3227gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "3228"max_live_bytes " SIZE_FORMAT " "3229"< calculated " SIZE_FORMAT,3230p2i(r->bottom()), p2i(r->end()),3231r->max_live_bytes(),3232not_dead_yet_cl.live_bytes());3233_failures = true;3234}3235} else {3236// When vo == UseNextMarking we cannot currently do a sanity3237// check on the live bytes as the calculation has not been3238// finalized yet.3239}3240}3241}3242return false; // stop the region iteration if we hit a failure3243}3244};32453246// This is the task used for parallel verification of the heap regions32473248class G1ParVerifyTask: public AbstractGangTask {3249private:3250G1CollectedHeap* _g1h;3251VerifyOption _vo;3252bool _failures;32533254public:3255// _vo == UsePrevMarking -> use "prev" marking information,3256// _vo == UseNextMarking -> use "next" marking information,3257// _vo == UseMarkWord -> use mark word from object header.3258G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :3259AbstractGangTask("Parallel verify task"),3260_g1h(g1h),3261_vo(vo),3262_failures(false) { }32633264bool 
failures() {3265return _failures;3266}32673268void work(uint worker_id) {3269HandleMark hm;3270VerifyRegionClosure blk(true, _vo);3271_g1h->heap_region_par_iterate_chunked(&blk, worker_id,3272_g1h->workers()->active_workers(),3273HeapRegion::ParVerifyClaimValue);3274if (blk.failures()) {3275_failures = true;3276}3277}3278};32793280void G1CollectedHeap::verify(bool silent, VerifyOption vo) {3281if (SafepointSynchronize::is_at_safepoint()) {3282assert(Thread::current()->is_VM_thread(),3283"Expected to be executed serially by the VM thread at this point");32843285if (!silent) { gclog_or_tty->print("Roots "); }3286VerifyRootsClosure rootsCl(vo);3287VerifyKlassClosure klassCl(this, &rootsCl);3288CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);32893290// We apply the relevant closures to all the oops in the3291// system dictionary, class loader data graph, the string table3292// and the nmethods in the code cache.3293G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);3294G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);32953296{3297G1RootProcessor root_processor(this);3298root_processor.process_all_roots(&rootsCl,3299&cldCl,3300&blobsCl);3301}33023303bool failures = rootsCl.failures() || codeRootsCl.failures();33043305if (vo != VerifyOption_G1UseMarkWord) {3306// If we're verifying during a full GC then the region sets3307// will have been torn down at the start of the GC. Therefore3308// verifying the region sets will fail. So we only verify3309// the region sets when not in a full GC.3310if (!silent) { gclog_or_tty->print("HeapRegionSets "); }3311verify_region_sets();3312}33133314if (!silent) { gclog_or_tty->print("HeapRegions "); }3315if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {3316assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),3317"sanity check");33183319G1ParVerifyTask task(this, vo);3320assert(UseDynamicNumberOfGCThreads ||3321workers()->active_workers() == workers()->total_workers(),3322"If not dynamic should be using all the workers");3323int n_workers = workers()->active_workers();3324set_par_threads(n_workers);3325workers()->run_task(&task);3326set_par_threads(0);3327if (task.failures()) {3328failures = true;3329}33303331// Checks that the expected amount of parallel work was done.3332// The implication is that n_workers is > 0.3333assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),3334"sanity check");33353336reset_heap_region_claim_values();33373338assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),3339"sanity check");3340} else {3341VerifyRegionClosure blk(false, vo);3342heap_region_iterate(&blk);3343if (blk.failures()) {3344failures = true;3345}3346}3347if (!silent) gclog_or_tty->print("RemSet ");3348rem_set()->verify();33493350if (G1StringDedup::is_enabled()) {3351if (!silent) gclog_or_tty->print("StrDedup ");3352G1StringDedup::verify();3353}33543355if (failures) {3356gclog_or_tty->print_cr("Heap:");3357// It helps to have the per-region information in the output to3358// help us track down what went wrong. 
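// A minimal standalone sketch follows (guarded by "#if 0", illustration only,
// not part of this file): the shape of the parallel verification done by
// G1ParVerifyTask, using std::thread and a shared atomic cursor instead of the
// VM's WorkGang and per-region claim values. All names below are hypothetical.
#if 0
#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

static void parallel_verify(const std::vector<bool>& region_ok, unsigned num_workers,
                            std::atomic<bool>& failures) {
  std::atomic<size_t> cursor{0};
  auto worker = [&] {
    // Each worker repeatedly claims the next unverified region.
    for (size_t i = cursor.fetch_add(1); i < region_ok.size(); i = cursor.fetch_add(1)) {
      if (!region_ok[i]) {          // stand-in for HeapRegion::verify()
        failures.store(true);       // any failure is reported to the caller
      }
    }
  };
  std::vector<std::thread> threads;
  for (unsigned w = 0; w < num_workers; w++) threads.emplace_back(worker);
  for (auto& t : threads) t.join();
}
#endif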
This is why we call3359// print_extended_on() instead of print_on().3360print_extended_on(gclog_or_tty);3361gclog_or_tty->cr();3362#ifndef PRODUCT3363if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {3364concurrent_mark()->print_reachable("at-verification-failure",3365vo, false /* all */);3366}3367#endif3368gclog_or_tty->flush();3369}3370guarantee(!failures, "there should not have been any failures");3371} else {3372if (!silent) {3373gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");3374if (G1StringDedup::is_enabled()) {3375gclog_or_tty->print(", StrDedup");3376}3377gclog_or_tty->print(") ");3378}3379}3380}33813382void G1CollectedHeap::verify(bool silent) {3383verify(silent, VerifyOption_G1UsePrevMarking);3384}33853386double G1CollectedHeap::verify(bool guard, const char* msg) {3387double verify_time_ms = 0.0;33883389if (guard && total_collections() >= VerifyGCStartAt) {3390double verify_start = os::elapsedTime();3391HandleMark hm; // Discard invalid handles created during verification3392prepare_for_verify();3393Universe::verify(VerifyOption_G1UsePrevMarking, msg);3394verify_time_ms = (os::elapsedTime() - verify_start) * 1000;3395}33963397return verify_time_ms;3398}33993400void G1CollectedHeap::verify_before_gc() {3401double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");3402g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);3403}34043405void G1CollectedHeap::verify_after_gc() {3406double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");3407g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);3408}34093410class PrintRegionClosure: public HeapRegionClosure {3411outputStream* _st;3412public:3413PrintRegionClosure(outputStream* st) : _st(st) {}3414bool doHeapRegion(HeapRegion* r) {3415r->print_on(_st);3416return false;3417}3418};34193420bool G1CollectedHeap::is_obj_dead_cond(const oop obj,3421const HeapRegion* hr,3422const VerifyOption vo) const {3423switch (vo) {3424case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);3425case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);3426case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();3427default: ShouldNotReachHere();3428}3429return false; // keep some compilers happy3430}34313432bool G1CollectedHeap::is_obj_dead_cond(const oop obj,3433const VerifyOption vo) const {3434switch (vo) {3435case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);3436case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);3437case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();3438default: ShouldNotReachHere();3439}3440return false; // keep some compilers happy3441}34423443void G1CollectedHeap::print_on(outputStream* st) const {3444st->print(" %-20s", "garbage-first heap");3445st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",3446capacity()/K, used_unlocked()/K);3447st->print(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",3448p2i(_hrm.reserved().start()),3449p2i(_hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords),3450p2i(_hrm.reserved().end()));3451st->cr();3452st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);3453uint young_regions = _young_list->length();3454st->print("%u young (" SIZE_FORMAT "K), ", young_regions,3455(size_t) young_regions * HeapRegion::GrainBytes / K);3456uint survivor_regions = g1_policy()->recorded_survivor_regions();3457st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,3458(size_t) survivor_regions * HeapRegion::GrainBytes / 
K);3459st->cr();3460MetaspaceAux::print_on(st);3461}34623463void G1CollectedHeap::print_extended_on(outputStream* st) const {3464print_on(st);34653466// Print the per-region information.3467st->cr();3468st->print_cr("Heap Regions: (E=young(eden), S=young(survivor), O=old, "3469"HS=humongous(starts), HC=humongous(continues), "3470"CS=collection set, F=free, TS=gc time stamp, "3471"PTAMS=previous top-at-mark-start, "3472"NTAMS=next top-at-mark-start)");3473PrintRegionClosure blk(st);3474heap_region_iterate(&blk);3475}34763477void G1CollectedHeap::print_on_error(outputStream* st) const {3478this->CollectedHeap::print_on_error(st);34793480if (_cm != NULL) {3481st->cr();3482_cm->print_on_error(st);3483}3484}34853486void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {3487if (G1CollectedHeap::use_parallel_gc_threads()) {3488workers()->print_worker_threads_on(st);3489}3490_cmThread->print_on(st);3491st->cr();3492_cm->print_worker_threads_on(st);3493_cg1r->print_worker_threads_on(st);3494if (G1StringDedup::is_enabled()) {3495G1StringDedup::print_worker_threads_on(st);3496}3497}34983499void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {3500if (G1CollectedHeap::use_parallel_gc_threads()) {3501workers()->threads_do(tc);3502}3503tc->do_thread(_cmThread);3504_cg1r->threads_do(tc);3505if (G1StringDedup::is_enabled()) {3506G1StringDedup::threads_do(tc);3507}3508}35093510void G1CollectedHeap::print_tracing_info() const {3511// We'll overload this to mean "trace GC pause statistics."3512if (TraceGen0Time || TraceGen1Time) {3513// The "G1CollectorPolicy" is keeping track of these stats, so delegate3514// to that.3515g1_policy()->print_tracing_info();3516}3517if (G1SummarizeRSetStats) {3518g1_rem_set()->print_summary_info();3519}3520if (G1SummarizeConcMark) {3521concurrent_mark()->print_summary_info();3522}3523g1_policy()->print_yg_surv_rate_info();3524SpecializationStats::print();3525}35263527#ifndef PRODUCT3528// Helpful for debugging RSet issues.35293530class PrintRSetsClosure : public HeapRegionClosure {3531private:3532const char* _msg;3533size_t _occupied_sum;35343535public:3536bool doHeapRegion(HeapRegion* r) {3537HeapRegionRemSet* hrrs = r->rem_set();3538size_t occupied = hrrs->occupied();3539_occupied_sum += occupied;35403541gclog_or_tty->print_cr("Printing RSet for region " HR_FORMAT,3542HR_FORMAT_PARAMS(r));3543if (occupied == 0) {3544gclog_or_tty->print_cr(" RSet is empty");3545} else {3546hrrs->print();3547}3548gclog_or_tty->print_cr("----------");3549return false;3550}35513552PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {3553gclog_or_tty->cr();3554gclog_or_tty->print_cr("========================================");3555gclog_or_tty->print_cr("%s", msg);3556gclog_or_tty->cr();3557}35583559~PrintRSetsClosure() {3560gclog_or_tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);3561gclog_or_tty->print_cr("========================================");3562gclog_or_tty->cr();3563}3564};35653566void G1CollectedHeap::print_cset_rsets() {3567PrintRSetsClosure cl("Printing CSet RSets");3568collection_set_iterate(&cl);3569}35703571void G1CollectedHeap::print_all_rsets() {3572PrintRSetsClosure cl("Printing All RSets");;3573heap_region_iterate(&cl);3574}3575#endif // PRODUCT35763577G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {35783579size_t eden_used_bytes = _young_list->eden_used_bytes();3580size_t survivor_used_bytes = _young_list->survivor_used_bytes();3581size_t heap_used = Heap_lock->owned_by_self() ? 
used() : used_unlocked();35823583size_t eden_capacity_bytes =3584(g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;35853586VirtualSpaceSummary heap_summary = create_heap_space_summary();3587return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,3588eden_capacity_bytes, survivor_used_bytes, num_regions());3589}35903591void G1CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {3592const G1HeapSummary& heap_summary = create_g1_heap_summary();3593gc_tracer->report_gc_heap_summary(when, heap_summary);35943595const MetaspaceSummary& metaspace_summary = create_metaspace_summary();3596gc_tracer->report_metaspace_summary(when, metaspace_summary);3597}35983599G1CollectedHeap* G1CollectedHeap::heap() {3600assert(_sh->kind() == CollectedHeap::G1CollectedHeap,3601"not a garbage-first heap");3602return _g1h;3603}36043605void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {3606// always_do_update_barrier = false;3607assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");3608// Fill TLAB's and such3609accumulate_statistics_all_tlabs();3610ensure_parsability(true);36113612if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&3613(total_collections() % G1SummarizeRSetStatsPeriod == 0)) {3614g1_rem_set()->print_periodic_summary_info("Before GC RS summary");3615}3616}36173618void G1CollectedHeap::gc_epilogue(bool full) {36193620if (G1SummarizeRSetStats &&3621(G1SummarizeRSetStatsPeriod > 0) &&3622// we are at the end of the GC. Total collections has already been increased.3623((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {3624g1_rem_set()->print_periodic_summary_info("After GC RS summary");3625}36263627// FIXME: what is this about?3628// I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"3629// is set.3630COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),3631"derived pointer present"));3632// always_do_update_barrier = true;36333634resize_all_tlabs();3635allocation_context_stats().update(full);36363637// We have just completed a GC. 
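// A minimal standalone sketch follows (guarded by "#if 0", illustration only):
// why the epilogue's periodic-summary test above subtracts one from the
// collection counter. increment_total_collections() runs between prologue and
// epilogue, so the two tests must line up on the same pause. Names are
// hypothetical stand-ins for the checks in gc_prologue()/gc_epilogue().
#if 0
#include <cassert>

static bool print_summary_in_prologue(unsigned total_collections, unsigned period) {
  // Counter has not yet been incremented for the pause that is starting.
  return period > 0 && total_collections % period == 0;
}

static bool print_summary_in_epilogue(unsigned total_collections, unsigned period) {
  // Counter was already incremented for the pause that is ending,
  // hence the "- 1" to match the prologue test for the same pause.
  return period > 0 && (total_collections - 1) % period == 0;
}

int main() {
  const unsigned period = 5;
  unsigned collections = 10;                      // before the pause
  bool before = print_summary_in_prologue(collections, period);
  collections++;                                  // increment_total_collections()
  bool after  = print_summary_in_epilogue(collections, period);
  assert(before == after);                        // both fire for the same pauses
  return 0;
}
#endif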
Update the soft reference3638// policy with the new heap occupancy3639Universe::update_heap_info_at_gc();3640}36413642HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,3643uint gc_count_before,3644bool* succeeded,3645GCCause::Cause gc_cause) {3646assert_heap_not_locked_and_not_at_safepoint();3647g1_policy()->record_stop_world_start();3648VM_G1IncCollectionPause op(gc_count_before,3649word_size,3650false, /* should_initiate_conc_mark */3651g1_policy()->max_pause_time_ms(),3652gc_cause);36533654op.set_allocation_context(AllocationContext::current());3655VMThread::execute(&op);36563657HeapWord* result = op.result();3658bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();3659assert(result == NULL || ret_succeeded,3660"the result should be NULL if the VM did not succeed");3661*succeeded = ret_succeeded;36623663assert_heap_not_locked();3664return result;3665}36663667void3668G1CollectedHeap::doConcurrentMark() {3669MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);3670if (!_cmThread->in_progress()) {3671_cmThread->set_started();3672CGC_lock->notify();3673}3674}36753676size_t G1CollectedHeap::pending_card_num() {3677size_t extra_cards = 0;3678JavaThread *curr = Threads::first();3679while (curr != NULL) {3680DirtyCardQueue& dcq = curr->dirty_card_queue();3681extra_cards += dcq.size();3682curr = curr->next();3683}3684DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();3685size_t buffer_size = dcqs.buffer_size();3686size_t buffer_num = dcqs.completed_buffers_num();36873688// PtrQueueSet::buffer_size() and PtrQueue:size() return sizes3689// in bytes - not the number of 'entries'. We need to convert3690// into a number of cards.3691return (buffer_size * buffer_num + extra_cards) / oopSize;3692}36933694size_t G1CollectedHeap::cards_scanned() {3695return g1_rem_set()->cardsScanned();3696}36973698class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {3699private:3700size_t _total_humongous;3701size_t _candidate_humongous;37023703DirtyCardQueue _dcq;37043705// We don't nominate objects with many remembered set entries, on3706// the assumption that such objects are likely still live.3707bool is_remset_small(HeapRegion* region) const {3708HeapRegionRemSet* const rset = region->rem_set();3709return G1EagerReclaimHumongousObjectsWithStaleRefs3710? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)3711: rset->is_empty();3712}37133714bool is_typeArray_region(HeapRegion* region) const {3715return oop(region->bottom())->is_typeArray();3716}37173718bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {3719assert(region->startsHumongous(), "Must start a humongous object");37203721// Candidate selection must satisfy the following constraints3722// while concurrent marking is in progress:3723//3724// * In order to maintain SATB invariants, an object must not be3725// reclaimed if it was allocated before the start of marking and3726// has not had its references scanned. Such an object must have3727// its references (including type metadata) scanned to ensure no3728// live objects are missed by the marking process. Objects3729// allocated after the start of concurrent marking don't need to3730// be scanned.3731//3732// * An object must not be reclaimed if it is on the concurrent3733// mark stack. 
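// A minimal standalone sketch follows (guarded by "#if 0", illustration only,
// with hypothetical types): the shape of the eager-reclaim candidate test
// described in the surrounding comment. A humongous region is nominated only
// if it holds a primitive array and its remembered set is small enough to be
// cheaply flushed and re-checked.
#if 0
#include <cstddef>

struct RegionInfo {
  bool   starts_humongous;
  bool   is_type_array;      // object is a primitive (typeArray) object
  size_t remset_entries;     // current remembered-set occupancy
};

static bool is_remset_small(const RegionInfo& r,
                            bool allow_stale_refs,     // ~ G1EagerReclaimHumongousObjectsWithStaleRefs
                            size_t sparse_threshold) { // ~ G1RSetSparseRegionEntries
  return allow_stale_refs ? r.remset_entries <= sparse_threshold
                          : r.remset_entries == 0;
}

static bool humongous_reclaim_candidate(const RegionInfo& r,
                                        bool allow_stale_refs,
                                        size_t sparse_threshold) {
  return r.starts_humongous &&
         r.is_type_array &&              // no outgoing references to scan or scrub
         is_remset_small(r, allow_stale_refs, sparse_threshold);
}
#endif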
Objects allocated after the start of concurrent3734// marking are never pushed on the mark stack.3735//3736// Nominating only objects allocated after the start of concurrent3737// marking is sufficient to meet both constraints. This may miss3738// some objects that satisfy the constraints, but the marking data3739// structures don't support efficiently performing the needed3740// additional tests or scrubbing of the mark stack.3741//3742// However, we presently only nominate is_typeArray() objects.3743// A humongous object containing references induces remembered3744// set entries on other regions. In order to reclaim such an3745// object, those remembered sets would need to be cleaned up.3746//3747// We also treat is_typeArray() objects specially, allowing them3748// to be reclaimed even if allocated before the start of3749// concurrent mark. For this we rely on mark stack insertion to3750// exclude is_typeArray() objects, preventing reclaiming an object3751// that is in the mark stack. We also rely on the metadata for3752// such objects to be built-in and so ensured to be kept live.3753// Frequent allocation and drop of large binary blobs is an3754// important use case for eager reclaim, and this special handling3755// may reduce needed headroom.37563757return is_typeArray_region(region) && is_remset_small(region);3758}37593760public:3761RegisterHumongousWithInCSetFastTestClosure()3762: _total_humongous(0),3763_candidate_humongous(0),3764_dcq(&JavaThread::dirty_card_queue_set()) {3765}37663767virtual bool doHeapRegion(HeapRegion* r) {3768if (!r->startsHumongous()) {3769return false;3770}3771G1CollectedHeap* g1h = G1CollectedHeap::heap();37723773bool is_candidate = humongous_region_is_candidate(g1h, r);3774uint rindex = r->hrm_index();3775g1h->set_humongous_reclaim_candidate(rindex, is_candidate);3776if (is_candidate) {3777_candidate_humongous++;3778g1h->register_humongous_region_with_in_cset_fast_test(rindex);3779// Is_candidate already filters out humongous object with large remembered sets.3780// If we have a humongous object with a few remembered sets, we simply flush these3781// remembered set entries into the DCQS. That will result in automatic3782// re-evaluation of their remembered set entries during the following evacuation3783// phase.3784if (!r->rem_set()->is_empty()) {3785guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),3786"Found a not-small remembered set here. 
This is inconsistent with previous assumptions.");3787G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();3788HeapRegionRemSetIterator hrrs(r->rem_set());3789size_t card_index;3790while (hrrs.has_next(card_index)) {3791jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);3792if (*card_ptr != CardTableModRefBS::dirty_card_val()) {3793*card_ptr = CardTableModRefBS::dirty_card_val();3794_dcq.enqueue(card_ptr);3795}3796}3797assert(hrrs.n_yielded() == r->rem_set()->occupied(),3798err_msg("Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",3799hrrs.n_yielded(), r->rem_set()->occupied()));3800r->rem_set()->clear_locked();3801}3802assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");3803}3804_total_humongous++;38053806return false;3807}38083809size_t total_humongous() const { return _total_humongous; }3810size_t candidate_humongous() const { return _candidate_humongous; }38113812void flush_rem_set_entries() { _dcq.flush(); }3813};38143815void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {3816if (!G1EagerReclaimHumongousObjects) {3817g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);3818return;3819}3820double time = os::elapsed_counter();38213822// Collect reclaim candidate information and register candidates with cset.3823RegisterHumongousWithInCSetFastTestClosure cl;3824heap_region_iterate(&cl);38253826time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;3827g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,3828cl.total_humongous(),3829cl.candidate_humongous());3830_has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;38313832// Finally flush all remembered set entries to re-check into the global DCQS.3833cl.flush_rem_set_entries();3834}38353836void3837G1CollectedHeap::setup_surviving_young_words() {3838assert(_surviving_young_words == NULL, "pre-condition");3839uint array_length = g1_policy()->young_cset_region_length();3840_surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);3841if (_surviving_young_words == NULL) {3842vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,3843"Not enough space for young surv words summary.");3844}3845memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));3846#ifdef ASSERT3847for (uint i = 0; i < array_length; ++i) {3848assert( _surviving_young_words[i] == 0, "memset above" );3849}3850#endif // !ASSERT3851}38523853void3854G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {3855MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);3856uint array_length = g1_policy()->young_cset_region_length();3857for (uint i = 0; i < array_length; ++i) {3858_surviving_young_words[i] += surv_young_words[i];3859}3860}38613862void3863G1CollectedHeap::cleanup_surviving_young_words() {3864guarantee( _surviving_young_words != NULL, "pre-condition" );3865FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC);3866_surviving_young_words = NULL;3867}38683869class VerifyRegionRemSetClosure : public HeapRegionClosure {3870public:3871bool doHeapRegion(HeapRegion* hr) {3872if (!hr->continuesHumongous()) {3873hr->verify_rem_set();3874}3875return false;3876}3877};38783879#ifdef ASSERT3880class VerifyCSetClosure: public HeapRegionClosure {3881public:3882bool doHeapRegion(HeapRegion* hr) {3883// Here we check that the CSet region's RSet is ready for parallel3884// iteration. 
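// A minimal standalone sketch follows (guarded by "#if 0", illustration only;
// a simplified model, not the HotSpot card table or DCQS types): flushing a
// small remembered set into a dirty card queue, as done above for humongous
// reclaim candidates. Each referenced card is dirtied (if not already dirty)
// and queued so its entries are re-examined by later refinement/scanning.
#if 0
#include <cstdint>
#include <deque>
#include <vector>

enum : uint8_t { kCleanCard = 0xff, kDirtyCard = 0x00 };   // hypothetical values

static void flush_remset_to_dcq(const std::vector<size_t>& remset_card_indices,
                                std::vector<uint8_t>& card_table,
                                std::deque<size_t>& dirty_card_queue) {
  for (size_t card_index : remset_card_indices) {
    if (card_table[card_index] != kDirtyCard) {
      card_table[card_index] = kDirtyCard;     // mark the card dirty
      dirty_card_queue.push_back(card_index);  // re-check it later
    }
  }
  // Afterwards the remembered set itself can be cleared; the queued dirty
  // cards carry the information forward.
}
#endif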
The fields that we'll verify are only manipulated3885// when the region is part of a CSet and is collected. Afterwards,3886// we reset these fields when we clear the region's RSet (when the3887// region is freed) so they are ready when the region is3888// re-allocated. The only exception to this is if there's an3889// evacuation failure and instead of freeing the region we leave3890// it in the heap. In that case, we reset these fields during3891// evacuation failure handling.3892guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");38933894// Here's a good place to add any other checks we'd like to3895// perform on CSet regions.3896return false;3897}3898};3899#endif // ASSERT39003901#if TASKQUEUE_STATS3902void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {3903st->print_raw_cr("GC Task Stats");3904st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();3905st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();3906}39073908void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {3909print_taskqueue_stats_hdr(st);39103911TaskQueueStats totals;3912const int n = workers() != NULL ? workers()->total_workers() : 1;3913for (int i = 0; i < n; ++i) {3914st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();3915totals += task_queue(i)->stats;3916}3917st->print_raw("tot "); totals.print(st); st->cr();39183919DEBUG_ONLY(totals.verify());3920}39213922void G1CollectedHeap::reset_taskqueue_stats() {3923const int n = workers() != NULL ? workers()->total_workers() : 1;3924for (int i = 0; i < n; ++i) {3925task_queue(i)->stats.reset();3926}3927}3928#endif // TASKQUEUE_STATS39293930void G1CollectedHeap::log_gc_header() {3931if (!G1Log::fine()) {3932return;3933}39343935gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());39363937GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())3938.append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")3939.append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");39403941gclog_or_tty->print("[%s", (const char*)gc_cause_str);3942}39433944void G1CollectedHeap::log_gc_footer(double pause_time_sec) {3945if (!G1Log::fine()) {3946return;3947}39483949if (G1Log::finer()) {3950if (evacuation_failed()) {3951gclog_or_tty->print(" (to-space exhausted)");3952}3953gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);3954g1_policy()->phase_times()->note_gc_end();3955g1_policy()->phase_times()->print(pause_time_sec);3956g1_policy()->print_detailed_heap_transition();3957} else {3958if (evacuation_failed()) {3959gclog_or_tty->print("--");3960}3961g1_policy()->print_heap_transition();3962gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);3963}3964gclog_or_tty->flush();3965}39663967bool3968G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {3969assert_at_safepoint(true /* should_be_vm_thread */);3970guarantee(!is_gc_active(), "collection is not reentrant");39713972if (GC_locker::check_active_before_gc()) {3973return false;3974}39753976_gc_timer_stw->register_gc_start();39773978_gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());39793980SvcGCMarker sgcm(SvcGCMarker::MINOR);3981ResourceMark rm;39823983print_heap_before_gc();3984trace_heap_before_gc(_gc_tracer_stw);39853986verify_region_sets_optional();3987verify_dirty_young_regions();39883989// This call will decide whether this pause is an initial-mark3990// pause. 
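// A minimal standalone sketch follows (guarded by "#if 0", illustration only;
// a hypothetical formatting helper): assembling the pause log header from the
// pieces used by log_gc_header() above -- the GC cause, whether the pause is
// young-only or mixed, and whether it doubles as an initial-mark pause. The
// exact spacing of the real output comes from GCCauseString.
#if 0
#include <string>

static std::string pause_header(const std::string& cause,
                                bool gcs_are_young,
                                bool during_initial_mark) {
  std::string s = "[GC pause (" + cause + ") ";
  s += gcs_are_young ? "(young)" : "(mixed)";
  if (during_initial_mark) {
    s += " (initial-mark)";
  }
  return s;   // the timing suffix ", %3.7f secs]" is appended by the footer
}
#endif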
If it is, during_initial_mark_pause() will return true3991// for the duration of this pause.3992g1_policy()->decide_on_conc_mark_initiation();39933994// We do not allow initial-mark to be piggy-backed on a mixed GC.3995assert(!g1_policy()->during_initial_mark_pause() ||3996g1_policy()->gcs_are_young(), "sanity");39973998// We also do not allow mixed GCs during marking.3999assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");40004001// Record whether this pause is an initial mark. When the current4002// thread has completed its logging output and it's safe to signal4003// the CM thread, the flag's value in the policy has been reset.4004bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();40054006// Inner scope for scope based logging, timers, and stats collection4007{4008EvacuationInfo evacuation_info;40094010if (g1_policy()->during_initial_mark_pause()) {4011// We are about to start a marking cycle, so we increment the4012// full collection counter.4013increment_old_marking_cycles_started();4014register_concurrent_cycle_start(_gc_timer_stw->gc_start());4015}40164017_gc_tracer_stw->report_yc_type(yc_type());40184019TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);40204021uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),4022workers()->active_workers(),4023Threads::number_of_non_daemon_threads());4024assert(UseDynamicNumberOfGCThreads ||4025active_workers == workers()->total_workers(),4026"If not dynamic should be using all the workers");4027workers()->set_active_workers(active_workers);402840294030double pause_start_sec = os::elapsedTime();4031g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());4032log_gc_header();40334034TraceCollectorStats tcs(g1mm()->incremental_collection_counters());4035TraceMemoryManagerStats tms(false /* fullGC */, gc_cause(),4036yc_type() == Mixed /* allMemoryPoolsAffected */);40374038// If the secondary_free_list is not empty, append it to the4039// free_list. No need to wait for the cleanup operation to finish;4040// the region allocation code will check the secondary_free_list4041// and wait if necessary. If the G1StressConcRegionFreeing flag is4042// set, skip this step so that the region allocation code has to4043// get entries from the secondary_free_list.4044if (!G1StressConcRegionFreeing) {4045append_secondary_free_list_if_not_empty_with_lock();4046}40474048assert(check_young_list_well_formed(), "young list should be well formed");4049assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),4050"sanity check");40514052// Don't dynamically change the number of GC threads this early. A value of4053// 0 is used to indicate serial work. 
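// A minimal standalone sketch follows (guarded by "#if 0", illustration only;
// a simplified stand-in -- the real heuristic lives in
// AdaptiveSizePolicy::calc_active_workers): picking how many workers run the
// pause. With a fixed thread count every pause uses all workers; with a
// dynamic policy the count may scale with application activity but is always
// clamped to [1, total_workers]. The scaling rule below is hypothetical.
#if 0
#include <algorithm>
#include <cstddef>

static size_t choose_active_workers(size_t total_workers,
                                    size_t prev_active_workers,
                                    size_t non_daemon_threads,
                                    bool   use_dynamic_gc_threads) {
  if (!use_dynamic_gc_threads) {
    return total_workers;                      // matches the assert in the pause code
  }
  // Hypothetical scaling rule for illustration only: follow the number of
  // active application threads, decay slowly from the previous choice, and
  // never exceed the pool size.
  size_t wanted = std::max<size_t>(1, non_daemon_threads);
  wanted = std::max(wanted, prev_active_workers / 2);
  return std::min(wanted, total_workers);
}
#endif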
When parallel work is done,4054// it will be set.40554056{ // Call to jvmpi::post_class_unload_events must occur outside of active GC4057IsGCActiveMark x;40584059gc_prologue(false);4060increment_total_collections(false /* full gc */);4061increment_gc_time_stamp();40624063if (VerifyRememberedSets) {4064if (!VerifySilently) {4065gclog_or_tty->print_cr("[Verifying RemSets before GC]");4066}4067VerifyRegionRemSetClosure v_cl;4068heap_region_iterate(&v_cl);4069}40704071verify_before_gc();4072check_bitmaps("GC Start");40734074COMPILER2_PRESENT(DerivedPointerTable::clear());40754076// Please see comment in g1CollectedHeap.hpp and4077// G1CollectedHeap::ref_processing_init() to see how4078// reference processing currently works in G1.40794080// Enable discovery in the STW reference processor4081ref_processor_stw()->enable_discovery(true /*verify_disabled*/,4082true /*verify_no_refs*/);40834084{4085// We want to temporarily turn off discovery by the4086// CM ref processor, if necessary, and turn it back on4087// on again later if we do. Using a scoped4088// NoRefDiscovery object will do this.4089NoRefDiscovery no_cm_discovery(ref_processor_cm());40904091// Forget the current alloc region (we might even choose it to be part4092// of the collection set!).4093_allocator->release_mutator_alloc_region();40944095// We should call this after we retire the mutator alloc4096// region(s) so that all the ALLOC / RETIRE events are generated4097// before the start GC event.4098_hr_printer.start_gc(false /* full */, (size_t) total_collections());40994100// This timing is only used by the ergonomics to handle our pause target.4101// It is unclear why this should not include the full pause. We will4102// investigate this in CR 7178365.4103//4104// Preserving the old comment here if that helps the investigation:4105//4106// The elapsed time induced by the start time below deliberately elides4107// the possible verification above.4108double sample_start_time_sec = os::elapsedTime();41094110#if YOUNG_LIST_VERBOSE4111gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");4112_young_list->print();4113g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);4114#endif // YOUNG_LIST_VERBOSE41154116g1_policy()->record_collection_pause_start(sample_start_time_sec, *_gc_tracer_stw);41174118double scan_wait_start = os::elapsedTime();4119// We have to wait until the CM threads finish scanning the4120// root regions as it's the only way to ensure that all the4121// objects on them have been correctly scanned before we start4122// moving them during the GC.4123bool waited = _cm->root_regions()->wait_until_scan_finished();4124double wait_time_ms = 0.0;4125if (waited) {4126double scan_wait_end = os::elapsedTime();4127wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;4128}4129g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);41304131#if YOUNG_LIST_VERBOSE4132gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");4133_young_list->print();4134#endif // YOUNG_LIST_VERBOSE41354136if (g1_policy()->during_initial_mark_pause()) {4137concurrent_mark()->checkpointRootsInitialPre();4138}41394140#if YOUNG_LIST_VERBOSE4141gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");4142_young_list->print();4143g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);4144#endif // YOUNG_LIST_VERBOSE41454146g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);41474148// Make sure the remembered sets are up to date. 
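// A minimal standalone sketch follows (guarded by "#if 0", illustration only;
// a std-library model, not the VM's monitor primitives): the root-region scan
// wait performed above. The pause must not start moving objects while
// concurrent-mark root-region scanning is still running, so it blocks on a
// flag and reports how long it waited.
#if 0
#include <chrono>
#include <condition_variable>
#include <mutex>

struct RootRegionScan {
  std::mutex              lock;
  std::condition_variable done_cv;
  bool                    done = false;

  // Returns the wait time in milliseconds (0.0 if scanning had already finished).
  double wait_until_scan_finished() {
    std::unique_lock<std::mutex> ul(lock);
    if (done) return 0.0;
    auto start = std::chrono::steady_clock::now();
    done_cv.wait(ul, [this] { return done; });
    std::chrono::duration<double, std::milli> ms =
        std::chrono::steady_clock::now() - start;
    return ms.count();
  }

  void notify_scan_finished() {
    { std::lock_guard<std::mutex> lg(lock); done = true; }
    done_cv.notify_all();
  }
};
#endif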
This needs to be4149// done before register_humongous_regions_with_cset(), because the4150// remembered sets are used there to choose eager reclaim candidates.4151// If the remembered sets are not up to date we might miss some4152// entries that need to be handled.4153g1_rem_set()->cleanupHRRS();41544155register_humongous_regions_with_in_cset_fast_test();41564157assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");41584159_cm->note_start_of_gc();4160// We call this after finalize_cset() to4161// ensure that the CSet has been finalized.4162_cm->verify_no_cset_oops();41634164if (_hr_printer.is_active()) {4165HeapRegion* hr = g1_policy()->collection_set();4166while (hr != NULL) {4167_hr_printer.cset(hr);4168hr = hr->next_in_collection_set();4169}4170}41714172#ifdef ASSERT4173VerifyCSetClosure cl;4174collection_set_iterate(&cl);4175#endif // ASSERT41764177setup_surviving_young_words();41784179// Initialize the GC alloc regions.4180_allocator->init_gc_alloc_regions(evacuation_info);41814182// Actually do the work...4183evacuate_collection_set(evacuation_info);41844185free_collection_set(g1_policy()->collection_set(), evacuation_info);41864187eagerly_reclaim_humongous_regions();41884189g1_policy()->clear_collection_set();41904191cleanup_surviving_young_words();41924193// Start a new incremental collection set for the next pause.4194g1_policy()->start_incremental_cset_building();41954196clear_cset_fast_test();41974198_young_list->reset_sampled_info();41994200// Don't check the whole heap at this point as the4201// GC alloc regions from this pause have been tagged4202// as survivors and moved on to the survivor list.4203// Survivor regions will fail the !is_young() check.4204assert(check_young_list_empty(false /* check_heap */),4205"young list should be empty");42064207#if YOUNG_LIST_VERBOSE4208gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");4209_young_list->print();4210#endif // YOUNG_LIST_VERBOSE42114212g1_policy()->record_survivor_regions(_young_list->survivor_length(),4213_young_list->first_survivor_region(),4214_young_list->last_survivor_region());42154216_young_list->reset_auxilary_lists();42174218if (evacuation_failed()) {4219_allocator->set_used(recalculate_used());4220uint n_queues = MAX2((int)ParallelGCThreads, 1);4221for (uint i = 0; i < n_queues; i++) {4222if (_evacuation_failed_info_array[i].has_failed()) {4223_gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);4224}4225}4226} else {4227// The "used" of the the collection set have already been subtracted4228// when they were freed. Add in the bytes evacuated.4229_allocator->increase_used(g1_policy()->bytes_copied_during_gc());4230}42314232if (g1_policy()->during_initial_mark_pause()) {4233// We have to do this before we notify the CM threads that4234// they can start working to make sure that all the4235// appropriate initialization is done on the CM object.4236concurrent_mark()->checkpointRootsInitialPost();4237set_marking_started();4238// Note that we don't actually trigger the CM thread at4239// this point. 
We do that later when we're sure that4240// the current thread has completed its logging output.4241}42424243allocate_dummy_regions();42444245#if YOUNG_LIST_VERBOSE4246gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");4247_young_list->print();4248g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);4249#endif // YOUNG_LIST_VERBOSE42504251_allocator->init_mutator_alloc_region();42524253{4254size_t expand_bytes = g1_policy()->expansion_amount();4255if (expand_bytes > 0) {4256size_t bytes_before = capacity();4257// No need for an ergo verbose message here,4258// expansion_amount() does this when it returns a value > 0.4259if (!expand(expand_bytes)) {4260// We failed to expand the heap. Cannot do anything about it.4261}4262}4263}42644265// We redo the verification but now wrt to the new CSet which4266// has just got initialized after the previous CSet was freed.4267_cm->verify_no_cset_oops();4268_cm->note_end_of_gc();42694270// This timing is only used by the ergonomics to handle our pause target.4271// It is unclear why this should not include the full pause. We will4272// investigate this in CR 7178365.4273double sample_end_time_sec = os::elapsedTime();4274double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;4275g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);42764277MemoryService::track_memory_usage();42784279// In prepare_for_verify() below we'll need to scan the deferred4280// update buffers to bring the RSets up-to-date if4281// G1HRRSFlushLogBuffersOnVerify has been set. While scanning4282// the update buffers we'll probably need to scan cards on the4283// regions we just allocated to (i.e., the GC alloc4284// regions). However, during the last GC we called4285// set_saved_mark() on all the GC alloc regions, so card4286// scanning might skip the [saved_mark_word()...top()] area of4287// those regions (i.e., the area we allocated objects into4288// during the last GC). But it shouldn't. Given that4289// saved_mark_word() is conditional on whether the GC time stamp4290// on the region is current or not, by incrementing the GC time4291// stamp here we invalidate all the GC time stamps on all the4292// regions and saved_mark_word() will simply return top() for4293// all the regions. This is a nicer way of ensuring this rather4294// than iterating over the regions and fixing them. In fact, the4295// GC time stamp increment here also ensures that4296// saved_mark_word() will return top() between pauses, i.e.,4297// during concurrent refinement. 
So we don't need the4298// is_gc_active() check to decided which top to use when4299// scanning cards (see CR 7039627).4300increment_gc_time_stamp();43014302if (VerifyRememberedSets) {4303if (!VerifySilently) {4304gclog_or_tty->print_cr("[Verifying RemSets after GC]");4305}4306VerifyRegionRemSetClosure v_cl;4307heap_region_iterate(&v_cl);4308}43094310verify_after_gc();4311check_bitmaps("GC End");43124313assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");4314ref_processor_stw()->verify_no_references_recorded();43154316// CM reference discovery will be re-enabled if necessary.4317}43184319// We should do this after we potentially expand the heap so4320// that all the COMMIT events are generated before the end GC4321// event, and after we retire the GC alloc regions so that all4322// RETIRE events are generated before the end GC event.4323_hr_printer.end_gc(false /* full */, (size_t) total_collections());43244325#ifdef TRACESPINNING4326ParallelTaskTerminator::print_termination_counts();4327#endif43284329gc_epilogue(false);4330}43314332// Print the remainder of the GC log output.4333log_gc_footer(os::elapsedTime() - pause_start_sec);43344335// It is not yet to safe to tell the concurrent mark to4336// start as we have some optional output below. We don't want the4337// output from the concurrent mark thread interfering with this4338// logging output either.43394340_hrm.verify_optional();4341verify_region_sets_optional();43424343TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());4344TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());43454346print_heap_after_gc();4347trace_heap_after_gc(_gc_tracer_stw);43484349// We must call G1MonitoringSupport::update_sizes() in the same scoping level4350// as an active TraceMemoryManagerStats object (i.e. before the destructor for the4351// TraceMemoryManagerStats is called) so that the G1 memory pools are updated4352// before any GC notifications are raised.4353g1mm()->update_sizes();43544355_gc_tracer_stw->report_evacuation_info(&evacuation_info);4356_gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());4357_gc_timer_stw->register_gc_end();4358_gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());4359}4360// It should now be safe to tell the concurrent mark thread to start4361// without its logging output interfering with the logging output4362// that came from the pause.43634364if (should_start_conc_mark) {4365// CAUTION: after the doConcurrentMark() call below,4366// the concurrent marking thread(s) could be running4367// concurrently with us. Make sure that anything after4368// this point does not assume that we are the only GC thread4369// running. 
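// A minimal standalone sketch follows (guarded by "#if 0", illustration only;
// a std-library model, not CGC_lock/MutexLockerEx): the "kick the concurrent
// mark thread" handshake performed by doConcurrentMark() below. The pause only
// sets a flag and notifies under a lock; the marking thread wakes up and does
// the actual work after the safepoint is released.
#if 0
#include <condition_variable>
#include <mutex>

struct ConcurrentMarkThreadState {
  std::mutex              lock;
  std::condition_variable cv;
  bool                    started = false;

  // Called by the VM thread at the end of an initial-mark pause.
  void signal_start() {
    std::lock_guard<std::mutex> lg(lock);
    if (!started) {            // don't re-signal a cycle that is already running
      started = true;
      cv.notify_one();
    }
  }

  // Called by the concurrent mark thread while it is idle.
  void wait_for_start() {
    std::unique_lock<std::mutex> ul(lock);
    cv.wait(ul, [this] { return started; });
  }
};
#endif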
Note: of course, the actual marking work will4370// not start until the safepoint itself is released in4371// SuspendibleThreadSet::desynchronize().4372doConcurrentMark();4373}43744375return true;4376}43774378void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {4379_drain_in_progress = false;4380set_evac_failure_closure(cl);4381_evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);4382}43834384void G1CollectedHeap::finalize_for_evac_failure() {4385assert(_evac_failure_scan_stack != NULL &&4386_evac_failure_scan_stack->length() == 0,4387"Postcondition");4388assert(!_drain_in_progress, "Postcondition");4389delete _evac_failure_scan_stack;4390_evac_failure_scan_stack = NULL;4391}43924393void G1CollectedHeap::remove_self_forwarding_pointers() {4394assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");43954396double remove_self_forwards_start = os::elapsedTime();43974398G1ParRemoveSelfForwardPtrsTask rsfp_task(this);43994400if (G1CollectedHeap::use_parallel_gc_threads()) {4401set_par_threads();4402workers()->run_task(&rsfp_task);4403set_par_threads(0);4404} else {4405rsfp_task.work(0);4406}44074408assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");44094410// Reset the claim values in the regions in the collection set.4411reset_cset_heap_region_claim_values();44124413assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");44144415// Now restore saved marks, if any.4416assert(_objs_with_preserved_marks.size() ==4417_preserved_marks_of_objs.size(), "Both or none.");4418while (!_objs_with_preserved_marks.is_empty()) {4419oop obj = _objs_with_preserved_marks.pop();4420markOop m = _preserved_marks_of_objs.pop();4421obj->set_mark(m);4422}4423_objs_with_preserved_marks.clear(true);4424_preserved_marks_of_objs.clear(true);44254426g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);4427}44284429void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {4430_evac_failure_scan_stack->push(obj);4431}44324433void G1CollectedHeap::drain_evac_failure_scan_stack() {4434assert(_evac_failure_scan_stack != NULL, "precondition");44354436while (_evac_failure_scan_stack->length() > 0) {4437oop obj = _evac_failure_scan_stack->pop();4438_evac_failure_closure->set_region(heap_region_containing(obj));4439obj->oop_iterate_backwards(_evac_failure_closure);4440}4441}44424443oop4444G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,4445oop old) {4446assert(obj_in_cs(old),4447err_msg("obj: " PTR_FORMAT " should still be in the CSet",4448p2i(old)));4449markOop m = old->mark();4450oop forward_ptr = old->forward_to_atomic(old);4451if (forward_ptr == NULL) {4452// Forward-to-self succeeded.4453assert(_par_scan_state != NULL, "par scan state");4454OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();4455uint queue_num = _par_scan_state->queue_num();44564457_evacuation_failed = true;4458_evacuation_failed_info_array[queue_num].register_copy_failure(old->size());4459if (_evac_failure_closure != cl) {4460MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);4461assert(!_drain_in_progress,4462"Should only be true while someone holds the lock.");4463// Set the global evac-failure closure to the current thread's.4464assert(_evac_failure_closure == NULL, "Or locking has failed.");4465set_evac_failure_closure(cl);4466// Now do the common 
part.4467handle_evacuation_failure_common(old, m);4468// Reset to NULL.4469set_evac_failure_closure(NULL);4470} else {4471// The lock is already held, and this is recursive.4472assert(_drain_in_progress, "This should only be the recursive case.");4473handle_evacuation_failure_common(old, m);4474}4475return old;4476} else {4477// Forward-to-self failed. Either someone else managed to allocate4478// space for this object (old != forward_ptr) or they beat us in4479// self-forwarding it (old == forward_ptr).4480assert(old == forward_ptr || !obj_in_cs(forward_ptr),4481err_msg("obj: " PTR_FORMAT " forwarded to: " PTR_FORMAT " "4482"should not be in the CSet",4483p2i(old), p2i(forward_ptr)));4484return forward_ptr;4485}4486}44874488void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {4489preserve_mark_if_necessary(old, m);44904491HeapRegion* r = heap_region_containing(old);4492if (!r->evacuation_failed()) {4493r->set_evacuation_failed(true);4494_hr_printer.evac_failure(r);4495}44964497push_on_evac_failure_scan_stack(old);44984499if (!_drain_in_progress) {4500// prevent recursion in copy_to_survivor_space()4501_drain_in_progress = true;4502drain_evac_failure_scan_stack();4503_drain_in_progress = false;4504}4505}45064507void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {4508assert(evacuation_failed(), "Oversaving!");4509// We want to call the "for_promotion_failure" version only in the4510// case of a promotion failure.4511if (m->must_be_preserved_for_promotion_failure(obj)) {4512_objs_with_preserved_marks.push(obj);4513_preserved_marks_of_objs.push(m);4514}4515}45164517void G1ParCopyHelper::mark_object(oop obj) {4518assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");45194520// We know that the object is not moving so it's safe to read its size.4521_cm->grayRoot(obj, (size_t) obj->size(), _worker_id);4522}45234524void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {4525assert(from_obj->is_forwarded(), "from obj should be forwarded");4526assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");4527assert(from_obj != to_obj, "should not be self-forwarded");45284529assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");4530assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");45314532// The object might be in the process of being copied by another4533// worker so we cannot trust that its to-space image is4534// well-formed. 
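// A minimal standalone sketch follows (guarded by "#if 0", illustration only;
// a simplified model -- HotSpot installs the forwardee in the mark word, not
// in a separate field): the self-forwarding race on evacuation failure.
// Whichever thread wins the compare-and-swap "owns" the failure and handles
// it; losers simply use the forwarding pointer that is already installed.
#if 0
#include <atomic>

struct MockObject {
  std::atomic<MockObject*> forwardee{nullptr};

  // Returns nullptr if this thread installed the self-forwarding pointer,
  // otherwise the forwarding pointer some other thread installed first.
  MockObject* forward_to_self_atomic() {
    MockObject* expected = nullptr;
    if (forwardee.compare_exchange_strong(expected, this)) {
      return nullptr;       // we won: the caller runs the failure handling
    }
    return expected;        // we lost: already forwarded (possibly to itself)
  }
};
#endif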
So we have to read its size from its from-space4535// image which we know should not be changing.4536_cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);4537}45384539template <class T>4540void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {4541if (_g1->heap_region_containing_raw(new_obj)->is_young()) {4542_scanned_klass->record_modified_oops();4543}4544}45454546template <G1Barrier barrier, G1Mark do_mark_object>4547template <class T>4548void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {4549T heap_oop = oopDesc::load_heap_oop(p);45504551if (oopDesc::is_null(heap_oop)) {4552return;4553}45544555oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);45564557assert(_worker_id == _par_scan_state->queue_num(), "sanity");45584559const InCSetState state = _g1->in_cset_state(obj);4560if (state.is_in_cset()) {4561oop forwardee;4562markOop m = obj->mark();4563if (m->is_marked()) {4564forwardee = (oop) m->decode_pointer();4565} else {4566forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);4567}4568assert(forwardee != NULL, "forwardee should not be NULL");4569oopDesc::encode_store_heap_oop(p, forwardee);4570if (do_mark_object != G1MarkNone && forwardee != obj) {4571// If the object is self-forwarded we don't need to explicitly4572// mark it, the evacuation failure protocol will do so.4573mark_forwarded_object(obj, forwardee);4574}45754576if (barrier == G1BarrierKlass) {4577do_klass_barrier(p, forwardee);4578}4579} else {4580if (state.is_humongous()) {4581_g1->set_humongous_is_live(obj);4582}4583// The object is not in collection set. If we're a root scanning4584// closure during an initial mark pause then attempt to mark the object.4585if (do_mark_object == G1MarkFromRoot) {4586mark_object(obj);4587}4588}45894590if (barrier == G1BarrierEvac) {4591_par_scan_state->update_rs(_from, p, _worker_id);4592}4593}45944595template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);4596template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);45974598class G1ParEvacuateFollowersClosure : public VoidClosure {4599protected:4600G1CollectedHeap* _g1h;4601G1ParScanThreadState* _par_scan_state;4602RefToScanQueueSet* _queues;4603ParallelTaskTerminator* _terminator;46044605G1ParScanThreadState* par_scan_state() { return _par_scan_state; }4606RefToScanQueueSet* queues() { return _queues; }4607ParallelTaskTerminator* terminator() { return _terminator; }46084609public:4610G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,4611G1ParScanThreadState* par_scan_state,4612RefToScanQueueSet* queues,4613ParallelTaskTerminator* terminator)4614: _g1h(g1h), _par_scan_state(par_scan_state),4615_queues(queues), _terminator(terminator) {}46164617void do_void();46184619private:4620inline bool offer_termination();4621};46224623bool G1ParEvacuateFollowersClosure::offer_termination() {4624G1ParScanThreadState* const pss = par_scan_state();4625pss->start_term_time();4626const bool res = terminator()->offer_termination();4627pss->end_term_time();4628return res;4629}46304631void G1ParEvacuateFollowersClosure::do_void() {4632G1ParScanThreadState* const pss = par_scan_state();4633pss->trim_queue();4634do {4635pss->steal_and_trim_queue(queues());4636} while (!offer_termination());4637}46384639class G1KlassScanClosure : public KlassClosure {4640G1ParCopyHelper* _closure;4641bool _process_only_dirty;4642int _count;4643public:4644G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)4645: _process_only_dirty(process_only_dirty), 
_closure(closure), _count(0) {}4646void do_klass(Klass* klass) {4647// If the klass has not been dirtied we know that there's4648// no references into the young gen and we can skip it.4649if (!_process_only_dirty || klass->has_modified_oops()) {4650// Clean the klass since we're going to scavenge all the metadata.4651klass->clear_modified_oops();46524653// Tell the closure that this klass is the Klass to scavenge4654// and is the one to dirty if oops are left pointing into the young gen.4655_closure->set_scanned_klass(klass);46564657klass->oops_do(_closure);46584659_closure->set_scanned_klass(NULL);4660}4661_count++;4662}4663};46644665class G1ParTask : public AbstractGangTask {4666protected:4667G1CollectedHeap* _g1h;4668RefToScanQueueSet *_queues;4669G1RootProcessor* _root_processor;4670ParallelTaskTerminator _terminator;4671uint _n_workers;46724673Mutex _stats_lock;4674Mutex* stats_lock() { return &_stats_lock; }46754676public:4677G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor)4678: AbstractGangTask("G1 collection"),4679_g1h(g1h),4680_queues(task_queues),4681_root_processor(root_processor),4682_terminator(0, _queues),4683_stats_lock(Mutex::leaf, "parallel G1 stats lock", true)4684{}46854686RefToScanQueueSet* queues() { return _queues; }46874688RefToScanQueue *work_queue(int i) {4689return queues()->queue(i);4690}46914692ParallelTaskTerminator* terminator() { return &_terminator; }46934694virtual void set_for_termination(int active_workers) {4695_root_processor->set_num_workers(active_workers);4696terminator()->reset_for_reuse(active_workers);4697_n_workers = active_workers;4698}46994700// Helps out with CLD processing.4701//4702// During InitialMark we need to:4703// 1) Scavenge all CLDs for the young GC.4704// 2) Mark all objects directly reachable from strong CLDs.4705template <G1Mark do_mark_object>4706class G1CLDClosure : public CLDClosure {4707G1ParCopyClosure<G1BarrierNone, do_mark_object>* _oop_closure;4708G1ParCopyClosure<G1BarrierKlass, do_mark_object> _oop_in_klass_closure;4709G1KlassScanClosure _klass_in_cld_closure;4710bool _claim;47114712public:4713G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,4714bool only_young, bool claim)4715: _oop_closure(oop_closure),4716_oop_in_klass_closure(oop_closure->g1(),4717oop_closure->pss(),4718oop_closure->rp()),4719_klass_in_cld_closure(&_oop_in_klass_closure, only_young),4720_claim(claim) {47214722}47234724void do_cld(ClassLoaderData* cld) {4725cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);4726}4727};47284729void work(uint worker_id) {4730if (worker_id >= _n_workers) return; // no work needed this round47314732_g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, os::elapsedTime());47334734{4735ResourceMark rm;4736HandleMark hm;47374738ReferenceProcessor* rp = _g1h->ref_processor_stw();47394740G1ParScanThreadState pss(_g1h, worker_id, rp);4741G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);47424743pss.set_evac_failure_closure(&evac_failure_cl);47444745bool only_young = _g1h->g1_policy()->gcs_are_young();47464747// Non-IM young GC.4748G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);4749G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,4750only_young, // Only process dirty klasses.4751false); // No need to claim CLDs.4752// IM young GC.4753// Strong roots closures.4754G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, &pss, 
rp);4755G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,4756false, // Process all klasses.4757true); // Need to claim CLDs.4758// Weak roots closures.4759G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);4760G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,4761false, // Process all klasses.4762true); // Need to claim CLDs.47634764OopClosure* strong_root_cl;4765OopClosure* weak_root_cl;4766CLDClosure* strong_cld_cl;4767CLDClosure* weak_cld_cl;47684769bool trace_metadata = false;47704771if (_g1h->g1_policy()->during_initial_mark_pause()) {4772// We also need to mark copied objects.4773strong_root_cl = &scan_mark_root_cl;4774strong_cld_cl = &scan_mark_cld_cl;4775if (ClassUnloadingWithConcurrentMark) {4776weak_root_cl = &scan_mark_weak_root_cl;4777weak_cld_cl = &scan_mark_weak_cld_cl;4778trace_metadata = true;4779} else {4780weak_root_cl = &scan_mark_root_cl;4781weak_cld_cl = &scan_mark_cld_cl;4782}4783} else {4784strong_root_cl = &scan_only_root_cl;4785weak_root_cl = &scan_only_root_cl;4786strong_cld_cl = &scan_only_cld_cl;4787weak_cld_cl = &scan_only_cld_cl;4788}47894790pss.start_strong_roots();47914792_root_processor->evacuate_roots(strong_root_cl,4793weak_root_cl,4794strong_cld_cl,4795weak_cld_cl,4796trace_metadata,4797worker_id);47984799G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);4800_root_processor->scan_remembered_sets(&push_heap_rs_cl,4801weak_root_cl,4802worker_id);4803pss.end_strong_roots();48044805{4806double start = os::elapsedTime();4807G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);4808evac.do_void();4809double elapsed_sec = os::elapsedTime() - start;4810double term_sec = pss.term_time();4811_g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);4812_g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);4813_g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts());4814}4815_g1h->g1_policy()->record_thread_age_table(pss.age_table());4816_g1h->update_surviving_young_words(pss.surviving_young_words()+1);48174818if (ParallelGCVerbose) {4819MutexLocker x(stats_lock());4820pss.print_termination_stats(worker_id);4821}48224823assert(pss.queue_is_empty(), "should be empty");48244825// Close the inner scope so that the ResourceMark and HandleMark4826// destructors are executed here and are included as part of the4827// "GC Worker Time".4828}4829_g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());4830}4831};48324833class G1StringSymbolTableUnlinkTask : public AbstractGangTask {4834private:4835BoolObjectClosure* _is_alive;4836int _initial_string_table_size;4837int _initial_symbol_table_size;48384839bool _process_strings;4840int _strings_processed;4841int _strings_removed;48424843bool _process_symbols;4844int _symbols_processed;4845int _symbols_removed;48464847bool _do_in_parallel;4848public:4849G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :4850AbstractGangTask("String/Symbol Unlinking"),4851_is_alive(is_alive),4852_do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),4853_process_strings(process_strings), _strings_processed(0), _strings_removed(0),4854_process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {48554856_initial_string_table_size = 
StringTable::the_table()->table_size();4857_initial_symbol_table_size = SymbolTable::the_table()->table_size();4858if (process_strings) {4859StringTable::clear_parallel_claimed_index();4860}4861if (process_symbols) {4862SymbolTable::clear_parallel_claimed_index();4863}4864}48654866~G1StringSymbolTableUnlinkTask() {4867guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,4868err_msg("claim value " INT32_FORMAT " after unlink less than initial string table size " INT32_FORMAT,4869StringTable::parallel_claimed_index(), _initial_string_table_size));4870guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,4871err_msg("claim value " INT32_FORMAT " after unlink less than initial symbol table size " INT32_FORMAT,4872SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));48734874if (G1TraceStringSymbolTableScrubbing) {4875gclog_or_tty->print_cr("Cleaned string and symbol table, "4876"strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "4877"symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",4878strings_processed(), strings_removed(),4879symbols_processed(), symbols_removed());4880}4881}48824883void work(uint worker_id) {4884if (_do_in_parallel) {4885int strings_processed = 0;4886int strings_removed = 0;4887int symbols_processed = 0;4888int symbols_removed = 0;4889if (_process_strings) {4890StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);4891Atomic::add(strings_processed, &_strings_processed);4892Atomic::add(strings_removed, &_strings_removed);4893}4894if (_process_symbols) {4895SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);4896Atomic::add(symbols_processed, &_symbols_processed);4897Atomic::add(symbols_removed, &_symbols_removed);4898}4899} else {4900if (_process_strings) {4901StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);4902}4903if (_process_symbols) {4904SymbolTable::unlink(&_symbols_processed, &_symbols_removed);4905}4906}4907}49084909size_t strings_processed() const { return (size_t)_strings_processed; }4910size_t strings_removed() const { return (size_t)_strings_removed; }49114912size_t symbols_processed() const { return (size_t)_symbols_processed; }4913size_t symbols_removed() const { return (size_t)_symbols_removed; }4914};49154916class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {4917private:4918static Monitor* _lock;49194920BoolObjectClosure* const _is_alive;4921const bool _unloading_occurred;4922const uint _num_workers;49234924// Variables used to claim nmethods.4925nmethod* _first_nmethod;4926volatile nmethod* _claimed_nmethod;49274928// The list of nmethods that need to be processed by the second pass.4929volatile nmethod* _postponed_list;4930volatile uint _num_entered_barrier;49314932public:4933G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :4934_is_alive(is_alive),4935_unloading_occurred(unloading_occurred),4936_num_workers(num_workers),4937_first_nmethod(NULL),4938_claimed_nmethod(NULL),4939_postponed_list(NULL),4940_num_entered_barrier(0)4941{4942nmethod::increase_unloading_clock();4943_first_nmethod = CodeCache::alive_nmethod(CodeCache::first());4944_claimed_nmethod = (volatile nmethod*)_first_nmethod;4945}49464947~G1CodeCacheUnloadingTask() 
{4948CodeCache::verify_clean_inline_caches();49494950CodeCache::set_needs_cache_clean(false);4951guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");49524953CodeCache::verify_icholder_relocations();4954}49554956private:4957void add_to_postponed_list(nmethod* nm) {4958nmethod* old;4959do {4960old = (nmethod*)_postponed_list;4961nm->set_unloading_next(old);4962} while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);4963}49644965void clean_nmethod(nmethod* nm) {4966bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);49674968if (postponed) {4969// This nmethod referred to an nmethod that has not been cleaned/unloaded yet.4970add_to_postponed_list(nm);4971}49724973// Mark that this thread has been cleaned/unloaded.4974// After this call, it will be safe to ask if this nmethod was unloaded or not.4975nm->set_unloading_clock(nmethod::global_unloading_clock());4976}49774978void clean_nmethod_postponed(nmethod* nm) {4979nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);4980}49814982static const int MaxClaimNmethods = 16;49834984void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {4985nmethod* first;4986nmethod* last;49874988do {4989*num_claimed_nmethods = 0;49904991first = last = (nmethod*)_claimed_nmethod;49924993if (first != NULL) {4994for (int i = 0; i < MaxClaimNmethods; i++) {4995last = CodeCache::alive_nmethod(CodeCache::next(last));49964997if (last == NULL) {4998break;4999}50005001claimed_nmethods[i] = last;5002(*num_claimed_nmethods)++;5003}5004}50055006} while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);5007}50085009nmethod* claim_postponed_nmethod() {5010nmethod* claim;5011nmethod* next;50125013do {5014claim = (nmethod*)_postponed_list;5015if (claim == NULL) {5016return NULL;5017}50185019next = claim->unloading_next();50205021} while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);50225023return claim;5024}50255026public:5027// Mark that we're done with the first pass of nmethod cleaning.5028void barrier_mark(uint worker_id) {5029MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);5030_num_entered_barrier++;5031if (_num_entered_barrier == _num_workers) {5032ml.notify_all();5033}5034}50355036// See if we have to wait for the other workers to5037// finish their first-pass nmethod cleaning work.5038void barrier_wait(uint worker_id) {5039if (_num_entered_barrier < _num_workers) {5040MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);5041while (_num_entered_barrier < _num_workers) {5042ml.wait(Mutex::_no_safepoint_check_flag, 0, false);5043}5044}5045}50465047// Cleaning and unloading of nmethods. 
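// --------------------------------------------------------------------------
// Illustrative sketch (not part of the HotSpot sources): the lock-free
// "postponed list" used above (add_to_postponed_list / claim_postponed_nmethod)
// is a Treiber-style stack built on compare-and-swap. A minimal standard-C++
// version, with a hypothetical Node type standing in for nmethod and its
// unloading_next() link. (ABA and reclamation concerns are ignored in this
// sketch.)
#include <atomic>

namespace sketch {
struct Node {
  Node* next;
};

std::atomic<Node*> postponed_list(nullptr);

void push_postponed(Node* n) {
  Node* old = postponed_list.load();
  do {
    n->next = old;                                   // link before publishing
  } while (!postponed_list.compare_exchange_weak(old, n));
}

Node* claim_postponed() {
  Node* head = postponed_list.load();
  while (head != nullptr &&
         !postponed_list.compare_exchange_weak(head, head->next)) {
    // A failed CAS reloads 'head'; retry until we own an element or the
    // list drains.
  }
  return head;                                       // nullptr when nothing is left
}
}  // namespace sketch
// --------------------------------------------------------------------------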
Some work has to be postponed5048// to the second pass, when we know which nmethods survive.5049void work_first_pass(uint worker_id) {5050// The first nmethods is claimed by the first worker.5051if (worker_id == 0 && _first_nmethod != NULL) {5052clean_nmethod(_first_nmethod);5053_first_nmethod = NULL;5054}50555056int num_claimed_nmethods;5057nmethod* claimed_nmethods[MaxClaimNmethods];50585059while (true) {5060claim_nmethods(claimed_nmethods, &num_claimed_nmethods);50615062if (num_claimed_nmethods == 0) {5063break;5064}50655066for (int i = 0; i < num_claimed_nmethods; i++) {5067clean_nmethod(claimed_nmethods[i]);5068}5069}50705071// The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.5072// Need to retire the buffers now that this thread has stopped cleaning nmethods.5073MetadataOnStackMark::retire_buffer_for_thread(Thread::current());5074}50755076void work_second_pass(uint worker_id) {5077nmethod* nm;5078// Take care of postponed nmethods.5079while ((nm = claim_postponed_nmethod()) != NULL) {5080clean_nmethod_postponed(nm);5081}5082}5083};50845085Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");50865087class G1KlassCleaningTask : public StackObj {5088BoolObjectClosure* _is_alive;5089volatile jint _clean_klass_tree_claimed;5090ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;50915092public:5093G1KlassCleaningTask(BoolObjectClosure* is_alive) :5094_is_alive(is_alive),5095_clean_klass_tree_claimed(0),5096_klass_iterator() {5097}50985099private:5100bool claim_clean_klass_tree_task() {5101if (_clean_klass_tree_claimed) {5102return false;5103}51045105return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;5106}51075108InstanceKlass* claim_next_klass() {5109Klass* klass;5110do {5111klass =_klass_iterator.next_klass();5112} while (klass != NULL && !klass->oop_is_instance());51135114return (InstanceKlass*)klass;5115}51165117public:51185119void clean_klass(InstanceKlass* ik) {5120ik->clean_weak_instanceklass_links(_is_alive);51215122if (JvmtiExport::has_redefined_a_class()) {5123InstanceKlass::purge_previous_versions(ik);5124}5125}51265127void work() {5128ResourceMark rm;51295130// One worker will clean the subklass/sibling klass tree.5131if (claim_clean_klass_tree_task()) {5132Klass::clean_subklass_tree(_is_alive);5133}51345135// All workers will help cleaning the classes,5136InstanceKlass* klass;5137while ((klass = claim_next_klass()) != NULL) {5138clean_klass(klass);5139}5140}5141};51425143// To minimize the remark pause times, the tasks below are done in parallel.5144class G1ParallelCleaningTask : public AbstractGangTask {5145private:5146G1StringSymbolTableUnlinkTask _string_symbol_task;5147G1CodeCacheUnloadingTask _code_cache_task;5148G1KlassCleaningTask _klass_cleaning_task;51495150public:5151// The constructor is run in the VMThread.5152G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :5153AbstractGangTask("Parallel Cleaning"),5154_string_symbol_task(is_alive, process_strings, process_symbols),5155_code_cache_task(num_workers, is_alive, unloading_occurred),5156_klass_cleaning_task(is_alive) {5157}51585159void pre_work_verification() {5160// The VM Thread will have registered Metadata during the single-threaded phase of MetadataStackOnMark.5161assert(Thread::current()->is_VM_thread()5162|| !MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");5163}51645165void 
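// --------------------------------------------------------------------------
// Illustrative sketch (not part of the HotSpot sources): the two "claim"
// idioms in G1KlassCleaningTask above. A one-shot task is claimed by whichever
// worker wins a 0 -> 1 compare-and-swap; shared iteration hands out work
// atomically (the real code walks ClassLoaderDataGraphKlassIteratorAtomic;
// an index counter stands in for it here). Standard C++ in place of
// Atomic::cmpxchg:
#include <atomic>
#include <cstddef>

namespace sketch {
std::atomic<int> tree_task_claimed(0);

bool claim_tree_task() {
  int expected = 0;
  // Exactly one caller observes the 0 -> 1 transition and gets the task.
  return tree_task_claimed.compare_exchange_strong(expected, 1);
}

std::atomic<size_t> next_index(0);

// Workers pull indices until the shared counter passes 'limit'.
bool claim_next(size_t limit, size_t* out) {
  size_t idx = next_index.fetch_add(1);
  if (idx >= limit) return false;
  *out = idx;
  return true;
}
}  // namespace sketch
// --------------------------------------------------------------------------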
post_work_verification() {5166assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");5167}51685169// The parallel work done by all worker threads.5170void work(uint worker_id) {5171pre_work_verification();51725173// Do first pass of code cache cleaning.5174_code_cache_task.work_first_pass(worker_id);51755176// Let the threads mark that the first pass is done.5177_code_cache_task.barrier_mark(worker_id);51785179// Clean the Strings and Symbols.5180_string_symbol_task.work(worker_id);51815182// Wait for all workers to finish the first code cache cleaning pass.5183_code_cache_task.barrier_wait(worker_id);51845185// Do the second code cache cleaning work, which realize on5186// the liveness information gathered during the first pass.5187_code_cache_task.work_second_pass(worker_id);51885189// Clean all klasses that were not unloaded.5190_klass_cleaning_task.work();51915192post_work_verification();5193}5194};519551965197void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,5198bool process_strings,5199bool process_symbols,5200bool class_unloading_occurred) {5201uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?5202workers()->active_workers() : 1);52035204G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,5205n_workers, class_unloading_occurred);5206if (G1CollectedHeap::use_parallel_gc_threads()) {5207set_par_threads(n_workers);5208workers()->run_task(&g1_unlink_task);5209set_par_threads(0);5210} else {5211g1_unlink_task.work(0);5212}5213}52145215void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,5216bool process_strings, bool process_symbols) {5217{5218uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?5219_g1h->workers()->active_workers() : 1);5220G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);5221if (G1CollectedHeap::use_parallel_gc_threads()) {5222set_par_threads(n_workers);5223workers()->run_task(&g1_unlink_task);5224set_par_threads(0);5225} else {5226g1_unlink_task.work(0);5227}5228}52295230if (G1StringDedup::is_enabled()) {5231G1StringDedup::unlink(is_alive);5232}5233}52345235class G1RedirtyLoggedCardsTask : public AbstractGangTask {5236private:5237DirtyCardQueueSet* _queue;5238public:5239G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }52405241virtual void work(uint worker_id) {5242G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->g1_policy()->phase_times();5243G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);52445245RedirtyLoggedCardTableEntryClosure cl;5246if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {5247_queue->par_apply_closure_to_all_completed_buffers(&cl);5248} else {5249_queue->apply_closure_to_all_completed_buffers(&cl);5250}52515252phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed());5253}5254};52555256void G1CollectedHeap::redirty_logged_cards() {5257double redirty_logged_cards_start = os::elapsedTime();52585259uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?5260_g1h->workers()->active_workers() : 1);52615262G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());5263dirty_card_queue_set().reset_for_par_iteration();5264if (use_parallel_gc_threads()) {5265set_par_threads(n_workers);5266workers()->run_task(&redirty_task);5267set_par_threads(0);5268} else {5269redirty_task.work(0);5270}52715272DirtyCardQueueSet& dcq = 
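// --------------------------------------------------------------------------
// Illustrative sketch (not part of the HotSpot sources): the ordering enforced
// by G1ParallelCleaningTask::work() above -- finish code-cache pass one, mark
// the barrier, do unrelated work (string/symbol unlinking), then wait for
// every worker before pass two relies on pass-one liveness results. A
// condition-variable barrier stands in for HotSpot's Monitor:
#include <condition_variable>
#include <mutex>

namespace sketch {
class FirstPassBarrier {
  std::mutex              _mu;
  std::condition_variable _cv;
  unsigned                _entered;
  const unsigned          _workers;
 public:
  explicit FirstPassBarrier(unsigned workers) : _entered(0), _workers(workers) {}

  void mark() {                       // "I have finished pass one."
    std::lock_guard<std::mutex> g(_mu);
    if (++_entered == _workers) _cv.notify_all();
  }

  void wait() {                       // Block until every worker has marked.
    std::unique_lock<std::mutex> g(_mu);
    _cv.wait(g, [this] { return _entered >= _workers; });
  }
};
}  // namespace sketch
// Workers can overlap independent work (the string/symbol unlinking) between
// mark() and wait(), which is exactly why the barrier is split into two calls.
// --------------------------------------------------------------------------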
JavaThread::dirty_card_queue_set();
  dcq.merge_bufferlists(&dirty_card_queue_set());
  assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");

  g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
}

// Weak Reference Processing support

// An always "is_alive" closure that is used to preserve referents.
// If the object is non-null then it's alive. Used in the preservation
// of referent objects that are pointed to by reference objects
// discovered by the CM ref processor.
class G1AlwaysAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
public:
  G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  bool do_object_b(oop p) {
    if (p != NULL) {
      return true;
    }
    return false;
  }
};

bool G1STWIsAliveClosure::do_object_b(oop p) {
  // An object is reachable if it is outside the collection set,
  // or is inside and copied.
  return !_g1->obj_in_cs(p) || p->is_forwarded();
}

// Non Copying Keep Alive closure
class G1KeepAliveClosure: public OopClosure {
  G1CollectedHeap* _g1;
public:
  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  void do_oop(oop* p) {
    oop obj = *p;
    assert(obj != NULL, "the caller should have filtered out NULL values");

    const InCSetState cset_state = _g1->in_cset_state(obj);
    if (!cset_state.is_in_cset_or_humongous()) {
      return;
    }
    if (cset_state.is_in_cset()) {
      assert( obj->is_forwarded(), "invariant" );
      *p = obj->forwardee();
    } else {
      assert(!obj->is_forwarded(), "invariant" );
      assert(cset_state.is_humongous(),
             err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
      _g1->set_humongous_is_live(obj);
    }
  }
};

// Copying Keep Alive closure - can be called from both
// serial and parallel code as long as different worker
// threads utilize different G1ParScanThreadState instances
// and different queues.

class G1CopyingKeepAliveClosure: public OopClosure {
  G1CollectedHeap*      _g1h;
  OopClosure*           _copy_non_heap_obj_cl;
  G1ParScanThreadState* _par_scan_state;

public:
  G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
                            OopClosure* non_heap_obj_cl,
                            G1ParScanThreadState* pss):
    _g1h(g1h),
    _copy_non_heap_obj_cl(non_heap_obj_cl),
    _par_scan_state(pss)
  {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);

    if (_g1h->is_in_cset_or_humongous(obj)) {
      // If the referent object has been forwarded (either copied
      // to a new location or to itself in the event of an
      // evacuation failure) then we need to update the reference
      // field and, if both reference and referent are in the G1
      // heap, update the RSet for the referent.
      //
      // If the referent has not been forwarded then we have to keep
      // it alive by policy. Therefore we have to copy the referent.
      //
      // If the reference field is in the G1 heap then we can push
      // on the PSS queue. When the queue is drained (after each
      // phase of reference processing) the object and its followers
      // will be copied, the reference field set to point to the
      // new location, and the RSet updated.
Otherwise we need to5369// use the the non-heap or metadata closures directly to copy5370// the referent object and update the pointer, while avoiding5371// updating the RSet.53725373if (_g1h->is_in_g1_reserved(p)) {5374_par_scan_state->push_on_queue(p);5375} else {5376assert(!Metaspace::contains((const void*)p),5377err_msg("Unexpectedly found a pointer from metadata: " PTR_FORMAT, p2i(p)));5378_copy_non_heap_obj_cl->do_oop(p);5379}5380}5381}5382};53835384// Serial drain queue closure. Called as the 'complete_gc'5385// closure for each discovered list in some of the5386// reference processing phases.53875388class G1STWDrainQueueClosure: public VoidClosure {5389protected:5390G1CollectedHeap* _g1h;5391G1ParScanThreadState* _par_scan_state;53925393G1ParScanThreadState* par_scan_state() { return _par_scan_state; }53945395public:5396G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :5397_g1h(g1h),5398_par_scan_state(pss)5399{ }54005401void do_void() {5402G1ParScanThreadState* const pss = par_scan_state();5403pss->trim_queue();5404}5405};54065407// Parallel Reference Processing closures54085409// Implementation of AbstractRefProcTaskExecutor for parallel reference5410// processing during G1 evacuation pauses.54115412class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {5413private:5414G1CollectedHeap* _g1h;5415RefToScanQueueSet* _queues;5416FlexibleWorkGang* _workers;5417int _active_workers;54185419public:5420G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,5421FlexibleWorkGang* workers,5422RefToScanQueueSet *task_queues,5423int n_workers) :5424_g1h(g1h),5425_queues(task_queues),5426_workers(workers),5427_active_workers(n_workers)5428{5429assert(n_workers > 0, "shouldn't call this otherwise");5430}54315432// Executes the given task using concurrent marking worker threads.5433virtual void execute(ProcessTask& task);5434virtual void execute(EnqueueTask& task);5435};54365437// Gang task for possibly parallel reference processing54385439class G1STWRefProcTaskProxy: public AbstractGangTask {5440typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;5441ProcessTask& _proc_task;5442G1CollectedHeap* _g1h;5443RefToScanQueueSet *_task_queues;5444ParallelTaskTerminator* _terminator;54455446public:5447G1STWRefProcTaskProxy(ProcessTask& proc_task,5448G1CollectedHeap* g1h,5449RefToScanQueueSet *task_queues,5450ParallelTaskTerminator* terminator) :5451AbstractGangTask("Process reference objects in parallel"),5452_proc_task(proc_task),5453_g1h(g1h),5454_task_queues(task_queues),5455_terminator(terminator)5456{}54575458virtual void work(uint worker_id) {5459// The reference processing task executed by a single worker.5460ResourceMark rm;5461HandleMark hm;54625463G1STWIsAliveClosure is_alive(_g1h);54645465G1ParScanThreadState pss(_g1h, worker_id, NULL);5466G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);54675468pss.set_evac_failure_closure(&evac_failure_cl);54695470G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);54715472G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);54735474OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;54755476if (_g1h->g1_policy()->during_initial_mark_pause()) {5477// We also need to mark copied objects.5478copy_non_heap_cl = ©_mark_non_heap_cl;5479}54805481// Keep alive closure.5482G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);54835484// Complete GC closure5485G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);54865487// Call the reference 
processing task's work routine.5488_proc_task.work(worker_id, is_alive, keep_alive, drain_queue);54895490// Note we cannot assert that the refs array is empty here as not all5491// of the processing tasks (specifically phase2 - pp2_work) execute5492// the complete_gc closure (which ordinarily would drain the queue) so5493// the queue may not be empty.5494}5495};54965497// Driver routine for parallel reference processing.5498// Creates an instance of the ref processing gang5499// task and has the worker threads execute it.5500void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {5501assert(_workers != NULL, "Need parallel worker threads.");55025503ParallelTaskTerminator terminator(_active_workers, _queues);5504G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);55055506_g1h->set_par_threads(_active_workers);5507_workers->run_task(&proc_task_proxy);5508_g1h->set_par_threads(0);5509}55105511// Gang task for parallel reference enqueueing.55125513class G1STWRefEnqueueTaskProxy: public AbstractGangTask {5514typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;5515EnqueueTask& _enq_task;55165517public:5518G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :5519AbstractGangTask("Enqueue reference objects in parallel"),5520_enq_task(enq_task)5521{ }55225523virtual void work(uint worker_id) {5524_enq_task.work(worker_id);5525}5526};55275528// Driver routine for parallel reference enqueueing.5529// Creates an instance of the ref enqueueing gang5530// task and has the worker threads execute it.55315532void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {5533assert(_workers != NULL, "Need parallel worker threads.");55345535G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);55365537_g1h->set_par_threads(_active_workers);5538_workers->run_task(&enq_task_proxy);5539_g1h->set_par_threads(0);5540}55415542// End of weak reference support closures55435544// Abstract task used to preserve (i.e. copy) any referent objects5545// that are in the collection set and are pointed to by reference5546// objects discovered by the CM ref processor.55475548class G1ParPreserveCMReferentsTask: public AbstractGangTask {5549protected:5550G1CollectedHeap* _g1h;5551RefToScanQueueSet *_queues;5552ParallelTaskTerminator _terminator;5553uint _n_workers;55545555public:5556G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :5557AbstractGangTask("ParPreserveCMReferents"),5558_g1h(g1h),5559_queues(task_queues),5560_terminator(workers, _queues),5561_n_workers(workers)5562{ }55635564void work(uint worker_id) {5565ResourceMark rm;5566HandleMark hm;55675568G1ParScanThreadState pss(_g1h, worker_id, NULL);5569G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);55705571pss.set_evac_failure_closure(&evac_failure_cl);55725573assert(pss.queue_is_empty(), "both queue and overflow should be empty");55745575G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);55765577G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);55785579OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;55805581if (_g1h->g1_policy()->during_initial_mark_pause()) {5582// We also need to mark copied objects.5583copy_non_heap_cl = ©_mark_non_heap_cl;5584}55855586// Is alive closure5587G1AlwaysAliveClosure always_alive(_g1h);55885589// Copying keep alive closure. 
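// --------------------------------------------------------------------------
// Illustrative sketch (not part of the HotSpot sources): the executor/proxy
// shape used by G1STWRefProcTaskExecutor::execute() above -- wrap a
// per-worker task in something the thread gang can run, fan it out to
// n_workers, and return once every worker is done. std::thread stands in for
// the FlexibleWorkGang:
#include <functional>
#include <thread>
#include <vector>

namespace sketch {
void run_on_gang(unsigned n_workers,
                 const std::function<void(unsigned /*worker_id*/)>& task) {
  std::vector<std::thread> gang;
  gang.reserve(n_workers);
  for (unsigned id = 0; id < n_workers; ++id) {
    gang.emplace_back([&task, id] { task(id); });  // proxy: hand each worker its id
  }
  for (std::thread& t : gang) {
    t.join();                                      // executor returns when all are done
  }
}
}  // namespace sketch
// --------------------------------------------------------------------------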
Applied to referent objects that need5590// to be copied.5591G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);55925593ReferenceProcessor* rp = _g1h->ref_processor_cm();55945595uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();5596uint stride = MIN2(MAX2(_n_workers, 1U), limit);55975598// limit is set using max_num_q() - which was set using ParallelGCThreads.5599// So this must be true - but assert just in case someone decides to5600// change the worker ids.5601assert(0 <= worker_id && worker_id < limit, "sanity");5602assert(!rp->discovery_is_atomic(), "check this code");56035604// Select discovered lists [i, i+stride, i+2*stride,...,limit)5605for (uint idx = worker_id; idx < limit; idx += stride) {5606DiscoveredList& ref_list = rp->discovered_refs()[idx];56075608DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);5609while (iter.has_next()) {5610// Since discovery is not atomic for the CM ref processor, we5611// can see some null referent objects.5612iter.load_ptrs(DEBUG_ONLY(true));5613oop ref = iter.obj();56145615// This will filter nulls.5616if (iter.is_referent_alive()) {5617iter.make_referent_alive();5618}5619iter.move_to_next();5620}5621}56225623// Drain the queue - which may cause stealing5624G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);5625drain_queue.do_void();5626// Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure5627assert(pss.queue_is_empty(), "should be");5628}5629};56305631// Weak Reference processing during an evacuation pause (part 1).5632void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {5633double ref_proc_start = os::elapsedTime();56345635ReferenceProcessor* rp = _ref_processor_stw;5636assert(rp->discovery_enabled(), "should have been enabled");56375638// Any reference objects, in the collection set, that were 'discovered'5639// by the CM ref processor should have already been copied (either by5640// applying the external root copy closure to the discovered lists, or5641// by following an RSet entry).5642//5643// But some of the referents, that are in the collection set, that these5644// reference objects point to may not have been copied: the STW ref5645// processor would have seen that the reference object had already5646// been 'discovered' and would have skipped discovering the reference,5647// but would not have treated the reference object as a regular oop.5648// As a result the copy closure would not have been applied to the5649// referent object.5650//5651// We need to explicitly copy these referent objects - the references5652// will be processed at the end of remarking.5653//5654// We also need to do this copying before we process the reference5655// objects discovered by the STW ref processor in case one of these5656// referents points to another object which is also referenced by an5657// object discovered by the STW ref processor.56585659assert(!G1CollectedHeap::use_parallel_gc_threads() ||5660no_of_gc_workers == workers()->active_workers(),5661"Need to reset active GC workers");56625663set_par_threads(no_of_gc_workers);5664G1ParPreserveCMReferentsTask keep_cm_referents(this,5665no_of_gc_workers,5666_task_queues);56675668if (G1CollectedHeap::use_parallel_gc_threads()) {5669workers()->run_task(&keep_cm_referents);5670} else {5671keep_cm_referents.work(0);5672}56735674set_par_threads(0);56755676// Closure to test whether a referent is alive.5677G1STWIsAliveClosure is_alive(this);56785679// Even when parallel 
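// --------------------------------------------------------------------------
// Illustrative sketch (not part of the HotSpot sources): the strided work
// distribution used in G1ParPreserveCMReferentsTask::work() above. With
// 'limit' discovered lists and stride = min(max(n_workers, 1), limit),
// worker w visits lists w, w + stride, w + 2*stride, ... so every list is
// handled by exactly one worker and no further synchronization is needed.
#include <algorithm>
#include <vector>

namespace sketch {
template <typename List, typename Fn>
void for_each_strided(std::vector<List>& lists, unsigned worker_id,
                      unsigned n_workers, Fn fn) {
  const unsigned limit  = static_cast<unsigned>(lists.size());
  const unsigned stride = std::min(std::max(n_workers, 1u), limit);
  for (unsigned idx = worker_id; idx < limit; idx += stride) {
    fn(lists[idx]);                 // each list is visited by exactly one worker
  }
}
}  // namespace sketch
// --------------------------------------------------------------------------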
reference processing is enabled, the processing5680// of JNI refs is serial and performed serially by the current thread5681// rather than by a worker. The following PSS will be used for processing5682// JNI refs.56835684// Use only a single queue for this PSS.5685G1ParScanThreadState pss(this, 0, NULL);56865687// We do not embed a reference processor in the copying/scanning5688// closures while we're actually processing the discovered5689// reference objects.5690G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);56915692pss.set_evac_failure_closure(&evac_failure_cl);56935694assert(pss.queue_is_empty(), "pre-condition");56955696G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);56975698G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);56995700OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;57015702if (_g1h->g1_policy()->during_initial_mark_pause()) {5703// We also need to mark copied objects.5704copy_non_heap_cl = ©_mark_non_heap_cl;5705}57065707// Keep alive closure.5708G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);57095710// Serial Complete GC closure5711G1STWDrainQueueClosure drain_queue(this, &pss);57125713// Setup the soft refs policy...5714rp->setup_policy(false);57155716ReferenceProcessorStats stats;5717if (!rp->processing_is_mt()) {5718// Serial reference processing...5719stats = rp->process_discovered_references(&is_alive,5720&keep_alive,5721&drain_queue,5722NULL,5723_gc_timer_stw,5724_gc_tracer_stw->gc_id());5725} else {5726// Parallel reference processing5727assert(rp->num_q() == no_of_gc_workers, "sanity");5728assert(no_of_gc_workers <= rp->max_num_q(), "sanity");57295730G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);5731stats = rp->process_discovered_references(&is_alive,5732&keep_alive,5733&drain_queue,5734&par_task_executor,5735_gc_timer_stw,5736_gc_tracer_stw->gc_id());5737}57385739_gc_tracer_stw->report_gc_reference_stats(stats);57405741// We have completed copying any necessary live referent objects.5742assert(pss.queue_is_empty(), "both queue and overflow should be empty");57435744double ref_proc_time = os::elapsedTime() - ref_proc_start;5745g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);5746}57475748// Weak Reference processing during an evacuation pause (part 2).5749void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {5750double ref_enq_start = os::elapsedTime();57515752ReferenceProcessor* rp = _ref_processor_stw;5753assert(!rp->discovery_enabled(), "should have been disabled as part of processing");57545755// Now enqueue any remaining on the discovered lists on to5756// the pending list.5757if (!rp->processing_is_mt()) {5758// Serial reference processing...5759rp->enqueue_discovered_references();5760} else {5761// Parallel reference enqueueing57625763assert(no_of_gc_workers == workers()->active_workers(),5764"Need to reset active workers");5765assert(rp->num_q() == no_of_gc_workers, "sanity");5766assert(no_of_gc_workers <= rp->max_num_q(), "sanity");57675768G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);5769rp->enqueue_discovered_references(&par_task_executor);5770}57715772rp->verify_no_references_recorded();5773assert(!rp->discovery_enabled(), "should have been disabled");57745775// FIXME5776// CM's reference processing also cleans up the string and symbol tables.5777// Should we do that here also? 
We could, but it is a serial operation5778// and could significantly increase the pause time.57795780double ref_enq_time = os::elapsedTime() - ref_enq_start;5781g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);5782}57835784void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {5785_expand_heap_after_alloc_failure = true;5786_evacuation_failed = false;57875788// Should G1EvacuationFailureALot be in effect for this GC?5789NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)57905791g1_rem_set()->prepare_for_oops_into_collection_set_do();57925793// Disable the hot card cache.5794G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();5795hot_card_cache->reset_hot_cache_claimed_index();5796hot_card_cache->set_use_cache(false);57975798const uint n_workers = workers()->active_workers();5799assert(UseDynamicNumberOfGCThreads ||5800n_workers == workers()->total_workers(),5801"If not dynamic should be using all the workers");5802set_par_threads(n_workers);58035804init_for_evac_failure(NULL);58055806rem_set()->prepare_for_younger_refs_iterate(true);58075808assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");5809double start_par_time_sec = os::elapsedTime();5810double end_par_time_sec;58115812{5813G1RootProcessor root_processor(this);5814G1ParTask g1_par_task(this, _task_queues, &root_processor);5815// InitialMark needs claim bits to keep track of the marked-through CLDs.5816if (g1_policy()->during_initial_mark_pause()) {5817ClassLoaderDataGraph::clear_claimed_marks();5818}58195820if (G1CollectedHeap::use_parallel_gc_threads()) {5821// The individual threads will set their evac-failure closures.5822if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();5823// These tasks use ShareHeap::_process_strong_tasks5824assert(UseDynamicNumberOfGCThreads ||5825workers()->active_workers() == workers()->total_workers(),5826"If not dynamic should be using all the workers");5827workers()->run_task(&g1_par_task);5828} else {5829g1_par_task.set_for_termination(n_workers);5830g1_par_task.work(0);5831}5832end_par_time_sec = os::elapsedTime();58335834// Closing the inner scope will execute the destructor5835// for the G1RootProcessor object. 
We record the current5836// elapsed time before closing the scope so that time5837// taken for the destructor is NOT included in the5838// reported parallel time.5839}58405841G1GCPhaseTimes* phase_times = g1_policy()->phase_times();58425843double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;5844phase_times->record_par_time(par_time_ms);58455846double code_root_fixup_time_ms =5847(os::elapsedTime() - end_par_time_sec) * 1000.0;5848phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);58495850set_par_threads(0);58515852// Process any discovered reference objects - we have5853// to do this _before_ we retire the GC alloc regions5854// as we may have to copy some 'reachable' referent5855// objects (and their reachable sub-graphs) that were5856// not copied during the pause.5857process_discovered_references(n_workers);58585859if (G1StringDedup::is_enabled()) {5860double fixup_start = os::elapsedTime();58615862G1STWIsAliveClosure is_alive(this);5863G1KeepAliveClosure keep_alive(this);5864G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, phase_times);58655866double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;5867phase_times->record_string_dedup_fixup_time(fixup_time_ms);5868}58695870_allocator->release_gc_alloc_regions(n_workers, evacuation_info);5871g1_rem_set()->cleanup_after_oops_into_collection_set_do();58725873// Reset and re-enable the hot card cache.5874// Note the counts for the cards in the regions in the5875// collection set are reset when the collection set is freed.5876hot_card_cache->reset_hot_cache();5877hot_card_cache->set_use_cache(true);58785879purge_code_root_memory();58805881if (g1_policy()->during_initial_mark_pause()) {5882// Reset the claim values set during marking the strong code roots5883reset_heap_region_claim_values();5884}58855886finalize_for_evac_failure();58875888if (evacuation_failed()) {5889remove_self_forwarding_pointers();58905891// Reset the G1EvacuationFailureALot counters and flags5892// Note: the values are reset only when an actual5893// evacuation failure occurs.5894NOT_PRODUCT(reset_evacuation_should_fail();)5895}58965897// Enqueue any remaining references remaining on the STW5898// reference processor's discovered lists. We need to do5899// this after the card table is cleaned (and verified) as5900// the act of enqueueing entries on to the pending list5901// will log these updates (and dirty their associated5902// cards). 
We need these updates logged to update any5903// RSets.5904enqueue_discovered_references(n_workers);59055906redirty_logged_cards();5907COMPILER2_PRESENT(DerivedPointerTable::update_pointers());5908}59095910void G1CollectedHeap::free_region(HeapRegion* hr,5911FreeRegionList* free_list,5912bool par,5913bool locked) {5914assert(!hr->is_free(), "the region should not be free");5915assert(!hr->is_empty(), "the region should not be empty");5916assert(_hrm.is_available(hr->hrm_index()), "region should be committed");5917assert(free_list != NULL, "pre-condition");59185919if (G1VerifyBitmaps) {5920MemRegion mr(hr->bottom(), hr->end());5921concurrent_mark()->clearRangePrevBitmap(mr);5922}59235924// Clear the card counts for this region.5925// Note: we only need to do this if the region is not young5926// (since we don't refine cards in young regions).5927if (!hr->is_young()) {5928_cg1r->hot_card_cache()->reset_card_counts(hr);5929}5930hr->hr_clear(par, true /* clear_space */, locked /* locked */);5931free_list->add_ordered(hr);5932}59335934void G1CollectedHeap::free_humongous_region(HeapRegion* hr,5935FreeRegionList* free_list,5936bool par) {5937assert(hr->startsHumongous(), "this is only for starts humongous regions");5938assert(free_list != NULL, "pre-condition");59395940size_t hr_capacity = hr->capacity();5941// We need to read this before we make the region non-humongous,5942// otherwise the information will be gone.5943uint last_index = hr->last_hc_index();5944hr->clear_humongous();5945free_region(hr, free_list, par);59465947uint i = hr->hrm_index() + 1;5948while (i < last_index) {5949HeapRegion* curr_hr = region_at(i);5950assert(curr_hr->continuesHumongous(), "invariant");5951curr_hr->clear_humongous();5952free_region(curr_hr, free_list, par);5953i += 1;5954}5955}59565957void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,5958const HeapRegionSetCount& humongous_regions_removed) {5959if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {5960MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);5961_old_set.bulk_remove(old_regions_removed);5962_humongous_set.bulk_remove(humongous_regions_removed);5963}59645965}59665967void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {5968assert(list != NULL, "list can't be null");5969if (!list->is_empty()) {5970MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);5971_hrm.insert_list_into_free_list(list);5972}5973}59745975void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {5976_allocator->decrease_used(bytes);5977}59785979class G1ParCleanupCTTask : public AbstractGangTask {5980G1SATBCardTableModRefBS* _ct_bs;5981G1CollectedHeap* _g1h;5982HeapRegion* volatile _su_head;5983public:5984G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,5985G1CollectedHeap* g1h) :5986AbstractGangTask("G1 Par Cleanup CT Task"),5987_ct_bs(ct_bs), _g1h(g1h) { }59885989void work(uint worker_id) {5990HeapRegion* r;5991while (r = _g1h->pop_dirty_cards_region()) {5992clear_cards(r);5993}5994}59955996void clear_cards(HeapRegion* r) {5997// Cards of the survivors should have already been dirtied.5998if (!r->is_survivor()) {5999_ct_bs->clear(MemRegion(r->bottom(), r->end()));6000}6001}6002};60036004#ifndef PRODUCT6005class G1VerifyCardTableCleanup: public HeapRegionClosure {6006G1CollectedHeap* _g1h;6007G1SATBCardTableModRefBS* _ct_bs;6008public:6009G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)6010: _g1h(g1h), _ct_bs(ct_bs) { }6011virtual bool 
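// --------------------------------------------------------------------------
// Illustrative sketch (not part of the HotSpot sources): the region span
// walked by free_humongous_region() above. A humongous object occupies one
// "starts humongous" region followed by zero or more "continues humongous"
// regions; the tail is everything after the first region. Sizes here are
// assumed to be in words.
#include <cstddef>

namespace sketch {
// Number of heap regions spanned by a humongous object.
inline size_t humongous_region_count(size_t obj_size_in_words,
                                     size_t region_size_in_words) {
  return (obj_size_in_words + region_size_in_words - 1) / region_size_in_words;
}

template <typename FreeRegionFn>
void free_humongous_span(size_t first_region_index, size_t num_regions,
                         FreeRegionFn free_region) {
  free_region(first_region_index);              // the "starts humongous" region
  for (size_t i = 1; i < num_regions; ++i) {
    free_region(first_region_index + i);        // each "continues humongous" region
  }
}
}  // namespace sketch
// --------------------------------------------------------------------------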
doHeapRegion(HeapRegion* r) {6012if (r->is_survivor()) {6013_g1h->verify_dirty_region(r);6014} else {6015_g1h->verify_not_dirty_region(r);6016}6017return false;6018}6019};60206021void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {6022// All of the region should be clean.6023G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();6024MemRegion mr(hr->bottom(), hr->end());6025ct_bs->verify_not_dirty_region(mr);6026}60276028void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {6029// We cannot guarantee that [bottom(),end()] is dirty. Threads6030// dirty allocated blocks as they allocate them. The thread that6031// retires each region and replaces it with a new one will do a6032// maximal allocation to fill in [pre_dummy_top(),end()] but will6033// not dirty that area (one less thing to have to do while holding6034// a lock). So we can only verify that [bottom(),pre_dummy_top()]6035// is dirty.6036G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();6037MemRegion mr(hr->bottom(), hr->pre_dummy_top());6038if (hr->is_young()) {6039ct_bs->verify_g1_young_region(mr);6040} else {6041ct_bs->verify_dirty_region(mr);6042}6043}60446045void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {6046G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();6047for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {6048verify_dirty_region(hr);6049}6050}60516052void G1CollectedHeap::verify_dirty_young_regions() {6053verify_dirty_young_list(_young_list->first_region());6054}60556056bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,6057HeapWord* tams, HeapWord* end) {6058guarantee(tams <= end,6059err_msg("tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end)));6060HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);6061if (result < end) {6062gclog_or_tty->cr();6063gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,6064bitmap_name, p2i(result));6065gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,6066bitmap_name, p2i(tams), p2i(end));6067return false;6068}6069return true;6070}60716072bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {6073CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();6074CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();60756076HeapWord* bottom = hr->bottom();6077HeapWord* ptams = hr->prev_top_at_mark_start();6078HeapWord* ntams = hr->next_top_at_mark_start();6079HeapWord* end = hr->end();60806081bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);60826083bool res_n = true;6084// We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window6085// we do the clearing of the next bitmap concurrently. 
Thus, we can not verify the bitmap6086// if we happen to be in that state.6087if (mark_in_progress() || !_cmThread->in_progress()) {6088res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);6089}6090if (!res_p || !res_n) {6091gclog_or_tty->print_cr("#### Bitmap verification failed for " HR_FORMAT,6092HR_FORMAT_PARAMS(hr));6093gclog_or_tty->print_cr("#### Caller: %s", caller);6094return false;6095}6096return true;6097}60986099void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {6100if (!G1VerifyBitmaps) return;61016102guarantee(verify_bitmaps(caller, hr), "bitmap verification");6103}61046105class G1VerifyBitmapClosure : public HeapRegionClosure {6106private:6107const char* _caller;6108G1CollectedHeap* _g1h;6109bool _failures;61106111public:6112G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :6113_caller(caller), _g1h(g1h), _failures(false) { }61146115bool failures() { return _failures; }61166117virtual bool doHeapRegion(HeapRegion* hr) {6118if (hr->continuesHumongous()) return false;61196120bool result = _g1h->verify_bitmaps(_caller, hr);6121if (!result) {6122_failures = true;6123}6124return false;6125}6126};61276128void G1CollectedHeap::check_bitmaps(const char* caller) {6129if (!G1VerifyBitmaps) return;61306131G1VerifyBitmapClosure cl(caller, this);6132heap_region_iterate(&cl);6133guarantee(!cl.failures(), "bitmap verification");6134}61356136class G1CheckCSetFastTableClosure : public HeapRegionClosure {6137private:6138bool _failures;6139public:6140G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }61416142virtual bool doHeapRegion(HeapRegion* hr) {6143uint i = hr->hrm_index();6144InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);6145if (hr->isHumongous()) {6146if (hr->in_collection_set()) {6147gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);6148_failures = true;6149return true;6150}6151if (cset_state.is_in_cset()) {6152gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);6153_failures = true;6154return true;6155}6156if (hr->continuesHumongous() && cset_state.is_humongous()) {6157gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);6158_failures = true;6159return true;6160}6161} else {6162if (cset_state.is_humongous()) {6163gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);6164_failures = true;6165return true;6166}6167if (hr->in_collection_set() != cset_state.is_in_cset()) {6168gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",6169hr->in_collection_set(), cset_state.value(), i);6170_failures = true;6171return true;6172}6173if (cset_state.is_in_cset()) {6174if (hr->is_young() != (cset_state.is_young())) {6175gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",6176hr->is_young(), cset_state.value(), i);6177_failures = true;6178return true;6179}6180if (hr->is_old() != (cset_state.is_old())) {6181gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",6182hr->is_old(), cset_state.value(), i);6183_failures = true;6184return true;6185}6186}6187}6188return false;6189}61906191bool failures() const { return _failures; }6192};61936194bool G1CollectedHeap::check_cset_fast_test() {6195G1CheckCSetFastTableClosure cl;6196_hrm.iterate(&cl);6197return !cl.failures();6198}6199#endif // PRODUCT62006201void 
G1CollectedHeap::cleanUpCardTable() {6202G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();6203double start = os::elapsedTime();62046205{6206// Iterate over the dirty cards region list.6207G1ParCleanupCTTask cleanup_task(ct_bs, this);62086209if (G1CollectedHeap::use_parallel_gc_threads()) {6210set_par_threads();6211workers()->run_task(&cleanup_task);6212set_par_threads(0);6213} else {6214while (_dirty_cards_region_list) {6215HeapRegion* r = _dirty_cards_region_list;6216cleanup_task.clear_cards(r);6217_dirty_cards_region_list = r->get_next_dirty_cards_region();6218if (_dirty_cards_region_list == r) {6219// The last region.6220_dirty_cards_region_list = NULL;6221}6222r->set_next_dirty_cards_region(NULL);6223}6224}6225#ifndef PRODUCT6226if (G1VerifyCTCleanup || VerifyAfterGC) {6227G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);6228heap_region_iterate(&cleanup_verifier);6229}6230#endif6231}62326233double elapsed = os::elapsedTime() - start;6234g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);6235}62366237void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {6238size_t pre_used = 0;6239FreeRegionList local_free_list("Local List for CSet Freeing");62406241double young_time_ms = 0.0;6242double non_young_time_ms = 0.0;62436244// Since the collection set is a superset of the the young list,6245// all we need to do to clear the young list is clear its6246// head and length, and unlink any young regions in the code below6247_young_list->clear();62486249G1CollectorPolicy* policy = g1_policy();62506251double start_sec = os::elapsedTime();6252bool non_young = true;62536254HeapRegion* cur = cs_head;6255int age_bound = -1;6256size_t rs_lengths = 0;62576258while (cur != NULL) {6259assert(!is_on_master_free_list(cur), "sanity");6260if (non_young) {6261if (cur->is_young()) {6262double end_sec = os::elapsedTime();6263double elapsed_ms = (end_sec - start_sec) * 1000.0;6264non_young_time_ms += elapsed_ms;62656266start_sec = os::elapsedTime();6267non_young = false;6268}6269} else {6270if (!cur->is_young()) {6271double end_sec = os::elapsedTime();6272double elapsed_ms = (end_sec - start_sec) * 1000.0;6273young_time_ms += elapsed_ms;62746275start_sec = os::elapsedTime();6276non_young = true;6277}6278}62796280rs_lengths += cur->rem_set()->occupied_locked();62816282HeapRegion* next = cur->next_in_collection_set();6283assert(cur->in_collection_set(), "bad CS");6284cur->set_next_in_collection_set(NULL);6285cur->set_in_collection_set(false);62866287if (cur->is_young()) {6288int index = cur->young_index_in_cset();6289assert(index != -1, "invariant");6290assert((uint) index < policy->young_cset_region_length(), "invariant");6291size_t words_survived = _surviving_young_words[index];6292cur->record_surv_words_in_group(words_survived);62936294// At this point the we have 'popped' cur from the collection set6295// (linked via next_in_collection_set()) but it is still in the6296// young list (linked via next_young_region()). 
Clear the6297// _next_young_region field.6298cur->set_next_young_region(NULL);6299} else {6300int index = cur->young_index_in_cset();6301assert(index == -1, "invariant");6302}63036304assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||6305(!cur->is_young() && cur->young_index_in_cset() == -1),6306"invariant" );63076308if (!cur->evacuation_failed()) {6309MemRegion used_mr = cur->used_region();63106311// And the region is empty.6312assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");6313pre_used += cur->used();6314free_region(cur, &local_free_list, false /* par */, true /* locked */);6315} else {6316cur->uninstall_surv_rate_group();6317if (cur->is_young()) {6318cur->set_young_index_in_cset(-1);6319}6320cur->set_evacuation_failed(false);6321// The region is now considered to be old.6322cur->set_old();6323_old_set.add(cur);6324evacuation_info.increment_collectionset_used_after(cur->used());6325}6326cur = next;6327}63286329evacuation_info.set_regions_freed(local_free_list.length());6330policy->record_max_rs_lengths(rs_lengths);6331policy->cset_regions_freed();63326333double end_sec = os::elapsedTime();6334double elapsed_ms = (end_sec - start_sec) * 1000.0;63356336if (non_young) {6337non_young_time_ms += elapsed_ms;6338} else {6339young_time_ms += elapsed_ms;6340}63416342prepend_to_freelist(&local_free_list);6343decrement_summary_bytes(pre_used);6344policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);6345policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);6346}63476348class G1FreeHumongousRegionClosure : public HeapRegionClosure {6349private:6350FreeRegionList* _free_region_list;6351HeapRegionSet* _proxy_set;6352HeapRegionSetCount _humongous_regions_removed;6353size_t _freed_bytes;6354public:63556356G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :6357_free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {6358}63596360virtual bool doHeapRegion(HeapRegion* r) {6361if (!r->startsHumongous()) {6362return false;6363}63646365G1CollectedHeap* g1h = G1CollectedHeap::heap();63666367oop obj = (oop)r->bottom();6368CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();63696370// The following checks whether the humongous object is live are sufficient.6371// The main additional check (in addition to having a reference from the roots6372// or the young gen) is whether the humongous object has a remembered set entry.6373//6374// A humongous object cannot be live if there is no remembered set for it6375// because:6376// - there can be no references from within humongous starts regions referencing6377// the object because we never allocate other objects into them.6378// (I.e. there are no intra-region references that may be missed by the6379// remembered set)6380// - as soon there is a remembered set entry to the humongous starts region6381// (i.e. 
it has "escaped" to an old object) this remembered set entry will stay6382// until the end of a concurrent mark.6383//6384// It is not required to check whether the object has been found dead by marking6385// or not, in fact it would prevent reclamation within a concurrent cycle, as6386// all objects allocated during that time are considered live.6387// SATB marking is even more conservative than the remembered set.6388// So if at this point in the collection there is no remembered set entry,6389// nobody has a reference to it.6390// At the start of collection we flush all refinement logs, and remembered sets6391// are completely up-to-date wrt to references to the humongous object.6392//6393// Other implementation considerations:6394// - never consider object arrays at this time because they would pose6395// considerable effort for cleaning up the the remembered sets. This is6396// required because stale remembered sets might reference locations that6397// are currently allocated into.6398uint region_idx = r->hrm_index();6399if (!g1h->is_humongous_reclaim_candidate(region_idx) ||6400!r->rem_set()->is_empty()) {64016402if (G1TraceEagerReclaimHumongousObjects) {6403gclog_or_tty->print_cr("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",6404region_idx,6405(size_t)obj->size()*HeapWordSize,6406p2i(r->bottom()),6407r->region_num(),6408r->rem_set()->occupied(),6409r->rem_set()->strong_code_roots_list_length(),6410next_bitmap->isMarked(r->bottom()),6411g1h->is_humongous_reclaim_candidate(region_idx),6412obj->is_typeArray()6413);6414}64156416return false;6417}64186419guarantee(obj->is_typeArray(),6420err_msg("Only eagerly reclaiming type arrays is supported, but the object "6421PTR_FORMAT " is not.",6422p2i(r->bottom())));64236424if (G1TraceEagerReclaimHumongousObjects) {6425gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",6426region_idx,6427(size_t)obj->size()*HeapWordSize,6428p2i(r->bottom()),6429r->region_num(),6430r->rem_set()->occupied(),6431r->rem_set()->strong_code_roots_list_length(),6432next_bitmap->isMarked(r->bottom()),6433g1h->is_humongous_reclaim_candidate(region_idx),6434obj->is_typeArray()6435);6436}6437// Need to clear mark bit of the humongous object if already set.6438if (next_bitmap->isMarked(r->bottom())) {6439next_bitmap->clear(r->bottom());6440}6441_freed_bytes += r->used();6442r->set_containing_set(NULL);6443_humongous_regions_removed.increment(1u, r->capacity());6444g1h->free_humongous_region(r, _free_region_list, false);64456446return false;6447}64486449HeapRegionSetCount& humongous_free_count() {6450return _humongous_regions_removed;6451}64526453size_t bytes_freed() const {6454return _freed_bytes;6455}64566457size_t humongous_reclaimed() const {6458return _humongous_regions_removed.length();6459}6460};64616462void G1CollectedHeap::eagerly_reclaim_humongous_regions() {6463assert_at_safepoint(true);64646465if (!G1EagerReclaimHumongousObjects ||6466(!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {6467g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);6468return;6469}64706471double start_time = os::elapsedTime();64726473FreeRegionList local_cleanup_list("Local Humongous Cleanup List");64746475G1FreeHumongousRegionClosure 
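// --------------------------------------------------------------------------
// Illustrative sketch (not part of the HotSpot sources): the liveness test
// described by the comment above, condensed into one predicate. A humongous
// object is reclaimed eagerly only if it was flagged as a reclaim candidate
// at the start of the pause, its region's remembered set is empty (nobody
// points at it), and it is a primitive (type) array -- the closure above
// filters on the first two conditions and asserts the third. HumongousInfo
// is a hypothetical stand-in for the HeapRegion/oop queries.
namespace sketch {
struct HumongousInfo {
  bool is_reclaim_candidate;
  bool remset_is_empty;
  bool is_type_array;
};

inline bool eagerly_reclaimable(const HumongousInfo& h) {
  return h.is_reclaim_candidate && h.remset_is_empty && h.is_type_array;
}
}  // namespace sketch
// --------------------------------------------------------------------------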
cl(&local_cleanup_list);6476heap_region_iterate(&cl);64776478HeapRegionSetCount empty_set;6479remove_from_old_sets(empty_set, cl.humongous_free_count());64806481G1HRPrinter* hr_printer = _g1h->hr_printer();6482if (hr_printer->is_active()) {6483FreeRegionListIterator iter(&local_cleanup_list);6484while (iter.more_available()) {6485HeapRegion* hr = iter.get_next();6486hr_printer->cleanup(hr);6487}6488}64896490prepend_to_freelist(&local_cleanup_list);6491decrement_summary_bytes(cl.bytes_freed());64926493g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,6494cl.humongous_reclaimed());6495}64966497// This routine is similar to the above but does not record6498// any policy statistics or update free lists; we are abandoning6499// the current incremental collection set in preparation of a6500// full collection. After the full GC we will start to build up6501// the incremental collection set again.6502// This is only called when we're doing a full collection6503// and is immediately followed by the tearing down of the young list.65046505void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {6506HeapRegion* cur = cs_head;65076508while (cur != NULL) {6509HeapRegion* next = cur->next_in_collection_set();6510assert(cur->in_collection_set(), "bad CS");6511cur->set_next_in_collection_set(NULL);6512cur->set_in_collection_set(false);6513cur->set_young_index_in_cset(-1);6514cur = next;6515}6516}65176518void G1CollectedHeap::set_free_regions_coming() {6519if (G1ConcRegionFreeingVerbose) {6520gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "6521"setting free regions coming");6522}65236524assert(!free_regions_coming(), "pre-condition");6525_free_regions_coming = true;6526}65276528void G1CollectedHeap::reset_free_regions_coming() {6529assert(free_regions_coming(), "pre-condition");65306531{6532MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);6533_free_regions_coming = false;6534SecondaryFreeList_lock->notify_all();6535}65366537if (G1ConcRegionFreeingVerbose) {6538gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "6539"reset free regions coming");6540}6541}65426543void G1CollectedHeap::wait_while_free_regions_coming() {6544// Most of the time we won't have to wait, so let's do a quick test6545// first before we take the lock.6546if (!free_regions_coming()) {6547return;6548}65496550if (G1ConcRegionFreeingVerbose) {6551gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "6552"waiting for free regions");6553}65546555{6556MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);6557while (free_regions_coming()) {6558SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);6559}6560}65616562if (G1ConcRegionFreeingVerbose) {6563gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "6564"done waiting for free regions");6565}6566}65676568void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {6569assert(heap_lock_held_for_gc(),6570"the heap lock should already be held by or for this thread");6571_young_list->push_region(hr);6572}65736574class NoYoungRegionsClosure: public HeapRegionClosure {6575private:6576bool _success;6577public:6578NoYoungRegionsClosure() : _success(true) { }6579bool doHeapRegion(HeapRegion* r) {6580if (r->is_young()) {6581gclog_or_tty->print_cr("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",6582p2i(r->bottom()), p2i(r->end()));6583_success = false;6584}6585return false;6586}6587bool success() { return _success; }6588};65896590bool 
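// --------------------------------------------------------------------------
// Illustrative sketch (not part of the HotSpot sources): the flag-plus-monitor
// handshake used by set_free_regions_coming() / reset_free_regions_coming() /
// wait_while_free_regions_coming() above, written with a standard mutex and
// condition variable in place of SecondaryFreeList_lock:
#include <condition_variable>
#include <mutex>

namespace sketch {
class FreeRegionsComing {
  std::mutex              _mu;
  std::condition_variable _cv;
  bool                    _coming = false;
 public:
  void set() {
    std::lock_guard<std::mutex> g(_mu);
    _coming = true;
  }
  void reset() {
    {
      std::lock_guard<std::mutex> g(_mu);
      _coming = false;
    }
    _cv.notify_all();                            // wake anyone waiting for the regions
  }
  void wait_while_coming() {
    std::unique_lock<std::mutex> g(_mu);
    _cv.wait(g, [this] { return !_coming; });    // returns immediately if already clear
  }
};
}  // namespace sketch
// --------------------------------------------------------------------------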
G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {6591bool ret = _young_list->check_list_empty(check_sample);65926593if (check_heap) {6594NoYoungRegionsClosure closure;6595heap_region_iterate(&closure);6596ret = ret && closure.success();6597}65986599return ret;6600}66016602class TearDownRegionSetsClosure : public HeapRegionClosure {6603private:6604HeapRegionSet *_old_set;66056606public:6607TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }66086609bool doHeapRegion(HeapRegion* r) {6610if (r->is_old()) {6611_old_set->remove(r);6612} else {6613// We ignore free regions, we'll empty the free list afterwards.6614// We ignore young regions, we'll empty the young list afterwards.6615// We ignore humongous regions, we're not tearing down the6616// humongous regions set.6617assert(r->is_free() || r->is_young() || r->isHumongous(),6618"it cannot be another type");6619}6620return false;6621}66226623~TearDownRegionSetsClosure() {6624assert(_old_set->is_empty(), "post-condition");6625}6626};66276628void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {6629assert_at_safepoint(true /* should_be_vm_thread */);66306631if (!free_list_only) {6632TearDownRegionSetsClosure cl(&_old_set);6633heap_region_iterate(&cl);66346635// Note that emptying the _young_list is postponed and instead done as6636// the first step when rebuilding the regions sets again. The reason for6637// this is that during a full GC string deduplication needs to know if6638// a collected region was young or old when the full GC was initiated.6639}6640_hrm.remove_all_free_regions();6641}66426643class RebuildRegionSetsClosure : public HeapRegionClosure {6644private:6645bool _free_list_only;6646HeapRegionSet* _old_set;6647HeapRegionManager* _hrm;6648size_t _total_used;66496650public:6651RebuildRegionSetsClosure(bool free_list_only,6652HeapRegionSet* old_set, HeapRegionManager* hrm) :6653_free_list_only(free_list_only),6654_old_set(old_set), _hrm(hrm), _total_used(0) {6655assert(_hrm->num_free_regions() == 0, "pre-condition");6656if (!free_list_only) {6657assert(_old_set->is_empty(), "pre-condition");6658}6659}66606661bool doHeapRegion(HeapRegion* r) {6662if (r->continuesHumongous()) {6663return false;6664}66656666if (r->is_empty()) {6667// Add free regions to the free list6668r->set_free();6669r->set_allocation_context(AllocationContext::system());6670_hrm->insert_into_free_list(r);6671} else if (!_free_list_only) {6672assert(!r->is_young(), "we should not come across young regions");66736674if (r->isHumongous()) {6675// We ignore humongous regions, we left the humongous set unchanged6676} else {6677// Objects that were compacted would have ended up on regions6678// that were previously old or free.6679assert(r->is_free() || r->is_old(), "invariant");6680// We now consider them old, so register as such.6681r->set_old();6682_old_set->add(r);6683}6684_total_used += r->used();6685}66866687return false;6688}66896690size_t total_used() {6691return _total_used;6692}6693};66946695void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {6696assert_at_safepoint(true /* should_be_vm_thread */);66976698if (!free_list_only) {6699_young_list->empty_list();6700}67016702RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);6703heap_region_iterate(&cl);67046705if (!free_list_only) {6706_allocator->set_used(cl.total_used());6707}6708assert(_allocator->used_unlocked() == recalculate_used(),6709err_msg("inconsistent _allocator->used_unlocked(), "6710"value: " SIZE_FORMAT " recalculated: 
" SIZE_FORMAT,6711_allocator->used_unlocked(), recalculate_used()));6712}67136714void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {6715_refine_cte_cl->set_concurrent(concurrent);6716}67176718bool G1CollectedHeap::is_in_closed_subset(const void* p) const {6719HeapRegion* hr = heap_region_containing(p);6720return hr->is_in(p);6721}67226723// Methods for the mutator alloc region67246725HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,6726bool force) {6727assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);6728assert(!force || g1_policy()->can_expand_young_list(),6729"if force is true we should be able to expand the young list");6730bool young_list_full = g1_policy()->is_young_list_full();6731if (force || !young_list_full) {6732HeapRegion* new_alloc_region = new_region(word_size,6733false /* is_old */,6734false /* do_expand */);6735if (new_alloc_region != NULL) {6736set_region_short_lived_locked(new_alloc_region);6737_hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);6738check_bitmaps("Mutator Region Allocation", new_alloc_region);6739return new_alloc_region;6740}6741}6742return NULL;6743}67446745void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,6746size_t allocated_bytes) {6747assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);6748assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");67496750g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);6751_allocator->increase_used(allocated_bytes);6752_hr_printer.retire(alloc_region);6753// We update the eden sizes here, when the region is retired,6754// instead of when it's allocated, since this is the point that its6755// used space has been recored in _summary_bytes_used.6756g1mm()->update_eden_size();6757}67586759void G1CollectedHeap::set_par_threads() {6760// Don't change the number of workers. Use the value previously set6761// in the workgroup.6762assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");6763uint n_workers = workers()->active_workers();6764assert(UseDynamicNumberOfGCThreads ||6765n_workers == workers()->total_workers(),6766"Otherwise should be using the total number of workers");6767if (n_workers == 0) {6768assert(false, "Should have been set in prior evacuation pause.");6769n_workers = ParallelGCThreads;6770workers()->set_active_workers(n_workers);6771}6772set_par_threads(n_workers);6773}67746775// Methods for the GC alloc regions67766777HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,6778uint count,6779InCSetState dest) {6780assert(FreeList_lock->owned_by_self(), "pre-condition");67816782if (count < g1_policy()->max_regions(dest)) {6783const bool is_survivor = (dest.is_young());6784HeapRegion* new_alloc_region = new_region(word_size,6785!is_survivor,6786true /* do_expand */);6787if (new_alloc_region != NULL) {6788// We really only need to do this for old regions given that we6789// should never scan survivors. 

void G1CollectedHeap::set_par_threads() {
  // Don't change the number of workers. Use the value previously set
  // in the workgroup.
  assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
  uint n_workers = workers()->active_workers();
  assert(UseDynamicNumberOfGCThreads ||
         n_workers == workers()->total_workers(),
         "Otherwise should be using the total number of workers");
  if (n_workers == 0) {
    assert(false, "Should have been set in prior evacuation pause.");
    n_workers = ParallelGCThreads;
    workers()->set_active_workers(n_workers);
  }
  set_par_threads(n_workers);
}

// Methods for the GC alloc regions

HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
                                                 uint count,
                                                 InCSetState dest) {
  assert(FreeList_lock->owned_by_self(), "pre-condition");

  if (count < g1_policy()->max_regions(dest)) {
    const bool is_survivor = (dest.is_young());
    HeapRegion* new_alloc_region = new_region(word_size,
                                              !is_survivor,
                                              true /* do_expand */);
    if (new_alloc_region != NULL) {
      // We really only need to do this for old regions given that we
      // should never scan survivors. But it doesn't hurt to do it
      // for survivors too.
      new_alloc_region->record_timestamp();
      if (is_survivor) {
        new_alloc_region->set_survivor();
        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
        check_bitmaps("Survivor Region Allocation", new_alloc_region);
      } else {
        new_alloc_region->set_old();
        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
        check_bitmaps("Old Region Allocation", new_alloc_region);
      }
      bool during_im = g1_policy()->during_initial_mark_pause();
      new_alloc_region->note_start_of_copying(during_im);
      return new_alloc_region;
    }
  }
  return NULL;
}

void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                             size_t allocated_bytes,
                                             InCSetState dest) {
  bool during_im = g1_policy()->during_initial_mark_pause();
  alloc_region->note_end_of_copying(during_im);
  g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
  if (dest.is_young()) {
    young_list()->add_survivor_region(alloc_region);
  } else {
    _old_set.add(alloc_region);
  }
  _hr_printer.retire(alloc_region);
}

// Heap region set verification

class VerifyRegionListsClosure : public HeapRegionClosure {
private:
  HeapRegionSet* _old_set;
  HeapRegionSet* _humongous_set;
  HeapRegionManager* _hrm;

public:
  HeapRegionSetCount _old_count;
  HeapRegionSetCount _humongous_count;
  HeapRegionSetCount _free_count;

  VerifyRegionListsClosure(HeapRegionSet* old_set,
                           HeapRegionSet* humongous_set,
                           HeapRegionManager* hrm) :
    _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
    _old_count(), _humongous_count(), _free_count() { }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->continuesHumongous()) {
      return false;
    }

    if (hr->is_young()) {
      // TODO
    } else if (hr->startsHumongous()) {
      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u starts humongous but is not in the humongous set.", hr->hrm_index()));
      _humongous_count.increment(1u, hr->capacity());
    } else if (hr->is_empty()) {
      assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
      _free_count.increment(1u, hr->capacity());
    } else if (hr->is_old()) {
      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
      _old_count.increment(1u, hr->capacity());
    } else {
      ShouldNotReachHere();
    }
    return false;
  }

  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
    guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
    guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
              old_set->total_capacity_bytes(), _old_count.capacity()));

    guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Humongous set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
    guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Humongous set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
              humongous_set->total_capacity_bytes(), _humongous_count.capacity()));

    guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
    guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
              free_list->total_capacity_bytes(), _free_count.capacity()));
  }
};

void G1CollectedHeap::verify_region_sets() {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  // First, check the explicit lists.
  _hrm.verify();
  {
    // Given that a concurrent operation might be adding regions to
    // the secondary free list we have to take the lock before
    // verifying it.
    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    _secondary_free_list.verify_list();
  }

  // If a concurrent region freeing operation is in progress it will
  // be difficult to correctly attribute any free regions we come
  // across to the correct free list given that they might belong to
  // one of several (free_list, secondary_free_list, any local lists,
  // etc.). So, if that's the case we will skip the rest of the
  // verification operation. Alternatively, waiting for the concurrent
  // operation to complete will have a non-trivial effect on the GC's
  // operation (no concurrent operation will last longer than the
  // interval between two calls to verification) and it might hide
  // any issues that we would like to catch during testing.
  if (free_regions_coming()) {
    return;
  }

  // Make sure we append the secondary_free_list to the free_list so
  // that all free regions we will come across can be safely
  // attributed to the free_list.
  append_secondary_free_list_if_not_empty_with_lock();

  // Finally, make sure that the region accounting in the lists is
  // consistent with what we see in the heap.

  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
  heap_region_iterate(&cl);
  cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
}

// Optimized nmethod scanning

class RegisterNMethodOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  nmethod* _nm;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      assert(!hr->continuesHumongous(),
             err_msg("trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
                     " starting at " HR_FORMAT,
                     p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));

      // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
      hr->add_strong_code_root_locked(_nm);
    }
  }

public:
  RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
    _g1h(g1h), _nm(nm) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class UnregisterNMethodOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  nmethod* _nm;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      assert(!hr->continuesHumongous(),
             err_msg("trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
                     " starting at " HR_FORMAT,
                     p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));

      hr->remove_strong_code_root(_nm);
    }
  }

public:
  UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
    _g1h(g1h), _nm(nm) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

void G1CollectedHeap::register_nmethod(nmethod* nm) {
  CollectedHeap::register_nmethod(nm);

  guarantee(nm != NULL, "sanity");
  RegisterNMethodOopClosure reg_cl(this, nm);
  nm->oops_do(&reg_cl);
}

void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
  CollectedHeap::unregister_nmethod(nm);

  guarantee(nm != NULL, "sanity");
  UnregisterNMethodOopClosure reg_cl(this, nm);
  nm->oops_do(&reg_cl, true);
}

void G1CollectedHeap::purge_code_root_memory() {
  double purge_start = os::elapsedTime();
  G1CodeRootSet::purge();
  double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
  g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
}

class RebuildStrongCodeRootClosure: public CodeBlobClosure {
  G1CollectedHeap* _g1h;

public:
  RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
    _g1h(g1h) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
    if (nm == NULL) {
      return;
    }

    if (ScavengeRootsInCode) {
      _g1h->register_nmethod(nm);
    }
  }
};

void G1CollectedHeap::rebuild_strong_code_roots() {
  RebuildStrongCodeRootClosure blob_cl(this);
  CodeCache::blobs_do(&blob_cl);
}
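
// Illustrative sketch (comment only, not compiled): register_nmethod() and
// unregister_nmethod() above are the CollectedHeap hooks invoked by the runtime
// when compiled code is installed or flushed (see nmethod.cpp), roughly:
//
//   Universe::heap()->register_nmethod(nm);    // nmethod installed in the code cache
//   Universe::heap()->unregister_nmethod(nm);  // nmethod flushed / unloaded
//
// rebuild_strong_code_roots() then repopulates the per-region strong code root
// sets by walking the whole code cache, e.g. after a full GC has cleared them.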