// Source: hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
// (aarch64-shenandoah-jdk8u272-b10)
/*1* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"26#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"27#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"28#include "gc_implementation/parallelScavenge/psTasks.hpp"29#include "gc_implementation/parallelScavenge/psYoungGen.hpp"30#include "oops/oop.inline.hpp"31#include "oops/oop.psgc.inline.hpp"32#include "runtime/prefetch.inline.hpp"3334// Checks an individual oop for missing precise marks. 
// A mark may be either dirty or newgen.
class CheckForUnmarkedOops : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;
  HeapWord*           _unmarked_addr;   // first oop slot found without a card mark
  jbyte*              _unmarked_card;   // card covering that slot (for debugging)

 protected:
  // If *p is an old->young reference whose card carries neither a dirty nor
  // a newgen mark, remember it.  Only the first miss is recorded so the
  // offending address/card pair can be inspected in a debugger.
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == NULL) {
        _unmarked_addr = (HeapWord*)p;
        _unmarked_card = _card_table->byte_for(p);
      }
    }
  }

 public:
  // NOTE(review): _unmarked_card is deliberately left uninitialized here; it
  // is only read after _unmarked_addr has been set (both are set together).
  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }

  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }

  // True iff some scanned oop slot lacked an (imprecise) card mark.
  bool has_unmarked_oop() {
    return _unmarked_addr != NULL;
  }
};

// Checks all objects for the existence of some type of mark,
// precise or imprecise, dirty or newgen.
class CheckForUnmarkedObjects : public ObjectClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 public:
  CheckForUnmarkedObjects() {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    _young_gen = heap->young_gen();
    _card_table = (CardTableExtension*)heap->barrier_set();
    // No point in asserting barrier set type here. Need to make CardTableExtension
    // a unique barrier set type.
  }

  // Card marks are not precise. The current system can leave us with
  // a mishmash of precise marks and beginning-of-object marks. This means
  // we test for missing precise marks first.  If any are found, we don't
  // fail unless the object head is also unmarked.
  virtual void do_object(oop obj) {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate_no_header(&object_check);
    if (object_check.has_unmarked_oop()) {
      assert(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
    }
  }
};

// Checks for precise marking of oops as newgen.
class CheckForPreciseMarks : public OopClosure {
 private:
  PSYoungGen*         _young_gen;
  CardTableExtension* _card_table;

 protected:
  // Every old->young reference must sit on a precisely marked card; the card
  // is then re-marked as newgen so the verify pass can distinguish it later.
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    if (_young_gen->is_in_reserved(obj)) {
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
      _card_table->set_card_newgen(p);
    }
  }

 public:
  CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) :
    _young_gen(young_gen), _card_table(card_table) { }

  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
};

// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.

// Do not call this method if the space is empty.
// It is a waste to start tasks and get here only to
// do no work. If this method needs to be called
// when the space is empty, fix the calculation of
// end_card to allow sp_top == sp->bottom().

void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
                                                    MutableSpace* sp,
                                                    HeapWord* space_top,
                                                    PSPromotionManager* pm,
                                                    uint stripe_number,
                                                    uint stripe_total) {
  int ssize = 128; // Naked constant!
// Work unit = 64k (128 cards x 512 bytes per card).
  int dirty_card_count = 0;   // NOTE(review): never updated below; kept as-is.

  // It is a waste to get here if empty.
  assert(sp->bottom() < sp->top(), "Should not be called if empty");
  oop* sp_top = (oop*)space_top;
  jbyte* start_card = byte_for(sp->bottom());
  jbyte* end_card   = byte_for(sp_top - 1) + 1;
  oop* last_scanned = NULL; // Prevent scanning objects more than once
  // The width of the stripe ssize*stripe_total must be
  // consistent with the number of stripes so that the complete slice
  // is covered.
  size_t slice_width = ssize * stripe_total;
  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
    jbyte* worker_start_card = slice + stripe_number * ssize;
    if (worker_start_card >= end_card)
      return; // We're done.

    jbyte* worker_end_card = worker_start_card + ssize;
    if (worker_end_card > end_card)
      worker_end_card = end_card;

    // We do not want to scan objects more than once. In order to accomplish
    // this, we assert that any object with an object head inside our 'slice'
    // belongs to us. We may need to extend the range of scanned cards if the
    // last object continues into the next 'slice'.
    //
    // Note! ending cards are exclusive!
    HeapWord* slice_start = addr_for(worker_start_card);
    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));

#ifdef ASSERT
    if (GCWorkerDelayMillis > 0) {
      // Delay 1 worker so that it proceeds after all the work
      // has been completed.
      if (stripe_number < 2) {
        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
      }
    }
#endif

    // If there are no objects starting within the chunk, skip it.
    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
      continue;
    }
    // Update our beginning addr: the first object overlapping this slice may
    // start before it; if so, it belongs to the previous stripe — skip it.
    HeapWord* first_object = start_array->object_start(slice_start);
    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    if (first_object < slice_start) {
      last_scanned = (oop*)(first_object + oop(first_object)->size());
      debug_only(first_object_within_slice = last_scanned;)
      worker_start_card = byte_for(last_scanned);
    }

    // Update the ending addr
    if (slice_end < (HeapWord*)sp_top) {
      // The subtraction is important! An object may start precisely at slice_end.
      HeapWord* last_object = start_array->object_start(slice_end - 1);
      slice_end = last_object + oop(last_object)->size();
      // worker_end_card is exclusive, so bump it one past the end of last_object's
      // covered span.
      worker_end_card = byte_for(slice_end) + 1;

      if (worker_end_card > end_card)
        worker_end_card = end_card;
    }

    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");

    jbyte* current_card = worker_start_card;
    while (current_card < worker_end_card) {
      // Find an unclean card.
      while (current_card < worker_end_card && card_is_clean(*current_card)) {
        current_card++;
      }
      jbyte* first_unclean_card = current_card;

      // Find the end of a run of contiguous unclean cards
      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          current_card++;
        }

        if (current_card < worker_end_card) {
          // Some objects may be large enough to span several cards. If such
          // an object has more than one dirty card, separated by a clean card,
          // we will attempt to scan it twice. The test against "last_scanned"
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
      jbyte* following_clean_card = current_card;

      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
        // cards are no longer scanned again (see comment at end
        // of loop on the increment of "current_card").  Test that
        // hypothesis before removing this code.
        // If this code is removed, deal with the first time through
        // the loop when the last_scanned is the object starting in
        // the previous slice.
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
        if (p < last_scanned) {
          // Avoid scanning more than once; this can happen because
          // newgen cards set by GC may be a different set than the
          // originally dirty set
          p = last_scanned;
        }
        oop* to = (oop*)addr_for(following_clean_card);

        // Test slice_end first!
        if ((HeapWord*)to > slice_end) {
          to = (oop*)slice_end;
        } else if (to > sp_top) {
          to = sp_top;
        }

        // we know which cards to scan, now clear them; the first and last
        // cards of the worker range are left alone so neighbouring stripes
        // still see their boundary marks.
        if (first_unclean_card <= worker_start_card+1)
          first_unclean_card = worker_start_card+1;
        if (following_clean_card >= worker_end_card-1)
          following_clean_card = worker_end_card-1;

        while (first_unclean_card < following_clean_card) {
          *first_unclean_card++ = clean_card;
        }

        const int interval = PrefetchScanIntervalInBytes;
        // scan all objects in the range
        if (interval != 0) {
          while (p < to) {
            Prefetch::write(p, interval);
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        } else {
          while (p < to) {
            oop m = oop(p);
            assert(m->is_oop_or_null(), "check for header");
            m->push_contents(pm);
            p += m->size();
          }
          pm->drain_stacks_cond_depth();
        }
        last_scanned = p;
      }
      // "current_card" is still the "following_clean_card" or
      // the current_card is >= the worker_end_card so the
      // loop will not execute again.
      assert((current_card == following_clean_card) ||
             (current_card >= worker_end_card),
             "current_card should only be incremented if it still equals "
             "following_clean_card");
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated. If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
      current_card++;
    }
  }
}

// This should be called before a scavenge: verify every old-gen object has
// at least an imprecise (dirty/newgen) mark covering its young-gen refs.
void CardTableExtension::verify_all_young_refs_imprecise() {
  CheckForUnmarkedObjects check;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  old_gen->object_iterate(&check);
}

// This should be called immediately after a scavenge, before mutators resume.
void CardTableExtension::verify_all_young_refs_precise() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  CheckForPreciseMarks check(heap->young_gen(), (CardTableExtension*)heap->barrier_set());

  old_gen->oop_iterate_no_header(&check);

  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
}

// After the precise pass: every card over mr must be clean or verify_card;
// convert verify_card marks back to youngergen_card.
void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
  CardTableExtension* card_table = (CardTableExtension*)Universe::heap()->barrier_set();
  // FIX ME ASSERT HERE

  jbyte* bot = card_table->byte_for(mr.start());
  jbyte* top = card_table->byte_for(mr.end());
  while (bot <= top) {
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
    if (*bot == verify_card)
      *bot = youngergen_card;
    bot++;
  }
}

// True iff the card covering addr carries any mark (dirty or newgen).
bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
  jbyte* p = byte_for(addr);
  jbyte val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
}

// Also includes verify_card
bool CardTableExtension::addr_is_marked_precise(void *addr) {
  jbyte* p =
byte_for(addr);383jbyte val = *p;384385if (card_is_newgen(val))386return true;387388if (card_is_verify(val))389return true;390391if (card_is_clean(val))392return false;393394if (card_is_dirty(val))395return false;396397assert(false, "Found unhandled card mark type");398399return false;400}401402// Assumes that only the base or the end changes. This allows indentification403// of the region that is being resized. The404// CardTableModRefBS::resize_covered_region() is used for the normal case405// where the covered regions are growing or shrinking at the high end.406// The method resize_covered_region_by_end() is analogous to407// CardTableModRefBS::resize_covered_region() but408// for regions that grow or shrink at the low end.409void CardTableExtension::resize_covered_region(MemRegion new_region) {410411for (int i = 0; i < _cur_covered_regions; i++) {412if (_covered[i].start() == new_region.start()) {413// Found a covered region with the same start as the414// new region. The region is growing or shrinking415// from the start of the region.416resize_covered_region_by_start(new_region);417return;418}419if (_covered[i].start() > new_region.start()) {420break;421}422}423424int changed_region = -1;425for (int j = 0; j < _cur_covered_regions; j++) {426if (_covered[j].end() == new_region.end()) {427changed_region = j;428// This is a case where the covered region is growing or shrinking429// at the start of the region.430assert(changed_region != -1, "Don't expect to add a covered region");431assert(_covered[changed_region].byte_size() != new_region.byte_size(),432"The sizes should be different here");433resize_covered_region_by_end(changed_region, new_region);434return;435}436}437// This should only be a new covered region (where no existing438// covered region matches at the start or the end).439assert(_cur_covered_regions < _max_covered_regions,440"An existing region should have been found");441resize_covered_region_by_start(new_region);442}443444void 
CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {445CardTableModRefBS::resize_covered_region(new_region);446debug_only(verify_guard();)447}448449void CardTableExtension::resize_covered_region_by_end(int changed_region,450MemRegion new_region) {451assert(SafepointSynchronize::is_at_safepoint(),452"Only expect an expansion at the low end at a GC");453debug_only(verify_guard();)454#ifdef ASSERT455for (int k = 0; k < _cur_covered_regions; k++) {456if (_covered[k].end() == new_region.end()) {457assert(changed_region == k, "Changed region is incorrect");458break;459}460}461#endif462463// Commit new or uncommit old pages, if necessary.464if (resize_commit_uncommit(changed_region, new_region)) {465// Set the new start of the committed region466resize_update_committed_table(changed_region, new_region);467}468469// Update card table entries470resize_update_card_table_entries(changed_region, new_region);471472// Update the covered region473resize_update_covered_table(changed_region, new_region);474475if (TraceCardTableModRefBS) {476int ind = changed_region;477gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");478gclog_or_tty->print_cr(" "479" _covered[%d].start(): " INTPTR_FORMAT480" _covered[%d].last(): " INTPTR_FORMAT,481ind, p2i(_covered[ind].start()),482ind, p2i(_covered[ind].last()));483gclog_or_tty->print_cr(" "484" _committed[%d].start(): " INTPTR_FORMAT485" _committed[%d].last(): " INTPTR_FORMAT,486ind, p2i(_committed[ind].start()),487ind, p2i(_committed[ind].last()));488gclog_or_tty->print_cr(" "489" byte_for(start): " INTPTR_FORMAT490" byte_for(last): " INTPTR_FORMAT,491p2i(byte_for(_covered[ind].start())),492p2i(byte_for(_covered[ind].last())));493gclog_or_tty->print_cr(" "494" addr_for(start): " INTPTR_FORMAT495" addr_for(last): " INTPTR_FORMAT,496p2i(addr_for((jbyte*) _committed[ind].start())),497p2i(addr_for((jbyte*) _committed[ind].last())));498}499debug_only(verify_guard();)500}501502bool 
CardTableExtension::resize_commit_uncommit(int changed_region,
                                           MemRegion new_region) {
  // Returns true iff the committed-region table needs updating
  // (i.e. new pages were committed at the low end).
  bool result = false;
  // Commit new or uncommit old pages, if necessary.
  MemRegion cur_committed = _committed[changed_region];
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
  if (min_prev_start < cur_committed.start()) {
    // Only really need to set start of "cur_committed" to
    // the new start (min_prev_start) but assertion checking code
    // below uses cur_committed.end() so make it correct.
    MemRegion new_committed =
      MemRegion(min_prev_start, cur_committed.end());
    cur_committed = new_committed;
  }
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(cur_committed.start() ==
         (HeapWord*) align_size_up((uintptr_t) cur_committed.start(),
                                   os::vm_page_size()),
         "Starts should have proper alignment");
#endif

  jbyte* new_start = byte_for(new_region.start());
  // Round down because this is for the start address
  HeapWord* new_start_aligned =
    (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  // The guard page is always committed and should not be committed over.
  // This method is used in cases where the generation is growing toward
  // lower addresses but the guard region is still at the end of the
  // card table.  That still makes sense when looking for writes
  // off the end of the card table.
  if (new_start_aligned < cur_committed.start()) {
    // Expand the committed region
    //
    // Case A
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++|
    //                  |+ new committed +++++++++++++++++|
    //
    // Case B
    //                                          |+ guard +|
    //                              |+ cur committed +|
    //                  |+ new committed +++++++|
    //
    // These are not expected because the calculation of the
    // cur committed region and the new committed region
    // share the same end for the covered region.
    // Case C
    //                                          |+ guard +|
    //                              |+ cur committed +|
    //                  |+ new committed +++++++++++++++++|
    // Case D
    //                                          |+ guard +|
    //                          |+ cur committed +++++++++++|
    //                  |+ new committed +++++++|

    HeapWord* new_end_for_commit =
      MIN2(cur_committed.end(), _guard_region.start());
    if(new_start_aligned < new_end_for_commit) {
      MemRegion new_committed =
        MemRegion(new_start_aligned, new_end_for_commit);
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), !ExecMem,
                                "card table expansion");
    }
    result = true;
  } else if (new_start_aligned > cur_committed.start()) {
    // Shrink the committed region
#if 0 // uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions.  One region A can uncommit space
      // that it owns but which is being used by another region B (maybe).
      // Region B has not committed the space because it was already
      // committed by region A.
    MemRegion uncommit_region = committed_unique_to_self(changed_region,
      MemRegion(cur_committed.start(), new_start_aligned));
    if (!uncommit_region.is_empty()) {
      if (!os::uncommit_memory((char*)uncommit_region.start(),
                               uncommit_region.byte_size())) {
        // If the uncommit fails, ignore it.
Let the586// committed table resizing go even though the committed587// table will over state the committed space.588}589}590#else591assert(!result, "Should be false with current workaround");592#endif593}594assert(_committed[changed_region].end() == cur_committed.end(),595"end should not change");596return result;597}598599void CardTableExtension::resize_update_committed_table(int changed_region,600MemRegion new_region) {601602jbyte* new_start = byte_for(new_region.start());603// Set the new start of the committed region604HeapWord* new_start_aligned =605(HeapWord*)align_size_down((uintptr_t)new_start,606os::vm_page_size());607MemRegion new_committed = MemRegion(new_start_aligned,608_committed[changed_region].end());609_committed[changed_region] = new_committed;610_committed[changed_region].set_start(new_start_aligned);611}612613void CardTableExtension::resize_update_card_table_entries(int changed_region,614MemRegion new_region) {615debug_only(verify_guard();)616MemRegion original_covered = _covered[changed_region];617// Initialize the card entries. Only consider the618// region covered by the card table (_whole_heap)619jbyte* entry;620if (new_region.start() < _whole_heap.start()) {621entry = byte_for(_whole_heap.start());622} else {623entry = byte_for(new_region.start());624}625jbyte* end = byte_for(original_covered.start());626// If _whole_heap starts at the original covered regions start,627// this loop will not execute.628while (entry < end) { *entry++ = clean_card; }629}630631void CardTableExtension::resize_update_covered_table(int changed_region,632MemRegion new_region) {633// Update the covered region634_covered[changed_region].set_start(new_region.start());635_covered[changed_region].set_word_size(new_region.word_size());636637// reorder regions. 
There should only be at most 1 out638// of order.639for (int i = _cur_covered_regions-1 ; i > 0; i--) {640if (_covered[i].start() < _covered[i-1].start()) {641MemRegion covered_mr = _covered[i-1];642_covered[i-1] = _covered[i];643_covered[i] = covered_mr;644MemRegion committed_mr = _committed[i-1];645_committed[i-1] = _committed[i];646_committed[i] = committed_mr;647break;648}649}650#ifdef ASSERT651for (int m = 0; m < _cur_covered_regions-1; m++) {652assert(_covered[m].start() <= _covered[m+1].start(),653"Covered regions out of order");654assert(_committed[m].start() <= _committed[m+1].start(),655"Committed regions out of order");656}657#endif658}659660// Returns the start of any committed region that is lower than661// the target committed region (index ind) and that intersects the662// target region. If none, return start of target region.663//664// -------------665// | |666// -------------667// ------------668// | target |669// ------------670// -------------671// | |672// -------------673// ^ returns this674//675// -------------676// | |677// -------------678// ------------679// | target |680// ------------681// -------------682// | |683// -------------684// ^ returns this685686HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {687assert(_cur_covered_regions >= 0, "Expecting at least on region");688HeapWord* min_start = _committed[ind].start();689for (int j = 0; j < ind; j++) {690HeapWord* this_start = _committed[j].start();691if ((this_start < min_start) &&692!(_committed[j].intersection(_committed[ind])).is_empty()) {693min_start = this_start;694}695}696return min_start;697}698699700