Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_interface/collectedHeap.cpp
32285 views
/*1* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "classfile/systemDictionary.hpp"26#include "gc_implementation/shared/gcHeapSummary.hpp"27#include "gc_implementation/shared/gcTrace.hpp"28#include "gc_implementation/shared/gcTraceTime.hpp"29#include "gc_implementation/shared/gcWhen.hpp"30#include "gc_implementation/shared/vmGCOperations.hpp"31#include "gc_interface/allocTracer.hpp"32#include "gc_interface/collectedHeap.hpp"33#include "gc_interface/collectedHeap.inline.hpp"34#include "memory/metaspace.hpp"35#include "oops/oop.inline.hpp"36#include "oops/instanceMirrorKlass.hpp"37#include "runtime/init.hpp"38#include "runtime/thread.inline.hpp"39#include "services/heapDumper.hpp"404142#ifdef ASSERT43int CollectedHeap::_fire_out_of_memory_count = 0;44#endif4546size_t CollectedHeap::_filler_array_max_size = 0;4748template <>49void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {50st->print_cr("GC heap %s", m.is_before ? "before" : "after");51st->print_raw(m);52}5354void GCHeapLog::log_heap(bool before) {55if (!should_log()) {56return;57}5859double timestamp = fetch_timestamp();60MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);61int index = compute_log_index();62_records[index].thread = NULL; // Its the GC thread so it's not that interesting.63_records[index].timestamp = timestamp;64_records[index].data.is_before = before;65stringStream st(_records[index].data.buffer(), _records[index].data.size());66if (before) {67Universe::print_heap_before_gc(&st, true);68} else {69Universe::print_heap_after_gc(&st, true);70}71}7273VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {74size_t capacity_in_words = capacity() / HeapWordSize;7576return VirtualSpaceSummary(77reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());78}7980GCHeapSummary CollectedHeap::create_heap_summary() {81VirtualSpaceSummary heap_space = create_heap_space_summary();82return GCHeapSummary(heap_space, used());83}8485MetaspaceSummary CollectedHeap::create_metaspace_summary() {86const MetaspaceSizes meta_space(87MetaspaceAux::committed_bytes(),88MetaspaceAux::used_bytes(),89MetaspaceAux::reserved_bytes());90const MetaspaceSizes data_space(91MetaspaceAux::committed_bytes(Metaspace::NonClassType),92MetaspaceAux::used_bytes(Metaspace::NonClassType),93MetaspaceAux::reserved_bytes(Metaspace::NonClassType));94const MetaspaceSizes class_space(95MetaspaceAux::committed_bytes(Metaspace::ClassType),96MetaspaceAux::used_bytes(Metaspace::ClassType),97MetaspaceAux::reserved_bytes(Metaspace::ClassType));9899const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =100MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType);101const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =102MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType);103104return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,105ms_chunk_free_list_summary, class_chunk_free_list_summary);106}107108void CollectedHeap::print_heap_before_gc() {109if (PrintHeapAtGC) {110Universe::print_heap_before_gc();111}112if (_gc_heap_log != NULL) {113_gc_heap_log->log_heap_before();114}115}116117void CollectedHeap::print_heap_after_gc() {118if (PrintHeapAtGC) {119Universe::print_heap_after_gc();120}121if (_gc_heap_log != NULL) {122_gc_heap_log->log_heap_after();123}124}125126void CollectedHeap::register_nmethod(nmethod* nm) {127assert_locked_or_safepoint(CodeCache_lock);128}129130void CollectedHeap::unregister_nmethod(nmethod* nm) {131assert_locked_or_safepoint(CodeCache_lock);132}133134void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {135const GCHeapSummary& heap_summary = create_heap_summary();136gc_tracer->report_gc_heap_summary(when, heap_summary);137138const MetaspaceSummary& metaspace_summary = create_metaspace_summary();139gc_tracer->report_metaspace_summary(when, metaspace_summary);140}141142void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) {143trace_heap(GCWhen::BeforeGC, gc_tracer);144}145146void CollectedHeap::trace_heap_after_gc(GCTracer* gc_tracer) {147trace_heap(GCWhen::AfterGC, gc_tracer);148}149150// Memory state functions.151152153CollectedHeap::CollectedHeap() : _n_par_threads(0)154{155const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));156const size_t elements_per_word = HeapWordSize / sizeof(jint);157_filler_array_max_size = align_object_size(filler_array_hdr_size() +158max_len / elements_per_word);159160_barrier_set = NULL;161_is_gc_active = false;162_total_collections = _total_full_collections = 0;163_gc_cause = _gc_lastcause = GCCause::_no_gc;164NOT_PRODUCT(_promotion_failure_alot_count = 0;)165NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)166167if (UsePerfData) {168EXCEPTION_MARK;169170// create the gc cause jvmstat counters171_perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",17280, GCCause::to_string(_gc_cause), CHECK);173174_perf_gc_lastcause =175PerfDataManager::create_string_variable(SUN_GC, "lastCause",17680, GCCause::to_string(_gc_lastcause), CHECK);177}178_defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.179// Create the ring log180if (LogEvents) {181_gc_heap_log = new GCHeapLog();182} else {183_gc_heap_log = NULL;184}185}186187// This interface assumes that it's being called by the188// vm thread. It collects the heap assuming that the189// heap lock is already held and that we are executing in190// the context of the vm thread.191void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {192assert(Thread::current()->is_VM_thread(), "Precondition#1");193assert(Heap_lock->is_locked(), "Precondition#2");194GCCauseSetter gcs(this, cause);195switch (cause) {196case GCCause::_heap_inspection:197case GCCause::_heap_dump:198case GCCause::_metadata_GC_threshold : {199HandleMark hm;200do_full_collection(false); // don't clear all soft refs201break;202}203case GCCause::_last_ditch_collection: {204HandleMark hm;205do_full_collection(true); // do clear all soft refs206break;207}208default:209ShouldNotReachHere(); // Unexpected use of this function210}211}212213void CollectedHeap::pre_initialize() {214// Used for ReduceInitialCardMarks (when COMPILER2 is used);215// otherwise remains unused.216#ifdef COMPILER2217_defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()218&& (DeferInitialCardMark || card_mark_must_follow_store());219#else220assert(_defer_initial_card_mark == false, "Who would set it?");221#endif222}223224#ifndef PRODUCT225void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {226if (CheckMemoryInitialization && ZapUnusedHeapArea) {227for (size_t slot = 0; slot < size; slot += 1) {228assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),229"Found badHeapWordValue in post-allocation check");230}231}232}233234void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {235if (CheckMemoryInitialization && ZapUnusedHeapArea) {236for (size_t slot = 0; slot < size; slot += 1) {237assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),238"Found non badHeapWordValue in pre-allocation check");239}240}241}242#endif // PRODUCT243244#ifdef ASSERT245void CollectedHeap::check_for_valid_allocation_state() {246Thread *thread = Thread::current();247// How to choose between a pending exception and a potential248// OutOfMemoryError? Don't allow pending exceptions.249// This is a VM policy failure, so how do we exhaustively test it?250assert(!thread->has_pending_exception(),251"shouldn't be allocating with pending exception");252if (StrictSafepointChecks) {253assert(thread->allow_allocation(),254"Allocation done by thread for which allocation is blocked "255"by No_Allocation_Verifier!");256// Allocation of an oop can always invoke a safepoint,257// hence, the true argument258thread->check_for_valid_safepoint_state(true);259}260}261#endif262263HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size) {264265// Retain tlab and allocate object in shared space if266// the amount free in the tlab is too large to discard.267if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {268thread->tlab().record_slow_allocation(size);269return NULL;270}271272// Discard tlab and allocate a new one.273// To minimize fragmentation, the last TLAB may be smaller than the rest.274size_t new_tlab_size = thread->tlab().compute_size(size);275276thread->tlab().clear_before_allocation();277278if (new_tlab_size == 0) {279return NULL;280}281282// Allocate a new TLAB...283HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);284if (obj == NULL) {285return NULL;286}287288if (ZeroTLAB) {289// ..and clear it.290Copy::zero_to_words(obj, new_tlab_size);291} else {292// ...and zap just allocated object.293#ifdef ASSERT294// Skip mangling the space corresponding to the object header to295// ensure that the returned space is not considered parsable by296// any concurrent GC thread.297size_t hdr_size = oopDesc::header_size();298Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);299#endif // ASSERT300}301thread->tlab().fill(obj, obj + size, new_tlab_size);302return obj;303}304305void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {306MemRegion deferred = thread->deferred_card_mark();307if (!deferred.is_empty()) {308assert(_defer_initial_card_mark, "Otherwise should be empty");309{310// Verify that the storage points to a parsable object in heap311DEBUG_ONLY(oop old_obj = oop(deferred.start());)312assert(is_in(old_obj), "Not in allocated heap");313assert(!can_elide_initializing_store_barrier(old_obj),314"Else should have been filtered in new_store_pre_barrier()");315assert(old_obj->is_oop(true), "Not an oop");316assert(deferred.word_size() == (size_t)(old_obj->size()),317"Mismatch: multiple objects?");318}319BarrierSet* bs = barrier_set();320assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");321bs->write_region(deferred);322// "Clear" the deferred_card_mark field323thread->set_deferred_card_mark(MemRegion());324}325assert(thread->deferred_card_mark().is_empty(), "invariant");326}327328size_t CollectedHeap::max_tlab_size() const {329// TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].330// This restriction could be removed by enabling filling with multiple arrays.331// If we compute that the reasonable way as332// header_size + ((sizeof(jint) * max_jint) / HeapWordSize)333// we'll overflow on the multiply, so we do the divide first.334// We actually lose a little by dividing first,335// but that just makes the TLAB somewhat smaller than the biggest array,336// which is fine, since we'll be able to fill that.337size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +338sizeof(jint) *339((juint) max_jint / (size_t) HeapWordSize);340return align_size_down(max_int_size, MinObjAlignment);341}342343// Helper for ReduceInitialCardMarks. For performance,344// compiled code may elide card-marks for initializing stores345// to a newly allocated object along the fast-path. We346// compensate for such elided card-marks as follows:347// (a) Generational, non-concurrent collectors, such as348// GenCollectedHeap(ParNew,DefNew,Tenured) and349// ParallelScavengeHeap(ParallelGC, ParallelOldGC)350// need the card-mark if and only if the region is351// in the old gen, and do not care if the card-mark352// succeeds or precedes the initializing stores themselves,353// so long as the card-mark is completed before the next354// scavenge. For all these cases, we can do a card mark355// at the point at which we do a slow path allocation356// in the old gen, i.e. in this call.357// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires358// in addition that the card-mark for an old gen allocated359// object strictly follow any associated initializing stores.360// In these cases, the memRegion remembered below is361// used to card-mark the entire region either just before the next362// slow-path allocation by this thread or just before the next scavenge or363// CMS-associated safepoint, whichever of these events happens first.364// (The implicit assumption is that the object has been fully365// initialized by this point, a fact that we assert when doing the366// card-mark.)367// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a368// G1 concurrent marking is in progress an SATB (pre-write-)barrier is369// is used to remember the pre-value of any store. Initializing370// stores will not need this barrier, so we need not worry about371// compensating for the missing pre-barrier here. Turning now372// to the post-barrier, we note that G1 needs a RS update barrier373// which simply enqueues a (sequence of) dirty cards which may374// optionally be refined by the concurrent update threads. Note375// that this barrier need only be applied to a non-young write,376// but, like in CMS, because of the presence of concurrent refinement377// (much like CMS' precleaning), must strictly follow the oop-store.378// Thus, using the same protocol for maintaining the intended379// invariants turns out, serendepitously, to be the same for both380// G1 and CMS.381//382// For any future collector, this code should be reexamined with383// that specific collector in mind, and the documentation above suitably384// extended and updated.385oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {386// If a previous card-mark was deferred, flush it now.387flush_deferred_store_barrier(thread);388if (can_elide_initializing_store_barrier(new_obj)) {389// The deferred_card_mark region should be empty390// following the flush above.391assert(thread->deferred_card_mark().is_empty(), "Error");392} else {393MemRegion mr((HeapWord*)new_obj, new_obj->size());394assert(!mr.is_empty(), "Error");395if (_defer_initial_card_mark) {396// Defer the card mark397thread->set_deferred_card_mark(mr);398} else {399// Do the card mark400BarrierSet* bs = barrier_set();401assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");402bs->write_region(mr);403}404}405return new_obj;406}407408size_t CollectedHeap::filler_array_hdr_size() {409return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long410}411412size_t CollectedHeap::filler_array_min_size() {413return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment414}415416#ifdef ASSERT417void CollectedHeap::fill_args_check(HeapWord* start, size_t words)418{419assert(words >= min_fill_size(), "too small to fill");420assert(words % MinObjAlignment == 0, "unaligned size");421assert(Universe::heap()->is_in_reserved(start), "not in heap");422assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");423}424425void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)426{427if (ZapFillerObjects && zap) {428Copy::fill_to_words(start + filler_array_hdr_size(),429words - filler_array_hdr_size(), 0XDEAFBABE);430}431}432#endif // ASSERT433434void435CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)436{437assert(words >= filler_array_min_size(), "too small for an array");438assert(words <= filler_array_max_size(), "too big for a single object");439440const size_t payload_size = words - filler_array_hdr_size();441const size_t len = payload_size * HeapWordSize / sizeof(jint);442assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));443444// Set the length first for concurrent GC.445((arrayOop)start)->set_length((int)len);446post_allocation_setup_common(Universe::intArrayKlassObj(), start);447DEBUG_ONLY(zap_filler_array(start, words, zap);)448}449450void451CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)452{453assert(words <= filler_array_max_size(), "too big for a single object");454455if (words >= filler_array_min_size()) {456fill_with_array(start, words, zap);457} else if (words > 0) {458assert(words == min_fill_size(), "unaligned size");459post_allocation_setup_common(SystemDictionary::Object_klass(), start);460}461}462463void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)464{465DEBUG_ONLY(fill_args_check(start, words);)466HandleMark hm; // Free handles before leaving.467fill_with_object_impl(start, words, zap);468}469470void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)471{472DEBUG_ONLY(fill_args_check(start, words);)473HandleMark hm; // Free handles before leaving.474475#ifdef _LP64476// A single array can fill ~8G, so multiple objects are needed only in 64-bit.477// First fill with arrays, ensuring that any remaining space is big enough to478// fill. The remainder is filled with a single object.479const size_t min = min_fill_size();480const size_t max = filler_array_max_size();481while (words > max) {482const size_t cur = words - max >= min ? max : max - min;483fill_with_array(start, cur, zap);484start += cur;485words -= cur;486}487#endif488489fill_with_object_impl(start, words, zap);490}491492void CollectedHeap::post_initialize() {493collector_policy()->post_heap_initialize();494}495496HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {497guarantee(false, "thread-local allocation buffers not supported");498return NULL;499}500501void CollectedHeap::ensure_parsability(bool retire_tlabs) {502// The second disjunct in the assertion below makes a concession503// for the start-up verification done while the VM is being504// created. Callers be careful that you know that mutators505// aren't going to interfere -- for instance, this is permissible506// if we are still single-threaded and have either not yet507// started allocating (nothing much to verify) or we have508// started allocating but are now a full-fledged JavaThread509// (and have thus made our TLAB's) available for filling.510assert(SafepointSynchronize::is_at_safepoint() ||511!is_init_completed(),512"Should only be called at a safepoint or at start-up"513" otherwise concurrent mutator activity may make heap "514" unparsable again");515const bool use_tlab = UseTLAB;516const bool deferred = _defer_initial_card_mark;517// The main thread starts allocating via a TLAB even before it518// has added itself to the threads list at vm boot-up.519assert(!use_tlab || Threads::first() != NULL,520"Attempt to fill tlabs before main thread has been added"521" to threads list is doomed to failure!");522for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {523if (use_tlab) thread->tlab().make_parsable(retire_tlabs);524#ifdef COMPILER2525// The deferred store barriers must all have been flushed to the526// card-table (or other remembered set structure) before GC starts527// processing the card-table (or other remembered set).528if (deferred) flush_deferred_store_barrier(thread);529#else530assert(!deferred, "Should be false");531assert(thread->deferred_card_mark().is_empty(), "Should be empty");532#endif533}534}535536void CollectedHeap::accumulate_statistics_all_tlabs() {537if (UseTLAB) {538assert(SafepointSynchronize::is_at_safepoint() ||539!is_init_completed(),540"should only accumulate statistics on tlabs at safepoint");541542ThreadLocalAllocBuffer::accumulate_statistics_before_gc();543}544}545546void CollectedHeap::resize_all_tlabs() {547if (UseTLAB) {548assert(SafepointSynchronize::is_at_safepoint() ||549!is_init_completed(),550"should only resize tlabs at safepoint");551552ThreadLocalAllocBuffer::resize_all_tlabs();553}554}555556void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {557if (HeapDumpBeforeFullGC) {558GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create());559// We are doing a "major" collection and a heap dump before560// major collection has been requested.561HeapDumper::dump_heap();562}563if (PrintClassHistogramBeforeFullGC) {564GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer, GCId::create());565VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);566inspector.doit();567}568}569570void CollectedHeap::post_full_gc_dump(GCTimer* timer) {571if (HeapDumpAfterFullGC) {572GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer, GCId::create());573HeapDumper::dump_heap();574}575if (PrintClassHistogramAfterFullGC) {576GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer, GCId::create());577VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);578inspector.doit();579}580}581582/////////////// Unit tests ///////////////583584#ifndef PRODUCT585void CollectedHeap::test_is_in() {586CollectedHeap* heap = Universe::heap();587588uintptr_t epsilon = (uintptr_t) MinObjAlignment;589uintptr_t heap_start = (uintptr_t) heap->_reserved.start();590uintptr_t heap_end = (uintptr_t) heap->_reserved.end();591592// Test that NULL is not in the heap.593assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");594595// Test that a pointer to before the heap start is reported as outside the heap.596assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");597void* before_heap = (void*)(heap_start - epsilon);598assert(!heap->is_in(before_heap),599err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", p2i(before_heap)));600601// Test that a pointer to after the heap end is reported as outside the heap.602assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");603void* after_heap = (void*)(heap_end + epsilon);604assert(!heap->is_in(after_heap),605err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", p2i(after_heap)));606}607#endif608609void CollectedHeap::shutdown() {610// Default implementation does nothing.611}612613void CollectedHeap::accumulate_statistics_all_gclabs() {614// Default implementation does nothing.615}616617bool CollectedHeap::supports_object_pinning() const {618return false;619}620621oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {622ShouldNotReachHere();623return NULL;624}625626void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {627ShouldNotReachHere();628}629630631