Path: blob/master/src/hotspot/share/gc/shared/collectedHeap.cpp
/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/perfData.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class ClassLoaderData;

size_t CollectedHeap::_filler_array_max_size = 0;

class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;
};

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

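// GCHeapLog keeps a small ring of before/after-GC heap printouts for the VM
// event log (surfaced as "GC Heap History", e.g. in hs_err files). Each
// record reuses a preallocated FormatBuffer, so taking a snapshot does not
// allocate, which is presumably why it is safe to record from GC code.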
"before" : "after");72st->print_raw(m);73}7475class GCHeapLog : public EventLogBase<GCMessage> {76private:77void log_heap(CollectedHeap* heap, bool before);7879public:80GCHeapLog() : EventLogBase<GCMessage>("GC Heap History", "gc") {}8182void log_heap_before(CollectedHeap* heap) {83log_heap(heap, true);84}85void log_heap_after(CollectedHeap* heap) {86log_heap(heap, false);87}88};8990void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {91if (!should_log()) {92return;93}9495double timestamp = fetch_timestamp();96MutexLocker ml(&_mutex, Mutex::_no_safepoint_check_flag);97int index = compute_log_index();98_records[index].thread = NULL; // Its the GC thread so it's not that interesting.99_records[index].timestamp = timestamp;100_records[index].data.is_before = before;101stringStream st(_records[index].data.buffer(), _records[index].data.size());102103st.print_cr("{Heap %s GC invocations=%u (full %u):",104before ? "before" : "after",105heap->total_collections(),106heap->total_full_collections());107108heap->print_on(&st);109st.print_cr("}");110}111112size_t CollectedHeap::unused() const {113MutexLocker ml(Heap_lock);114return capacity() - used();115}116117VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {118size_t capacity_in_words = capacity() / HeapWordSize;119120return VirtualSpaceSummary(121_reserved.start(), _reserved.start() + capacity_in_words, _reserved.end());122}123124GCHeapSummary CollectedHeap::create_heap_summary() {125VirtualSpaceSummary heap_space = create_heap_space_summary();126return GCHeapSummary(heap_space, used());127}128129MetaspaceSummary CollectedHeap::create_metaspace_summary() {130const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =131MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);132const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =133MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);134return MetaspaceSummary(MetaspaceGC::capacity_until_GC(),135MetaspaceUtils::get_combined_statistics(),136ms_chunk_free_list_summary, class_chunk_free_list_summary);137}138139void CollectedHeap::print_heap_before_gc() {140LogTarget(Debug, gc, heap) lt;141if (lt.is_enabled()) {142LogStream ls(lt);143ls.print_cr("Heap before GC invocations=%u (full %u):", total_collections(), total_full_collections());144ResourceMark rm;145print_on(&ls);146}147148if (_gc_heap_log != NULL) {149_gc_heap_log->log_heap_before(this);150}151}152153void CollectedHeap::print_heap_after_gc() {154LogTarget(Debug, gc, heap) lt;155if (lt.is_enabled()) {156LogStream ls(lt);157ls.print_cr("Heap after GC invocations=%u (full %u):", total_collections(), total_full_collections());158ResourceMark rm;159print_on(&ls);160}161162if (_gc_heap_log != NULL) {163_gc_heap_log->log_heap_after(this);164}165}166167void CollectedHeap::print() const { print_on(tty); }168169void CollectedHeap::print_on_error(outputStream* st) const {170st->print_cr("Heap:");171print_extended_on(st);172st->cr();173174BarrierSet* bs = BarrierSet::barrier_set();175if (bs != NULL) {176bs->print_on(st);177}178}179180void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {181const GCHeapSummary& heap_summary = create_heap_summary();182gc_tracer->report_gc_heap_summary(when, heap_summary);183184const MetaspaceSummary& metaspace_summary = create_metaspace_summary();185gc_tracer->report_metaspace_summary(when, metaspace_summary);186}187188void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {189trace_heap(GCWhen::BeforeGC, 
bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  if (is_in(object->klass_or_null())) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _capacity_at_last_gc(0),
  _used_at_last_gc(0),
  _is_gc_active(false),
  _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  Thread* thread = Thread::current();
  assert(thread->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm(thread);
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_archive_time_gc:
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm(thread);
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

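// Slow path for metaspace allocation, reached after an inline attempt has
// failed. Each iteration retries the allocation and, on failure, either
// expands the metaspace (or stalls) while the GCLocker is active, or
// schedules a VM_CollectForMetadataAllocation, which performs a GC and
// retries the allocation inside the safepoint. NULL comes back only from a
// GCLocker deadlock hazard or from the VM operation's own failed retry.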
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    { // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}

void CollectedHeap::set_gc_cause(GCCause::Cause v) {
  if (UsePerfData) {
    _gc_lastcause = _gc_cause;
    _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
    _perf_gc_cause->set_value(GCCause::to_string(v));
  }
  _gc_cause = v;
}

#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // please note mismatch between size (in 32/64 bit words), and ju_addr that always points to a 32 bit word
    for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
      assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

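// Illustration of the overflow that max_tlab_size() below works around,
// assuming a 32-bit VM (4-byte size_t, HeapWordSize == 4):
//   sizeof(jint) * max_jint       == 4 * 0x7FFFFFFF  -> wraps past SIZE_MAX
//   sizeof(jint) * (max_jint / 4) ==     0x7FFFFFFC  -> fits
// Dividing first undercounts by at most HeapWordSize - 1 elements, which
// only makes the computed TLAB bound slightly conservative.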
size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(vmClasses::Object_klass(), words);
    allocator.initialize(start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current());  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

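// Filling a region larger than the biggest filler array takes several
// objects. The splitting in fill_with_objects() below never leaves a tail
// smaller than min_fill_size(): when carving off a full max-sized chunk
// would do so, a (max - min)-sized chunk is carved off instead, keeping the
// remainder fillable.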
void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current());  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}

size_t CollectedHeap::min_dummy_object_size() const {
  return oopDesc::header_size();
}

size_t CollectedHeap::tlab_alloc_reserve() const {
  size_t min_size = min_dummy_object_size();
  return min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

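// Heap walks (e.g. VM_GC_HeapInspection) require a parsable heap: the
// unallocated tail of every thread's TLAB is plugged with a filler object so
// a linear scan never runs into unformatted memory. retire() additionally
// hands the buffer back and records its statistics, while make_parsable()
// only plugs the gap.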
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only be called at a safepoint or at start-up");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next();) {
    BarrierSet::barrier_set()->make_parsable(thread);
    if (UseTLAB) {
      if (retire_tlabs) {
        thread->tlab().retire(&stats);
      } else {
        thread->tlab().make_parsable();
      }
    }
  }

  stats.publish();
}

void CollectedHeap::resize_all_tlabs() {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only resize tlabs at safepoint");

  if (UseTLAB && ResizeTLAB) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
      thread->tlab().resize();
    }
  }
}

jlong CollectedHeap::millis_since_last_whole_heap_examined() {
  return (os::javaTimeNanos() - _last_whole_heap_examined_time_ns) / NANOSECS_PER_MILLISEC;
}

void CollectedHeap::record_whole_heap_examined_timestamp() {
  _last_whole_heap_examined_time_ns = os::javaTimeNanos();
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)rs.base());
  _reserved.set_end((HeapWord*)rs.end());
}

void CollectedHeap::post_initialize() {
  StringDedup::initialize();
  initialize_serviceability();
}

#ifndef PRODUCT

bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif // #ifndef PRODUCT

bool CollectedHeap::supports_object_pinning() const {
  return false;
}

oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
  return NULL;
}

void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
}

bool CollectedHeap::is_archived_object(oop object) const {
  return false;
}

uint32_t CollectedHeap::hash_oop(oop obj) const {
  const uintptr_t addr = cast_from_oop<uintptr_t>(obj);
  return static_cast<uint32_t>(addr >> LogMinObjAlignment);
}

// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void CollectedHeap::update_capacity_and_used_at_gc() {
  _capacity_at_last_gc = capacity();
  _used_at_last_gc = used();
}