Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/collectorPolicy.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCHeapSummary;
class GCTaskManager;
class PSAdaptiveSizePolicy;
class PSHeapSummary;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy*       _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  GenerationSizer* _collector_policy;

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  unsigned int _death_march_count;

  // The task manager
  static GCTaskManager* _gc_task_manager;

  void trace_heap(GCWhen::Type when, GCTracer* tracer);

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) { }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen()     { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();

  // The alignment used for the various areas
  size_t space_alignment()      { return _collector_policy->space_alignment(); }
  size_t generation_alignment() { return _collector_policy->gen_alignment(); }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return CollectorPolicy::compute_heap_alignment();
  }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection. For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_in_young(oop p);  // reserved part
  bool is_in_old(oop p);    // reserved part

  // Memory allocation. "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections,
  // causing a NULL to be returned. If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);

  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // This should also be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // Collects the young generation only, unless the scavenge fails; it will
  // then attempt a full gc.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t tlab_used(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
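Usage note (not part of the file above): a minimal sketch of how VM-internal code reaches this heap through the static singleton accessor declared in the header. The function print_ps_heap_usage is hypothetical; heap(), capacity(), used(), and is_in() are declared above, while SIZE_FORMAT and K are assumed from HotSpot's globalDefinitions.hpp, pulled in transitively by this header.

#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"

// Hypothetical helper: report how full the ParallelScavenge heap is and
// whether a given address lies in its allocated region. Only meaningful in
// VM-internal code running with -XX:+UseParallelGC, since heap() (defined
// in the .cpp file) expects the collected heap to be a ParallelScavengeHeap.
static void print_ps_heap_usage(outputStream* st, const void* probe) {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // capacity()/used() cover both generations; K is HotSpot's constant 1024.
  st->print_cr("capacity: " SIZE_FORMAT "K, used: " SIZE_FORMAT "K",
               heap->capacity() / K, heap->used() / K);
  st->print_cr("probe is %sin the allocated part of the heap",
               heap->is_in(probe) ? "" : "not ");
}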