// Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP

#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/padded.hpp"
#include "utilities/taskqueue.hpp"

class ChunkArray;
class ParScanWithoutBarrierClosure;
class ParScanWithBarrierClosure;
class ParRootScanWithoutBarrierClosure;
class ParRootScanWithBarrierTwoGensClosure;
class ParEvacuateFollowersClosure;

// It would be better if these types could be kept local to the .cpp file,
// but they must be here to allow ParScanClosure::do_oop_work to be defined
// in genOopClosures.inline.hpp.

// Per-worker scan queue; Padded to avoid false sharing between workers.
typedef Padded<OopTaskQueue> ObjToScanQueue;
typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;

// Keep-alive closure for parallel reference processing: a variant of
// DefNewGeneration::KeepAliveClosure that delegates its per-oop work
// through the ParScanWeakRefClosure supplied at construction.
// (do_oop_work is defined out of line; see the .inline/.cpp files.)
class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
 private:
  ParScanWeakRefClosure* _par_cl;
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  ParKeepAliveClosure(ParScanWeakRefClosure* cl);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// The state needed by thread performing parallel young-gen collection.
class ParScanThreadState {
  friend class ParScanThreadStateSet;
 private:
  ObjToScanQueue* _work_queue;                 // this worker's object-scan queue
  Stack<oop, mtGC>* const _overflow_stack;     // private overflow stack (may be NULL)

  ParGCAllocBuffer _to_space_alloc_buffer;     // per-thread PLAB for to-space copies

  ParScanWithoutBarrierClosure     _to_space_closure;      // scan_without_gc_barrier
  ParScanWithBarrierClosure        _old_gen_closure;       // scan_with_gc_barrier
  ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier
  // One of these two will be passed to process_roots, which will
  // set its generation.  The first is for two-gen configs where the
  // old gen collects the perm gen; the second is for arbitrary configs.
  // The second isn't used right now (it used to be used for the train, an
  // incremental collector) but the declaration has been left as a reminder.
  ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
  // This closure will always be bound to the old gen; it will be used
  // in evacuate_followers.
  ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier
  ParEvacuateFollowersClosure      _evacuate_followers;
  DefNewGeneration::IsAliveClosure _is_alive_closure;
  ParScanWeakRefClosure            _scan_weak_ref_closure;
  ParKeepAliveClosure              _keep_alive_closure;

  Space* _to_space;
  Space* to_space() { return _to_space; }

  ParNewGeneration* _young_gen;
  ParNewGeneration* young_gen() const { return _young_gen; }

  Generation* _old_gen;
  Generation* old_gen() { return _old_gen; }

  HeapWord *_young_old_boundary;

  int _hash_seed;      // seed for work-stealing victim selection
  int _thread_num;     // this worker's index
  ageTable _ageTable;  // per-thread survivor age table

  bool _to_space_full; // set once a to-space allocation has failed

#if TASKQUEUE_STATS
  size_t _term_attempts;
  size_t _overflow_refills;
  size_t _overflow_refill_objs;
#endif // TASKQUEUE_STATS

  // Stats for promotion failure
  PromotionFailedInfo _promotion_failed_info;

  // Timing numbers.
  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Helper for trim_queues. Scans subset of an array and makes
  // remainder available for work stealing.
  void scan_partial_array_and_push_remainder(oop obj);

  // In support of CMS' parallel rescan of survivor space.
  ChunkArray* _survivor_chunk_array;
  ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }

  void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);

  // Constructed only by ParScanThreadStateSet (friend); one instance per worker.
  ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
                     Generation* old_gen_, int thread_num_,
                     ObjToScanQueueSet* work_queue_set_,
                     Stack<oop, mtGC>* overflow_stacks_,
                     size_t desired_plab_sz_,
                     ParallelTaskTerminator& term_);

 public:
  ageTable* age_table() {return &_ageTable;}

  ObjToScanQueue* work_queue() { return _work_queue; }

  ParGCAllocBuffer* to_space_alloc_buffer() {
    return &_to_space_alloc_buffer;
  }

  // Accessors for the closures bundled in this per-thread state.
  ParEvacuateFollowersClosure&      evacuate_followers_closure() { return _evacuate_followers; }
  DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
  ParScanWeakRefClosure&            scan_weak_ref_closure() { return _scan_weak_ref_closure; }
  ParKeepAliveClosure&              keep_alive_closure() { return _keep_alive_closure; }
  ParScanClosure&                   older_gen_closure() { return _older_gen_closure; }
  ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; };

  // Decrease queue size below "max_size".
  void trim_queues(int max_size);

  // Private overflow stack usage
  Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; }
  bool take_from_overflow_stack();
  void push_on_overflow_stack(oop p);

  // Is new_obj a candidate for scan_partial_array_and_push_remainder method.
  inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;

  int* hash_seed()  { return &_hash_seed; }
  int  thread_num() { return _thread_num; }

  // Allocate a to-space block of size "sz", or else return NULL.
  HeapWord* alloc_in_to_space_slow(size_t word_sz);

  // Fast path: try the per-thread PLAB first (survivor-aligned), then
  // fall back to the slow path on failure.
  HeapWord* alloc_in_to_space(size_t word_sz) {
    HeapWord* obj = to_space_alloc_buffer()->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
    if (obj != NULL) return obj;
    else return alloc_in_to_space_slow(word_sz);
  }

  HeapWord* young_old_boundary() { return _young_old_boundary; }

  void set_young_old_boundary(HeapWord *boundary) {
    _young_old_boundary = boundary;
  }

  // Undo the most recent allocation ("obj", of "word_sz").
  void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);

  // Promotion failure stats
  void register_promotion_failure(size_t sz) {
    _promotion_failed_info.register_copy_failure(sz);
  }
  PromotionFailedInfo& promotion_failed_info() {
    return _promotion_failed_info;
  }
  bool promotion_failed() {
    return _promotion_failed_info.has_failed();
  }
  void print_promotion_failure_size();

#if TASKQUEUE_STATS
  TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; }

  size_t term_attempts() const        { return _term_attempts; }
  size_t overflow_refills() const     { return _overflow_refills; }
  size_t overflow_refill_objs() const { return _overflow_refill_objs; }

  void note_term_attempt() { ++_term_attempts; }
  void note_overflow_refill(size_t objs) {
    ++_overflow_refills; _overflow_refill_objs += objs;
  }

  void reset_stats();
#endif // TASKQUEUE_STATS

  // Timing bookkeeping: strong-roots and termination phases are each
  // bracketed by a start_*/end_* pair accumulating into *_time.
  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }
  double strong_roots_time() const { return _strong_roots_time; }
  void start_term_time() {
    TASKQUEUE_STATS_ONLY(note_term_attempt());
    _start_term = os::elapsedTime();
  }
  void end_term_time() {
    _term_time += (os::elapsedTime() - _start_term);
  }
  double term_time() const { return _term_time; }

  double elapsed_time() const {
    return os::elapsedTime() - _start;
  }
};

// The gang task run by the GC worker threads for a parallel young-gen
// collection; work(worker_id) is the per-worker entry point (body in .cpp).
class ParNewGenTask: public AbstractGangTask {
 private:
  ParNewGeneration*            _gen;
  Generation*                  _next_gen;
  HeapWord*                    _young_old_boundary;
  class ParScanThreadStateSet* _state_set;

 public:
  ParNewGenTask(ParNewGeneration*      gen,
                Generation*            next_gen,
                HeapWord*              young_old_boundary,
                ParScanThreadStateSet* state_set);

  HeapWord* young_old_boundary() { return _young_old_boundary; }

  void work(uint worker_id);

  // Reset the terminator in ParScanThreadStateSet for
  // "active_workers" threads.
  virtual void set_for_termination(int active_workers);
};

// Keep-alive closure built over a (serial) ScanWeakRefClosure —
// the single-threaded counterpart of ParKeepAliveClosure above.
class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  KeepAliveClosure(ScanWeakRefClosure* cl);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// VoidClosure driving evacuation of followers at generation "_level",
// using the supplied current/non-heap and older-generation closures.
// NOTE(review): do_void's iteration strategy lives in the .cpp file.
class EvacuateFollowersClosureGeneral: public VoidClosure {
 private:
  GenCollectedHeap* _gch;
  int               _level;
  OopsInGenClosure* _scan_cur_or_nonheap;
  OopsInGenClosure* _scan_older;
 public:
  EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                  OopsInGenClosure* cur,
                                  OopsInGenClosure* older);
  virtual void do_void();
};

// Closure for scanning ParNewGeneration.
// Same as ScanClosure, except does parallel GC barrier.
class ScanClosureWithParBarrier: public ScanClosure {
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// Implements AbstractRefProcTaskExecutor for ParNew.
class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
  ParNewGeneration&      _generation;
  ParScanThreadStateSet& _state_set;
 public:
  ParNewRefProcTaskExecutor(ParNewGeneration& generation,
                            ParScanThreadStateSet& state_set)
    : _generation(generation), _state_set(state_set)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
  // Switch to single threaded mode.
  virtual void set_single_threaded_mode();
};


// A Generation that does parallel young-gen collection.

class ParNewGeneration: public DefNewGeneration {
  friend class ParNewGenTask;
  friend class ParNewRefProcTask;
  friend class ParNewRefProcTaskExecutor;
  friend class ParScanThreadStateSet;
  friend class ParEvacuateFollowersClosure;

 private:
  // The per-worker-thread work queues
  ObjToScanQueueSet* _task_queues;

  // Per-worker-thread local overflow stacks
  Stack<oop, mtGC>* _overflow_stacks;

  // Desired size of survivor space plab's
  PLABStats _plab_stats;

  // A list of from-space images of to-be-scanned objects, threaded through
  // klass-pointers (klass information already copied to the forwarded
  // image.)  Manipulated with CAS.
  oop _overflow_list;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // If true, older generation does not support promotion undo, so avoid.
  static bool _avoid_promotion_undo;

  // This closure is used by the reference processor to filter out
  // references to live referent.
  DefNewGeneration::IsAliveClosure _is_alive_closure;

  static oop real_forwardee_slow(oop obj);
  static void waste_some_time();

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void preserve_mark_if_necessary(oop obj, markOop m);

  void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer);

 protected:

  bool _survivor_overflow;

  bool avoid_promotion_undo() { return _avoid_promotion_undo; }
  void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }

  bool survivor_overflow() { return _survivor_overflow; }
  void set_survivor_overflow(bool v) { _survivor_overflow = v; }

 public:
  ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);

  // Tears down the per-worker task queues and the queue set itself.
  ~ParNewGeneration() {
    for (uint i = 0; i < ParallelGCThreads; i++)
        delete _task_queues->queue(i);

    delete _task_queues;
  }

  virtual void ref_processor_init();
  virtual Generation::Name kind()        { return Generation::ParNew; }
  virtual const char* name() const;
  virtual const char* short_name() const { return "ParNew"; }

  // override
  virtual bool refs_discovery_is_mt() const {
    assert(UseParNewGC, "ParNewGeneration only when UseParNewGC");
    return ParallelGCThreads > 1;
  }

  // Make the collection virtual.
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);

  // This needs to be visible to the closure function.
  // "obj" is the object to be copied, "m" is a recent value of its mark
  // that must not contain a forwarding pointer (though one might be
  // inserted in "obj"s mark word by a parallel thread).
  inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
                             oop obj, size_t obj_sz, markOop m) {
    // Dispatch on whether the old gen supports promotion undo.
    if (_avoid_promotion_undo) {
       return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
                                                             obj, obj_sz, m);
    }

    return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
  }

  oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
                             oop obj, size_t obj_sz, markOop m);

  oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
                             oop obj, size_t obj_sz, markOop m);

  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool should_simulate_overflow();)

  // Accessor for overflow list
  oop overflow_list() { return _overflow_list; }

  // Push the given (from-space) object on the global overflow list.
  void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);

  // If the global overflow list is non-empty, move some tasks from it
  // onto "work_q" (which need not be empty).  No more than 1/4 of the
  // available space on "work_q" is used.
  bool take_from_overflow_list(ParScanThreadState* par_scan_state);
  bool take_from_overflow_list_work(ParScanThreadState* par_scan_state);

  // The task queues to be used by parallel GC threads.
  ObjToScanQueueSet* task_queues() {
    return _task_queues;
  }

  PLABStats* plab_stats() {
    return &_plab_stats;
  }

  size_t desired_plab_sz() {
    return _plab_stats.desired_plab_sz();
  }

  static oop real_forwardee(oop obj);

  DEBUG_ONLY(static bool is_legal_forward_ptr(oop p);)
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP