Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"

// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;
protected:
  G1CollectedHeap* _g1h;

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region.
  size_t _summary_bytes_used;

public:
  G1Allocator(G1CollectedHeap* heap) :
    _g1h(heap), _summary_bytes_used(0) { }

  static G1Allocator* create_allocator(G1CollectedHeap* g1h);

  virtual void init_mutator_alloc_region() = 0;
  virtual void release_mutator_alloc_region() = 0;

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
  virtual void abandon_gc_alloc_regions() = 0;

  virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
  virtual OldGCAllocRegion*      old_gc_alloc_region(AllocationContext_t context) = 0;
  virtual size_t                 used() = 0;
  virtual bool                   is_retained_old_region(HeapRegion* hr) = 0;

  void reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                 OldGCAllocRegion* old,
                                 HeapRegion** retained);

  size_t used_unlocked() const {
    return _summary_bytes_used;
  }

  void increase_used(size_t bytes) {
    _summary_bytes_used += bytes;
  }

  void decrease_used(size_t bytes) {
    assert(_summary_bytes_used >= bytes,
           err_msg("invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
                   _summary_bytes_used, bytes));
    _summary_bytes_used -= bytes;
  }

  void set_used(size_t bytes) {
    _summary_bytes_used = bytes;
  }

  virtual HeapRegion* new_heap_region(uint hrs_index,
                                      G1BlockOffsetSharedArray* sharedOffsetArray,
                                      MemRegion mr) {
    return new HeapRegion(hrs_index, sharedOffsetArray, mr);
  }
};

// The default allocator for G1.
class G1DefaultAllocator : public G1Allocator {
protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

  HeapRegion* _retained_old_gc_alloc_region;
public:
  G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
    return &_survivor_gc_alloc_region;
  }

  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
    return &_old_gc_alloc_region;
  }

  virtual size_t used() {
    assert(Heap_lock->owner() != NULL,
           "Should be owned on this thread's behalf.");
    size_t result = _summary_bytes_used;

    // Read only once in case it is set to NULL concurrently
    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};

class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
  bool _retired;

public:
  G1ParGCAllocBuffer(size_t gclab_word_size);
  virtual ~G1ParGCAllocBuffer() {
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  virtual void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  virtual void retire(bool end_of_gc, bool retain) {
    if (_retired) {
      return;
    }
    ParGCAllocBuffer::retire(end_of_gc, retain);
    _retired = true;
  }
};

class G1ParGCAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
  // != 0 : align survivors to that alignment
  // These values were chosen to favor the non-alignment case since some
  // architectures have a special compare against zero instructions.
  const uint _survivor_alignment_bytes;

  size_t _alloc_buffer_waste;
  size_t _undo_waste;

  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
  void add_to_undo_waste(size_t waste) { _undo_waste += waste; }

  virtual void retire_alloc_buffers() = 0;
  virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;

  // Calculate the survivor space object alignment in bytes. Returns that or 0 if
  // there are no restrictions on survivor alignment.
  static uint calc_survivor_alignment_bytes() {
    assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
    if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
      // No need to align objects in the survivors differently, return 0
      // which means "survivor alignment is not used".
      return 0;
    } else {
      assert(SurvivorAlignmentInBytes > 0, "sanity");
      return SurvivorAlignmentInBytes;
    }
  }

public:
  G1ParGCAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
    _alloc_buffer_waste(0), _undo_waste(0) {
  }

  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);

  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
  size_t undo_waste() { return _undo_waste; }

  // Allocate word_sz words in dest, either directly into the regions or by
  // allocating a new PLAB. Returns the address of the allocated memory, NULL if
  // not successful.
  HeapWord* allocate_direct_or_new_plab(InCSetState dest,
                                        size_t word_sz,
                                        AllocationContext_t context);

  // Allocate word_sz words in the PLAB of dest. Returns the address of the
  // allocated memory, NULL if not successful.
  HeapWord* plab_allocate(InCSetState dest,
                          size_t word_sz,
                          AllocationContext_t context) {
    G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
    if (_survivor_alignment_bytes == 0) {
      return buffer->allocate(word_sz);
    } else {
      return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
    }
  }

  HeapWord* allocate(InCSetState dest, size_t word_sz,
                     AllocationContext_t context) {
    HeapWord* const obj = plab_allocate(dest, word_sz, context);
    if (obj != NULL) {
      return obj;
    }
    return allocate_direct_or_new_plab(dest, word_sz, context);
  }

  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
    if (alloc_buffer(dest, context)->contains(obj)) {
      assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }
};

class G1DefaultParGCAllocator : public G1ParGCAllocator {
  G1ParGCAllocBuffer  _surviving_alloc_buffer;
  G1ParGCAllocBuffer  _tenured_alloc_buffer;
  G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];

public:
  G1DefaultParGCAllocator(G1CollectedHeap* g1h);

  virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) {
    assert(dest.is_valid(),
           err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
    assert(_alloc_buffers[dest.value()] != NULL,
           err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
    return _alloc_buffers[dest.value()];
  }

  virtual void retire_alloc_buffers();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
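
For orientation, the central pattern in this header is the two-level allocation in G1ParGCAllocator::allocate(): try the per-destination PLAB via plab_allocate() first, and only on failure fall back to allocate_direct_or_new_plab(). Below is a minimal, self-contained sketch of that fast-path/slow-path shape; ToyPlab, ToyAllocator, and the toy HeapWord typedef are hypothetical simplifications for illustration, not HotSpot types.

// Illustrative sketch only -- ToyPlab and ToyAllocator are hypothetical
// stand-ins, not HotSpot classes; none of the real headers are used.
#include <cstddef>
#include <iostream>

typedef unsigned long HeapWord;  // toy substitute for HotSpot's heap word

// A toy PLAB: a bump-pointer buffer, loosely analogous to G1ParGCAllocBuffer.
class ToyPlab {
  HeapWord* _top;
  HeapWord* _end;
public:
  ToyPlab(HeapWord* start, size_t words) : _top(start), _end(start + words) { }

  // Fast-path allocation: bump the pointer, or return NULL when exhausted.
  HeapWord* allocate(size_t word_sz) {
    if (_top + word_sz > _end) {
      return NULL;
    }
    HeapWord* obj = _top;
    _top += word_sz;
    return obj;
  }
};

// Mirrors the shape of G1ParGCAllocator::allocate(): PLAB first, slow path second.
class ToyAllocator {
  ToyPlab _plab;
public:
  ToyAllocator(const ToyPlab& plab) : _plab(plab) { }

  // Stand-in for allocate_direct_or_new_plab(): in real G1 this either
  // allocates directly in the destination region or retires and refills the PLAB.
  HeapWord* allocate_slow(size_t word_sz) {
    static HeapWord backing[1024];
    static size_t used = 0;
    if (used + word_sz > 1024) {
      return NULL;
    }
    HeapWord* obj = &backing[used];
    used += word_sz;
    return obj;
  }

  HeapWord* allocate(size_t word_sz) {
    HeapWord* obj = _plab.allocate(word_sz);  // fast path: thread-local PLAB
    if (obj != NULL) {
      return obj;
    }
    return allocate_slow(word_sz);            // slow path: direct or new PLAB
  }
};

int main() {
  static HeapWord plab_backing[8];
  ToyAllocator allocator(ToyPlab(plab_backing, 8));
  HeapWord* first  = allocator.allocate(6);   // fits in the PLAB (fast path)
  HeapWord* second = allocator.allocate(6);   // PLAB exhausted (slow path)
  std::cout << "fast path ok: " << (first != NULL)
            << ", slow path ok: " << (second != NULL) << std::endl;
  return 0;
}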