Path: src/hotspot/share/gc/g1/g1Allocator.cpp

/*
 * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1EvacuationInfo.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1NUMA.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heapRegionType.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "utilities/align.hpp"

G1Allocator::G1Allocator(G1CollectedHeap* heap) :
  _g1h(heap),
  _numa(heap->numa()),
  _survivor_is_full(false),
  _old_is_full(false),
  _num_alloc_regions(_numa->num_active_nodes()),
  _mutator_alloc_regions(NULL),
  _survivor_gc_alloc_regions(NULL),
  _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
  _retained_old_gc_alloc_region(NULL) {

  _mutator_alloc_regions = NEW_C_HEAP_ARRAY(MutatorAllocRegion, _num_alloc_regions, mtGC);
  _survivor_gc_alloc_regions = NEW_C_HEAP_ARRAY(SurvivorGCAllocRegion, _num_alloc_regions, mtGC);
  G1EvacStats* stat = heap->alloc_buffer_stats(G1HeapRegionAttr::Young);

  for (uint i = 0; i < _num_alloc_regions; i++) {
    ::new(_mutator_alloc_regions + i) MutatorAllocRegion(i);
    ::new(_survivor_gc_alloc_regions + i) SurvivorGCAllocRegion(stat, i);
  }
}

G1Allocator::~G1Allocator() {
  for (uint i = 0; i < _num_alloc_regions; i++) {
    _mutator_alloc_regions[i].~MutatorAllocRegion();
    _survivor_gc_alloc_regions[i].~SurvivorGCAllocRegion();
  }
  FREE_C_HEAP_ARRAY(MutatorAllocRegion, _mutator_alloc_regions);
  FREE_C_HEAP_ARRAY(SurvivorGCAllocRegion, _survivor_gc_alloc_regions);
}
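
// The constructor/destructor pair above relies on HotSpot's NEW_C_HEAP_ARRAY /
// FREE_C_HEAP_ARRAY, which only allocate and free raw storage: the elements
// must be constructed with placement new and torn down with explicit
// destructor calls. A minimal sketch of the same pattern, with a hypothetical
// element type T standing in for MutatorAllocRegion:
//
//   T* a = NEW_C_HEAP_ARRAY(T, n, mtGC);   // raw, uninitialized storage
//   for (uint i = 0; i < n; i++) {
//     ::new (a + i) T(i);                  // construct each element in place
//   }
//   // ... use a[0..n-1] ...
//   for (uint i = 0; i < n; i++) {
//     a[i].~T();                           // destroy each element in place
//   }
//   FREE_C_HEAP_ARRAY(T, a);               // release the raw storage
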
#ifdef ASSERT
bool G1Allocator::has_mutator_alloc_region() {
  uint node_index = current_node_index();
  return mutator_alloc_region(node_index)->get() != NULL;
}
#endif

void G1Allocator::init_mutator_alloc_regions() {
  for (uint i = 0; i < _num_alloc_regions; i++) {
    assert(mutator_alloc_region(i)->get() == NULL, "pre-condition");
    mutator_alloc_region(i)->init();
  }
}

void G1Allocator::release_mutator_alloc_regions() {
  for (uint i = 0; i < _num_alloc_regions; i++) {
    mutator_alloc_region(i)->release();
    assert(mutator_alloc_region(i)->get() == NULL, "post-condition");
  }
}

bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
  return _retained_old_gc_alloc_region == hr;
}

void G1Allocator::reuse_retained_old_region(G1EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)", retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

void G1Allocator::init_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
  assert_at_safepoint_on_vm_thread();

  _survivor_is_full = false;
  _old_is_full = false;

  for (uint i = 0; i < _num_alloc_regions; i++) {
    survivor_gc_alloc_region(i)->init();
  }

  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

void G1Allocator::release_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
  uint survivor_region_count = 0;
  for (uint node_index = 0; node_index < _num_alloc_regions; node_index++) {
    survivor_region_count += survivor_gc_alloc_region(node_index)->count();
    survivor_gc_alloc_region(node_index)->release();
  }
  evacuation_info.set_allocation_regions(survivor_region_count +
                                         old_gc_alloc_region()->count());

  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no reason
  // to check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region()->release();
}

void G1Allocator::abandon_gc_alloc_regions() {
  for (uint i = 0; i < _num_alloc_regions; i++) {
    assert(survivor_gc_alloc_region(i)->get() == NULL, "pre-condition");
  }
  assert(old_gc_alloc_region()->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

bool G1Allocator::survivor_is_full() const {
  return _survivor_is_full;
}

bool G1Allocator::old_is_full() const {
  return _old_is_full;
}

void G1Allocator::set_survivor_full() {
  _survivor_is_full = true;
}

void G1Allocator::set_old_full() {
  _old_is_full = true;
}
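
// Taken together, the functions above give the old GC alloc region a simple
// lifecycle across collections (a sketch of the existing flow, not new
// behavior):
//
//   init_gc_alloc_regions()    -> reuse_retained_old_region() may re-install
//                                 the region kept from the previous GC
//   release_gc_alloc_regions() -> _retained_old_gc_alloc_region = release()
//   abandon_gc_alloc_regions() -> _retained_old_gc_alloc_region = NULL
//
// Retaining a partially filled old region between collections lets the next
// evacuation continue filling it instead of wasting its remaining space.
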
size_t G1Allocator::unsafe_max_tlab_alloc() {
  // Return the remaining space in the cur alloc region, but not less than
  // the min TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow tlabs to grow big enough to accommodate
  // humongous objects.

  uint node_index = current_node_index();
  HeapRegion* hr = mutator_alloc_region(node_index)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return clamp(hr->free(), MinTLABSize, max_tlab);
  }
}

size_t G1Allocator::used_in_alloc_regions() {
  assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
  size_t used = 0;
  for (uint i = 0; i < _num_alloc_regions; i++) {
    used += mutator_alloc_region(i)->used_in_alloc_regions();
  }
  return used;
}

HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                              size_t word_size,
                                              uint node_index) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, node_index);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size,
                                              uint node_index) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, node_index);
    case G1HeapRegionAttr::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size,
                                                   uint node_index) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region(node_index)->attempt_allocation(min_word_size,
                                                                              desired_word_size,
                                                                              actual_word_size);
  if (result == NULL && !survivor_is_full()) {
    MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region(node_index)->attempt_allocation_locked(min_word_size,
                                                                             desired_word_size,
                                                                             actual_word_size);
    if (result == NULL) {
      set_survivor_full();
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region()->attempt_allocation(min_word_size,
                                                               desired_word_size,
                                                               actual_word_size);
  if (result == NULL && !old_is_full()) {
    MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region()->attempt_allocation_locked(min_word_size,
                                                              desired_word_size,
                                                              actual_word_size);
    if (result == NULL) {
      set_old_full();
    }
  }
  return result;
}
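
// Both allocation paths above share the same two-step shape: a fast attempt
// into the current alloc region without taking a lock, then a retry under
// FreeList_lock that may install a fresh region. A sketch of that shape,
// where Region stands in for the *GCAllocRegion types:
//
//   HeapWord* result = region->attempt_allocation(...);  // lock-free fast path
//   if (result == NULL && !full_flag) {
//     MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
//     result = region->attempt_allocation_locked(...);   // may take a new region
//     if (result == NULL) {
//       full_flag = true;                                // stop further retries
//     }
//   }
//
// Once the corresponding "full" flag is set, later requests of that type skip
// the locked retry entirely, so an exhausted destination cannot cause repeated
// FreeList_lock contention within a single GC.
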
G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator) {
  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
    _direct_allocated[state] = 0;
    uint length = alloc_buffers_length(state);
    _alloc_buffers[state] = NEW_C_HEAP_ARRAY(PLAB*, length, mtGC);
    for (uint node_index = 0; node_index < length; node_index++) {
      _alloc_buffers[state][node_index] = new PLAB(_g1h->desired_plab_sz(state));
    }
  }
}

G1PLABAllocator::~G1PLABAllocator() {
  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
    uint length = alloc_buffers_length(state);
    for (uint node_index = 0; node_index < length; node_index++) {
      delete _alloc_buffers[state][node_index];
    }
    FREE_C_HEAP_ARRAY(PLAB*, _alloc_buffers[state]);
  }
}

bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                                       size_t word_sz,
                                                       bool* plab_refill_failed,
                                                       uint node_index) {
  size_t plab_word_size = _g1h->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
      may_throw_away_buffer(required_in_plab, plab_word_size)) {

    PLAB* alloc_buf = alloc_buffer(dest, node_index);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size,
                                                       node_index);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
                          SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
             word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, node_index);
  if (result != NULL) {
    _direct_allocated[dest.type()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz, uint node_index) {
  alloc_buffer(dest, node_index)->undo_allocation(obj, word_sz);
}
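
// Worked example for may_throw_away_buffer() above, assuming the default
// ParallelGCBufferWastePct of 10 and a desired PLAB size of 1024 words:
//
//   request of  50 words:  50 * 100 = 5000  < 1024 * 10 = 10240  -> refill PLAB
//   request of 200 words: 200 * 100 = 20000 >= 10240             -> allocate directly
//
// That is, the current buffer is only thrown away for a fresh one when the
// request is smaller than ParallelGCBufferWastePct percent of a full buffer;
// larger requests bypass the PLAB so that its remaining space is not wasted.
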
void G1PLABAllocator::flush_and_retire_stats() {
  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
    G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
    for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
      PLAB* const buf = alloc_buffer(state, node_index);
      if (buf != NULL) {
        buf->flush_and_retire_stats(stats);
      }
    }
    stats->add_direct_allocated(_direct_allocated[state]);
    _direct_allocated[state] = 0;
  }
}

size_t G1PLABAllocator::waste() const {
  size_t result = 0;
  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
    for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
      PLAB* const buf = alloc_buffer(state, node_index);
      if (buf != NULL) {
        result += buf->waste();
      }
    }
  }
  return result;
}

size_t G1PLABAllocator::undo_waste() const {
  size_t result = 0;
  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
    for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
      PLAB* const buf = alloc_buffer(state, node_index);
      if (buf != NULL) {
        result += buf->undo_waste();
      }
    }
  }
  return result;
}

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h, bool open) {
  return new G1ArchiveAllocator(g1h, open);
}

bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the archive set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  if (_open) {
    hr->set_open_archive();
  } else {
    hr->set_closed_archive();
  }
  _g1h->policy()->remset_tracker()->update_at_allocate(hr);
  _g1h->archive_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Since we've modified the archive set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}
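
// Note on the chunking set up above: [_bottom, _max) always spans a single
// HeapRegion::min_region_size_in_words() chunk, and archive_mem_allocate()
// below consumes a region one such chunk at a time, retiring (and, if
// necessary, gap-filling) each chunk before opening the next. For
// illustration, with a hypothetical 8M region size and a 1M minimum region
// size, each archive region is used as eight consecutive 1M chunks.
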
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Try to allocate word_size in the current allocation chunk. Two cases
  // require special treatment:
  // 1. not enough space for word_size
  // 2. after allocating word_size, there is non-zero space left, but it is
  //    too small for the minimal filler
  // In both cases, we retire the current chunk and move on to the next one.
  size_t free_words = pointer_delta(_max, old_top);
  if (free_words < word_size ||
      ((free_words - word_size != 0) && (free_words - word_size < CollectedHeap::min_fill_size()))) {
    // Retiring the current chunk
    if (old_top != _max) {
      // Non-zero space; need to insert the filler
      size_t fill_size = free_words;
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    // Set the current chunk as "full"
    _allocation_region->set_top(_max);

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_max != _allocation_region->end()) {
      // Shift to the next chunk
      old_top = _bottom = _max;
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  assert(pointer_delta(_max, old_top) >= word_size, "enough space left");
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = align_up(currtop + CollectedHeap::min_fill_size(),
                          end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }
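
  // For illustration of the range-combining loop below: alloc_new_region()
  // always takes the highest free region, so _allocated_regions holds regions
  // in descending address order, and walking the array from its end visits
  // them lowest-address first. With hypothetical regions at bottoms 0x400000,
  // 0x600000 and 0x700000, the upper two completely full and the lowest only
  // partly used, the loop emits the partial range at 0x400000 as soon as it
  // sees that 0x600000 does not begin at that range's top, then accumulates
  // the contiguous 0x600000/0x700000 pair, which the final append after the
  // loop emits as a single MemRegion.
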
  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}