Path: blob/master/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp
/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/space.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/quickSort.hpp"

// Order regions according to GC efficiency. This will cause regions with a lot
// of live objects and large remembered sets to end up at the end of the array.
// Given that we might skip collecting the last few old regions, if after a few
// mixed GCs the remaining have reclaimable bytes under a certain threshold, the
// hope is that the ones we'll skip are ones with both large remembered sets and
// a lot of live objects, not the ones with just a lot of live objects if we
// ordered according to the amount of reclaimable bytes per region.
static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
  // Make sure that NULL entries are moved to the end.
  if (hr1 == NULL) {
    if (hr2 == NULL) {
      return 0;
    } else {
      return 1;
    }
  } else if (hr2 == NULL) {
    return -1;
  }

  double gc_eff1 = hr1->gc_efficiency();
  double gc_eff2 = hr2->gc_efficiency();

  if (gc_eff1 > gc_eff2) {
    return -1;
  } else if (gc_eff1 < gc_eff2) {
    return 1;
  } else {
    return 0;
  }
}

// Determine collection set candidates: For all regions determine whether they
// should be collection set candidates, calculate their efficiency, sort and
// return them as a G1CollectionSetCandidates instance.
// Threads calculate the GC efficiency of the regions they get to process, and
// put them into some work area unsorted. At the end the array is sorted and
// copied into the G1CollectionSetCandidates instance; the caller will be the new
// owner of this object.
class G1BuildCandidateRegionsTask : public AbstractGangTask {

  // Work area for building the set of collection set candidates. Contains references
  // to heap regions with their GC efficiencies calculated. To reduce contention
  // on claiming array elements, worker threads claim parts of this array in chunks.
  // Array elements may be NULL as threads might not get enough regions to fill
  // up their chunks completely.
  // Final sorting will remove them.
  class G1BuildCandidateArray : public StackObj {

    uint const _max_size;
    uint const _chunk_size;

    HeapRegion** _data;

    uint volatile _cur_claim_idx;

    // Calculates the maximum array size that will be used.
    static uint required_array_size(uint num_regions, uint chunk_size, uint num_workers) {
      uint const max_waste = num_workers * chunk_size;
      // The array should be aligned with respect to chunk_size.
      uint const aligned_num_regions = ((num_regions + chunk_size - 1) / chunk_size) * chunk_size;

      return aligned_num_regions + max_waste;
    }
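    // For example: required_array_size(10, 4, 3) reserves
    // ((10 + 3) / 4) * 4 + 3 * 4 = 24 slots, i.e. every region rounded up to a
    // whole chunk plus one (possibly only partly filled) chunk of slack per
    // worker (illustrative values, not taken from a real configuration).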

  public:
    G1BuildCandidateArray(uint max_num_regions, uint chunk_size, uint num_workers) :
      _max_size(required_array_size(max_num_regions, chunk_size, num_workers)),
      _chunk_size(chunk_size),
      _data(NEW_C_HEAP_ARRAY(HeapRegion*, _max_size, mtGC)),
      _cur_claim_idx(0) {
      for (uint i = 0; i < _max_size; i++) {
        _data[i] = NULL;
      }
    }

    ~G1BuildCandidateArray() {
      FREE_C_HEAP_ARRAY(HeapRegion*, _data);
    }

    // Claim a new chunk, returning its bounds [from, to[.
    void claim_chunk(uint& from, uint& to) {
      uint result = Atomic::add(&_cur_claim_idx, _chunk_size);
      assert(_max_size > result - 1,
             "Array too small, is %u should be %u with chunk size %u.",
             _max_size, result, _chunk_size);
      from = result - _chunk_size;
      to = result;
    }

    // Set element in array.
    void set(uint idx, HeapRegion* hr) {
      assert(idx < _max_size, "Index %u out of bounds %u", idx, _max_size);
      assert(_data[idx] == NULL, "Value must not have been set.");
      _data[idx] = hr;
    }
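
    // Sort the claimed prefix of the work array by decreasing GC efficiency and
    // copy its first num_regions entries into dest. NULL gaps left by partially
    // filled chunks sort to the end (see order_regions()) and are not copied.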
    void sort_and_copy_into(HeapRegion** dest, uint num_regions) {
      if (_cur_claim_idx == 0) {
        return;
      }
      for (uint i = _cur_claim_idx; i < _max_size; i++) {
        assert(_data[i] == NULL, "must be");
      }
      QuickSort::sort(_data, _cur_claim_idx, order_regions, true);
      for (uint i = num_regions; i < _max_size; i++) {
        assert(_data[i] == NULL, "must be");
      }
      for (uint i = 0; i < num_regions; i++) {
        dest[i] = _data[i];
      }
    }
  };

  // Per-region closure. In addition to determining whether a region should be
  // added to the candidates, and calculating those regions' gc efficiencies, also
  // gather additional statistics.
  class G1BuildCandidateRegionsClosure : public HeapRegionClosure {
    G1BuildCandidateArray* _array;

    uint _cur_chunk_idx;
    uint _cur_chunk_end;

    uint _regions_added;
    size_t _reclaimable_bytes_added;

    void add_region(HeapRegion* hr) {
      if (_cur_chunk_idx == _cur_chunk_end) {
        _array->claim_chunk(_cur_chunk_idx, _cur_chunk_end);
      }
      assert(_cur_chunk_idx < _cur_chunk_end, "Must be");

      hr->calc_gc_efficiency();
      _array->set(_cur_chunk_idx, hr);

      _cur_chunk_idx++;

      _regions_added++;
      _reclaimable_bytes_added += hr->reclaimable_bytes();
    }

    bool should_add(HeapRegion* hr) { return G1CollectionSetChooser::should_add(hr); }

  public:
    G1BuildCandidateRegionsClosure(G1BuildCandidateArray* array) :
      _array(array),
      _cur_chunk_idx(0),
      _cur_chunk_end(0),
      _regions_added(0),
      _reclaimable_bytes_added(0) { }

    bool do_heap_region(HeapRegion* r) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
      if (should_add(r) && !G1CollectedHeap::heap()->is_old_gc_alloc_region(r)) {
        add_region(r);
      } else if (r->is_old()) {
        // Keep remembered sets for humongous regions, otherwise clean out remembered
        // sets for old regions.
        r->rem_set()->clear(true /* only_cardset */);
      } else {
        assert(r->is_archive() || !r->is_old() || !r->rem_set()->is_tracked(),
               "Missed to clear unused remembered set of region %u (%s) that is %s",
               r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str());
      }
      return false;
    }

    uint regions_added() const { return _regions_added; }
    size_t reclaimable_bytes_added() const { return _reclaimable_bytes_added; }
  };

  G1CollectedHeap* _g1h;
  HeapRegionClaimer _hrclaimer;

  uint volatile _num_regions_added;
  size_t volatile _reclaimable_bytes_added;

  G1BuildCandidateArray _result;

  void update_totals(uint num_regions, size_t reclaimable_bytes) {
    if (num_regions > 0) {
      assert(reclaimable_bytes > 0, "invariant");
      Atomic::add(&_num_regions_added, num_regions);
      Atomic::add(&_reclaimable_bytes_added, reclaimable_bytes);
    } else {
      assert(reclaimable_bytes == 0, "invariant");
    }
  }

public:
  G1BuildCandidateRegionsTask(uint max_num_regions, uint chunk_size, uint num_workers) :
    AbstractGangTask("G1 Build Candidate Regions"),
    _g1h(G1CollectedHeap::heap()),
    _hrclaimer(num_workers),
    _num_regions_added(0),
    _reclaimable_bytes_added(0),
    _result(max_num_regions, chunk_size, num_workers) { }

  void work(uint worker_id) {
    G1BuildCandidateRegionsClosure cl(&_result);
    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
    update_totals(cl.regions_added(), cl.reclaimable_bytes_added());
  }

  G1CollectionSetCandidates* get_sorted_candidates() {
    HeapRegion** regions = NEW_C_HEAP_ARRAY(HeapRegion*, _num_regions_added, mtGC);
    _result.sort_and_copy_into(regions, _num_regions_added);
    return new G1CollectionSetCandidates(regions,
                                         _num_regions_added,
                                         _reclaimable_bytes_added);
  }
};

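// Compute the number of array slots each worker claims from the shared work
// array at a time: an even split of the regions over the active workers, but at
// least one. Illustrative example: 2048 regions and 8 workers give chunks of
// 256 regions; with more workers than regions the chunk size is clamped to 1.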
uint G1CollectionSetChooser::calculate_work_chunk_size(uint num_workers, uint num_regions) {
  assert(num_workers > 0, "Active gc workers should be greater than 0");
  return MAX2(num_regions / num_workers, 1U);
}

bool G1CollectionSetChooser::should_add(HeapRegion* hr) {
  return !hr->is_young() &&
         !hr->is_pinned() &&
         region_occupancy_low_enough_for_evac(hr->live_bytes()) &&
         hr->rem_set()->is_complete();
}

// Closure implementing early pruning (removal) of regions meeting the
// G1HeapWastePercent criteria. That is, regions are removed until either
// _max_pruned regions have been removed (for forward progress in evacuation) or
// the waste accumulated by the removed regions would exceed _max_wasted.
class G1PruneRegionClosure : public HeapRegionClosure {
  uint _num_pruned;
  size_t _cur_wasted;

  uint const _max_pruned;
  size_t const _max_wasted;

public:
  G1PruneRegionClosure(uint max_pruned, size_t max_wasted) :
    _num_pruned(0), _cur_wasted(0), _max_pruned(max_pruned), _max_wasted(max_wasted) { }

  virtual bool do_heap_region(HeapRegion* r) {
    size_t const reclaimable = r->reclaimable_bytes();
    if (_num_pruned > _max_pruned ||
        _cur_wasted + reclaimable > _max_wasted) {
      return true;
    }
    r->rem_set()->clear(true /* cardset_only */);
    _cur_wasted += reclaimable;
    _num_pruned++;
    return false;
  }

  uint num_pruned() const { return _num_pruned; }
  size_t wasted() const { return _cur_wasted; }
};

void G1CollectionSetChooser::prune(G1CollectionSetCandidates* candidates) {
  G1Policy* p = G1CollectedHeap::heap()->policy();

  uint min_old_cset_length = p->calc_min_old_cset_length(candidates);
  uint num_candidates = candidates->num_regions();

  if (min_old_cset_length < num_candidates) {
    size_t allowed_waste = p->allowed_waste_in_collection_set();

    G1PruneRegionClosure prune_cl(num_candidates - min_old_cset_length,
                                  allowed_waste);
    candidates->iterate_backwards(&prune_cl);

    log_debug(gc, ergo, cset)("Pruned %u regions out of %u, leaving " SIZE_FORMAT " bytes waste (allowed " SIZE_FORMAT ")",
                              prune_cl.num_pruned(),
                              candidates->num_regions(),
                              prune_cl.wasted(),
                              allowed_waste);

    candidates->remove_from_end(prune_cl.num_pruned(), prune_cl.wasted());
  }
}

G1CollectionSetCandidates* G1CollectionSetChooser::build(WorkGang* workers, uint max_num_regions) {
  uint num_workers = workers->active_workers();
  uint chunk_size = calculate_work_chunk_size(num_workers, max_num_regions);

  G1BuildCandidateRegionsTask cl(max_num_regions, chunk_size, num_workers);
  workers->run_task(&cl, num_workers);

  G1CollectionSetCandidates* result = cl.get_sorted_candidates();
  prune(result);
  result->verify();
  return result;
}
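
// Note: build() above is the only entry point into this file. It is presumably
// invoked once liveness information from concurrent marking is available (so
// that live_bytes() and gc_efficiency() are meaningful); the returned, already
// pruned candidate list is then consumed during mixed collections, most
// efficient regions first (see the comment on order_regions() at the top).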