// Path: src/hotspot/share/gc/shared/blockOffsetTable.hpp
/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_BLOCKOFFSETTABLE_HPP
#define SHARE_GC_SHARED_BLOCKOFFSETTABLE_HPP

#include "gc/shared/gc_globals.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// BlockOffsetTable (abstract)
//   - BlockOffsetArray (abstract)
//     - BlockOffsetArrayContigSpace
//

class ContiguousSpace;

// Constants shared by all block-offset-table implementations below.
// A table entry ("card") covers N_bytes (= 2^LogN) bytes of heap, i.e.
// N_words heap words.
class BOTConstants : public AllStatic {
public:
  static const uint LogN = 9;
  static const uint LogN_words = LogN - LogHeapWordSize;
  static const uint N_bytes = 1 << LogN;
  static const uint N_words = 1 << LogN_words;
  // entries "e" of at least N_words mean "go back by Base^(e-N_words)."
  // All entries are less than "N_words + N_powers".
  static const uint LogBase = 4;
  static const uint Base = (1 << LogBase);
  static const uint N_powers = 14;

  // Number of cards skipped backwards by an exponent-entry of value "i":
  // Base^i, computed as a shift since Base is a power of two.
  static size_t power_to_cards_back(uint i) {
    return (size_t)1 << (LogBase * i);
  }
  // Same distance expressed in heap words rather than cards.
  static size_t power_to_words_back(uint i) {
    return power_to_cards_back(i) * N_words;
  }
  // Decode a raw table entry (which must be an exponent entry, i.e.
  // >= N_words) into a backwards distance in cards.
  static size_t entry_to_cards_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_cards_back(entry - N_words);
  }
  // Decode a raw table entry into a backwards distance in heap words.
  static size_t entry_to_words_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_words_back(entry - N_words);
  }
};

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
    _bottom(bottom), _end(end) {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  // Move the covered region's lower bound and resize the table to match
  // the (possibly shrunken) covered extent.
  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.
  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.
  HeapWord* block_start(const void* addr) const;
};

//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray,
// divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN"). An array with an entry for each such subregion
// indicates how far back one must go to find the start of the
// chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place.
// (Consider, for example, the garbage-first generation.)

// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
// The shared backing store (one u_char per card) used by the
// BlockOffsetArray instances that cover subregions of a single
// reserved region.
class BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class BlockOffsetArray;
  friend class BlockOffsetArrayNonContigSpace;
  friend class BlockOffsetArrayContigSpace;
  friend class VMStructs;

private:
  // Whether entries are initialized to zero (see set_init_to_zero below);
  // used for error checking only.
  bool _init_to_zero;

  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

  // Set "num_cards" entries starting at index "start" to "offset".
  void fill_range(size_t start, size_t num_cards, u_char offset) {
    void* start_ptr = &_offset_array[start];
    // If collector is concurrent, special handling may be needed.
    G1GC_ONLY(assert(!UseG1GC, "Shouldn't be here when using G1");)
    memset(start_ptr, offset, num_cards);
  }

protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }

  // An assertion-checking helper method for the set_offset_array() methods below.
  void check_reducing_assertion(bool reducing);

  // Store "offset" at "index". With "reducing" asserted, the new value
  // may only be smaller than (or equal to) the value already present.
  void set_offset_array(size_t index, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(!reducing || _offset_array[index] >= offset, "Not reducing");
    _offset_array[index] = offset;
  }

  // Store the word distance from "low" back to "high" at "index";
  // the distance must fit in a card (at most N_words).
  void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= BOTConstants::N_words, "offset too large");
    assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
           "Not reducing");
    _offset_array[index] = (u_char)pointer_delta(high, low);
  }

  // Set all entries for the card-aligned address range [left, right)
  // to "offset".
  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> BOTConstants::LogN_words;

    fill_range(index_for(left), num_cards, offset);
  }

  // Set all entries in the closed index interval [left, right] to "offset".
  void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;

    fill_range(left, num_cards, offset);
  }

  // Assert that the entry at "index" records exactly the distance from
  // "low" back to "high".
  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= BOTConstants::N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.
  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / BOTConstants::N_words) + 1;
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }

public:
  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size". In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".) The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // BlockOffsetTable(s) to initialize cards.
  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table. The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Whether entries should be initialized to zero. Used currently only for
  // error checking.
  void set_init_to_zero(bool val) { _init_to_zero = val; }
  bool init_to_zero() { return _init_to_zero; }

  // Updates all the BlockOffsetArray's sharing this shared array to
  // reflect the current "top"'s of their spaces.
  void update_offset_arrays();   // Not yet implemented!

  // Return the appropriate index into "_offset_array" for "p".
  size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  HeapWord* address_for_index(size_t index) const;
};

class Space;

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
  friend class VMStructs;
protected:
  // The following enums are used by do_block_internal() below
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  // The
  // shared array, which is shared with other BlockOffsetArray's
  // corresponding to different spaces within a generation or span of
  // memory.
  BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // An assertion-checking helper method for the set_remainder*() methods below.
  void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }

  // Sets the entries
  // corresponding to the cards starting at "start" and ending at "end"
  // to point back to the card before "start": the interval [start, end)
  // is right-open. The last parameter, reducing, indicates whether the
  // updates to individual entries always reduce the entry from a higher
  // to a lower value. (For example this would hold true during a temporal
  // regime during which only block splits were updating the BOT.)
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false);

public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter. If "init_to_zero" is true, the
  // elements of the array are initialized to zero. Otherwise, they are
  // initialized to point backwards to the beginning.
  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
                   bool init_to_zero_);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp) { _sp = sp; }

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr) {
    _bottom = mr.start();
    _end = mr.end();
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) {
    HeapWord* new_end = _bottom + new_word_size;
    if (_end < new_end && !init_to_zero()) {
      // verify that the old and new boundaries are also card boundaries
      assert(_array->is_card_boundary(_end),
             "_end not a card boundary");
      assert(_array->is_card_boundary(new_end),
             "new _end would not be a card boundary");
      // set all the newly added cards
      _array->set_offset_array(_end, new_end, BOTConstants::N_words);
    }
    _end = new_end;  // update _end
  }

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size). All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // When the alloc_block() call returns, the block offset table should
  // have enough information such that any subsequent block_start() call
  // with an argument equal to an address that is within the range
  // [blk_start, blk_end) would return the value blk_start, provided
  // there have been no calls in between that reset this information
  // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
  // for an appropriate range covering the said interval).
  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }
  // Corresponding setter
  void set_init_to_zero(bool val) {
    _init_to_zero = val;
    assert(_array != NULL, "_array should be non-NULL");
    _array->set_init_to_zero(val);
  }

  // Debugging
  // Return the index of the last entry in the "active" region.
  virtual size_t last_active_index() const = 0;
  // Verify the block offset table
  void verify() const;
  void check_all_cards(size_t left_card, size_t right_card) const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  friend class VMStructs;
private:
  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t _next_offset_index;      // index corresponding to that boundary

  // Work function when allocation start crosses threshold.
  void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);

public:
  BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, true) {
    _next_offset_threshold = NULL;
    _next_offset_index = 0;
  }

  void
  set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }

  // Initialize the threshold for an empty heap.
  HeapWord* initialize_threshold();
  // Zero out the entry for _bottom (offset will be zero)
  void zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // In general, these methods expect to be called with
  // [blk_start, blk_end) representing a block of memory in the heap.
  // In this implementation, however, we are OK even if blk_start and/or
  // blk_end are NULL because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    // Fast path: nothing to record until allocation crosses the threshold.
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  HeapWord* block_start_unsafe(const void* addr) const;

  // Debugging support
  virtual size_t last_active_index() const;
};

#endif // SHARE_GC_SHARED_BLOCKOFFSETTABLE_HPP