Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/memory/blockOffsetTable.hpp
/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP
#define SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP

#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// BlockOffsetTable (abstract)
// - BlockOffsetArray (abstract)
//   - BlockOffsetArrayNonContigSpace
//   - BlockOffsetArrayContigSpace
//

class ContiguousSpace;

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

 public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
    _bottom(bottom), _end(end) {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.
  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.
  HeapWord* block_start(const void* addr) const;
};
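
// Illustrative use (an editor's sketch, not VM code; names here are
// hypothetical): a generational collector scanning a dirty card must
// find the object that straddles the card's start, and the table
// answers exactly that query.
//
//   HeapWord* card_start = ...;  // hypothetical dirty-card boundary
//   HeapWord* obj_start  = bot->block_start(card_start);
//   // obj_start <= card_start, and obj_start is the first word of the
//   // block (object) covering card_start.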

//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray,
// divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN"). An array with an entry for each such subregion
// indicates how far back one must go to find the start of the
// chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place. (Consider,
// for example, the garbage-first generation.)

// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
class BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class BlockOffsetArray;
  friend class BlockOffsetArrayNonContigSpace;
  friend class BlockOffsetArrayContigSpace;
  friend class VMStructs;

 private:
  enum SomePrivateConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };

  bool _init_to_zero;

  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

 protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }

  // An assertion-checking helper method for the set_offset_array() methods below.
  void check_reducing_assertion(bool reducing);

  void set_offset_array(size_t index, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(!reducing || _offset_array[index] >= offset, "Not reducing");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
           "Not reducing");
    _offset_array[index] = (u_char)pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;

    // Below, we may use an explicit loop instead of memset()
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[index_for(left)], offset, num_cards);
    } else {
      size_t i = index_for(left);
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }
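
  // Worked example for SomePrivateConstants above (an editor's sketch,
  // assuming a 64-bit VM where LogHeapWordSize == 3): LogN == 9 gives
  // 512-byte cards (N_bytes == 1 << 9 == 512), and LogN_words == 9 - 3 == 6
  // gives N_words == 64 words per card. A one-byte table entry can
  // therefore record any backskip of 0..64 words directly; larger
  // backskips use the logarithmic encoding described at BlockOffsetArray
  // below.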
  void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;

    // Below, we may use an explicit loop instead of memset()
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air," phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[left], offset, num_cards);
    } else {
      size_t i = left;
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.
  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words) + 1;
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }

 public:
  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size". In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".) The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // BlockOffsetTable(s) to initialize cards.
  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table. The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Whether entries should be initialized to zero. Used currently only for
  // error checking.
  void set_init_to_zero(bool val) { _init_to_zero = val; }
  bool init_to_zero() { return _init_to_zero; }

  // Updates all the BlockOffsetArray's sharing this shared array to
  // reflect the current "top"'s of their spaces.
  void update_offset_arrays();   // Not yet implemented!

  // Return the appropriate index into "_offset_array" for "p".
  size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  HeapWord* address_for_index(size_t index) const;
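
  // Illustrative round-trip (an editor's sketch; "bosa" is a hypothetical
  // BlockOffsetSharedArray*): for any address p in the covered region,
  // address_for_index(index_for(p)) is p rounded down to its card start:
  //
  //   size_t    i = bosa->index_for(p);
  //   HeapWord* c = bosa->address_for_index(i);  // c <= p < c + N_words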

  // Return the address "p" incremented by the size of
  // a region. This method does not align the address
  // returned to the start of a region. It is a simple
  // primitive.
  HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; }
};

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
  friend class VMStructs;
  friend class G1BlockOffsetArray; // temp. until we restructure and cleanup
 protected:
  // The following enums are used by do_block_internal() below
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  enum SomePrivateConstants {
    N_words = BlockOffsetSharedArray::N_words,
    LogN    = BlockOffsetSharedArray::LogN,
    // entries "e" of at least N_words mean "go back by Base^(e-N_words)."
    // All entries are less than "N_words + N_powers".
    LogBase = 4,
    Base = (1 << LogBase),
    N_powers = 14
  };

  static size_t power_to_cards_back(uint i) {
    return (size_t)1 << (LogBase * i);
  }
  static size_t power_to_words_back(uint i) {
    return power_to_cards_back(i) * N_words;
  }
  static size_t entry_to_cards_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_cards_back(entry - N_words);
  }
  static size_t entry_to_words_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_words_back(entry - N_words);
  }

  // The shared array, which is shared with other BlockOffsetArray's
  // corresponding to different spaces within a generation or span of
  // memory.
  BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // An assertion-checking helper method for the set_remainder*() methods below.
  void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }

  // Sets the entries corresponding to the cards starting at "start" and
  // ending at "end" to point back to the card before "start": the
  // interval [start, end) is right-open. The last parameter, "reducing",
  // indicates whether the updates to individual entries always reduce
  // the entry from a higher to a lower value. (For example, this would
  // hold true during a phase in which only block splits were updating
  // the BOT.)
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);

  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false);
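
  // Worked example of the entry encoding above (an editor's sketch,
  // assuming the 64-bit constants N_words == 64, Base == 16): an entry
  // e < N_words is a direct backskip of e words within the card; an
  // entry e >= N_words says "go back entry_to_cards_back(e) ==
  // 16^(e - 64) cards and look again", so e == 64 skips 1 card,
  // e == 65 skips 16 cards, e == 66 skips 256 cards, and so on. A
  // block-start lookup therefore needs only a number of hops that is
  // logarithmic in the size of the block.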

 public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter. If "init_to_zero" is true, the
  // elements of the array are initialized to zero. Otherwise, they are
  // initialized to point backwards to the beginning.
  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
                   bool init_to_zero_);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp) { _sp = sp; }

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr) {
    _bottom = mr.start();
    _end = mr.end();
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) {
    HeapWord* new_end = _bottom + new_word_size;
    if (_end < new_end && !init_to_zero()) {
      // Verify that the old and new boundaries are also card boundaries.
      assert(_array->is_card_boundary(_end),
             "_end not a card boundary");
      assert(_array->is_card_boundary(new_end),
             "new _end would not be a card boundary");
      // Set all the newly added cards.
      _array->set_offset_array(_end, new_end, N_words);
    }
    _end = new_end;  // update _end
  }

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size). All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }
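
  // Illustrative contract (an editor's sketch, not a new declaration):
  // after
  //   bot.single_block(blk_start, blk_end);
  // any block_start_unsafe(q) with blk_start <= q < blk_end answers
  // blk_start, whatever the table recorded for that range before.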

  // When the alloc_block() call returns, the block offset table should
  // have enough information such that any subsequent block_start() call
  // with an argument equal to an address that is within the range
  // [blk_start, blk_end) would return the value blk_start, provided
  // there have been no calls in between that reset this information
  // (e.g. see the BlockOffsetArrayNonContigSpace::single_block() call
  // for an appropriate range covering that interval).
  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }
  // Corresponding setter
  void set_init_to_zero(bool val) {
    _init_to_zero = val;
    assert(_array != NULL, "_array should be non-NULL");
    _array->set_init_to_zero(val);
  }

  // Debugging
  // Return the index of the last entry in the "active" region.
  virtual size_t last_active_index() const = 0;
  // Verify the block offset table
  void verify() const;
  void check_all_cards(size_t left_card, size_t right_card) const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a NonContiguousSpace, so that some
// specialized interfaces can be made available for spaces that
// manipulate the table.
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

 public:
  BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, false),
    _unallocated_block(_bottom) { }

  // Accessor
  HeapWord* unallocated_block() const {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    return _unallocated_block;
  }

  void set_unallocated_block(HeapWord* block) {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    assert(block >= _bottom && block <= _end, "out of range");
    _unallocated_block = block;
  }

  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);
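
  // Illustrative use (an editor's sketch; the surrounding allocator code
  // is hypothetical): a free-list allocator carving left_blk_size words
  // off the front of a larger free block keeps the BOT consistent with
  // one call:
  //
  //   bot->split_block(blk_start, full_blk_size, left_blk_size);
  //   // The BOT now records [blk_start, blk_start + left_blk_size) and
  //   // [blk_start + left_blk_size, blk_start + full_blk_size).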

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size). Only the first card
  // of BOT is touched. It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
  void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
    mark_block(blk, blk + size, reducing);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed. It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
    // Verify that the BOT shows [blk_start, blk_end) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  void allocated(HeapWord* blk, size_t size, bool reducing = false) {
    allocated(blk, blk + size, reducing);
  }

  void freed(HeapWord* blk_start, HeapWord* blk_end);
  void freed(HeapWord* blk, size_t size);

  HeapWord* block_start_unsafe(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // Verification & debugging: ensure that the offset table reflects
  // the fact that the block [blk_start, blk_end) or [blk, blk + size)
  // is a single block of storage. NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
    PRODUCT_RETURN;
  void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;

  // Verify that the given block is before _unallocated_block
  void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
    const PRODUCT_RETURN;
  void verify_not_unallocated(HeapWord* blk, size_t size)
    const PRODUCT_RETURN;

  // Debugging support
  virtual size_t last_active_index() const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // Allocation boundary at which the offset array must be updated.
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;    // index corresponding to that boundary

  // Work function when allocation start crosses threshold.
  void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);

 public:
  BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, true) {
    _next_offset_threshold = NULL;
    _next_offset_index = 0;
  }

  void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }

  // Initialize the threshold for an empty heap.
  HeapWord* initialize_threshold();
  // Zero out the entry for _bottom (offset will be zero).
  void zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }
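
  // Illustrative fast path (an editor's sketch; "_offsets" and "top"
  // name a hypothetical owning ContiguousSpace's members): bump-pointer
  // allocation only pays for table maintenance when it crosses the
  // threshold:
  //
  //   HeapWord* res = top;             // object placed at the old top
  //   top += size;
  //   _offsets.alloc_block(res, top);  // no-op unless top > threshold()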

  // In general, these methods expect to be called with
  // [blk_start, blk_end) representing a block of memory in the heap.
  // In this implementation, however, we are OK even if blk_start and/or
  // blk_end are NULL because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  HeapWord* block_start_unsafe(const void* addr) const;

  // Debugging support
  virtual size_t last_active_index() const;
};

#endif // SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP