Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP

#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// While generally mirroring the structure of the BOT for GenCollectedHeap,
// the following types are tailored more towards G1's uses; these should,
// however, be merged back into a common BOT to avoid code duplication
// and reduce maintenance overhead.
//
//    G1BlockOffsetTable (abstract)
//    -- G1BlockOffsetArray                (uses G1BlockOffsetSharedArray)
//       -- G1BlockOffsetArrayContigSpace
//
// A main impediment to the consolidation of this code might be the
// effect of making some of the block_start*() calls non-const as
// below.
// Whether that might adversely affect performance optimizations
// that compilers might normally perform in the case of non-G1
// collectors needs to be carefully investigated prior to any such
// consolidation.

// Forward declarations
class G1BlockOffsetSharedArray;
class G1OffsetTableContigSpace;

class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) :
    _bottom(bottom), _end(end)
  {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end,
           err_msg("new_bottom (" PTR_FORMAT ") > _end (" PTR_FORMAT ")",
                   p2i(new_bottom), p2i(_end)));
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block. (May have side effects, namely updating of
  // shared array entries that "point" too far backwards. This can occur,
  // for example, when LAB allocation is used in a space covered by the
  // table.)
  virtual HeapWord* block_start_unsafe(const void* addr) = 0;
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block. (May have side effects,
  // namely updating of shared array entries that "point" too far
  // backwards. This can occur, for example, when LAB allocation is used
  // in a space covered by the table.)
  inline HeapWord* block_start(const void* addr);
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  inline HeapWord* block_start_const(const void* addr) const;
};
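
// Editorial sketch (not part of the original header): a typical use of the
// block_start() query above, e.g. when G1 scans a dirty card and needs the
// object covering the card's first word. The surrounding names are
// hypothetical; only the block_start variants come from the interface above.
//
//   G1BlockOffsetTable* bot = ...;   // the table covering the region
//   HeapWord* card_start = ...;      // first word of the card being scanned
//   HeapWord* card_end   = ...;      // one past the card's last word
//
//   // block_start() may repair BOT entries that point too far back as a
//   // side effect; block_start_const() performs the same query read-only.
//   HeapWord* cur = bot->block_start(card_start);
//   while (cur < card_end) {
//     oop obj = oop(cur);
//     ...                            // apply the closure to obj
//     cur += obj->size();
//   }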

class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
 public:
  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
    // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
    // retrieve it here since this would cause firing of several asserts. The code
    // executed after commit of a region already needs to do some re-initialization of
    // the HeapRegion, so we combine that.
  }
};

// This implementation of "G1BlockOffsetTable" divides the covered region
// into "N"-word subregions (where "N" = 2^"LogN"). An array with an entry
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place,
// such as, for example, in G1 or in the train generation.

// Here is the shared array type.

class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class G1BlockOffsetArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;

private:
  G1BlockOffsetSharedArrayMappingChangedListener _listener;
  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  volatile u_char* _offset_array;  // byte array keeping backwards offsets

  void check_offset(size_t offset, const char* msg) const {
    assert(offset <= N_words,
           err_msg("%s - "
                   "offset: " SIZE_FORMAT ", N_words: " UINT32_FORMAT,
                   msg, offset, N_words));
  }

  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  inline u_char offset_array(size_t index) const;

  inline void set_offset_array_raw(size_t index, u_char offset);

  inline void set_offset_array(size_t index, u_char offset);

  inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);

  inline void set_offset_array(size_t left, size_t right, u_char offset);

  bool is_card_boundary(HeapWord* p) const;

public:

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  static size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words);
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }

  enum SomePublicConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };
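
  // Editorial example (not from the original sources), spelling out the
  // constants above: a BOT "card" covers N_bytes = 2^LogN = 512 bytes. On a
  // 64-bit VM LogHeapWordSize is 3, so LogN_words = 6 and N_words = 64, i.e.
  // the table keeps one byte of offset information per 64 heap words.
  // compute_size() sizes the backing byte array accordingly, e.g. for a
  // 1M-word (8MB on 64-bit) covered region:
  //
  //   size_t slots = compute_size(1024 * 1024);  // 1M / 64 = 16K entries,
  //                                              // rounded up to the VM
  //                                              // allocation granularity
  //
  // i.e. a space overhead of roughly 1/512 of the covered heap.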

  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size". In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".) The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // G1BlockOffsetTable(s) to initialize cards.
  G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);

  // Return the appropriate index into "_offset_array" for "p".
  inline size_t index_for(const void* p) const;
  inline size_t index_for_raw(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  inline HeapWord* address_for_index(size_t index) const;
  // Variant of address_for_index that does not check the index for validity.
  inline HeapWord* address_for_index_raw(size_t index) const {
    return _reserved.start() + (index << LogN_words);
  }
};

// And here is the G1BlockOffsetTable subtype that uses the array.

class G1BlockOffsetArray: public G1BlockOffsetTable {
  friend class G1BlockOffsetSharedArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;
private:
  enum SomePrivateConstants {
    N_words = G1BlockOffsetSharedArray::N_words,
    LogN = G1BlockOffsetSharedArray::LogN
  };

  // This is the array, which can be shared by several BlockOffsetArray's
  // servicing different spaces.
  G1BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  G1OffsetTableContigSpace* _gsp;

  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

  // Sets the entries corresponding to the cards starting at "start" and
  // ending at "end" to point back to the card before "start": the interval
  // [start, end) is right-open.
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end);

protected:

  G1OffsetTableContigSpace* gsp() const { return _gsp; }

  inline size_t block_size(const HeapWord* p) const;

  // Returns the address of a block whose start is at most "addr".
  // If "has_max_index" is true, assumes "max_index" is the last valid one
  // in the array.
  inline HeapWord* block_at_or_preceding(const void* addr,
                                         bool has_max_index,
                                         size_t max_index) const;

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.) Return the address of the
  // beginning of the block that contains "addr". Does so without side
  // effects (see, e.g., spec of block_start.)
  inline HeapWord*
  forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                         const void* addr) const;

  // "q" is a block boundary that is <= "addr"; return the address of the
  // beginning of the block that contains "addr". May have side effects
  // on "this", by updating imprecise entries.
  inline HeapWord* forward_to_block_containing_addr(HeapWord* q,
                                                    const void* addr);

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.) Return the address of the
  // beginning of the block that contains "addr". May have side effects
  // on "this", by updating imprecise entries.
  HeapWord* forward_to_block_containing_addr_slow(HeapWord* q,
                                                  HeapWord* n,
                                                  const void* addr);

  // Requires that "*threshold_" be the first array entry boundary at or
  // above "blk_start", and that "*index_" be the corresponding array
  // index. If the block starts at or crosses "*threshold_", records
  // "blk_start" as the appropriate block start for the array index
  // starting at "*threshold_", and for any other indices crossed by the
  // block. Updates "*threshold_" and "*index_" to correspond to the first
  // index after the block end.
  void alloc_block_work2(HeapWord** threshold_, size_t* index_,
                         HeapWord* blk_start, HeapWord* blk_end);

public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter. The elements of the array are
  // initialized to zero.
  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(G1OffsetTableContigSpace* sp);

  // Resets the covered region to one with the same _bottom as before but
  // the "new_word_size".
  void resize(size_t new_word_size);

  virtual HeapWord* block_start_unsafe(const void* addr);
  virtual HeapWord* block_start_unsafe_const(const void* addr) const;

  // Used by region verification. Checks that the contents of the
  // BOT reflect that there's a single object that spans the address
  // range [obj_start, obj_start + word_size); returns true if this is
  // the case, returns false if it's not.
  bool verify_for_object(HeapWord* obj_start, size_t word_size) const;

  void check_all_cards(size_t left_card, size_t right_card) const;

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
};
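
// Editorial sketch (not part of the original header): how the pieces above
// typically fit together. index_for()/address_for_index() convert between
// heap addresses and BOT entries (address_for_index_raw() above shows the
// mapping: entry i corresponds to the card starting at
// _reserved.start() + i * N_words). A block_start_unsafe() lookup is then
// roughly the following; this is illustrative pseudocode only, the real code
// lives in the corresponding .inline.hpp and .cpp files and additionally
// consults _unallocated_block as a fast path:
//
//   HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
//     assert(_bottom <= addr && addr < _end, "addr must be covered");
//     // Use the BOT entry for addr's card to get some block boundary "q"
//     // known to be at or before addr...
//     HeapWord* q = block_at_or_preceding(addr, false /* has_max_index */, 0);
//     // ...then walk forward block by block (using block_size()) until the
//     // block containing addr is found, repairing entries that pointed too
//     // far back along the way.
//     return forward_to_block_containing_addr(q, addr);
//   }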

// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
  friend class VMStructs;

  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;      // index corresponding to that boundary

  // Work function to be called when allocation start crosses the next
  // threshold in the contig space.
  void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
    alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
                      blk_start, blk_end);
  }

  // Zero out the entry for _bottom (offset will be zero). Does not check
  // for availability of the memory first.
  void zero_bottom_entry_raw();
  // Variant of initialize_threshold that does not check for availability of the
  // memory first.
  HeapWord* initialize_threshold_raw();
public:
  G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);

  // Initialize the threshold to reflect the first boundary after the
  // bottom of the covered region.
  HeapWord* initialize_threshold();

  void reset_bot() {
    zero_bottom_entry_raw();
    initialize_threshold_raw();
  }

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for second version) is "NULL". In this
  // implementation, that's true because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold)
      alloc_block_work1(blk_start, blk_end);
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk+size);
  }

  HeapWord* block_start_unsafe(const void* addr);
  HeapWord* block_start_unsafe_const(const void* addr) const;

  void set_for_starts_humongous(HeapWord* new_top);

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
};
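
// Editorial example (not from the original sources): the effect of the
// threshold test in G1BlockOffsetArrayContigSpace::alloc_block() above.
// Allocations that end at or below _next_offset_threshold never touch the
// table; a block that crosses the threshold triggers alloc_block_work1(),
// which records the block start for the card(s) it spans and advances the
// threshold past the block end. With 512-byte (0x200) cards, a hypothetical
// threshold of 0x1000 and "bot" naming the region's table:
//
//   bot->alloc_block(0x0e00, 0x0fc0);  // blk_end <= threshold: no-op
//   bot->alloc_block(0x0fc0, 0x1380);  // crosses 0x1000: entries for the
//                                      // spanned card(s) now point back to
//                                      // 0x0fc0, and the threshold advances
//                                      // to 0x1400, the first card boundary
//                                      // at or after the block end
//
// The numeric addresses are purely illustrative.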

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP