Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

void ObjectStartArray::initialize(MemRegion reserved_region) {
  // We're based on the assumption that we use the same
  // size blocks as the card table.
  assert((int)block_size == (int)CardTableModRefBS::card_size, "Sanity");
  assert((int)block_size <= 512, "block_size must be less than or equal to 512");

  // Calculate how much space must be reserved
  _reserved_region = reserved_region;

  size_t bytes_to_reserve = reserved_region.word_size() / block_size_in_words;
  assert(bytes_to_reserve > 0, "Sanity");

  bytes_to_reserve =
    align_size_up(bytes_to_reserve, os::vm_allocation_granularity());
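
  // Illustrative sizing (editor's example, not from the upstream source):
  // with the usual 512-byte card/block size on a 64-bit VM
  // (block_size_in_words == 64), a 1 GiB reserved heap needs
  // 1 GiB / 512 = 2 MiB of block table before rounding up to the
  // allocation granularity.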

  // Do not use large-pages for the backing store. The one large page region
  // will be used for the heap proper.
  ReservedSpace backing_store(bytes_to_reserve);
  if (!backing_store.is_reserved()) {
    vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
  }
  MemTracker::record_virtual_memory_type((address)backing_store.base(), mtGC);

  // We do not commit any memory initially
  if (!_virtual_space.initialize(backing_store, 0)) {
    vm_exit_during_initialization("Could not commit space for ObjectStartArray");
  }

  _raw_base = (jbyte*)_virtual_space.low_boundary();

  if (_raw_base == NULL) {
    vm_exit_during_initialization("Could not get raw_base address");
  }

  MemTracker::record_virtual_memory_type((address)_raw_base, mtGC);

  _offset_base = _raw_base - (size_t(reserved_region.start()) >> block_shift);

  _covered_region.set_start(reserved_region.start());
  _covered_region.set_word_size(0);

  _blocks_region.set_start((HeapWord*)_raw_base);
  _blocks_region.set_word_size(0);
}

void ObjectStartArray::set_covered_region(MemRegion mr) {
  assert(_reserved_region.contains(mr), "MemRegion outside of reserved space");
  assert(_reserved_region.start() == mr.start(), "Attempt to move covered region");

  HeapWord* low_bound  = mr.start();
  HeapWord* high_bound = mr.end();
  assert((uintptr_t(low_bound)  & (block_size - 1)) == 0, "heap must start at block boundary");
  assert((uintptr_t(high_bound) & (block_size - 1)) == 0, "heap must end at block boundary");

  size_t requested_blocks_size_in_bytes = mr.word_size() / block_size_in_words;

  // Only commit memory in page sized chunks
  requested_blocks_size_in_bytes =
    align_size_up(requested_blocks_size_in_bytes, os::vm_page_size());

  _covered_region = mr;

  size_t current_blocks_size_in_bytes = _blocks_region.byte_size();

  if (requested_blocks_size_in_bytes > current_blocks_size_in_bytes) {
    // Expand
    size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes;
    if (!_virtual_space.expand_by(expand_by)) {
      vm_exit_out_of_memory(expand_by, OOM_MMAP_ERROR, "object start array expansion");
    }
    // Clear *only* the newly allocated region
    memset(_blocks_region.end(), clean_block, expand_by);
  }

  if (requested_blocks_size_in_bytes < current_blocks_size_in_bytes) {
    // Shrink
    size_t shrink_by = current_blocks_size_in_bytes - requested_blocks_size_in_bytes;
    _virtual_space.shrink_by(shrink_by);
  }

  _blocks_region.set_word_size(requested_blocks_size_in_bytes / sizeof(HeapWord));

  assert(requested_blocks_size_in_bytes % sizeof(HeapWord) == 0, "Block table not expanded in word sized increment");
  assert(requested_blocks_size_in_bytes == _blocks_region.byte_size(), "Sanity");
  assert(block_for_addr(low_bound) == &_raw_base[0], "Checking start of map");
  assert(block_for_addr(high_bound-1) <= &_raw_base[_blocks_region.byte_size()-1], "Checking end of map");
}

void ObjectStartArray::reset() {
  memset(_blocks_region.start(), clean_block, _blocks_region.byte_size());
}

bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr,
                                              HeapWord* end_addr) const {
  assert(start_addr <= end_addr, "range is wrong");
  if (start_addr > end_addr) {
    return false;
  }

  jbyte* start_block = block_for_addr(start_addr);
  jbyte* end_block = block_for_addr(end_addr);

  for (jbyte* block = start_block; block <= end_block; block++) {
    if (*block != clean_block) {
      return true;
    }
  }

  return false;
}
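
The `_offset_base` assignment in initialize() is the heart of the file: the table base is biased once by the heap's start address so that the lookup for an address reduces to a shift and an index, with no per-lookup subtraction. Below is a minimal standalone sketch of that trick (an editor's illustration, not HotSpot code; names such as ToyStartArray and kBlockShift are invented, and it assumes the usual 512-byte block size):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Toy constants; in HotSpot, block_size equals the card size (512 above).
static const int    kBlockShift = 9;
static const size_t kBlockSize  = size_t(1) << kBlockShift;

struct ToyStartArray {
  uint8_t* raw_base;     // one table byte per 512-byte heap block
  uint8_t* offset_base;  // raw_base biased by (heap start >> kBlockShift)

  void initialize(const char* heap_start, uint8_t* table) {
    raw_base = table;
    // Same bias as `_offset_base = _raw_base - (start >> block_shift)`:
    // afterwards the entry for address p is offset_base[p >> kBlockShift].
    // (Like the original, this relies on out-of-range pointer arithmetic
    // that real compilers tolerate.)
    offset_base = raw_base - (uintptr_t(heap_start) >> kBlockShift);
  }

  uint8_t* block_for_addr(const void* p) const {
    return &offset_base[uintptr_t(p) >> kBlockShift];
  }
};

int main() {
  // A fake 4 KiB "heap", aligned to the block size, and its 8-byte table.
  alignas(512) static char heap[4096];
  uint8_t table[sizeof(heap) / kBlockSize];
  memset(table, 0xFF, sizeof(table));  // 0xFF standing in for clean_block

  ToyStartArray arr;
  arr.initialize(heap, table);

  // Addresses in the same 512-byte block share one table byte; crossing a
  // block boundary advances the table pointer by exactly one.
  assert(arr.block_for_addr(heap)       == &table[0]);
  assert(arr.block_for_addr(heap + 511) == &table[0]);
  assert(arr.block_for_addr(heap + 512) == &table[1]);
  printf("offset-base lookup checks passed\n");
  return 0;
}

Biasing the base pointer once at setup is the same technique CardTableModRefBS applies to its card byte map, and keeping block_size equal to the card size (the first assert in initialize()) means each start-array byte lines up with exactly one card.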