// Source: aarch64-shenandoah-jdk8u272-b10 / hotspot/src/share/vm/memory/heap.cpp
/*1* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "memory/heap.hpp"26#include "oops/oop.inline.hpp"27#include "runtime/os.hpp"28#include "services/memTracker.hpp"2930size_t CodeHeap::header_size() {31return sizeof(HeapBlock);32}333435// Implementation of Heap3637CodeHeap::CodeHeap() {38_number_of_committed_segments = 0;39_number_of_reserved_segments = 0;40_segment_size = 0;41_log2_segment_size = 0;42_next_segment = 0;43_freelist = NULL;44_freelist_segments = 0;45}464748void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {49assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");50assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");51// setup _segmap pointers for faster indexing52address p = (address)_segmap.low() + beg;53address q = (address)_segmap.low() + end;54// initialize interval55while (p < q) *p++ = 0xFF;56}575859void CodeHeap::mark_segmap_as_used(size_t beg, 
size_t end) {60assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");61assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");62// setup _segmap pointers for faster indexing63address p = (address)_segmap.low() + beg;64address q = (address)_segmap.low() + end;65// initialize interval66int i = 0;67while (p < q) {68*p++ = i++;69if (i == 0xFF) i = 1;70}71}727374static size_t align_to_page_size(size_t size) {75const size_t alignment = (size_t)os::vm_page_size();76assert(is_power_of_2(alignment), "no kidding ???");77return (size + alignment - 1) & ~(alignment - 1);78}798081void CodeHeap::on_code_mapping(char* base, size_t size) {82#ifdef LINUX83extern void linux_wrap_code(char* base, size_t size);84linux_wrap_code(base, size);85#endif86}878889bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,90size_t segment_size) {91assert(reserved_size >= committed_size, "reserved < committed");92assert(segment_size >= sizeof(FreeBlock), "segment size is too small");93assert(is_power_of_2(segment_size), "segment_size must be a power of 2");9495_segment_size = segment_size;96_log2_segment_size = exact_log2(segment_size);9798// Reserve and initialize space for _memory.99size_t page_size = os::vm_page_size();100if (os::can_execute_large_page_memory()) {101page_size = os::page_size_for_region_unaligned(reserved_size, 8);102}103104const size_t granularity = os::vm_allocation_granularity();105const size_t r_align = MAX2(page_size, granularity);106const size_t r_size = align_size_up(reserved_size, r_align);107const size_t c_size = align_size_up(committed_size, page_size);108109const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 
0 :110MAX2(page_size, granularity);111ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);112os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,113rs.base(), rs.size());114if (!_memory.initialize(rs, c_size)) {115return false;116}117118on_code_mapping(_memory.low(), _memory.committed_size());119_number_of_committed_segments = size_to_segments(_memory.committed_size());120_number_of_reserved_segments = size_to_segments(_memory.reserved_size());121assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");122const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);123const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);124const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);125126// reserve space for _segmap127if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {128return false;129}130131MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);132133assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");134assert(_segmap.reserved_size() >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");135assert(_segmap.reserved_size() >= _segmap.committed_size() , "just checking");136137// initialize remaining instance variables138clear();139return true;140}141142143void CodeHeap::release() {144Unimplemented();145}146147148bool CodeHeap::expand_by(size_t size) {149// expand _memory space150size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();151if (dm > 0) {152char* base = _memory.low() + _memory.committed_size();153if (!_memory.expand_by(dm)) return false;154on_code_mapping(base, dm);155size_t i = _number_of_committed_segments;156_number_of_committed_segments = 
size_to_segments(_memory.committed_size());157assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");158assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");159// expand _segmap space160size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();161if (ds > 0) {162if (!_segmap.expand_by(ds)) return false;163}164assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");165// initialize additional segmap entries166mark_segmap_as_free(i, _number_of_committed_segments);167}168return true;169}170171172void CodeHeap::shrink_by(size_t size) {173Unimplemented();174}175176177void CodeHeap::clear() {178_next_segment = 0;179mark_segmap_as_free(0, _number_of_committed_segments);180}181182183void* CodeHeap::allocate(size_t instance_size, bool is_critical) {184size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));185assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");186187// First check if we can satify request from freelist188debug_only(verify());189HeapBlock* block = search_freelist(number_of_segments, is_critical);190debug_only(if (VerifyCodeCacheOften) verify());191if (block != NULL) {192assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");193assert(!block->free(), "must be marked free");194#ifdef ASSERT195memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);196#endif197return block->allocated_space();198}199200// Ensure minimum size for allocation to the heap.201if (number_of_segments < CodeCacheMinBlockLength) {202number_of_segments = CodeCacheMinBlockLength;203}204205if (!is_critical) {206// Make sure the allocation fits in the unallocated heap without using207// the CodeCacheMimimumFreeSpace that is reserved for critical 
allocations.208if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {209// Fail allocation210return NULL;211}212}213214if (_next_segment + number_of_segments <= _number_of_committed_segments) {215mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);216HeapBlock* b = block_at(_next_segment);217b->initialize(number_of_segments);218_next_segment += number_of_segments;219#ifdef ASSERT220memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);221#endif222return b->allocated_space();223} else {224return NULL;225}226}227228229void CodeHeap::deallocate(void* p) {230assert(p == find_start(p), "illegal deallocation");231// Find start of HeapBlock232HeapBlock* b = (((HeapBlock *)p) - 1);233assert(b->allocated_space() == p, "sanity check");234#ifdef ASSERT235memset((void *)b->allocated_space(),236badCodeHeapFreeVal,237segments_to_size(b->length()) - sizeof(HeapBlock));238#endif239add_to_freelist(b);240241debug_only(if (VerifyCodeCacheOften) verify());242}243244245void* CodeHeap::find_start(void* p) const {246if (!contains(p)) {247return NULL;248}249size_t i = segment_for(p);250address b = (address)_segmap.low();251if (b[i] == 0xFF) {252return NULL;253}254while (b[i] > 0) i -= (int)b[i];255HeapBlock* h = block_at(i);256if (h->free()) {257return NULL;258}259return h->allocated_space();260}261262263size_t CodeHeap::alignment_unit() const {264// this will be a power of two265return _segment_size;266}267268269size_t CodeHeap::alignment_offset() const {270// The lowest address in any allocated block will be271// equal to alignment_offset (mod alignment_unit).272return sizeof(HeapBlock) & (_segment_size - 1);273}274275// Finds the next free heapblock. If the current one is free, that it returned276void* CodeHeap::next_free(HeapBlock *b) const {277// Since free blocks are merged, there is max. 
on free block278// between two used ones279if (b != NULL && b->free()) b = next_block(b);280assert(b == NULL || !b->free(), "must be in use or at end of heap");281return (b == NULL) ? NULL : b->allocated_space();282}283284// Returns the first used HeapBlock285HeapBlock* CodeHeap::first_block() const {286if (_next_segment > 0)287return block_at(0);288return NULL;289}290291HeapBlock *CodeHeap::block_start(void *q) const {292HeapBlock* b = (HeapBlock*)find_start(q);293if (b == NULL) return NULL;294return b - 1;295}296297// Returns the next Heap block an offset into one298HeapBlock* CodeHeap::next_block(HeapBlock *b) const {299if (b == NULL) return NULL;300size_t i = segment_for(b) + b->length();301if (i < _next_segment)302return block_at(i);303return NULL;304}305306307// Returns current capacity308size_t CodeHeap::capacity() const {309return _memory.committed_size();310}311312size_t CodeHeap::max_capacity() const {313return _memory.reserved_size();314}315316size_t CodeHeap::allocated_capacity() const {317// size of used heap - size on freelist318return segments_to_size(_next_segment - _freelist_segments);319}320321// Returns size of the unallocated heap block322size_t CodeHeap::heap_unallocated_capacity() const {323// Total number of segments - number currently used324return segments_to_size(_number_of_reserved_segments - _next_segment);325}326327// Free list management328329FreeBlock *CodeHeap::following_block(FreeBlock *b) {330return (FreeBlock*)(((address)b) + _segment_size * b->length());331}332333// Inserts block b after a334void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {335assert(a != NULL && b != NULL, "must be real pointers");336337// Link b into the list after a338b->set_link(a->link());339a->set_link(b);340341// See if we can merge blocks342merge_right(b); // Try to make b bigger343merge_right(a); // Try to make a include b344}345346// Try to merge this block with the following block347void CodeHeap::merge_right(FreeBlock *a) {348assert(a->free(), 
"must be a free block");349if (following_block(a) == a->link()) {350assert(a->link() != NULL && a->link()->free(), "must be free too");351// Update block a to include the following block352a->set_length(a->length() + a->link()->length());353a->set_link(a->link()->link());354// Update find_start map355size_t beg = segment_for(a);356mark_segmap_as_used(beg, beg + a->length());357}358}359360void CodeHeap::add_to_freelist(HeapBlock *a) {361FreeBlock* b = (FreeBlock*)a;362assert(b != _freelist, "cannot be removed twice");363364// Mark as free and update free space count365_freelist_segments += b->length();366b->set_free();367368// First element in list?369if (_freelist == NULL) {370_freelist = b;371b->set_link(NULL);372return;373}374375// Scan for right place to put into list. List376// is sorted by increasing addresseses377FreeBlock* prev = NULL;378FreeBlock* cur = _freelist;379while(cur != NULL && cur < b) {380assert(prev == NULL || prev < cur, "must be ordered");381prev = cur;382cur = cur->link();383}384385assert( (prev == NULL && b < _freelist) ||386(prev < b && (cur == NULL || b < cur)), "list must be ordered");387388if (prev == NULL) {389// Insert first in list390b->set_link(_freelist);391_freelist = b;392merge_right(_freelist);393} else {394insert_after(prev, b);395}396}397398// Search freelist for an entry on the list with the best fit399// Return NULL if no one was found400FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {401FreeBlock *best_block = NULL;402FreeBlock *best_prev = NULL;403size_t best_length = 0;404405// Search for smallest block which is bigger than length406FreeBlock *prev = NULL;407FreeBlock *cur = _freelist;408while(cur != NULL) {409size_t l = cur->length();410if (l >= length && (best_block == NULL || best_length > l)) {411412// Non critical allocations are not allowed to use the last part of the code heap.413if (!is_critical) {414// Make sure the end of the allocation doesn't cross into the last part of the code heap415if 
(((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) {416// the freelist is sorted by address - if one fails, all consecutive will also fail.417break;418}419}420421// Remember best block, its previous element, and its length422best_block = cur;423best_prev = prev;424best_length = best_block->length();425}426427// Next element in list428prev = cur;429cur = cur->link();430}431432if (best_block == NULL) {433// None found434return NULL;435}436437assert((best_prev == NULL && _freelist == best_block ) ||438(best_prev != NULL && best_prev->link() == best_block), "sanity check");439440// Exact (or at least good enough) fit. Remove from list.441// Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.442if (best_length < length + CodeCacheMinBlockLength) {443length = best_length;444if (best_prev == NULL) {445assert(_freelist == best_block, "sanity check");446_freelist = _freelist->link();447} else {448// Unmap element449best_prev->set_link(best_block->link());450}451} else {452// Truncate block and return a pointer to the following block453best_block->set_length(best_length - length);454best_block = following_block(best_block);455// Set used bit and length on new block456size_t beg = segment_for(best_block);457mark_segmap_as_used(beg, beg + length);458best_block->set_length(length);459}460461best_block->set_used();462_freelist_segments -= length;463return best_block;464}465466//----------------------------------------------------------------------------467// Non-product code468469#ifndef PRODUCT470471void CodeHeap::print() {472tty->print_cr("The Heap");473}474475#endif476477void CodeHeap::verify() {478// Count the number of blocks on the freelist, and the amount of space479// represented.480int count = 0;481size_t len = 0;482for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {483len += b->length();484count++;485}486487// Verify that freelist contains the right amount of free space488// guarantee(len == _freelist_segments, 
"wrong freelist");489490// Verify that the number of free blocks is not out of hand.491static int free_block_threshold = 10000;492if (count > free_block_threshold) {493warning("CodeHeap: # of free blocks > %d", free_block_threshold);494// Double the warning limit495free_block_threshold *= 2;496}497498// Verify that the freelist contains the same number of free blocks that is499// found on the full list.500for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {501if (h->free()) count--;502}503// guarantee(count == 0, "missing free blocks");504}505506507