Path: src/hotspot/share/memory/arena.cpp
/*
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our four static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;
  static ChunkPool* _tiny_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  NOINLINE void* allocate(size_t bytes, AllocFailType alloc_failmode) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside ThreadCritical lock, so os::malloc
    // should be done outside ThreadCritical lock due to NMT
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
    }
    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free all but the first n
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          // Free all remaining chunks while in ThreadCritical lock
          // so NMT adjustment is stable.
          while(cur != NULL) {
            next = cur->next();
            os::free(cur);
            _num_chunks--;
            cur = next;
          }
        }
      }
    }
  }

  // Accessors to the preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool;   }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _tiny_pool->free_all_but(BlocksToKeep);
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;
ChunkPool* ChunkPool::_tiny_pool   = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}
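
// Illustrative sketch (not part of the original sources): how a chunk of one of the
// four pooled sizes cycles through its pool. The placement-new form matches the
// Arena constructors further down in this file.
//
//   Chunk* c = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
//   // ... the chunk backs arena allocations for a while ...
//   delete c;   // Chunk::operator delete routes the block back to ChunkPool::small_pool()
//               // instead of calling os::free(), so the next request of the same size
//               // can reuse it without another malloc.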

//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };      // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to expected arena alignment.
  // expect requested_size == sizeof(Chunk), but if sizeof(Chunk) is not the properly
  // aligned size we must align it here.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}
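
// Worked example for the size computation above (the concrete numbers are assumptions
// for illustration; the real constants live in arena.hpp): if
// ARENA_ALIGN(sizeof(Chunk)) == aligned_overhead_size() == 16 and length == Chunk::init_size,
// then bytes == 16 + Chunk::init_size. That is exactly the uniform _size the small pool
// was constructed with in ChunkPool::initialize() (Chunk::init_size + Chunk::aligned_overhead_size()),
// so ChunkPool::allocate()'s "bad size" assert holds and the pooled block has room for
// the Chunk header plus its payload.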

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c); break;
   default:
     ThreadCritical tc;  // Free chunks under TC lock so that NMT adjustment is stable.
     os::free(c);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}

void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;           // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}

void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------

Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(init_size);
}

Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(Chunk::init_size);
}
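
// Worked example for the rounding in the two-argument constructor above, assuming a
// 64-bit VM where sizeof(char*) == 8 (so round_size == 7): init_size is rounded up to
// the next multiple of the pointer size, e.g.
//
//   init_size == 13  ->  (13 + 7) & ~7  ==  16
//   init_size == 16  ->  (16 + 7) & ~7  ==  16   (already aligned, unchanged)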

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // Work around a rare race condition which could cause native memory
  // tracking to double-count the arena size
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  MemTracker::record_arena_free(_flags);
}

void* Arena::operator new(size_t size) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
  return (void *) AllocateHeap(size, flags, CALLER_PC);
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
  return (void*)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // reset size before chop to avoid a rare race condition
  // that can have total arena memory exceed total chunk memory
  set_size_in_bytes(0);
  if (_first != NULL) {
    _first->chop();
  }
  reset();
}

// This is a high-traffic method, but many calls actually don't
// change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    ssize_t delta = size - size_in_bytes();
    _size_in_bytes = size;
    MemTracker::record_arena_size_change(delta, _flags);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  Chunk *k = _first;
  while( k != _chunk) {         // While there are Chunks before the current one
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, "%s", whence);
}

// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (alloc_failmode, len) Chunk(len);

  if (_chunk == NULL) {
    _chunk = k;                 // restore the previous value of _chunk
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm  = _chunk->bottom();     // Save the cached hwm, max
  _max =  _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}


// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  if (new_size == 0) {
    Afree(old_ptr, old_size); // like realloc(3)
    return NULL;
  }
  if (old_ptr == NULL) {
    assert(old_size == 0, "sanity");
    return Amalloc(new_size, alloc_failmode); // as with realloc(3), a NULL old ptr is equivalent to malloc(3)
  }
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&                  // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) {       // Still fits where it sits
    _hwm = c_old+corrected_new_size;               // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}
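
// Usage sketch for Arealloc (illustrative only; the 64/128-byte sizes are arbitrary):
//
//   char* p = (char*) arena->Amalloc(64);
//   // p is the most recent allocation, so p + 64 == _hwm ...
//   p = (char*) arena->Arealloc(p, 64, 128);
//   // ... and the block is extended in place as long as the aligned new size still
//   // fits below _max; any other block (or one that no longer fits) is copied to a
//   // fresh Amalloc'ed block, exactly as in the relocation path above.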

// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif


//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}

#endif // Non-product
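
// Overall usage sketch (illustrative; it relies only on the Arena/Chunk declarations in
// memory/arena.hpp, and mtTest is merely an example MEMFLAGS value):
//
//   Arena arena(mtTest);
//   void* a = arena.Amalloc(32);               // bump-pointer allocation in the current chunk
//   void* b = arena.Amalloc(2 * Chunk::size);  // does not fit: Arena::grow() appends a new chunk
//   assert(arena.contains(a) && arena.contains(b), "both blocks live in this arena");
//   // ~Arena() calls destruct_contents(), which chops the chunk list; pooled-size
//   // chunks go back to their ChunkPool, everything else is os::free()'d.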