Path: blob/master/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP
#define SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP

#include "utilities/concurrentHashTable.hpp"

#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/numberSeq.hpp"
#include "utilities/spinYield.hpp"

// 2^30 = 1G buckets
#define SIZE_BIG_LOG2 30
// 2^5 = 32 buckets
#define SIZE_SMALL_LOG2 5

// Number from spinYield.hpp. In some loops SpinYield would be unfair.
#define SPINPAUSES_PER_YIELD 8192

#ifdef ASSERT
#ifdef _LP64
// Two low bits are not usable.
static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
#else
// Two low bits are not usable.
static const void* POISON_PTR = (void*)0xffbadbac;
#endif
#endif

// Node
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
  Node::next() const
{
  return Atomic::load_acquire(&_next);
}

// Bucket
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
  Bucket::first_raw() const
{
  return Atomic::load_acquire(&_first);
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::release_assign_node_ptr(
    typename ConcurrentHashTable<CONFIG, F>::Node* const volatile * dst,
    typename ConcurrentHashTable<CONFIG, F>::Node* node) const
{
  // Due to this assert this method is not static.
  assert(is_locked(), "Must be locked.");
  Node** tmp = (Node**)dst;
  Atomic::release_store(tmp, clear_set_state(node, *dst));
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
  Bucket::first() const
{
  // We strip the state bits before returning the ptr.
  return clear_state(Atomic::load_acquire(&_first));
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  Bucket::have_redirect() const
{
  return is_state(first_raw(), STATE_REDIRECT_BIT);
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  Bucket::is_locked() const
{
  return is_state(first_raw(), STATE_LOCK_BIT);
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::lock()
{
  int i = 0;
  // SpinYield would be unfair here
  while (!this->trylock()) {
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding gives the CPU to another runnable
      // thread if no spare CPU is available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::release_assign_last_node_next(
     typename ConcurrentHashTable<CONFIG, F>::Node* node)
{
  assert(is_locked(), "Must be locked.");
  Node* const volatile * ret = first_ptr();
  while (clear_state(*ret) != NULL) {
    ret = clear_state(*ret)->next_ptr();
  }
  release_assign_node_ptr(ret, node);
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  Bucket::cas_first(typename ConcurrentHashTable<CONFIG, F>::Node* node,
                    typename ConcurrentHashTable<CONFIG, F>::Node* expect)
{
  if (is_locked()) {
    return false;
  }
  if (Atomic::cmpxchg(&_first, expect, node) == expect) {
    return true;
  }
  return false;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  Bucket::trylock()
{
  if (is_locked()) {
    return false;
  }
  // We will expect a clean first pointer.
  Node* tmp = first();
  if (Atomic::cmpxchg(&_first, tmp, set_state(tmp, STATE_LOCK_BIT)) == tmp) {
    return true;
  }
  return false;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::unlock()
{
  assert(is_locked(), "Must be locked.");
  assert(!have_redirect(),
         "Unlocking a bucket after it has reached terminal state.");
  Atomic::release_store(&_first, clear_state(first()));
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::redirect()
{
  assert(is_locked(), "Must be locked.");
  Atomic::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
}

// InternalTable
template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  InternalTable::InternalTable(size_t log2_size)
    : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
      _hash_mask(~(~((size_t)0) << _log2_size))
{
  assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
         "Bad size");
  _buckets = NEW_C_HEAP_ARRAY(Bucket, _size, F);
  // Use placement new for each element instead of new[] which could use more
  // memory than allocated.
  for (size_t i = 0; i < _size; ++i) {
    new (_buckets + i) Bucket();
  }
}

template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  InternalTable::~InternalTable()
{
  FREE_C_HEAP_ARRAY(Bucket, _buckets);
}

// ScopedCS
template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht)
    : _thread(thread),
      _cht(cht),
      _cs_context(GlobalCounter::critical_section_begin(_thread))
{
  // This version is published now.
  if (Atomic::load_acquire(&_cht->_invisible_epoch) != NULL) {
    Atomic::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
  }
}

template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  ScopedCS::~ScopedCS()
{
  GlobalCounter::critical_section_end(_thread, _cs_context);
}

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
  MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
}

// HaveDeletables
template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                      EVALUATE_FUNC& eval_f,
                                                      Bucket* prefetch_bucket)
{
  // Instantiated for pointer type (true), so we can use prefetch.
  // When visiting all Nodes, doing this prefetch gives around a 30% improvement.
  Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (pref != NULL) {
      Prefetch::read(*pref->value(), 0);
      pref = pref->next();
    }
    // Read next() Node* once. May be racing with a thread moving the next
    // pointers.
    Node* next_pref = next->next();
    if (next_pref != NULL) {
      Prefetch::read(*next_pref->value(), 0);
    }
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

template <typename CONFIG, MEMFLAGS F>
template <bool b, typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                   EVALUATE_FUNC& eval_f,
                                                   Bucket* preb)
{
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

// ConcurrentHashTable
template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  write_synchonize_on_visible_epoch(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  OrderAccess::fence(); // Prevent below load from floating up.
  // If no reader saw this version we can skip write_synchronize.
  if (Atomic::load_acquire(&_invisible_epoch) == thread) {
    return;
  }
  assert(_invisible_epoch == NULL, "Two threads doing bulk operations");
  // We mark this/next version that we are synchronizing on as not published.
  // A reader will zero this flag if it reads this/next version.
  Atomic::release_store(&_invisible_epoch, thread);
  GlobalCounter::write_synchronize();
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  try_resize_lock(Thread* locker)
{
  if (_resize_lock->try_lock()) {
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      return false;
    }
  } else {
    return false;
  }
  _invisible_epoch = 0;
  _resize_lock_owner = locker;
  return true;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  lock_resize_lock(Thread* locker)
{
  size_t i = 0;
  // If the lock is held by some other thread, the chance that it is released
  // quickly is low, so we prefer yielding.
  SpinYield yield(1, 512);
  do {
    _resize_lock->lock_without_safepoint_check();
    // If the holder of the lock dropped the mutex for a safepoint, the mutex
    // might be unlocked while _resize_lock_owner still contains the owner.
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got the mutex but the internal state is locked.
      _resize_lock->unlock();
      yield.wait();
    } else {
      break;
    }
  } while(true);
  _resize_lock_owner = locker;
  _invisible_epoch = 0;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  unlock_resize_lock(Thread* locker)
{
  _invisible_epoch = 0;
  assert(locker == _resize_lock_owner, "Not unlocked by locker.");
  _resize_lock_owner = NULL;
  _resize_lock->unlock();
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  free_nodes()
{
  // We assume we are not MT during freeing.
  for (size_t node_it = 0; node_it < _table->_size; node_it++) {
    Bucket* bucket = _table->get_buckets() + node_it;
    Node* node = bucket->first();
    while (node != NULL) {
      Node* free_node = node;
      node = node->next();
      Node::destroy_node(_context, free_node);
    }
  }
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
  get_table() const
{
  return Atomic::load_acquire(&_table);
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
  get_new_table() const
{
  return Atomic::load_acquire(&_new_table);
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
  set_table_from_new()
{
  InternalTable* old_table = _table;
  // Publish the new table.
  Atomic::release_store(&_table, _new_table);
  // All must see this.
  GlobalCounter::write_synchronize();
  // _new_table is not read any more.
  _new_table = NULL;
  DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
  return old_table;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_grow_range(Thread* thread, size_t start, size_t stop)
{
  assert(stop <= _table->_size, "Outside backing array");
  assert(_new_table != NULL, "Grow not properly set up before start");
  // The state is also copied here. Hence all buckets in the new table will be
  // locked. We call the siblings odd/even, where even has high bit 0 and odd
  // has high bit 1.
  for (size_t even_index = start; even_index < stop; even_index++) {
    Bucket* bucket = _table->get_bucket(even_index);

    bucket->lock();

    size_t odd_index = even_index + _table->_size;
    _new_table->get_buckets()[even_index] = *bucket;
    _new_table->get_buckets()[odd_index] = *bucket;

    // Lockers move on to the new table, where they will wait until unlock() below.
    bucket->redirect(); /* Must release stores above */

    // When this is done we have separated the nodes into corresponding buckets
    // in the new table.
    if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) {
      // If the bucket is empty, unzip does nothing.
      // We must make sure readers go to the new table before we poison the bucket.
      DEBUG_ONLY(GlobalCounter::write_synchronize();)
    }

    // Unlock for writes into the new table buckets.
    _new_table->get_bucket(even_index)->unlock();
    _new_table->get_bucket(odd_index)->unlock();

    DEBUG_ONLY(
       bucket->release_assign_node_ptr(
          _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
    )
  }
}

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
{
  Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
  assert(bucket->is_locked(), "Must be locked.");
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  bool have_dead = false;
  while (rem_n != NULL) {
    if (lookup_f.equals(rem_n->value(), &have_dead)) {
      bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
      break;
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }

  bucket->unlock();

  if (rem_n == NULL) {
    return false;
  }
  // Publish the deletion.
  GlobalCounter::write_synchronize();
  delete_f(rem_n->value());
  Node::destroy_node(_context, rem_n);
  JFR_ONLY(_stats_rate.remove();)
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
                            EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
{
  // Here we hold the resize lock so the table is SMR safe, and there is no new
  // table. Can do this in parallel if we want.
  assert((is_mt && _resize_lock_owner != NULL) ||
         (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
  Node* ndel[BULK_DELETE_LIMIT];
  InternalTable* table = get_table();
  assert(start_idx < stop_idx, "Must be");
  assert(stop_idx <= _table->_size, "Must be");
  // Here we do a manual critical section since we don't want to take the cost
  // of locking the bucket if there is nothing to delete. But we can have
  // concurrent single deletes. The _invisible_epoch can only be used by the
  // owner of _resize_lock, which is us here. Therefore we should not change it
  // on our own read-side.
  GlobalCounter::CSContext cs_context = GlobalCounter::critical_section_begin(thread);
  for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
                              table->get_bucket(bucket_it+1) : NULL;

    if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
        have_deletable(bucket, eval_f, prefetch_bucket)) {
        // Nothing to remove in this bucket.
        continue;
    }

    GlobalCounter::critical_section_end(thread, cs_context);
    // We left the critical section but the bucket cannot be removed while we
    // hold the _resize_lock.
    bucket->lock();
    size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
    bucket->unlock();
    if (is_mt) {
      GlobalCounter::write_synchronize();
    } else {
      write_synchonize_on_visible_epoch(thread);
    }
    for (size_t node_it = 0; node_it < nd; node_it++) {
      del_f(ndel[node_it]->value());
      Node::destroy_node(_context, ndel[node_it]);
      JFR_ONLY(_stats_rate.remove();)
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
    cs_context = GlobalCounter::critical_section_begin(thread);
  }
  GlobalCounter::critical_section_end(thread, cs_context);
}

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
{
  assert(bucket->is_locked(), "Must be locked.");

  size_t dels = 0;
  Node* ndel[BULK_DELETE_LIMIT];
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    bool is_dead = false;
    lookup_f.equals(rem_n->value(), &is_dead);
    if (is_dead) {
      ndel[dels++] = rem_n;
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == BULK_DELETE_LIMIT) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  if (dels > 0) {
    GlobalCounter::write_synchronize();
    for (size_t node_it = 0; node_it < dels; node_it++) {
      Node::destroy_node(_context, ndel[node_it]);
      JFR_ONLY(_stats_rate.remove();)
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
  }
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
ConcurrentHashTable<CONFIG, F>::
  get_bucket(uintx hash) const
{
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  if (bucket->have_redirect()) {
    table = get_new_table();
    bucket = get_bucket_in(table, hash);
  }
  return bucket;
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
ConcurrentHashTable<CONFIG, F>::
  get_bucket_locked(Thread* thread, const uintx hash)
{
  Bucket* bucket;
  int i = 0;
  // SpinYield would be unfair here
  while(true) {
    {
      // We need a critical section to protect the table itself. But if we fail
      // we must leave the critical section, otherwise we would deadlock.
      ScopedCS cs(thread, this);
      bucket = get_bucket(hash);
      if (bucket->trylock()) {
        break; /* ends critical section */
      }
    } /* ends critical section */
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding gives the CPU to another runnable
      // thread if no spare CPU is available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
  return bucket;
}

// Always called within critical section
template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
  get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
           bool* have_dead, size_t* loops) const
{
  size_t loop_count = 0;
  Node* node = bucket->first();
  while (node != NULL) {
    bool is_dead = false;
    ++loop_count;
    if (lookup_f.equals(node->value(), &is_dead)) {
      break;
    }
    if (is_dead && !(*have_dead)) {
      *have_dead = true;
    }
    node = node->next();
  }
  if (loops != NULL) {
    *loops = loop_count;
  }
  return node;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  unzip_bucket(Thread* thread, InternalTable* old_table,
               InternalTable* new_table, size_t even_index, size_t odd_index)
{
  Node* aux = old_table->get_bucket(even_index)->first();
  if (aux == NULL) {
    // This is an empty bucket and in debug we poison the first ptr in the bucket.
    // Therefore we must make sure no readers are looking at this bucket.
    // If we don't do a write_synch here, the caller must do it.
    return false;
  }
  Node* delete_me = NULL;
  Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
  Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
  while (aux != NULL) {
    bool dead_hash = false;
    size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
    Node* aux_next = aux->next();
    if (dead_hash) {
      delete_me = aux;
      // This item is dead, move both lists to next
      new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                aux_next);
      new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                 aux_next);
    } else {
      size_t aux_index = bucket_idx_hash(new_table, aux_hash);
      if (aux_index == even_index) {
        // This is an even node, so move the odd list to aux's next
        new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                  aux_next);
        // Keep in even list
        even = aux->next_ptr();
      } else if (aux_index == odd_index) {
        // This is an odd node, so move the even list to aux's next
        new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                   aux_next);
        // Keep in odd list
        odd = aux->next_ptr();
      } else {
        fatal("aux_index does not match even or odd indices");
      }
    }
    aux = aux_next;

    // We can only move 1 pointer, otherwise a reader might be moved to the
    // wrong chain. E.g. a reader looking for an even hash value could get
    // moved to the odd bucket chain.
    write_synchonize_on_visible_epoch(thread);
    if (delete_me != NULL) {
      Node::destroy_node(_context, delete_me);
      delete_me = NULL;
    }
  }
  return true;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_shrink_prolog(Thread* thread, size_t log2_size)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  if (_table->_log2_size == _log2_start_size ||
      _table->_log2_size <= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }
  _new_table = new InternalTable(_table->_log2_size - 1);
  return true;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_shrink_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");

  InternalTable* old_table = set_table_from_new();
  _size_limit_reached = false;
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i++)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_shrink_range(Thread* thread, size_t start, size_t stop)
{
  // The state is also copied here.
  // Hence all buckets in the new table will be locked.
  for (size_t bucket_it = start; bucket_it < stop; bucket_it++) {
    size_t even_hash_index = bucket_it; // High bit 0
    size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1

    Bucket* b_old_even = _table->get_bucket(even_hash_index);
    Bucket* b_old_odd  = _table->get_bucket(odd_hash_index);

    b_old_even->lock();
    b_old_odd->lock();

    _new_table->get_buckets()[bucket_it] = *b_old_even;

    // Put chains together.
    _new_table->get_bucket(bucket_it)->
      release_assign_last_node_next(*(b_old_odd->first_ptr()));

    b_old_even->redirect();
    b_old_odd->redirect();

    write_synchonize_on_visible_epoch(thread);

    // Unlock for writes into the new smaller table.
    _new_table->get_bucket(bucket_it)->unlock();

    DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
                                                   (Node*)POISON_PTR);)
    DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
                                                  (Node*)POISON_PTR);)
  }
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_shrink(Thread* thread, size_t log2_size)
{
  if (!internal_shrink_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_shrink_range(thread, 0, _new_table->_size);
  internal_shrink_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_reset(size_t log2_size)
{
  assert(_table != NULL, "table failed");
  assert(_log2_size_limit >= log2_size, "bad ergo");

  delete _table;
  // Create and publish a new table
  InternalTable* table = new InternalTable(log2_size);
  _size_limit_reached = (log2_size == _log2_size_limit);
  Atomic::release_store(&_table, table);
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_grow_prolog(Thread* thread, size_t log2_size)
{
  // We only do this double checking of _size_limit_reached/is_max_size_reached()
  // in the grow path, since grow means high load on the table
  // while shrink means low load.
  if (is_max_size_reached()) {
    return false;
  }
  if (!try_resize_lock(thread)) {
    // Either we have an ongoing resize or an operation which doesn't want us
    // to resize now.
    return false;
  }
  if (is_max_size_reached() || _table->_log2_size >= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }

  _new_table = new InternalTable(_table->_log2_size + 1);

  if (_new_table->_log2_size == _log2_size_limit) {
    _size_limit_reached = true;
  }

  return true;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_grow_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Should be locked");

  InternalTable* old_table = set_table_from_new();
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i++)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_grow(Thread* thread, size_t log2_size)
{
  if (!internal_grow_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_grow_range(thread, 0, _table->_size);
  internal_grow_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

// Always called within critical section
template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
  internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  bool clean = false;
  size_t loops = 0;
  VALUE* ret = NULL;

  const Bucket* bucket = get_bucket(lookup_f.get_hash());
  Node* node = get_node(bucket, lookup_f, &clean, &loops);
  if (node != NULL) {
    ret = node->value();
  }
  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  return ret;
}

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename FOUND_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_insert_get(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
                      FOUND_FUNC& foundf, bool* grow_hint, bool* clean_hint)
{
  bool ret = false;
  bool clean = false;
  bool locked;
  size_t loops = 0;
  size_t i = 0;
  uintx hash = lookup_f.get_hash();
  Node* new_node = Node::create_node(_context, value, NULL);

  while (true) {
    {
      ScopedCS cs(thread, this); /* protects the table/bucket */
      Bucket* bucket = get_bucket(hash);
      Node* first_at_start = bucket->first();
      Node* old = get_node(bucket, lookup_f, &clean, &loops);
      if (old == NULL) {
        new_node->set_next(first_at_start);
        if (bucket->cas_first(new_node, first_at_start)) {
          foundf(new_node->value());
          JFR_ONLY(_stats_rate.add();)
          new_node = NULL;
          ret = true;
          break; /* leave critical section */
        }
        // CAS failed, we must leave the critical section and retry.
        locked = bucket->is_locked();
      } else {
        // There is a duplicate.
        foundf(old->value());
        break; /* leave critical section */
      }
    } /* leave critical section */
    i++;
    if (locked) {
      os::naked_yield();
    } else {
      SpinPause();
    }
  }

  if (new_node != NULL) {
    // CAS failed and a duplicate was inserted, we must free this node.
    Node::destroy_node(_context, new_node);
  } else if (i == 0 && clean) {
    // We only do cleaning on fast inserts.
    Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
    delete_in_bucket(thread, bucket, lookup_f);
    bucket->unlock();
    clean = false;
  }

  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  if (clean_hint != NULL) {
    *clean_hint = clean;
  }

  return ret;
}

template <typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  visit_nodes(Bucket* bucket, FUNC& visitor_f)
{
  Node* current_node = bucket->first();
  while (current_node != NULL) {
    if (!visitor_f(current_node->value())) {
      return false;
    }
    current_node = current_node->next();
  }
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  do_scan_locked(Thread* thread, FUNC& scan_f)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  // We could do a critical section over the entire loop, but that would block
  // updates for a long time. Instead we choose to block resizes.
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
      break; /* ends critical section */
    }
  } /* ends critical section */
}

template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline size_t ConcurrentHashTable<CONFIG, F>::
  delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                     size_t num_del, Node** ndel)
{
  size_t dels = 0;
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    if (eval_f(rem_n->value())) {
      ndel[dels++] = rem_n;
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == num_del) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  return dels;
}

// Constructor
template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint, void* context)
    : _context(context), _new_table(NULL), _log2_size_limit(log2size_limit),
      _log2_start_size(log2size), _grow_hint(grow_hint),
      _size_limit_reached(false), _resize_lock_owner(NULL),
      _invisible_epoch(0)
{
  _stats_rate = TableRateStatistics();
  _resize_lock =
    new Mutex(Mutex::leaf, "ConcurrentHashTable", true,
              Mutex::_safepoint_check_never);
  _table = new InternalTable(log2size);
  assert(log2size_limit >= log2size, "bad ergo");
  _size_limit_reached = _table->_log2_size == _log2_size_limit;
}

template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  ~ConcurrentHashTable()
{
  delete _resize_lock;
  free_nodes();
  delete _table;
}

template <typename CONFIG, MEMFLAGS F>
inline size_t ConcurrentHashTable<CONFIG, F>::
  get_size_log2(Thread* thread)
{
  ScopedCS cs(thread, this);
  return _table->_log2_size;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  shrink(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
  bool ret = internal_shrink(thread, tmp);
  return ret;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  unsafe_reset(size_t size_log2)
{
  size_t tmp = size_log2 == 0 ? _log2_start_size : size_log2;
  internal_reset(tmp);
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  grow(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
  return internal_grow(thread, tmp);
}

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename FOUND_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
{
  bool ret = false;
  ScopedCS cs(thread, this);
  VALUE* val = internal_get(thread, lookup_f, grow_hint);
  if (val != NULL) {
    found_f(val);
    ret = true;
  }
  return ret;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  unsafe_insert(const VALUE& value) {
  bool dead_hash = false;
  size_t hash = CONFIG::get_hash(value, &dead_hash);
  if (dead_hash) {
    return false;
  }
  // This is an unsafe operation.
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
  Node* new_node = Node::create_node(_context, value, bucket->first());
  if (!bucket->cas_first(new_node, bucket->first())) {
    assert(false, "bad");
  }
  JFR_ONLY(_stats_rate.add();)
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  try_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  do_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  assert(!SafepointSynchronize::is_at_safepoint(),
         "must be outside a safepoint");
  assert(_resize_lock_owner != thread, "Re-size lock held");
  lock_resize_lock(thread);
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
}

template <typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  do_safepoint_scan(SCAN_FUNC& scan_f)
{
  // We only allow this method to be used during a safepoint.
  assert(SafepointSynchronize::is_at_safepoint(),
         "must only be called in a safepoint");
  assert(Thread::current()->is_VM_thread(),
         "should be in vm thread");

  // Here we skip protection,
  // thus no other thread may use this table at the same time.
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    // If the bucket has a redirect, the items will be in the new table.
    // We must visit them there since the new table will contain any
    // concurrent inserts done after this bucket was resized.
    // If the bucket does not have the redirect flag, all items are in this table.
    if (!bucket->have_redirect()) {
      if(!visit_nodes(bucket, scan_f)) {
        return;
      }
    } else {
      assert(bucket->is_locked(), "Bucket must be locked.");
    }
  }
  // If there is a paused resize we also need to visit the already resized items.
  table = get_new_table();
  if (table == NULL) {
    return;
  }
  DEBUG_ONLY(if (table == POISON_PTR) { return; })
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    assert(!bucket->is_locked(), "Bucket must be unlocked.");
    if (!visit_nodes(bucket, scan_f)) {
      return;
    }
  }
}

template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  assert(!SafepointSynchronize::is_at_safepoint(),
         "must be outside a safepoint");
  lock_resize_lock(thread);
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
}

template <typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline TableStatistics ConcurrentHashTable<CONFIG, F>::
  statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)
{
  NumberSeq summary;
  size_t literal_bytes = 0;
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    size_t count = 0;
    Bucket* bucket = table->get_bucket(bucket_it);
    if (bucket->have_redirect() || bucket->is_locked()) {
      continue;
    }
    Node* current_node = bucket->first();
    while (current_node != NULL) {
      ++count;
      literal_bytes += vs_f(current_node->value());
      current_node = current_node->next();
    }
    summary.add((double)count);
  }

  return TableStatistics(_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node));
}

template <typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline TableStatistics ConcurrentHashTable<CONFIG, F>::
  statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old)
{
  if (!try_resize_lock(thread)) {
    return old;
  }

  TableStatistics ts = statistics_calculate(thread, vs_f);
  unlock_resize_lock(thread);

  return ts;
}

template <typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
                outputStream* st, const char* table_name)
{
  if (!try_resize_lock(thread)) {
    st->print_cr("statistics unavailable at this moment");
    return;
  }

  TableStatistics ts = statistics_calculate(thread, vs_f);
  unlock_resize_lock(thread);

  ts.print(st, table_name);
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  try_move_nodes_to(Thread* thread, ConcurrentHashTable<CONFIG, F>* to_cht)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");
  for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
    Bucket* bucket = _table->get_bucket(bucket_it);
    assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
    while (bucket->first() != NULL) {
      Node* move_node = bucket->first();
      bool ok = bucket->cas_first(move_node->next(), move_node);
      assert(ok, "Uncontended cas must work");
      bool dead_hash = false;
      size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
      if (!dead_hash) {
        Bucket* insert_bucket = to_cht->get_bucket(insert_hash);
        assert(!bucket->have_redirect() && !bucket->is_locked(), "No bit should be present");
        move_node->set_next(insert_bucket->first());
        ok = insert_bucket->cas_first(move_node, insert_bucket->first());
        assert(ok, "Uncontended cas must work");
      }
    }
  }
  unlock_resize_lock(thread);
  return true;
}

#endif // SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP
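// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, kept in a comment so the header itself is
// unchanged). The CONFIG contract (Value, get_hash(), allocate_node(),
// free_node()) and the lookup-functor shape (get_hash()/equals()) are inferred
// from how this file calls into CONFIG and LOOKUP_FUNC; the names IntConfig,
// IntLookup and example() below are hypothetical. See concurrentHashTable.hpp
// and existing users (e.g. the symbol and string tables) for the
// authoritative interface.
//
//   struct IntConfig : public AllStatic {
//     typedef int Value;
//     static uintx get_hash(Value const& v, bool* is_dead) {
//       *is_dead = false;                // nothing is ever "dead" here
//       return (uintx)v * 0x9e3779b9u;   // any reasonable hash
//     }
//     static void* allocate_node(void* context, size_t size, Value const& v) {
//       return AllocateHeap(size, mtInternal);
//     }
//     static void free_node(void* context, void* memory, Value const& v) {
//       FreeHeap(memory);
//     }
//   };
//
//   struct IntLookup {
//     int _val;
//     IntLookup(int val) : _val(val) {}
//     uintx get_hash() const { bool d; return IntConfig::get_hash(_val, &d); }
//     bool equals(int const* value, bool* is_dead) { return *value == _val; }
//   };
//
//   void example(Thread* thread) {
//     // 2^16 initial buckets, 2^24 maximum, grow hint of 4 nodes per chain.
//     ConcurrentHashTable<IntConfig, mtInternal> cht(16, 24, 4);
//     IntLookup lookup(17);
//     cht.insert(thread, lookup, 17);     // insert inside a GlobalCounter CS
//     int result = 0;
//     auto copy_out = [&](int* v) { result = *v; };
//     cht.get(thread, lookup, copy_out);  // lookup; copy_out runs if found
//   }
// ---------------------------------------------------------------------------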