GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/utilities/concurrentHashTable.inline.hpp

/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP
#define SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP

#include "utilities/concurrentHashTable.hpp"

#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/numberSeq.hpp"
#include "utilities/spinYield.hpp"

// 2^30 = 1G buckets
#define SIZE_BIG_LOG2 30
// 2^5 = 32 buckets
#define SIZE_SMALL_LOG2 5

// Number from spinYield.hpp. In some loops SpinYield would be unfair.
#define SPINPAUSES_PER_YIELD 8192

#ifdef ASSERT
#ifdef _LP64
// Two low bits are not usable.
static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac);
#else
// Two low bits are not usable.
static const void* POISON_PTR = (void*)0xffbadbac;
#endif
#endif

// Node
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
  Node::next() const
{
  return Atomic::load_acquire(&_next);
}

// Bucket
template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
  Bucket::first_raw() const
{
  return Atomic::load_acquire(&_first);
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::release_assign_node_ptr(
    typename ConcurrentHashTable<CONFIG, F>::Node* const volatile * dst,
    typename ConcurrentHashTable<CONFIG, F>::Node* node) const
{
  // Due to this assert this method is not static.
  assert(is_locked(), "Must be locked.");
  Node** tmp = (Node**)dst;
  Atomic::release_store(tmp, clear_set_state(node, *dst));
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
  Bucket::first() const
{
  // We strip the state bits before returning the ptr.
  return clear_state(Atomic::load_acquire(&_first));
}
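
// Note (added sketch, not part of the original sources): a Bucket keeps its
// lock and redirect flags in the two low, alignment-spare bits of _first;
// first_raw() returns the tagged pointer, first() strips the tag.  The helpers
// set_state()/clear_state()/is_state()/clear_set_state() are declared in
// concurrentHashTable.hpp; assuming the usual low-bit tagging, they behave
// roughly like:
//
//   clear_state(p)     ~= (Node*)((uintptr_t)p & ~(uintptr_t)0x3);
//   set_state(p, bit)  ~= (Node*)((uintptr_t)p | bit);
//   is_state(p, bit)   ~= ((uintptr_t)p & bit) != 0;
//
// E.g. a locked bucket whose chain starts at the node at 0x7f0040 would store
// a tagged value such as 0x7f0041 in _first: first_raw() returns the tagged
// value, first() returns 0x7f0040.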

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  Bucket::have_redirect() const
{
  return is_state(first_raw(), STATE_REDIRECT_BIT);
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  Bucket::is_locked() const
{
  return is_state(first_raw(), STATE_LOCK_BIT);
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::lock()
{
  int i = 0;
  // SpinYield would be unfair here
  while (!this->trylock()) {
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding will give the CPU to another runnable
      // thread if there is no free CPU available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::release_assign_last_node_next(
    typename ConcurrentHashTable<CONFIG, F>::Node* node)
{
  assert(is_locked(), "Must be locked.");
  Node* const volatile * ret = first_ptr();
  while (clear_state(*ret) != NULL) {
    ret = clear_state(*ret)->next_ptr();
  }
  release_assign_node_ptr(ret, node);
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  Bucket::cas_first(typename ConcurrentHashTable<CONFIG, F>::Node* node,
                    typename ConcurrentHashTable<CONFIG, F>::Node* expect
                    )
{
  if (is_locked()) {
    return false;
  }
  if (Atomic::cmpxchg(&_first, expect, node) == expect) {
    return true;
  }
  return false;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  Bucket::trylock()
{
  if (is_locked()) {
    return false;
  }
  // We will expect a clean first pointer.
  Node* tmp = first();
  if (Atomic::cmpxchg(&_first, tmp, set_state(tmp, STATE_LOCK_BIT)) == tmp) {
    return true;
  }
  return false;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::unlock()
{
  assert(is_locked(), "Must be locked.");
  assert(!have_redirect(),
         "Unlocking a bucket after it has reached terminal state.");
  Atomic::release_store(&_first, clear_state(first()));
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  Bucket::redirect()
{
  assert(is_locked(), "Must be locked.");
  Atomic::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
}

// InternalTable
template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  InternalTable::InternalTable(size_t log2_size)
    : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size),
      _hash_mask(~(~((size_t)0) << _log2_size))
{
  assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2,
         "Bad size");
  _buckets = NEW_C_HEAP_ARRAY(Bucket, _size, F);
  // Use placement new for each element instead of new[] which could use more
  // memory than allocated.
  for (size_t i = 0; i < _size; ++i) {
    new (_buckets + i) Bucket();
  }
}

template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  InternalTable::~InternalTable()
{
  FREE_C_HEAP_ARRAY(Bucket, _buckets);
}

// ScopedCS
template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht)
    : _thread(thread),
      _cht(cht),
      _cs_context(GlobalCounter::critical_section_begin(_thread))
{
  // This version is published now.
  if (Atomic::load_acquire(&_cht->_invisible_epoch) != NULL) {
    Atomic::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
  }
}

template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  ScopedCS::~ScopedCS()
{
  GlobalCounter::critical_section_end(_thread, _cs_context);
}

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
  MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
}

// HaveDeletables
template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                      EVALUATE_FUNC& eval_f,
                                                      Bucket* prefetch_bucket)
{
  // Instantiated for pointer type (true), so we can use prefetch.
  // When visiting all Nodes, doing this prefetch gives around 30%.
  Node* pref = prefetch_bucket != NULL ? prefetch_bucket->first() : NULL;
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (pref != NULL) {
      Prefetch::read(*pref->value(), 0);
      pref = pref->next();
    }
    // Read next() Node* once. May be racing with a thread moving the next
    // pointers.
    Node* next_pref = next->next();
    if (next_pref != NULL) {
      Prefetch::read(*next_pref->value(), 0);
    }
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

template <typename CONFIG, MEMFLAGS F>
template <bool b, typename EVALUATE_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                   EVALUATE_FUNC& eval_f,
                                                   Bucket* preb)
{
  for (Node* next = bucket->first(); next != NULL ; next = next->next()) {
    if (eval_f(next->value())) {
      return true;
    }
  }
  return false;
}

// ConcurrentHashTable
template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  write_synchonize_on_visible_epoch(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  OrderAccess::fence(); // Prevent below load from floating up.
  // If no reader saw this version we can skip write_synchronize.
  if (Atomic::load_acquire(&_invisible_epoch) == thread) {
    return;
  }
  assert(_invisible_epoch == NULL, "Two thread doing bulk operations");
  // We set this/next version that we are synchronizing for to not published.
  // A reader will zero this flag if it reads this/next version.
  Atomic::release_store(&_invisible_epoch, thread);
  GlobalCounter::write_synchronize();
}
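
// Added explanatory note (not part of the original sources): _invisible_epoch
// is a small optimization on top of GlobalCounter.  The bulk operation
// publishes itself by storing its Thread* into _invisible_epoch, while any
// reader entering a critical section (see the ScopedCS constructor above)
// clears the field back to NULL.  If the writer later finds its own Thread*
// still stored there, no reader has started a new critical section since the
// last synchronization, so the expensive GlobalCounter::write_synchronize()
// can be skipped.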

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  try_resize_lock(Thread* locker)
{
  if (_resize_lock->try_lock()) {
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got mutex but internal state is locked.
      _resize_lock->unlock();
      return false;
    }
  } else {
    return false;
  }
  _invisible_epoch = 0;
  _resize_lock_owner = locker;
  return true;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  lock_resize_lock(Thread* locker)
{
  size_t i = 0;
  // If the lock is held by some other thread, the chances that it is released
  // quickly are low. So we prefer yielding.
  SpinYield yield(1, 512);
  do {
    _resize_lock->lock_without_safepoint_check();
    // If the holder of the lock dropped the mutex for a safepoint, the mutex
    // might be unlocked while _resize_lock_owner still contains the owner.
    if (_resize_lock_owner != NULL) {
      assert(locker != _resize_lock_owner, "Already own lock");
      // We got mutex but internal state is locked.
      _resize_lock->unlock();
      yield.wait();
    } else {
      break;
    }
  } while(true);
  _resize_lock_owner = locker;
  _invisible_epoch = 0;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  unlock_resize_lock(Thread* locker)
{
  _invisible_epoch = 0;
  assert(locker == _resize_lock_owner, "Not unlocked by locker.");
  _resize_lock_owner = NULL;
  _resize_lock->unlock();
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  free_nodes()
{
  // We assume we are not MT during freeing.
  for (size_t node_it = 0; node_it < _table->_size; node_it++) {
    Bucket* bucket = _table->get_buckets() + node_it;
    Node* node = bucket->first();
    while (node != NULL) {
      Node* free_node = node;
      node = node->next();
      Node::destroy_node(_context, free_node);
    }
  }
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
  get_table() const
{
  return Atomic::load_acquire(&_table);
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
  get_new_table() const
{
  return Atomic::load_acquire(&_new_table);
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
ConcurrentHashTable<CONFIG, F>::
  set_table_from_new()
{
  InternalTable* old_table = _table;
  // Publish the new table.
  Atomic::release_store(&_table, _new_table);
  // All must see this.
  GlobalCounter::write_synchronize();
  // _new_table not read any more.
  _new_table = NULL;
  DEBUG_ONLY(_new_table = (InternalTable*)POISON_PTR;)
  return old_table;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_grow_range(Thread* thread, size_t start, size_t stop)
{
  assert(stop <= _table->_size, "Outside backing array");
  assert(_new_table != NULL, "Grow not proper setup before start");
  // The state is also copied here. Hence all buckets in the new table will be
  // locked. I call the siblings odd/even, where even has high bit 0 and odd
  // has high bit 1.
  for (size_t even_index = start; even_index < stop; even_index++) {
    Bucket* bucket = _table->get_bucket(even_index);

    bucket->lock();

    size_t odd_index = even_index + _table->_size;
    _new_table->get_buckets()[even_index] = *bucket;
    _new_table->get_buckets()[odd_index] = *bucket;

    // Lockers are moved to the new table, where they will wait until unlock() below.
    bucket->redirect(); /* Must release stores above */

    // When this is done we have separated the nodes into corresponding buckets
    // in the new table.
    if (!unzip_bucket(thread, _table, _new_table, even_index, odd_index)) {
      // If the bucket is empty, unzip does nothing.
      // We must make sure readers go to the new table before we poison the bucket.
      DEBUG_ONLY(GlobalCounter::write_synchronize();)
    }

    // Unlock for writes into the new table buckets.
    _new_table->get_bucket(even_index)->unlock();
    _new_table->get_bucket(odd_index)->unlock();

    DEBUG_ONLY(
       bucket->release_assign_node_ptr(
          _table->get_bucket(even_index)->first_ptr(), (Node*)POISON_PTR);
    )
  }
}
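
// Added worked example (not part of the original sources), assuming
// bucket_idx_hash() masks the hash with _hash_mask as set up in the
// InternalTable constructor: growing from 2^5 = 32 to 64 buckets widens the
// mask from 0x1f to 0x3f.  For even_index == 5 the sibling is
// odd_index == 5 + 32 == 37.  A node with hash 0x25 (0x25 & 0x1f == 5) ends up
// in odd bucket 37 (0x25 & 0x3f == 0x25), while a node with hash 0x05 stays in
// even bucket 5.  unzip_bucket() below performs exactly this separation on the
// copied chain.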

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
{
  Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
  assert(bucket->is_locked(), "Must be locked.");
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  bool have_dead = false;
  while (rem_n != NULL) {
    if (lookup_f.equals(rem_n->value(), &have_dead)) {
      bucket->release_assign_node_ptr(rem_n_prev, rem_n->next());
      break;
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }

  bucket->unlock();

  if (rem_n == NULL) {
    return false;
  }
  // Publish the deletion.
  GlobalCounter::write_synchronize();
  delete_f(rem_n->value());
  Node::destroy_node(_context, rem_n);
  JFR_ONLY(_stats_rate.remove();)
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
                            EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
{
  // Here we have the resize lock so the table is SMR safe, and there is no new
  // table. Can do this in parallel if we want.
  assert((is_mt && _resize_lock_owner != NULL) ||
         (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
  Node* ndel[BULK_DELETE_LIMIT];
  InternalTable* table = get_table();
  assert(start_idx < stop_idx, "Must be");
  assert(stop_idx <= _table->_size, "Must be");
  // Here we do a manual critical section, since we don't want to take the cost
  // of locking the bucket if there is nothing to delete. But we can have
  // concurrent single deletes. The _invisible_epoch can only be used by the
  // owner of _resize_lock, which is us here. Therefore we should not change it
  // in our own read-side.
  GlobalCounter::CSContext cs_context = GlobalCounter::critical_section_begin(thread);
  for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ?
                              table->get_bucket(bucket_it+1) : NULL;

    if (!HaveDeletables<IsPointer<VALUE>::value, EVALUATE_FUNC>::
        have_deletable(bucket, eval_f, prefetch_bucket)) {
      // Nothing to remove in this bucket.
      continue;
    }

    GlobalCounter::critical_section_end(thread, cs_context);
    // We left the critical section but the bucket cannot be removed while we
    // hold the _resize_lock.
    bucket->lock();
    size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel);
    bucket->unlock();
    if (is_mt) {
      GlobalCounter::write_synchronize();
    } else {
      write_synchonize_on_visible_epoch(thread);
    }
    for (size_t node_it = 0; node_it < nd; node_it++) {
      del_f(ndel[node_it]->value());
      Node::destroy_node(_context, ndel[node_it]);
      JFR_ONLY(_stats_rate.remove();)
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
    cs_context = GlobalCounter::critical_section_begin(thread);
  }
  GlobalCounter::critical_section_end(thread, cs_context);
}
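
// Added usage sketch (hypothetical, not part of the original sources): the
// EVALUATE_FUNC decides which values to unlink and the DELETE_FUNC runs on
// each unlinked value after the grace period.  For a table whose VALUE is
// MyEntry* (MyEntry and refcount() are illustrative only), the functors
// receive VALUE*, i.e. MyEntry**:
//
//   auto is_dead = [&](MyEntry** v) { return (*v)->refcount() == 0; };
//   auto free_it = [&](MyEntry** v) { delete *v; };
//   cht->try_bulk_delete(Thread::current(), is_dead, free_it);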

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
{
  assert(bucket->is_locked(), "Must be locked.");

  size_t dels = 0;
  Node* ndel[BULK_DELETE_LIMIT];
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    bool is_dead = false;
    lookup_f.equals(rem_n->value(), &is_dead);
    if (is_dead) {
      ndel[dels++] = rem_n;
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == BULK_DELETE_LIMIT) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  if (dels > 0) {
    GlobalCounter::write_synchronize();
    for (size_t node_it = 0; node_it < dels; node_it++) {
      Node::destroy_node(_context, ndel[node_it]);
      JFR_ONLY(_stats_rate.remove();)
      DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
    }
  }
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
ConcurrentHashTable<CONFIG, F>::
  get_bucket(uintx hash) const
{
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  if (bucket->have_redirect()) {
    table = get_new_table();
    bucket = get_bucket_in(table, hash);
  }
  return bucket;
}

template <typename CONFIG, MEMFLAGS F>
inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
ConcurrentHashTable<CONFIG, F>::
  get_bucket_locked(Thread* thread, const uintx hash)
{
  Bucket* bucket;
  int i = 0;
  // SpinYield would be unfair here
  while(true) {
    {
      // We need a critical section to protect the table itself. But if we fail
      // we must leave the critical section, otherwise we would deadlock.
      ScopedCS cs(thread, this);
      bucket = get_bucket(hash);
      if (bucket->trylock()) {
        break; /* ends critical section */
      }
    } /* ends critical section */
    if ((++i) == SPINPAUSES_PER_YIELD) {
      // On a contemporary OS, yielding will give the CPU to another runnable
      // thread if there is no free CPU available.
      os::naked_yield();
      i = 0;
    } else {
      SpinPause();
    }
  }
  return bucket;
}

// Always called within critical section
template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
typename ConcurrentHashTable<CONFIG, F>::Node*
ConcurrentHashTable<CONFIG, F>::
  get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
           bool* have_dead, size_t* loops) const
{
  size_t loop_count = 0;
  Node* node = bucket->first();
  while (node != NULL) {
    bool is_dead = false;
    ++loop_count;
    if (lookup_f.equals(node->value(), &is_dead)) {
      break;
    }
    if (is_dead && !(*have_dead)) {
      *have_dead = true;
    }
    node = node->next();
  }
  if (loops != NULL) {
    *loops = loop_count;
  }
  return node;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  unzip_bucket(Thread* thread, InternalTable* old_table,
               InternalTable* new_table, size_t even_index, size_t odd_index)
{
  Node* aux = old_table->get_bucket(even_index)->first();
  if (aux == NULL) {
    // This is an empty bucket and in debug we poison the first ptr in the bucket.
    // Therefore we must make sure no readers are looking at this bucket.
    // If we don't do a write_synch here, the caller must do it.
    return false;
  }
  Node* delete_me = NULL;
  Node* const volatile * even = new_table->get_bucket(even_index)->first_ptr();
  Node* const volatile * odd = new_table->get_bucket(odd_index)->first_ptr();
  while (aux != NULL) {
    bool dead_hash = false;
    size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash);
    Node* aux_next = aux->next();
    if (dead_hash) {
      delete_me = aux;
      // This item is dead, move both lists to the next node.
      new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                aux_next);
      new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                 aux_next);
    } else {
      size_t aux_index = bucket_idx_hash(new_table, aux_hash);
      if (aux_index == even_index) {
        // This is an even node, so move odd to aux/even next.
        new_table->get_bucket(odd_index)->release_assign_node_ptr(odd,
                                                                  aux_next);
        // Keep in even list
        even = aux->next_ptr();
      } else if (aux_index == odd_index) {
        // This is an odd node, so move even to aux/odd next.
        new_table->get_bucket(even_index)->release_assign_node_ptr(even,
                                                                   aux_next);
        // Keep in odd list
        odd = aux->next_ptr();
      } else {
        fatal("aux_index does not match even or odd indices");
      }
    }
    aux = aux_next;

    // We can only move 1 pointer otherwise a reader might be moved to the wrong
    // chain. E.g. looking for an even hash value but got moved to the odd bucket
    // chain.
    write_synchonize_on_visible_epoch(thread);
    if (delete_me != NULL) {
      Node::destroy_node(_context, delete_me);
      delete_me = NULL;
    }
  }
  return true;
}
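
// Added worked example (not part of the original sources): assume old bucket 5
// of a 32-bucket table holds the chain A(hash 0x05) -> B(0x25) -> C(0x45).
// internal_grow_range() made both new buckets 5 and 37 start at A.  B belongs
// to the odd bucket (0x25 & 0x3f == 37), so the even chain is spliced past B;
// C belongs to the even bucket (0x45 & 0x3f == 5), so the odd chain is spliced
// past C.  The result is new bucket 5 = A -> C and new bucket 37 = B, with one
// write_synchonize_on_visible_epoch() between each single-pointer move so that
// a concurrent reader is never detached from the chain it is following.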

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_shrink_prolog(Thread* thread, size_t log2_size)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  if (_table->_log2_size == _log2_start_size ||
      _table->_log2_size <= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }
  _new_table = new InternalTable(_table->_log2_size - 1);
  return true;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_shrink_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");

  InternalTable* old_table = set_table_from_new();
  _size_limit_reached = false;
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i++)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_shrink_range(Thread* thread, size_t start, size_t stop)
{
  // The state is also copied here.
  // Hence all buckets in the new table will be locked.
  for (size_t bucket_it = start; bucket_it < stop; bucket_it++) {
    size_t even_hash_index = bucket_it; // High bit 0
    size_t odd_hash_index = bucket_it + _new_table->_size; // High bit 1

    Bucket* b_old_even = _table->get_bucket(even_hash_index);
    Bucket* b_old_odd  = _table->get_bucket(odd_hash_index);

    b_old_even->lock();
    b_old_odd->lock();

    _new_table->get_buckets()[bucket_it] = *b_old_even;

    // Put chains together.
    _new_table->get_bucket(bucket_it)->
      release_assign_last_node_next(*(b_old_odd->first_ptr()));

    b_old_even->redirect();
    b_old_odd->redirect();

    write_synchonize_on_visible_epoch(thread);

    // Unlock for writes into the new smaller table.
    _new_table->get_bucket(bucket_it)->unlock();

    DEBUG_ONLY(b_old_even->release_assign_node_ptr(b_old_even->first_ptr(),
                                                   (Node*)POISON_PTR);)
    DEBUG_ONLY(b_old_odd->release_assign_node_ptr(b_old_odd->first_ptr(),
                                                  (Node*)POISON_PTR);)
  }
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_shrink(Thread* thread, size_t log2_size)
{
  if (!internal_shrink_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_shrink_range(thread, 0, _new_table->_size);
  internal_shrink_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_reset(size_t log2_size)
{
  assert(_table != NULL, "table failed");
  assert(_log2_size_limit >= log2_size, "bad ergo");

  delete _table;
  // Create and publish a new table
  InternalTable* table = new InternalTable(log2_size);
  _size_limit_reached = (log2_size == _log2_size_limit);
  Atomic::release_store(&_table, table);
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_grow_prolog(Thread* thread, size_t log2_size)
{
  // We only do this double checking of _size_limit_reached/is_max_size_reached()
  // in the grow path, since grow means high load on the table while shrink
  // means low load.
  if (is_max_size_reached()) {
    return false;
  }
  if (!try_resize_lock(thread)) {
    // Either we have an ongoing resize or an operation which doesn't want us
    // to resize now.
    return false;
  }
  if (is_max_size_reached() || _table->_log2_size >= log2_size) {
    unlock_resize_lock(thread);
    return false;
  }

  _new_table = new InternalTable(_table->_log2_size + 1);

  if (_new_table->_log2_size == _log2_size_limit) {
    _size_limit_reached = true;
  }

  return true;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  internal_grow_epilog(Thread* thread)
{
  assert(_resize_lock_owner == thread, "Should be locked");

  InternalTable* old_table = set_table_from_new();
  unlock_resize_lock(thread);
#ifdef ASSERT
  for (size_t i = 0; i < old_table->_size; i++) {
    assert(old_table->get_bucket(i++)->first() == POISON_PTR,
           "No poison found");
  }
#endif
  // ABA safe, old_table not visible to any other threads.
  delete old_table;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_grow(Thread* thread, size_t log2_size)
{
  if (!internal_grow_prolog(thread, log2_size)) {
    assert(_resize_lock_owner != thread, "Re-size lock held");
    return false;
  }
  assert(_resize_lock_owner == thread, "Should be locked by me");
  internal_grow_range(thread, 0, _table->_size);
  internal_grow_epilog(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

// Always called within critical section
template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC>
inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
  internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
{
  bool clean = false;
  size_t loops = 0;
  VALUE* ret = NULL;

  const Bucket* bucket = get_bucket(lookup_f.get_hash());
  Node* node = get_node(bucket, lookup_f, &clean, &loops);
  if (node != NULL) {
    ret = node->value();
  }
  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  return ret;
}

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename FOUND_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  internal_insert_get(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
                      FOUND_FUNC& foundf, bool* grow_hint, bool* clean_hint)
{
  bool ret = false;
  bool clean = false;
  bool locked;
  size_t loops = 0;
  size_t i = 0;
  uintx hash = lookup_f.get_hash();
  Node* new_node = Node::create_node(_context, value, NULL);

  while (true) {
    {
      ScopedCS cs(thread, this); /* protects the table/bucket */
      Bucket* bucket = get_bucket(hash);
      Node* first_at_start = bucket->first();
      Node* old = get_node(bucket, lookup_f, &clean, &loops);
      if (old == NULL) {
        new_node->set_next(first_at_start);
        if (bucket->cas_first(new_node, first_at_start)) {
          foundf(new_node->value());
          JFR_ONLY(_stats_rate.add();)
          new_node = NULL;
          ret = true;
          break; /* leave critical section */
        }
        // CAS failed; we must leave the critical section and retry.
        locked = bucket->is_locked();
      } else {
        // There is a duplicate.
        foundf(old->value());
        break; /* leave critical section */
      }
    } /* leave critical section */
    i++;
    if (locked) {
      os::naked_yield();
    } else {
      SpinPause();
    }
  }

  if (new_node != NULL) {
    // CAS failed and a duplicate was inserted, we must free this node.
    Node::destroy_node(_context, new_node);
  } else if (i == 0 && clean) {
    // We only do cleaning on fast inserts.
    Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
    delete_in_bucket(thread, bucket, lookup_f);
    bucket->unlock();
    clean = false;
  }

  if (grow_hint != NULL) {
    *grow_hint = loops > _grow_hint;
  }

  if (clean_hint != NULL) {
    *clean_hint = clean;
  }

  return ret;
}

template <typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  visit_nodes(Bucket* bucket, FUNC& visitor_f)
{
  Node* current_node = bucket->first();
  while (current_node != NULL) {
    if (!visitor_f(current_node->value())) {
      return false;
    }
    current_node = current_node->next();
  }
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  do_scan_locked(Thread* thread, FUNC& scan_f)
{
  assert(_resize_lock_owner == thread, "Re-size lock not held");
  // We could do a critical section over the entire loop but that would block
  // updates for a long time. Instead we choose to block resizes.
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) {
      break; /* ends critical section */
    }
  } /* ends critical section */
}

template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC>
inline size_t ConcurrentHashTable<CONFIG, F>::
  delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                     size_t num_del, Node** ndel)
{
  size_t dels = 0;
  Node* const volatile * rem_n_prev = bucket->first_ptr();
  Node* rem_n = bucket->first();
  while (rem_n != NULL) {
    if (eval_f(rem_n->value())) {
      ndel[dels++] = rem_n;
      Node* next_node = rem_n->next();
      bucket->release_assign_node_ptr(rem_n_prev, next_node);
      rem_n = next_node;
      if (dels == num_del) {
        break;
      }
    } else {
      rem_n_prev = rem_n->next_ptr();
      rem_n = rem_n->next();
    }
  }
  return dels;
}

// Constructor
template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint, void* context)
    : _context(context), _new_table(NULL), _log2_size_limit(log2size_limit),
      _log2_start_size(log2size), _grow_hint(grow_hint),
      _size_limit_reached(false), _resize_lock_owner(NULL),
      _invisible_epoch(0)
{
  _stats_rate = TableRateStatistics();
  _resize_lock =
    new Mutex(Mutex::leaf, "ConcurrentHashTable", true,
              Mutex::_safepoint_check_never);
  _table = new InternalTable(log2size);
  assert(log2size_limit >= log2size, "bad ergo");
  _size_limit_reached = _table->_log2_size == _log2_size_limit;
}
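
// Added usage sketch (hypothetical, not part of the original sources).  The
// table is parameterized by a CONFIG policy class; MyEntry, MyConfig and
// MyLookup below are illustrative names, and the allocation hooks that CONFIG
// must also provide are declared in concurrentHashTable.hpp, not here:
//
//   struct MyConfig {
//     typedef MyEntry* Value;
//     static uintx get_hash(Value const& v, bool* is_dead) {
//       *is_dead = false;      // entry never becomes dead in this sketch
//       return v->hash();
//     }
//     // ... plus the node allocation/free hooks required by the CONFIG concept
//   };
//   typedef ConcurrentHashTable<MyConfig, mtInternal> MyTable;
//
//   // A lookup functor: get_hash() selects the bucket, equals() matches a
//   // candidate value and may flag it as dead.
//   struct MyLookup {
//     uintx _hash;
//     const char* _key;
//     uintx get_hash() const { return _hash; }
//     bool equals(MyEntry** value, bool* is_dead) const {
//       return (*value)->matches(_key);
//     }
//   };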

template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>::
  ~ConcurrentHashTable()
{
  delete _resize_lock;
  free_nodes();
  delete _table;
}

template <typename CONFIG, MEMFLAGS F>
inline size_t ConcurrentHashTable<CONFIG, F>::
  get_size_log2(Thread* thread)
{
  ScopedCS cs(thread, this);
  return _table->_log2_size;
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  shrink(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
  bool ret = internal_shrink(thread, tmp);
  return ret;
}

template <typename CONFIG, MEMFLAGS F>
inline void ConcurrentHashTable<CONFIG, F>::
  unsafe_reset(size_t size_log2)
{
  size_t tmp = size_log2 == 0 ? _log2_start_size : size_log2;
  internal_reset(tmp);
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  grow(Thread* thread, size_t size_limit_log2)
{
  size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
  return internal_grow(thread, tmp);
}

template <typename CONFIG, MEMFLAGS F>
template <typename LOOKUP_FUNC, typename FOUND_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
{
  bool ret = false;
  ScopedCS cs(thread, this);
  VALUE* val = internal_get(thread, lookup_f, grow_hint);
  if (val != NULL) {
    found_f(val);
    ret = true;
  }
  return ret;
}
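
// Added usage sketch (hypothetical, continuing the MyTable/MyLookup sketch at
// the constructor above; not part of the original sources):
//
//   MyTable* table = new MyTable(16, 20, 4, NULL); // log2 size 16, limit 20
//   MyLookup lookup = { compute_hash(key), key };  // compute_hash is illustrative
//   MyEntry* result = NULL;
//   auto on_found = [&](MyEntry** v) { result = *v; };
//   bool grow_hint = false;
//   if (table->get(Thread::current(), lookup, on_found, &grow_hint)) {
//     // result was published inside the table's critical section; its further
//     // lifetime is the caller's responsibility.
//   }
//   if (grow_hint) {
//     table->grow(Thread::current(), 0); // 0 means "up to _log2_size_limit"
//   }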

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  unsafe_insert(const VALUE& value) {
  bool dead_hash = false;
  size_t hash = CONFIG::get_hash(value, &dead_hash);
  if (dead_hash) {
    return false;
  }
  // This is an unsafe operation.
  InternalTable* table = get_table();
  Bucket* bucket = get_bucket_in(table, hash);
  assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
  Node* new_node = Node::create_node(_context, value, bucket->first());
  if (!bucket->cas_first(new_node, bucket->first())) {
    assert(false, "bad");
  }
  JFR_ONLY(_stats_rate.add();)
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  try_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  do_scan(Thread* thread, SCAN_FUNC& scan_f)
{
  assert(!SafepointSynchronize::is_at_safepoint(),
         "must be outside a safepoint");
  assert(_resize_lock_owner != thread, "Re-size lock held");
  lock_resize_lock(thread);
  do_scan_locked(thread, scan_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
}

template <typename CONFIG, MEMFLAGS F>
template <typename SCAN_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  do_safepoint_scan(SCAN_FUNC& scan_f)
{
  // We only allow this method to be used during a safepoint.
  assert(SafepointSynchronize::is_at_safepoint(),
         "must only be called in a safepoint");
  assert(Thread::current()->is_VM_thread(),
         "should be in vm thread");

  // Here we skip protection,
  // thus no other thread may use this table at the same time.
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    // If the bucket has a redirect, the items will be in the new table.
    // We must visit them there, since the new table will contain any
    // concurrent inserts done after this bucket was resized.
    // If the bucket doesn't have the redirect flag, all items are in this table.
    if (!bucket->have_redirect()) {
      if (!visit_nodes(bucket, scan_f)) {
        return;
      }
    } else {
      assert(bucket->is_locked(), "Bucket must be locked.");
    }
  }
  // If there is a paused resize, we also need to visit the already resized items.
  table = get_new_table();
  if (table == NULL) {
    return;
  }
  DEBUG_ONLY(if (table == POISON_PTR) { return; })
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    Bucket* bucket = table->get_bucket(bucket_it);
    assert(!bucket->is_locked(), "Bucket must be unlocked.");
    if (!visit_nodes(bucket, scan_f)) {
      return;
    }
  }
}

template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline bool ConcurrentHashTable<CONFIG, F>::
  try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
  assert(_resize_lock_owner != thread, "Re-size lock held");
  return true;
}

template <typename CONFIG, MEMFLAGS F>
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
{
  assert(!SafepointSynchronize::is_at_safepoint(),
         "must be outside a safepoint");
  lock_resize_lock(thread);
  do_bulk_delete_locked(thread, eval_f, del_f);
  unlock_resize_lock(thread);
}

template <typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline TableStatistics ConcurrentHashTable<CONFIG, F>::
  statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)
{
  NumberSeq summary;
  size_t literal_bytes = 0;
  InternalTable* table = get_table();
  for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
    ScopedCS cs(thread, this);
    size_t count = 0;
    Bucket* bucket = table->get_bucket(bucket_it);
    if (bucket->have_redirect() || bucket->is_locked()) {
      continue;
    }
    Node* current_node = bucket->first();
    while (current_node != NULL) {
      ++count;
      literal_bytes += vs_f(current_node->value());
      current_node = current_node->next();
    }
    summary.add((double)count);
  }

  return TableStatistics(_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node));
}

template <typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline TableStatistics ConcurrentHashTable<CONFIG, F>::
  statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old)
{
  if (!try_resize_lock(thread)) {
    return old;
  }

  TableStatistics ts = statistics_calculate(thread, vs_f);
  unlock_resize_lock(thread);

  return ts;
}

template <typename CONFIG, MEMFLAGS F>
template <typename VALUE_SIZE_FUNC>
inline void ConcurrentHashTable<CONFIG, F>::
  statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f,
                outputStream* st, const char* table_name)
{
  if (!try_resize_lock(thread)) {
    st->print_cr("statistics unavailable at this moment");
    return;
  }

  TableStatistics ts = statistics_calculate(thread, vs_f);
  unlock_resize_lock(thread);

  ts.print(st, table_name);
}

template <typename CONFIG, MEMFLAGS F>
inline bool ConcurrentHashTable<CONFIG, F>::
  try_move_nodes_to(Thread* thread, ConcurrentHashTable<CONFIG, F>* to_cht)
{
  if (!try_resize_lock(thread)) {
    return false;
  }
  assert(_new_table == NULL || _new_table == POISON_PTR, "Must be NULL");
  for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
    Bucket* bucket = _table->get_bucket(bucket_it);
    assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");
    while (bucket->first() != NULL) {
      Node* move_node = bucket->first();
      bool ok = bucket->cas_first(move_node->next(), move_node);
      assert(ok, "Uncontended cas must work");
      bool dead_hash = false;
      size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash);
      if (!dead_hash) {
        Bucket* insert_bucket = to_cht->get_bucket(insert_hash);
        assert(!bucket->have_redirect() && !bucket->is_locked(), "Not bit should be present");
        move_node->set_next(insert_bucket->first());
        ok = insert_bucket->cas_first(move_node, insert_bucket->first());
        assert(ok, "Uncontended cas must work");
      }
    }
  }
  unlock_resize_lock(thread);
  return true;
}

#endif // SHARE_UTILITIES_CONCURRENTHASHTABLE_INLINE_HPP