GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/utilities/concurrentHashTable.hpp
/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENTHASHTABLE_HPP
#define SHARE_UTILITIES_CONCURRENTHASHTABLE_HPP

#include "memory/allocation.hpp"
#include "utilities/globalCounter.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/tableStatistics.hpp"

// A mostly concurrent hash table where the read-side is wait-free, inserts are
// done with CAS, and deletes mutually exclude each other on a per-bucket basis.
// VALUE is the type kept inside each Node and CONFIG contains the hash and
// allocation methods. A CALLBACK_FUNC and LOOKUP_FUNC need to be provided for
// get and insert.
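//
// Illustrative sketch (not part of the original header): roughly what a CONFIG
// type looks like, based only on the CONFIG::Value, CONFIG::allocate_node() and
// CONFIG::free_node() uses further down in this file. MyConfig and the
// allocator calls shown are assumptions for illustration; see
// concurrentHashTable.inline.hpp for the exact contract, including the hashing
// hooks used by lookup functors.
//
//   struct MyConfig : public AllStatic {
//     typedef int64_t Value;  // becomes CONFIG::Value / VALUE
//     static void* allocate_node(void* context, size_t size, const Value& value) {
//       return AllocateHeap(size, mtInternal);
//     }
//     static void free_node(void* context, void* memory, const Value& value) {
//       FreeHeap(memory);
//     }
//   };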

class Thread;
class Mutex;

template <typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable : public CHeapObj<F> {
  typedef typename CONFIG::Value VALUE;
 private:
  // This is the internal node structure.
  // Only constructed with placement new from memory allocated with MEMFLAGS of
  // the InternalTable or user-defined memory.
  class Node {
   private:
    Node * volatile _next;
    VALUE _value;
   public:
    Node(const VALUE& value, Node* next = NULL)
      : _next(next), _value(value) {
      assert((((uintptr_t)this) & ((uintptr_t)0x3)) == 0,
             "Node must be 4-byte aligned.");
    }

    Node* next() const;
    void set_next(Node* node)         { _next = node; }
    Node* const volatile * next_ptr() { return &_next; }

    VALUE* value()                    { return &_value; }

    // Creates a node.
    static Node* create_node(void* context, const VALUE& value, Node* next = NULL) {
      return new (CONFIG::allocate_node(context, sizeof(Node), value)) Node(value, next);
    }
    // Destroys a node.
    static void destroy_node(void* context, Node* node) {
      CONFIG::free_node(context, (void*)node, node->_value);
    }

    void print_on(outputStream* st) const {};
    void print_value_on(outputStream* st) const {};
  };

  // Only constructed with placement new from an array allocated with MEMFLAGS
  // of InternalTable.
  class Bucket {
   private:

    // The embedded state in the two low bits of the first pointer is a spinlock
    // with 3 states: unlocked, locked and redirect. You must never busy-spin on
    // trylock() or call lock() without holding _resize_lock; that would
    // deadlock. Redirect can only be installed by the owner and is the final
    // state of a bucket. The only two valid flows are:
    //   unlocked -> locked -> unlocked
    //   unlocked -> locked -> redirect
    // The locked state only applies to an updater.
    // Readers only check for redirect.
    Node * volatile _first;

    static const uintptr_t STATE_LOCK_BIT     = 0x1;
    static const uintptr_t STATE_REDIRECT_BIT = 0x2;
    static const uintptr_t STATE_MASK         = 0x3;

    // Get the first pointer unmasked.
    Node* first_raw() const;

    // Methods to manipulate the embedded state.
    static bool is_state(Node* node, uintptr_t bits) {
      return (bits & (uintptr_t)node) == bits;
    }

    static Node* set_state(Node* n, uintptr_t bits) {
      return (Node*)(bits | (uintptr_t)n);
    }

    static uintptr_t get_state(Node* node) {
      return (((uintptr_t)node) & STATE_MASK);
    }

    static Node* clear_state(Node* node) {
      return (Node*)(((uintptr_t)node) & (~(STATE_MASK)));
    }

    static Node* clear_set_state(Node* node, Node* state) {
      return (Node*)(((uintptr_t)clear_state(node)) ^ get_state(state));
    }
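
    // Illustrative example (not from the original source): with 4-byte-aligned
    // nodes the two low bits of a Node* are always zero, so they can carry the
    // bucket state. For a node at address 0x1000:
    //   set_state((Node*)0x1000, STATE_LOCK_BIT)     == (Node*)0x1001  // locked
    //   set_state((Node*)0x1000, STATE_REDIRECT_BIT) == (Node*)0x1002  // redirect
    //   get_state((Node*)0x1001)                     == STATE_LOCK_BIT
    //   clear_state((Node*)0x1001)                   == (Node*)0x1000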

   public:
    // A bucket is only one pointer with the embedded state.
    Bucket() : _first(NULL) {};

    // Get the first pointer unmasked.
    Node* first() const;

    // Get a pointer to the const first pointer. Do not dereference this
    // pointer; the pointer pointed to _may_ contain an embedded state. Such a
    // pointer should only be used as input to release_assign_node_ptr.
    Node* const volatile * first_ptr() { return &_first; }

    // This is the only place where a pointer to a Node pointer that potentially
    // is _first should be changed. Otherwise we destroy the embedded state. We
    // only give out pointers to const Node pointers to avoid accidental
    // assignment, thus here we must cast the const part away. The method is not
    // static due to an assert.
    void release_assign_node_ptr(Node* const volatile * dst, Node* node) const;

    // This method assigns this bucket's last Node's next pointer to the input Node.
    void release_assign_last_node_next(Node* node);

    // Setting the first pointer must be done with CAS.
    bool cas_first(Node *node, Node* expect);

    // Returns true if this bucket is redirecting to a new table.
    // Redirect is a terminal state and will never change.
    bool have_redirect() const;

    // Return true if this bucket is locked for updates.
    bool is_locked() const;

    // Return true if this bucket was locked.
    bool trylock();

    // The bucket might be invalid due to a concurrent resize. The lock()
    // method does not respect that and can deadlock if the caller does not
    // hold _resize_lock.
    void lock();

    // Unlocks this bucket.
    void unlock();

    // Installs redirect in this bucket.
    // Prior to doing so you must have successfully locked this bucket.
    void redirect();
  };

  // The backing storage table holding the buckets, its size and mask-bits.
  // The table is always a power of two for two reasons:
  // - A resize can only halve or double the size
  //   (any power of two would also be possible).
  // - The hash can be masked to get the bucket index.
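  // Illustrative example (not from the original source): with log2_size == 4,
  // _size == 16 and _hash_mask == 0xF, so "hash & _hash_mask" is always a valid
  // index into the 16-entry bucket array.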
  class InternalTable : public CHeapObj<F> {
   private:
    Bucket* _buckets;        // Bucket array.
   public:
    const size_t _log2_size; // Size in log2.
    const size_t _size;      // Size in elements (2^_log2_size).

    // The mask used on the hash for selecting the bucket.
    // The masked value is guaranteed to be inside the buckets array.
    const size_t _hash_mask;

    // Create a backing table.
    InternalTable(size_t log2_size);
    ~InternalTable();

    Bucket* get_buckets() { return _buckets; }
    Bucket* get_bucket(size_t idx) { return &_buckets[idx]; }
  };

  // For materializing a supplied value.
  class LazyValueRetrieve {
   private:
    const VALUE& _val;
   public:
    LazyValueRetrieve(const VALUE& val) : _val(val) {}
    const VALUE& operator()() { return _val; }
  };

  void* _context;

  InternalTable* _table;      // Active table.
  InternalTable* _new_table;  // Table we are resizing to.

  // Default sizes.
  static const size_t DEFAULT_MAX_SIZE_LOG2 = 21;
  static const size_t DEFAULT_START_SIZE_LOG2 = 13;
  static const size_t DEFAULT_GROW_HINT = 4; // Chain length.
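  // Illustrative note (not from the original source): with these defaults the
  // table starts with 2^13 = 8192 buckets, can grow to at most 2^21 = 2097152
  // buckets, and a chain longer than 4 linked items hints that a grow is needed.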

  const size_t _log2_size_limit;  // The biggest size.
  const size_t _log2_start_size;  // Start size.
  const size_t _grow_hint;        // Number of linked items.

  volatile bool _size_limit_reached;

  // We serialize resizers and other bulk operations which do not support
  // concurrent resize with this lock.
  Mutex* _resize_lock;
  // Since we need to drop the mutex for safepoints, but must stop other threads
  // from taking the mutex after a safepoint, this field holds the actual lock
  // state. After acquiring the mutex you must check whether it is already
  // locked. If so you must drop the mutex until the real lock holder grabs it.
  volatile Thread* _resize_lock_owner;

  // Returns true if locking the mutex/state succeeded.
  bool try_resize_lock(Thread* locker);
  // Returns when both mutex and state are properly locked.
  void lock_resize_lock(Thread* locker);
  // Unlocks mutex and state.
  void unlock_resize_lock(Thread* locker);

  // This method sets the _invisible_epoch and does a write_synchronize.
  // Subsequent calls check the state of _invisible_epoch and determine if the
  // write_synchronize can be avoided. If not, it sets the _invisible_epoch
  // again and does a write_synchronize.
  void write_synchonize_on_visible_epoch(Thread* thread);
  // To be able to avoid write_synchronize in resize and other bulk operations,
  // this field keeps track of whether a version of the hash table has ever been
  // seen. We use the working thread pointer as a tag for debugging. The
  // _invisible_epoch can only be used by the owner of _resize_lock.
  volatile Thread* _invisible_epoch;

  // Scoped critical section, which also handles the invisible epochs.
  // An invisible epoch/version does not need a write_synchronize().
  class ScopedCS: public StackObj {
   protected:
    Thread* _thread;
    ConcurrentHashTable<CONFIG, F>* _cht;
    GlobalCounter::CSContext _cs_context;
   public:
    ScopedCS(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht);
    ~ScopedCS();
  };


  // Max number of deletes in one bucket chain during bulk delete.
  static const size_t BULK_DELETE_LIMIT = 256;

  // Simple getters and setters for the internal table.
  InternalTable* get_table() const;
  InternalTable* get_new_table() const;
  InternalTable* set_table_from_new();

  // Destroys all nodes.
  void free_nodes();

  // Mask away high bits of hash.
  static size_t bucket_idx_hash(InternalTable* table, const uintx hash) {
    return ((size_t)hash) & table->_hash_mask;
  }

  // Returns the bucket for the hash in that internal table.
  Bucket* get_bucket_in(InternalTable* table, const uintx hash) const {
    size_t bucket_index = bucket_idx_hash(table, hash);
    return table->get_bucket(bucket_index);
  }

  // Returns the correct bucket for reading and handles resizing.
  Bucket* get_bucket(const uintx hash) const;

  // Returns the correct bucket for updates and handles resizing.
  Bucket* get_bucket_locked(Thread* thread, const uintx hash);

  // Finds a node.
  template <typename LOOKUP_FUNC>
  Node* get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
                 bool* have_dead, size_t* loops = NULL) const;

  // Methods for shrinking.
  bool internal_shrink_prolog(Thread* thread, size_t log2_size);
  void internal_shrink_epilog(Thread* thread);
  void internal_shrink_range(Thread* thread, size_t start, size_t stop);
  bool internal_shrink(Thread* thread, size_t size_limit_log2);
  void internal_reset(size_t log2_size);

  // Methods for growing.
  bool unzip_bucket(Thread* thread, InternalTable* old_table,
                    InternalTable* new_table, size_t even_index,
                    size_t odd_index);
  bool internal_grow_prolog(Thread* thread, size_t log2_size);
  void internal_grow_epilog(Thread* thread);
  void internal_grow_range(Thread* thread, size_t start, size_t stop);
  bool internal_grow(Thread* thread, size_t log2_size);

  // Get a value.
  template <typename LOOKUP_FUNC>
  VALUE* internal_get(Thread* thread, LOOKUP_FUNC& lookup_f,
                      bool* grow_hint = NULL);

  // Insert and get current value.
  template <typename LOOKUP_FUNC, typename FOUND_FUNC>
  bool internal_insert_get(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
                           FOUND_FUNC& foundf, bool* grow_hint, bool* clean_hint);

  // Returns true if an item matching LOOKUP_FUNC is removed.
  // Calls DELETE_FUNC before destroying the node.
  template <typename LOOKUP_FUNC, typename DELETE_FUNC>
  bool internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f,
                       DELETE_FUNC& delete_f);

  // Visits nodes with FUNC.
  template <typename FUNC>
  static bool visit_nodes(Bucket* bucket, FUNC& visitor_f);

  // During shrink/grow we cannot guarantee that we only visit nodes once with
  // the current algorithm. To keep it simple, the caller must have locked
  // _resize_lock.
  template <typename FUNC>
  void do_scan_locked(Thread* thread, FUNC& scan_f);

  // Check for dead items in a bucket.
  template <typename EVALUATE_FUNC>
  size_t delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                            size_t num_del, Node** ndel);

  // Check for dead items in this table. During shrink/grow we cannot guarantee
  // that we only visit nodes once. To keep it simple, the caller must have
  // locked _resize_lock.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  void do_bulk_delete_locked(Thread* thread, EVALUATE_FUNC& eval_f
                             , DELETE_FUNC& del_f) {
    do_bulk_delete_locked_for(thread, 0, _table->_size, eval_f, del_f);
  }

  // To have prefetching for a VALUE that is a pointer during
  // do_bulk_delete_locked, we have these helper classes: one for the
  // non-pointer case without prefetch and one for the pointer case with
  // prefetch.
  template <bool b, typename EVALUATE_FUNC>
  struct HaveDeletables {
    static bool have_deletable(Bucket* bucket, EVALUATE_FUNC& eval_f,
                               Bucket* prefetch_bucket);
  };
  template<typename EVALUATE_FUNC>
  struct HaveDeletables<true, EVALUATE_FUNC> {
    static bool have_deletable(Bucket* bucket, EVALUATE_FUNC& eval_f,
                               Bucket* prefetch_bucket);
  };

  // Check for dead items in a range of this table. During shrink/grow we
  // cannot guarantee that we only visit nodes once. To keep it simple, the
  // caller must have locked _resize_lock.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  void do_bulk_delete_locked_for(Thread* thread, size_t start_idx,
                                 size_t stop_idx, EVALUATE_FUNC& eval_f,
                                 DELETE_FUNC& del_f, bool is_mt = false);

  // Method to delete one item.
  template <typename LOOKUP_FUNC>
  void delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f);

 public:
  ConcurrentHashTable(size_t log2size = DEFAULT_START_SIZE_LOG2,
                      size_t log2size_limit = DEFAULT_MAX_SIZE_LOG2,
                      size_t grow_hint = DEFAULT_GROW_HINT,
                      void* context = NULL);

  explicit ConcurrentHashTable(void* context, size_t log2size = DEFAULT_START_SIZE_LOG2) :
    ConcurrentHashTable(log2size, DEFAULT_MAX_SIZE_LOG2, DEFAULT_GROW_HINT, context) {}

  ~ConcurrentHashTable();

  TableRateStatistics _stats_rate;

  size_t get_size_log2(Thread* thread);
  size_t get_node_size() const { return sizeof(Node); }
  bool is_max_size_reached() { return _size_limit_reached; }

  // This means no paused bucket resize operation is going to resume
  // on this table.
  bool is_safepoint_safe() { return _resize_lock_owner == NULL; }

  // Re-size operations.
  bool shrink(Thread* thread, size_t size_limit_log2 = 0);
  bool grow(Thread* thread, size_t size_limit_log2 = 0);
  // Unsafe reset and resize the table. This method assumes that we
  // want to clear and maybe resize the internal table without the
  // overhead of clearing individual items in the table.
  void unsafe_reset(size_t size_log2 = 0);

  // All callbacks for get are under critical sections. Other callbacks may be
  // under a critical section or may have locked parts of the table. Calling any
  // methods on the table during a callback is not supported. Only MultiGetHandle
  // supports multiple gets.

  // Get methods return true if an item matching LOOKUP_FUNC is found, and
  // FOUND_FUNC is then called on it.
  template <typename LOOKUP_FUNC, typename FOUND_FUNC>
  bool get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& foundf,
           bool* grow_hint = NULL);
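
  // Illustrative usage sketch (not part of the original header). The lookup
  // functor shape shown here (get_hash()/equals()) is an assumption about the
  // usual LOOKUP_FUNC contract; see concurrentHashTable.inline.hpp for the
  // exact requirements in this version. MyLookup and MyFound are hypothetical.
  //
  //   struct MyLookup {
  //     int64_t _key;
  //     uintx get_hash() const { return (uintx)_key; }
  //     bool equals(const int64_t* value) const { return *value == _key; }
  //   };
  //   struct MyFound {
  //     void operator()(int64_t* value) { /* use *value only inside the callback */ }
  //   };
  //   MyLookup lookup = { key };
  //   MyFound found;
  //   bool hit = cht->get(Thread::current(), lookup, found);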

  // Returns true if the item was inserted; duplicates are found with
  // LOOKUP_FUNC.
  template <typename LOOKUP_FUNC>
  bool insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
              bool* grow_hint = NULL, bool* clean_hint = NULL) {
    struct NOP {
      void operator()(...) const {}
    } nop;
    return internal_insert_get(thread, lookup_f, value, nop, grow_hint, clean_hint);
  }

  // Returns true if the item was inserted. Duplicates are found with
  // LOOKUP_FUNC; if a duplicate is found, FOUND_FUNC is called on it.
  template <typename LOOKUP_FUNC, typename FOUND_FUNC>
  bool insert_get(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE& value, FOUND_FUNC& foundf,
                  bool* grow_hint = NULL, bool* clean_hint = NULL) {
    return internal_insert_get(thread, lookup_f, value, foundf, grow_hint, clean_hint);
  }

  // This does a fast unsafe insert and can thus only be used when there is no
  // risk of duplicates and no other thread uses this table.
  bool unsafe_insert(const VALUE& value);

  // Returns true if an item matching LOOKUP_FUNC was deleted;
  // DELETE_FUNC is called prior to its destruction.
  template <typename LOOKUP_FUNC, typename DELETE_FUNC>
  bool remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& del_f) {
    return internal_remove(thread, lookup_f, del_f);
  }

  // Same without DELETE_FUNC.
  template <typename LOOKUP_FUNC>
  bool remove(Thread* thread, LOOKUP_FUNC& lookup_f) {
    struct {
      void operator()(VALUE*) {}
    } ignore_del_f;
    return internal_remove(thread, lookup_f, ignore_del_f);
  }

  // Visits all items with SCAN_FUNC if there is no concurrent resize. Takes the
  // resize lock to avoid concurrent resizes; otherwise returns false.
  template <typename SCAN_FUNC>
  bool try_scan(Thread* thread, SCAN_FUNC& scan_f);

  // Visits all items with SCAN_FUNC when the resize lock is obtained.
  template <typename SCAN_FUNC>
  void do_scan(Thread* thread, SCAN_FUNC& scan_f);

  // Visits all items with SCAN_FUNC without any protection.
  // It assumes no other thread is accessing this
  // table during the safepoint. Must be called by the VM thread.
  template <typename SCAN_FUNC>
  void do_safepoint_scan(SCAN_FUNC& scan_f);

  // Destroys items matching EVALUATE_FUNC if the resize lock is obtained;
  // DELETE_FUNC is called before each item is destroyed. Otherwise returns
  // false.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  bool try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f,
                       DELETE_FUNC& del_f);

  // Destroys items matching EVALUATE_FUNC once the resize lock is successfully
  // obtained; DELETE_FUNC is called before each item is destroyed.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  void bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f);

  // Calculate statistics. Item sizes are calculated with VALUE_SIZE_FUNC.
  template <typename VALUE_SIZE_FUNC>
  TableStatistics statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f);

  // Gets statistics if available; if not, returns the old one. Item sizes are
  // calculated with VALUE_SIZE_FUNC.
  template <typename VALUE_SIZE_FUNC>
  TableStatistics statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old);

  // Writes statistics to the outputStream. Item sizes are calculated with
  // VALUE_SIZE_FUNC.
  template <typename VALUE_SIZE_FUNC>
  void statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f, outputStream* st,
                     const char* table_name);

  // Moves all nodes from this table to to_cht.
  bool try_move_nodes_to(Thread* thread, ConcurrentHashTable<CONFIG, F>* to_cht);

  // Scoped multi getter.
  class MultiGetHandle : private ScopedCS {
   public:
    MultiGetHandle(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht)
      : ScopedCS(thread, cht) {}
    // In the MultiGetHandle scope you can look up items matching LOOKUP_FUNC.
    // The VALUEs are safe as long as you never save the VALUEs outside the
    // scope, e.g. after ~MultiGetHandle().
    template <typename LOOKUP_FUNC>
    VALUE* get(LOOKUP_FUNC& lookup_f, bool* grow_hint = NULL);
  };
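  // Illustrative usage sketch (not part of the original header), reusing the
  // hypothetical MyLookup functor from above; MyTable stands for a hypothetical
  // ConcurrentHashTable instantiation:
  //
  //   {
  //     typename MyTable::MultiGetHandle mgh(Thread::current(), cht);
  //     int64_t* a = mgh.get(lookup_a);
  //     int64_t* b = mgh.get(lookup_b);
  //     // a and b may be used here, but must not be saved past this scope.
  //   }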

 private:
  class BucketsOperation;

 public:
  class BulkDeleteTask;
  class GrowTask;
};

#endif // SHARE_UTILITIES_CONCURRENTHASHTABLE_HPP