// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2024 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/BVec16.h>

JPH_NAMESPACE_BEGIN
/// Helper class for implementing an UnorderedSet or UnorderedMap
12
/// Based on CppCon 2017: Matt Kulukundis "Designing a Fast, Efficient, Cache-friendly Hash Table, Step by Step"
13
/// See: https://www.youtube.com/watch?v=ncHmEUmJZf4
14
template <class Key, class KeyValue, class HashTableDetail, class Hash, class KeyEqual>
15
class HashTable
16
{
17
public:
18
/// Properties
19
using value_type = KeyValue;
20
using size_type = uint32;
21
using difference_type = ptrdiff_t;
22
23
private:
24
/// Base class for iterators
25
template <class Table, class Iterator>
26
class IteratorBase
27
{
28
public:
29
/// Properties
30
using difference_type = typename Table::difference_type;
31
using value_type = typename Table::value_type;
32
using iterator_category = std::forward_iterator_tag;
33
34
/// Copy constructor
35
IteratorBase(const IteratorBase &inRHS) = default;
36
37
/// Assignment operator
38
IteratorBase & operator = (const IteratorBase &inRHS) = default;
39
40
/// Iterator at start of table
41
explicit IteratorBase(Table *inTable) :
42
mTable(inTable),
43
mIndex(0)
44
{
45
while (mIndex < mTable->mMaxSize && (mTable->mControl[mIndex] & cBucketUsed) == 0)
46
++mIndex;
47
}
48
49
/// Iterator at specific index
50
IteratorBase(Table *inTable, size_type inIndex) :
51
mTable(inTable),
52
mIndex(inIndex)
53
{
54
}
55
56
/// Prefix increment
57
Iterator & operator ++ ()
58
{
59
JPH_ASSERT(IsValid());
60
61
do
62
{
63
++mIndex;
64
}
65
while (mIndex < mTable->mMaxSize && (mTable->mControl[mIndex] & cBucketUsed) == 0);
66
67
return static_cast<Iterator &>(*this);
68
}
69
70
/// Postfix increment
71
Iterator operator ++ (int)
72
{
73
Iterator result(mTable, mIndex);
74
++(*this);
75
return result;
76
}
77
78
/// Access to key value pair
79
const KeyValue & operator * () const
80
{
81
JPH_ASSERT(IsValid());
82
return mTable->mData[mIndex];
83
}
84
85
/// Access to key value pair
86
const KeyValue * operator -> () const
87
{
88
JPH_ASSERT(IsValid());
89
return mTable->mData + mIndex;
90
}
91
92
/// Equality operator
93
bool operator == (const Iterator &inRHS) const
94
{
95
return mIndex == inRHS.mIndex && mTable == inRHS.mTable;
96
}
97
98
/// Inequality operator
99
bool operator != (const Iterator &inRHS) const
100
{
101
return !(*this == inRHS);
102
}
103
104
/// Check that the iterator is valid
105
bool IsValid() const
106
{
107
return mIndex < mTable->mMaxSize
108
&& (mTable->mControl[mIndex] & cBucketUsed) != 0;
109
}
110
111
Table * mTable;
112
size_type mIndex;
113
};
114
115
/// Get the maximum number of elements that we can support given a number of buckets
116
static constexpr size_type sGetMaxLoad(size_type inBucketCount)
117
{
118
return uint32((cMaxLoadFactorNumerator * inBucketCount) / cMaxLoadFactorDenominator);
119
}
120
121
/// Update the control value for a bucket
122
JPH_INLINE void SetControlValue(size_type inIndex, uint8 inValue)
123
{
124
JPH_ASSERT(inIndex < mMaxSize);
125
mControl[inIndex] = inValue;
126
127
// Mirror the first 15 bytes to the 15 bytes beyond mMaxSize
128
// Note that this is equivalent to:
129
// if (inIndex < 15)
130
// mControl[inIndex + mMaxSize] = inValue
131
// else
132
// mControl[inIndex] = inValue
133
// Which performs a needless write if inIndex >= 15 but at least it is branch-less
134
mControl[((inIndex - 15) & (mMaxSize - 1)) + 15] = inValue;
135
}
136
137
/// Get the index and control value for a particular key
138
JPH_INLINE void GetIndexAndControlValue(const Key &inKey, size_type &outIndex, uint8 &outControl) const
139
{
140
// Calculate hash
141
uint64 hash_value = Hash { } (inKey);
142
143
// Split hash into index and control value
144
outIndex = size_type(hash_value >> 7) & (mMaxSize - 1);
145
outControl = cBucketUsed | uint8(hash_value);
146
}
147
148
/// Allocate space for the hash table
149
void AllocateTable(size_type inMaxSize)
150
{
151
JPH_ASSERT(mData == nullptr);
152
153
mMaxSize = inMaxSize;
154
mLoadLeft = sGetMaxLoad(inMaxSize);
155
size_t required_size = size_t(mMaxSize) * (sizeof(KeyValue) + 1) + 15; // Add 15 bytes to mirror the first 15 bytes of the control values
156
if constexpr (cNeedsAlignedAllocate)
157
mData = reinterpret_cast<KeyValue *>(AlignedAllocate(required_size, alignof(KeyValue)));
158
else
159
mData = reinterpret_cast<KeyValue *>(Allocate(required_size));
160
mControl = reinterpret_cast<uint8 *>(mData + mMaxSize);
161
}
162
163
/// Copy the contents of another hash table
164
void CopyTable(const HashTable &inRHS)
165
{
166
if (inRHS.empty())
167
return;
168
169
AllocateTable(inRHS.mMaxSize);
170
171
// Copy control bytes
172
memcpy(mControl, inRHS.mControl, mMaxSize + 15);
173
174
// Copy elements
175
uint index = 0;
176
for (const uint8 *control = mControl, *control_end = mControl + mMaxSize; control != control_end; ++control, ++index)
177
if (*control & cBucketUsed)
178
new (mData + index) KeyValue(inRHS.mData[index]);
179
mSize = inRHS.mSize;
180
}
181
182
/// Grow the table to a new size
183
void GrowTable(size_type inNewMaxSize)
184
{
185
// Move the old table to a temporary structure
186
size_type old_max_size = mMaxSize;
187
KeyValue *old_data = mData;
188
const uint8 *old_control = mControl;
189
mData = nullptr;
190
mControl = nullptr;
191
mSize = 0;
192
mMaxSize = 0;
193
mLoadLeft = 0;
194
195
// Allocate new table
196
AllocateTable(inNewMaxSize);
197
198
// Reset all control bytes
199
memset(mControl, cBucketEmpty, mMaxSize + 15);
200
201
if (old_data != nullptr)
202
{
203
// Copy all elements from the old table
204
for (size_type i = 0; i < old_max_size; ++i)
205
if (old_control[i] & cBucketUsed)
206
{
207
size_type index;
208
KeyValue *element = old_data + i;
209
JPH_IF_ENABLE_ASSERTS(bool inserted =) InsertKey</* InsertAfterGrow= */ true>(HashTableDetail::sGetKey(*element), index);
210
JPH_ASSERT(inserted);
211
new (mData + index) KeyValue(std::move(*element));
212
element->~KeyValue();
213
}
214
215
// Free memory
216
if constexpr (cNeedsAlignedAllocate)
217
AlignedFree(old_data);
218
else
219
Free(old_data);
220
}
221
}
222
223
protected:
224
/// Get an element by index
225
KeyValue & GetElement(size_type inIndex) const
226
{
227
return mData[inIndex];
228
}
229
230
/// Insert a key into the map, returns true if the element was inserted, false if it already existed.
231
/// outIndex is the index at which the element should be constructed / where it is located.
232
template <bool InsertAfterGrow = false>
233
bool InsertKey(const Key &inKey, size_type &outIndex)
234
{
235
// Ensure we have enough space
236
if (mLoadLeft == 0)
237
{
238
// Should not be growing if we're already growing!
239
if constexpr (InsertAfterGrow)
240
JPH_ASSERT(false);
241
242
// Decide if we need to clean up all tombstones or if we need to grow the map
243
size_type num_deleted = sGetMaxLoad(mMaxSize) - mSize;
244
if (num_deleted * cMaxDeletedElementsDenominator > mMaxSize * cMaxDeletedElementsNumerator)
245
rehash(0);
246
else
247
{
248
// Grow by a power of 2
249
size_type new_max_size = max<size_type>(mMaxSize << 1, 16);
250
if (new_max_size < mMaxSize)
251
{
252
JPH_ASSERT(false, "Overflow in hash table size, can't grow!");
253
return false;
254
}
255
GrowTable(new_max_size);
256
}
257
}
258
259
// Split hash into index and control value
260
size_type index;
261
uint8 control;
262
GetIndexAndControlValue(inKey, index, control);
263
264
// Keeps track of the index of the first deleted bucket we found
265
constexpr size_type cNoDeleted = ~size_type(0);
266
size_type first_deleted_index = cNoDeleted;
267
268
// Linear probing
269
KeyEqual equal;
270
size_type bucket_mask = mMaxSize - 1;
271
BVec16 control16 = BVec16::sReplicate(control);
272
BVec16 bucket_empty = BVec16::sZero();
273
BVec16 bucket_deleted = BVec16::sReplicate(cBucketDeleted);
274
for (;;)
275
{
276
// Read 16 control values (note that we added 15 bytes at the end of the control values that mirror the first 15 bytes)
277
BVec16 control_bytes = BVec16::sLoadByte16(mControl + index);
278
279
// Check if we must find the element before we can insert
280
if constexpr (!InsertAfterGrow)
281
{
282
// Check for the control value we're looking for
283
// Note that when deleting we can create empty buckets instead of deleted buckets.
284
// This means we must unconditionally check all buckets in this batch for equality
285
// (also beyond the first empty bucket).
286
uint32 control_equal = uint32(BVec16::sEquals(control_bytes, control16).GetTrues());
287
288
// Index within the 16 buckets
289
size_type local_index = index;
290
291
// Loop while there's still buckets to process
292
while (control_equal != 0)
293
{
294
// Get the first equal bucket
295
uint first_equal = CountTrailingZeros(control_equal);
296
297
// Skip to the bucket
298
local_index += first_equal;
299
300
// Make sure that our index is not beyond the end of the table
301
local_index &= bucket_mask;
302
303
// We found a bucket with same control value
304
if (equal(HashTableDetail::sGetKey(mData[local_index]), inKey))
305
{
306
// Element already exists
307
outIndex = local_index;
308
return false;
309
}
310
311
// Skip past this bucket
312
control_equal >>= first_equal + 1;
313
local_index++;
314
}
315
316
// Check if we're still scanning for deleted buckets
317
if (first_deleted_index == cNoDeleted)
318
{
319
// Check if any buckets have been deleted, if so store the first one
320
uint32 control_deleted = uint32(BVec16::sEquals(control_bytes, bucket_deleted).GetTrues());
321
if (control_deleted != 0)
322
first_deleted_index = index + CountTrailingZeros(control_deleted);
323
}
324
}
325
326
// Check for empty buckets
327
uint32 control_empty = uint32(BVec16::sEquals(control_bytes, bucket_empty).GetTrues());
328
if (control_empty != 0)
329
{
330
// If we found a deleted bucket, use it.
331
// It doesn't matter if it is before or after the first empty bucket we found
332
// since we will always be scanning in batches of 16 buckets.
333
if (first_deleted_index == cNoDeleted || InsertAfterGrow)
334
{
335
index += CountTrailingZeros(control_empty);
336
--mLoadLeft; // Using an empty bucket decreases the load left
337
}
338
else
339
{
340
index = first_deleted_index;
341
}
342
343
// Make sure that our index is not beyond the end of the table
344
index &= bucket_mask;
345
346
// Update control byte
347
SetControlValue(index, control);
348
++mSize;
349
350
// Return index to newly allocated bucket
351
outIndex = index;
352
return true;
353
}
354
355
// Move to next batch of 16 buckets
356
index = (index + 16) & bucket_mask;
357
}
358
}
359
360
public:
361
/// Non-const iterator
362
class iterator : public IteratorBase<HashTable, iterator>
363
{
364
using Base = IteratorBase<HashTable, iterator>;
365
366
public:
367
using IteratorBase<HashTable, iterator>::operator ==;
368
369
/// Properties
370
using reference = typename Base::value_type &;
371
using pointer = typename Base::value_type *;
372
373
/// Constructors
374
explicit iterator(HashTable *inTable) : Base(inTable) { }
375
iterator(HashTable *inTable, size_type inIndex) : Base(inTable, inIndex) { }
376
iterator(const iterator &inIterator) : Base(inIterator) { }
377
378
/// Assignment
379
iterator & operator = (const iterator &inRHS) { Base::operator = (inRHS); return *this; }
380
381
using Base::operator *;
382
383
/// Non-const access to key value pair
384
KeyValue & operator * ()
385
{
386
JPH_ASSERT(this->IsValid());
387
return this->mTable->mData[this->mIndex];
388
}
389
390
using Base::operator ->;
391
392
/// Non-const access to key value pair
393
KeyValue * operator -> ()
394
{
395
JPH_ASSERT(this->IsValid());
396
return this->mTable->mData + this->mIndex;
397
}
398
};
399
400
/// Const iterator
401
class const_iterator : public IteratorBase<const HashTable, const_iterator>
402
{
403
using Base = IteratorBase<const HashTable, const_iterator>;
404
405
public:
406
using IteratorBase<const HashTable, const_iterator>::operator ==;
407
408
/// Properties
409
using reference = const typename Base::value_type &;
410
using pointer = const typename Base::value_type *;
411
412
/// Constructors
413
explicit const_iterator(const HashTable *inTable) : Base(inTable) { }
414
const_iterator(const HashTable *inTable, size_type inIndex) : Base(inTable, inIndex) { }
415
const_iterator(const const_iterator &inRHS) : Base(inRHS) { }
416
const_iterator(const iterator &inIterator) : Base(inIterator.mTable, inIterator.mIndex) { }
417
418
/// Assignment
419
const_iterator & operator = (const iterator &inRHS) { this->mTable = inRHS.mTable; this->mIndex = inRHS.mIndex; return *this; }
420
const_iterator & operator = (const const_iterator &inRHS) { Base::operator = (inRHS); return *this; }
421
};
422
423
/// Default constructor
424
HashTable() = default;
425
426
/// Copy constructor
427
HashTable(const HashTable &inRHS)
428
{
429
CopyTable(inRHS);
430
}
431
432
/// Move constructor
433
HashTable(HashTable &&ioRHS) noexcept :
434
mData(ioRHS.mData),
435
mControl(ioRHS.mControl),
436
mSize(ioRHS.mSize),
437
mMaxSize(ioRHS.mMaxSize),
438
mLoadLeft(ioRHS.mLoadLeft)
439
{
440
ioRHS.mData = nullptr;
441
ioRHS.mControl = nullptr;
442
ioRHS.mSize = 0;
443
ioRHS.mMaxSize = 0;
444
ioRHS.mLoadLeft = 0;
445
}
446
447
/// Assignment operator
448
HashTable & operator = (const HashTable &inRHS)
449
{
450
if (this != &inRHS)
451
{
452
clear();
453
454
CopyTable(inRHS);
455
}
456
457
return *this;
458
}
459
460
/// Move assignment operator
461
HashTable & operator = (HashTable &&ioRHS) noexcept
462
{
463
if (this != &ioRHS)
464
{
465
clear();
466
467
mData = ioRHS.mData;
468
mControl = ioRHS.mControl;
469
mSize = ioRHS.mSize;
470
mMaxSize = ioRHS.mMaxSize;
471
mLoadLeft = ioRHS.mLoadLeft;
472
473
ioRHS.mData = nullptr;
474
ioRHS.mControl = nullptr;
475
ioRHS.mSize = 0;
476
ioRHS.mMaxSize = 0;
477
ioRHS.mLoadLeft = 0;
478
}
479
480
return *this;
481
}
482
483
/// Destructor
484
~HashTable()
485
{
486
clear();
487
}
488
489
/// Reserve memory for a certain number of elements
490
void reserve(size_type inMaxSize)
491
{
492
// Calculate max size based on load factor
493
size_type max_size = GetNextPowerOf2(max<uint32>((cMaxLoadFactorDenominator * inMaxSize) / cMaxLoadFactorNumerator, 16));
494
if (max_size <= mMaxSize)
495
return;
496
497
GrowTable(max_size);
498
}
499
500
/// Destroy the entire hash table
501
void clear()
502
{
503
// Delete all elements
504
if constexpr (!std::is_trivially_destructible<KeyValue>())
505
if (!empty())
506
for (size_type i = 0; i < mMaxSize; ++i)
507
if (mControl[i] & cBucketUsed)
508
mData[i].~KeyValue();
509
510
if (mData != nullptr)
511
{
512
// Free memory
513
if constexpr (cNeedsAlignedAllocate)
514
AlignedFree(mData);
515
else
516
Free(mData);
517
518
// Reset members
519
mData = nullptr;
520
mControl = nullptr;
521
mSize = 0;
522
mMaxSize = 0;
523
mLoadLeft = 0;
524
}
525
}
526
527
/// Destroy the entire hash table but keeps the memory allocated
528
void ClearAndKeepMemory()
529
{
530
// Destruct elements
531
if constexpr (!std::is_trivially_destructible<KeyValue>())
532
if (!empty())
533
for (size_type i = 0; i < mMaxSize; ++i)
534
if (mControl[i] & cBucketUsed)
535
mData[i].~KeyValue();
536
mSize = 0;
537
538
// If there are elements that are not marked cBucketEmpty, we reset them
539
size_type max_load = sGetMaxLoad(mMaxSize);
540
if (mLoadLeft != max_load)
541
{
542
// Reset all control bytes
543
memset(mControl, cBucketEmpty, mMaxSize + 15);
544
mLoadLeft = max_load;
545
}
546
}
547
548
/// Iterator to first element
549
iterator begin()
550
{
551
return iterator(this);
552
}
553
554
/// Iterator to one beyond last element
555
iterator end()
556
{
557
return iterator(this, mMaxSize);
558
}
559
560
/// Iterator to first element
561
const_iterator begin() const
562
{
563
return const_iterator(this);
564
}
565
566
/// Iterator to one beyond last element
567
const_iterator end() const
568
{
569
return const_iterator(this, mMaxSize);
570
}
571
572
/// Iterator to first element
573
const_iterator cbegin() const
574
{
575
return const_iterator(this);
576
}
577
578
/// Iterator to one beyond last element
579
const_iterator cend() const
580
{
581
return const_iterator(this, mMaxSize);
582
}
583
584
/// Number of buckets in the table
585
size_type bucket_count() const
586
{
587
return mMaxSize;
588
}
589
590
/// Max number of buckets that the table can have
591
constexpr size_type max_bucket_count() const
592
{
593
return size_type(1) << (sizeof(size_type) * 8 - 1);
594
}
595
596
/// Check if there are no elements in the table
597
bool empty() const
598
{
599
return mSize == 0;
600
}
601
602
/// Number of elements in the table
603
size_type size() const
604
{
605
return mSize;
606
}
607
608
/// Max number of elements that the table can hold
609
constexpr size_type max_size() const
610
{
611
return size_type((uint64(max_bucket_count()) * cMaxLoadFactorNumerator) / cMaxLoadFactorDenominator);
612
}
613
614
/// Get the max load factor for this table (max number of elements / number of buckets)
615
constexpr float max_load_factor() const
616
{
617
return float(cMaxLoadFactorNumerator) / float(cMaxLoadFactorDenominator);
618
}
619
620
/// Insert a new element, returns iterator and if the element was inserted
621
std::pair<iterator, bool> insert(const value_type &inValue)
622
{
623
size_type index;
624
bool inserted = InsertKey(HashTableDetail::sGetKey(inValue), index);
625
if (inserted)
626
new (mData + index) KeyValue(inValue);
627
return std::make_pair(iterator(this, index), inserted);
628
}
629
630
/// Find an element, returns iterator to element or end() if not found
631
const_iterator find(const Key &inKey) const
632
{
633
// Check if we have any data
634
if (empty())
635
return cend();
636
637
// Split hash into index and control value
638
size_type index;
639
uint8 control;
640
GetIndexAndControlValue(inKey, index, control);
641
642
// Linear probing
643
KeyEqual equal;
644
size_type bucket_mask = mMaxSize - 1;
645
BVec16 control16 = BVec16::sReplicate(control);
646
BVec16 bucket_empty = BVec16::sZero();
647
for (;;)
648
{
649
// Read 16 control values
650
// (note that we added 15 bytes at the end of the control values that mirror the first 15 bytes)
651
BVec16 control_bytes = BVec16::sLoadByte16(mControl + index);
652
653
// Check for the control value we're looking for
654
// Note that when deleting we can create empty buckets instead of deleted buckets.
655
// This means we must unconditionally check all buckets in this batch for equality
656
// (also beyond the first empty bucket).
657
uint32 control_equal = uint32(BVec16::sEquals(control_bytes, control16).GetTrues());
658
659
// Index within the 16 buckets
660
size_type local_index = index;
661
662
// Loop while there's still buckets to process
663
while (control_equal != 0)
664
{
665
// Get the first equal bucket
666
uint first_equal = CountTrailingZeros(control_equal);
667
668
// Skip to the bucket
669
local_index += first_equal;
670
671
// Make sure that our index is not beyond the end of the table
672
local_index &= bucket_mask;
673
674
// We found a bucket with same control value
675
if (equal(HashTableDetail::sGetKey(mData[local_index]), inKey))
676
{
677
// Element found
678
return const_iterator(this, local_index);
679
}
680
681
// Skip past this bucket
682
control_equal >>= first_equal + 1;
683
local_index++;
684
}
685
686
// Check for empty buckets
687
uint32 control_empty = uint32(BVec16::sEquals(control_bytes, bucket_empty).GetTrues());
688
if (control_empty != 0)
689
{
690
// An empty bucket was found, we didn't find the element
691
return cend();
692
}
693
694
// Move to next batch of 16 buckets
695
index = (index + 16) & bucket_mask;
696
}
697
}
698
699
/// @brief Erase an element by iterator
700
void erase(const const_iterator &inIterator)
701
{
702
JPH_ASSERT(inIterator.IsValid());
703
704
// Read 16 control values before and after the current index
705
// (note that we added 15 bytes at the end of the control values that mirror the first 15 bytes)
706
BVec16 control_bytes_before = BVec16::sLoadByte16(mControl + ((inIterator.mIndex - 16) & (mMaxSize - 1)));
707
BVec16 control_bytes_after = BVec16::sLoadByte16(mControl + inIterator.mIndex);
708
BVec16 bucket_empty = BVec16::sZero();
709
uint32 control_empty_before = uint32(BVec16::sEquals(control_bytes_before, bucket_empty).GetTrues());
710
uint32 control_empty_after = uint32(BVec16::sEquals(control_bytes_after, bucket_empty).GetTrues());
711
712
// If (this index including) there exist 16 consecutive non-empty slots (represented by a bit being 0) then
713
// a probe looking for some element needs to continue probing so we cannot mark the bucket as empty
714
// but must mark it as deleted instead.
715
// Note that we use: CountLeadingZeros(uint16) = CountLeadingZeros(uint32) - 16.
716
uint8 control_value = CountLeadingZeros(control_empty_before) - 16 + CountTrailingZeros(control_empty_after) < 16? cBucketEmpty : cBucketDeleted;
717
718
// Mark the bucket as empty/deleted
719
SetControlValue(inIterator.mIndex, control_value);
720
721
// Destruct the element
722
mData[inIterator.mIndex].~KeyValue();
723
724
// If we marked the bucket as empty we can increase the load left
725
if (control_value == cBucketEmpty)
726
++mLoadLeft;
727
728
// Decrease size
729
--mSize;
730
}
731
732
/// @brief Erase an element by key
733
size_type erase(const Key &inKey)
734
{
735
const_iterator it = find(inKey);
736
if (it == cend())
737
return 0;
738
739
erase(it);
740
return 1;
741
}
742
743
/// Swap the contents of two hash tables
744
void swap(HashTable &ioRHS) noexcept
745
{
746
std::swap(mData, ioRHS.mData);
747
std::swap(mControl, ioRHS.mControl);
748
std::swap(mSize, ioRHS.mSize);
749
std::swap(mMaxSize, ioRHS.mMaxSize);
750
std::swap(mLoadLeft, ioRHS.mLoadLeft);
751
}
752
753
/// In place re-hashing of all elements in the table. Removes all cBucketDeleted elements
754
/// The std version takes a bucket count, but we just re-hash to the same size.
755
void rehash(size_type)
756
{
757
// Update the control value for all buckets
758
for (size_type i = 0; i < mMaxSize; ++i)
759
{
760
uint8 &control = mControl[i];
761
switch (control)
762
{
763
case cBucketDeleted:
764
// Deleted buckets become empty
765
control = cBucketEmpty;
766
break;
767
case cBucketEmpty:
768
// Remains empty
769
break;
770
default:
771
// Mark all occupied as deleted, to indicate it needs to move to the correct place
772
control = cBucketDeleted;
773
break;
774
}
775
}
776
777
// Replicate control values to the last 15 entries
778
for (size_type i = 0; i < 15; ++i)
779
mControl[mMaxSize + i] = mControl[i];
780
781
// Loop over all elements that have been 'deleted' and move them to their new spot
782
BVec16 bucket_used = BVec16::sReplicate(cBucketUsed);
783
size_type bucket_mask = mMaxSize - 1;
784
uint32 probe_mask = bucket_mask & ~uint32(0b1111); // Mask out lower 4 bits because we test 16 buckets at a time
785
for (size_type src = 0; src < mMaxSize; ++src)
786
if (mControl[src] == cBucketDeleted)
787
for (;;)
788
{
789
// Split hash into index and control value
790
size_type src_index;
791
uint8 src_control;
792
GetIndexAndControlValue(HashTableDetail::sGetKey(mData[src]), src_index, src_control);
793
794
// Linear probing
795
size_type dst = src_index;
796
for (;;)
797
{
798
// Check if any buckets are free
799
BVec16 control_bytes = BVec16::sLoadByte16(mControl + dst);
800
uint32 control_free = uint32(BVec16::sAnd(control_bytes, bucket_used).GetTrues()) ^ 0xffff;
801
if (control_free != 0)
802
{
803
// Select this bucket as destination
804
dst += CountTrailingZeros(control_free);
805
dst &= bucket_mask;
806
break;
807
}
808
809
// Move to next batch of 16 buckets
810
dst = (dst + 16) & bucket_mask;
811
}
812
813
// Check if we stay in the same probe group
814
if (((dst - src_index) & probe_mask) == ((src - src_index) & probe_mask))
815
{
816
// We stay in the same group, we can stay where we are
817
SetControlValue(src, src_control);
818
break;
819
}
820
else if (mControl[dst] == cBucketEmpty)
821
{
822
// There's an empty bucket, move us there
823
SetControlValue(dst, src_control);
824
SetControlValue(src, cBucketEmpty);
825
new (mData + dst) KeyValue(std::move(mData[src]));
826
mData[src].~KeyValue();
827
break;
828
}
829
else
830
{
831
// There's an element in the bucket we want to move to, swap them
832
JPH_ASSERT(mControl[dst] == cBucketDeleted);
833
SetControlValue(dst, src_control);
834
std::swap(mData[src], mData[dst]);
835
// Iterate again with the same source bucket
836
}
837
}
838
839
// Reinitialize load left
840
mLoadLeft = sGetMaxLoad(mMaxSize) - mSize;
841
}
842
843
private:
844
/// If this allocator needs to fall back to aligned allocations because the type requires it
845
static constexpr bool cNeedsAlignedAllocate = alignof(KeyValue) > (JPH_CPU_ADDRESS_BITS == 32? 8 : 16);
846
847
/// Max load factor is cMaxLoadFactorNumerator / cMaxLoadFactorDenominator
848
static constexpr uint64 cMaxLoadFactorNumerator = 7;
849
static constexpr uint64 cMaxLoadFactorDenominator = 8;
850
851
/// If we can recover this fraction of deleted elements, we'll reshuffle the buckets in place rather than growing the table
852
static constexpr uint64 cMaxDeletedElementsNumerator = 1;
853
static constexpr uint64 cMaxDeletedElementsDenominator = 8;
854
855
/// Values that the control bytes can have
856
static constexpr uint8 cBucketEmpty = 0;
857
static constexpr uint8 cBucketDeleted = 0x7f;
858
static constexpr uint8 cBucketUsed = 0x80; // Lowest 7 bits are lowest 7 bits of the hash value
859
860
/// The buckets, an array of size mMaxSize
861
KeyValue * mData = nullptr;
862
863
/// Control bytes, an array of size mMaxSize + 15
864
uint8 * mControl = nullptr;
865
866
/// Number of elements in the table
867
size_type mSize = 0;
868
869
/// Max number of elements that can be stored in the table
870
size_type mMaxSize = 0;
871
872
/// Number of elements we can add to the table before we need to grow
873
size_type mLoadLeft = 0;
874
};
875
876
JPH_NAMESPACE_END
877
878