GitHub Repository: hrydgard/ppsspp
Path: blob/master/ext/basis_universal/basisu_containers.h

// basisu_containers.h
#pragma once

#undef new

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <assert.h>
#include <algorithm>
#include <string.h>      // memcpy(), memmove(), memset(), memcmp() - used directly below
#include <limits.h>      // UINT_MAX
#include <new>           // placement new
#include <type_traits>   // std::is_trivially_copyable
#include <utility>       // std::pair, std::swap

#if defined(__linux__) && !defined(ANDROID)
// Only for malloc_usable_size() in basisu_containers_impl.h
#include <malloc.h>
#define HAS_MALLOC_USABLE_SIZE 1
#endif

// Set to 1 to always check vector operator[], front(), and back() even in release.
#define BASISU_VECTOR_FORCE_CHECKING 0

// If 1, the vector container will not query the CRT to get the size of resized memory blocks.
#define BASISU_VECTOR_DETERMINISTIC 1

#ifdef _MSC_VER
#define BASISU_FORCE_INLINE __forceinline
#else
#define BASISU_FORCE_INLINE inline
#endif

namespace basisu
{
    enum { cInvalidIndex = -1 };

    namespace helpers
    {
        inline bool is_power_of_2(uint32_t x) { return x && ((x & (x - 1U)) == 0U); }
        inline bool is_power_of_2(uint64_t x) { return x && ((x & (x - 1U)) == 0U); }
        template<class T> const T& minimum(const T& a, const T& b) { return (b < a) ? b : a; }
        template<class T> const T& maximum(const T& a, const T& b) { return (a < b) ? b : a; }

        inline uint32_t floor_log2i(uint32_t v)
        {
            uint32_t l = 0;
            while (v > 1U)
            {
                v >>= 1;
                l++;
            }
            return l;
        }

        inline uint32_t next_pow2(uint32_t val)
        {
            val--;
            val |= val >> 16;
            val |= val >> 8;
            val |= val >> 4;
            val |= val >> 2;
            val |= val >> 1;
            return val + 1;
        }

        inline uint64_t next_pow2(uint64_t val)
        {
            val--;
            val |= val >> 32;
            val |= val >> 16;
            val |= val >> 8;
            val |= val >> 4;
            val |= val >> 2;
            val |= val >> 1;
            return val + 1;
        }
    } // namespace helpers
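
    // Worked examples (illustrative): next_pow2() rounds up to the nearest power of two, and
    // floor_log2i() computes floor(log2(v)):
    //
    //    next_pow2(17U) == 32, next_pow2(16U) == 16
    //    floor_log2i(17U) == 4, floor_log2i(1U) == 0
    //
    // Note that next_pow2(0U) returns 0: the initial decrement wraps to all-ones, the OR-smearing
    // then changes nothing, and the final increment wraps back to 0.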

    template <typename T>
    inline T* construct(T* p)
    {
        return new (static_cast<void*>(p)) T;
    }

    template <typename T, typename U>
    inline T* construct(T* p, const U& init)
    {
        return new (static_cast<void*>(p)) T(init);
    }

    template <typename T>
    inline void construct_array(T* p, size_t n)
    {
        T* q = p + n;
        for (; p != q; ++p)
            new (static_cast<void*>(p)) T;
    }

    template <typename T, typename U>
    inline void construct_array(T* p, size_t n, const U& init)
    {
        T* q = p + n;
        for (; p != q; ++p)
            new (static_cast<void*>(p)) T(init);
    }

    template <typename T>
    inline void destruct(T* p)
    {
        (void)p;
        p->~T();
    }

    template <typename T> inline void destruct_array(T* p, size_t n)
    {
        T* q = p + n;
        for (; p != q; ++p)
            p->~T();
    }

    template<typename T> struct int_traits { enum { cMin = INT32_MIN, cMax = INT32_MAX, cSigned = true }; };

    template<> struct int_traits<int8_t> { enum { cMin = INT8_MIN, cMax = INT8_MAX, cSigned = true }; };
    template<> struct int_traits<int16_t> { enum { cMin = INT16_MIN, cMax = INT16_MAX, cSigned = true }; };
    template<> struct int_traits<int32_t> { enum { cMin = INT32_MIN, cMax = INT32_MAX, cSigned = true }; };

    template<> struct int_traits<uint8_t> { enum { cMin = 0, cMax = UINT8_MAX, cSigned = false }; };
    template<> struct int_traits<uint16_t> { enum { cMin = 0, cMax = UINT16_MAX, cSigned = false }; };
    template<> struct int_traits<uint32_t> { enum { cMin = 0, cMax = UINT32_MAX, cSigned = false }; };

    template<typename T>
    struct scalar_type
    {
        enum { cFlag = false };
        static inline void construct(T* p) { basisu::construct(p); }
        static inline void construct(T* p, const T& init) { basisu::construct(p, init); }
        static inline void construct_array(T* p, size_t n) { basisu::construct_array(p, n); }
        static inline void destruct(T* p) { basisu::destruct(p); }
        static inline void destruct_array(T* p, size_t n) { basisu::destruct_array(p, n); }
    };

    template<typename T> struct scalar_type<T*>
    {
        enum { cFlag = true };
        static inline void construct(T** p) { memset(p, 0, sizeof(T*)); }
        static inline void construct(T** p, T* init) { *p = init; }
        static inline void construct_array(T** p, size_t n) { memset(p, 0, sizeof(T*) * n); }
        static inline void destruct(T** p) { (void)p; }
        static inline void destruct_array(T** p, size_t n) { (void)p; (void)n; }
    };

    #define BASISU_DEFINE_BUILT_IN_TYPE(X) \
        template<> struct scalar_type<X> { \
            enum { cFlag = true }; \
            static inline void construct(X* p) { memset(p, 0, sizeof(X)); } \
            static inline void construct(X* p, const X& init) { memcpy(p, &init, sizeof(X)); } \
            static inline void construct_array(X* p, size_t n) { memset(p, 0, sizeof(X) * n); } \
            static inline void destruct(X* p) { (void)p; } \
            static inline void destruct_array(X* p, size_t n) { (void)p; (void)n; } };

    BASISU_DEFINE_BUILT_IN_TYPE(bool)
    BASISU_DEFINE_BUILT_IN_TYPE(char)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned char)
    BASISU_DEFINE_BUILT_IN_TYPE(short)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned short)
    BASISU_DEFINE_BUILT_IN_TYPE(int)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned int)
    BASISU_DEFINE_BUILT_IN_TYPE(long)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned long)
    #ifdef __GNUC__
    BASISU_DEFINE_BUILT_IN_TYPE(long long)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned long long)
    #else
    BASISU_DEFINE_BUILT_IN_TYPE(__int64)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned __int64)
    #endif
    BASISU_DEFINE_BUILT_IN_TYPE(float)
    BASISU_DEFINE_BUILT_IN_TYPE(double)
    BASISU_DEFINE_BUILT_IN_TYPE(long double)

    #undef BASISU_DEFINE_BUILT_IN_TYPE

    template<typename T>
    struct bitwise_movable { enum { cFlag = false }; };

    #define BASISU_DEFINE_BITWISE_MOVABLE(Q) template<> struct bitwise_movable<Q> { enum { cFlag = true }; };

    template<typename T>
    struct bitwise_copyable { enum { cFlag = false }; };

    #define BASISU_DEFINE_BITWISE_COPYABLE(Q) template<> struct bitwise_copyable<Q> { enum { cFlag = true }; };
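
    // Usage sketch (illustrative, not part of this header): a user-defined type that is safe to
    // copy and relocate bytewise can be opted in to the fast memcpy()/memmove() paths used by the
    // containers below. For example (hypothetical type):
    //
    //    struct rgba { uint8_t r, g, b, a; };
    //    BASISU_DEFINE_BITWISE_COPYABLE(rgba)
    //    BASISU_DEFINE_BITWISE_MOVABLE(rgba)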

    #define BASISU_IS_POD(T) __is_pod(T)

    #define BASISU_IS_SCALAR_TYPE(T) (scalar_type<T>::cFlag)

    #if defined(__GNUC__) && __GNUC__<5
    #define BASISU_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__)
    #else
    #define BASISU_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
    #endif

    // TODO: clean this up
    #define BASISU_IS_BITWISE_COPYABLE(T) (BASISU_IS_SCALAR_TYPE(T) || BASISU_IS_POD(T) || BASISU_IS_TRIVIALLY_COPYABLE(T) || (bitwise_copyable<T>::cFlag))

    #define BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(T) (BASISU_IS_BITWISE_COPYABLE(T) || (bitwise_movable<T>::cFlag))

    #define BASISU_HAS_DESTRUCTOR(T) ((!scalar_type<T>::cFlag) && (!__is_pod(T)))

    typedef char(&yes_t)[1];
    typedef char(&no_t)[2];

    template <class U> yes_t class_test(int U::*);
    template <class U> no_t class_test(...);

    template <class T> struct is_class
    {
        enum { value = (sizeof(class_test<T>(0)) == sizeof(yes_t)) };
    };

    template <typename T> struct is_pointer
    {
        enum { value = false };
    };

    template <typename T> struct is_pointer<T*>
    {
        enum { value = true };
    };

    struct empty_type { };

    BASISU_DEFINE_BITWISE_COPYABLE(empty_type);
    BASISU_DEFINE_BITWISE_MOVABLE(empty_type);

    template<typename T> struct rel_ops
    {
        friend bool operator!=(const T& x, const T& y) { return (!(x == y)); }
        friend bool operator> (const T& x, const T& y) { return (y < x); }
        friend bool operator<=(const T& x, const T& y) { return (!(y < x)); }
        friend bool operator>=(const T& x, const T& y) { return (!(x < y)); }
    };

    struct elemental_vector
    {
        void* m_p;
        uint32_t m_size;
        uint32_t m_capacity;

        typedef void (*object_mover)(void* pDst, void* pSrc, uint32_t num);

        bool increase_capacity(uint32_t min_new_capacity, bool grow_hint, uint32_t element_size, object_mover pRelocate, bool nofail);
    };

    template<typename T>
    class vector : public rel_ops< vector<T> >
    {
    public:
        typedef T* iterator;
        typedef const T* const_iterator;
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef T* pointer;
        typedef const T* const_pointer;

        inline vector() :
            m_p(NULL),
            m_size(0),
            m_capacity(0)
        {
        }

        inline vector(uint32_t n, const T& init) :
            m_p(NULL),
            m_size(0),
            m_capacity(0)
        {
            increase_capacity(n, false);
            construct_array(m_p, n, init);
            m_size = n;
        }

        inline vector(const vector& other) :
            m_p(NULL),
            m_size(0),
            m_capacity(0)
        {
            increase_capacity(other.m_size, false);

            m_size = other.m_size;

            if (BASISU_IS_BITWISE_COPYABLE(T))
            {
                if ((m_p) && (other.m_p))
                    memcpy(m_p, other.m_p, m_size * sizeof(T));
            }
            else
            {
                T* pDst = m_p;
                const T* pSrc = other.m_p;
                for (uint32_t i = m_size; i > 0; i--)
                    construct(pDst++, *pSrc++);
            }
        }

        inline explicit vector(size_t size) :
            m_p(NULL),
            m_size(0),
            m_capacity(0)
        {
            resize(size);
        }

        inline ~vector()
        {
            if (m_p)
            {
                scalar_type<T>::destruct_array(m_p, m_size);
                free(m_p);
                m_p = nullptr;
            }
        }

        inline vector& operator= (const vector& other)
        {
            if (this == &other)
                return *this;

            if (m_capacity >= other.m_size)
                resize(0);
            else
            {
                clear();
                increase_capacity(other.m_size, false);
            }

            if (BASISU_IS_BITWISE_COPYABLE(T))
            {
                if ((m_p) && (other.m_p))
                    memcpy(m_p, other.m_p, other.m_size * sizeof(T));
            }
            else
            {
                T* pDst = m_p;
                const T* pSrc = other.m_p;
                for (uint32_t i = other.m_size; i > 0; i--)
                    construct(pDst++, *pSrc++);
            }

            m_size = other.m_size;

            return *this;
        }

        BASISU_FORCE_INLINE const T* begin() const { return m_p; }
        BASISU_FORCE_INLINE T* begin() { return m_p; }

        BASISU_FORCE_INLINE const T* end() const { return m_p + m_size; }
        BASISU_FORCE_INLINE T* end() { return m_p + m_size; }

        BASISU_FORCE_INLINE bool empty() const { return !m_size; }
        BASISU_FORCE_INLINE uint32_t size() const { return m_size; }
        BASISU_FORCE_INLINE uint32_t size_in_bytes() const { return m_size * sizeof(T); }
        BASISU_FORCE_INLINE uint32_t capacity() const { return m_capacity; }

        // operator[] will assert on out-of-range indices, but in final builds there is no (and will never be any) range checking on this method.
        //BASISU_FORCE_INLINE const T& operator[] (uint32_t i) const { assert(i < m_size); return m_p[i]; }
        //BASISU_FORCE_INLINE T& operator[] (uint32_t i) { assert(i < m_size); return m_p[i]; }

    #if !BASISU_VECTOR_FORCE_CHECKING
        BASISU_FORCE_INLINE const T& operator[] (size_t i) const { assert(i < m_size); return m_p[i]; }
        BASISU_FORCE_INLINE T& operator[] (size_t i) { assert(i < m_size); return m_p[i]; }
    #else
        BASISU_FORCE_INLINE const T& operator[] (size_t i) const
        {
            if (i >= m_size)
            {
                fprintf(stderr, "operator[] invalid index: %u, max entries %u, type size %u\n", (uint32_t)i, m_size, (uint32_t)sizeof(T));
                abort();
            }
            return m_p[i];
        }
        BASISU_FORCE_INLINE T& operator[] (size_t i)
        {
            if (i >= m_size)
            {
                fprintf(stderr, "operator[] invalid index: %u, max entries %u, type size %u\n", (uint32_t)i, m_size, (uint32_t)sizeof(T));
                abort();
            }
            return m_p[i];
        }
    #endif

        // at() always includes range checking, even in final builds, unlike operator [].
        // The first element is returned if the index is out of range.
        BASISU_FORCE_INLINE const T& at(size_t i) const { assert(i < m_size); return (i >= m_size) ? m_p[0] : m_p[i]; }
        BASISU_FORCE_INLINE T& at(size_t i) { assert(i < m_size); return (i >= m_size) ? m_p[0] : m_p[i]; }

    #if !BASISU_VECTOR_FORCE_CHECKING
        BASISU_FORCE_INLINE const T& front() const { assert(m_size); return m_p[0]; }
        BASISU_FORCE_INLINE T& front() { assert(m_size); return m_p[0]; }

        BASISU_FORCE_INLINE const T& back() const { assert(m_size); return m_p[m_size - 1]; }
        BASISU_FORCE_INLINE T& back() { assert(m_size); return m_p[m_size - 1]; }
    #else
        BASISU_FORCE_INLINE const T& front() const
        {
            if (!m_size)
            {
                fprintf(stderr, "front: vector is empty, type size %u\n", (uint32_t)sizeof(T));
                abort();
            }
            return m_p[0];
        }
        BASISU_FORCE_INLINE T& front()
        {
            if (!m_size)
            {
                fprintf(stderr, "front: vector is empty, type size %u\n", (uint32_t)sizeof(T));
                abort();
            }
            return m_p[0];
        }

        BASISU_FORCE_INLINE const T& back() const
        {
            if (!m_size)
            {
                fprintf(stderr, "back: vector is empty, type size %u\n", (uint32_t)sizeof(T));
                abort();
            }
            return m_p[m_size - 1];
        }
        BASISU_FORCE_INLINE T& back()
        {
            if (!m_size)
            {
                fprintf(stderr, "back: vector is empty, type size %u\n", (uint32_t)sizeof(T));
                abort();
            }
            return m_p[m_size - 1];
        }
    #endif

        BASISU_FORCE_INLINE const T* get_ptr() const { return m_p; }
        BASISU_FORCE_INLINE T* get_ptr() { return m_p; }

        BASISU_FORCE_INLINE const T* data() const { return m_p; }
        BASISU_FORCE_INLINE T* data() { return m_p; }

        // clear() sets the container to empty, then frees the allocated block.
        inline void clear()
        {
            if (m_p)
            {
                scalar_type<T>::destruct_array(m_p, m_size);
                free(m_p);
                m_p = NULL;
                m_size = 0;
                m_capacity = 0;
            }
        }

        inline void clear_no_destruction()
        {
            if (m_p)
            {
                free(m_p);
                m_p = NULL;
                m_size = 0;
                m_capacity = 0;
            }
        }

        inline void reserve(size_t new_capacity_size_t)
        {
            if (new_capacity_size_t > UINT32_MAX)
            {
                assert(0);
                return;
            }

            uint32_t new_capacity = (uint32_t)new_capacity_size_t;

            if (new_capacity > m_capacity)
                increase_capacity(new_capacity, false);
            else if (new_capacity < m_capacity)
            {
                // Must work around the lack of a "decrease_capacity()" method.
                // This case is rare enough in practice that it's probably not worth implementing an optimized in-place resize.
                vector tmp;
                tmp.increase_capacity(helpers::maximum(m_size, new_capacity), false);
                tmp = *this;
                swap(tmp);
            }
        }

        inline bool try_reserve(size_t new_capacity_size_t)
        {
            if (new_capacity_size_t > UINT32_MAX)
            {
                assert(0);
                return false;
            }

            uint32_t new_capacity = (uint32_t)new_capacity_size_t;

            if (new_capacity > m_capacity)
            {
                if (!increase_capacity(new_capacity, false))
                    return false;
            }
            else if (new_capacity < m_capacity)
            {
                // Must work around the lack of a "decrease_capacity()" method.
                // This case is rare enough in practice that it's probably not worth implementing an optimized in-place resize.
                vector tmp;
                tmp.increase_capacity(helpers::maximum(m_size, new_capacity), false);
                tmp = *this;
                swap(tmp);
            }

            return true;
        }

        // resize(0) sets the container to empty, but does not free the allocated block.
        inline void resize(size_t new_size_size_t, bool grow_hint = false)
        {
            if (new_size_size_t > UINT32_MAX)
            {
                assert(0);
                return;
            }

            uint32_t new_size = (uint32_t)new_size_size_t;

            if (m_size != new_size)
            {
                if (new_size < m_size)
                    scalar_type<T>::destruct_array(m_p + new_size, m_size - new_size);
                else
                {
                    if (new_size > m_capacity)
                        increase_capacity(new_size, (new_size == (m_size + 1)) || grow_hint);

                    scalar_type<T>::construct_array(m_p + m_size, new_size - m_size);
                }

                m_size = new_size;
            }
        }

        inline bool try_resize(size_t new_size_size_t, bool grow_hint = false)
        {
            if (new_size_size_t > UINT32_MAX)
            {
                assert(0);
                return false;
            }

            uint32_t new_size = (uint32_t)new_size_size_t;

            if (m_size != new_size)
            {
                if (new_size < m_size)
                    scalar_type<T>::destruct_array(m_p + new_size, m_size - new_size);
                else
                {
                    if (new_size > m_capacity)
                    {
                        if (!increase_capacity(new_size, (new_size == (m_size + 1)) || grow_hint, true))
                            return false;
                    }

                    scalar_type<T>::construct_array(m_p + m_size, new_size - m_size);
                }

                m_size = new_size;
            }

            return true;
        }

        // If size >= capacity/2, reset() sets the container's size to 0 but doesn't free the allocated block (because the container may be similarly loaded in the future).
        // Otherwise it blows away the allocated block. See http://www.codercorner.com/blog/?p=494
        inline void reset()
        {
            if (m_size >= (m_capacity >> 1))
                resize(0);
            else
                clear();
        }

        inline T* enlarge(uint32_t i)
        {
            uint32_t cur_size = m_size;
            resize(cur_size + i, true);
            return get_ptr() + cur_size;
        }

        inline T* try_enlarge(uint32_t i)
        {
            uint32_t cur_size = m_size;
            if (!try_resize(cur_size + i, true))
                return NULL;
            return get_ptr() + cur_size;
        }

        BASISU_FORCE_INLINE void push_back(const T& obj)
        {
            assert(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));

            if (m_size >= m_capacity)
                increase_capacity(m_size + 1, true);

            scalar_type<T>::construct(m_p + m_size, obj);
            m_size++;
        }

        inline bool try_push_back(const T& obj)
        {
            assert(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));

            if (m_size >= m_capacity)
            {
                if (!increase_capacity(m_size + 1, true, true))
                    return false;
            }

            scalar_type<T>::construct(m_p + m_size, obj);
            m_size++;

            return true;
        }

        inline void push_back_value(T obj)
        {
            if (m_size >= m_capacity)
                increase_capacity(m_size + 1, true);

            scalar_type<T>::construct(m_p + m_size, obj);
            m_size++;
        }

        inline void pop_back()
        {
            assert(m_size);

            if (m_size)
            {
                m_size--;
                scalar_type<T>::destruct(&m_p[m_size]);
            }
        }

        inline void insert(uint32_t index, const T* p, uint32_t n)
        {
            assert(index <= m_size);
            if (!n)
                return;

            const uint32_t orig_size = m_size;
            resize(m_size + n, true);

            const uint32_t num_to_move = orig_size - index;

            if (BASISU_IS_BITWISE_COPYABLE(T))
            {
                // This overwrites the destination object bits, but bitwise copyable means we don't need to worry about destruction.
                memmove(m_p + index + n, m_p + index, sizeof(T) * num_to_move);
            }
            else
            {
                const T* pSrc = m_p + orig_size - 1;
                T* pDst = const_cast<T*>(pSrc) + n;

                for (uint32_t i = 0; i < num_to_move; i++)
                {
                    assert((pDst - m_p) < (int)m_size);
                    *pDst-- = *pSrc--;
                }
            }

            T* pDst = m_p + index;

            if (BASISU_IS_BITWISE_COPYABLE(T))
            {
                // This copies in the new bits, overwriting the existing objects, which is OK for copyable types that don't need destruction.
                memcpy(pDst, p, sizeof(T) * n);
            }
            else
            {
                for (uint32_t i = 0; i < n; i++)
                {
                    assert((pDst - m_p) < (int)m_size);
                    *pDst++ = *p++;
                }
            }
        }

        inline void insert(T* p, const T& obj)
        {
            int64_t ofs = p - begin();
            if ((ofs < 0) || (ofs > UINT32_MAX))
            {
                assert(0);
                return;
            }

            insert((uint32_t)ofs, &obj, 1);
        }

        // push_front() isn't going to be very fast - it's only here for usability.
        inline void push_front(const T& obj)
        {
            insert(0, &obj, 1);
        }

        vector& append(const vector& other)
        {
            if (other.m_size)
                insert(m_size, &other[0], other.m_size);
            return *this;
        }

        vector& append(const T* p, uint32_t n)
        {
            if (n)
                insert(m_size, p, n);
            return *this;
        }

        inline void erase(uint32_t start, uint32_t n)
        {
            assert((start + n) <= m_size);
            if ((start + n) > m_size)
                return;

            if (!n)
                return;

            const uint32_t num_to_move = m_size - (start + n);

            T* pDst = m_p + start;

            const T* pSrc = m_p + start + n;

            if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(T))
            {
                // This test is overly cautious.
                if ((!BASISU_IS_BITWISE_COPYABLE(T)) || (BASISU_HAS_DESTRUCTOR(T)))
                {
                    // Type has been marked explicitly as bitwise movable, which means we can move them around but they may need to be destructed.
                    // First destroy the erased objects.
                    scalar_type<T>::destruct_array(pDst, n);
                }

                // Copy "down" the objects to preserve, filling in the empty slots.
                memmove(pDst, pSrc, num_to_move * sizeof(T));
            }
            else
            {
                // Type is not bitwise copyable or movable.
                // Move them down one at a time by using the equals operator, and destroying anything that's left over at the end.
                T* pDst_end = pDst + num_to_move;
                while (pDst != pDst_end)
                    *pDst++ = *pSrc++;

                scalar_type<T>::destruct_array(pDst_end, n);
            }

            m_size -= n;
        }

        inline void erase(uint32_t index)
        {
            erase(index, 1);
        }

        inline void erase(T* p)
        {
            assert((p >= m_p) && (p < (m_p + m_size)));
            erase(static_cast<uint32_t>(p - m_p));
        }

        inline void erase(T *pFirst, T *pEnd)
        {
            assert(pFirst <= pEnd);
            assert(pFirst >= begin() && pFirst <= end());
            assert(pEnd >= begin() && pEnd <= end());

            int64_t ofs = pFirst - begin();
            if ((ofs < 0) || (ofs > UINT32_MAX))
            {
                assert(0);
                return;
            }

            int64_t n = pEnd - pFirst;
            if ((n < 0) || (n > UINT32_MAX))
            {
                assert(0);
                return;
            }

            erase((uint32_t)ofs, (uint32_t)n);
        }

        void erase_unordered(uint32_t index)
        {
            assert(index < m_size);

            if ((index + 1) < m_size)
                (*this)[index] = back();

            pop_back();
        }

        inline bool operator== (const vector& rhs) const
        {
            if (m_size != rhs.m_size)
                return false;
            else if (m_size)
            {
                if (scalar_type<T>::cFlag)
                    return memcmp(m_p, rhs.m_p, sizeof(T) * m_size) == 0;
                else
                {
                    const T* pSrc = m_p;
                    const T* pDst = rhs.m_p;
                    for (uint32_t i = m_size; i; i--)
                        if (!(*pSrc++ == *pDst++))
                            return false;
                }
            }

            return true;
        }

        inline bool operator< (const vector& rhs) const
        {
            const uint32_t min_size = helpers::minimum(m_size, rhs.m_size);

            const T* pSrc = m_p;
            const T* pSrc_end = m_p + min_size;
            const T* pDst = rhs.m_p;

            while ((pSrc < pSrc_end) && (*pSrc == *pDst))
            {
                pSrc++;
                pDst++;
            }

            if (pSrc < pSrc_end)
                return *pSrc < *pDst;

            return m_size < rhs.m_size;
        }

        inline void swap(vector& other)
        {
            std::swap(m_p, other.m_p);
            std::swap(m_size, other.m_size);
            std::swap(m_capacity, other.m_capacity);
        }

        inline void sort()
        {
            std::sort(begin(), end());
        }

        inline void unique()
        {
            if (!empty())
            {
                sort();

                resize(std::unique(begin(), end()) - begin());
            }
        }

        inline void reverse()
        {
            uint32_t j = m_size >> 1;
            for (uint32_t i = 0; i < j; i++)
                std::swap(m_p[i], m_p[m_size - 1 - i]);
        }

        inline int find(const T& key) const
        {
            const T* p = m_p;
            const T* p_end = m_p + m_size;

            uint32_t index = 0;

            while (p != p_end)
            {
                if (key == *p)
                    return index;

                p++;
                index++;
            }

            return cInvalidIndex;
        }

        inline int find_sorted(const T& key) const
        {
            if (m_size)
            {
                // Uniform binary search - Knuth Algorithm 6.2.1 U, unrolled twice.
                int i = ((m_size + 1) >> 1) - 1;
                int m = m_size;

                for (; ; )
                {
                    assert(i >= 0 && i < (int)m_size);
                    const T* pKey_i = m_p + i;
                    int cmp = key < *pKey_i;
                    #if defined(_DEBUG) || defined(DEBUG)
                    int cmp2 = *pKey_i < key;
                    assert((cmp != cmp2) || (key == *pKey_i));
                    #endif
                    if ((!cmp) && (key == *pKey_i)) return i;
                    m >>= 1;
                    if (!m) break;
                    cmp = -cmp;
                    i += (((m + 1) >> 1) ^ cmp) - cmp;
                    if (i < 0)
                        break;

                    assert(i >= 0 && i < (int)m_size);
                    pKey_i = m_p + i;
                    cmp = key < *pKey_i;
                    #if defined(_DEBUG) || defined(DEBUG)
                    cmp2 = *pKey_i < key;
                    assert((cmp != cmp2) || (key == *pKey_i));
                    #endif
                    if ((!cmp) && (key == *pKey_i)) return i;
                    m >>= 1;
                    if (!m) break;
                    cmp = -cmp;
                    i += (((m + 1) >> 1) ^ cmp) - cmp;
                    if (i < 0)
                        break;
                }
            }

            return cInvalidIndex;
        }

        template<typename Q>
        inline int find_sorted(const T& key, Q less_than) const
        {
            if (m_size)
            {
                // Uniform binary search - Knuth Algorithm 6.2.1 U, unrolled twice.
                int i = ((m_size + 1) >> 1) - 1;
                int m = m_size;

                for (; ; )
                {
                    assert(i >= 0 && i < (int)m_size);
                    const T* pKey_i = m_p + i;
                    int cmp = less_than(key, *pKey_i);
                    if ((!cmp) && (!less_than(*pKey_i, key))) return i;
                    m >>= 1;
                    if (!m) break;
                    cmp = -cmp;
                    i += (((m + 1) >> 1) ^ cmp) - cmp;
                    if (i < 0)
                        break;

                    assert(i >= 0 && i < (int)m_size);
                    pKey_i = m_p + i;
                    cmp = less_than(key, *pKey_i);
                    if ((!cmp) && (!less_than(*pKey_i, key))) return i;
                    m >>= 1;
                    if (!m) break;
                    cmp = -cmp;
                    i += (((m + 1) >> 1) ^ cmp) - cmp;
                    if (i < 0)
                        break;
                }
            }

            return cInvalidIndex;
        }
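
        // Usage sketch (illustrative): find_sorted() requires the elements to already be in
        // ascending order:
        //
        //    basisu::vector<int> v;
        //    v.push_back(30); v.push_back(10); v.push_back(20);
        //    v.sort();                      // v is now { 10, 20, 30 }
        //    int i = v.find_sorted(20);     // i == 1
        //    int j = v.find_sorted(25);     // j == cInvalidIndex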

        inline uint32_t count_occurences(const T& key) const
        {
            uint32_t c = 0;

            const T* p = m_p;
            const T* p_end = m_p + m_size;

            while (p != p_end)
            {
                if (key == *p)
                    c++;

                p++;
            }

            return c;
        }

        inline void set_all(const T& o)
        {
            if ((sizeof(T) == 1) && (scalar_type<T>::cFlag))
                memset(m_p, *reinterpret_cast<const uint8_t*>(&o), m_size);
            else
            {
                T* pDst = m_p;
                T* pDst_end = pDst + m_size;
                while (pDst != pDst_end)
                    *pDst++ = o;
            }
        }

        // Caller assumes ownership of the heap block associated with the container. Container is cleared.
        inline void* assume_ownership()
        {
            T* p = m_p;
            m_p = NULL;
            m_size = 0;
            m_capacity = 0;
            return p;
        }

        // Caller is granting ownership of the indicated heap block.
        // Block must have size constructed elements, and have enough room for capacity elements.
        // The block must have been allocated using malloc().
        // Important: This method is used in Basis Universal. If you change how this container allocates memory, you'll need to change any users of this method.
        inline bool grant_ownership(T* p, uint32_t size, uint32_t capacity)
        {
            // Try to prevent the caller from obviously shooting themselves in the foot.
            if (((p + capacity) > m_p) && (p < (m_p + m_capacity)))
            {
                // Can't grant ownership of a block inside the container itself!
                assert(0);
                return false;
            }

            if (size > capacity)
            {
                assert(0);
                return false;
            }

            if (!p)
            {
                if (capacity)
                {
                    assert(0);
                    return false;
                }
            }
            else if (!capacity)
            {
                assert(0);
                return false;
            }

            clear();
            m_p = p;
            m_size = size;
            m_capacity = capacity;
            return true;
        }
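
        // Usage sketch (illustrative): assume_ownership() and grant_ownership() move a malloc()'d
        // block in and out of the container without copying. The sizes below assume the original
        // allocation holds exactly 256 elements:
        //
        //    basisu::vector<uint8_t> buf(256);                     // 256 zero-initialized bytes
        //    uint8_t* pBlock = (uint8_t*)buf.assume_ownership();   // buf is now empty; we own pBlock
        //    basisu::vector<uint8_t> buf2;
        //    buf2.grant_ownership(pBlock, 256, 256);               // buf2 now owns (and will free()) pBlock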

    private:
        T* m_p;
        uint32_t m_size;
        uint32_t m_capacity;

        template<typename Q> struct is_vector { enum { cFlag = false }; };
        template<typename Q> struct is_vector< vector<Q> > { enum { cFlag = true }; };

        static void object_mover(void* pDst_void, void* pSrc_void, uint32_t num)
        {
            T* pSrc = static_cast<T*>(pSrc_void);
            T* const pSrc_end = pSrc + num;
            T* pDst = static_cast<T*>(pDst_void);

            while (pSrc != pSrc_end)
            {
                // placement new
                new (static_cast<void*>(pDst)) T(*pSrc);
                pSrc->~T();
                ++pSrc;
                ++pDst;
            }
        }

        inline bool increase_capacity(uint32_t min_new_capacity, bool grow_hint, bool nofail = false)
        {
            return reinterpret_cast<elemental_vector*>(this)->increase_capacity(
                min_new_capacity, grow_hint, sizeof(T),
                (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(T) || (is_vector<T>::cFlag)) ? NULL : object_mover, nofail);
        }
    };
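
    // Usage sketch (illustrative): basisu::vector is a 32-bit-size stand-in for std::vector, with
    // try_*() variants that return false on allocation failure:
    //
    //    basisu::vector<uint32_t> v;
    //    v.push_back(1);
    //    v.push_back(2);
    //    if (!v.try_resize(1000))
    //       { /* allocation failed - handle it */ }
    //    uint32_t last = v.back();
    //    int idx = v.find(2);            // 1, or cInvalidIndex if not present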

    template<typename T> struct bitwise_movable< vector<T> > { enum { cFlag = true }; };

    // Hash map

    template <typename T>
    struct hasher
    {
        inline size_t operator() (const T& key) const { return static_cast<size_t>(key); }
    };

    template <typename T>
    struct equal_to
    {
        inline bool operator()(const T& a, const T& b) const { return a == b; }
    };

    // Important: The Hasher and Equals objects must be bitwise movable!
    template<typename Key, typename Value = empty_type, typename Hasher = hasher<Key>, typename Equals = equal_to<Key> >
    class hash_map
    {
    public:
        class iterator;
        class const_iterator;

    private:
        friend class iterator;
        friend class const_iterator;

        enum state
        {
            cStateInvalid = 0,
            cStateValid = 1
        };

        enum
        {
            cMinHashSize = 4U
        };

    public:
        typedef hash_map<Key, Value, Hasher, Equals> hash_map_type;
        typedef std::pair<Key, Value> value_type;
        typedef Key key_type;
        typedef Value referent_type;
        typedef Hasher hasher_type;
        typedef Equals equals_type;

        hash_map() :
            m_hash_shift(32), m_num_valid(0), m_grow_threshold(0)
        {
        }

        hash_map(const hash_map& other) :
            m_values(other.m_values),
            m_hash_shift(other.m_hash_shift),
            m_hasher(other.m_hasher),
            m_equals(other.m_equals),
            m_num_valid(other.m_num_valid),
            m_grow_threshold(other.m_grow_threshold)
        {
        }

        hash_map& operator= (const hash_map& other)
        {
            if (this == &other)
                return *this;

            clear();

            m_values = other.m_values;
            m_hash_shift = other.m_hash_shift;
            m_num_valid = other.m_num_valid;
            m_grow_threshold = other.m_grow_threshold;
            m_hasher = other.m_hasher;
            m_equals = other.m_equals;

            return *this;
        }

        inline ~hash_map()
        {
            clear();
        }

        const Equals& get_equals() const { return m_equals; }
        Equals& get_equals() { return m_equals; }

        void set_equals(const Equals& equals) { m_equals = equals; }

        const Hasher& get_hasher() const { return m_hasher; }
        Hasher& get_hasher() { return m_hasher; }

        void set_hasher(const Hasher& hasher) { m_hasher = hasher; }

        inline void clear()
        {
            if (!m_values.empty())
            {
                if (BASISU_HAS_DESTRUCTOR(Key) || BASISU_HAS_DESTRUCTOR(Value))
                {
                    node* p = &get_node(0);
                    node* p_end = p + m_values.size();

                    uint32_t num_remaining = m_num_valid;
                    while (p != p_end)
                    {
                        if (p->state)
                        {
                            destruct_value_type(p);
                            num_remaining--;
                            if (!num_remaining)
                                break;
                        }

                        p++;
                    }
                }

                m_values.clear_no_destruction();

                m_hash_shift = 32;
                m_num_valid = 0;
                m_grow_threshold = 0;
            }
        }

        inline void reset()
        {
            if (!m_num_valid)
                return;

            if (BASISU_HAS_DESTRUCTOR(Key) || BASISU_HAS_DESTRUCTOR(Value))
            {
                node* p = &get_node(0);
                node* p_end = p + m_values.size();

                uint32_t num_remaining = m_num_valid;
                while (p != p_end)
                {
                    if (p->state)
                    {
                        destruct_value_type(p);
                        p->state = cStateInvalid;

                        num_remaining--;
                        if (!num_remaining)
                            break;
                    }

                    p++;
                }
            }
            else if (sizeof(node) <= 32)
            {
                memset(&m_values[0], 0, m_values.size_in_bytes());
            }
            else
            {
                node* p = &get_node(0);
                node* p_end = p + m_values.size();

                uint32_t num_remaining = m_num_valid;
                while (p != p_end)
                {
                    if (p->state)
                    {
                        p->state = cStateInvalid;

                        num_remaining--;
                        if (!num_remaining)
                            break;
                    }

                    p++;
                }
            }

            m_num_valid = 0;
        }

        inline uint32_t size()
        {
            return m_num_valid;
        }

        inline uint32_t get_table_size()
        {
            return m_values.size();
        }

        inline bool empty()
        {
            return !m_num_valid;
        }

        inline void reserve(uint32_t new_capacity)
        {
            uint64_t new_hash_size = helpers::maximum(1U, new_capacity);

            new_hash_size = new_hash_size * 2ULL;

            if (!helpers::is_power_of_2(new_hash_size))
                new_hash_size = helpers::next_pow2(new_hash_size);

            new_hash_size = helpers::maximum<uint64_t>(cMinHashSize, new_hash_size);

            new_hash_size = helpers::minimum<uint64_t>(0x80000000UL, new_hash_size);

            if (new_hash_size > m_values.size())
                rehash((uint32_t)new_hash_size);
        }

        class iterator
        {
            friend class hash_map<Key, Value, Hasher, Equals>;
            friend class hash_map<Key, Value, Hasher, Equals>::const_iterator;

        public:
            inline iterator() : m_pTable(NULL), m_index(0) { }
            inline iterator(hash_map_type& table, uint32_t index) : m_pTable(&table), m_index(index) { }
            inline iterator(const iterator& other) : m_pTable(other.m_pTable), m_index(other.m_index) { }

            inline iterator& operator= (const iterator& other)
            {
                m_pTable = other.m_pTable;
                m_index = other.m_index;
                return *this;
            }

            // post-increment
            inline iterator operator++(int)
            {
                iterator result(*this);
                ++*this;
                return result;
            }

            // pre-increment
            inline iterator& operator++()
            {
                probe();
                return *this;
            }

            inline value_type& operator*() const { return *get_cur(); }
            inline value_type* operator->() const { return get_cur(); }

            inline bool operator == (const iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
            inline bool operator != (const iterator& b) const { return !(*this == b); }
            inline bool operator == (const const_iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
            inline bool operator != (const const_iterator& b) const { return !(*this == b); }

        private:
            hash_map_type* m_pTable;
            uint32_t m_index;

            inline value_type* get_cur() const
            {
                assert(m_pTable && (m_index < m_pTable->m_values.size()));
                assert(m_pTable->get_node_state(m_index) == cStateValid);

                return &m_pTable->get_node(m_index);
            }

            inline void probe()
            {
                assert(m_pTable);
                m_index = m_pTable->find_next(m_index);
            }
        };

        class const_iterator
        {
            friend class hash_map<Key, Value, Hasher, Equals>;
            friend class hash_map<Key, Value, Hasher, Equals>::iterator;

        public:
            inline const_iterator() : m_pTable(NULL), m_index(0) { }
            inline const_iterator(const hash_map_type& table, uint32_t index) : m_pTable(&table), m_index(index) { }
            inline const_iterator(const iterator& other) : m_pTable(other.m_pTable), m_index(other.m_index) { }
            inline const_iterator(const const_iterator& other) : m_pTable(other.m_pTable), m_index(other.m_index) { }

            inline const_iterator& operator= (const const_iterator& other)
            {
                m_pTable = other.m_pTable;
                m_index = other.m_index;
                return *this;
            }

            inline const_iterator& operator= (const iterator& other)
            {
                m_pTable = other.m_pTable;
                m_index = other.m_index;
                return *this;
            }

            // post-increment
            inline const_iterator operator++(int)
            {
                const_iterator result(*this);
                ++*this;
                return result;
            }

            // pre-increment
            inline const_iterator& operator++()
            {
                probe();
                return *this;
            }

            inline const value_type& operator*() const { return *get_cur(); }
            inline const value_type* operator->() const { return get_cur(); }

            inline bool operator == (const const_iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
            inline bool operator != (const const_iterator& b) const { return !(*this == b); }
            inline bool operator == (const iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
            inline bool operator != (const iterator& b) const { return !(*this == b); }

        private:
            const hash_map_type* m_pTable;
            uint32_t m_index;

            inline const value_type* get_cur() const
            {
                assert(m_pTable && (m_index < m_pTable->m_values.size()));
                assert(m_pTable->get_node_state(m_index) == cStateValid);

                return &m_pTable->get_node(m_index);
            }

            inline void probe()
            {
                assert(m_pTable);
                m_index = m_pTable->find_next(m_index);
            }
        };

        inline const_iterator begin() const
        {
            if (!m_num_valid)
                return end();

            return const_iterator(*this, find_next(UINT32_MAX));
        }

        inline const_iterator end() const
        {
            return const_iterator(*this, m_values.size());
        }

        inline iterator begin()
        {
            if (!m_num_valid)
                return end();

            return iterator(*this, find_next(UINT32_MAX));
        }

        inline iterator end()
        {
            return iterator(*this, m_values.size());
        }

        // insert_result.first will always point to the inserted key/value (or the already existing key/value).
        // insert_result.second will be true if a new key/value was inserted, or false if the key already existed (in which case first will point to the already existing value).
        typedef std::pair<iterator, bool> insert_result;

        inline insert_result insert(const Key& k, const Value& v = Value())
        {
            insert_result result;
            if (!insert_no_grow(result, k, v))
            {
                grow();

                // This must succeed.
                if (!insert_no_grow(result, k, v))
                {
                    fprintf(stderr, "insert() failed");
                    abort();
                }
            }

            return result;
        }

        inline insert_result insert(const value_type& v)
        {
            return insert(v.first, v.second);
        }

        inline const_iterator find(const Key& k) const
        {
            return const_iterator(*this, find_index(k));
        }

        inline iterator find(const Key& k)
        {
            return iterator(*this, find_index(k));
        }

        inline bool erase(const Key& k)
        {
            uint32_t i = find_index(k);

            if (i >= m_values.size())
                return false;

            node* pDst = &get_node(i);
            destruct_value_type(pDst);
            pDst->state = cStateInvalid;

            m_num_valid--;

            for (; ; )
            {
                uint32_t r, j = i;

                node* pSrc = pDst;

                do
                {
                    if (!i)
                    {
                        i = m_values.size() - 1;
                        pSrc = &get_node(i);
                    }
                    else
                    {
                        i--;
                        pSrc--;
                    }

                    if (!pSrc->state)
                        return true;

                    r = hash_key(pSrc->first);

                } while ((i <= r && r < j) || (r < j && j < i) || (j < i && i <= r));

                move_node(pDst, pSrc);

                pDst = pSrc;
            }
        }

        inline void swap(hash_map_type& other)
        {
            m_values.swap(other.m_values);
            std::swap(m_hash_shift, other.m_hash_shift);
            std::swap(m_num_valid, other.m_num_valid);
            std::swap(m_grow_threshold, other.m_grow_threshold);
            std::swap(m_hasher, other.m_hasher);
            std::swap(m_equals, other.m_equals);
        }

    private:
        struct node : public value_type
        {
            uint8_t state;
        };

        static inline void construct_value_type(value_type* pDst, const Key& k, const Value& v)
        {
            if (BASISU_IS_BITWISE_COPYABLE(Key))
                memcpy(&pDst->first, &k, sizeof(Key));
            else
                scalar_type<Key>::construct(&pDst->first, k);

            if (BASISU_IS_BITWISE_COPYABLE(Value))
                memcpy(&pDst->second, &v, sizeof(Value));
            else
                scalar_type<Value>::construct(&pDst->second, v);
        }

        static inline void construct_value_type(value_type* pDst, const value_type* pSrc)
        {
            if ((BASISU_IS_BITWISE_COPYABLE(Key)) && (BASISU_IS_BITWISE_COPYABLE(Value)))
            {
                memcpy(pDst, pSrc, sizeof(value_type));
            }
            else
            {
                if (BASISU_IS_BITWISE_COPYABLE(Key))
                    memcpy(&pDst->first, &pSrc->first, sizeof(Key));
                else
                    scalar_type<Key>::construct(&pDst->first, pSrc->first);

                if (BASISU_IS_BITWISE_COPYABLE(Value))
                    memcpy(&pDst->second, &pSrc->second, sizeof(Value));
                else
                    scalar_type<Value>::construct(&pDst->second, pSrc->second);
            }
        }

        static inline void destruct_value_type(value_type* p)
        {
            scalar_type<Key>::destruct(&p->first);
            scalar_type<Value>::destruct(&p->second);
        }

        // Moves *pSrc to *pDst efficiently.
        // pDst should NOT be constructed on entry.
        static inline void move_node(node* pDst, node* pSrc, bool update_src_state = true)
        {
            assert(!pDst->state);

            if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Key) && BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Value))
            {
                memcpy(pDst, pSrc, sizeof(node));
            }
            else
            {
                if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Key))
                    memcpy(&pDst->first, &pSrc->first, sizeof(Key));
                else
                {
                    scalar_type<Key>::construct(&pDst->first, pSrc->first);
                    scalar_type<Key>::destruct(&pSrc->first);
                }

                if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Value))
                    memcpy(&pDst->second, &pSrc->second, sizeof(Value));
                else
                {
                    scalar_type<Value>::construct(&pDst->second, pSrc->second);
                    scalar_type<Value>::destruct(&pSrc->second);
                }

                pDst->state = cStateValid;
            }

            if (update_src_state)
                pSrc->state = cStateInvalid;
        }

        struct raw_node
        {
            inline raw_node()
            {
                node* p = reinterpret_cast<node*>(this);
                p->state = cStateInvalid;
            }

            inline ~raw_node()
            {
                node* p = reinterpret_cast<node*>(this);
                if (p->state)
                    hash_map_type::destruct_value_type(p);
            }

            inline raw_node(const raw_node& other)
            {
                node* pDst = reinterpret_cast<node*>(this);
                const node* pSrc = reinterpret_cast<const node*>(&other);

                if (pSrc->state)
                {
                    hash_map_type::construct_value_type(pDst, pSrc);
                    pDst->state = cStateValid;
                }
                else
                    pDst->state = cStateInvalid;
            }

            inline raw_node& operator= (const raw_node& rhs)
            {
                if (this == &rhs)
                    return *this;

                node* pDst = reinterpret_cast<node*>(this);
                const node* pSrc = reinterpret_cast<const node*>(&rhs);

                if (pSrc->state)
                {
                    if (pDst->state)
                    {
                        pDst->first = pSrc->first;
                        pDst->second = pSrc->second;
                    }
                    else
                    {
                        hash_map_type::construct_value_type(pDst, pSrc);
                        pDst->state = cStateValid;
                    }
                }
                else if (pDst->state)
                {
                    hash_map_type::destruct_value_type(pDst);
                    pDst->state = cStateInvalid;
                }

                return *this;
            }

            uint8_t m_bits[sizeof(node)];
        };

        typedef basisu::vector<raw_node> node_vector;

        node_vector m_values;
        uint32_t m_hash_shift;

        Hasher m_hasher;
        Equals m_equals;

        uint32_t m_num_valid;

        uint32_t m_grow_threshold;

        inline uint32_t hash_key(const Key& k) const
        {
            assert((1U << (32U - m_hash_shift)) == m_values.size());

            uint32_t hash = static_cast<uint32_t>(m_hasher(k));

            // Fibonacci hashing
            hash = (2654435769U * hash) >> m_hash_shift;

            assert(hash < m_values.size());
            return hash;
        }
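
        // Note: 2654435769 == round(2^32 / phi), where phi is the golden ratio. Multiplying by
        // this constant scrambles the hash bits (Knuth's multiplicative hashing), and the shift
        // keeps the top (32 - m_hash_shift) bits, i.e. an index into the power-of-2 sized table.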

        inline const node& get_node(uint32_t index) const
        {
            return *reinterpret_cast<const node*>(&m_values[index]);
        }

        inline node& get_node(uint32_t index)
        {
            return *reinterpret_cast<node*>(&m_values[index]);
        }

        inline state get_node_state(uint32_t index) const
        {
            return static_cast<state>(get_node(index).state);
        }

        inline void set_node_state(uint32_t index, bool valid)
        {
            get_node(index).state = valid;
        }

        inline void grow()
        {
            uint64_t n = m_values.size() * 3ULL; // was * 2

            if (!helpers::is_power_of_2(n))
                n = helpers::next_pow2(n);

            if (n > 0x80000000UL)
                n = 0x80000000UL;

            rehash(helpers::maximum<uint32_t>(cMinHashSize, (uint32_t)n));
        }

        inline void rehash(uint32_t new_hash_size)
        {
            assert(new_hash_size >= m_num_valid);
            assert(helpers::is_power_of_2(new_hash_size));

            if ((new_hash_size < m_num_valid) || (new_hash_size == m_values.size()))
                return;

            hash_map new_map;
            new_map.m_values.resize(new_hash_size);
            new_map.m_hash_shift = 32U - helpers::floor_log2i(new_hash_size);
            assert(new_hash_size == (1U << (32U - new_map.m_hash_shift)));
            new_map.m_grow_threshold = UINT_MAX;

            node* pNode = reinterpret_cast<node*>(m_values.begin());
            node* pNode_end = pNode + m_values.size();

            while (pNode != pNode_end)
            {
                if (pNode->state)
                {
                    new_map.move_into(pNode);

                    if (new_map.m_num_valid == m_num_valid)
                        break;
                }

                pNode++;
            }

            new_map.m_grow_threshold = (new_hash_size + 1U) >> 1U;

            m_values.clear_no_destruction();
            m_hash_shift = 32;

            swap(new_map);
        }

        inline uint32_t find_next(uint32_t index) const
        {
            index++;

            if (index >= m_values.size())
                return index;

            const node* pNode = &get_node(index);

            for (; ; )
            {
                if (pNode->state)
                    break;

                if (++index >= m_values.size())
                    break;

                pNode++;
            }

            return index;
        }

        inline uint32_t find_index(const Key& k) const
        {
            if (m_num_valid)
            {
                uint32_t index = hash_key(k);
                const node* pNode = &get_node(index);

                if (pNode->state)
                {
                    if (m_equals(pNode->first, k))
                        return index;

                    const uint32_t orig_index = index;

                    for (; ; )
                    {
                        if (!index)
                        {
                            index = m_values.size() - 1;
                            pNode = &get_node(index);
                        }
                        else
                        {
                            index--;
                            pNode--;
                        }

                        if (index == orig_index)
                            break;

                        if (!pNode->state)
                            break;

                        if (m_equals(pNode->first, k))
                            return index;
                    }
                }
            }

            return m_values.size();
        }

        inline bool insert_no_grow(insert_result& result, const Key& k, const Value& v = Value())
        {
            if (!m_values.size())
                return false;

            uint32_t index = hash_key(k);
            node* pNode = &get_node(index);

            if (pNode->state)
            {
                if (m_equals(pNode->first, k))
                {
                    result.first = iterator(*this, index);
                    result.second = false;
                    return true;
                }

                const uint32_t orig_index = index;

                for (; ; )
                {
                    if (!index)
                    {
                        index = m_values.size() - 1;
                        pNode = &get_node(index);
                    }
                    else
                    {
                        index--;
                        pNode--;
                    }

                    if (orig_index == index)
                        return false;

                    if (!pNode->state)
                        break;

                    if (m_equals(pNode->first, k))
                    {
                        result.first = iterator(*this, index);
                        result.second = false;
                        return true;
                    }
                }
            }

            if (m_num_valid >= m_grow_threshold)
                return false;

            construct_value_type(pNode, k, v);

            pNode->state = cStateValid;

            m_num_valid++;
            assert(m_num_valid <= m_values.size());

            result.first = iterator(*this, index);
            result.second = true;

            return true;
        }

        inline void move_into(node* pNode)
        {
            uint32_t index = hash_key(pNode->first);
            node* pDst_node = &get_node(index);

            if (pDst_node->state)
            {
                const uint32_t orig_index = index;

                for (; ; )
                {
                    if (!index)
                    {
                        index = m_values.size() - 1;
                        pDst_node = &get_node(index);
                    }
                    else
                    {
                        index--;
                        pDst_node--;
                    }

                    if (index == orig_index)
                    {
                        assert(false);
                        return;
                    }

                    if (!pDst_node->state)
                        break;
                }
            }

            move_node(pDst_node, pNode, false);

            m_num_valid++;
        }
    };
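
    // Usage sketch (illustrative): the default basisu::hasher<Key> just static_cast's the key to
    // size_t, so integral keys work out of the box:
    //
    //    basisu::hash_map<uint32_t, uint32_t> counts;
    //    basisu::hash_map<uint32_t, uint32_t>::insert_result res = counts.insert(42, 1);
    //    if (!res.second)
    //       (*res.first).second++;       // key already present - bump its value
    //    bool erased = counts.erase(42);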

    template<typename Key, typename Value, typename Hasher, typename Equals>
    struct bitwise_movable< hash_map<Key, Value, Hasher, Equals> > { enum { cFlag = true }; };

    #if BASISU_HASHMAP_TEST
    extern void hash_map_test();
    #endif

} // namespace basisu

namespace std
{
    template<typename T>
    inline void swap(basisu::vector<T>& a, basisu::vector<T>& b)
    {
        a.swap(b);
    }

    template<typename Key, typename Value, typename Hasher, typename Equals>
    inline void swap(basisu::hash_map<Key, Value, Hasher, Equals>& a, basisu::hash_map<Key, Value, Hasher, Equals>& b)
    {
        a.swap(b);
    }

} // namespace std